text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
# Opcodes for the explicit DFS work stack: visit a vertex, examine an
# outgoing edge, or finish (post-order) a vertex.
VISIT, VISIT_EDGE, POST_VISIT = range(3)

def strongly_connected_components(graph):
    '''
    Find strongly connected components in a graph using iterative
    depth-first search.  Yields each SCC as a list of vertices; `graph`
    maps each vertex to an iterable of its successors.
    Based on:
    http://code.activestate.com/recipes/578507-strongly-connected-components-of-a-directed-graph/
    '''
    identified = set()   # vertices already emitted as part of some SCC
    stack = []           # DFS stack of visited, not-yet-identified vertices
    index = {}           # vertex -> position at which it entered `stack`
    boundaries = []      # stack of indices marking candidate SCC roots
    for v in graph:
        if v not in index:
            todo = [(VISIT, v)]
            while todo:
                op, v = todo.pop()
                if op == VISIT:
                    index[v] = len(stack)
                    stack.append(v)
                    boundaries.append(index[v])
                    # Post-visit marker is pushed below the edges so it is
                    # processed only after all successors are explored.
                    todo.append((POST_VISIT, v))
                    todo.extend([(VISIT_EDGE, w) for w in graph[v]])
                elif op == VISIT_EDGE:
                    if v not in index:
                        todo.append((VISIT, v))
                    elif v not in identified:
                        # Edge back into the current DFS stack: collapse
                        # boundary candidates down to the target's position.
                        while index[v] < boundaries[-1]:
                            boundaries.pop()
                else:
                    # op == POST_VISIT
                    if boundaries[-1] == index[v]:
                        # v is the root of a completed SCC: pop it off the
                        # stack, mark its members, and emit it.
                        boundaries.pop()
                        scc = stack[index[v]:]
                        del stack[index[v]:]
                        identified.update(scc)
                        yield scc
| openvenues/libpostal | scripts/geodata/graph/scc.py | Python | mit | 1,392 | [
"VisIt"
] | 97b9f9a95f7b06f3119427284b3f56ccdffd576785a044645f4709f2e4d3f37b |
#encoding: utf-8
"""
tools.stats -- Toolbox functions for computing statistics
Exported namespace: bootstrap, smooth_pdf, integer_hist
Written by Joe Monaco
Center for Theoretical Neuroscience
Copyright (c) 2007-2008 Columbia University. All Rights Reserved.
"""
import os as _os
import numpy as _N
from sys import platform as _plat, stderr as _err
def bootstrap(X, N, H0, *args):
    """Get a one-tail p-value for an algorithmic sampling process
    H0(*args) must return a scalar null sample value.
    The sign of the returned p-value indicates whether X is less than (-) or
    greater than (+) the median of the sample distribution.
    Arguments:
    X -- the value for which to return a p-value
    N -- sampling size of the empirical distribution (beware O(n))
    H0 -- function that implements sampling process for the null result
    args -- additional arguments will be passed to H0
    """
    assert callable(H0), 'H0 must be a callable that returns a scalar sample'
    tail = 0
    # Count null samples at least as extreme as X (upper tail).
    # NOTE(review): xrange is Python 2 only; this module predates Python 3.
    for i in xrange(N):
        tail += int(H0(*args) >= X)
    if tail > float(N)/2:
        tail = tail - N # negative p-value for X less than median
    if not tail:
        # No null sample reached X: the p-value is below the 1/N resolution,
        # so report the smallest representable tail as an upper bound.
        _err.write('warning: bootstrap needs N > %d; returning upper bound\n'%N)
        tail = 1
    return tail / float(N)
def smooth_pdf(a, sd=None):
    """Get a smoothed pdf of an array of data for visualization
    Keyword arguments:
    sd -- S.D. of the gaussian kernel used to perform the smoothing (default is
    1/20 of the data range)
    Return 2-row (x, pdf(x)) smoothed probability density estimate.
    """
    from scipy.signal import gaussian, convolve
    from numpy import array, arange, cumsum, trapz, histogram, diff, r_, c_
    if sd is None:
        sd = 0.05 * a.ptp()  # default kernel width: 1/20 of the data range
    data = a.copy().flatten() # get 1D copy of array data
    nbins = len(data) > 1000 and len(data) or 1000 # num bins >~ O(len(data))
    # NOTE(review): histogram(..., normed=True) was removed in NumPy >= 1.24;
    # modern NumPy requires density=True (same meaning for uniform bins).
    f, l = histogram(data, bins=nbins, normed=True) # fine pdf
    sd_bins = sd * (float(nbins) / a.ptp()) # convert sd to bin units
    kern_size = int(10*sd_bins) # sufficient convolution kernel size
    g = gaussian(kern_size, sd_bins) # generate smoothing kernel
    c = cumsum(f, dtype='d') # raw fine-grained cdf
    cext = r_[array((0,)*(2*kern_size), 'd'), c,
        array((c[-1],)*(2*kern_size), 'd')] # wrap data to kill boundary effect
    cs = convolve(cext, g, mode='same') # smooth the extended cdf
    ps = diff(cs) # differentiate smooth cdf to get smooth pdf
    dl = l[1]-l[0] # get bin delta
    l = r_[arange(l[0]-kern_size*dl, l[0], dl), l,
        arange(l[-1]+dl, l[-1]+kern_size*dl, dl)] # pad index to match bounds
    ps = ps[kern_size:kern_size+len(l)] # crop pdf to same length as index
    ps /= trapz(ps, x=l) # normalize pdf integral to unity
    return c_[l, ps].T # return 2-row concatenation of x and pdf(x)
def integer_hist(a, relative=False):
    """Histogram an array using one bin per integer value.

    Parameters:
    a -- the data to be histogrammed (rounded; ndim > 1 is flattened)
    relative -- if True, return relative frequencies instead of raw counts

    Returns (center, count): integer bin centers and per-bin frequencies.
    """
    values = a.round().flatten()
    low, high = int(a.min()), int(a.max())
    center = _N.arange(low, high + 1)
    # Float dtype for frequencies, integer ('l') dtype for raw counts.
    count = _N.empty(center.shape[0], 'd' if relative else 'l')
    for pos, level in enumerate(center):
        count[pos] = (values == level).sum()
    if relative:
        count /= count.sum()
    return center, count
| jdmonaco/grid-remapping-model | src/tools/stats.py | Python | mit | 3,643 | [
"Gaussian"
] | e1b16e91e0ca11657591ff20ff803c968dd15473517578d3a93617f0fd5d2115 |
#!/usr/bin/env python
#
# -----------------------------------------------------------------------------
# Copyright (c) 2016 The Regents of the University of California
#
# This file is part of kevlar (http://github.com/dib-lab/kevlar) and is
# licensed under the MIT license: see LICENSE.
# -----------------------------------------------------------------------------
# Core libraries
import builtins
from gzip import open as gzopen
from os import makedirs
from os.path import dirname
import re
from subprocess import Popen, PIPE, check_call
import sys
from tempfile import TemporaryFile
# Third-party libraries
import khmer
import networkx
import pysam
# Internal modules
from kevlar import seqio
from kevlar import sketch
from kevlar import reference
from kevlar import cigar
from kevlar import varmap
from kevlar import vcf
from kevlar import evaluate
from kevlar.intervalforest import IntervalForest
from kevlar.readpair import ReadPair
from kevlar.readgraph import ReadGraph
from kevlar.mutablestring import MutableString
from kevlar.sequence import parse_augmented_fastx, print_augmented_fastx
from kevlar.sequence import revcom
from kevlar.seqio import parse_partitioned_reads, parse_single_partition
from kevlar.timer import Timer
from kevlar.progress import ProgressIndicator
# Subcommands and command-line interface
from kevlar import novel
from kevlar import filter
from kevlar import augment
from kevlar import mutate
from kevlar import assemble
from kevlar import count
from kevlar import partition
from kevlar import localize
from kevlar import call
from kevlar import alac
from kevlar import varfilter
from kevlar import simlike
from kevlar import dist
from kevlar import split
from kevlar import unband
from kevlar import gentrio
from kevlar import cli
# C extensions
from kevlar.alignment import contig_align as align
import kevlar.assembly
import kevlar.sequence
from kevlar._version import get_versions
__version__ = get_versions()['version']
del get_versions
logstream = None
teelog = False


def plog(*args, **kwargs):
    """Print logging output.

    Messages go to the module-level `logstream` when one is configured, and
    to stderr when no stream is set or `teelog` is enabled (or both).
    """
    destinations = []
    if logstream is not None:
        destinations.append(logstream)
    if logstream is None or teelog:
        destinations.append(sys.stderr)
    for destination in destinations:
        print(*args, **kwargs, file=destination)
def open(filename, mode):
    """Open `filename` for text reading or writing.

    '-' or None selects stdin/stdout; a '.gz' suffix selects transparent
    gzip (text-mode) compression.  Only modes 'r' and 'w' are accepted.
    """
    if mode not in ('r', 'w'):
        raise ValueError('invalid mode "{}"'.format(mode))
    if filename in ['-', None]:
        return sys.stdin if mode == 'r' else sys.stdout
    if filename.endswith('.gz'):
        return gzopen(filename, mode + 't')
    return builtins.open(filename, mode)
def mkdirp(path, trim=False):
    """Ensure a directory exists and return it.

    With trim=True the final path component is dropped (treated as a file
    name) and only the parent directory is created.
    """
    if trim:
        target = dirname(path)
    else:
        target = path
    makedirs(target, exist_ok=True)
    return target
def revcommin(seq):
    """Return the lexicographically smaller of `seq` and its reverse complement."""
    return min(seq, revcom(seq))
def same_seq(seq1, seq2, seq2revcom=None):
    """Test whether seq1 matches seq2 directly or as its reverse complement.

    A precomputed reverse complement may be supplied via `seq2revcom` to
    avoid recomputing it.
    """
    if seq2revcom is None:
        seq2revcom = revcom(seq2)
    return seq1 in (seq2, seq2revcom)
def to_gml(graph, outfilename, logfile=sys.stderr):
    """Write the given read graph to a GML file."""
    if not outfilename.endswith('.gml'):
        print('[kevlar] WARNING: GML files usually need extension .gml',
              file=logfile)
    networkx.write_gml(graph, outfilename, stringizer=str)
    print('[kevlar] graph written to {}'.format(outfilename), file=logfile)
def multi_file_iter_khmer(filenames):
    """Yield every read from each of the named sequence files, in order."""
    for infile in filenames:
        yield from khmer.ReadParser(infile)
def parse_bed(instream):
    """Yield (chrom, start, end, extra) tuples from a BED-formatted stream.

    Comment lines (leading '#') and blank lines are skipped; start/end are
    returned as ints and any additional columns as a list of strings.
    """
    for line in instream:
        stripped = line.strip()
        if line.startswith('#') or stripped == '':
            continue
        fields = re.split(r'\s+', stripped)
        yield fields[0], int(fields[1]), int(fields[2]), fields[3:]
def bedstream(bedfilelist):
    """Generate BED records from each file in `bedfilelist`, in order."""
    for bedfile in bedfilelist:
        instream = kevlar.open(bedfile, 'r')
        yield from parse_bed(instream)
def vcf_header(outstream, version='4.2', source='kevlar', infoheader=False):
    """Write a VCF header to `outstream`.

    version -- VCF specification version for the ##fileformat line
    source -- value for the ##source meta line
    infoheader -- when True, also declare the GT/VW/RW INFO fields
    """
    print('##fileformat=VCFv', version, sep='', file=outstream)
    print('##source=', source, sep='', file=outstream)
    if infoheader:
        # BUG FIX: INFO meta-lines must declare their key as "ID=<key>" per
        # the VCF spec; the bare "<GT," form is rejected by VCF parsers.
        print('##INFO=<ID=GT,Number=3,Type=String,Description="Genotypes of each '
              'individual in the trio (proband, mother, father)">',
              file=outstream)
        print('##INFO=<ID=VW,Number=1,Type=String,Description="Genomic interval '
              'bounding all k-mers that contain the alternate allele">',
              file=outstream)
        print('##INFO=<ID=RW,Number=1,Type=String,Description="Genomic interval '
              'bounding all k-mers that contain the reference allele">',
              file=outstream)
    # Mandatory column header line, tab-delimited.
    print('#CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO',
          sep='\t', file=outstream)
| dib-lab/kevlar | kevlar/__init__.py | Python | mit | 4,870 | [
"pysam"
] | 2abb6b3811893a781308f91a103f4d25ea8dc367ce3e9ce8e361f81642a791af |
from astroquery.simbad import Simbad
from astropy.io import fits
from astropy.wcs import WCS
from astropy.stats import mad_std
from astropy.table import Table
from photutils import fit_2dgaussian, CircularAperture, CircularAnnulus, aperture_photometry, find_peaks
import numpy as np
import os
import matplotlib.pyplot as plt
# Script configuration -- fill these in before running.
target_simbad = '...' # name in Simbad
directory = '...' # directory containing FITS images
save_to = '...' # save to this directory
filter = '...' # read from FITS header; NOTE(review): shadows builtin filter()
########################################################################################################################
# Ask Simbad to include FK5 coordinates and proper motions in query results.
Simbad.add_votable_fields('coo(fk5)','propermotions')
def calc_electrons(file, simbad):
    """Measure background-subtracted source counts for `simbad` in `file`.

    file -- path to a FITS image; HDU 0 holds data, HDU 2 per-pixel error
    simbad -- target name resolvable by Simbad

    Returns [mean counts, std of samples, per-sample sums, per-sample errors].
    """
    # Read in relevant information
    hdu = fits.open(file)
    data = hdu[0].data
    error = hdu[2].data
    wcs = WCS(hdu[0].header)
    targinfo = Simbad.query_object(simbad)
    # Determine RA/Dec of target from Simbad query
    targinfo_RA = targinfo['RA_fk5'][0]
    targRA = [float(targinfo_RA[:2]), float(targinfo_RA[3:5]), float(targinfo_RA[6:])]
    # Sexagesimal hours/minutes/seconds -> decimal degrees.
    RAdeg = targRA[0]*15 + targRA[1]/4 + targRA[2]/240
    dRA = targinfo['PMRA'][0] * (15/3600000.0) # minor correction for proper motion
    RA = RAdeg + dRA
    targinfo_Dec = targinfo['DEC_fk5'][0]
    targDec = [float(targinfo_Dec[1:3]), float(targinfo_Dec[4:6]), float(targinfo_Dec[7:])]
    Decdeg = (targDec[0]) + targDec[1]/60 + targDec[2]/3600
    if targinfo_Dec[0] == '-':
        Decdeg = np.negative(Decdeg) # makes negative declinations negative
    # NOTE(review): the PMDEC correction reuses the RA factor (15/3.6e6);
    # the 15x hours->degrees factor looks inapplicable to Dec -- confirm.
    dDec = targinfo['PMDEC'][0] * (15/3600000.0)
    Dec = Decdeg + dDec
    # Convert RA/Dec to pixels
    pix = wcs.all_world2pix(RA,Dec,0)
    xpix = int(pix[0]) # adding constants because reference pixel appears to be off (regardless of target)
    ypix = int(pix[1])
    # Trim data to 100x100 pixels near target; fit 2D Gaussian to find center pixel of target
    centzoom = data[ypix-45:ypix+45, xpix-45:xpix+45]
    centroid = fit_2dgaussian(centzoom)
    xcent = xpix - 45 + int(centroid.x_mean.value)
    ycent = ypix - 45 + int(centroid.y_mean.value)
    #plt.figure()
    #plt.imshow(centzoom, origin='lower', cmap='gray', vmin=np.median(data), vmax=np.median(data) + 200)
    #plt.colorbar()
    #plt.show()
    # Find max pixel value in zoomed area, median of data, and median absolute deviation of data
    peak = np.max(centzoom)
    median = np.median(data) # global background estimate
    sigma = mad_std(data) # like std of background, but more resilient to outliers
    # Find an appropriate aperture radius: grow an annulus until its mean
    # pixel value falls back to the background level.
    radius = 1
    an_mean = peak # peak is just a starting value that will always be greater than the median
    while an_mean > median + sigma:
        annulus = CircularAnnulus((xcent, ycent), r_in=radius, r_out=radius + 1)
        an_sum = aperture_photometry(data,annulus)
        an_mean = an_sum['aperture_sum'][0] / annulus.area()
        radius += 1 # radius is selected once mean pix value inside annulus is within 2 sigma of median
    # NOTE(review): the adaptive radius computed above is immediately
    # overridden with a fixed 35-pixel aperture -- confirm this is intended.
    radius = 35
    # Draw aperture around target, sum pixel values, and calculate error
    aperture = CircularAperture((xcent, ycent), r=radius)
    ap_table = aperture_photometry(data, aperture, error=error)
    ap_sum = ap_table['aperture_sum'][0]
    ap_error = ap_table['aperture_sum_err'][0]
    #print ap_table
    #plt.imshow(data, origin='lower', interpolation='nearest', vmin=np.median(data), vmax=np.median(data) + 200)
    #aperture.plot()
    #plt.show()
    # Find appropriate sky aperture, sum pixel values, calculate error
    def find_sky():
        # Randomly place a sky aperture in a 500x500 region around the
        # target, rejecting placements that overlap the source or the edge.
        apzoom = data[ycent-250:ycent+250, xcent-250:xcent+250] # trim data to 400x400 region centered on target
        errorzoom = error[ycent-250:ycent+250, xcent-250:xcent+250]
        rand_x = np.random.randint(0,500) # randomly select pixel in region
        rand_y = np.random.randint(0,500)
        if rand_x in range(250-3*radius,250+3*radius)\
                or rand_y in range(250-3*radius,250+3*radius):
            return find_sky() # reselect pixels if aperture overlaps target
        elif rand_x not in range(2*radius, 500-2*radius)\
                or rand_y not in range(2*radius, 500-2*radius):
            return find_sky()
        else:
            sky = CircularAperture((rand_x,rand_y), r=radius)
            sky_table = aperture_photometry(apzoom, sky, error=errorzoom)
            sky_sum = sky_table['aperture_sum'][0]
            sky_error = sky_table['aperture_sum_err'][0]
            sky_x = int(sky_table['xcenter'][0].value)
            sky_y = int(sky_table['ycenter'][0].value)
            sky_zoom = apzoom[sky_y-radius:sky_y+radius, sky_x - radius:sky_x + radius]
            sky_avg = sky_sum/sky.area() # reselect pixels if bright source is in aperture
            if np.max(sky_zoom) < median + 5*sigma and sky_avg > 0:
                #plt.imshow(apzoom, origin='lower', interpolation='nearest', vmin=np.median(data), vmax=np.median(data) + 200)
                #sky.plot()
                #plt.show()
                return [sky_sum, sky_error]
            else:
                return find_sky()
    # Calculate final electron count with uncertainty
    sample_size = 100
    list = np.arange(0,sample_size) # NOTE(review): shadows builtin list()
    sums = [] # list source-sky value of each iteration
    errors = []
    # NOTE(review): find_sky() is called twice per iteration, so the sum and
    # its error come from two different random sky apertures -- confirm.
    for i in list:
        final_sum = ap_sum - find_sky()[0]
        sums.append(final_sum)
        final_error = ap_error + find_sky()[1]
        errors.append(final_error)
    electron_counts = np.mean(sums)
    uncert = np.std(sums)
    return [electron_counts, uncert, sums, errors] # return mean value of source-sky and propagated error
# Per-frame accumulators for the output table.
name_list = []
number_list = []
time_list = []
HA_list = []
ZA_list = []
air_list = []
filter_list = []
exp_list = []
electon_list = []
elec_error_list = []
mag_list = []
mag_error_list = []
all_counts_I = []
all_errors_I = []
Iexp = []
all_counts_R = []
all_errors_R = []
Rexp = []
# Return counts and propagated error from each frame in target directory
# NOTE(review): this script uses Python 2 print statements and will not run
# under Python 3 without conversion.
for name in os.listdir(directory):
    ext = os.path.splitext(name)[1]
    if ext == '.fits':
        file = directory + '\\' + name  # Windows-style path separator
        hdu = fits.open(file)[0]
        if hdu.header['FILTERS'] == filter:
            targname = target_simbad
            number = hdu.header['OBSERNO']
            time = hdu.header['UT']
            hour_angle = hdu.header['HA']
            zenith_angle = hdu.header['ZA']
            airmass = hdu.header["AIRMASS"]
            filter = hdu.header['FILTERS']
            exposure = hdu.header['EXPTIME']
            result = calc_electrons(file=file, simbad=target_simbad)
            electrons = int(round(result[0]))
            elec_error = int(round(result[1]))
            # Instrumental magnitude with an arbitrary 25-mag zero point.
            ins_magnitude = round(-2.5*np.log10(electrons/exposure) + 25, 5)
            ins_magnitude_error = round((2.5*elec_error)/(electrons*np.log(10)), 5)
            name_list.append(str(targname))
            number_list.append(number)
            time_list.append(time)
            HA_list.append(hour_angle)
            ZA_list.append(zenith_angle)
            air_list.append(airmass)
            filter_list.append(filter)
            exp_list.append(exposure)
            electon_list.append(electrons)
            elec_error_list.append(elec_error)
            mag_list.append(ins_magnitude)
            mag_error_list.append(ins_magnitude_error)
            print number, filter, electrons, elec_error, ins_magnitude, ins_magnitude_error
# Put data in table and save in target directory
columns = 'Target', 'ObsNo', 'UT', 'HA', 'ZA', 'AirMass', 'Filter', 'IntTime', 'IntCounts', 'IC_error', 'Imag', 'IM_error'
data = [name_list, number_list, time_list, HA_list, ZA_list, air_list, filter_list, exp_list, electon_list,
        elec_error_list, mag_list, mag_error_list]
data_table = Table(data=data, names=columns, meta={'name': target_simbad})
data_table.show_in_browser(jsviewer=True)
table_name = '%s\\%s_%s_data.txt' % (save_to, target_simbad, filter)
if os.path.isfile(table_name) is True:
    print 'Data table already exists for the target \'%s\'' % target_simbad
else:
    data_table.write(table_name, format='ascii')
'''
import matplotlib.pyplot as plt
RA = 139.4375
Dec = 46.2069
file = 'D:\\UChicago\\Reduced Data\\2015Jun02\\Part 1\\1RXS_J091744.5+461229\\lmi.1RXS_J091744.5+461229_91_I.fits'
hdu = fits.open(file)
data = hdu[0].data
wcs = WCS(hdu[0].header)
pix = wcs.all_world2pix(RA,Dec,0)
xpix = int(pix[0]) + 5 # adding constants because reference pixel appears to be off (regardless of target)
ypix = int(pix[1]) - 30
# Trim data to 100x100 pixels near target; fit 2D Gaussian to find center pixel of target
centzoom = data[ypix-50:ypix+50, xpix-50:xpix+50]
plt.figure()
plt.imshow(centzoom, origin='lower', cmap='gray', vmin=np.median(data), vmax=np.median(data)+200)
plt.colorbar()
plt.show()
'''
| CalebHarada/DCT-photometry | Junk/aperture_photometry.py | Python | mit | 9,318 | [
"Gaussian"
] | aaf5a2d46b1021d33d3cfd7b3b01cacfc1bfcba3dfb24ba1ec02902e0cdc2df4 |
#!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2013-2015, PyStan developers
#
# This file is licensed under Version 3.0 of the GNU General Public
# License. See LICENSE for a text of the license.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# This file is part of PyStan.
#
# PyStan is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# PyStan is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyStan. If not, see <http://www.gnu.org/licenses/>.
#-----------------------------------------------------------------------------
import ast
import codecs
import os
import sys
# Read the long description with a context manager so the file handle is
# closed deterministically instead of leaking until garbage collection.
with open('README.rst') as _readme:
    LONG_DESCRIPTION = _readme.read()

# Package metadata consumed by setup_package() below.
NAME = 'pystan'
DESCRIPTION = 'Python interface to Stan, a package for Bayesian inference'
AUTHOR = 'PyStan Developers'
AUTHOR_EMAIL = 'stan-users@googlegroups.com'
URL = 'https://github.com/stan-dev/pystan'
LICENSE = 'GPLv3'
CLASSIFIERS = [
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 3',
    'Programming Language :: Cython',
    'Development Status :: 4 - Beta',
    'Environment :: Console',
    'Operating System :: OS Independent',
    'Intended Audience :: Developers',
    'Intended Audience :: Science/Research',
    'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
    'Topic :: Scientific/Engineering',
    'Topic :: Scientific/Engineering :: Information Analysis'
]
# VersionFinder adapted from django-compressor
class VersionFinder(ast.NodeVisitor):
    """AST visitor that records the string assigned to ``__version__``.

    Walk a parsed module with ``visit``; afterwards ``self.version`` holds
    the last string literal assigned to ``__version__`` (or None).
    """

    def __init__(self):
        # Last string assigned to __version__, or None if not seen.
        self.version = None

    def visit_Assign(self, node):
        # BUG FIX: tuple-unpacking or attribute targets (e.g. ``a, b = ...``
        # or ``obj.x = ...``) have no ``id`` attribute, so the original
        # ``node.targets[0].id`` raised AttributeError on such modules.
        if getattr(node.targets[0], 'id', None) == '__version__':
            self.version = node.value.s
def read(*parts):
    """Return the UTF-8 decoded text of the file at *parts*, resolved
    relative to this file's directory."""
    target = os.path.join(os.path.dirname(__file__), *parts)
    with codecs.open(target, encoding='utf-8') as handle:
        return handle.read()
def find_version(*parts):
    """Extract the ``__version__`` string from the source file at *parts*."""
    tree = ast.parse(read(*parts))
    finder = VersionFinder()
    finder.visit(tree)
    return finder.version
###############################################################################
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
if len(set(('develop', 'release', 'bdist_egg', 'bdist_rpm',
            'bdist_wininst', 'install_egg_info', 'build_sphinx',
            'egg_info', 'easy_install', 'upload', 'bdist_wheel',
            '--single-version-externally-managed',
            )).intersection(sys.argv)) > 0:
    import setuptools
    # Extra keyword arguments only honored when setuptools drives the build.
    extra_setuptools_args = dict(
        install_requires=['Cython >= 0.19', 'numpy >= 1.7'],
        zip_safe=False, # the package can run out of an .egg file
        include_package_data=True,
    )
else:
    extra_setuptools_args = dict()
###############################################################################
from distutils.errors import CCompilerError, DistutilsError
from distutils.extension import Extension

# Header search paths: Stan sources, the Stan math library, and the vendored
# Eigen and Boost trees.
stan_include_dirs = ["pystan/stan/src",
                     "pystan/math",
                     "pystan/stan/lib/eigen_3.2.4",
                     "pystan/stan/lib/boost_1.58.0"]

# Preprocessor defines that silence Boost/Eigen debug machinery for faster,
# smaller extension builds.
stan_macros = [
    ('BOOST_RESULT_OF_USE_TR1', None),
    ('BOOST_NO_DECLTYPE', None),
    ('BOOST_DISABLE_ASSERTS', None),
    ('EIGEN_NO_DEBUG', None),
]

extra_compile_args = [
    '-O0',
    '-ftemplate-depth-256',
    '-Wno-unused-function',
    '-Wno-uninitialized',
]

# Pre-instantiated Stan language (stanc) grammar translation units compiled
# into the _api extension.
stanc_sources = [
    "pystan/stan/src/stan/lang/ast_def.cpp",
    "pystan/stan/src/stan/lang/grammars/bare_type_grammar_inst.cpp",
    "pystan/stan/src/stan/lang/grammars/expression07_grammar_inst.cpp",
    "pystan/stan/src/stan/lang/grammars/expression_grammar_inst.cpp",
    "pystan/stan/src/stan/lang/grammars/functions_grammar_inst.cpp",
    "pystan/stan/src/stan/lang/grammars/program_grammar_inst.cpp",
    "pystan/stan/src/stan/lang/grammars/statement_2_grammar_inst.cpp",
    "pystan/stan/src/stan/lang/grammars/statement_grammar_inst.cpp",
    "pystan/stan/src/stan/lang/grammars/term_grammar_inst.cpp",
    "pystan/stan/src/stan/lang/grammars/var_decls_grammar_inst.cpp",
    "pystan/stan/src/stan/lang/grammars/whitespace_grammar_inst.cpp",
]

extensions = [
    Extension("pystan._api",
              ["pystan/_api.pyx"] + stanc_sources,
              language='c++',
              define_macros=stan_macros,
              include_dirs=stan_include_dirs,
              extra_compile_args=extra_compile_args),
    Extension("pystan._chains",
              ["pystan/_chains.pyx"],
              language='c++',
              define_macros=stan_macros,
              include_dirs=stan_include_dirs,
              extra_compile_args=extra_compile_args),
    # _misc.pyx does not use Stan libs
    Extension("pystan._misc", ["pystan/_misc.pyx"], language='c++')
]

## package data
package_data_pats = ['*.hpp', '*.pxd', '*.pyx', 'tests/data/*.csv']

# get every file under pystan/stan/src and pystan/stan/lib
stan_files_all = sum(
    [[os.path.join(path.replace('pystan/', ''), fn) for fn in files]
     for path, dirs, files in os.walk('pystan/stan/src/')], [])

stan_math_files_all = sum(
    [[os.path.join(path.replace('pystan/', ''), fn) for fn in files]
     for path, dirs, files in os.walk('pystan/math/')], [])

lib_files_all = sum(
    [[os.path.join(path.replace('pystan/', ''), fn) for fn in files]
     for path, dirs, files in os.walk('pystan/stan/lib/')], [])

package_data_pats += stan_files_all
package_data_pats += stan_math_files_all
package_data_pats += lib_files_all
def setup_package():
    """Assemble package metadata and invoke distutils/setuptools setup()."""
    metadata = dict(name=NAME,
                    version=find_version("pystan", "__init__.py"),
                    maintainer=AUTHOR,
                    maintainer_email=AUTHOR_EMAIL,
                    packages=['pystan',
                              'pystan.tests',
                              'pystan.external',
                              'pystan.external.pymc',
                              'pystan.external.enum',
                              'pystan.external.scipy'],
                    ext_modules=extensions,
                    package_data={'pystan': package_data_pats},
                    platforms='any',
                    description=DESCRIPTION,
                    license=LICENSE,
                    url=URL,
                    long_description=LONG_DESCRIPTION,
                    classifiers=CLASSIFIERS,
                    **extra_setuptools_args)
    if len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or sys.argv[1]
                               in ('--help-commands', 'egg_info', '--version', 'clean')):
        # For these actions, neither Numpy nor Cython is required.
        #
        # They are required to succeed when pip is used to install PyStan
        # when, for example, Numpy is not yet present.
        try:
            from setuptools import setup
        except ImportError:
            from distutils.core import setup
        dist = setup(**metadata)
    else:
        import distutils.core
        # Stop distutils after command-line parsing so we can cythonize the
        # extensions before the real build runs via dist.run_commands().
        distutils.core._setup_stop_after = 'commandline'
        from distutils.core import setup
        try:
            from Cython.Build import cythonize
            # FIXME: if header only works, no need for numpy.distutils at all
            from numpy.distutils.command import install
        except ImportError:
            raise SystemExit("Cython>=0.19 and NumPy are required.")
        metadata['ext_modules'] = cythonize(extensions)
        dist = setup(**metadata)
        metadata['cmdclass'] = {'install': install.install}
        try:
            dist.run_commands()
        except KeyboardInterrupt:
            raise SystemExit("Interrupted")
        except (IOError, os.error) as exc:
            from distutils.util import grok_environment_error
            # NOTE(review): the formatted message is never printed or
            # re-raised, so I/O errors are effectively swallowed -- confirm.
            error = grok_environment_error(exc)
        except (DistutilsError, CCompilerError) as msg:
            raise SystemExit("error: " + str(msg))


if __name__ == '__main__':
    setup_package()
| chmullig/pystan | setup.py | Python | gpl-3.0 | 8,501 | [
"VisIt"
] | 4c8e0540fb1bcee5479c642769064ab9ff545c8c4510cbb2b4154528fe32c065 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# oiddiscover - front end for openid discovery
# Copyright (C) 2003-2014 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Front end to handle openid dicovery requests"""
import cgi
import cgitb
# Render tracebacks in the browser to ease CGI debugging.
cgitb.enable()

from shared.functionality.oiddiscover import main
from shared.cgiscriptstub import run_cgi_script_possibly_with_cert

# Hand the request to the shared CGI wrapper, which invokes main() with an
# optional client certificate.
run_cgi_script_possibly_with_cert(main)
| heromod/migrid | mig/cgi-bin/oiddiscover.py | Python | gpl-2.0 | 1,175 | [
"Brian"
] | b82ec80ab6ecfc438b12a502e70d50094fff1fcf6d8230538fc187fc922328a8 |
#!/usr/bin/env python
# Copyright 2014-2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Paul J. Robinson <pjrobinson@ucla.edu>
#
import warnings

# Emit a deprecation notice at import time: this module is an incomplete
# cDFT implementation kept for reference.
warnings.warn('This is an incomplete version of CDFT method. See also another '
              'implementation of cDFT in '
              'pyscf/examples/1-advanced/033-constrained_dft.py',
              DeprecationWarning)

'''
This is a purpose built constrained dft implementation which allows the
shifting of an orbital (or a linear combination of orbitals) by an arbitrary
constant. Allows the freedom to select thine own basis
'''
from functools import reduce
import numpy
from pyscf import lib
from pyscf import lo
from pyscf.pbc import gto, dft
def cdft(mf,cell,offset,orbital,basis=None):
    '''
    Constrain a mean-field object by shifting one orbital's diagonal energy.

    Input:
    mf -- a mean field object for DFT or (in principle) HF (doesn't really matter)
    offset -- float -- a semi arbitrary energy which displaces the selected orbital along the diagonal
    orbital -- int -- indicating which orbital is shifted in the selected basis
    basis -- 2D numpy array -- the working basis in the basis of AOs from 'cell' (Defaults to AO basis)
    Returns:
    mf -- mean field object (AO basis) with a shifted effective potential
    '''
    if basis is not None:
        a = basis
    else:
        # Default working basis: plain AOs (identity transformation).
        a = numpy.eye(cell._bas.shape[1])
    #
    # Build the diagonal shift operator from the selected basis vector
    # (e.g. one IAO) scaled by the requested energy offset.
    #
    iaoi = a.T[orbital,:]
    ## (experiment, disabled) normalizing the vector was tried:
    ##iaoi = iaoi / numpy.linalg.norm(iaoi)
    mf.shift_hamiltonian= numpy.diag(iaoi) * offset
    mf.constrained_dft = True

    def get_veff(*args, **kwargs):
        # Wrap the stock RKS veff builder and add the constraint shift.
        vxc = dft.rks.get_veff(mf, *args, **kwargs)
        # Make a shift to the Veff matrix, while ecoul and exc are kept unchanged.
        # The total energy is computed with the correct ecoul and exc.
        vxc = lib.tag_array(vxc+mf.shift_hamiltonian,
                            ecoul=vxc.ecoul, exc=vxc.exc, vj=None, vk=None)
        return vxc
    # Monkey-patch the effective potential so every SCF cycle sees the shift.
    mf.get_veff = get_veff
    return mf
def fast_iao_mullikan_pop(mf,cell,a=None):
    '''
    Input:
    mf -- a preconverged mean field object
    cell -- the cell the calculation was run on
    a -- working basis (e.g. orthogonalized IAOs); defaults to identity,
    i.e. the plain AO basis
    Returns: Mulliken population analysis in the basis `a`
    '''
    #
    # here we convert the density matrix to the IAO basis
    #
    if a is None:
        a = numpy.eye(mf.make_rdm1().shape[1])
    #converts the occupied MOs to the IAO basis
    #ovlpS = mf.get_ovlp()
    #CIb = reduce(numpy.dot, (a.T, ovlpS , mf.make_rdm1()))
    #
    # This is the mullikan population below here
    #
    mo_occ = mf.mo_coeff[:,mf.mo_occ>0]
    mo_occ = reduce(numpy.dot, (a.T, mf.get_ovlp(), mo_occ))
    # Closed-shell density matrix in the working basis (factor 2 = double occupancy).
    dm = numpy.dot(mo_occ, mo_occ.T) * 2
    # Build a minimal-basis cell so Mulliken charges map onto IAO-sized labels.
    pmol = cell.copy()
    pmol.build(False, False, basis='minao')
    return mf.mulliken_pop(pmol, dm, s=numpy.eye(pmol.nao_nr()))
if __name__ == '__main__':
    # Demo: constrained DFT on a graphene sheet using IAOs as the basis.
    cell = gto.Cell()
    # .a is a matrix for lattice vectors.
    cell.a = '''
2.4560000896 0.0000000000 0.0000000000
-1.2280000448 2.1269584693 0.0000000000
0.0000000000 0.0000000000 10.0000000000
'''
    cell.atom='''
C 0.000000000 0.000000000 4.999999702
C 1.227999862 0.708986051 4.999999702
'''
    cell.ke_cutoff = 50
    cell.basis = 'gth-tzvp'
    cell.pseudo = 'gth-pbe'
    cell.verbose=0
    cell.charge=0
    cell.unit="Angstrom"
    cell.build()
    cell.rcut*=2

    print("running intial DFT calc to generate IAOs")
    mf = dft.RKS(cell)
    mf.chkfile = 'graphene.chk'
    mf.init_guess = 'chkfile'
    mf.xc = 'pbe,pbe'
    mf.kernel()

    # We need to make the IAOs out of a converged calculation.
    print("generating IAOs")
    mo_occ = mf.mo_coeff[:,mf.mo_occ>0]
    a = lo.iao.iao(cell, mo_occ)
    # Orthogonalize IAO
    a = lo.vec_lowdin(a, mf.get_ovlp())

    # arbitrary parameters
    offset = 0.0001
    orbital = 4
    print("running constrained dft")
    mf = cdft(mf, mf.cell, offset, orbital, basis=a)
    # BUG FIX: fast_iao_mullikan_pop takes the cell as its second positional
    # argument; the original call omitted it and raised TypeError.
    population = fast_iao_mullikan_pop(mf, cell, a=a)
    result = numpy.zeros(3)
    result[0] = offset
    result[1] = mf.e_tot
    result[2] = population[0][4]
    print(result)
| sunqm/pyscf | pyscf/pbc/dft/cdft.py | Python | apache-2.0 | 4,779 | [
"PySCF"
] | f570845a0b30656c07c7eb6e7b6ec70ac621bd2bd24947ccbcb29d695de57bb5 |
from base.twilltestcase import TwillTestCase
from base.test_db_util import (
get_user,
get_latest_history_for_user,
get_latest_hda,
)
admin_user = None
class UploadData( TwillTestCase ):
def test_0000_setup_upload_tests( self ):
    """
    Configuring upload tests, setting admin_user
    """
    # Re-login as the admin test account so subsequent tests run with a
    # known user; the module-level admin_user global is shared by them all.
    self.logout()
    self.login( email='test@bx.psu.edu' )
    global admin_user
    admin_user = get_user( email='test@bx.psu.edu' )
def create_fresh_history( self, user ):
    """
    Deletes latest history for the given user, checks for an empty history,
    and returns that new, empty history
    """
    # in order to remove a lot of boiler plate - and not have cascading errors
    # NOTE(review): assumes deleting the current history makes the framework
    # surface a fresh empty one -- confirm against the twill test base.
    history = get_latest_history_for_user( user )
    self.delete_history( id=self.security.encode_id( history.id ) )
    self.is_history_empty()
    return get_latest_history_for_user( user )
def test_0005_upload_file( self ):
    """
    Test uploading 1.bed, NOT setting the file format
    """
    history = self.create_fresh_history( admin_user )
    self.upload_file( '1.bed' )
    hda = get_latest_hda()
    assert hda is not None, "Problem retrieving hda from database"
    self.verify_dataset_correctness( '1.bed', hid=str( hda.hid ) )
    # The rendered peek should show BED column headers, proving the bed
    # datatype was auto-detected without an explicit format.
    self.check_history_for_string( "<th>1.Chrom</th><th>2.Start</th><th>3.End</th>" )
    self.delete_history( id=self.security.encode_id( history.id ) )
def test_0006_upload_file( self ):
    """
    Test uploading 1.bed.spaces, with space to tab selected, NOT setting the file format
    """
    history = self.create_fresh_history( admin_user )
    self.upload_file( '1.bed.spaces', space_to_tab=True )
    hda = get_latest_hda()
    assert hda is not None, "Problem retrieving hda from database"
    # After space-to-tab conversion the upload should be identical to 1.bed.
    self.verify_dataset_correctness( '1.bed', hid=str( hda.hid ) )
    self.check_history_for_string( "<th>1.Chrom</th><th>2.Start</th><th>3.End</th>" )
    self.delete_history( id=self.security.encode_id( history.id ) )
def test_0010_upload_file( self ):
"""
Test uploading 4.bed.gz, manually setting the file format
"""
history = self.create_fresh_history( admin_user )
self.upload_file( '4.bed.gz', dbkey='hg17', ftype='bed' )
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
self.verify_dataset_correctness( '4.bed', hid=str( hda.hid ) )
self.check_hda_json_for_key_value( self.security.encode_id( hda.id ),
"peek", "<th>1.Chrom</th><th>2.Start</th><th>3.End</th>", use_string_contains=True )
self.delete_history( id=self.security.encode_id( history.id ) )
def test_0012_upload_file( self ):
"""
Test uploading 4.bed.bz2, manually setting the file format
"""
history = self.create_fresh_history( admin_user )
self.upload_file( '4.bed.bz2', dbkey='hg17', ftype='bed' )
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
self.verify_dataset_correctness( '4.bed', hid=str( hda.hid ) )
self.check_hda_json_for_key_value( self.security.encode_id( hda.id ),
"peek", "<th>1.Chrom</th><th>2.Start</th><th>3.End</th>", use_string_contains=True )
self.delete_history( id=self.security.encode_id( history.id ) )
def test_0015_upload_file( self ):
"""
Test uploading 1.scf, manually setting the file format
"""
history = self.create_fresh_history( admin_user )
self.upload_file( '1.scf', ftype='scf' )
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
self.verify_dataset_correctness( '1.scf', hid=str( hda.hid ) )
self.check_hda_json_for_key_value( self.security.encode_id( hda.id ),
"peek", "Binary scf sequence file", use_string_contains=True )
self.delete_history( id=self.security.encode_id( history.id ) )
def test_0020_upload_file( self ):
"""
Test uploading 1.scf, NOT setting the file format
"""
history = self.create_fresh_history( admin_user )
self.upload_file( '1.scf' )
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
self.check_hda_json_for_key_value( self.security.encode_id( hda.id ),
"misc_info", "File Format' to 'Scf' when uploading scf files", use_string_contains=True )
self.delete_history( id=self.security.encode_id( history.id ) )
def test_0025_upload_file( self ):
"""
Test uploading 4.bed.zip, manually setting the file format
"""
history = self.create_fresh_history( admin_user )
self.upload_file( '4.bed.zip', ftype='bed' )
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
self.verify_dataset_correctness( '4.bed', hid=str( hda.hid ) )
self.check_hda_json_for_key_value( self.security.encode_id( hda.id ),
"peek", "<th>1.Chrom</th><th>2.Start</th><th>3.End</th>", use_string_contains=True )
self.delete_history( id=self.security.encode_id( history.id ) )
def test_0030_upload_file( self ):
"""
Test uploading 4.bed.zip, NOT setting the file format
"""
history = self.create_fresh_history( admin_user )
self.upload_file( '4.bed.zip' )
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
self.verify_dataset_correctness( '4.bed', hid=str( hda.hid ) )
self.check_hda_json_for_key_value( self.security.encode_id( hda.id ),
"peek", "<th>1.Chrom</th><th>2.Start</th><th>3.End</th>", use_string_contains=True )
self.delete_history( id=self.security.encode_id( history.id ) )
def test_0035_upload_file( self ):
"""
Test uploading 1.sam NOT setting the file format
"""
history = self.create_fresh_history( admin_user )
self.upload_file( '1.sam' )
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
self.verify_dataset_correctness( '1.sam', hid=str( hda.hid ) )
self.check_hda_json_for_key_value( self.security.encode_id( hda.id ),
"peek", "<th>1.QNAME</th><th>2.FLAG</th><th>3.RNAME</th><th>4.POS</th>", use_string_contains=True )
self.delete_history( id=self.security.encode_id( history.id ) )
def test_0040_upload_file( self ):
"""
Test uploading 1.sff, NOT setting the file format
"""
history = self.create_fresh_history( admin_user )
self.upload_file( '1.sff' )
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
self.verify_dataset_correctness( '1.sff', hid=str( hda.hid ) )
self.check_hda_json_for_key_value( self.security.encode_id( hda.id ),
"misc_info", "sff", use_string_contains=True )
self.delete_history( id=self.security.encode_id( history.id ) )
def test_0045_upload_file( self ):
"""
Test uploading 454Score.pdf, NOT setting the file format
"""
history = self.create_fresh_history( admin_user )
self.upload_file( '454Score.pdf' )
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
self.check_hda_json_for_key_value( self.security.encode_id( hda.id ),
"name", "454Score.pdf" )
self.delete_history( id=self.security.encode_id( history.id ) )
def test_0050_upload_file( self ):
"""
Test uploading 454Score.png, NOT setting the file format
"""
history = self.create_fresh_history( admin_user )
self.upload_file( '454Score.png' )
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
self.check_hda_json_for_key_value( self.security.encode_id( hda.id ),
"name", "454Score.png" )
self.delete_history( id=self.security.encode_id( history.id ) )
def test_0055_upload_file( self ):
"""
Test uploading lped composite datatype file, manually setting the file format
"""
# Logged in as admin_user
history = self.create_fresh_history( admin_user )
# lped data types include a ped_file and a map_file ( which is binary )
self.upload_file( None, ftype='lped', metadata=[ { 'name':'base_name', 'value':'rgenetics' } ], composite_data=[ { 'name':'ped_file', 'value':'tinywga.ped' }, { 'name':'map_file', 'value':'tinywga.map'} ] )
# Get the latest hid for testing
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
# We'll test against the resulting ped file and map file for correctness
self.verify_composite_datatype_file_content( 'tinywga.ped', str( hda.id ), base_name='rgenetics.ped' )
self.verify_composite_datatype_file_content( 'tinywga.map', str( hda.id ), base_name='rgenetics.map' )
self.check_hda_json_for_key_value( self.security.encode_id( hda.id ),
"metadata_base_name", "rgenetics", use_string_contains=True )
self.delete_history( id=self.security.encode_id( history.id ) )
def test_0056_upload_file( self ):
"""
Test uploading lped composite datatype file, manually setting the file format, and using space to tab on one file (tinywga.ped)
"""
# Logged in as admin_user
history = self.create_fresh_history( admin_user )
# lped data types include a ped_file and a map_file ( which is binary )
self.upload_file( None, ftype='lped', metadata=[ { 'name':'base_name', 'value':'rgenetics' } ], composite_data=[ { 'name':'ped_file', 'value':'tinywga.ped', 'space_to_tab':True }, { 'name':'map_file', 'value':'tinywga.map'} ] )
# Get the latest hid for testing
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
# We'll test against the resulting ped file and map file for correctness
self.verify_composite_datatype_file_content( 'tinywga.ped.space_to_tab', str( hda.id ), base_name='rgenetics.ped' )
self.verify_composite_datatype_file_content( 'tinywga.map', str( hda.id ), base_name='rgenetics.map' )
self.check_hda_json_for_key_value( self.security.encode_id( hda.id ),
"metadata_base_name", "rgenetics", use_string_contains=True )
self.delete_history( id=self.security.encode_id( history.id ) )
def test_0060_upload_file( self ):
"""
Test uploading pbed composite datatype file, manually setting the file format
"""
# Logged in as admin_user
history = self.create_fresh_history( admin_user )
# pbed data types include a bim_file, a bed_file and a fam_file
self.upload_file( None, ftype='pbed',
metadata=[ { 'name':'base_name', 'value':'rgenetics' } ],
composite_data=[
{ 'name':'bim_file', 'value':'tinywga.bim' },
{ 'name':'bed_file', 'value':'tinywga.bed' },
{ 'name':'fam_file', 'value':'tinywga.fam' } ])
# Get the latest hid for testing
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
# We'll test against the resulting ped file and map file for correctness
self.verify_composite_datatype_file_content( 'tinywga.bim', str( hda.id ), base_name='rgenetics.bim' )
self.verify_composite_datatype_file_content( 'tinywga.bed', str( hda.id ), base_name='rgenetics.bed' )
self.verify_composite_datatype_file_content( 'tinywga.fam', str( hda.id ), base_name='rgenetics.fam' )
self.check_hda_json_for_key_value( self.security.encode_id( hda.id ),
"metadata_base_name", "rgenetics", use_string_contains=True )
self.delete_history( id=self.security.encode_id( history.id ) )
def test_0065_upload_file( self ):
"""
Test uploading asian_chars_1.txt, NOT setting the file format
"""
# Logged in as admin_user
history = self.create_fresh_history( admin_user )
self.upload_file( 'asian_chars_1.txt' )
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
self.verify_dataset_correctness( 'asian_chars_1.txt', hid=str( hda.hid ) )
self.check_hda_json_for_key_value( self.security.encode_id( hda.id ),
"misc_info", "uploaded multi-byte char file", use_string_contains=True )
self.delete_history( id=self.security.encode_id( history.id ) )
def test_0070_upload_file( self ):
"""
Test uploading 2gen.fastq, NOT setting the file format
"""
# Logged in as admin_user
history = self.create_fresh_history( admin_user )
self.upload_file( '2gen.fastq' )
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
self.verify_dataset_correctness( '2gen.fastq', hid=str( hda.hid ) )
self.check_hda_json_for_key_value( self.security.encode_id( hda.id ), "data_type", "fastq" )
self.delete_history( id=self.security.encode_id( history.id ) )
def test_0075_upload_file( self ):
"""
Test uploading 1.wig, NOT setting the file format
"""
# Logged in as admin_user
history = self.create_fresh_history( admin_user )
self.upload_file( '1.wig' )
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
self.verify_dataset_correctness( '1.wig', hid=str( hda.hid ) )
self.check_hda_json_for_key_value( self.security.encode_id( hda.id ), "data_type", "wig" )
self.check_metadata_for_string( 'value="1.wig" value="\?"' )
self.check_metadata_for_string( 'Change data type selected value="wig" selected="yes"' )
self.delete_history( id=self.security.encode_id( history.id ) )
def test_0080_upload_file( self ):
"""
Test uploading 1.tabular, NOT setting the file format
"""
# Logged in as admin_user
history = self.create_fresh_history( admin_user )
self.upload_file( '1.tabular' )
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
self.verify_dataset_correctness( '1.tabular', hid=str( hda.hid ) )
self.check_hda_json_for_key_value( self.security.encode_id( hda.id ), "data_type", "tabular" )
self.check_metadata_for_string( 'value="1.tabular" value="\?"' )
self.check_metadata_for_string( 'Change data type selected value="tabular" selected="yes"' )
self.delete_history( id=self.security.encode_id( history.id ) )
def test_0085_upload_file( self ):
"""
Test uploading qualscores.qualsolid, NOT setting the file format
"""
# Logged in as admin_user
history = self.create_fresh_history( admin_user )
self.upload_file( 'qualscores.qualsolid' )
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
self.verify_dataset_correctness( 'qualscores.qualsolid', hid=str( hda.hid ) )
self.check_hda_json_for_key_value( self.security.encode_id( hda.id ), "data_type", "qualsolid" )
self.check_metadata_for_string( 'Change data type value="qualsolid" selected="yes">qualsolid' )
self.delete_history( id=self.security.encode_id( history.id ) )
def test_0090_upload_file( self ):
"""
Test uploading qualscores.qual454, NOT setting the file format
"""
# Logged in as admin_user
history = self.create_fresh_history( admin_user )
self.upload_file( 'qualscores.qual454' )
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
self.verify_dataset_correctness( 'qualscores.qual454', hid=str( hda.hid ) )
self.check_hda_json_for_key_value( self.security.encode_id( hda.id ), "data_type", "qual454" )
self.check_metadata_for_string( 'Change data type value="qual454" selected="yes">qual454' )
self.delete_history( id=self.security.encode_id( history.id ) )
def test_0095_upload_file( self ):
"""
Test uploading 3.maf, NOT setting the file format
"""
# Logged in as admin_user
history = self.create_fresh_history( admin_user )
self.upload_file( '3.maf' )
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
self.verify_dataset_correctness( '3.maf', hid=str( hda.hid ) )
self.check_hda_json_for_key_value( self.security.encode_id( hda.id ), "data_type", "maf" )
self.check_metadata_for_string( 'value="3.maf" value="\?"' )
self.check_metadata_for_string( 'Convert to new format <option value="interval">Convert MAF to Genomic Intervals <option value="fasta">Convert MAF to Fasta' )
self.check_metadata_for_string( 'Change data type selected value="maf" selected="yes"' )
self.delete_history( id=self.security.encode_id( history.id ) )
def test_0100_upload_file( self ):
"""
Test uploading 1.lav, NOT setting the file format
"""
# Logged in as admin_user
history = self.create_fresh_history( admin_user )
self.upload_file( '1.lav' )
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
self.verify_dataset_correctness( '1.lav', hid=str( hda.hid ) )
self.check_hda_json_for_key_value( self.security.encode_id( hda.id ), "data_type", "lav" )
self.check_metadata_for_string( 'value="1.lav" value="\?"' )
self.check_metadata_for_string( 'Change data type selected value="lav" selected="yes"' )
self.delete_history( id=self.security.encode_id( history.id ) )
def test_0105_upload_file( self ):
"""
Test uploading 1.interval, NOT setting the file format
"""
# Logged in as admin_user
history = self.create_fresh_history( admin_user )
self.upload_file( '1.interval' )
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
self.verify_dataset_correctness( '1.interval', hid=str( hda.hid ) )
self.check_hda_json_for_key_value( self.security.encode_id( hda.id ), "data_type", "interval" )
self.check_metadata_for_string( 'value="1.interval" value="\?"' )
self.check_metadata_for_string( 'Chrom column: <option value="1" selected> Start column: <option value="2" selected>' )
self.check_metadata_for_string( 'End column: <option value="3" selected> Strand column <option value="6" selected>' )
self.check_metadata_for_string( 'Convert to new format <option value="bed">Convert Genomic Intervals To BED' )
self.check_metadata_for_string( 'Change data type selected value="interval" selected="yes"' )
self.delete_history( id=self.security.encode_id( history.id ) )
def test_0110_upload_file( self ):
"""
Test uploading 5.gff3, NOT setting the file format
"""
# Logged in as admin_user
history = self.create_fresh_history( admin_user )
self.upload_file( '5.gff3' )
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
self.verify_dataset_correctness( '5.gff3', hid=str( hda.hid ) )
self.check_hda_json_for_key_value( self.security.encode_id( hda.id ), "data_type", "gff3" )
self.check_metadata_for_string( 'value="5.gff3" value="\?"' )
self.check_metadata_for_string( 'Convert to new format <option value="bed">Convert GFF to BED' )
self.check_metadata_for_string( 'Change data type selected value="gff3" selected="yes"' )
self.delete_history( id=self.security.encode_id( history.id ) )
def test_0115_upload_file( self ):
"""
Test uploading html_file.txt, NOT setting the file format
"""
# Logged in as admin_user
history = self.create_fresh_history( admin_user )
self.upload_file( 'html_file.txt' )
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
self.check_hda_json_for_key_value( self.security.encode_id( hda.id ),
"misc_info", "The uploaded file contains inappropriate HTML content", use_string_contains=True )
self.delete_history( id=self.security.encode_id( history.id ) )
def test_0120_upload_file( self ):
"""
Test uploading 5.gff, NOT setting the file format
Test sniffer for gff.
"""
history = self.create_fresh_history( admin_user )
self.upload_file( '5.gff' )
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
self.verify_dataset_correctness( '5.gff', hid=str( hda.hid ) )
self.check_hda_json_for_key_value( self.security.encode_id( hda.id ), "data_type", "gff" )
self.check_metadata_for_string( 'value="5.gff" value="\?"' )
self.check_metadata_for_string( 'Convert to new format <option value="bed">Convert GFF to BED' )
self.check_metadata_for_string( 'Change data type selected value="gff" selected="yes"' )
self.delete_history( id=self.security.encode_id( history.id ) )
def test_0125_upload_file( self ):
"""
Test uploading 1.fasta, NOT setting the file format
"""
# Logged in as admin_user
history = self.create_fresh_history( admin_user )
self.upload_file( '1.fasta' )
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
self.verify_dataset_correctness( '1.fasta', hid=str( hda.hid ) )
self.check_hda_json_for_key_value( self.security.encode_id( hda.id ), "data_type", "fasta" )
self.check_metadata_for_string( 'value="1.fasta" value="\?" Change data type selected value="fasta" selected="yes"' )
self.delete_history( id=self.security.encode_id( history.id ) )
def test_0130_upload_file( self ):
"""
Test uploading 1.customtrack, NOT setting the file format
"""
# Logged in as admin_user
history = self.create_fresh_history( admin_user )
self.upload_file( '1.customtrack' )
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
self.verify_dataset_correctness( '1.customtrack', hid=str( hda.hid ) )
self.check_hda_json_for_key_value( self.security.encode_id( hda.id ), "data_type", "customtrack" )
self.check_metadata_for_string( 'value="1.customtrack" value="\?" Change data type selected value="customtrack" selected="yes"' )
self.delete_history( id=self.security.encode_id( history.id ) )
def test_0135_upload_file( self ):
"""
Test uploading shrimp_cs_test1.csfasta, NOT setting the file format
"""
# Logged in as admin_user
history = self.create_fresh_history( admin_user )
self.upload_file( 'shrimp_cs_test1.csfasta' )
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
self.verify_dataset_correctness( 'shrimp_cs_test1.csfasta', hid=str( hda.hid ) )
self.check_hda_json_for_key_value( self.security.encode_id( hda.id ), "data_type", "csfasta" )
self.check_metadata_for_string( 'value="shrimp_cs_test1.csfasta" value="\?" Change data type value="csfasta" selected="yes"' )
self.delete_history( id=self.security.encode_id( history.id ) )
def test_0145_upload_file( self ):
"""
Test uploading 1.axt, NOT setting the file format
"""
# Logged in as admin_user
history = self.create_fresh_history( admin_user )
self.upload_file( '1.axt' )
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
self.verify_dataset_correctness( '1.axt', hid=str( hda.hid ) )
self.check_hda_json_for_key_value( self.security.encode_id( hda.id ), "data_type", "axt" )
self.check_metadata_for_string( 'value="1.axt" value="\?" Change data type selected value="axt" selected="yes"' )
self.delete_history( id=self.security.encode_id( history.id ) )
def test_0150_upload_file( self ):
"""
Test uploading 1.bam, which is a sorted Bam file creaed by the Galaxy sam_to_bam tool, NOT setting the file format
"""
history = self.create_fresh_history( admin_user )
self.upload_file( '1.bam' )
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
self.verify_dataset_correctness( '1.bam', hid=str( hda.hid ), attributes={ 'ftype' : 'bam' } )
self.check_hda_json_for_key_value( self.security.encode_id( hda.id ), "data_type", "bam" )
# Make sure the Bam index was created
assert hda.metadata.bam_index is not None, "Bam index was not correctly created for 1.bam"
self.delete_history( id=self.security.encode_id( history.id ) )
def test_0155_upload_file( self ):
"""
Test uploading 3unsorted.bam, which is an unsorted Bam file, NOT setting the file format
"""
history = self.create_fresh_history( admin_user )
self.upload_file( '3unsorted.bam' )
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
# Since 3unsorted.bam is not sorted, we cannot verify dataset correctness since the uploaded
# dataset will be sorted. However, the check below to see if the index was created is
# sufficient.
self.check_hda_json_for_key_value( self.security.encode_id( hda.id ), "data_type", "bam" )
# Make sure the Bam index was created
assert hda.metadata.bam_index is not None, "Bam index was not correctly created for 3unsorted.bam"
self.delete_history( id=self.security.encode_id( history.id ) )
def test_0160_url_paste( self ):
"""
Test url paste behavior
"""
# Logged in as admin_user
history = self.create_fresh_history( admin_user )
self.upload_url_paste( 'hello world' )
self.check_history_for_exact_string( 'Pasted Entry' )
self.check_history_for_exact_string( 'hello world' )
self.upload_url_paste( u'hello world' )
self.check_history_for_exact_string( 'Pasted Entry' )
self.check_history_for_exact_string( 'hello world' )
self.delete_history( id=self.security.encode_id( history.id ) )
def test_0165_upload_file( self ):
"""
Test uploading 1.pileup, NOT setting the file format
"""
history = self.create_fresh_history( admin_user )
self.upload_file( '1.pileup' )
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
self.verify_dataset_correctness( '1.pileup', hid=str( hda.hid ) )
self.check_hda_json_for_key_value( self.security.encode_id( hda.id ), "data_type", "pileup" )
self.check_metadata_for_string( 'value="1.pileup" value="\?" Change data type selected value="pileup" selected="yes"' )
self.delete_history( id=self.security.encode_id( history.id ) )
def test_0170_upload_file( self ):
"""
Test uploading 1.bigbed, NOT setting the file format
"""
history = self.create_fresh_history( admin_user )
self.upload_file( '1.bigbed' )
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
self.verify_dataset_correctness( '1.bigbed', hid=str( hda.hid ) )
self.check_hda_json_for_key_value( self.security.encode_id( hda.id ), "data_type", "bigbed" )
self.check_metadata_for_string( 'value="1.bigbed" value="\?" Change data type selected value="bigbed" selected="yes"' )
self.delete_history( id=self.security.encode_id( history.id ) )
def test_0175_upload_file( self ):
"""
Test uploading 1.bigwig, NOT setting the file format
"""
history = self.create_fresh_history( admin_user )
self.upload_file( '1.bigwig' )
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
self.verify_dataset_correctness( '1.bigwig', hid=str( hda.hid ) )
self.check_hda_json_for_key_value( self.security.encode_id( hda.id ), "data_type", "bigwig" )
self.check_metadata_for_string( 'value="1.bigwig" value="\?" Change data type selected value="bigwig" selected="yes"' )
self.delete_history( id=self.security.encode_id( history.id ) )
def test_9999_clean_up( self ):
self.logout()
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/test/functional/test_get_data.py | Python | gpl-3.0 | 29,277 | [
"Galaxy"
] | 4841ae56c405fe0429d6f890c3e14be662d7d4efbee6594445545022ede3f648 |
from __future__ import print_function
import sys
import random
import os
from builtins import range
import time
sys.path.insert(1, "../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.random_forest import H2ORandomForestEstimator
from h2o.grid.grid_search import H2OGridSearch
class Test_rf_grid_search:
"""
PUBDEV-1843: Grid testing. Subtask 2.
This class is created to test the gridsearch for random forest algo and make sure it runs. Only one test is
performed here.
Test Descriptions:
a. grab all truely griddable parameters and randomly or manually set the parameter values.
b. Next, build H2O random forest models using grid search. Count and make sure models
are only built for hyper-parameters set to legal values. No model is built for bad hyper-parameters
values. We should instead get a warning/error message printed out.
c. For each model built using grid search, we will extract the parameters used in building
that model and manually build a H2O random forest model. Training metrics are calculated from the
gridsearch model and the manually built model. If their metrics
differ by too much, print a warning message but don't fail the test.
d. we will check and make sure the models are built within the max_runtime_secs time limit that was set
for it as well. If max_runtime_secs was exceeded, declare test failure.
Note that for hyper-parameters containing all legal parameter names and parameter value lists with legal
and illegal values, grid-models should be built for all combinations of legal parameter values. For
illegal parameter values, a warning/error message should be printed out to warn the user but the
program should not throw an exception;
We will re-use the dataset generation methods for GLM. There will be only one data set for classification.
"""
# parameters set by users, change with care
max_grid_model = 50 # maximum number of grid models generated before adding max_runtime_secs
curr_time = str(round(time.time())) # store current timestamp, used as part of filenames.
seed = int(round(time.time()))
# parameters denoting filenames of interested that store training/validation/test data sets in csv format
training1_filename = "smalldata/gridsearch/multinomial_training1_set.csv"
json_filename = "gridsearch_rf_hyper_parameter_" + curr_time + ".json"
allowed_diff = 1e-2 # difference allow between grid search model and manually built model MSEs
# System parameters, do not change. Dire consequences may follow if you do
current_dir = os.path.dirname(os.path.realpath(sys.argv[1])) # directory of this test file
train_row_count = 0 # training data row count, randomly generated later
train_col_count = 0 # training data column count, randomly generated later
# following parameters are used to generate hyper-parameters
max_int_val = 10 # maximum size of random integer values
min_int_val = -2 # minimum size of random integer values
max_int_number = 3 # maximum number of integer random grid values to generate
max_real_val = 1 # maximum size of random float values
min_real_val = -0.1 # minimum size of random float values
max_real_number = 3 # maximum number of real grid values to generate
time_scale = 2 # maximum runtime scale
extra_time_fraction = 0.5 # since timing is never perfect, give some extra time on top of maximum runtime limit
min_runtime_per_tree = 0 # minimum run time found. Determined later
model_run_time = 0.0 # time taken to run a vanilla random forest model. Determined later.
allowed_runtime_diff = 0.05 # run time difference between random forest manually built and gridsearch models
# before we attempt to compare training metrics.
family = 'multinomial' # choose default family to be gaussian
training_metric = 'logloss' # metrics by which we evaluate model performance
test_name = "pyunit_rf_gridsearch_over_all_params_large.py" # name of this test
sandbox_dir = "" # sandbox directory where we are going to save our failed test data sets
# store information about training/test data sets
x_indices = [] # store predictor indices in the data set
y_index = 0 # store response index in the data set
training1_data = [] # store training data sets
test_failed = 0 # count total number of tests that have failed
# give the user opportunity to pre-assign hyper parameters for fixed values
hyper_params = dict()
hyper_params["balance_classes"] = [True, False]
hyper_params["fold_assignment"] = ["AUTO", "Random", "Modulo", "Stratified"]
hyper_params["stopping_metric"] = ['logloss']
# parameters to be excluded from hyper parameter list even though they may be gridable
exclude_parameter_lists = ['validation_frame', 'response_column', 'fold_column', 'offset_column',
'col_sample_rate_change_per_level', 'sample_rate_per_class', 'col_sample_rate_per_tree',
'nbins', 'nbins_top_level', 'nbins_cats', 'seed', 'class_sampling_factors',
'max_after_balance_size', 'min_split_improvement', 'histogram_type', 'mtries',
'weights_column', 'min_rows', 'r2_stopping', 'max_hit_ratio_k', 'score_tree_interval']
params_zero_one = ["sample_rate"]
params_more_than_zero = ['ntrees', 'max_depth']
params_more_than_one = []
params_zero_positive = ['max_runtime_secs', 'stopping_rounds', 'stopping_tolerance'] # >= 0
final_hyper_params = dict() # store the final hyper-parameters that we are going to use
gridable_parameters = [] # store griddable parameter names
gridable_types = [] # store the corresponding griddable parameter types
gridable_defaults = [] # store the gridabble parameter default values
possible_number_models = 0 # possible number of models built based on hyper-parameter specification
correct_model_number = 0 # count number of models built with bad hyper-parameter specification
true_correct_model_number = 0 # count number of models built with good hyper-parameter specification
nfolds = 5 # enable cross validation to test fold_assignment
    def __init__(self):
        # Load the data set first, then derive the hyper-parameter search
        # space from a barebone model (setup_model depends on setup_data).
        self.setup_data()
        self.setup_model()
def setup_data(self):
"""
This function performs all initializations necessary:
load the data sets and set the training set indices and response column index
"""
# create and clean out the sandbox directory first
self.sandbox_dir = pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, True)
# preload data sets
self.training1_data = h2o.import_file(path=pyunit_utils.locate(self.training1_filename))
# set data set indices for predictors and response
self.y_index = self.training1_data.ncol-1
self.x_indices = list(range(self.y_index))
# set response to be categorical for classification tasks
self.training1_data[self.y_index] = self.training1_data[self.y_index].round().asfactor()
# save the training data files just in case the code crashed.
pyunit_utils.remove_csv_files(self.current_dir, ".csv", action='copy', new_dir_path=self.sandbox_dir)
    def setup_model(self):
        """
        This function setup the gridsearch hyper-parameters that will be used later on:

        1. It will first try to grab all the parameters that are griddable and parameters used by random forest.
        2. It will find the intersection of parameters that are both griddable and used by random forest.
        3. There are several extra parameters that are used by random forest that are denoted as griddable but actually
        are not.  These parameters have to be discovered manually and they are captured in
        self.exclude_parameter_lists.
        4. We generate the gridsearch hyper-parameter.  For numerical parameters, we will generate those randomly.
        For enums, we will include all of them.

        Side effects: sets self.model_run_time, self.min_runtime_per_tree,
        self.gridable_* lists, self.hyper_params, self.final_hyper_params and
        self.possible_number_models; writes the chosen hyper-parameters to a
        json file in the sandbox.

        :return: None
        """
        # build bare bone model to get all parameters
        model = H2ORandomForestEstimator(ntrees=self.max_int_val, nfolds=self.nfolds, score_tree_interval=0)
        model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

        self.model_run_time = pyunit_utils.find_grid_runtime([model])  # find model train time
        print("Time taken to build a base barebone model is {0}".format(self.model_run_time))

        # per-tree runtime is used later to sanity-check max_runtime_secs limits
        summary_list = model._model_json["output"]["model_summary"]
        num_trees = summary_list["number_of_trees"][0]

        if num_trees == 0:
            # no trees were built; fall back to the whole-model runtime
            self.min_runtime_per_tree = self.model_run_time
        else:
            self.min_runtime_per_tree = self.model_run_time/num_trees

        # grab all gridable parameters and its type
        (self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
            pyunit_utils.get_gridables(model._model_json["parameters"])

        # randomly generate griddable parameters including values outside legal range, like setting alpha values to
        # be outside legal range of 0 and 1 and etc
        (self.hyper_params, self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
            pyunit_utils.gen_grid_search(model.full_parameters.keys(), self.hyper_params,
                                         self.exclude_parameter_lists,
                                         self.gridable_parameters, self.gridable_types, self.gridable_defaults,
                                         random.randint(1, self.max_int_number),
                                         self.max_int_val, self.min_int_val,
                                         random.randint(1, self.max_real_number),
                                         self.max_real_val, self.min_real_val)

        # scale the max_runtime_secs parameter and others as well to make sure they make sense
        # NOTE: local name shadows the class attribute self.time_scale on purpose
        time_scale = self.time_scale * self.model_run_time
        if "max_runtime_secs" in list(self.hyper_params):
            self.hyper_params["max_runtime_secs"] = [time_scale * x for x
                                                     in self.hyper_params["max_runtime_secs"]]

        # generate a new final_hyper_params which only takes a subset of all griddable parameters while
        # hyper_params take all griddable parameters and generate the grid search hyper-parameters
        [self.possible_number_models, self.final_hyper_params] = \
            pyunit_utils.check_and_count_models(self.hyper_params, self.params_zero_one, self.params_more_than_zero,
                                                self.params_more_than_one, self.params_zero_positive,
                                                self.max_grid_model)

        # must add max_runtime_secs to restrict unit test run time and as a promise to Arno to test for this
        if ("max_runtime_secs" not in list(self.final_hyper_params)) and \
                ("max_runtime_secs" in list(self.hyper_params)):
            self.final_hyper_params["max_runtime_secs"] = self.hyper_params["max_runtime_secs"]
            # only non-negative runtimes produce models; scale the expected model count accordingly
            len_good_time = len([x for x in self.hyper_params["max_runtime_secs"] if (x >= 0)])
            self.possible_number_models = self.possible_number_models*len_good_time

        self.final_hyper_params["seed"] = [self.seed]     # added see to make test more repeatable

        # write out the hyper-parameters used into json files.
        pyunit_utils.write_hyper_parameters_json(self.current_dir, self.sandbox_dir, self.json_filename,
                                                 self.final_hyper_params)
def test_rf_grid_search_over_params(self):
    """
    test_rf_gridsearch_sorting_metrics performs the following:
    a. build H2O random forest models using grid search.  Count and make sure models
       are only built for hyper-parameters set to legal values.  No model is built for bad
       hyper-parameters values.  We should instead get a warning/error message printed out.
    b. for each model built using grid search, we will extract the parameters used in building
       that model and manually build a H2O random forest model.  Training metrics are calculated
       from the gridsearch model and the manually built model.  If their metrics differ by too
       much, print a warning message but don't fail the test.
    c. we will check and make sure the models are built within the max_runtime_secs time limit
       that was set for it as well.  If max_runtime_secs was exceeded, declare test failure.
    """
    print("*******************************************************************************************")
    print("test_rf_gridsearch_sorting_metrics for random forest ")
    h2o.cluster_info()

    try:
        print("Hyper-parameters used here is {0}".format(self.final_hyper_params))

        # start grid search
        grid_model = H2OGridSearch(H2ORandomForestEstimator(nfolds=self.nfolds, score_tree_interval=0),
                                   hyper_params=self.final_hyper_params)
        grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

        self.correct_model_number = len(grid_model)  # store number of models built

        # make sure the correct number of models are built by gridsearch
        if (self.correct_model_number - self.possible_number_models) > 0.9:  # wrong grid model number
            self.test_failed += 1
            # BUG FIX: the original message used "{1}" for both fields so the actual
            # model count never appeared; use {0} and {1} to show both values.
            print("test_rf_gridsearch_sorting_metrics for random forest failed: number of models built by "
                  "gridsearch: {0} does not equal to all possible combinations of hyper-parameters: "
                  "{1}".format(self.correct_model_number, self.possible_number_models))
        else:
            # add parameters into params_dict.  Use this to manually build model
            params_dict = dict()
            params_dict["nfolds"] = self.nfolds
            params_dict["score_tree_interval"] = 0
            total_run_time_limits = 0.0  # calculate upper bound of max_runtime_secs
            true_run_time_limits = 0.0
            manual_run_runtime = 0.0

            # compare performance metric of model built by gridsearch with manually built model
            for each_model in grid_model:
                params_list = grid_model.get_hyperparams_dict(each_model._id)
                params_list.update(params_dict)

                model_params = dict()

                # need to take out max_runtime_secs from model parameters, it is now set in .train()
                if "max_runtime_secs" in params_list:
                    model_params["max_runtime_secs"] = params_list["max_runtime_secs"]
                    max_runtime = params_list["max_runtime_secs"]
                    del params_list["max_runtime_secs"]
                else:
                    max_runtime = 0

                if "validation_frame" in params_list:
                    model_params["validation_frame"] = params_list["validation_frame"]
                    del params_list["validation_frame"]

                # NOTE(review): computed but never used afterwards — kept for parity
                # with sibling grid tests; candidate for removal once confirmed dead.
                each_model_runtime = pyunit_utils.find_grid_runtime([each_model])

                manual_model = H2ORandomForestEstimator(**params_list)
                manual_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data,
                                   **model_params)

                # collect the time taken to manually build all models
                model_runtime = pyunit_utils.find_grid_runtime([manual_model])  # time to build this model
                manual_run_runtime += model_runtime

                summary_list = manual_model._model_json['output']['model_summary']
                tree_num = summary_list["number_of_trees"][0]

                if max_runtime > 0:
                    # shortest possible time it takes to build this model
                    if (max_runtime < self.min_runtime_per_tree) or (tree_num <= 1):
                        total_run_time_limits += model_runtime
                    else:
                        total_run_time_limits += max_runtime

                true_run_time_limits += max_runtime

                # compute and compare test metrics between the two models
                grid_model_metrics = each_model.model_performance()._metric_json[self.training_metric]
                manual_model_metrics = manual_model.model_performance()._metric_json[self.training_metric]

                # just compare the mse in this case within tolerance:
                if not ((type(grid_model_metrics) == str) or (type(manual_model_metrics) == str)):
                    if (abs(grid_model_metrics) > 0) and \
                            (abs(grid_model_metrics - manual_model_metrics) / grid_model_metrics >
                             self.allowed_diff):
                        print("test_rf_gridsearch_sorting_metrics for random forest warning: grid search model "
                              "metric ({0}) and manually built H2O model metric ({1}) differ too "
                              "much.".format(grid_model_metrics, manual_model_metrics))

            total_run_time_limits = max(total_run_time_limits, true_run_time_limits) * \
                (1 + self.extra_time_fraction)

            # make sure the max_runtime_secs is working to restrict model built time
            if not (manual_run_runtime <= total_run_time_limits):
                self.test_failed += 1
                print("test_rf_gridsearch_sorting_metrics for random forest failed: time taken to manually build"
                      " models is {0}. Maximum allowed time is"
                      " {1}".format(manual_run_runtime, total_run_time_limits))

            if self.test_failed == 0:
                print("test_rf_gridsearch_sorting_metrics for random forest has passed!")
    except Exception as e:
        # An exception is only a failure when legal hyper-parameter combinations exist;
        # with zero legal combinations, gridsearch raising is the expected outcome.
        if self.possible_number_models > 0:
            print("test_rf_gridsearch_sorting_metrics for random forest failed: exception ({0}) was thrown for no"
                  " reason.".format(e))
            self.test_failed += 1
def test_grid_search_for_rf_over_all_params():
    """
    Instantiate the random-forest grid-search test class and run its checks.

    Exits the process with status 1 if any check recorded a failure.
    :return: None
    """
    runner = Test_rf_grid_search()
    runner.test_rf_grid_search_over_params()
    sys.stdout.flush()
    # non-zero failure count -> signal failure to the harness via exit code
    if runner.test_failed:
        sys.exit(1)
# Run under the H2O standalone-test harness when executed directly;
# when imported (e.g. by the multi-test runner), execute inline instead.
if __name__ == "__main__":
    pyunit_utils.standalone_test(test_grid_search_for_rf_over_all_params)
else:
    test_grid_search_for_rf_over_all_params()
| mathemage/h2o-3 | h2o-py/dynamic_tests/testdir_algos/rf/pyunit_rf_gridsearch_over_all_params_large.py | Python | apache-2.0 | 19,516 | [
"Gaussian"
] | 79b29322b34c5fb513c0733fd7c2047f5bfa5f7facb420f4e06ae155866a1390 |
import os
from urllib import request
from pybel import BELGraph
from pybel.dsl import *
from pybel.language import Entity
from pybel.io import from_json_file
from pybel.examples import egf_graph
from indra.statements import *
from indra.sources import bel
from indra.sources.bel import processor as pb
from indra.sources.bel.api import process_cbn_jgif_file, process_pybel_graph
from indra.databases import hgnc_client
# Resolve the MAP2K1 (MEK1) HGNC and UniProt IDs once at import time;
# the get_agent tests below compare groundings against these.
mek_hgnc_id = hgnc_client.get_hgnc_id('MAP2K1')
mek_up_id = hgnc_client.get_uniprot_id(mek_hgnc_id)
def test_process_pybel():
    """Processing the example EGF graph yields at least one Statement."""
    proc = bel.process_pybel_graph(egf_graph)
    assert proc.statements
def test_process_jgif():
    """Download a CBN JGIF network, process it, and check the Statements.

    The downloaded file is removed in a ``finally`` block so repeated runs
    do not leave a stray file behind even when processing raises (the
    original only cleaned up on the success path).
    """
    test_file_url = 'https://s3.amazonaws.com/bigmech/travis/Hox-2.0-Hs.jgf'
    test_file = 'Hox-2.0-Hs.jgf'
    request.urlretrieve(url=test_file_url, filename=test_file)
    try:
        pbp = process_cbn_jgif_file(test_file)
    finally:
        # Clean up
        os.remove(test_file)
    assert len(pbp.statements) == 26, len(pbp.statements)
    assert isinstance(pbp.statements[0], Statement)
    assert all(s.evidence[0].source_api == 'bel' for s in pbp.statements)
def test_nodelink_json():
    """Download a node-link JSON graph, process it, and check the Statements.

    As in ``test_process_jgif``, cleanup is done in a ``finally`` block so
    the temp file is removed even if parsing or processing raises.
    """
    test_file_url = \
        'https://s3.amazonaws.com/bigmech/travis/Hox-2.0-Hs_nljson.json'
    test_file = 'Hox-2.0-Hs_nljson.json'
    request.urlretrieve(url=test_file_url, filename=test_file)
    try:
        with open(test_file) as jr:
            pbp = process_pybel_graph(from_json_file(file=jr))
    finally:
        # Clean up
        os.remove(test_file)
    assert len(pbp.statements) == 26, len(pbp.statements)
    assert isinstance(pbp.statements[0], Statement)
    assert all(s.evidence[0].source_api == 'bel' for s in pbp.statements)
def test_get_agent_hgnc():
    """HGNC nodes resolve to MAP2K1 with HGNC and UP groundings, whether
    specified by gene symbol or by HGNC identifier."""
    by_name = protein(name='MAP2K1', namespace='HGNC')
    # identifier wins over the (wrong) name
    by_id = protein(name='Foo', namespace='HGNC', identifier='6840')
    for node in (by_name, by_id):
        ag = pb.get_agent(node, {})
        assert isinstance(ag, Agent)
        assert ag.name == 'MAP2K1'
        assert ag.db_refs.get('HGNC') == mek_hgnc_id
        assert ag.db_refs.get('UP') == mek_up_id
def test_get_agent_up():
    """A UniProt-grounded node resolves to the standard HGNC name and refs."""
    node = protein(namespace='UP', identifier='Q02750')
    ag = pb.get_agent(node, {})
    assert isinstance(ag, Agent)
    assert ag.name == 'MAP2K1'
    assert ag.db_refs.get('HGNC') == mek_hgnc_id
    assert ag.db_refs.get('UP') == mek_up_id
def test_get_agent_egid():
    """An Entrez Gene (EGID) node gains HGNC and UP cross-references."""
    node_data = {'function': 'Protein', 'name': '5008', 'namespace': 'EGID'}
    agent = pb.get_agent(node_data)
    assert isinstance(agent, Agent)
    # EGID 5008 is OSM; the processor standardizes the name and adds refs.
    assert agent.name == 'OSM'
    assert len(agent.db_refs) == 3
    assert agent.db_refs['EGID'] == '5008'
    assert agent.db_refs['HGNC'] == '8506'
    assert agent.db_refs['UP'] == 'P13725'
def test_get_agent_mgi():
    """An MGI (mouse) protein node keeps its name with a single MGI ref."""
    node = protein(namespace='MGI', name='Nr1h3')
    ag = pb.get_agent(node, {})
    assert isinstance(ag, Agent)
    assert ag.name == 'Nr1h3'
    assert len(ag.db_refs) == 1
    assert ag.db_refs.get('MGI') == 'Nr1h3'
def test_get_agent_rgd():
    """An RGD (rat) protein node keeps its name with a single RGD ref."""
    node = protein(namespace='RGD', name='Tp53')
    ag = pb.get_agent(node, {})
    assert isinstance(ag, Agent)
    assert ag.name == 'Tp53'
    assert len(ag.db_refs) == 1
    assert ag.db_refs.get('RGD') == 'Tp53'
def test_get_agent_sfam():
    """A Selventa protein family (SFAM) node is mapped to FamPlex."""
    node_data = {
        'cname': 'PRKC Family',
        'function': 'Protein',
        'name': 'PRKC Family',
        'namespace': 'SFAM'}
    agent = pb.get_agent(node_data)
    assert isinstance(agent, Agent)
    assert len(agent.db_refs) == 2
    # Both the original SFAM ref and the mapped FPLX ref are kept.
    assert agent.db_refs['SFAM'] == 'PRKC Family'
    assert agent.db_refs['FPLX'] == 'PKC'
    # The agent is renamed to the FamPlex standard name.
    assert agent.name == 'PKC'
def test_get_agent_sdis():
    """A Selventa disease (SDIS) pathology node keeps only its SDIS ref."""
    node_data = {
        'cname': 'metastasis',
        'function': 'Pathology',
        'name': 'metastasis',
        'namespace': 'SDIS'}
    agent = pb.get_agent(node_data)
    assert isinstance(agent, Agent)
    assert agent.name == 'metastasis'
    assert len(agent.db_refs) == 1
    assert agent.db_refs['SDIS'] == 'metastasis'
def test_get_agent_chebi():
    """A CHEBI abundance node is grounded to a CHEBI identifier by name."""
    node_data = {
        'cname': 'nitric oxide',
        'function': 'Abundance',
        'name': 'nitric oxide',
        'namespace': 'CHEBI'}
    agent = pb.get_agent(node_data)
    assert isinstance(agent, Agent)
    assert agent.name == 'nitric oxide'
    assert len(agent.db_refs) == 1
    # The name string is resolved to the curated CHEBI ID.
    assert agent.db_refs['CHEBI'] == 'CHEBI:16480'
def test_get_agent_schem():
    """A Selventa chemical (SCHEM) node keeps only its SCHEM ref."""
    node_data = {
        'cname': 'Promegestone',
        'function': 'Abundance',
        'name': 'Promegestone',
        'namespace': 'SCHEM'}
    agent = pb.get_agent(node_data)
    assert isinstance(agent, Agent)
    assert agent.name == 'Promegestone'
    assert len(agent.db_refs) == 1
    assert agent.db_refs['SCHEM'] == 'Promegestone'
def test_get_agent_mirna():
    """A miRNA node in the HGNC namespace is grounded to its HGNC ID."""
    node_data = {
        'cname': 'MIR218-1',
        'function': 'miRNA',
        'name': 'MIR218-1',
        'namespace': 'HGNC'}
    agent = pb.get_agent(node_data)
    assert isinstance(agent, Agent)
    assert agent.name == 'MIR218-1'
    assert len(agent.db_refs) == 1
    assert agent.db_refs['HGNC'] == '31595'
def test_get_agent_fusion():
    """Fusion proteins (e.g. BCR-ABL1) are not handled: get_agent is None."""
    node_data = {'function': 'Protein',
                 'fusion': {
                     'partner_5p': {'namespace': 'HGNC', 'name': 'BCR'},
                     'range_5p': {'missing': '?'},
                     'range_3p': {'missing': '?'},
                     'partner_3p': {'namespace': 'HGNC', 'name': 'ABL1'}}}
    agent = pb.get_agent(node_data)
    assert agent is None
def test_get_agent_up_no_id():
    """A UP node lacking an identifier cannot be grounded; result is None."""
    node = protein(name='MAP2K1', namespace='UP')
    assert pb.get_agent(node, {}) is None
def test_get_agent_meshpp():
    """A MESHPP bioprocess node becomes an Agent with empty db_refs."""
    node = bioprocess(name='Apoptosis', namespace='MESHPP')
    ag = pb.get_agent(node)
    assert isinstance(ag, Agent)
    assert ag.name == 'Apoptosis'
    assert ag.db_refs == {}
def test_get_agent_meshd():
    """A MESHD disease node becomes an Agent with empty db_refs."""
    node = bioprocess(name='Hyperoxia', namespace='MESHD')
    ag = pb.get_agent(node)
    assert isinstance(ag, Agent)
    assert ag.name == 'Hyperoxia'
    assert ag.db_refs == {}
def test_get_agent_with_mods():
    """pmod() variants become ModConditions on the Agent.

    Four cases: modification type only, residue only, position only, and
    residue plus position.
    """
    # Modification type only
    mek = protein(name='MAP2K1', namespace='HGNC',
                  variants=[pmod('Ph')])
    agent = pb.get_agent(mek, {})
    assert isinstance(agent, Agent)
    assert len(agent.mods) == 1
    mod = agent.mods[0]
    assert mod.mod_type == 'phosphorylation'
    assert not mod.residue
    assert not mod.position
    # Residue only: three-letter code mapped to one-letter
    mek = protein(name='MAP2K1', namespace='HGNC',
                  variants=[pmod('Ph', code='Ser')])
    agent = pb.get_agent(mek, {})
    assert isinstance(agent, Agent)
    assert len(agent.mods) == 1
    mod = agent.mods[0]
    assert mod.mod_type == 'phosphorylation'
    assert mod.residue == 'S'
    assert not mod.position
    # Position only: integer position becomes a string
    mek = protein(name='MAP2K1', namespace='HGNC',
                  variants=[pmod('Ph', position=218)])
    agent = pb.get_agent(mek, {})
    assert isinstance(agent, Agent)
    assert len(agent.mods) == 1
    mod = agent.mods[0]
    assert mod.mod_type == 'phosphorylation'
    assert not mod.residue
    assert mod.position == '218'
    # Residue and position together
    mek = protein(name='MAP2K1', namespace='HGNC',
                  variants=[pmod('Ph', position=218, code='Ser')])
    agent = pb.get_agent(mek, {})
    assert isinstance(agent, Agent)
    assert len(agent.mods) == 1
    mod = agent.mods[0]
    assert mod.mod_type == 'phosphorylation'
    assert mod.residue == 'S'
    assert mod.position == '218'
def test_get_agent_with_muts():
    """An hgvs() variant becomes a MutCondition on the Agent."""
    mek = protein(name='MAP2K1', namespace='HGNC',
                  variants=[hgvs('p.Val600Glu')])
    agent = pb.get_agent(mek, {})
    assert isinstance(agent, Agent)
    assert len(agent.mutations) == 1
    mut = agent.mutations[0]
    # 'p.Val600Glu' is parsed into one-letter from/to residues at position 600.
    assert mut.position == '600'
    assert mut.residue_from == 'V'
    assert mut.residue_to == 'E'
def test_get_agent_with_activity():
    """Edge-level activity() data becomes an ActivityCondition on the Agent."""
    mek = protein(name='MAP2K1', namespace='HGNC')
    agent = pb.get_agent(mek, activity('act'))
    assert isinstance(agent, Agent)
    assert isinstance(agent.activity, ActivityCondition)
    # Generic 'act' maps to the generic 'activity' type.
    assert agent.activity.activity_type == 'activity'
    assert agent.activity.is_active
def test_get_agent_complex():
    """A complex_abundance node yields one primary Agent (here MAP2K1) with
    the other member attached as a BoundCondition."""
    mek = protein(name='MAP2K1', namespace='HGNC')
    erk = protein(name='MAPK1', namespace='HGNC',
                  variants=[pmod('Ph', position=185, code='Thr')])
    cplx = complex_abundance([mek, erk])
    agent = pb.get_agent(cplx)
    assert isinstance(agent, Agent)
    assert agent.name == 'MAP2K1'
    assert len(agent.bound_conditions) == 1
    bc = agent.bound_conditions[0]
    assert isinstance(bc, BoundCondition)
    assert bc.is_bound is True
    # The bound member keeps its own modification state.
    bc_agent = bc.agent
    assert bc_agent.name == 'MAPK1'
    assert len(bc_agent.mods) == 1
    assert bc_agent.mods[0].mod_type == 'phosphorylation'
    assert bc_agent.mods[0].residue == 'T'
    assert bc_agent.mods[0].position == '185'
def test_get_agent_complex_none_agent():
    """If one of the agents in the complex can't be obtained (e.g., an
    unhandled namespace), then the complex itself should be None."""
    # Prime agent is None: unhandled 'FOO' namespace on the first member
    mek = protein(name='MAP2K1', namespace='FOO')
    erk = protein(name='MAPK1', namespace='HGNC',
                  variants=[pmod('Ph', position=185, code='Thr')])
    cplx = complex_abundance([mek, erk])
    agent = pb.get_agent(cplx)
    assert agent is None
    # Bound agent is None: unhandled namespace on the second member
    mek = protein(name='MAP2K1', namespace='HGNC')
    erk = protein(name='MAPK1', namespace='FOO',
                  variants=[pmod('Ph', position=185, code='Thr')])
    cplx = complex_abundance([mek, erk])
    agent = pb.get_agent(cplx)
    assert agent is None
def test_get_agent_named_complex_go():
# TODO: Handle named complexes and map to FamPlex where possible
node_data = {
'cname': '0043509',
'function': 'Complex',
'name': '0043509',
'namespace': 'GOCCID'}
agent = pb.get_agent(node_data)
assert agent is None
def test_get_agent_with_translocation():
node_data = protein(name='MAPK1', namespace='HGNC')
# Some example edge data
edge_data = translocation(from_loc=Entity('GOCC', 'intracellular'),
to_loc=Entity('GOCC', 'extracellular space'))
agent = pb.get_agent(node_data, edge_data)
assert isinstance(agent, Agent)
assert agent.name == 'MAPK1'
assert agent.location == 'extracellular space'
def test_phosphorylation_one_site_with_evidence():
    """A directlyIncreases edge onto a singly-phosphorylated substrate yields
    one Phosphorylation Statement carrying full Evidence metadata."""
    mek = protein(name='MAP2K1', namespace='HGNC')
    erk = protein(name='MAPK1', namespace='HGNC',
                  variants=[pmod('Ph', position=185, code='Thr')])
    g = BELGraph()
    ev_text = 'Some evidence.'
    ev_pmid = '123456'
    edge_hash = g.add_directly_increases(mek, erk, evidence=ev_text, citation=ev_pmid,
                                         annotations={"TextLocation": 'Abstract'})
    pbp = bel.process_pybel_graph(g)
    assert pbp.statements
    assert len(pbp.statements) == 1
    assert isinstance(pbp.statements[0], Phosphorylation)
    assert pbp.statements[0].residue == 'T'
    assert pbp.statements[0].position == '185'
    # The pmod is lifted into the Statement; neither Agent keeps it.
    enz = pbp.statements[0].enz
    sub = pbp.statements[0].sub
    assert enz.name == 'MAP2K1'
    assert enz.mods == []
    assert sub.name == 'MAPK1'
    assert sub.mods == []
    # Check evidence: source id is the PyBEL edge hash, and the original BEL
    # string plus section type are preserved in annotations/epistemics.
    assert len(pbp.statements[0].evidence) == 1
    ev = pbp.statements[0].evidence[0]
    assert ev.source_api == 'bel'
    assert ev.source_id == edge_hash
    assert ev.pmid == ev_pmid
    assert ev.text == ev_text
    assert ev.annotations == {'bel': 'p(HGNC:MAP2K1) directlyIncreases '
                                     'p(HGNC:MAPK1, pmod(Ph, Thr, 185))'}
    assert ev.epistemics == {'direct': True, 'section_type': 'abstract'}
def test_phosphorylation_two_sites():
mek = protein(name='MAP2K1', namespace='HGNC')
erk = protein(name='MAPK1', namespace='HGNC',
variants=[pmod('Ph', position=185, code='Thr'),
pmod('Ph', position=187, code='Tyr')])
g = BELGraph()
g.add_directly_increases(mek, erk, evidence="Some evidence.",
citation='123456')
pbp = bel.process_pybel_graph(g)
assert pbp.statements
assert len(pbp.statements) == 2
stmt1 = pbp.statements[0]
stmt2 = pbp.statements[1]
assert stmt1.residue == 'T'
assert stmt1.position == '185'
assert stmt2.residue == 'Y'
assert stmt2.position == '187'
assert stmt1.sub.mods == []
assert stmt2.sub.mods == []
assert len(pbp.statements[0].evidence) == 1
def test_regulate_amount1_prot_obj():
mek = protein(name='MAP2K1', namespace='HGNC')
erk = protein(name='MAPK1', namespace='HGNC')
g = BELGraph()
g.add_increases(mek, erk, evidence="Some evidence.", citation='123456')
pbp = bel.process_pybel_graph(g)
assert pbp.statements
assert len(pbp.statements) == 1
assert isinstance(pbp.statements[0], IncreaseAmount)
assert len(pbp.statements[0].evidence) == 1
def test_regulate_amount2_rna_obj():
# FIXME: Create a transcription-specific statement for p->rna
mek = protein(name='MAP2K1', namespace='HGNC')
erk = rna(name='MAPK1', namespace='HGNC')
g = BELGraph()
g.add_increases(mek, erk, evidence="Some evidence.", citation='123456')
pbp = bel.process_pybel_graph(g)
assert pbp.statements
assert len(pbp.statements) == 1
assert isinstance(pbp.statements[0], IncreaseAmount)
assert len(pbp.statements[0].evidence) == 1
def test_regulate_amount3_deg():
# FIXME: Create a stability-specific statement for p->deg(p(Foo))
mek = protein(name='MAP2K1', namespace='HGNC')
erk = protein(name='MAPK1', namespace='HGNC')
g = BELGraph()
g.add_increases(mek, erk, object_modifier=degradation(),
evidence="Some evidence.", citation='123456')
pbp = bel.process_pybel_graph(g)
assert pbp.statements
assert len(pbp.statements) == 1
assert isinstance(pbp.statements[0], DecreaseAmount)
assert len(pbp.statements[0].evidence) == 1
def test_regulate_amount4_subj_act():
mek = protein(name='MAP2K1', namespace='HGNC')
erk = protein(name='MAPK1', namespace='HGNC')
g = BELGraph()
g.add_increases(mek, erk, subject_modifier=activity(name='tscript'),
evidence="Some evidence.", citation='123456')
pbp = bel.process_pybel_graph(g)
assert pbp.statements
assert len(pbp.statements) == 1
assert isinstance(pbp.statements[0], IncreaseAmount)
subj = pbp.statements[0].subj
assert subj.name == 'MAP2K1'
assert isinstance(subj.activity, ActivityCondition)
assert subj.activity.activity_type == 'transcription'
assert subj.activity.is_active
assert len(pbp.statements[0].evidence) == 1
g = BELGraph()
g.add_increases(mek, erk, subject_modifier=activity(name='act'),
evidence="Some evidence.", citation='123456')
pbp = bel.process_pybel_graph(g)
assert pbp.statements
assert len(pbp.statements) == 1
assert isinstance(pbp.statements[0], IncreaseAmount)
subj = pbp.statements[0].subj
assert subj.name == 'MAP2K1'
assert isinstance(subj.activity, ActivityCondition)
assert subj.activity.activity_type == 'activity'
assert subj.activity.is_active
assert len(pbp.statements[0].evidence) == 1
def test_regulate_activity():
mek = protein(name='MAP2K1', namespace='HGNC')
erk = protein(name='MAPK1', namespace='HGNC')
g = BELGraph()
g.add_increases(mek, erk, subject_modifier=activity(name='kin'),
object_modifier=activity(name='kin'),
evidence="Some evidence.", citation='123456')
pbp = bel.process_pybel_graph(g)
assert pbp.statements
assert len(pbp.statements) == 1
assert isinstance(pbp.statements[0], Activation)
subj = pbp.statements[0].subj
assert subj.name == 'MAP2K1'
assert isinstance(subj.activity, ActivityCondition)
assert subj.activity.activity_type == 'kinase'
assert subj.activity.is_active
obj = pbp.statements[0].obj
assert obj.name == 'MAPK1'
assert obj.activity is None
assert pbp.statements[0].obj_activity == 'kinase'
assert len(pbp.statements[0].evidence) == 1
def test_active_form():
p53_pmod = protein(name='TP53', namespace='HGNC',
variants=[pmod('Ph', position=33, code='Ser')])
p53_obj = protein(name='TP53', namespace='HGNC')
g = BELGraph()
g.add_increases(p53_pmod, p53_obj, object_modifier=activity(name='tscript'),
evidence="Some evidence.", citation='123456')
pbp = bel.process_pybel_graph(g)
assert pbp.statements
assert len(pbp.statements) == 1
stmt = pbp.statements[0]
assert isinstance(stmt, ActiveForm)
assert stmt.activity == 'transcription'
assert stmt.is_active is True
ag = stmt.agent
assert ag.name == 'TP53'
assert len(ag.mods) == 1
mc = ag.mods[0]
assert mc.mod_type == 'phosphorylation'
assert mc.residue == 'S'
assert mc.position == '33'
assert len(pbp.statements[0].evidence) == 1
def test_gef():
sos = protein(name='SOS1', namespace='HGNC')
kras = protein(name='KRAS', namespace='HGNC')
g = BELGraph()
g.add_directly_increases(sos, kras,
subject_modifier=activity(name='activity'),
object_modifier=activity(name='gtp'),
evidence="Some evidence.", citation='123456')
pbp = bel.process_pybel_graph(g)
assert pbp.statements
assert len(pbp.statements) == 1
stmt = pbp.statements[0]
assert isinstance(stmt, Gef)
assert stmt.gef.name == 'SOS1'
assert stmt.ras.name == 'KRAS'
assert stmt.gef.activity.activity_type == 'activity'
assert stmt.gef.activity.is_active is True
assert stmt.ras.activity is None
assert len(pbp.statements[0].evidence) == 1
def test_indirect_gef_is_activation():
sos = protein(name='SOS1', namespace='HGNC')
kras = protein(name='KRAS', namespace='HGNC')
g = BELGraph()
g.add_increases(sos, kras, subject_modifier=activity(name='activity'),
object_modifier=activity(name='gtp'),
evidence="Some evidence.", citation='123456')
pbp = bel.process_pybel_graph(g)
assert pbp.statements
assert len(pbp.statements) == 1
stmt = pbp.statements[0]
assert isinstance(stmt, Activation)
assert stmt.subj.name == 'SOS1'
assert stmt.obj.name == 'KRAS'
assert stmt.subj.activity.activity_type == 'activity'
assert stmt.subj.activity.is_active is True
assert stmt.obj.activity is None
assert stmt.obj_activity == 'gtpbound'
assert len(pbp.statements[0].evidence) == 1
def test_gap():
sos = protein(name='RASA1', namespace='HGNC')
kras = protein(name='KRAS', namespace='HGNC')
g = BELGraph()
g.add_directly_decreases(sos, kras,
subject_modifier=activity(name='activity'),
object_modifier=activity(name='gtp'),
evidence="Some evidence.", citation='123456')
pbp = bel.process_pybel_graph(g)
assert pbp.statements
assert len(pbp.statements) == 1
stmt = pbp.statements[0]
assert isinstance(stmt, Gap)
assert stmt.gap.name == 'RASA1'
assert stmt.ras.name == 'KRAS'
assert stmt.gap.activity.activity_type == 'activity'
assert stmt.gap.activity.is_active is True
assert stmt.ras.activity is None
assert len(pbp.statements[0].evidence) == 1
def test_activation_bioprocess():
bax = protein(name='BAX', namespace='HGNC')
apoptosis = bioprocess(name='apoptosis', namespace='GOBP')
g = BELGraph()
g.add_increases(bax, apoptosis, evidence="Some evidence.", citation='123456')
pbp = bel.process_pybel_graph(g)
assert pbp.statements
assert len(pbp.statements) == 1
stmt = pbp.statements[0]
assert isinstance(stmt, Activation)
assert stmt.subj.name == 'BAX'
assert stmt.obj.name == 'apoptosis'
assert stmt.obj.db_refs == {} # FIXME: Update when GO lookup is implemented
assert len(pbp.statements[0].evidence) == 1
def test_gtpactivation():
kras = protein(name='KRAS', namespace='HGNC')
braf = protein(name='BRAF', namespace='HGNC')
g = BELGraph()
g.add_directly_increases(kras, braf,
subject_modifier=activity(name='gtp'),
object_modifier=activity(name='kin'),
evidence="Some evidence.", citation='123456')
pbp = bel.process_pybel_graph(g)
assert pbp.statements
assert len(pbp.statements) == 1
stmt = pbp.statements[0]
assert isinstance(stmt, GtpActivation)
assert stmt.subj.name == 'KRAS'
assert stmt.subj.activity.activity_type == 'gtpbound'
assert stmt.subj.activity.is_active is True
assert stmt.obj.name == 'BRAF'
assert stmt.obj.activity is None
assert stmt.obj_activity == 'kinase'
assert len(stmt.evidence) == 1
def test_conversion():
enz = protein(name='PLCG1', namespace='HGNC')
react_1 = abundance('SCHEM', '1-Phosphatidyl-D-myo-inositol 4,5-bisphosphate')
p1 = abundance('SCHEM', 'Diacylglycerol')
p2 = abundance('SCHEM', 'Inositol 1,4,5-trisphosphate')
rxn = reaction(
reactants=react_1,
products=[p1, p2],
)
g = BELGraph()
g.add_directly_increases(enz, rxn,
subject_modifier=activity(name='activity'),
evidence="Some evidence.", citation='123456')
pbp = bel.process_pybel_graph(g)
assert pbp.statements
assert len(pbp.statements) == 1
stmt = pbp.statements[0]
assert isinstance(stmt, Conversion)
assert stmt.subj.name == 'PLCG1'
assert stmt.subj.activity.activity_type == 'activity'
assert stmt.subj.activity.is_active is True
assert len(stmt.obj_from) == 1
assert isinstance(stmt.obj_from[0], Agent)
assert stmt.obj_from[0].name == '1-Phosphatidyl-D-myo-inositol ' \
'4,5-bisphosphate'
assert len(stmt.obj_to) == 2
# why do these not appear in alphabetical order?
# PyBEL sorts the nodes based on their BEL, and
# Inositol 1,4,5-trisphosphate gets quoted.
assert stmt.obj_to[0].name == 'Inositol 1,4,5-trisphosphate'
assert stmt.obj_to[1].name == 'Diacylglycerol'
assert len(stmt.evidence) == 1
def test_controlled_transloc_loc_cond():
"""Controlled translocations are currently not handled."""
subj = protein(name='MAP2K1', namespace='HGNC')
obj = protein(name='MAPK1', namespace='HGNC')
g = BELGraph()
transloc = translocation(from_loc=Entity('GOCC', 'intracellular'),
to_loc=Entity('GOCC', 'extracellular space'))
g.add_increases(subj, obj, object_modifier=transloc,
evidence="Some evidence.", citation='123456')
pbp = bel.process_pybel_graph(g)
assert not pbp.statements
def test_subject_transloc_loc_cond():
"""Translocations of the subject are treated as location conditions on the
subject (using the to_loc location as the condition)"""
subj = protein(name='MAP2K1', namespace='HGNC')
obj = protein(name='MAPK1', namespace='HGNC')
transloc = translocation(from_loc=Entity('GOCC', 'intracellular'),
to_loc=Entity('GOCC', 'extracellular space'))
g = BELGraph()
g.add_increases(subj, obj, subject_modifier=transloc,
evidence="Some evidence.", citation='123456')
pbp = bel.process_pybel_graph(g)
assert pbp.statements
assert len(pbp.statements) == 1
stmt = pbp.statements[0]
assert isinstance(stmt, IncreaseAmount)
assert stmt.subj.name == 'MAP2K1'
assert stmt.subj.location == 'extracellular space'
assert stmt.obj.name == 'MAPK1'
def test_subject_transloc_active_form():
"""ActiveForms where the subject is a translocation--should draw on the
to-location of the subject."""
subj = protein(name='MAP2K1', namespace='HGNC')
obj = protein(name='MAP2K1', namespace='HGNC')
transloc = translocation(from_loc=Entity('GOCC', 'intracellular'),
to_loc=Entity('GOCC', 'extracellular space'))
g = BELGraph()
g.add_increases(subj, obj, subject_modifier=transloc,
object_modifier=activity(name='kin'),
evidence="Some evidence.", citation='123456')
pbp = bel.process_pybel_graph(g)
assert pbp.statements
assert len(pbp.statements) == 1
stmt = pbp.statements[0]
assert isinstance(stmt, ActiveForm)
assert stmt.agent.name == 'MAP2K1'
assert stmt.agent.location == 'extracellular space'
assert stmt.agent.activity is None
assert stmt.activity == 'kinase'
assert stmt.is_active is True
def test_complex_stmt_with_activation():
raf = protein(name='BRAF', namespace='HGNC')
mek = protein(name='MAP2K1', namespace='HGNC')
erk = protein(name='MAPK1', namespace='HGNC')
cplx = complex_abundance([raf, mek])
g = BELGraph()
g.add_directly_increases(cplx, erk,
object_modifier=activity(name='kin'),
evidence="Some evidence.", citation='123456')
pbp = bel.process_pybel_graph(g)
assert pbp.statements
assert len(pbp.statements) == 2
stmt1 = pbp.statements[0]
assert isinstance(stmt1, Complex)
assert len(stmt1.agent_list()) == 2
assert sorted([ag.name for ag in stmt1.agent_list()]) == ['BRAF', 'MAP2K1']
assert stmt1.evidence
stmt2 = pbp.statements[1]
assert isinstance(stmt2, Activation)
assert stmt2.subj.name == 'BRAF'
assert stmt2.subj.bound_conditions[0].agent.name == 'MAP2K1'
assert stmt2.obj.name == 'MAPK1'
assert stmt2.obj.activity is None
assert stmt2.obj_activity == 'kinase'
# NOTE(review): when executed directly only the fusion test runs — presumably
# developer scaffolding left in place; the full suite is collected by the
# test runner. Confirm whether this should run all tests instead.
if __name__ == '__main__':
    test_get_agent_fusion()
| pvtodorov/indra | indra/tests/test_pybel_api.py | Python | bsd-2-clause | 26,279 | [
"Pybel"
] | 2fa8d970d201b10d53034b89894438f656f3eda30ce3cc4cc5116444c51ab25f |
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
from __future__ import print_function, division
import warnings
import numpy as np
##############################################################################
# Functions
##############################################################################
def lengths_and_angles_to_box_vectors(a_length, b_length, c_length, alpha, beta, gamma):
    """Convert from the lengths/angles of the unit cell to the box
    vectors (Bravais vectors). The angles should be in degrees.

    Parameters
    ----------
    a_length : scalar or np.ndarray
        length of Bravais unit vector **a**
    b_length : scalar or np.ndarray
        length of Bravais unit vector **b**
    c_length : scalar or np.ndarray
        length of Bravais unit vector **c**
    alpha : scalar or np.ndarray
        angle between vectors **b** and **c**, in degrees.
    beta : scalar or np.ndarray
        angle between vectors **c** and **a**, in degrees.
    gamma : scalar or np.ndarray
        angle between vectors **a** and **b**, in degrees.

    Returns
    -------
    a : np.ndarray
        If the inputs are scalar, the vectors will be one dimensional
        (length 3). If the inputs are one dimension, shape=(n_frames, ),
        then the output will be (n_frames, 3)
    b : np.ndarray
        Same shape behavior as ``a``.
    c : np.ndarray
        Same shape behavior as ``a``.

    Examples
    --------
    >>> import numpy as np
    >>> result = lengths_and_angles_to_box_vectors(1, 1, 1, 90.0, 90.0, 90.0)

    Notes
    -----
    This code is adapted from gyroid, which is licensed under the BSD
    http://pythonhosted.org/gyroid/_modules/gyroid/unitcell.html
    """
    # Angles this small in all three slots suggest the caller passed radians.
    if np.all(alpha < 2*np.pi) and np.all(beta < 2*np.pi) and np.all(gamma < 2*np.pi):
        warnings.warn('All your angles were less than 2*pi. Did you accidently give me radians?')

    alpha = alpha * np.pi / 180
    beta = beta * np.pi / 180
    gamma = gamma * np.pi / 180

    # Canonical orientation: a along x, b in the x-y plane.
    a = np.array([a_length, np.zeros_like(a_length), np.zeros_like(a_length)])
    b = np.array([b_length*np.cos(gamma), b_length*np.sin(gamma), np.zeros_like(b_length)])
    cx = c_length*np.cos(beta)
    # BUG FIX: the y-projection of c must be divided by sin(gamma) (standard
    # triclinic cell transform); without it any cell with gamma != 90 degrees
    # produced vectors with the wrong length/angles.
    cy = c_length*(np.cos(alpha) - np.cos(beta)*np.cos(gamma))/np.sin(gamma)
    cz = np.sqrt(c_length*c_length - cx*cx - cy*cy)
    c = np.array([cx, cy, cz])

    if not a.shape == b.shape == c.shape:
        raise TypeError('Shape is messed up.')

    # Transpose so per-frame vectors are rows: (n_frames, 3) for vector input.
    return a.T, b.T, c.T
def box_vectors_to_lengths_and_angles(a, b, c):
    """Convert box vectors into the lengths and angles defining the box.

    Parameters
    ----------
    a, b, c : np.ndarray
        The vectors defining the three edges of the periodic box.  Each is
        either a length-3 array (single frame) or an (n_frames, 3) array
        where row ``i`` holds that vector in frame ``i``.

    Returns
    -------
    a_length, b_length, c_length : scalar or np.ndarray
        Lengths of the Bravais unit vectors **a**, **b**, **c**.
    alpha, beta, gamma : scalar or np.ndarray
        Angles (in degrees) between **b**/**c**, **c**/**a** and
        **a**/**b** respectively.

    Raises
    ------
    TypeError
        If the three inputs do not share a shape, or the last dimension
        is not length 3.
    ValueError
        If the inputs are not 1d or 2d.

    Examples
    --------
    >>> a = np.array([2,0,0], dtype=float)
    >>> b = np.array([0,1,0], dtype=float)
    >>> c = np.array([0,1,1], dtype=float)
    >>> l1, l2, l3, alpha, beta, gamma = box_vectors_to_lengths_and_angles(a, b, c)
    >>> (l1 == 2.0) and (l2 == 1.0) and (l3 == np.sqrt(2))
    True
    """
    if not (a.shape == b.shape == c.shape):
        raise TypeError('Shape is messed up.')
    if a.shape[-1] != 3:
        raise TypeError('The last dimension must be length 3')
    if a.ndim not in (1, 2):
        raise ValueError('vectors must be 1d or 2d (for a vectorized '
                         'operation on multiple frames)')

    # Magnitudes, reduced over the last axis so that both the (3,) and the
    # (n_frames, 3) layouts work.
    reduce_axis = a.ndim - 1
    a_length = np.sqrt((a * a).sum(axis=reduce_axis))
    b_length = np.sqrt((b * b).sum(axis=reduce_axis))
    c_length = np.sqrt((c * c).sum(axis=reduce_axis))

    def _angle_deg(u, v, u_len, v_len):
        # einsum contracts only the last axis, i.e. a per-frame dot product.
        cosine = np.einsum('...i, ...i', u, v) / (u_len * v_len)
        return np.arccos(cosine) * 180.0 / np.pi

    alpha = _angle_deg(b, c, b_length, c_length)
    beta = _angle_deg(c, a, c_length, a_length)
    gamma = _angle_deg(a, b, a_length, b_length)

    return a_length, b_length, c_length, alpha, beta, gamma
| marscher/mdtraj | MDTraj/utils/unitcell.py | Python | lgpl-2.1 | 6,646 | [
"MDTraj"
] | af0ee111f668239cd3306151d23e7645771c9fdebf3e2da715b25378bca0471d |
import ast
from lenatu import _facts as facts
import collections
def _nodes_of_block(block):
"""
Returns nodes that define or execute the given block.
"""
def visit(node):
if getattr(node, "executed_in", block) is block or getattr(node, "defined_block", None) is block:
yield node
for child in ast.iter_child_nodes(node):
for n in visit(child): yield n
return visit(block.defined_by)
def _variable_accesses_in(block):
    """Yield ``(node, attribute, variable-name, usage)`` tuples for all
    accesses to variables made by code executed in the given block.
    """
    for node in _nodes_of_block(block):
        # facts.NAME_FIELDS maps node types to a function returning the
        # fields of that node which hold variable names.
        field_specs = facts.NAME_FIELDS.get(type(node), lambda _: [])(node)
        for attribute, usage, which_block in field_specs:
            if which_block == facts.DEFINED:
                owner = getattr(node, "defined_block", None)
            else:
                owner = getattr(node, "executed_in", None)
            if owner != block:
                continue
            value = getattr(node, attribute)
            # A field may carry a single name or a list of names; yield one
            # tuple per name either way.
            names = value if isinstance(value, list) else [value]
            for name in names:
                yield (node, attribute, name, usage)
def _blocks_defined_in(block):
"""
Returns blocks that are directly defined in the given block.
"""
def visit(node):
if getattr(node, "executed_in", None) is block or getattr(node, "defined_block", None) is block:
if getattr(node, "executed_in", None) is block and getattr(node, "defined_block", block) is not block:
yield node.defined_block
# no need to look inside the children of this node. We'll only find
# the same block again.
else:
for child in ast.iter_child_nodes(node):
for b in visit(child): yield b
return visit(block.defined_by)
def _scope_lookup(identifier, usages, blocks):
    """
    Find the block the given identifier belongs to.

    We search backwards, starting with ``blocks[-1]`` (the innermost,
    current block). ``blocks[0]`` must be a module. Blocks from classes
    are ignored (with the exception of ``blocks[-1]``), because class
    locals are not visible to nested scopes.

    :param identifier: variable name to resolve.
    :param usages: set of usage kinds observed for the name (may contain
        ``facts.GLOBAL`` / ``facts.NONLOCAL`` markers).
    :param blocks: candidate blocks, module first, innermost last; all
        must already have ``local_variables`` computed.
    """
    if not isinstance(blocks[0].defined_by, ast.Module):
        raise ValueError("block[0] should be a module.")
    if facts.GLOBAL in usages:
        # `global` declarations always bind at module level.
        return blocks[0]
    for block in reversed(blocks):
        if block == blocks[-1]:
            if facts.NONLOCAL in usages:
                continue # don't look in the local block
        else:
            if not facts.are_locals_visible_to_childen(block):
                continue # skip over enclosing class-blocks
        if identifier in block.local_variables:
            return block
    else:
        # for-else: no block in the chain declares the name, so
        # identifier is a global variable which isn't assigned directly in the module.
        return blocks[0]
def _assign_scopes(block, enclosing_blocks):
    """Resolve variable scopes for code executed in *block*.

    Sets ``block.local_variables`` and attaches an ``<attribute>_block``
    attribute (the owning block, or a list of owning blocks) to every AST
    node executed within *block* that accesses a variable.

    :param enclosing_blocks: Enclosing blocks (without *block*). The module
        is ``enclosing_blocks[0]`` and the direct parent is
        ``enclosing_blocks[-1]``; empty if *block* is the module. All of
        them must already have ``local_variables`` set.
    """
    # Collect every usage kind observed for each identifier in this block.
    usage_sets = collections.defaultdict(set)
    for _, _, name, usage in _variable_accesses_in(block):
        usage_sets[name].add(usage)

    # Identifiers whose usage pattern makes them local to this block.
    block.local_variables = [name for name, kinds in usage_sets.items()
                             if facts.is_local_variable(kinds)]

    # Each used variable is local to this block or one of its ancestors.
    search_path = enclosing_blocks + [block]
    owner_of = {name: _scope_lookup(name, kinds, search_path)
                for name, kinds in usage_sets.items()}

    # Write the resolved scopes back onto the AST nodes.
    for node, attribute, _, _ in _variable_accesses_in(block):
        value = getattr(node, attribute)
        if isinstance(value, list):
            resolved = [owner_of[v] for v in value]
        else:
            resolved = owner_of[value]
        setattr(node, attribute + "_block", resolved)
def augment_scopes(block, enclosing_blocks=None):
    """
    Augment the block and all sub-blocks with scope information.

    This will set the block's `local_variables` field and adds the
    `xyz_block` attributes to the nodes.

    :param enclosing_blocks: Enclosing blocks, module first; ``None`` (the
        default) means *block* is the outermost block. A mutable-default
        list was replaced by this ``None`` sentinel to avoid the shared
        mutable-default-argument pitfall.
    """
    if enclosing_blocks is None:
        enclosing_blocks = []
    _assign_scopes(block, enclosing_blocks)
    # Recurse depth-first into every block defined directly inside this one,
    # extending the chain of enclosing blocks as we go.
    for child_block in _blocks_defined_in(block):
        augment_scopes(child_block, enclosing_blocks + [block])
| smurn/lenatu | lenatu/_scope.py | Python | mit | 4,948 | [
"VisIt"
] | 49739d0f21b3d62864ad6f72240f98165768029948d611126f3f58402b8f39d1 |
from common import Modules, data_strings, load_yara_rules, PEParseModule, ModuleMetadata
class vertexnet(PEParseModule):
    """Detection/extraction module for the VertexNet malware family.

    Scans printable strings of a PE file for the embedded configuration
    block and decodes its seven fields (C2 server, ports, mutex, ...).
    """
    def __init__(self):
        md = ModuleMetadata(
            module_name="vertexnet",
            bot_name="VertexNet",
            description="General purpose malware",
            authors=["Brian Wallace (@botnet_hunter)"],
            version="1.0.0",
            date="March 25, 2015",
            references=[]
        )
        PEParseModule.__init__(self, md)
        # Lazily-loaded compiled yara rules (see _generate_yara_rules).
        self.yara_rules = None
        pass

    def _generate_yara_rules(self):
        # Load and cache the yara rule set on first use.
        if self.yara_rules is None:
            self.yara_rules = load_yara_rules("vertexnet.yara")
        return self.yara_rules

    def get_bot_information(self, file_data):
        """Extract the VertexNet configuration from raw file bytes.

        Returns a dict of decoded settings, or an empty dict if no
        well-formed 7-field configuration is found.
        """
        results = {}
        for s in data_strings(file_data):
            # The config string sits immediately before the embedded
            # Windows assembly manifest; cut the manifest off.
            if r'<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">' in s:
                s = s[:s.find(r'<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">')]
                config = []
                c = ""
                index = 0
                # Fields appear to be padded to 4-byte boundaries with
                # "P"/"PA"/"PAD"; a padded boundary ends a field.
                # NOTE(review): padding scheme inferred from this parser --
                # confirm against real samples.
                for x in s:
                    c += x
                    index += 1
                    if index % 4 == 0 and (c.endswith("P") or c.endswith("PA") or c.endswith("PAD")):
                        for suff in ["P", "PA", "PAD"]:
                            if c.endswith(suff):
                                c = c[:-len(suff)]
                                break
                        config.append(c)
                        c = ""
                if len(c) > 0:
                    config.append(c)
                # A valid VertexNet config has exactly seven fields.
                if len(config) == 7:
                    results['drop_location'] = config[0]
                    results['cmd_get_interval'] = int(config[1])
                    results['http_port'] = int(config[2])
                    results['refresh_interval'] = int(config[3])
                    results['mutex'] = config[4]
                    results['http_path'] = config[5]
                    results['server'] = config[6]
                    # Normalize the server value: strip scheme and trailing slash.
                    if results['server'].startswith("http://"):
                        results['server'] = results['server'][len('http://'):]
                    if results['server'].endswith("/"):
                        results['server'] = results['server'][:-1]
                    results['c2_uri'] = "http://{0}:{1}{2}".format(results['server'], results['http_port'], results['http_path'])
                    return results
        return results
Modules.list.append(vertexnet()) | bwall/bamfdetect | BAMF_Detect/modules/vertexnet.py | Python | mit | 2,546 | [
"Brian"
] | 799d335c155e2d9e3b3a93eb66be4e04876338428b29daa99318dd0515fd8632 |
# Copyright 2017 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
from reference_models.propagation.itm import itm
# This test data comes directly from the example cases given on the NTIA/ITS
# information page at:
# https://www.its.bldrdoc.gov/resources/radio-propagation-software/itm/itm.aspx
# (see Fortran souce code and sample results section)
# The input data can be found in the file called qkpfldat.txt
# The expected output can be found in the file called qkpflanx.txt
# Profile path corresponds to: CRYSTAL PALACE TO MURSLEY, ENGLAND
##
## QKPFL TEST 1, PATH 2200 (MEASURED MEDIAN LB=133.2 DB)
## CRYSTAL PALACE TO MURSLEY, ENGLAND
##
## DISTANCE 77.8 KM
## FREQUENCY 41.5 MHZ
## ANTENNA HEIGHTS 143.9 8.5 M
## EFFECTIVE HEIGHTS 240.5 18.4 M
## TERRAIN, DELTA H 89. M
##
## POL=0, EPS=15., SGM= .005 S/M
## CLIM=5, NS=314., K= 1.368
## PROFILE- NP= 156, XI= .499 KM
##
## A DOUBLE-HORIZON PATH
## DIFFRACTION IS THE DOMINANT MODE
##
## ESTIMATED QUANTILES OF BASIC TRANSMISSION LOSS (DB)
## FREE SPACE VALUE- 102.6 DB
##
## RELIA- WITH CONFIDENCE
## BILITY 50.0 90.0 10.0
##
## 1.0 128.6 137.6 119.6
## 10.0 132.2 140.8 123.5
## 50.0 135.8 144.3 127.2
## 90.0 138.0 146.5 129.4
## 99.0 139.7 148.4 131.0
## QKPFL TEST 2, PATH 1979 (MEASURED MEDIAN LB=149.5 DB)
## CRYSTAL PALACE TO MURSLEY, ENGLAND
##
## DISTANCE 77.8 KM
## FREQUENCY 573.3 MHZ
## ANTENNA HEIGHTS 194.0 9.1 M
## EFFECTIVE HEIGHTS 292.5 19.0 M
## TERRAIN, DELTA H 91. M
##
## POL=0, EPS=15., SGM= .005 S/M
## CLIM=5, NS=314., K= 1.368
## PROFILE- NP= 156, XI= .499 KM
##
## A DOUBLE-HORIZON PATH
## DIFFRACTION IS THE DOMINANT MODE
##
## ESTIMATED QUANTILES OF BASIC TRANSMISSION LOSS (DB)
## FREE SPACE VALUE- 125.4 DB
##
## RELIA- WITH CONFIDENCE
## BILITY 50.0 90.0 10.0
##
## 1.0 144.3 154.1 134.4
## 10.0 150.9 159.5 142.3
## 50.0 157.6 165.7 149.4
## 90.0 161.6 169.9 153.3
## 99.0 164.9 173.6 156.2
PROFILE = [156, 77800./156.,
96., 84., 65., 46., 46., 46., 61., 41., 33., 27., 23., 19., 15., 15., 15.,
15., 15., 15., 15., 15., 15., 15., 15., 15., 17., 19., 21., 23., 25., 27.,
29., 35., 46., 41., 35., 30., 33., 35., 37., 40., 35., 30., 51., 62., 76.,
46., 46., 46., 46., 46., 46., 50., 56., 67., 106., 83., 95., 112., 137., 137.,
76., 103., 122., 122., 83., 71., 61., 64., 67., 71., 74., 77., 79., 86., 91.,
83., 76., 68., 63., 76., 107., 107., 107., 119., 127., 133., 135., 137., 142., 148.,
152., 152., 107., 137., 104., 91., 99., 120., 152., 152., 137., 168., 168., 122., 137.,
137., 170., 183., 183., 187., 194., 201., 192., 152., 152., 166., 177., 198., 156., 127.,
116., 107., 104., 101., 98., 95., 103., 91., 97., 102., 107., 107., 107., 103., 98.,
94., 91., 105., 122., 122., 122., 122., 122., 137., 137., 137., 137., 137., 137., 137.,
137., 140., 144., 147., 150., 152., 159.,
]
# Utility function to compute the vertical incidence angles at CBSD and Rx
# This code is derived from the qlrps() routine in itm.py, as specified
# in R2-SGN-21.
# NOTE:
# This routine was initially used in the Winnforum ITM model, before replacement by
# the angles directly calculated by the hzns() routine it used to replicate.
# Keeping it as part of a unit test just for non regression test purposes.
def _GetHorizonAnglesLegacy(its_elev, height_cbsd, height_rx, refractivity):
"""Gets the horizon angles given a terrain profile.
Derived from ITM hzns() routine as specified in R2-SGN-21.
Inputs:
its_elev: Terrain profile in ITM format
- pfl[0] = number of terrain points + 1
- pfl[1] = step size, in meters
- pfl[i] = elevation above mean sea level, in meters
height_cbsd:Height of the CBSD
height_rx: Height of the reception point
Returns:
a tuple of:
ver_cbsd: Vertical incidence angle. Positive value means upwards.
ver_rx: Vertical incidence angle. Positive value means upwards.
hor_dist_cbsd: Horizon distance from CBSD (ie diffraction edge)
hor_dist_rx: Horizon distance from Rx (ie diffraction edge).
"""
num_points = int(its_elev[0])
step = its_elev[1]
dist = num_points * step
# Find the refractivity at the average terrain height
start_avg = int(3.0 + 0.1 * num_points)
end_avg = num_points - start_avg + 6
zsys = np.mean(its_elev[start_avg-1:end_avg])
refractivity *= np.exp(-zsys/9460.0)
# Find the ray down-curvature per meter
gma = 157e-9
gme = gma*(1.0 - 0.04665 * np.exp(refractivity/179.3))
alt_cbsd = its_elev[2] + height_cbsd
alt_rx = its_elev[num_points+2] + height_rx
qc = 0.5 * gme
q = qc * dist
# theta0 and theta1 the slopes, dl0 and dl1 the horizon distances
theta1 = (alt_rx - alt_cbsd) / dist
theta0 = theta1 - q
theta1 = -theta1 - q
dl0 = dist
dl1 = dist
if num_points >= 2:
sa = 0.0
sb = dist
wq = True
for i in range(1, num_points):
sa += step
sb -= step
q = its_elev[i+2] - (qc*sa + theta0) * sa - alt_cbsd
if q > 0.0:
theta0 += q/sa
dl0 = sa
wq = False
if not wq:
q = its_elev[i+2] - (qc*sb + theta1) * sb - alt_rx
if q > 0.0:
theta1 += q/sb
dl1 = sb
return (np.arctan(theta0) * 180/np.pi,
np.arctan(theta1) * 180/np.pi,
dl0,
dl1)
class TestItm(unittest.TestCase):
    """Validates the ITM binding against the official NTIA/ITS QKPFL sample
    cases (Crystal Palace to Mursley paths; see the comment block above),
    and checks the horizon angles against the legacy reimplementation."""

    def test_qkpfl_path2200_its(self):
        """QKPFL test 1, path 2200 (41.5 MHz): losses match qkpflanx.txt."""
        confidence_vals = [0.5, 0.9, 0.1]
        reliability_vals = [0.01, 0.1, 0.5, 0.9, 0.99]
        # One expected loss per (reliability, confidence) pair, row-major.
        expected_losses = [128.6, 137.6, 119.6,
                           132.2, 140.8, 123.5,
                           135.8, 144.3, 127.2,
                           138.0, 146.5, 129.4,
                           139.7, 148.4, 131.0]
        frequency = 41.5
        height1 = 143.9
        height2 = 8.5
        refractivity = 314.0 # Typical
        refractivity_final=True
        dielectric = 15 # Typical for ground
        conductivity = 0.005 # Typical for ground
        climate = 5 # Continental temperate
        polarization = 0 # Vertical
        mdvar = 12
        k = 0
        for reliability in reliability_vals:
            for confidence in confidence_vals:
                loss, ver0, ver1, err, mode = itm.point_to_point(PROFILE, height1, height2,
                                                                 dielectric, .005, refractivity,
                                                                 frequency, climate, polarization,
                                                                 confidence, reliability,
                                                                 mdvar, refractivity_final)
                self.assertAlmostEqual(loss, expected_losses[k], 1)
                k+=1

    def test_qkpfl_path1979_its(self):
        """QKPFL test 2, path 1979 (573.3 MHz): losses match qkpflanx.txt."""
        confidence_vals = [0.5, 0.9, 0.1]
        reliability_vals = [0.01, 0.1, 0.5, 0.9, 0.99]
        expected_losses = [144.3, 154.1, 134.4,
                           150.9, 159.5, 142.3,
                           157.6, 165.7, 149.4,
                           161.6, 169.9, 153.3,
                           164.9, 173.6, 156.2]
        frequency = 573.3
        height1 = 194.0
        height2 = 9.1
        refractivity = 314.0 # Typical
        refractivity_final=True
        dielectric = 15 # Typical for ground
        conductivity = 0.005 # Typical for ground
        climate = 5 # Continental temperate
        polarization = 0 # Vertical
        mdvar = 12
        k = 0
        for reliability in reliability_vals:
            for confidence in confidence_vals:
                loss, ver0, ver1, err, mode = itm.point_to_point(PROFILE, height1, height2,
                                                                 dielectric, .005, refractivity,
                                                                 frequency, climate, polarization,
                                                                 confidence, reliability, mdvar,
                                                                 refractivity_final)
                self.assertAlmostEqual(loss, expected_losses[k], 1)
                k+=1

    def test_default_arg(self):
        """Omitting mdvar/refractivity_final equals passing their defaults."""
        frequency = 573.3
        height1 = 194.0
        height2 = 9.1
        refractivity = 314.0 # Typical
        dielectric = 15 # Typical for ground
        conductivity = 0.005 # Typical for ground
        climate = 5 # Continental temperate
        polarization = 0 # Vertical
        confidence = 0.5
        reliability = 0.5
        # Expected default arguments
        mdvar = 12
        refractivity_final = False
        loss1, _, _, _, _ = itm.point_to_point(PROFILE, height1, height2,
                                               dielectric, .005, refractivity,
                                               frequency, climate, polarization,
                                               confidence, reliability,
                                               mdvar, refractivity_final)
        loss2, _, _, _, _ = itm.point_to_point(PROFILE, height1, height2,
                                               dielectric, .005, refractivity,
                                               frequency, climate, polarization,
                                               confidence, reliability)
        self.assertEqual(loss1, loss2)

    def test_reliabilities(self):
        """Vectorized reliabilities equal per-scalar calls, element-wise."""
        # test scalar vs vector version
        frequency = 573.3
        height1 = 194.0
        height2 = 9.1
        refractivity = 314.0 # Typical
        dielectric = 15 # Typical for ground
        conductivity = 0.005 # Typical for ground
        climate = 5 # Continental temperate
        polarization = 0 # Vertical
        confidence = 0.5
        mdvar = 12
        refractivity_final = False
        reliabilities = np.arange(0.1, 1.0, 0.1)
        losses, _, _, _, _ = itm.point_to_point(PROFILE, height1, height2,
                                                dielectric, .005, refractivity,
                                                frequency, climate, polarization,
                                                confidence, reliabilities)
        self.assertEqual(len(losses), len(reliabilities))
        for rel, exp_loss in zip(reliabilities, losses):
            loss, _, _, _, _ = itm.point_to_point(PROFILE, height1, height2,
                                                  dielectric, .005, refractivity,
                                                  frequency, climate, polarization,
                                                  confidence, rel)
            self.assertEqual(loss, exp_loss)

    def test_horizon_angles(self):
        """Double-horizon path: itm angles match the legacy hzns() replica."""
        refractivity = 314.
        a0, a1, d0, d1 = _GetHorizonAnglesLegacy(PROFILE, 143.9, 8.5, refractivity)
        _, v0, v1, _, _ = itm.point_to_point(PROFILE, 143.9, 8.5,
                                             dielectric=15, conductivity=.005,
                                             refractivity=refractivity,
                                             freq_mhz=41.5, climate=5,
                                             polarization=0, confidence=0.5, reliabilities=0.5)
        # Reference slopes/distances from the NTIA sample output.
        self.assertAlmostEqual(a0, np.arctan(-0.003900)*180./np.pi, 4)
        self.assertAlmostEqual(a1, np.arctan(0.000444)*180./np.pi, 4)
        self.assertAlmostEqual(d0, 55357.7, 1)
        self.assertAlmostEqual(d1, 19450.0, 1)
        # test exactness of new method vs old method
        self.assertEqual(a0, v0)
        self.assertEqual(a1, v1)

    def test_horizon_angles_los(self):
        """Line-of-sight path (local short profile shadows module PROFILE)."""
        refractivity = 314.
        PROFILE = [5, 28.5, 10, 10, 8, 9, 11, 12]
        a0, a1, _, _ = _GetHorizonAnglesLegacy(PROFILE, 100, 50, refractivity)
        _, v0, v1, _, _ = itm.point_to_point(PROFILE, 100, 50,
                                             dielectric=15, conductivity=.005,
                                             refractivity=refractivity,
                                             freq_mhz=41.5, climate=5,
                                             polarization=0, confidence=0.5, reliabilities=0.5)
        self.assertEqual(a0, v0)
        self.assertEqual(a1, v1)
if __name__ == '__main__':
unittest.main()
| Wireless-Innovation-Forum/Spectrum-Access-System | src/harness/reference_models/propagation/itm/itm_test.py | Python | apache-2.0 | 12,660 | [
"CRYSTAL"
] | 77daee0d021d6af8edd9087f192e2e0bf5424fd500864f8edf25764f69de6f9f |
#!/usr/bin/env python
# Purdue CS530 - Introduction to Scientific Visualization
# Fall 2013
# Simple example to illustrate the typical structure of the rendering
# pipeline in VTK. Here, we draw a sphere and enter the interactive mode.
# Our example needs the VTK Python package
import vtk
def render_demo():
    """Draw an orange sphere with dark-green wireframe edges and start the
    interactive viewer.

    Illustrates the classic VTK pipeline:
    source -> filter -> mapper -> actor -> renderer -> window -> interactor.
    """
    # Data source: a polygonal (triangulated) approximation of a sphere.
    sphere = vtk.vtkSphereSource()
    sphere.SetRadius(1.0)
    sphere.SetCenter(0.0, 0.0, 0.0)
    sphere.SetThetaResolution(20)
    sphere.SetPhiResolution(20)

    # Processing filter: pull the triangulation's edges out of the sphere.
    edge_filter = vtk.vtkExtractEdges()
    edge_filter.SetInputConnection(sphere.GetOutputPort())

    # Mappers: convert each geometry into drawable graphics primitives.
    surface_mapper = vtk.vtkPolyDataMapper()
    surface_mapper.SetInputConnection(sphere.GetOutputPort())
    wire_mapper = vtk.vtkPolyDataMapper()
    wire_mapper.SetInputConnection(edge_filter.GetOutputPort())

    # Actors: place the primitives in the scene and set their appearance.
    surface_actor = vtk.vtkActor()
    surface_actor.SetMapper(surface_mapper)
    surface_actor.GetProperty().SetColor(1, 0.5, 0)   # orange (RGB in [0, 1])
    wire_actor = vtk.vtkActor()
    wire_actor.SetMapper(wire_mapper)
    wire_actor.GetProperty().SetColor(0, 0.5, 0)      # dark green edges
    wire_actor.GetProperty().SetLineWidth(3)          # thick lines

    # Renderer: composes the actors into an image.
    scene = vtk.vtkRenderer()
    scene.AddActor(surface_actor)
    scene.AddActor(wire_actor)
    scene.SetBackground(0.1, 0.2, 0.4)

    # Render window: where the image appears; size sets the resolution.
    window = vtk.vtkRenderWindow()
    window.AddRenderer(scene)
    window.SetSize(600, 600)

    # Interactor: mouse/keyboard control of the view.
    interactor = vtk.vtkRenderWindowInteractor()
    interactor.SetRenderWindow(window)
    # Always initialize the interactor before the first render.
    interactor.Initialize()

    window.Render()
    # Hand control to the user; returns when the window is closed.
    interactor.Start()
if __name__=='__main__': # pragma: no cover
render_demo() | saullocastro/pyNastran | pyNastran/gui/vtk_examples/extract_edges.py | Python | lgpl-3.0 | 3,743 | [
"VTK"
] | 3befa651aece731a10962448c298d3acfb84b429b505e30b72f90cf12c20440e |
# Created by Ashkan Bigdeli 2016
# Annow.py
# Main UI and calls for data processing
# wx is external UI module
import wx, os
from src import pid_etr
from time import strftime
script_dir =os.path.dirname(__file__)
class ManualWindow(wx.Frame):
    #for manual page display
    def __init__(self):
        """Create a read-only frame showing the bundled README manual.

        Reads src/README.txt relative to the module's directory
        (``script_dir``) and streams it line by line into a text control.
        """
        wx.Frame.__init__(self, None, title="Annow User Manual", size=(720, 500))
        panel = wx.Panel(self)
        # Multiline, read-only viewer that fills the frame.
        txt =wx.TextCtrl(panel, size=(700,450), style=wx.TE_MULTILINE | wx.TE_READONLY |wx.EXPAND)
        man = os.path.join(script_dir, "src/README.txt")
        with open(man) as readme:
            for line in readme:
                txt.AppendText(line)
class MainWindow(wx.Frame):
def __init__(self,parent, title):
    """Build the main Annow window.

    Lays out the menu bar, the run-information and FTP input fields, the
    local subject/query file selectors, the run options row, the Run
    button and the read-only results pane, then shows the frame.
    """
    wx.Frame.__init__(self,parent,title = title, size = (1300,650))
    panel = wx.Panel(self, -1)
    panel.SetBackgroundColour('#ABABAB')
    #set up standard menu options
    filemenu= wx.Menu()
    menuManual= filemenu.Append(wx.NewId(), "Manual"," Usage Guide")
    menuAbout= filemenu.Append(wx.ID_ABOUT, "&About"," Information about this program")
    menuExit = filemenu.Append(wx.ID_EXIT,"E&xit"," Terminate the program")
    #creating and binding the menu
    menuBar = wx.MenuBar()
    menuBar.Append(filemenu,"&Menu") # Adding the "filemenu" to the MenuBar
    self.SetMenuBar(menuBar)
    self.Bind(wx.EVT_MENU, self.OnManual, menuManual)
    self.Bind(wx.EVT_MENU, self.OnExit, menuExit)
    self.Bind(wx.EVT_MENU, self.OnAbout, menuAbout)
    #fonts
    entry_font = wx.Font(12, wx.MODERN, wx.NORMAL, wx.NORMAL)
    header_font = wx.Font(12,wx.MODERN, wx.NORMAL, wx.BOLD)
    #set up UI for all run information including event binding, sizing
    run_info = wx.StaticText(panel, label = "Enter Your Run Information")
    run_info.SetFont(header_font)
    #run name layout and bindings
    lblrun = wx.StaticText(panel, label = "Run Name: ", size=(175, -1))
    lblrun.SetFont(entry_font)
    self.editrun = wx.TextCtrl(panel, value="", size=(326, -1))
    ri_sizer = wx.BoxSizer(wx.HORIZONTAL)
    ri_sizer.AddSpacer(10)
    ri_sizer.Add(lblrun)
    ri_sizer.Add(self.editrun)
    #run directory layout and bindings
    lblrun_dir = wx.StaticText(panel, label = "Run Directory: ", size=(175, -1))
    lblrun_dir.SetFont(entry_font)
    self.editrun_dir = wx.TextCtrl(panel, value = "", size=(326, -1))
    self.dir_button = wx.Button(panel, label="Directory")
    self.dir_button.Bind(wx.EVT_BUTTON, self.opendir)
    rd_sizer=wx.BoxSizer(wx.HORIZONTAL)
    rd_sizer.AddSpacer(10)
    rd_sizer.Add(lblrun_dir)
    rd_sizer.Add(self.editrun_dir)
    rd_sizer.AddSpacer(10)
    rd_sizer.Add(self.dir_button)
    #ftp layout and bindings
    ftp_info = wx.StaticText(panel, label = "Enter FTP Information")
    ftp_info.SetFont(header_font)
    self.ftp_site = wx.StaticText(panel, label = "FTP Site: ", size=(175, -1))
    self.ftp_site.SetFont(entry_font)
    self.edit_ftp = wx.TextCtrl(panel, value = "", size=(326, -1))
    ftp_sizer=wx.BoxSizer(wx.HORIZONTAL)
    ftp_sizer.AddSpacer(10)
    ftp_sizer.Add(self.ftp_site)
    ftp_sizer.Add(self.edit_ftp)
    ftp_dir = wx.StaticText(panel, label = "FTP Directory: ", size=(175, -1))
    ftp_dir.SetFont(entry_font)
    self.edit_ftp_dir = wx.TextCtrl(panel, value = "", size=(326, -1))
    ftp_dir_sizer=wx.BoxSizer(wx.HORIZONTAL)
    ftp_dir_sizer.AddSpacer(10)
    ftp_dir_sizer.Add(ftp_dir)
    ftp_dir_sizer.Add(self.edit_ftp_dir)
    ftp_suffix = wx.StaticText(panel, label = "FASTA Suffix: ", size=(175, -1))
    ftp_suffix.SetFont(entry_font)
    self.edit_suffix = wx.TextCtrl(panel, value = "", size=(326, -1))
    ftp_suffix_sizer=wx.BoxSizer(wx.HORIZONTAL)
    ftp_suffix_sizer.AddSpacer(10)
    ftp_suffix_sizer.Add(ftp_suffix)
    ftp_suffix_sizer.Add(self.edit_suffix)
    #local fasta layout and bindings
    fasta_info = wx.StaticText(panel, label = "Or Select a Subject Database")
    fasta_info.SetFont(header_font)
    local_fasta = wx.StaticText(panel, label = "Local FASTA: ", size=(175, -1))
    local_fasta.SetFont(entry_font)
    self.edit_fasta = wx.TextCtrl(panel, value = "", size=(326, -1))
    self.subject_button = wx.Button(panel, label="Subject")
    self.subject_button.Bind(wx.EVT_BUTTON, self.openfile)
    local_fasta_sizer=wx.BoxSizer(wx.HORIZONTAL)
    local_fasta_sizer.AddSpacer(5)
    local_fasta_sizer.Add(local_fasta)
    local_fasta_sizer.Add(self.edit_fasta)
    local_fasta_sizer.AddSpacer(10)
    local_fasta_sizer.Add(self.subject_button)
    # query fasta layout and bindings
    query_info = wx.StaticText(panel, label = "Local Query Database")
    query_info.SetFont(header_font)
    query = wx.StaticText(panel, label = "Local FASTA: ", size=(175, -1))
    query.SetFont(entry_font)
    self.edit_query = wx.TextCtrl(panel, value = "", size=(326, -1))
    self.query_button = wx.Button(panel, label="Query")
    self.query_button.Bind(wx.EVT_BUTTON, self.openfile)
    query_sizer=wx.BoxSizer(wx.HORIZONTAL)
    query_sizer.AddSpacer(5)
    query_sizer.Add(query)
    query_sizer.Add(self.edit_query)
    query_sizer.AddSpacer(10)
    query_sizer.Add(self.query_button)
    #add all run, ftp, local/remote, query to a single sizer for UI
    info_sizer=wx.BoxSizer(wx.VERTICAL)
    info_sizer.Add(run_info)
    info_sizer.AddSpacer(10)
    info_sizer.Add(ri_sizer)
    info_sizer.AddSpacer(10)
    info_sizer.Add(rd_sizer)
    info_sizer.Add(ftp_info)
    info_sizer.AddSpacer(10)
    info_sizer.Add(ftp_sizer)
    info_sizer.AddSpacer(10)
    info_sizer.Add(ftp_dir_sizer)
    info_sizer.AddSpacer(10)
    info_sizer.Add(ftp_suffix_sizer)
    info_sizer.AddSpacer(10)
    info_sizer.Add(fasta_info)
    info_sizer.Add(local_fasta_sizer)
    sbox = wx.StaticBox(panel, -1, 'Query Info:')
    sboxSizer = wx.StaticBoxSizer(sbox, wx.VERTICAL)
    sboxSizer.Add(info_sizer,0,wx.EXPAND, 200)
    # set up run options including sizing, event bindings
    eval_in = wx.StaticText(panel,label ='E-Value:')
    self.edit_eval= wx.TextCtrl(panel,value="0.001",size=(50,-1))
    eval_box = wx.BoxSizer(wx.HORIZONTAL)
    eval_box.AddSpacer(10)
    eval_box.Add(eval_in)
    eval_box.Add(self.edit_eval)
    self.num_hits = wx.StaticText(panel,label ='# Hits:')
    self.hits = wx.SpinCtrl(panel, 1, min=1, max = 5,size=(50,-1))
    self.hits.SetValue(1)
    hits_box = wx.BoxSizer(wx.HORIZONTAL)
    hits_box.AddSpacer(10)
    hits_box.Add(self.num_hits)
    hits_box.Add(self.hits)
    self.percent_match = wx.StaticText(panel,label ='% Match:')
    self.match= wx.TextCtrl(panel,value="0.001",size=(50,-1))
    self.match.SetValue("99.0")
    match_box = wx.BoxSizer(wx.HORIZONTAL)
    match_box.AddSpacer(10)
    match_box.Add(self.percent_match)
    match_box.Add(self.match)
    self.update = wx.CheckBox(panel, -1, 'Update Fasta')
    self.show_align= wx.CheckBox(panel,-1, 'Show Alignments')
    self.show_align.Bind(wx.EVT_CHECKBOX, self.alignment_warning)
    # add run options to a sizer for layout managment
    options = wx.BoxSizer(wx.HORIZONTAL)
    options.AddSpacer(10)
    options.Add(eval_box)
    options.AddSpacer(25)
    options.Add(match_box)
    options.AddSpacer(25)
    options.Add(hits_box)
    options.AddSpacer(25)
    options.Add(self.update)
    options.AddSpacer(25)
    options.Add(self.show_align)
    #aggreagate query info and options into one sizer for layout managment
    sbox2 = wx.StaticBox(panel, -1, 'Subject Info:')
    sboxSizer2 = wx.StaticBoxSizer(sbox2, wx.VERTICAL)
    sboxSizer2.Add(query_info)
    sboxSizer2.AddSpacer(10)
    sboxSizer2.Add(query_sizer)
    sboxSizer2.AddSpacer(10)
    sboxSizer2.Add(options)
    #aggregate all run panels and results panel and options into single panel
    run_sizer= wx.BoxSizer(wx.VERTICAL)
    run_sizer.Add(sboxSizer, wx.ALIGN_CENTER)
    run_sizer.AddSpacer(10)
    run_sizer.Add(sboxSizer2)
    run_sizer.AddSpacer(10)
    self.run_button = wx.Button(panel, label="Run",size=(100,50))
    self.run_button.Bind(wx.EVT_BUTTON,self.run)
    run_sizer.Add(self.run_button,0,wx.ALIGN_CENTRE)
    self.result_box = wx.TextCtrl(panel, size=(600,450), style=wx.TE_MULTILINE | wx.TE_READONLY |wx.HSCROLL)
    #set layout for run and result panels
    main_sizer=wx.BoxSizer(wx.HORIZONTAL)
    main_sizer.AddSpacer(25)
    main_sizer.Add(run_sizer,0,wx.ALIGN_LEFT,wx.EXPAND)
    main_sizer.AddSpacer(25)
    main_sizer.Add(self.result_box,0,wx.ALIGN_RIGHT,wx.EXPAND)
    layout_sizer=wx.BoxSizer(wx.VERTICAL)
    layout_sizer.AddSpacer(50)
    layout_sizer.Add(main_sizer,1,wx.EXPAND)
    layout_sizer.AddSpacer(50)
    #set layout and display UI
    self.SetSizer(layout_sizer)
    self.Centre()
    self.Show()
# Menu Item events
def OnAbout(self,e):
    """Menu handler: show the About dialog with version/author info."""
    # Create a message dialog box
    dlg = wx.MessageDialog(self, "Annow version 1.0 Gene Annotation Software\nCreated and maintained by Ashkan Bigdeli\nFree to use and distribute\nSource code and the latest version can be found on Github\nhttps://github.com/ashbig/Annow/", "Annow", wx.OK)
    dlg.ShowModal() # Shows it
    dlg.Destroy() # finally destroy it when finished.
def OnManual(self,e):
    """Menu handler: open the user-manual window (ManualWindow frame)."""
    # Create a message dialog box
    man_page = ManualWindow()
    man_page.Show()
def OnExit(self,e):
    """Menu handler: close the main frame and terminate the program."""
    self.Close(True) # Close the frame.
# GUI Events
def openfile(self, event):
    """Shared handler for the Subject and Query buttons.

    Pops a file chooser and writes the selected path into whichever
    text field matches the label of the button that fired the event.
    """
    chooser = wx.FileDialog(self, "Choose a file", os.getcwd(), "", "*.*", wx.OPEN)
    button_label = event.GetEventObject().GetLabel()
    if chooser.ShowModal() == wx.ID_OK:
        chosen_path = chooser.GetPath()
        # Route the selection to the field belonging to the button.
        targets = {"Subject": self.edit_fasta, "Query": self.edit_query}
        if button_label in targets:
            targets[button_label].SetValue(chosen_path)
    chooser.Destroy()
def opendir(self, event):
    """Handler for the Directory button: pick the run output directory."""
    chooser = wx.DirDialog(self, "Choose a directory:", style=wx.DD_DEFAULT_STYLE | wx.DD_NEW_DIR_BUTTON)
    button_label = event.GetEventObject().GetLabel()
    if chooser.ShowModal() == wx.ID_OK and button_label == "Directory":
        # Store the chosen directory in the run-directory field.
        self.editrun_dir.SetValue(chooser.GetPath())
    chooser.Destroy()
def alignment_warning(self, e):
    """Checkbox handler: warn that generating alignments doubles run time."""
    if not self.show_align.IsChecked():
        # Unchecking needs no warning.
        return
    notice = wx.MessageDialog(self, 'Generating Alignments Will Increase Run Time 2x !', 'Annow 1.0', wx.OK|wx.ICON_INFORMATION)
    notice.ShowModal()
    notice.Destroy()
# Data processing event
def run(self, event):
run_name = self.editrun.GetValue()
run_name = run_name.replace(" ", "_")
run_dir = self.editrun_dir.GetValue()
ftp_url = self.edit_ftp.GetValue()
ftp_dir = self.edit_ftp_dir.GetValue()
ftp_suffix = self.edit_suffix.GetValue()
local_subject = self.edit_fasta.GetValue()
query_db = self.edit_query.GetValue()
evalue = self.edit_eval.GetValue()
hit_val = self.hits.GetValue()
alignments = self.show_align.GetValue()
update_fasta = self.update.GetValue()
match_val = self.match.GetValue()
#check to see if we have input from the user
if len(run_name) < 1:
self.editrun.SetValue("You Must Enter A Run Name!")
return
if len(run_dir) < 1:
self.editrun_dir.SetValue("You Must Enter An Output Directory!")
return
if len(ftp_url) <1 or len(ftp_dir) <1 or len(ftp_suffix) < 1:
if len(local_subject) < 1:
self.edit_ftp.SetValue("You Must Enter Remote or Subject Database Completely!")
return
if len(query_db) < 1:
self.edit_query.SetValue("You Must Enter A Query FASTA!")
return
#display run params for user
self.result_box.AppendText("\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n")
self.result_box.AppendText("\n\nRUN PARAMETERS:\n")
self.result_box.AppendText("Run Name: " + run_name +"\n")
self.result_box.AppendText("Run Directory: " + run_dir +"\n")
self.result_box.AppendText("FTP Site: " + ftp_url + "\n")
self.result_box.AppendText("FTP Directory: " + ftp_dir + '\n')
self.result_box.AppendText("FTP File Suffix: " + ftp_suffix + '\n')
self.result_box.AppendText("Local Subject DB: " + local_subject + '\n')
self.result_box.AppendText("Query Database: " + query_db + '\n')
self.result_box.AppendText("Blast E-Value: " + evalue + '\n')
self.result_box.AppendText("Match Criteria for Updating: " + str(match_val) + '\n')
self.result_box.AppendText("Sequence Hits: " + str(hit_val) + '\n')
self.result_box.AppendText("Generate Alignments: " + str(alignments) + '\n')
self.result_box.AppendText("Update Input Query: " + str(update_fasta) + '\n\n')
self.result_box.AppendText("\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n")
# make run specific directory
time = strftime("-%m-%d-%H-%M")
new_run_path = os.path.join(run_dir,(run_name + time))
os.mkdir(new_run_path, 0755)
#if ftp, download, unzip, concatenate and generate a file fasta file
if len(local_subject) < 1:
self.result_box.AppendText("Downloading FASTA files...\n")
dwnld_msg = pid_etr.download(ftp_suffix, ftp_url, ftp_dir, run_name, new_run_path)
self.result_box.AppendText(dwnld_msg + "\n")
if "error" in dwnld_msg:
return
self.result_box.AppendText("Unzipping Downloaded FASTA's...\n")
unzip_msg = pid_etr.unzip(new_run_path)
self.result_box.AppendText(unzip_msg + "\n")
if "error" in unzip_msg:
return
self.result_box.AppendText("Removing Intermmediate Files...\n")
remove_msg = pid_etr.remove(ftp_suffix, new_run_path)
self.result_box.AppendText(remove_msg + "\n")
if "error" in remove_msg:
return
self.result_box.AppendText("Concatinating Files...\n")
concat = pid_etr.concat_fasta(run_name, new_run_path)
concat_msg = concat[0]
self.result_box.AppendText(concat_msg + "\n")
if "error" in concat_msg:
return
local_subject= concat[1]
self.result_box.AppendText("\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n")
#create blast database and update user
self.result_box.AppendText("\n\nMaking BLAST Database...\n")
make_blast_db = pid_etr.create_db(new_run_path, local_subject, run_name)
blast_db_msg = make_blast_db[0]
self.result_box.AppendText(blast_db_msg)
if "error" in blast_db_msg:
return
blast_db = make_blast_db[1]
blast_db_summary = make_blast_db[2]
with open(blast_db_summary) as summary:
for line in summary:
self.result_box.AppendText(line)
self.result_box.AppendText(blast_db_msg)
# perform blast and update user
self.result_box.AppendText("\n\nPerforming blast...\n\n")
results_tuple = pid_etr.perform_blast(query_db, blast_db, new_run_path, run_name, evalue, str(hit_val))
result_msg = results_tuple[0]
self.result_box.AppendText(result_msg)
if "error" in result_msg:
return
results = results_tuple[1]
self.result_box.AppendText("Blast complete! Raw Results are located in " + results + "\n\n")
self.result_box.AppendText("\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n")
# summarize results and update user
self.result_box.AppendText("\nSummarizing results...")
summarize_tuple = pid_etr.summarize_results(results, match_val,str(hit_val))
summarize_msg = summarize_tuple[0]
self.result_box.AppendText(summarize_msg)
if "error" in summarize_msg:
return
summary = summarize_tuple[1]
new_annotation = summarize_tuple[2]
if "Concordance" in new_annotation:
update_fasta=False
self.result_box.AppendText("\n" + new_annotation + "\n")
# update fasta is asked
if update_fasta:
self.result_box.AppendText("\n\n\nGenerating a new FASTA with updated annotations...\n\n")
update_tuple = pid_etr.update_fasta(run_dir, run_name, new_annotation, query_db)
update_message = update_tuple[0]
self.result_box.AppendText(update_message)
if "error" in update_message:
return
self.result_box.AppendText("\n\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n")
# provide alignments if asked
if alignments:
self.result_box.AppendText("\nGenerating an alignments file...")
alignments_tuple = pid_etr.perform_blast_align(query_db, blast_db, new_run_path, run_name, evalue, str(hit_val))
align_message = alignments_tuple[0]
self.result_box.AppendText(align_message)
if "error" in align_message:
return
self.result_box.AppendText("\n\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n")
self.result_box.AppendText("\n\nYour run is complete! Check " + run_dir + " for your results!\n\n\n")
#run app
app = wx.App(False)
frame = MainWindow(None, "Annow 1.0")
app.MainLoop() | ashbig/Annow | annow_v1_0/Annow.py | Python | gpl-3.0 | 19,421 | [
"BLAST"
] | c80b986c0f21807a526e84fb9123787afec9d48815e5ba979b5a874165da8351 |
# Author: Samuel Genheden samuel.genheden@gmail.com
"""
Program to make input to Gaussian for (R)ESPA calculations
Examples
--------
pdb2respa_inp.py mol1.pdb mol2.pdb
pdb2respa_inp.py mol1.pdb -v ff03 -c -1
"""
import sys
import os
import argparse
import time
import numpy as np
from sgenlib import pdb
if __name__ == '__main__' :
# Command-line input
parser = argparse.ArgumentParser(description="Making Gaussian input for (R)ESPA calculations")
parser.add_argument('file',nargs="+",help="the PDB files")
parser.add_argument('-v','--version',choices=["ff94","ff03"],help="the force field version, can be either ff94 or ff03",default="ff94")
parser.add_argument('-c','--charge',type=float,help="The net charge of the molecule(s)",default=0)
parser.add_argument('-p','--processors',type=int,help="The number of processors to use",default=1)
args = parser.parse_args()
method = {"ff94" : "HF/6-31G* SCF","ff03" : "B3LYP/cc-pVTZ SCRF"}
for filename in args.file :
h,t = os.path.splitext(filename)
pdbfile = pdb.PDBFile(filename=filename)
with open("%s_mk.com"%h,"w") as fout :
fout.write("%Mem=256MB\n")
fout.write("%snproc=%d\n"%('%',args.processors))
fout.write("\n")
fout.write("#P %s Pop=(Minimal,MK) IOp(6/33=2,6/41=10,6/42=17)\n\n"%method[args.version])
fout.write("MK ESP on %s, at %s\n"%(filename,time.strftime("%d/%m/%Y")))
fout.write("\n")
fout.write("%d 1\n"%args.charge)
for atom in pdbfile.atoms :
fout.write("%s %8.3f %8.3f %8.3f\n"%(atom.element(),atom.x,atom.y,atom.z))
fout.write("\n")
fout.write("\n")
| SGenheden/Scripts | Pdb/pdb2respa_inp.py | Python | mit | 1,634 | [
"Gaussian"
] | d998521490c73a44d580397b494e6ce2ad63021ffbb8f36073664356c289e761 |
import re
import pytz
import datetime as dt
from collections import defaultdict
import lxml.html
from pupa.scrape import Scraper, Bill, VoteEvent
from openstates.utils import LXMLMixin
from .utils import (clean_text, house_get_actor_from_action,
senate_get_actor_from_action)
bill_types = {
'HB ': 'bill',
'HJR': 'joint resolution',
'HCR': 'concurrent resolution',
'SB ': 'bill',
'SJR': 'joint resolution',
'SCR': 'concurrent resolution'
}
TIMEZONE = pytz.timezone('America/Chicago')
class MOBillScraper(Scraper, LXMLMixin):
_house_base_url = 'http://www.house.mo.gov'
# List of URLS that aren't working when we try to visit them (but
# probably should work):
_bad_urls = []
_subjects = defaultdict(list)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._scrape_subjects(self.latest_session())
def _get_action(self, actor, action):
# Alright. This covers both chambers and everyting else.
flags = [
('Introduced', 'introduction'),
('Offered', 'introduction'),
('First Read', 'reading-1'),
('Read Second Time', 'reading-2'),
('Second Read', 'reading-2'),
# make sure passage is checked before reading-3
('Third Read and Passed', 'passage'),
('Third Read', 'reading-3'),
('Referred', 'referral-committee'),
('Withdrawn', 'withdrawal'),
('S adopted', 'passage'),
('Truly Agreed To and Finally Passed', 'passage'),
('Signed by Governor', 'executive-signature'),
('Approved by Governor', 'executive-signature'),
('Vetoed by Governor', 'executive-veto'),
('Legislature voted to override Governor\'s veto', 'veto-override-passage'),
]
categories = []
for flag, acat in flags:
if flag in action:
categories.append(acat)
return categories or None
def _get_votes(self, date, actor, action, bill, url):
vre = r'(?P<leader>.*)(AYES|YEAS):\s+(?P<yeas>\d+)\s+(NOES|NAYS):\s+(?P<nays>\d+).*'
if 'YEAS' in action.upper() or 'AYES' in action.upper():
match = re.match(vre, action)
if match:
v = match.groupdict()
yes, no = int(v['yeas']), int(v['nays'])
vote = VoteEvent(
chamber=actor,
motion_text=v['leader'],
result='pass' if yes > no else 'fail',
classification='passage',
start_date=TIMEZONE.localize(date),
bill=bill,
)
vote.add_source(url)
yield vote
def _parse_cosponsors_from_bill(self, bill, url):
bill_page = self.get(url).text
bill_page = lxml.html.fromstring(bill_page)
table = bill_page.xpath('//table[@id="CoSponsorTable"]')
assert len(table) == 1
for row in table[0].xpath('./tr'):
name = row[0].text_content()
if re.search(r'no co-sponsors', name, re.IGNORECASE):
continue
bill.add_sponsorship(
row[0].text_content(),
entity_type='person',
classification='cosponsor',
primary=False,
)
def _scrape_subjects(self, session):
self._scrape_senate_subjects(session)
if 'S' in session:
self.warning('skipping house subjects for special session')
else:
self._scrape_house_subjects(session)
def session_type(self, session):
# R or S1
return 'R' if len(session) == 4 else session[4:]
def _scrape_senate_subjects(self, session):
self.info('Collecting subject tags from upper house.')
subject_list_url = 'http://www.senate.mo.gov/{}info/BTS_Web/'\
'Keywords.aspx?SessionType=%s'.format(session[2:4], self.session_type(session))
subject_page = self.lxmlize(subject_list_url)
# Create a list of all possible bill subjects.
subjects = self.get_nodes(subject_page, '//h3')
for subject in subjects:
subject_text = self.get_node(
subject,
'./a[string-length(text()) > 0]/text()[normalize-space()]')
subject_text = re.sub(r'([\s]*\([0-9]+\)$)', '', subject_text)
# Bills are in hidden spans after the subject labels.
bill_ids = subject.getnext().xpath(
'./b/a/text()[normalize-space()]')
for bill_id in bill_ids:
self.info('Found {}.'.format(bill_id))
self._subjects[bill_id].append(subject_text)
def _parse_senate_billpage(self, bill_url, year):
bill_page = self.lxmlize(bill_url)
# get all the info needed to record the bill
# TODO probably still needs to be fixed
bill_id = bill_page.xpath('//*[@id="lblBillNum"]')[0].text_content()
bill_title = bill_page.xpath('//*[@id="lblBillTitle"]')[0].text_content()
bill_desc = bill_page.xpath('//*[@id="lblBriefDesc"]')[0].text_content()
# bill_lr = bill_page.xpath('//*[@id="lblLRNum"]')[0].text_content()
bill_type = "bill"
triplet = bill_id[:3]
if triplet in bill_types:
bill_type = bill_types[triplet]
subs = []
bid = bill_id.replace(" ", "")
if bid in self._subjects:
subs = self._subjects[bid]
self.info("With subjects for this bill")
self.info(bid)
bill = Bill(
bill_id,
title=bill_desc,
chamber='upper',
legislative_session=year,
classification=bill_type,
)
bill.subject = subs
bill.add_abstract(bill_desc, note='abstract')
bill.add_source(bill_url)
if bill_title:
bill.add_title(bill_title)
# Get the primary sponsor
sponsor = bill_page.xpath('//a[@id="hlSponsor"]')[0]
bill_sponsor = sponsor.text_content()
# bill_sponsor_link = sponsor.attrib.get('href')
bill.add_sponsorship(
bill_sponsor,
entity_type='person',
classification='primary',
primary=True,
)
# cosponsors show up on their own page, if they exist
cosponsor_tag = bill_page.xpath('//a[@id="hlCoSponsors"]')
if len(cosponsor_tag) > 0 and cosponsor_tag[0].attrib.get('href'):
self._parse_senate_cosponsors(bill, cosponsor_tag[0].attrib['href'])
# get the actions
action_url = bill_page.xpath('//a[@id="hlAllActions"]')
if len(action_url) > 0:
action_url = action_url[0].attrib['href']
self._parse_senate_actions(bill, action_url)
# stored on a separate page
versions_url = bill_page.xpath('//a[@id="hlFullBillText"]')
if len(versions_url) > 0 and versions_url[0].attrib.get('href'):
self._parse_senate_bill_versions(bill, versions_url[0].attrib['href'])
yield bill
def _parse_senate_bill_versions(self, bill, url):
bill.add_source(url)
versions_page = self.get(url).text
versions_page = lxml.html.fromstring(versions_page)
version_tags = versions_page.xpath('//li/font/a')
# some pages are updated and use different structure
if not version_tags:
version_tags = versions_page.xpath('//tr/td/a[contains(@href, ".pdf")]')
for version_tag in version_tags:
description = version_tag.text_content()
pdf_url = version_tag.attrib['href']
if pdf_url.endswith('pdf'):
mimetype = 'application/pdf'
else:
mimetype = None
bill.add_version_link(description, pdf_url, media_type=mimetype,
on_duplicate='ignore')
def _parse_senate_actions(self, bill, url):
bill.add_source(url)
actions_page = self.get(url).text
actions_page = lxml.html.fromstring(actions_page)
bigtable = actions_page.xpath('/html/body/font/form/table/tr[3]/td/div/table/tr')
for row in bigtable:
date = row[0].text_content()
date = dt.datetime.strptime(date, '%m/%d/%Y')
action = row[1].text_content()
actor = senate_get_actor_from_action(action)
type_class = self._get_action(actor, action)
bill.add_action(
action, TIMEZONE.localize(date), chamber=actor, classification=type_class)
def _parse_senate_cosponsors(self, bill, url):
bill.add_source(url)
cosponsors_page = self.get(url).text
cosponsors_page = lxml.html.fromstring(cosponsors_page)
# cosponsors are all in a table
cosponsors = cosponsors_page.xpath('//table[@id="dgCoSponsors"]/tr/td/a')
for cosponsor_row in cosponsors:
# cosponsors include district, so parse that out
cosponsor_string = cosponsor_row.text_content()
cosponsor = clean_text(cosponsor_string)
cosponsor = cosponsor.split(',')[0]
# they give us a link to the congressperson, so we might
# as well keep it.
if cosponsor_row.attrib.get('href'):
# cosponsor_url = cosponsor_row.attrib['href']
bill.add_sponsorship(
cosponsor,
entity_type='person',
classification='cosponsor',
primary=False,
)
else:
bill.add_sponsorship(
cosponsor,
entity_type='person',
classification='cosponsor',
primary=False,
)
def _scrape_house_subjects(self, session):
self.info('Collecting subject tags from lower house.')
subject_list_url = \
'http://house.mo.gov/LegislationSP.aspx?code=R&category=subjectindex&year={}'\
.format(session)
subject_page = self.lxmlize(subject_list_url)
# Create a list of all the possible bill subjects.
subjects = self.get_nodes(
subject_page,
"//div[@id='ContentPlaceHolder1_panelParentDIV']" # ...
"/div[@id='panelDIV']//div[@id='ExpandedPanel']//a")
# Find the list of bills within each subject.
for subject in subjects:
subject_text = re.sub(r"\([0-9]+\).*", '', subject.text, re.IGNORECASE).strip()
self.info('Searching for bills in {}.'.format(subject_text))
subject_page = self.lxmlize(subject.attrib['href'])
bill_nodes = self.get_nodes(
subject_page,
'//table[@id="reportgrid"]/tbody/tr[@class="reportbillinfo"]')
# Move onto the next subject if no bills were found.
if bill_nodes is None or not (len(bill_nodes) > 0):
continue
for bill_node in bill_nodes:
bill_id = self.get_node(
bill_node,
'(./td)[1]/a/text()[normalize-space()]')
# Skip to the next bill if no ID could be found.
if bill_id is None or not (len(bill_id) > 0):
continue
self.info('Found {}.'.format(bill_id))
self._subjects[bill_id].append(subject_text)
def _parse_house_actions(self, bill, url):
bill.add_source(url)
actions_page = self.get(url).text
actions_page = lxml.html.fromstring(actions_page)
rows = actions_page.xpath('//table/tr')
for row in rows:
# new actions are represented by having dates in the first td
# otherwise, it's a continuation of the description from the
# previous action
if len(row) > 0 and row[0].tag == 'td':
if len(row[0].text_content().strip()) > 0:
date = row[0].text_content().strip()
date = dt.datetime.strptime(date, '%m/%d/%Y')
action = row[2].text_content().strip()
else:
action += ('\n' + row[2].text_content())
action = action.rstrip()
actor = house_get_actor_from_action(action)
type_class = self._get_action(actor, action)
yield from self._get_votes(date, actor, action, bill, url)
bill.add_action(
action, TIMEZONE.localize(date), chamber=actor, classification=type_class)
def _parse_house_billpage(self, url, year):
bill_list_page = self.get(url).text
bill_list_page = lxml.html.fromstring(bill_list_page)
# find the first center tag, take the text after
# 'House of Representatives' and before 'Bills' as
# the session name
# header_tag = bill_list_page.xpath(
# '//*[@id="ContentPlaceHolder1_lblAssemblyInfo"]'
# )[0].text_content()
# if header_tag.find('1st Extraordinary Session') != -1:
# session = year + ' 1st Extraordinary Session'
# elif header_tag.find('2nd Extraordinary Session') != -1:
# session = year + ' 2nd Extraordinary Session'
# else:
session = year
bills = bill_list_page.xpath('//table[@id="reportgrid"]//tr')
isEven = False
count = 0
bills = bills[2:]
for bill in bills:
if not isEven:
# the non even rows contain bill links, the other rows contain brief
# descriptions of the bill.
count = count + 1
yield from self._parse_house_bill(bill[0][0].attrib['href'], session)
isEven = not isEven
def _parse_house_bill(self, url, session):
# using the print page makes the page simpler, and also *drastically* smaller
# (8k rather than 100k)
url = re.sub("billsummary", "billsummaryprn", url)
url = '%s/%s' % (self._house_base_url, url)
# the URL is an iframed version now, so swap in for the actual bill page
url = url.replace('Bill.aspx', 'BillContent.aspx')
url = url.replace('&code=R', '&code=R&style=new')
# http://www.house.mo.gov/Bill.aspx?bill=HB26&year=2017&code=R
# http://www.house.mo.gov/BillContent.aspx?bill=HB26&year=2017&code=R&style=new
bill_page = self.get(url).text
bill_page = lxml.html.fromstring(bill_page)
bill_page.make_links_absolute(url)
bill_id = bill_page.xpath('//*[@class="entry-title"]/div')
if len(bill_id) == 0:
self.info("WARNING: bill summary page is blank! (%s)" % url)
self._bad_urls.append(url)
return
bill_id = bill_id[0].text_content()
bill_id = clean_text(bill_id)
bill_desc = bill_page.xpath('//*[@class="BillDescription"]')[0].text_content()
bill_desc = clean_text(bill_desc)
table_rows = bill_page.xpath('//table/tr')
# if there is a cosponsor all the rows are pushed down one for the extra row
# for the cosponsor:
cosponsorOffset = 0
if table_rows[2][0].text_content().strip() == 'Co-Sponsor:':
cosponsorOffset = 1
lr_label_tag = table_rows[3 + cosponsorOffset]
assert lr_label_tag[0].text_content().strip() == 'LR Number:'
# bill_lr = lr_label_tag[1].text_content()
lastActionOffset = 0
if table_rows[4 + cosponsorOffset][0].text_content().strip() == 'Governor Action:':
lastActionOffset = 1
official_title_tag = table_rows[5 + cosponsorOffset + lastActionOffset]
assert official_title_tag[0].text_content().strip() == 'Bill String:'
official_title = official_title_tag[1].text_content()
# could substitute the description for the name,
# but keeping it separate for now.
bill_type = "bill"
triplet = bill_id[:3]
if triplet in bill_types:
bill_type = bill_types[triplet]
bill_number = int(bill_id[4:])
else:
bill_number = int(bill_id[3:])
subs = []
bid = bill_id.replace(" ", "")
if bid in self._subjects:
subs = self._subjects[bid]
self.info("With subjects for this bill")
self.info(bid)
if bill_desc == "":
if bill_number <= 20:
# blank bill titles early in session are approp. bills
bill_desc = 'Appropriations Bill'
else:
self.error("Blank title. Skipping. {} / {} / {}".format(
bill_id, bill_desc, official_title
))
return
bill = Bill(
bill_id,
chamber='lower',
title=bill_desc,
legislative_session=session,
classification=bill_type,
)
bill.subject = subs
bill.add_title(official_title, note='official')
bill.add_source(url)
bill_sponsor = clean_text(table_rows[0][1].text_content())
# try:
# bill_sponsor_link = table_rows[0][1][0].attrib['href']
# except IndexError:
# return
bill.add_sponsorship(
bill_sponsor,
entity_type='person',
classification='primary',
primary=True,
)
# check for cosponsors
sponsors_url, = bill_page.xpath(
"//a[contains(@href, 'CoSponsors.aspx')]/@href")
self._parse_cosponsors_from_bill(bill, sponsors_url)
# actions_link_tag = bill_page.xpath('//div[@class="Sections"]/a')[0]
# actions_link = '%s/%s' % (self._house_base_url,actions_link_tag.attrib['href'])
# actions_link = re.sub("content", "print", actions_link)
actions_link, = bill_page.xpath(
"//a[contains(@href, 'BillActions.aspx')]/@href")
yield from self._parse_house_actions(bill, actions_link)
# get bill versions
doc_tags = bill_page.xpath('//div[@class="BillDocuments"][1]/span')
for doc_tag in reversed(doc_tags):
doc = clean_text(doc_tag.text_content())
text_url = '%s%s' % (
self._house_base_url,
doc_tag[0].attrib['href']
)
bill.add_document_link(doc, text_url, media_type='text/html')
# get bill versions
version_tags = bill_page.xpath('//div[@class="BillDocuments"][2]/span')
for version_tag in reversed(version_tags):
version = clean_text(version_tag.text_content())
for vurl in version_tag.xpath(".//a"):
if vurl.text == 'PDF':
mimetype = 'application/pdf'
else:
mimetype = 'text/html'
bill.add_version_link(version, vurl.attrib['href'], media_type=mimetype,
on_duplicate='ignore')
# house bill versions
# everything between the row containing "Bill Text"" and the next div.DocHeaderRow
version_rows = bill_page.xpath(
'//div[contains(text(),"Bill Text")]/'
'following-sibling::div[contains(@class,"DocRow") '
'and count(preceding-sibling::div[contains(@class,"DocHeaderRow")])=1]')
for row in version_rows:
# some rows are just broken links, not real versions
if row.xpath('.//div[contains(@class,"textType")]/a/@href'):
version = row.xpath('.//div[contains(@class,"textType")]/a/text()')[0].strip()
path = row.xpath('.//div[contains(@class,"textType")]/a/@href')[0].strip()
if '.pdf' in path:
mimetype = 'application/pdf'
else:
mimetype = 'text/html'
bill.add_version_link(version, path, media_type=mimetype,
on_duplicate='ignore')
# house bill summaries
# everything between the row containing "Bill Summary"" and the next div.DocHeaderRow
summary_rows = bill_page.xpath(
'//div[contains(text(),"Bill Summary")]/'
'following-sibling::div[contains(@class,"DocRow") '
'and count(following-sibling::div[contains(@class,"DocHeaderRow")])=1]')
# if there are no amedments, we need a different xpath for summaries
if not summary_rows:
summary_rows = bill_page.xpath(
'//div[contains(text(),"Bill Summary")]/'
'following-sibling::div[contains(@class,"DocRow")]')
for row in reversed(summary_rows):
version = row.xpath('.//div[contains(@class,"textType")]/a/text()')[0].strip()
if version:
path = row.xpath('.//div[contains(@class,"textType")]/a/@href')[0].strip()
summary_name = 'Bill Summary ({})'.format(version)
if '.pdf' in path:
mimetype = 'application/pdf'
else:
mimetype = 'text/html'
bill.add_document_link(summary_name, path, media_type=mimetype,
on_duplicate='ignore')
# house bill amendments
amendment_rows = bill_page.xpath('//div[contains(text(),"Amendment")]/'
'following-sibling::div[contains(@class,"DocRow")]')
for row in reversed(amendment_rows):
version = row.xpath('.//div[contains(@class,"DocInfoCell")]/a[1]/text()')[0].strip()
path = row.xpath('.//div[contains(@class,"DocInfoCell")]/a[1]/@href')[0].strip()
summary_name = 'Amendment {}'.format(version)
defeated_icon = row.xpath('.//img[contains(@title,"Defeated")]')
if defeated_icon:
summary_name = '{} (Defeated)'.format(summary_name)
adopted_icon = row.xpath('.//img[contains(@title,"Adopted")]')
if adopted_icon:
summary_name = '{} (Adopted)'.format(summary_name)
distributed_icon = row.xpath('.//img[contains(@title,"Distributed")]')
if distributed_icon:
summary_name = '{} (Distributed)'.format(summary_name)
if '.pdf' in path:
mimetype = 'application/pdf'
else:
mimetype = 'text/html'
bill.add_version_link(summary_name, path, media_type=mimetype,
on_duplicate='ignore')
yield bill
def _scrape_upper_chamber(self, session):
self.info('Scraping bills from upper chamber.')
year2 = "%02d" % (int(session[:4]) % 100)
# Save the root URL, since we'll use it later.
bill_root = 'http://www.senate.mo.gov/{}info/BTS_Web/'.format(year2)
index_url = bill_root + 'BillList.aspx?SessionType=' + self.session_type(session)
index_page = self.get(index_url).text
index_page = lxml.html.fromstring(index_page)
# Each bill is in it's own table (nested within a larger table).
bill_tables = index_page.xpath('//a[@id]')
if not bill_tables:
return
for bill_table in bill_tables:
# Here we just search the whole table string to get the BillID that
# the MO senate site uses.
if re.search(r'dgBillList.*hlBillNum', bill_table.attrib['id']):
yield from self._parse_senate_billpage(
bill_root + bill_table.attrib.get('href'),
session,
)
def _scrape_lower_chamber(self, session):
self.info('Scraping bills from lower chamber.')
if 'S' in session:
year = session[:4]
code = session[4:]
else:
year = session
code = 'R'
bill_page_url = '{}/BillList.aspx?year={}&code={}'.format(
self._house_base_url, year, code)
yield from self._parse_house_billpage(bill_page_url, year)
def scrape(self, chamber=None, session=None):
if not session:
session = self.latest_session()
self.info('no session specified, using %s', session)
if chamber in ['upper', None]:
yield from self._scrape_upper_chamber(session)
if chamber in ['lower', None]:
yield from self._scrape_lower_chamber(session)
if len(self._bad_urls) > 0:
self.warning('WARNINGS:')
for url in self._bad_urls:
self.warning('{}'.format(url))
| votervoice/openstates | openstates/mo/bills.py | Python | gpl-3.0 | 24,779 | [
"VisIt"
] | b5121b2669445f9b5a31702e816b3e5d6f9dde126e135c98cbee696c3ef52e0b |
import decimal
import gc
import itertools
import multiprocessing
import weakref
import sqlalchemy as sa
from sqlalchemy import ForeignKey
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import Unicode
from sqlalchemy import util
from sqlalchemy.engine import result
from sqlalchemy.engine.processors import to_decimal_processor_factory
from sqlalchemy.orm import aliased
from sqlalchemy.orm import clear_mappers
from sqlalchemy.orm import configure_mappers
from sqlalchemy.orm import declarative_base
from sqlalchemy.orm import join as orm_join
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import Load
from sqlalchemy.orm import relationship
from sqlalchemy.orm import selectinload
from sqlalchemy.orm import Session
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import subqueryload
from sqlalchemy.orm.session import _sessions
from sqlalchemy.sql import column
from sqlalchemy.sql import util as sql_util
from sqlalchemy.sql.visitors import cloned_traverse
from sqlalchemy.sql.visitors import replacement_traverse
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.fixtures import fixture_session
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
from sqlalchemy.testing.util import gc_collect
from ..orm import _fixtures
class A(fixtures.ComparableEntity):
    # Empty placeholder entity deriving from the test framework's
    # comparison-enabled base; presumably mapped ad hoc by individual
    # tests in this module.
    pass
class B(fixtures.ComparableEntity):
    # Second empty placeholder entity, parallel to A above.
    pass
class ASub(A):
    # Subclass of A — presumably for inheritance-mapping scenarios;
    # confirm against its uses elsewhere in this module.
    pass
def assert_cycles(expected=0):
    """Decorator factory asserting that the wrapped callable leaves at
    most ``expected`` gc-unreachable (cyclic) objects behind.

    The wrapped callable is invoked once as a warmup, collectable
    garbage is drained, and then a second, measured invocation runs.
    """

    def decorator(fn):
        def wrapper():
            # Warmup pass: configure mappers, populate caches, etc.
            fn()

            # Drain pre-existing garbage before measuring; repeated
            # passes seem to be required to fully flush it.
            for _ in range(3):
                gc_collect()

            # gc.set_debug(gc.DEBUG_COLLECTABLE)
            try:
                # The measured pass.
                return fn()
            finally:
                cycles = gc_collect()
                assert cycles <= expected
                gc_collect()

        return wrapper

    return decorator
def profile_memory(
    maxtimes=250, assert_no_sessions=True, get_num_objects=None
):
    """Decorator factory: run the decorated test repeatedly in a child
    process and fail if the sampled object count keeps growing.

    :param maxtimes: overall iteration budget; iterations run in
     batches of five, so at most ``maxtimes // 5`` batches are
     attempted before giving up.
    :param assert_no_sessions: when True, assert after each batch that
     the global ``_sessions`` registry is empty.
    :param get_num_objects: optional callable returning the metric to
     sample; defaults to the length of ``gc.get_objects()`` (filtered
     for a pysqlite quirk, see below).
    """

    def decorate(func):
        # run the test N times.  if length of gc.get_objects()
        # keeps growing, assert false

        def get_objects_skipping_sqlite_issue():
            # pysqlite keeps adding weakref objects which only
            # get reset after 220 iterations.  We'd like to keep these
            # tests under 50 iterations and ideally about ten, so
            # just filter them out so that we get a "flatline" more quickly.

            if testing.against("sqlite+pysqlite"):
                return [
                    o
                    for o in gc.get_objects()
                    if not isinstance(o, weakref.ref)
                ]
            else:
                return gc.get_objects()

        def profile(queue, func_args):
            # Child-process worker: repeatedly invokes ``func`` and
            # streams ("status", msg) / ("result", ok, msg) tuples back
            # to the parent over ``queue``.

            # give testing.db a brand new pool and don't
            # touch the existing pool, since closing a socket
            # in the subprocess can affect the parent
            testing.db.pool = testing.db.pool.recreate()

            gc_collect()
            samples = []
            max_ = 0
            # how many batches in a row the rolling maximum has grown;
            # must decay back to zero for the run to count as success
            max_grew_for = 0
            success = False
            until_maxtimes = 0
            try:
                while True:
                    if until_maxtimes >= maxtimes // 5:
                        break
                    # one batch: five invocations, sampling the object
                    # count after each
                    for x in range(5):
                        try:
                            func(*func_args)
                        except Exception as err:
                            queue.put(
                                (
                                    "result",
                                    False,
                                    "Test raised an exception: %r" % err,
                                )
                            )
                            raise
                        gc_collect()
                        samples.append(
                            get_num_objects()
                            if get_num_objects is not None
                            else len(get_objects_skipping_sqlite_issue())
                        )

                    if assert_no_sessions:
                        assert len(_sessions) == 0, "%d sessions remain" % (
                            len(_sessions),
                        )

                    # queue.put(('samples', samples))

                    latest_max = max(samples[-5:])
                    if latest_max > max_:
                        queue.put(
                            (
                                "status",
                                "Max grew from %s to %s, max has "
                                "grown for %s samples"
                                % (max_, latest_max, max_grew_for),
                            )
                        )
                        max_ = latest_max
                        max_grew_for += 1
                        until_maxtimes += 1
                        continue
                    else:
                        queue.put(
                            (
                                "status",
                                "Max remained at %s, %s more attempts left"
                                % (max_, max_grew_for),
                            )
                        )
                        max_grew_for -= 1
                        if max_grew_for == 0:
                            success = True
                            break
            except Exception as err:
                queue.put(("result", False, "got exception: %s" % err))
            else:
                if not success:
                    queue.put(
                        (
                            "result",
                            False,
                            "Ran for a total of %d times, memory kept "
                            "growing: %r" % (maxtimes, samples),
                        )
                    )
                else:
                    queue.put(("result", True, "success"))

        def run_plain(*func_args):
            # In-process variant of run_in_process; kept for debugging
            # (swap in via the commented-out "return run_plain" below).
            import queue as _queue

            q = _queue.Queue()
            profile(q, func_args)

            while True:
                row = q.get()
                typ = row[0]
                if typ == "samples":
                    print("sample gc sizes:", row[1])
                elif typ == "status":
                    print(row[1])
                elif typ == "result":
                    break
                else:
                    assert False, "can't parse row"

            assert row[1], row[2]

        # return run_plain

        def run_in_process(*func_args):
            # The wrapper actually returned to callers: run ``profile``
            # in a subprocess, echo its status messages, then re-assert
            # its final ("result", ok, msg) verdict in the parent.
            queue = multiprocessing.Queue()
            proc = multiprocessing.Process(
                target=profile, args=(queue, func_args)
            )
            proc.start()
            while True:
                row = queue.get()
                typ = row[0]
                if typ == "samples":
                    print("sample gc sizes:", row[1])
                elif typ == "status":
                    print(row[1])
                elif typ == "result":
                    break
                else:
                    assert False, "can't parse row"
            proc.join()
            assert row[1], row[2]

        return run_in_process

    return decorate
def assert_no_mappers():
    """Dispose all mapper configuration and force a gc pass.

    NOTE(review): despite the name, nothing is asserted here — the
    body only clears mappers and collects garbage; confirm whether a
    mapper-count assertion was intentionally removed.
    """
    clear_mappers()
    gc_collect()
class EnsureZeroed(fixtures.ORMTest):
    """Test base that starts each test from a clean slate: no live
    sessions, no mapper configuration, and a dedicated engine with a
    deliberately small statement cache."""

    def setup_test(self):
        # reset global ORM state so that leak measurements are not
        # polluted by previous tests
        _sessions.clear()
        clear_mappers()

        # enable query caching, however make the cache small so that
        # the tests don't take too long.  issues w/ caching include making
        # sure sessions don't get stuck inside of it.  However it will
        # make tests like test_mapper_reset take a long time because mappers
        # are very much a part of what's in the cache.
        self.engine = engines.testing_engine(
            options={"use_reaper": False, "query_cache_size": 10}
        )
@testing.add_to_marker.memory_intensive
class MemUsageTest(EnsureZeroed):
    """Leak tests for individual Core components, run in-process via
    :func:`.profile_memory`, which fails a test when the gc object
    count keeps growing across repeated invocations."""

    __requires__ = ("cpython", "no_windows")

    def test_type_compile(self):
        # compiling a CAST against a freshly constructed dialect should
        # not accumulate objects
        from sqlalchemy.dialects.sqlite.base import dialect as SQLiteDialect

        cast = sa.cast(column("x"), sa.Integer)

        @profile_memory()
        def go():
            dialect = SQLiteDialect()
            cast.compile(dialect=dialect)

        go()

    @testing.requires.cextensions
    def test_DecimalResultProcessor_init(self):
        # repeated construction of the C-extension Decimal processor
        @profile_memory()
        def go():
            to_decimal_processor_factory({}, 10)

        go()

    @testing.requires.cextensions
    def test_DecimalResultProcessor_process(self):
        # repeated invocation of the C-extension Decimal processor
        @profile_memory()
        def go():
            to_decimal_processor_factory(decimal.Decimal, 10)(1.2)

        go()

    @testing.requires.cextensions
    def test_cycles_in_row(self):
        # a Row embedded in a reference cycle must still be collectable
        tup = result.result_tuple(["a", "b", "c"])

        @profile_memory()
        def go():
            obj = {"foo": {}}
            obj["foo"]["bar"] = obj

            row = tup([1, 2, obj])

            obj["foo"]["row"] = row

            del row

        go()

    def test_ad_hoc_types(self):
        """test storage of bind processors, result processors
        in dialect-wide registry."""

        from sqlalchemy.dialects import mysql, postgresql, sqlite
        from sqlalchemy import types

        eng = engines.testing_engine()
        for args in (
            (types.Integer,),
            (types.String,),
            (types.PickleType,),
            (types.Enum, "a", "b", "c"),
            (sqlite.DATETIME,),
            (postgresql.ENUM, "a", "b", "c"),
            (types.Interval,),
            (postgresql.INTERVAL,),
            (mysql.VARCHAR,),
        ):

            @profile_memory()
            def go():
                type_ = args[0](*args[1:])
                bp = type_._cached_bind_processor(eng.dialect)
                rp = type_._cached_result_processor(eng.dialect, 0)
                bp, rp  # strong reference

            go()

        # the dialect-wide processor memo registry must end up empty
        assert not eng.dialect._type_memos

    @testing.fails()
    def test_fixture_failure(self):
        # sanity check of the harness itself: a workload that really
        # does grow unboundedly must be caught by profile_memory
        # (hence the @testing.fails marker)
        class Foo:
            pass

        stuff = []

        @profile_memory(maxtimes=20)
        def go():
            stuff.extend(Foo() for i in range(100))

        go()
@testing.add_to_marker.memory_intensive
class MemUsageWBackendTest(fixtures.MappedTest, EnsureZeroed):
__requires__ = "cpython", "memory_process_intensive", "no_asyncio"
__sparse_backend__ = True
# ensure a pure growing test trips the assertion
@testing.fails_if(lambda: True)
def test_fixture(self):
class Foo:
pass
x = []
@profile_memory(maxtimes=10)
def go():
x[-1:] = [Foo(), Foo(), Foo(), Foo(), Foo(), Foo()]
go()
def test_session(self):
metadata = MetaData()
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table2 = Table(
"mytable2",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
Column("col3", Integer, ForeignKey("mytable.col1")),
)
metadata.create_all(self.engine)
m1 = self.mapper_registry.map_imperatively(
A,
table1,
properties={
"bs": relationship(
B, cascade="all, delete", order_by=table2.c.col1
)
},
)
m2 = self.mapper_registry.map_imperatively(B, table2)
@profile_memory()
def go():
with Session(self.engine) as sess:
a1 = A(col2="a1")
a2 = A(col2="a2")
a3 = A(col2="a3")
a1.bs.append(B(col2="b1"))
a1.bs.append(B(col2="b2"))
a3.bs.append(B(col2="b3"))
for x in [a1, a2, a3]:
sess.add(x)
sess.commit()
alist = sess.query(A).order_by(A.col1).all()
eq_(
[
A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]),
A(col2="a2", bs=[]),
A(col2="a3", bs=[B(col2="b3")]),
],
alist,
)
for a in alist:
sess.delete(a)
sess.commit()
go()
metadata.drop_all(self.engine)
del m1, m2
assert_no_mappers()
def test_sessionmaker(self):
@profile_memory()
def go():
sessmaker = sessionmaker(bind=self.engine)
sess = sessmaker()
r = sess.execute(select(1))
r.close()
sess.close()
del sess
del sessmaker
go()
@testing.emits_warning("Compiled statement cache for mapper.*")
@testing.emits_warning("Compiled statement cache for lazy loader.*")
@testing.crashes("sqlite", ":memory: connection not suitable here")
def test_orm_many_engines(self):
metadata = MetaData()
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table2 = Table(
"mytable2",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
Column("col3", Integer, ForeignKey("mytable.col1")),
)
metadata.create_all(self.engine)
m1 = self.mapper_registry.map_imperatively(
A,
table1,
properties={
"bs": relationship(
B, cascade="all, delete", order_by=table2.c.col1
)
},
_compiled_cache_size=50,
)
m2 = self.mapper_registry.map_imperatively(
B, table2, _compiled_cache_size=50
)
@profile_memory()
def go():
engine = engines.testing_engine(
options={
"logging_name": "FOO",
"pool_logging_name": "BAR",
"use_reaper": False,
}
)
with Session(engine) as sess:
a1 = A(col2="a1")
a2 = A(col2="a2")
a3 = A(col2="a3")
a1.bs.append(B(col2="b1"))
a1.bs.append(B(col2="b2"))
a3.bs.append(B(col2="b3"))
for x in [a1, a2, a3]:
sess.add(x)
sess.commit()
alist = sess.query(A).order_by(A.col1).all()
eq_(
[
A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]),
A(col2="a2", bs=[]),
A(col2="a3", bs=[B(col2="b3")]),
],
alist,
)
for a in alist:
sess.delete(a)
sess.commit()
engine.dispose()
go()
metadata.drop_all(self.engine)
del m1, m2
assert_no_mappers()
@testing.emits_warning("Compiled statement cache for.*")
def test_many_updates(self):
metadata = MetaData()
wide_table = Table(
"t",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
*[Column("col%d" % i, Integer) for i in range(10)],
)
class Wide:
pass
self.mapper_registry.map_imperatively(
Wide, wide_table, _compiled_cache_size=10
)
metadata.create_all(self.engine)
with Session(self.engine) as session:
w1 = Wide()
session.add(w1)
session.commit()
del session
counter = [1]
@profile_memory()
def go():
with Session(self.engine) as session:
w1 = session.query(Wide).first()
x = counter[0]
dec = 10
while dec > 0:
# trying to count in binary here,
# works enough to trip the test case
if pow(2, dec) < x:
setattr(w1, "col%d" % dec, counter[0])
x -= pow(2, dec)
dec -= 1
session.commit()
counter[0] += 1
try:
go()
finally:
metadata.drop_all(self.engine)
@testing.requires.savepoints
def test_savepoints(self):
metadata = MetaData()
some_table = Table(
"t",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
)
class SomeClass:
pass
self.mapper_registry.map_imperatively(SomeClass, some_table)
metadata.create_all(self.engine)
with Session(self.engine) as session:
target_strings = (
session.connection().dialect.identifier_preparer._strings
)
@profile_memory(
assert_no_sessions=False,
get_num_objects=lambda: len(target_strings),
)
def go():
with Session(self.engine) as session, session.begin():
sc = SomeClass()
session.add(sc)
with session.begin_nested():
session.query(SomeClass).first()
try:
go()
finally:
metadata.drop_all(self.engine)
@testing.crashes("mysql+cymysql", "blocking")
def test_unicode_warnings(self):
metadata = MetaData()
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", Unicode(30)),
)
metadata.create_all(self.engine)
i = [1]
# the times here is cranked way up so that we can see
# pysqlite clearing out its internal buffer and allow
# the test to pass
@testing.emits_warning()
@profile_memory()
def go():
# execute with a non-unicode object. a warning is emitted,
# this warning shouldn't clog up memory.
with self.engine.connect() as conn:
conn.execute(
table1.select().where(table1.c.col2 == "foo%d" % i[0])
)
i[0] += 1
try:
go()
finally:
metadata.drop_all(self.engine)
def test_warnings_util(self):
counter = itertools.count()
import warnings
warnings.filterwarnings("ignore", "memusage warning.*")
@profile_memory()
def go():
util.warn_limited(
"memusage warning, param1: %s, param2: %s",
(next(counter), next(counter)),
)
go()
def test_mapper_reset(self):
metadata = MetaData()
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table2 = Table(
"mytable2",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
Column("col3", Integer, ForeignKey("mytable.col1")),
)
@profile_memory()
def go():
self.mapper_registry.map_imperatively(
A,
table1,
properties={"bs": relationship(B, order_by=table2.c.col1)},
)
self.mapper_registry.map_imperatively(B, table2)
sess = Session(self.engine, autoflush=False)
a1 = A(col2="a1")
a2 = A(col2="a2")
a3 = A(col2="a3")
a1.bs.append(B(col2="b1"))
a1.bs.append(B(col2="b2"))
a3.bs.append(B(col2="b3"))
for x in [a1, a2, a3]:
sess.add(x)
sess.flush()
sess.expunge_all()
alist = sess.query(A).order_by(A.col1).all()
eq_(
[
A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]),
A(col2="a2", bs=[]),
A(col2="a3", bs=[B(col2="b3")]),
],
alist,
)
for a in alist:
sess.delete(a)
sess.flush()
sess.close()
clear_mappers()
metadata.create_all(self.engine)
try:
go()
finally:
metadata.drop_all(self.engine)
assert_no_mappers()
def test_alias_pathing(self):
metadata = MetaData()
a = Table(
"a",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("bid", Integer, ForeignKey("b.id")),
Column("type", String(30)),
)
asub = Table(
"asub",
metadata,
Column("id", Integer, ForeignKey("a.id"), primary_key=True),
Column("data", String(30)),
)
b = Table(
"b",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
)
self.mapper_registry.map_imperatively(
A, a, polymorphic_identity="a", polymorphic_on=a.c.type
)
self.mapper_registry.map_imperatively(
ASub, asub, inherits=A, polymorphic_identity="asub"
)
self.mapper_registry.map_imperatively(
B, b, properties={"as_": relationship(A)}
)
metadata.create_all(self.engine)
sess = Session(self.engine)
a1 = ASub(data="a1")
a2 = ASub(data="a2")
a3 = ASub(data="a3")
b1 = B(as_=[a1, a2, a3])
sess.add(b1)
sess.commit()
del sess
# sqlite has a slow enough growth here
# that we have to run it more times to see the
# "dip" again
@profile_memory(maxtimes=120)
def go():
sess = Session(self.engine)
sess.query(B).options(subqueryload(B.as_.of_type(ASub))).all()
sess.close()
del sess
try:
go()
finally:
metadata.drop_all(self.engine)
clear_mappers()
def test_path_registry(self):
metadata = MetaData()
a = Table(
"a",
metadata,
Column("id", Integer, primary_key=True),
Column("foo", Integer),
Column("bar", Integer),
)
b = Table(
"b",
metadata,
Column("id", Integer, primary_key=True),
Column("a_id", ForeignKey("a.id")),
)
m1 = self.mapper_registry.map_imperatively(
A, a, properties={"bs": relationship(B)}
)
self.mapper_registry.map_imperatively(B, b)
@profile_memory()
def go():
ma = sa.inspect(aliased(A))
m1._path_registry[m1.attrs.bs][ma][m1.attrs.bar]
go()
clear_mappers()
def test_with_inheritance(self):
metadata = MetaData()
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table2 = Table(
"mytable2",
metadata,
Column(
"col1",
Integer,
ForeignKey("mytable.col1"),
primary_key=True,
test_needs_autoincrement=True,
),
Column("col3", String(30)),
)
@profile_memory()
def go():
class A(fixtures.ComparableEntity):
pass
class B(A):
pass
clear_mappers()
self.mapper_registry.map_imperatively(
A,
table1,
polymorphic_on=table1.c.col2,
polymorphic_identity="a",
)
self.mapper_registry.map_imperatively(
B, table2, inherits=A, polymorphic_identity="b"
)
sess = Session(self.engine, autoflush=False)
a1 = A()
a2 = A()
b1 = B(col3="b1")
b2 = B(col3="b2")
for x in [a1, a2, b1, b2]:
sess.add(x)
sess.flush()
sess.expunge_all()
alist = sess.query(A).order_by(A.col1).all()
eq_([A(), A(), B(col3="b1"), B(col3="b2")], alist)
for a in alist:
sess.delete(a)
sess.flush()
# don't need to clear_mappers()
del B
del A
metadata.create_all(self.engine)
try:
go()
finally:
metadata.drop_all(self.engine)
assert_no_mappers()
def test_with_manytomany(self):
metadata = MetaData()
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table2 = Table(
"mytable2",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table3 = Table(
"t1tot2",
metadata,
Column("t1", Integer, ForeignKey("mytable.col1")),
Column("t2", Integer, ForeignKey("mytable2.col1")),
)
@profile_memory()
def go():
class A(fixtures.ComparableEntity):
pass
class B(fixtures.ComparableEntity):
pass
self.mapper_registry.map_imperatively(
A,
table1,
properties={
"bs": relationship(
B, secondary=table3, backref="as", order_by=table3.c.t1
)
},
)
self.mapper_registry.map_imperatively(B, table2)
sess = Session(self.engine, autoflush=False)
a1 = A(col2="a1")
a2 = A(col2="a2")
b1 = B(col2="b1")
b2 = B(col2="b2")
a1.bs.append(b1)
a2.bs.append(b2)
for x in [a1, a2]:
sess.add(x)
sess.flush()
sess.expunge_all()
alist = sess.query(A).order_by(A.col1).all()
eq_([A(bs=[B(col2="b1")]), A(bs=[B(col2="b2")])], alist)
for a in alist:
sess.delete(a)
sess.flush()
# mappers necessarily find themselves in the compiled cache,
# so to allow them to be GC'ed clear out the cache
self.engine.clear_compiled_cache()
del B
del A
metadata.create_all(self.engine)
try:
go()
finally:
metadata.drop_all(self.engine)
assert_no_mappers()
def test_many_discarded_relationships(self):
"""a use case that really isn't supported, nonetheless we can
guard against memleaks here so why not"""
m1 = MetaData()
t1 = Table("t1", m1, Column("id", Integer, primary_key=True))
t2 = Table(
"t2",
m1,
Column("id", Integer, primary_key=True),
Column("t1id", ForeignKey("t1.id")),
)
class T1:
pass
t1_mapper = self.mapper_registry.map_imperatively(T1, t1)
@testing.emits_warning()
@profile_memory()
def go():
class T2:
pass
t2_mapper = self.mapper_registry.map_imperatively(T2, t2)
t1_mapper.add_property("bar", relationship(t2_mapper))
s1 = Session(testing.db)
# this causes the path_registry to be invoked
s1.query(t1_mapper)._compile_context()
go()
# fails on newer versions of pysqlite due to unusual memory behavior
# in pysqlite itself. background at:
# https://thread.gmane.org/gmane.comp.python.db.pysqlite.user/2290
@testing.crashes("mysql+cymysql", "blocking")
def test_join_cache_deprecated_coercion(self):
metadata = MetaData()
table1 = Table(
"table1",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
)
table2 = Table(
"table2",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
Column("t1id", Integer, ForeignKey("table1.id")),
)
class Foo:
pass
class Bar:
pass
self.mapper_registry.map_imperatively(
Foo,
table1,
properties={
"bars": relationship(
self.mapper_registry.map_imperatively(Bar, table2)
)
},
)
metadata.create_all(self.engine)
session = sessionmaker(self.engine)
@profile_memory()
def go():
s = table2.select()
sess = session()
with testing.expect_deprecated(
"Implicit coercion of SELECT and " "textual SELECT constructs"
):
sess.query(Foo).join(s, Foo.bars).all()
sess.rollback()
try:
go()
finally:
metadata.drop_all(self.engine)
@testing.crashes("mysql+cymysql", "blocking")
def test_join_cache(self):
metadata = MetaData()
table1 = Table(
"table1",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
)
table2 = Table(
"table2",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
Column("t1id", Integer, ForeignKey("table1.id")),
)
class Foo:
pass
class Bar:
pass
self.mapper_registry.map_imperatively(
Foo,
table1,
properties={
"bars": relationship(
self.mapper_registry.map_imperatively(Bar, table2)
)
},
)
metadata.create_all(self.engine)
session = sessionmaker(self.engine)
@profile_memory()
def go():
s = aliased(Bar, table2.select().subquery())
sess = session()
sess.query(Foo).join(s, Foo.bars).all()
sess.rollback()
try:
go()
finally:
metadata.drop_all(self.engine)
@testing.add_to_marker.memory_intensive
class CycleTest(_fixtures.FixtureTest):
__requires__ = ("cpython", "no_windows")
run_setup_mappers = "once"
run_inserts = "once"
run_deletes = None
@classmethod
def setup_mappers(cls):
cls._setup_stock_mapping()
def test_query(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
@assert_cycles()
def go():
return s.query(User).all()
go()
def test_session_execute_orm(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
@assert_cycles()
def go():
stmt = select(User)
s.execute(stmt)
go()
def test_cache_key(self):
User, Address = self.classes("User", "Address")
configure_mappers()
@assert_cycles()
def go():
stmt = select(User)
stmt._generate_cache_key()
go()
def test_proxied_attribute(self):
from sqlalchemy.ext import hybrid
users = self.tables.users
class Foo:
@hybrid.hybrid_property
def user_name(self):
return self.name
self.mapper_registry.map_imperatively(Foo, users)
# unfortunately there's a lot of cycles with an aliased()
# for now, however calling upon clause_element does not seem
# to make it worse which is what this was looking to test
@assert_cycles(69)
def go():
a1 = aliased(Foo)
a1.user_name.__clause_element__()
go()
def test_query_alias(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
u1 = aliased(User)
@assert_cycles()
def go():
s.query(u1).all()
go()
def test_entity_path_w_aliased(self):
User, Address = self.classes("User", "Address")
configure_mappers()
@assert_cycles()
def go():
u1 = aliased(User)
inspect(u1)._path_registry[User.addresses.property]
go()
def test_orm_objects_from_query(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
def generate():
objects = s.query(User).filter(User.id == 7).all()
gc_collect()
return objects
@assert_cycles()
def go():
generate()
go()
def test_orm_objects_from_query_w_selectinload(self):
User, Address = self.classes("User", "Address")
s = fixture_session()
def generate():
objects = s.query(User).options(selectinload(User.addresses)).all()
gc_collect()
return objects
@assert_cycles()
def go():
generate()
go()
def test_selectinload_option_unbound(self):
User, Address = self.classes("User", "Address")
@assert_cycles()
def go():
selectinload(User.addresses)
go()
def test_selectinload_option_bound(self):
User, Address = self.classes("User", "Address")
@assert_cycles()
def go():
Load(User).selectinload(User.addresses)
go()
def test_orm_path(self):
User, Address = self.classes("User", "Address")
@assert_cycles()
def go():
inspect(User)._path_registry[User.addresses.property][
inspect(Address)
]
go()
def test_joinedload_option_unbound(self):
User, Address = self.classes("User", "Address")
@assert_cycles()
def go():
joinedload(User.addresses)
go()
def test_joinedload_option_bound(self):
User, Address = self.classes("User", "Address")
@assert_cycles()
def go():
l1 = Load(User).joinedload(User.addresses)
l1._generate_cache_key()
go()
def test_orm_objects_from_query_w_joinedload(self):
User, Address = self.classes("User", "Address")
s = fixture_session()
def generate():
objects = s.query(User).options(joinedload(User.addresses)).all()
gc_collect()
return objects
@assert_cycles()
def go():
generate()
go()
def test_query_filtered(self):
User, Address = self.classes("User", "Address")
s = fixture_session()
@assert_cycles()
def go():
return s.query(User).filter(User.id == 7).all()
go()
def test_query_joins(self):
User, Address = self.classes("User", "Address")
s = fixture_session()
# cycles here are due to ClauseElement._cloned_set, others
# as of cache key
@assert_cycles(4)
def go():
s.query(User).join(User.addresses).all()
go()
def test_query_joinedload(self):
User, Address = self.classes("User", "Address")
s = fixture_session()
def generate():
s.query(User).options(joinedload(User.addresses)).all()
# cycles here are due to ClauseElement._cloned_set and Load.context,
# others as of cache key. The orm.instances() function now calls
# dispose() on both the context and the compiled state to try
# to reduce these cycles.
@assert_cycles(18)
def go():
generate()
go()
def test_plain_join(self):
users, addresses = self.tables("users", "addresses")
@assert_cycles()
def go():
str(users.join(addresses).compile(testing.db))
go()
def test_plain_join_select(self):
users, addresses = self.tables("users", "addresses")
# cycles here are due to ClauseElement._cloned_set, others
# as of cache key
@assert_cycles(7)
def go():
s = select(users).select_from(users.join(addresses))
state = s._compile_state_factory(s, s.compile(testing.db))
state.froms
go()
def test_orm_join(self):
User, Address = self.classes("User", "Address")
@assert_cycles()
def go():
str(orm_join(User, Address, User.addresses).compile(testing.db))
go()
def test_join_via_query_relationship(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
@assert_cycles()
def go():
s.query(User).join(User.addresses)
go()
def test_join_via_query_to_entity(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
@assert_cycles()
def go():
s.query(User).join(Address)
go()
def test_result_fetchone(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
stmt = s.query(User).join(User.addresses).statement
@assert_cycles(4)
def go():
result = s.connection(bind_arguments=dict(mapper=User)).execute(
stmt
)
while True:
row = result.fetchone()
if row is None:
break
go()
def test_result_fetchall(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
stmt = s.query(User).join(User.addresses).statement
@assert_cycles(4)
def go():
result = s.execute(stmt)
rows = result.fetchall() # noqa
go()
def test_result_fetchmany(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
stmt = s.query(User).join(User.addresses).statement
@assert_cycles(4)
def go():
result = s.execute(stmt)
for partition in result.partitions(3):
pass
go()
def test_result_fetchmany_unique(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
stmt = s.query(User).join(User.addresses).statement
@assert_cycles(4)
def go():
result = s.execute(stmt)
for partition in result.unique().partitions(3):
pass
go()
def test_core_select_from_orm_query(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
stmt = s.query(User).join(User.addresses).statement
# ORM query using future select for .statement is adding
# some ORMJoin cycles here during compilation. not worth trying to
# find it
@assert_cycles(4)
def go():
s.execute(stmt)
go()
def test_adapt_statement_replacement_traversal(self):
User, Address = self.classes("User", "Address")
statement = select(User).select_from(
orm_join(User, Address, User.addresses)
)
@assert_cycles()
def go():
replacement_traverse(statement, {}, lambda x: None)
go()
def test_adapt_statement_cloned_traversal(self):
User, Address = self.classes("User", "Address")
statement = select(User).select_from(
orm_join(User, Address, User.addresses)
)
@assert_cycles()
def go():
cloned_traverse(statement, {}, {})
go()
def test_column_adapter_lookup(self):
User, Address = self.classes("User", "Address")
u1 = aliased(User)
@assert_cycles()
def go():
adapter = sql_util.ColumnAdapter(inspect(u1).selectable)
adapter.columns[User.id]
go()
def test_orm_aliased(self):
User, Address = self.classes("User", "Address")
@assert_cycles()
def go():
u1 = aliased(User)
inspect(u1)
go()
@testing.fails()
def test_the_counter(self):
@assert_cycles()
def go():
x = []
x.append(x)
go()
def test_weak_sequence(self):
class Foo:
pass
f = Foo()
@assert_cycles()
def go():
util.WeakSequence([f])
go()
@testing.provide_metadata
def test_optimized_get(self):
Base = declarative_base(metadata=self.metadata)
class Employee(Base):
__tablename__ = "employee"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
type = Column(String(10))
__mapper_args__ = {"polymorphic_on": type}
class Engineer(Employee):
__tablename__ = " engineer"
id = Column(ForeignKey("employee.id"), primary_key=True)
engineer_name = Column(String(50))
__mapper_args__ = {"polymorphic_identity": "engineer"}
Base.metadata.create_all(testing.db)
s = Session(testing.db)
s.add(Engineer(engineer_name="wally"))
s.commit()
s.close()
@assert_cycles()
def go():
e1 = s.query(Employee).first()
e1.engineer_name
go()
def test_visit_binary_product(self):
a, b, q, e, f, j, r = [column(chr_) for chr_ in "abqefjr"]
from sqlalchemy import and_, func
from sqlalchemy.sql.util import visit_binary_product
expr = and_((a + b) == q + func.sum(e + f), j == r)
def visit(expr, left, right):
pass
@assert_cycles()
def go():
visit_binary_product(visit, expr)
go()
def test_session_transaction(self):
@assert_cycles()
def go():
s = Session(testing.db)
s.connection()
s.close()
go()
def test_session_commit_rollback(self):
# this is enabled by #5074
@assert_cycles()
def go():
s = Session(testing.db)
s.connection()
s.commit()
go()
@assert_cycles()
def go():
s = Session(testing.db)
s.connection()
s.rollback()
go()
def test_session_multi_transaction(self):
@assert_cycles()
def go():
s = Session(testing.db)
assert s._transaction is None
s.connection()
s.close()
assert s._transaction is None
s.connection()
assert s._transaction is not None
s.close()
go()
| sqlalchemy/sqlalchemy | test/aaa_profiling/test_memusage.py | Python | mit | 45,650 | [
"VisIt"
] | 201dadd0bfad21685bfa49c91b5012c3d9f577d116e150d68d8297fa061ae025 |
# Orca
#
# Copyright 2005-2008 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Provides debug utilities for Orca. Debugging is managed by a debug
level, which is held in the debugLevel field. All other methods take
a debug level, which is compared to the current debug level to
determine if the content should be output."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2005-2008 Sun Microsystems Inc."
__license__ = "LGPL"
import inspect
import traceback
import os
import pyatspi
import subprocess
import sys
from . import orca_state
# Used to turn off all debugging.
#
LEVEL_OFF = 10000
# Used to describe events of considerable importance and which will prevent
# normal program execution.
#
LEVEL_SEVERE = 1000
# Used to decribe events of interest to end users or system managers or which
# indicate potential problems, but which Orca can deal with without crashing.
#
LEVEL_WARNING = 900
# Used to indicate reasonably significant messages that make sense to end users
# and system managers.
#
# For the purposes of Orca, LEVEL_INFO means display the text being sent to
# speech and braille.
#
LEVEL_INFO = 800
# Used to indicate static configuration information to assist in debugging
# problems that may be associated with a particular configuration.
#
# For the purposes of Orca, LEVEL_CONFIGURATION means display the various
# apsects of whether a particular feature (e.g., speech, braille, etc.)
# is enabled or not as well as details about that feature.
#
LEVEL_CONFIGURATION = 700
# Used for lowest volume of detailed tracing information.
#
# For the purposes of Orca, this is braille and keyboard input, script
# activation and deletion, locus of focus changes, and visual changes
# to the locus of focus.
#
LEVEL_FINE = 600
# Used for medium volume of detailed tracing information.
#
# For the purposes of Orca, this is for debugging speech and braille
# generators and tracking the synthesis of device events.
#
LEVEL_FINER = 500
# Used for maximum volume of detailed tracing information.
#
# For the purposes of Orca, this is for tracking all AT-SPI object
# events. NOTE that one can up the debug level of AT-SPI object
# events by setting the eventDebugLevel. In addition, one can filter
# events by setting eventDebugFilter to a regular expression that
# matches event type names.
#
LEVEL_FINEST = 400
# Used for all detailed tracing information, even finer than LEVEL_FINEST
#
LEVEL_ALL = 0
debugLevel = LEVEL_SEVERE
# The debug file. If this is not set, then all debug output is done
# via stdout. If this is set, then all debug output is sent to the
# file. This can be useful for debugging because one can pass in a
# non-buffered file to better track down hangs.
#
debugFile = None
# The debug filter should be either None (which means to match all
# events) or a compiled regular expression from the 're' module (see
# http://www.amk.ca/python/howto/regex/). The regular expression will
# be used as a matching function - if the event type creates a match
# in the regular expression, then it will be considered for output. A
# typical call to this method might look like:
#
# debug.eventDebugFilter = rc.compile('focus:|window:activate')
#
eventDebugLevel = LEVEL_FINEST
eventDebugFilter = None
# What module(s) should be traced if traceit is being used. By default
# we'll just attend to ourself. (And by default, we will not enable
# traceit.) Note that enabling this functionality will drag your system
# to a complete and utter halt and should only be used in extreme
# desperation by developers who are attempting to reproduce a very
# specific, immediate issue. Trust me. :-) Disabling braille monitor in
# this case is also strongly advised.
#
TRACE_MODULES = ['orca']
# Specific modules to ignore with traceit.
#
TRACE_IGNORE_MODULES = ['traceback', 'linecache', 'locale', 'gettext',
'logging', 'UserDict', 'encodings', 'posixpath',
'genericpath', 're']
# Specific apps to trace with traceit.
#
TRACE_APPS = []
# What AT-SPI event(s) should be traced if traceit is being used. By
# default, we'll trace everything. Examples of what you might wish to
# do to narrow things down include:
#
# TRACE_EVENTS = ['object:state-changed', 'focus:']
# (for any and all object:state-changed events plus focus: events)
# TRACE_EVENTS = ['object:state-changed:selected']
# (if you know the exact event type of interest)
#
TRACE_EVENTS = []
# What pyatspi role(s) should be traced if traceit is being used. By
# default, we'll trace everything. An example of what you might wish
# to do to narrow things down, if you know buttons trigger the problem:
#
# TRACE_ROLES = [pyatspi.ROLE_PUSH_BUTTON, pyatspi.ROLE_TOGGLE_BUTTON]
#
TRACE_ROLES = []
# Whether or not traceit should only trace the work being done when
# processing an actual event. This is when most bad things happen.
# So we'll default to True.
#
TRACE_ONLY_PROCESSING_EVENTS = True
def printException(level):
    """Writes the traceback of the current exception to the debug output,
    framed by blank lines, provided the level passes the debug filter.

    Arguments:
    - level: the accepted debug level
    """
    if level < debugLevel:
        return
    println(level)
    traceback.print_exc(100, debugFile)
    println(level)
def printStack(level):
    """Writes the current call stack to the debug output, framed by
    blank lines, provided the level passes the debug filter.

    Arguments:
    - level: the accepted debug level
    """
    if level < debugLevel:
        return
    println(level)
    traceback.print_stack(None, 100, debugFile)
    println(level)
def println(level, text=""):
    """Writes a line of text to the debug output.

    The text is written to debugFile when one has been set; otherwise it
    goes to sys.stderr.  Nothing is written when the given level is below
    the current debugLevel.

    Arguments:
    - level: the accepted debug level
    - text: the text to print (default is a blank line)
    """
    if level < debugLevel:
        return

    # Pick the destination once instead of duplicating the whole
    # write/except ladder for each stream (the original repeated it).
    stream = debugFile if debugFile else sys.stderr
    try:
        stream.writelines([text, "\n"])
    except TypeError:
        # text was not string-like; report that rather than crashing.
        stream.writelines(["TypeError when trying to write text", "\n"])
    except Exception:
        # Debug output must never take the application down; note that
        # the original used a bare except, which also swallowed
        # KeyboardInterrupt/SystemExit -- narrowed here deliberately.
        stream.writelines(["Exception when trying to write text", "\n"])
def printResult(level, result=None):
    """Prints the return result, along with information about the
    method, arguments, and any errors encountered."""

    if level < debugLevel:
        return

    # stack[1] is the frame of the function that called printResult;
    # stack[2] is that function's own caller.
    stack = inspect.stack()
    current, prev = stack[1], stack[2]
    frame = current[0]

    # To better print arguments which are accessible objects:
    # stringify every local so the formatted arg list shows values,
    # not repr() addresses.
    args = inspect.getargvalues(frame)
    for key, value in list(args.locals.items()):
        args.locals[key] = str(value)
    # Strip the quotes the stringified values picked up.
    fArgs = str.replace(inspect.formatargvalues(*args), "'", "")

    # "caller (line N) -> callee(args)" summary line followed by the result.
    callString = 'CALL: %s.%s (line %s) -> %s.%s%s' % (
        inspect.getmodulename(prev[1]), prev[3], prev[2],
        inspect.getmodulename(current[1]), current[3], fArgs)
    string = '%s\n%s %s' % (callString, 'RESULT:', result)
    println(level, '%s' % string)
def printObjectEvent(level, event, sourceInfo=None):
    """Prints out an Python Event object.  The given level may be
    overridden if the eventDebugLevel is greater.  Furthermore, only
    events with event types matching the eventDebugFilter regular
    expression will be printed.

    Arguments:
    - level: the accepted debug level
    - event: the Python Event to print
    - sourceInfo: additional string to print out
    """
    # If a filter is configured, skip events whose type doesn't match it.
    if eventDebugFilter and not eventDebugFilter.match(event.type):
        return

    level = max(level, eventDebugLevel)
    text = "OBJECT EVENT: %-40s detail=(%d,%d,%s)" \
           % (event.type, event.detail1, event.detail2, event.any_data)
    println(level, text)
    if sourceInfo:
        println(level, " %s" % sourceInfo)
def printInputEvent(level, string):
    """Prints out an input event.  The given level may be overridden
    if the eventDebugLevel (see setEventDebugLevel) is greater.

    Arguments:
    - level: the accepted debug level
    - string: the string representing the input event
    """
    effectiveLevel = max(level, eventDebugLevel)
    println(effectiveLevel, string)
def printDetails(level, indent, accessible, includeApp=True):
    """Lists the details of the given accessible with the given
    indentation.

    Arguments:
    - level: the accepted debug level
    - indent: a string containing spaces for indentation
    - accessible: the accessible whose details are to be listed
    - includeApp: if True, include information about the app
    """
    if level < debugLevel or not accessible:
        return
    details = getAccessibleDetails(level, accessible, indent, includeApp)
    println(level, details)
def getAccessibleDetails(level, acc, indent="", includeApp=True):
    """Returns a string, suitable for printing, that describes the
    given accessible.

    Arguments:
    - indent: A string to prefix the output with
    - includeApp: If True, include information about the app
      for this accessible.

    Returns the description string, or "" if level is below debugLevel.
    """
    if level < debugLevel:
        return ""

    if includeApp:
        app = acc.getApplication()
        if app:
            try:
                string = indent + "app.name='%s' " % app.name
            except (LookupError, RuntimeError):
                # The app may have gone away between getApplication()
                # and reading its name.
                string = indent + "app.name='<error getting name>' "
        else:
            string = indent + "app=None "
    else:
        string = indent

    # create the States string
    try:
        stateSet = acc.getState()
    except:
        # NOTE(review): if getState() raises, stateSet stays unbound and the
        # stateSet.getStates() call below raises NameError, which is caught
        # by the next bare except.  Works, but fragile -- confirm intended.
        string += "(exception getting state set)"
    try:
        states = stateSet.getStates()
    except:
        string += "(exception getting states)"
        states = []
    state_strings = []
    for state in states:
        state_strings.append(pyatspi.stateToString(state))
    state_string = ' '.join(state_strings)

    # create the relations string
    try:
        relations = acc.getRelationSet()
    except:
        string += "(exception getting relation set)"
        relations = None
    if relations:
        relation_strings = []
        for relation in relations:
            relation_strings.append( \
                pyatspi.relationToString(relation.getRelationType()))
        rel_string = ' '.join(relation_strings)
    else:
        rel_string = ''

    try:
        string += "name='%s' role='%s' state='%s' relations='%s'" \
                  % (acc.name or 'None', acc.getRoleName(),
                     state_string, rel_string)
    except:
        # Any AT-SPI call above can fail if the object died.
        string += "(exception fetching data)"
    return string
# The following code originated from the following URL:
#
# http://www.dalkescientific.com/writings/diary/archive/ \
# 2005/04/20/tracing_python_code.html
#
import linecache
def _getFileAndModule(frame):
filename, module = None, None
try:
filename = frame.f_globals["__file__"]
module = frame.f_globals["__name__"]
except:
pass
else:
if (filename.endswith(".pyc") or filename.endswith(".pyo")):
filename = filename[:-1]
return filename, module
def _shouldTraceIt():
    """Returns True if the current state matches the TRACE_* filters,
    i.e. traceit should emit output for the current frame.
    """
    objEvent = orca_state.currentObjectEvent
    if not objEvent:
        # No event being processed: trace only if we were asked to trace
        # non-event work too.
        return not TRACE_ONLY_PROCESSING_EVENTS

    eventSource = objEvent.source
    if TRACE_APPS:
        # BUG FIX: the app lookup used to be performed once *outside* the
        # try as well, so a dead accessible raised right through the guard.
        # Only the protected call remains.
        try:
            app = objEvent.host_application or eventSource.getApplication()
        except:
            pass
        else:
            if not app.name in TRACE_APPS:
                return False

    if TRACE_ROLES and not eventSource.getRole() in TRACE_ROLES:
        return False

    # Trace only events whose type starts with one of the configured
    # prefixes (when any are configured).
    if TRACE_EVENTS and not any(map(objEvent.type.startswith, TRACE_EVENTS)):
        return False

    return True
def traceit(frame, event, arg):
    """Line tracing utility to output all lines as they are executed by
    the interpreter.  This is to be used by sys.settrace and is for
    debugging purposes.

    Arguments:
    - frame: is the current stack frame
    - event: 'call', 'line', 'return', 'exception', 'c_call', 'c_return',
      or 'c_exception'
    - arg: depends on the event type (see docs for sys.settrace)
    """
    if not _shouldTraceIt():
        # Returning None disables tracing in this scope.
        return None

    filename, module = _getFileAndModule(frame)
    if not (filename and module):
        return traceit
    if module in TRACE_IGNORE_MODULES:
        return traceit
    if TRACE_MODULES and not module.split('.')[0] in TRACE_MODULES:
        return traceit
    if not event in ['call', 'line', 'return']:
        return traceit

    lineno = frame.f_lineno
    line = linecache.getline(filename, lineno).rstrip()
    output = 'TRACE %s:%s: %s' % (module, lineno, line)

    if event == 'call':
        # Dump the call's arguments (minus self) one per line.
        argvals = inspect.getargvalues(frame)
        keys = [x for x in argvals[0] if x != 'self']
        try:
            values = list(map(argvals[3].get, keys))
        except TypeError:
            # argvals[0] can contain a nested list of names
            # (pre-3.0 tuple parameters) -- unwrap one level.
            if len(keys) == 1 and isinstance(keys[0], list):
                values = list(map(argvals[3].get, keys[0]))
            else:
                return traceit
        for i, key in enumerate(keys):
            output += '\n ARG %s=%s' % (key, values[i])

    lineElements = line.strip().split()
    if lineElements and lineElements[0] == 'return':
        if event == 'line':
            # Skip the 'line' event for a return statement; it is printed
            # with the return value when the 'return' event fires.
            return traceit
        output = '%s (rv: %s)' % (output, arg)

    println(LEVEL_ALL, output)
    return traceit
def getOpenFDCount(pid):
    """Returns the number of file descriptors the given process holds
    open, as reported by lsof.
    """
    output = subprocess.check_output(['lsof', '-w', '-Ff', '-p', str(pid)])
    lines = output.decode('UTF-8').split('\n')
    # lsof -Ff emits one "f<number>" record per open descriptor.
    fdRecords = [line for line in lines
                 if line and line[0] == 'f' and line[1:].isdigit()]
    return len(fdRecords)
def getCmdline(pid):
    """Returns the command line of the given process as a single string,
    or a placeholder message if it cannot be read.

    Reads /proc/<pid>/cmdline directly instead of shelling out to cat.
    The NUL separators between arguments are replaced with spaces.
    """
    try:
        with open('/proc/%s/cmdline' % pid) as procFile:
            cmdline = procFile.read()
    except:
        cmdline = '(Could not obtain cmdline)'
    cmdline = cmdline.replace('\x00', ' ')
    return cmdline
def pidOf(procName):
    """Returns a list of pids (ints) of processes matching procName,
    as reported by pgrep; empty when nothing matches.
    """
    proc = subprocess.Popen('pgrep %s' % procName,
                            shell=True,
                            stdout=subprocess.PIPE)
    output = proc.stdout.read()
    proc.stdout.close()
    return [int(p) for p in output.split()]
def examineProcesses():
    """Logs, at LEVEL_ALL, every application on the accessible desktop
    together with its pid, command line, and open-descriptor count.
    Useful for spotting hung or leaking processes.
    """
    desktop = pyatspi.Registry.getDesktop(0)
    println(LEVEL_ALL, 'INFO: Desktop has %i apps:' % desktop.childCount)
    for i, app in enumerate(desktop):
        pid = app.get_process_id()
        cmd = getCmdline(pid)
        fds = getOpenFDCount(pid)
        try:
            name = app.name
        except:
            name = 'ERROR: Could not get name'
        else:
            # An empty name often means the app stopped responding.
            if name == '':
                name = 'WARNING: Possible hang'
        println(LEVEL_ALL, '%3i. %s (pid: %s) %s file descriptors: %i' \
                % (i+1, name, pid, cmd, fds))

    # Other 'suspect' processes which might not show up as accessible apps.
    otherApps = ['apport']
    for app in otherApps:
        pids = pidOf(app)
        if not pids:
            println(LEVEL_ALL, 'INFO: no pid for %s' % app)
            continue
        for pid in pids:
            cmd = getCmdline(pid)
            fds = getOpenFDCount(pid)
            println(LEVEL_ALL, 'INFO: %s (pid: %s) %s file descriptors: %i' \
                    % (app, pid, cmd, fds))
| h4ck3rm1k3/orca-sonar | src/orca/debug.py | Python | lgpl-2.1 | 16,430 | [
"ORCA"
] | 46b6e7c66608844b62712fbb6cb3c4564f4aa60975b56fc3cbc12eead5c2d85f |
import re
import os
import ast
from .cli import CLI
from .runner import Runner
def _show(node):
for name, val in ast.iter_fields(node):
print("{name}: {val}".format(name=name, val=val))
class SetUpVisitor(ast.NodeVisitor):
    """Walks a parsed setup.py and collects package names from the
    'packages' and 'test_suite' keyword arguments of the setup() call.
    """

    def __init__(self):
        self.packages = []

    def append_testsuite(self, testsuite):
        # Only the top-level package of the dotted test-suite path matters.
        self.packages.append(testsuite.split('.')[0])

    def visit_Call(self, node):
        if not isinstance(node.func, ast.Name) or node.func.id != "setup":
            return
        for keyword in node.keywords:
            if keyword.arg == "packages":
                if isinstance(keyword.value, ast.List):
                    self.packages.extend(
                        elt.value for elt in keyword.value.elts)
                elif isinstance(keyword.value, ast.Str):
                    self.packages.append(keyword.value.value)
            elif keyword.arg == "test_suite":
                if isinstance(keyword.value, ast.List):
                    for elt in keyword.value.elts:
                        self.append_testsuite(elt.value)
                elif isinstance(keyword.value, ast.Str):
                    self.append_testsuite(keyword.value.value)
class PythonRunner(Runner):
    """Collects a Python source file plus everything it imports locally
    (modules, packages, setup.py package lists) into a {filename: code}
    mapping for submission to the Wandbox API.

    NOTE(review): file_open() and open_code() are presumably provided by
    the Runner base class -- confirm against .runner.
    """

    # Matches "import a, b as c"; group 1 is the module list.
    IMPORT_REGEX = re.compile(r'^\s*import\s*(.*?)(\s*as\s*\S*|)$')
    # Matches "from pkg import x as y"; group 1 is the package.
    FROM_IMPORT_REGEX = re.compile(r'^\s*from\s*(\S*?)\s*import\s*(.*?)(\s*as\s*\S*|)$')

    def reset(self):
        # Absolute paths already collected; prevents duplicate work and
        # infinite recursion on circular imports.
        self.imports = []

    def get_imports(self, path, module_name):
        """Resolves module_name relative to path and returns its
        {filename: code} mapping (empty if absent or already seen)."""
        if path == module_name:
            # Importing the containing package itself.
            return self.make_from_package(path, module_name)
        module_file = module_name + '.py'
        module_path = os.path.normpath(os.path.join(path, module_file))
        if os.path.exists(module_path):
            module_abspath = os.path.abspath(module_path)
            if module_abspath not in self.imports:
                self.imports.append(module_abspath)
                return self.open_code(module_path, module_file)
        return dict()

    def make_code(self, file, filepath, filename):
        """Reads *file* line by line, pulling in every locally importable
        module it references; returns {filename: code} for all of them."""
        if os.path.basename(filepath) == 'setup.py':
            return self.make_from_setup_py(filepath, filename)
        files = dict()
        code = ''
        for line in file:
            m = self.IMPORT_REGEX.match(line)
            if m:
                modules = m.group(1)
                for module_name in modules.split(','):
                    files.update(self.get_imports(os.path.dirname(filepath), module_name.strip()))
            else:
                m = self.FROM_IMPORT_REGEX.match(line)
                if m:
                    module = m.group(1)
                    module_names = module.split('.')
                    if len(module_names) == 0:
                        # NOTE(review): str.split never returns an empty
                        # list, so this branch looks unreachable -- confirm.
                        files.update(self.get_imports(os.path.dirname(filepath), os.path.dirname(filepath)))
                    else:
                        module_name = os.path.join(*module_names)
                        if module.startswith('.'):
                            # Relative import: resolve against the
                            # importing file's package directory.
                            name = os.path.join(os.path.dirname(filename), module_name)
                            files.update(self.get_imports(os.path.dirname(filepath), name))
                        else:
                            files.update(self.get_imports(os.path.dirname(filepath), module_name))
            code += line
        files[filename] = code
        # print(files.keys())
        return files

    def make_from_setup_py(self, filepath, filename):
        """Parses setup.py with ast, and collects every package listed in
        its setup() call (packages= / test_suite=)."""
        files = dict()
        code = ''
        file = self.file_open(filepath, 'r')
        code = file.read()
        tree = ast.parse(code)
        setup = SetUpVisitor()
        setup.visit(tree)
        root = os.path.dirname(filepath)
        for package in setup.packages:
            module_path = os.path.join(root, package)
            files.update(self.make_from_package(module_path, package))
        files[filename] = code
        return files

    def make_from_package(self, dirpath, dirname_):
        """Recursively collects every .py file under the package directory
        dirpath, keyed by its path relative to the package root."""
        files = dict()
        dirname = os.path.normpath(dirname_)
        if dirpath in self.imports:
            return files
        if os.path.exists(dirpath):
            self.imports.append(dirpath)
            for f in os.listdir(dirpath):
                package_path = os.path.join(dirname, f)
                path = os.path.join(dirpath, f)
                if os.path.isdir(path):
                    package_codes = self.make_from_package(path, package_path)
                    files.update(package_codes)
                elif os.path.isfile(path):
                    name, ext = os.path.splitext(path)
                    if ext == '.pyc':
                        # Skip compiled byte-code files.
                        continue
                    package_codes = self.open_code(path, package_path)
                    files.update(package_codes)
        return files
class PythonCLI(CLI):
    """Command-line front end for running Python code on Wandbox.

    Arguments to CLI.__init__ are (language, default compiler,
    two boolean flags -- presumably compile/link options; confirm
    against .cli).
    """

    def __init__(self, compiler=None):
        super(PythonCLI, self).__init__('Python', compiler, False, False)

    def get_runner(self, args, options):
        # Build the runner from the parsed command-line arguments.
        return PythonRunner(args.language, args.compiler, args.save, args.encoding, args.retry, args.retry_wait)
def python(compiler=None):
    """Runs the Python CLI against the given Wandbox compiler glob."""
    cli = PythonCLI(compiler)
    cli.execute()

def main():
    # Default entry point: let the CLI pick the compiler.
    python()

# The entry points below pin specific Wandbox compiler-name patterns.

def python2():
    python('cpython-2.7-*')

def python3():
    python('cpython-*')

def pypy():
    python('pypy-*')

def pypy2():
    python('pypy-2*')

def pypy3():
    python('pypy-3*')

if __name__ == '__main__':
    main()
| srz-zumix/wandbox-api | wandbox/__python__.py | Python | mit | 5,533 | [
"VisIt"
] | 306005b162daa8b39cfd48e02763953d92ab17c88cd8a8cfd922abd5aeb9047c |
#########################################################################
# crossComptOscillator.py ---
#
# Filename: crossComptOscillator.py
# Author: Upinder S. Bhalla
# Maintainer:
# Created: Oct 12 16:26:05 2014 (+0530)
# Version:
# Last-Updated: May 15 2017
# By:
# Update #:
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log: Indentation clean up
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2014 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU Lesser General Public License version 2.1
## See the file COPYING.LIB for the full notice.
#########################################################################
import math
import pylab
import numpy
import moose
import os
import signal
# Remember our own pid so makeModel() can signal this process.
PID = os.getpid()

def doNothing( *args ):
    """No-op signal handler."""
    pass

# Install a no-op handler so the SIGUSR1 sent from makeModel() does not
# kill the process (presumably an external profiling/sync hook -- confirm).
signal.signal( signal.SIGUSR1, doNothing )
def makeModel():
    """Builds the model: a 25-voxel cylindrical mesh holding four pools
    (a, b, c, d) moved by motor transport, with Ksolve/Dsolve/Stoich
    solvers attached and initial concentrations seeded at the cylinder
    ends.
    """
    # create container for model
    r0 = 1e-6	# m
    r1 = 1e-6	# m
    num = 25
    diffLength = 1e-6 # m
    # Renamed from 'len' to avoid shadowing the builtin.
    cylLength = num * diffLength	# m
    diffConst = 1e-12 # m^2/sec
    motorConst = 1e-6 # m/sec
    concA = 1 # millimolar

    model = moose.Neutral( 'model' )
    compartment = moose.CylMesh( '/model/compartment' )
    compartment.r0 = r0
    compartment.r1 = r1
    compartment.x0 = 0
    compartment.x1 = cylLength
    compartment.diffLength = diffLength

    assert( compartment.numDiffCompts == num )

    # create molecules and reactions
    a = moose.Pool( '/model/compartment/a' )
    b = moose.Pool( '/model/compartment/b' )
    c = moose.Pool( '/model/compartment/c' )
    d = moose.Pool( '/model/compartment/d' )
    # Dead code kept for reference: optional b + d <-> c reaction.
    """
    r1 = moose.Reac( '/model/compartment/r1' )
    moose.connect( r1, 'sub', b, 'reac' )
    moose.connect( r1, 'sub', d, 'reac' )
    moose.connect( r1, 'prd', c, 'reac' )
    r1.Kf = 100 # 1/(mM.sec)
    r1.Kb = 0.01 # 1/sec
    """

    # Assign parameters: diffusion is off; transport is purely by motors.
    a.diffConst = 0.0;
    b.diffConst = 0.0;
    #b.motorRate = motorRate
    c.diffConst = 0.0;
    d.diffConst = 0.0;
    #d.diffConst = diffConst;
    # Signal ourselves (handled by the no-op SIGUSR1 handler above);
    # presumably an external profiling hook -- confirm.
    os.kill( PID, signal.SIGUSR1 )
    # a,b move toward x1; c,d move toward x0.
    a.motorConst = motorConst
    b.motorConst = motorConst
    c.motorConst = -motorConst
    d.motorConst = -motorConst

    # Make solvers
    ksolve = moose.Ksolve( '/model/compartment/ksolve' )
    dsolve = moose.Dsolve( '/model/compartment/dsolve' )
    stoich = moose.Stoich( '/model/compartment/stoich' )
    stoich.compartment = compartment
    stoich.ksolve = ksolve
    stoich.dsolve = dsolve
    stoich.path = "/model/compartment/##"
    assert( dsolve.numPools == 4 )
    # Seed each pool at one end of the cylinder.
    a.vec[0].concInit = concA * 1
    b.vec[num-1].concInit = concA * 2
    c.vec[0].concInit = concA * 3
    d.vec[num-1].concInit = concA * 4
def displayPlots():
    """Plots the concentration of each pool along the cylinder axis."""
    names = ('a', 'b', 'c', 'd')
    pools = [moose.element('/model/compartment/' + n) for n in names]
    pos = numpy.arange(0, pools[0].vec.conc.size, 1)
    for n, pool in zip(names, pools):
        pylab.plot(pos, pool.vec.conc, label=n)
    pylab.legend()
    pylab.show()
def main():
    """Runs the motor-transport model twice for `runtime` seconds,
    printing pool totals and displaying concentration profiles after
    the first run.
    """
    dt4 = 0.01
    dt5 = 0.01
    runtime = 10.0 # seconds
    # Set up clocks. The dsolver to know before assigning stoich
    moose.setClock( 4, dt4 )
    moose.setClock( 5, dt5 )

    makeModel()
    moose.useClock( 4, '/model/compartment/dsolve', 'process' )
    # Ksolve must be scheduled after dsolve.
    moose.useClock( 5, '/model/compartment/ksolve', 'process' )

    moose.reinit()
    moose.start( runtime ) # Run the model

    a = moose.element( '/model/compartment/a' )
    b = moose.element( '/model/compartment/b' )
    c = moose.element( '/model/compartment/c' )
    d = moose.element( '/model/compartment/d' )

    # Totals should be conserved by pure motor transport.
    atot = sum( a.vec.conc )
    btot = sum( b.vec.conc )
    ctot = sum( c.vec.conc )
    dtot = sum( d.vec.conc )
    print(('tot = ', atot, btot, ctot, dtot, ' (b+c)=', btot+ctot))
    displayPlots()
    moose.start( runtime ) # Run the model

    atot = sum( a.vec.conc )
    btot = sum( b.vec.conc )
    ctot = sum( c.vec.conc )
    dtot = sum( d.vec.conc )
    print(('tot = ', atot, btot, ctot, dtot, ' (b+c)=', btot+ctot))
    # quit() terminates the interpreter here, before any further cleanup.
    quit()

# Run the 'main' if this script is executed standalone.
if __name__ == '__main__':
    main()
| BhallaLab/moose | moose-examples/snippets/cylinderMotor.py | Python | gpl-3.0 | 4,461 | [
"MOOSE"
] | c7eccb493145d410a0b4741d95a4c59cb9606af6bc7a5d227bb96fb1d5724a23 |
#
# Copyright (c) 2017 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from collections import OrderedDict
import os
from commoncode.testcase import FileBasedTesting
from scancode import api
class TestAPI(FileBasedTesting):
    """Tests for the top-level scancode.api functions, using fixture
    files under tests/scancode/data."""

    test_data_dir = os.path.join(os.path.dirname(__file__), 'data')

    def test_get_package_infos_can_pickle(self):
        test_file = self.get_test_loc('api/package/package.json')
        package = api.get_package_infos(test_file)
        import pickle
        import cPickle
        # NOTE(review): if HIGHEST_PROTOCOL pickling *succeeds*, self.fail
        # raises inside the try and is swallowed by the bare except below,
        # so this test can never actually fail there -- confirm intent.
        try:
            _pickled = pickle.dumps(package, pickle.HIGHEST_PROTOCOL)
            _cpickled = cPickle.dumps(package, pickle.HIGHEST_PROTOCOL)
            self.fail('pickle.HIGHEST_PROTOCOL used to fail to pickle this data')
        except:
            _pickled = pickle.dumps(package)
            _cpickled = cPickle.dumps(package)

    def test_get_file_infos_flag_are_not_null(self):
        # note the test file is EMPTY on purpose to generate all False is_* flags
        test_dir = self.get_test_loc('api/info')
        info = api.get_file_infos(test_dir)
        is_key_values = [v for k, v in info.items() if k.startswith('is_')]
        assert all(v is not None for v in is_key_values)

    def test_get_package_infos_works_for_maven_dot_pom(self):
        test_file = self.get_test_loc('api/package/p6spy-1.3.pom')
        packages = api.get_package_infos(test_file)
        assert len(packages) == 1
        package = packages[0]
        assert package['version'] == '1.3'

    def test_get_package_infos_works_for_maven_pom_dot_xml(self):
        test_file = self.get_test_loc('api/package/pom.xml')
        packages = api.get_package_infos(test_file)
        assert len(packages) == 1
        package = packages[0]
        assert package['version'] == '1.3'

    def test_get_file_infos_include_base_name(self):
        # Despite the variable name, this fixture is a file, not a dir.
        test_dir = self.get_test_loc('api/info/test.txt')
        info = api.get_file_infos(test_dir)
        assert 'test' == info['base_name']

    def test_get_copyrights_include_copyrights_and_authors(self):
        test_file = self.get_test_loc('api/copyright/iproute.c')
        cops = list(api.get_copyrights(test_file))
        expected = [
            OrderedDict([
                (u'statements', [u'Copyright (c) 2010 Patrick McHardy']),
                (u'holders', [u'Patrick McHardy']),
                (u'authors', []),
                (u'start_line', 2), (u'end_line', 2)]),
            OrderedDict([
                (u'statements', []),
                (u'holders', []),
                (u'authors', [u'Patrick McHardy <kaber@trash.net>']),
                (u'start_line', 11), (u'end_line', 11)])
        ]
        assert expected == cops
| yashdsaraf/scancode-toolkit | tests/scancode/test_api.py | Python | apache-2.0 | 4,108 | [
"VisIt"
] | 6046f53ef236f7a0e0bedd3287225149e43eb688ac8a6c92c5c1f4cad80bea6f |
import matplotlib
matplotlib.use('Agg')
from matplotlib.pyplot import *
import localgroup
import triangle
import sklearn
from sklearn import mixture
import numpy as np
import pickle
import matplotlib.patches as mpatches
# Output directory for all generated figures.
save_path = "/afs/slac.stanford.edu/u/ki/mwillia1/Thesis/LocalGroupHaloProps/testplot/"

# Inside the Likelihood object is a "triplet" object called T, which contains an array of sample local groups, each with kinematic parameters consistent with the observational data. Let's plot these kinematic parameters in a "triangle" figure, to show all their 1 and 2-D marginal distributions.

# In[2]:

# Draw observation-consistent local-group samples (quadruplet: isPair=False).
L = localgroup.Likelihood(isPair=False)
L.generate(Nsamples=200000)

# In[3]:

# Fit a 10-component full-covariance GMM to the observational samples.
L.set_PDF(mixture.GMM(n_components=10, covariance_type='full'))
L.approximate()

# In[4]:

figure_obs = L.plot_samples(10, color='m', overlay=False)

# The above plot shows a Gaussian Mixture model fitted Gaussians. The shaded regions show two standard deviations. The samples data has been preprocessed to zero the mean and scale by standard deviation. Since we are using the Gaussian Mixture Model to model the underlying PDF of the data, more components is always better.

# # How to evaluate goodness of fit:

# Due to lack of a standard goodness of fit test for GMM's, the best we can do is graphically show that the model reproduces the data well. We proceed by drawing a set of points from the fitted model, where each point is a local group with (MW_D, MW_vr, MW_vt, M33_D, M33_vr, M33_vt). We then plot the 1D and 2D marginalizations of the drawn point set and show that the marginalizations match the marginalizations of the true data.

# In[5]:

figure_model = L.model_gof(L.T.Nsamples, color="r", fig=None)
print "made figure model: ", len(figure_model.axes)
print figure_model.axes
#figure_model.savefig("/afs/slac.stanford.edu/u/ki/mwillia1/Thesis/LocalGroupHaloProps/figmodel.pdf", dpi=800)

# In[6]:

# Overlay model-drawn samples (red) on the observational samples (magenta).
L.model_gof(L.T.Nsamples, color="r", fig=figure_obs)
red_patch = mpatches.Patch(color='red')
magenta_patch = mpatches.Patch(color='magenta')
figure_obs.legend(handles=[red_patch, magenta_patch], labels=["Model Generated", "Observation Generated"])

# In[7]:

# Bare expression left over from the notebook export (displayed the figure).
figure_obs

# The above plot shows that the points drawn from the model create a population that is very similar to the true data.

# In[8]:

figure_obs.savefig(save_path+"LGMM.pdf", dpi=800)
figure_obs.savefig(save_path+"LGMM.png", dpi=800)
# # Reading Simulation Points:

# Below we read the preconfigured files containing the Consuelo (soon to be Dark Sky) Local Group analogs into a Triplet object. We plot the marginalizations of the simulation data, which allows us to compare with the LG prior.

# In[9]:

# NOTE(review): path_cut and path point at the same file here.
path_cut = '/lustre/ki/pfs/mwillia1/LG_project/Consuelo_Boxes/All_Boxes_quad_dat_M31_larger.npy'
path = '/lustre/ki/pfs/mwillia1/LG_project/Consuelo_Boxes/All_Boxes_quad_dat_M31_larger.npy'
#path = '/afs/slac.stanford.edu/u/ki/mwillia1/Thesis/data_files/MW_M31_pairs.txt'
npoints = 200000
halo_props = ['MW_Mvir', 'M31_Mvir', 'M33_Mvir']

# In[10]:

Tr = localgroup.Triplet(isPair=False)
Tr.read_sim_points(path, npoints, halo_props, h=0.7, a=1.0, npy=True)
#Tr_cut = localgroup.Triplet(isPair=False)
#Tr_cut.read_sim_points(path_cut, npoints, halo_props, h=0.7, a=1.0, npy=True)

# In[11]:

# Move to the M31-centric kinematic frame used by the likelihood.
Tr.transform_to_M31(sim=True)
#Tr_cut.transform_to_M31(sim=True)

# In[12]:

#Tr.mass_filter('sim')

# In[13]:

# Keep analogs with D_M31 < 1 Mpc, D_M33 < 0.4 Mpc, D_LMC < 1 Mpc.
Tr.dist_filter((Tr.sim_samples[:,0] < 1) & (Tr.sim_samples[:,3] < 0.4) & (Tr.sim_samples[:,6] < 1))
#Tr_cut.dist_filter((Tr_cut.sim_samples[:,0] < 1) & (Tr_cut.sim_samples[:,3] < 0.4))

# In[14]:

# Standardize with the likelihood's means/stds so both live in the same space.
Tr.preprocess(L.samples_means, L.samples_stds, mode='sim')
#Tr_cut.preprocess(L.samples_means, L.samples_stds, mode='sim')

# In[ ]:

sim_plot = Tr.plot_kinematics('sim', L.samples_means, L.samples_stds, color='b', fig=None)

# In[28]:

#sim_plot = Tr.plot_kinematics('sim', L.samples_means, L.samples_stds, color='c', fig=None)

# In[ ]:

print Tr.M33.Mvir.shape
print Tr.LMC.Mvir.shape
# Kinematics + log-masses + concentrations form the GMM training matrix.
dat = np.transpose(np.vstack((np.transpose(Tr.sim_samples), np.log10(Tr.M31.Mvir), np.log10(Tr.MW.Mvir), np.log10(Tr.M33.Mvir), np.log10(Tr.LMC.Mvir), Tr.M31.Cvir, Tr.MW.Cvir)))
#dat = np.transpose(np.vstack((np.transpose(Tr.sim_samples), np.log10(Tr.M31.Mvir), np.log10(Tr.MW.Mvir))))
Tr.GMM(1, dat)
#dat = np.transpose(np.vstack((np.transpose(Tr_cut.sim_samples), np.log10(Tr_cut.M31.Mvir), np.log10(Tr_cut.MW.Mvir), np.log10(Tr_cut.M33.Mvir))))
#Tr_cut.GMM(20, dat)

# In[ ]:

Tr.GMM_sample(1000000, L, simple=True)
#Tr_cut.GMM_sample(20000000)

# In[ ]:

# Columns 9-14 of the GMM samples are the mass/concentration dimensions.
gmm_MW = np.copy(Tr.gmm_samples[:,10])
gmm_M31 = np.copy(Tr.gmm_samples[:,9])
gmm_M33 = np.copy(Tr.gmm_samples[:,11])
gmm_LMC = np.copy(Tr.gmm_samples[:,12])
gmm_MW_C = np.copy(Tr.gmm_samples[:,14])
gmm_M31_C = np.copy(Tr.gmm_samples[:,13])
#gmm_M33_C = np.copy(Tr.gmm_samples[:,11])
# Combined MW+M31 mass, in log10 solar masses.
gmm_LG = np.log10(np.power(10,gmm_MW) + np.power(10,gmm_M31))
#cond = gmm_MW < gmm_M31
#Tr.gmm_samples = Tr.gmm_samples[cond]
#gmm_MW = gmm_MW[cond]
#gmm_M31 = gmm_M31[cond]
#gmm_M33 = gmm_M33[cond]
#gmm_LG = gmm_LG[cond]
figure_model = L.model_gof(L.T.Nsamples, color="r", fig=None)
#print "figmod: ", len(figmod.axes)
# Keep only the 9 kinematic columns for the kinematics overlay plot.
Tr.gmm_samples = Tr.gmm_samples[:,0:9]
print "figure model len: ", len(figure_model.axes)
#print figure_model.axes
Tr.plot_kinematics('gmm', L.samples_means, L.samples_stds, color='c', fig=figure_model)
red_patch = mpatches.Patch(color='red')
cyan_patch = mpatches.Patch(color='cyan')
figure_model.legend(handles=[cyan_patch, red_patch], labels=["GMM Prior", "GMM Likelihood"], fontsize=16)
figure_model.savefig(save_path+'LvsPGMM.png', dpi=600)
figure_model.savefig(save_path+'LvsPGMM.pdf', dpi=600)
# Accumulate 20 independent GMM draws into one big (N, 15) sample array.
tot_gmm_samples = np.empty((0,15))
for i in range(20):
    Tr.GMM_sample(100000000, L, reps=1)
    temp_gmm_samples = np.copy(Tr.gmm_samples)
    r,c = tot_gmm_samples.shape
    x,y = temp_gmm_samples.shape
    # BUG FIX: grow by the x newly drawn rows (was (r+c, c), which added a
    # fixed c=15 rows and made the slice assignment below fail).
    tot_gmm_samples.resize((r+x,c))
    tot_gmm_samples[r:] = temp_gmm_samples
Tr.gmm_samples = tot_gmm_samples
# Re-extract the mass/concentration columns from the accumulated draws.
gmm_MW = np.copy(Tr.gmm_samples[:,10])
gmm_M31 = np.copy(Tr.gmm_samples[:,9])
gmm_M33 = np.copy(Tr.gmm_samples[:,11])
gmm_LMC = np.copy(Tr.gmm_samples[:,12])
gmm_MW_C = np.copy(Tr.gmm_samples[:,14])
gmm_M31_C = np.copy(Tr.gmm_samples[:,13])
gmm_LG = np.log10(np.power(10,gmm_MW) + np.power(10,gmm_M31))
# Only the kinematic columns feed the likelihood weighting below.
Tr.gmm_samples = Tr.gmm_samples[:,0:9]
Tr.compute_model_weights(L, 'gmm', normalize=True)
#Tr_cut.compute_model_weights(L, 'gmm')

# In[ ]:

# N95: number of samples carrying 95% of the total weight.
count, smallest_weight = Tr.calculate_N95(filter_samples=False)
print "N95 ", count
print "smallest weight ", smallest_weight
# Discard samples below the N95 weight threshold, everywhere.
cond = Tr.weights[:] > smallest_weight
gmm_MW = gmm_MW[cond]
gmm_M31 = gmm_M31[cond]
gmm_M33 = gmm_M33[cond]
gmm_LMC = gmm_LMC[cond]
gmm_LG = gmm_LG[cond]
gmm_MW_C = gmm_MW_C[cond]
gmm_M31_C = gmm_M31_C[cond]
Tr.gmm_samples = Tr.gmm_samples[cond]
Tr.weights = Tr.weights[cond]
#print Tr_cut.calculate_N95()
# In[18]:

# Un-standardize the GMM samples and corner-plot them (kinematics + masses
# + concentrations) against the raw CONSUELO prior for a visual GOF check.
Tr.unprocess(L.samples_means, L.samples_stds, 'gmm')
#data2 = np.transpose(np.vstack((np.transpose(Tr.gmm_samples), gmm_MW, gmm_M31, gmm_M33, gmm_MW_C, gmm_M31_C, gmm_M33_C)))
data2 = np.transpose(np.vstack((np.transpose(Tr.gmm_samples), gmm_MW, gmm_M31, gmm_M33, gmm_LMC, gmm_MW_C, gmm_M31_C)))
#labs=["mwd", "mwvr", "mwvt", "m33d", "m33vr", "m33vt", "MWMvir", "M31Mvir", "M33Mvir"]
#labs=["mwd", "mwvr", "mwvt", "m33d", "m33vr", "m33vt", "LMCd", "LMCvr", "LMCvt", "MWMvir", "M31Mvir"]
labs = ["$D^{\\rm M31} Mpc$", "$v_{\\rm rad}^{\\rm M31} km/s$", "$v_{\\rm tan}^{\\rm M31} km/s$", "$D^{\\rm M33} Mpc$", "$v_{\\rm rad}^{\\rm M33} km/s$", "$v_{\\rm tan}^{\\rm M33} km/s$","$D^{\\rm LMC} Mpc$", "$v_{\\rm rad}^{\\rm LMC} km/s$", "$v_{\\rm tan}^{\\rm LMC} km/s$", "$Mvir_{MW}$", "$Mvir_{M31}$", "$Mvir_{M33}$", "$Mvir_{LMC}$", "$Cvir_{MW}$", "$Cvir_{M31}$"]
#labs=["mwd", "mwvr", "mwvt", "m33d", "m33vr", "m33vt", "MWMvir", "M31Mvir", "M33Mvir", "MW Cvir", "M31 Cvir", "M33 Cvir"]
pl = triangle.corner(data2, labels=labs, quantiles=[0.16,0.5,0.84], fig=None, weights=None, plot_contours=True, show_titles=True, title_args={"fontsize": 16}, label_args={"fontsize": 16}, plot_datapoints=False, bins=20, color='c')
# Restore standardized form for later weighting.
Tr.preprocess(L.samples_means, L.samples_stds, mode='gmm')

# In[ ]:

Tr.unprocess(L.samples_means, L.samples_stds, mode='sim')
data = np.transpose(np.vstack((np.transpose(Tr.sim_samples), np.log10(Tr.MW.Mvir), np.log10(Tr.M31.Mvir), np.log10(Tr.M33.Mvir), np.log10(Tr.LMC.Mvir), Tr.MW.Cvir, Tr.M31.Cvir)))
labs = ["$D^{\\rm M31} Mpc$", "$v_{\\rm rad}^{\\rm M31} km/s$", "$v_{\\rm tan}^{\\rm M31} km/s$", "$D^{\\rm M33} Mpc$", "$v_{\\rm rad}^{\\rm M33} km/s$", "$v_{\\rm tan}^{\\rm M33} km/s$","$D^{\\rm LMC} Mpc$", "$v_{\\rm rad}^{\\rm LMC} km/s$", "$v_{\\rm tan}^{\\rm LMC} km/s$", "$Mvir_{MW}$", "$Mvir_{M31}$", "$Mvir_{M33}$", "$Mvir_{LMC}$", "$Cvir_{MW}$", "$Cvir_{M31}$"]
# Overlay the raw prior (blue) on the GMM-fit prior (cyan) figure.
sim_plot = triangle.corner(data, labels=labs, quantiles=[0.16,0.5,0.84], fig=pl, weights=None, plot_contours=True, show_titles=True, title_args={"fontsize": 12}, plot_datapoints=False, bins=20, color='b', label_kwargs={"fontsize": 16})
blue_patch = mpatches.Patch(color='b')
cyan_patch = mpatches.Patch(color='c')
sim_plot.legend(handles=[blue_patch, cyan_patch], labels=["CONSUELO Prior", "GMM-fit CONSUELO Prior"], fontsize=48)
Tr.preprocess(L.samples_means, L.samples_stds, mode='sim')

# In[29]:

# In[ ]:

#name = 'gmm_CONSUELO_prior.png'
sim_plot.savefig(save_path+'Q_GMMP_GOF.png', dpi=600)
sim_plot.savefig(save_path+'Q_GMMP_GOF.pdf', dpi=600)
#prior CvsM relation
# Build the prior concentration-mass relation from one Consuelo box:
# bin halos by log-mass and record the mean/std of cvir per bin.
hlist_path = '/lustre/ki/pfs/mwillia1/LG_project/Consuelo_Boxes/4001/4001hlist.npy'
hlist = np.load(hlist_path)
# Keep halos within one dex of 10^12 Msun.
hlist=hlist[np.abs(np.log10(hlist['mvir'])-12)<1.0]
cvir = hlist['rvir']/hlist['rs']
mvir = np.log10(hlist['mvir'])
bins = np.arange(np.min(mvir), np.max(mvir), .01)
dat = np.vstack((mvir, cvir)).T
# NOTE(review): this double loop is O(n_halos * n_bins); np.digitize would
# do the binning in one pass if this ever becomes a bottleneck.
conc = [[el[1] for el in dat if ((el[0] > bins[i]) & (el[0] < bins[i+1]))] for i in range(len(bins)-1)]
conc_means = np.array([np.mean(np.array(c)) for c in conc])
conc_stds = np.array([np.std(np.array(c)) for c in conc])

# In[ ]:

# Posterior mass-concentration corner for the MW, with the prior c(M)
# relation (black line +/- 1 sigma band) drawn on the joint panel.
labs = ["$Mvir_{MW}$", "$Cvir_{MW}$"]
all_mvir = np.transpose(np.vstack((gmm_MW, gmm_MW_C)))
figure = triangle.corner(all_mvir, labels=labs, quantiles=[0.16,0.5,0.84], fig=None, weights=Tr.weights, plot_contours=True, show_titles=True, title_args={"fontsize": 16}, label_args={"fontsize": 16}, plot_datapoints=False, bins=20, color='g')
#figure.suptitle("Weighted Mass Posterior PDF, GMM Prior", fontsize=16, horizontalalignment='left')
faxes = np.reshape(figure.axes, (2,2))
ax=faxes[1,0]
ax.plot(bins[1:], conc_means, lw=2, color='k')
ax.fill_between(bins[1:], conc_means+conc_stds, conc_means-conc_stds, facecolor='k', alpha=0.2)
figure.savefig(save_path+'Q_GMMP_MW_MvsC.png', dpi=800)
figure.savefig(save_path+'Q_GMMP_MW_MvsC.pdf', dpi=800)

# Same plot for M31.
labs = ["$Mvir_{M31}$", "$Cvir_{M31}$"]
all_mvir = np.transpose(np.vstack((gmm_M31, gmm_M31_C)))
figure = triangle.corner(all_mvir, labels=labs, quantiles=[0.16,0.5,0.84], fig=None, weights=Tr.weights, plot_contours=True, show_titles=True, title_args={"fontsize": 16}, label_args={"fontsize": 16}, plot_datapoints=False, bins=20, color='g')
#figure.suptitle("Weighted Mass Posterior PDF, GMM Prior", fontsize=16, horizontalalignment='left')
faxes = np.reshape(figure.axes, (2,2))
ax=faxes[1,0]
ax.plot(bins[1:], conc_means, lw=2, color='k')
ax.fill_between(bins[1:], conc_means+conc_stds, conc_means-conc_stds, facecolor='k', alpha=0.2)
figure.savefig(save_path+'Q_GMMP_M31_MvsC.png', dpi=800)
figure.savefig(save_path+'Q_GMMP_M31_MvsC.pdf', dpi=800)

# In[31]:

# Weighted posterior over all halo masses (MW, M31, M33, LMC, MW+M31).
labs = ["$Mvir_{MW}$", "$Mvir_{M31}$", "$Mvir_{M33}$", "$Mvir_{LMC}$", "$Mvir_{MW+M31}$"]
all_mvir = np.transpose(np.vstack((gmm_MW, gmm_M31, gmm_M33, gmm_LMC, gmm_LG)))
figure = triangle.corner(all_mvir, labels=labs, quantiles=[0.16,0.5,0.84], fig=None, weights=Tr.weights, plot_contours=True, show_titles=True, title_args={"fontsize": 16}, label_args={"fontsize": 16}, plot_datapoints=False, bins=20, color='g')
#figure.suptitle("Weighted Mass Posterior PDF, GMM Prior", fontsize=16, horizontalalignment='left')
figure.savefig(save_path+'Q_GMMP_all_Mvir.png', dpi=800)
figure.savefig(save_path+'Q_GMMP_all_Mvir.pdf', dpi=800)
"Gaussian"
] | 8a7444eece887c6cd1959d2038f5083e1edf5f63189e05c2c3ead05837246d10 |
import rdkit
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import Draw
from rdkit.Chem.Draw import rdMolDraw2D
from rdkit.Chem import rdDepictor
from shapely import geometry
import numpy as np
from itertools import combinations
import operator
from timeit import default_timer as timer
class Molecule(object):
"""
This class deals with the drawing of the 2D molecule in RDkit environment. Once the data has been imported and 2D
representation drawn, the coordinates of each atom are obtained from the drawing. Since it is known, which atom
is close to each plotted residue, the residues are placed in vicinity of that atom and the overlap treated by
minimising distances in 2D. The resulting 2D coordinates where each residue should be placed in the final image
are inherited by Figure class.
Takes:
* topology_data_object * - information about the system (lintools.Data object)
Initializing the object will lead to execution of the functions present in the class
providing necessary material for next steps, namely assembly of figure. This was done
since it is very unlikely that the process would ever have to be done seperately step
by step.
"""
__version__= "09.2016"
def __init__(self, topology_data_object):
    # Topology/universe container (lintools.Data object) holding the ligand
    # and the per-residue closest-atom information used for placement.
    self.topology_data = topology_data_object
    # Canvas size in px for the RDKit 2D depiction; enlarged by 20% in
    # make_new_projection_values() if the overlap minimisation times out.
    self.molsize1 = 900
    self.molsize2 = 450
    # Run the full pipeline immediately (draw, hull, overlap removal).
    self.draw_molecule()
def draw_molecule(self):
    """Run the full 2D drawing pipeline.

    Resets the per-residue bookkeeping dictionaries, renders the ligand
    with RDKit, builds a convex hull around the drawn atoms and pushes
    the residue diagrams apart until they no longer overlap.
    """
    # Per-residue results, filled in by convex_hull() and
    # make_new_projection_values().
    self.ligand_atom_coords_from_diagr={}
    self.nearest_points ={}
    self.nearest_points_projection = {}
    self.nearest_points_coords ={}
    self.coefficient ={}
    self.arc_coords=None
    self.load_molecule_in_rdkit_smiles(molSize=(int(self.molsize1),int(self.molsize2)))
    self.convex_hull()
    self.make_new_projection_values()
def load_molecule_in_rdkit_smiles(self, molSize, kekulize=True, bonds=None, bond_color=None, atom_color=None, size=None):
    """Draw the ligand in 2D with RDKit and save it as molecule.svg.

    The molecule is reloaded without hydrogens (they do not have to appear
    in the final figure) and round-tripped through SMILES, which RDKit
    appears to lay out best in 2D. Some molecules fail sanitisation; in
    that case the import is retried with sanitize switched off.

    Takes:
        * molSize * - (width, height) of the drawing canvas in px
        * kekulize * - attempt to kekulize aromatic rings (default True)
        * bonds * - bond indices to highlight (default: none)
        * bond_color * - dict of bond highlight colors (default: None)
        * atom_color * - dict of atom highlight colors (default: all white)
        * size * - dict of atom highlight radii (default: RDKit default)

    Sets:
        * self.svg * - the SVG drawing text, also written to molecule.svg
        * self.atom_identities * - original atom output order mapped to
          the atom indices of the SMILES-derived molecule
    """
    # Fix: avoid shared mutable default arguments - normalise the optional
    # collections here instead of using []/{} in the signature.
    if bonds is None:
        bonds = []
    if atom_color is None:
        atom_color = {}
    if size is None:
        size = {}
    mol_in_rdkit = self.topology_data.mol  # need to reload without hydrogens
    try:
        mol_in_rdkit = Chem.RemoveHs(mol_in_rdkit)
        self.topology_data.smiles = Chem.MolFromSmiles(Chem.MolToSmiles(mol_in_rdkit))
    except ValueError:
        # Problematic molecule - retry with sanitisation switched off.
        mol_in_rdkit = Chem.RemoveHs(mol_in_rdkit, sanitize = False)
        self.topology_data.smiles = Chem.MolFromSmiles(Chem.MolToSmiles(mol_in_rdkit), sanitize=False)
    # Map the original atom output order (recorded by RDKit while writing
    # the SMILES) onto the atom indices of the re-parsed molecule.
    self.atom_identities = {}
    i = 0
    for atom in self.topology_data.smiles.GetAtoms():
        self.atom_identities[mol_in_rdkit.GetProp('_smilesAtomOutputOrder')[1:].rsplit(",")[i]] = atom.GetIdx()
        i += 1
    mc = Chem.Mol(self.topology_data.smiles.ToBinary())
    if kekulize:
        try:
            Chem.Kekulize(mc)
        except:
            # Kekulization can fail for some aromatic systems - fall back
            # to the unkekulized form.
            mc = Chem.Mol(self.topology_data.smiles.ToBinary())
    if not mc.GetNumConformers():
        rdDepictor.Compute2DCoords(mc)
    # Highlight every atom; the default color is white so the highlights
    # are invisible unless the caller supplied explicit colors.
    atoms = []
    colors = {}
    for i in range(mol_in_rdkit.GetNumAtoms()):
        atoms.append(i)
        if len(atom_color) == 0:
            colors[i] = (1, 1, 1)
        else:
            colors = atom_color
    drawer = rdMolDraw2D.MolDraw2DSVG(int(molSize[0]), int(molSize[1]))
    drawer.DrawMolecule(mc, highlightAtoms=atoms, highlightBonds=bonds, highlightAtomColors=colors, highlightAtomRadii=size, highlightBondColors=bond_color)
    drawer.FinishDrawing()
    self.svg = drawer.GetDrawingText().replace('svg:', '')
    # Fix: the original opened the file without ever closing it; use a
    # context manager so the handle is released deterministically.
    with open("molecule.svg", "w+") as filesvg:
        filesvg.write(self.svg)
def convex_hull(self):
    """
    Draws a convex hull around ligand atoms and expands it, giving space to put diagramms on.
    This is done with the help of Shapely.geometry class. The initial convex hull the residue
    coordinates are inserted on, determines the order the coordinates are going to be moved, i.e.
    if the residue 1 is on the right side of residue 2, it will be pushed to the right, while
    residue 2 will be moved to the left.
    Also determines the 2D coordinates of all atoms in drawing and makes a list with those.
    """
    #Get coordinates of ligand atoms (needed to draw the convex hull around)
    self.ligand_atom_coords = []
    ligand_atoms = [x.name for x in self.topology_data.universe.ligand_noH.atoms]
    with open ("molecule.svg", "r") as f:
        lines = f.readlines()
        i=0
        # Each <ellipse> element in the SVG is one drawn heavy atom; they
        # appear in RDKit drawing order, so the running index i can be
        # matched against self.atom_identities.
        for line in lines:
            if line.startswith("<ellipse"):
                self.ligand_atom_coords.append([float(line.rsplit("'",10)[1]), float(line.rsplit("'",10)[3])])
                # Map the drawing position back to the original atom name.
                for atom_id in self.atom_identities:
                    if i == self.atom_identities[atom_id]:
                        self.ligand_atom_coords_from_diagr[ligand_atoms[int(atom_id)]]=[float(line.rsplit("'",10)[1]), float(line.rsplit("'",10)[3])]
                i+=1
    self.ligand_atom_coords=np.array(self.ligand_atom_coords)
    # Hull around the drawn atoms, plus a fixed-margin (130 px) outer hull
    # whose boundary length is used for wrap-around checks later on.
    self.a = geometry.MultiPoint(self.ligand_atom_coords).convex_hull
    self.b = self.a.boundary.buffer(130).convex_hull
    self.b_for_all ={}
    self.b_lenght = self.b.boundary.length
    for residue in self.topology_data.closest_atoms:
        # Each residue gets its own offset hull whose distance from the
        # ligand scales with the mean distance to its closest atoms.
        mean_distance =np.array([x[1] for x in self.topology_data.closest_atoms[residue]]).mean()
        b = self.a.boundary.parallel_offset(mean_distance*50+50,"left",join_style=2).convex_hull
        projection =[]
        projection_init = []
        for atom in self.topology_data.closest_atoms[residue]:
            point =geometry.Point((self.ligand_atom_coords_from_diagr[atom[0]][0],self.ligand_atom_coords_from_diagr[atom[0]][1]))
            # 1D position of each closest atom along the hull boundaries
            # (shapely linear referencing: project() gives arc length).
            projection.append(abs(b.boundary.project(point) % b.boundary.length))
            projection_init.append(abs(self.b.boundary.project(point) % self.b.boundary.length))
        # Check whether projections are not crossing the boundary point (i.e. end of circle) - is one number in the projection very different from any other?
        for (index1,number1), (index2,number2) in combinations(enumerate(projection),2):
            if abs(number1-number2)>b.boundary.length/2:
                proj =[]
                for atom in projection:
                    if atom == max([number1,number2]):
                        proj.append(atom-b.boundary.length)
                    else:
                        proj.append(atom)
                projection = proj
        # Same wrap-around correction for the projections on the shared
        # outer hull self.b.
        for (index1,number1), (index2,number2) in combinations(enumerate(projection_init),2):
            if abs(number1-number2)>self.b.boundary.length/2:
                proj =[]
                for atom in projection_init:
                    if atom == max([number1,number2]):
                        proj.append(atom-self.b.boundary.length)
                    else:
                        proj.append(atom)
                projection_init = proj
        # The residue is placed at the mean of its atoms' boundary positions.
        self.nearest_points_projection[residue] = np.array(projection).mean()
        self.b_for_all[residue] = np.array(projection_init).mean()
        self.nearest_points[residue] = b.boundary.interpolate(self.nearest_points_projection[residue] % b.boundary.length)
        self.nearest_points_coords[residue]=self.nearest_points[residue].x,self.nearest_points[residue].y
def calc_2d_forces(self, x1, y1, x2, y2, width):
    """Calculate the pair of 1D push forces for two diagrams in 2D space.

    Takes:
        * x1, y1 * - centre of the first diagram
        * x2, y2 * - centre of the second diagram
        * width * - minimum allowed centre-to-centre distance

    Returns a (left, right) tuple: (0, 0) when the diagrams are further
    apart than width, otherwise half the overlap with opposite signs so
    that one diagram is pushed "left" (negative) and the other "right"
    (positive) along the hull boundary.
    """
    from math import hypot
    # Euclidean distance between the two centres; hypot replaces the
    # hand-rolled abs/square/sqrt arithmetic of the original.
    distance = hypot(x1 - x2, y1 - y2)
    if distance > width:
        # Far enough apart - no force needed.
        return 0, 0
    overlap = width - distance
    return -overlap / 2, overlap / 2
def do_step(self, values, xy_values,coeff, width):
    """Calculates forces between two diagrams and pushes them apart by tenth of width"""
    # One force list per diagram, indexed like xy_values.
    forces = {k:[] for k,i in enumerate(xy_values)}
    for (index1, value1), (index2,value2) in combinations(enumerate(xy_values),2):
        # Pairwise overlap force: f[0] is negative ("left"), f[1] positive.
        f = self.calc_2d_forces(value1[0],value1[1],value2[0],value2[1],width)
        if coeff[index1] < coeff[index2]:
            if self.b_lenght-coeff[index2]<self.b_lenght/10: #a quick and dirty solution, but works
                # index2 sits within the last tenth of the hull boundary
                # (wrap-around region) - reverse the push directions.
                forces[index1].append(f[1]) # push to left (smaller projection value)
                forces[index2].append(f[0])
            else:
                #all is normal
                forces[index1].append(f[0]) # push to left (smaller projection value)
                forces[index2].append(f[1])
        else:
            if self.b_lenght-coeff[index1]<self.b_lenght/10: #a quick and dirty solution, but works
                forces[index1].append(f[0]) # push to left (smaller projection value)
                forces[index2].append(f[1])
            else:
                #if all is normal
                forces[index1].append(f[1]) # push to left (smaller projection value)
                forces[index2].append(f[0])
    # Net force per diagram; total energy measures the remaining overlap
    # and drives the convergence loop in make_new_projection_values().
    forces = {k:sum(v) for k,v in forces.items()}
    energy = sum([abs(x) for x in forces.values()])
    # Move each 1D projection by a tenth of its net force.
    return [(forces[k]/10+v) for k, v in enumerate(values)], energy
def make_new_projection_values(self,width=160):
    """Run do_step function until the diagramms have diverged from each other.
    Also determines how big the figure is going to be by calculating the borders
    from new residue coordinates. These are then added some buffer space.

    Takes:
        * width * - minimum distance (px) between two diagram centres
    """
    #Make gap between residues bigger if plots have a lot of rings - each ring after the 4th
    #give extra 12.5px space
    start = timer()
    if self.topology_data.ring_number>4:
        width = width + (self.topology_data.ring_number-4)*12.5
    values = [v for v in self.nearest_points_projection.values()]
    xy_values = [v for v in self.nearest_points_coords.values()]
    coeff_value = [v for v in self.b_for_all.values()]
    energy = 100
    # Relax the layout until the residual overlap energy is negligible.
    while energy > 0.2:
        values, energy = self.do_step(values,xy_values,coeff_value, width)
        time = timer() - start
        i=0
        xy_values =[]
        # Map the updated 1D boundary projections back to 2D coordinates.
        for residue in self.nearest_points_coords:
            b = self.a.boundary.parallel_offset(self.topology_data.closest_atoms[residue][0][1]*50+50,"left",join_style=2).convex_hull
            self.nearest_points_projection[residue] = values[i]
            self.nearest_points[residue] = b.boundary.interpolate(self.nearest_points_projection[residue] % b.boundary.length)
            self.nearest_points_coords[residue] = self.nearest_points[residue].x, self.nearest_points[residue].y
            xy_values.append(self.nearest_points_coords[residue])
            i+=1
        values = [v for v in self.nearest_points_projection.values()]
        if time>30:
            # Layout failed to converge within 30 s - enlarge the canvas
            # by 20% and restart the whole pipeline.
            self.molsize1 = self.molsize1 + self.molsize1 * 0.2 #Increase molecule svg size
            self.molsize2 = self.molsize2 + self.molsize2 * 0.2
            self.draw_molecule()
            break
    #Calculate the borders of the final image
    max_x = max(v[0] for k,v in self.nearest_points_coords.items())
    min_x = min(v[0] for k,v in self.nearest_points_coords.items())
    min_y = min(v[1] for k,v in self.nearest_points_coords.items())
    max_y = max(v[1] for k,v in self.nearest_points_coords.items())
    if min_x<0:
        self.x_dim =(max_x-min_x)+600 #600 acts as buffer
    elif max_x<self.molsize1 and min_x<0: #In case all residues are grouped on one end of the molecule
        # NOTE(review): unreachable - min_x<0 is already consumed by the
        # branch above; the condition was probably meant to be min_x>0.
        self.x_dim = (self.molsize1-min_x)+600
    elif max_x<self.molsize1 and min_x>0:
        self.x_dim = self.molsize1+600
    else:
        self.x_dim = max_x+600
    if min_y<0:
        self.y_dim = (max_y-min_y)+400 #400 acts as buffer
    elif max_y<self.molsize2 and min_y<0:
        # NOTE(review): unreachable for the same reason as the x branch.
        self.y_dim = (self.molsize2-min_y)+400
    elif max_y<self.molsize2 and min_y>0:
        self.y_dim = self.molsize2+400
    else:
        self.y_dim = max_y+400
    end = timer()
    print "Drawing molecule:"+str(end-start)
| ldomic/lintools | lintools/molecule.py | Python | gpl-3.0 | 13,282 | [
"RDKit"
] | 48d4f6d6b895bf41cd9674e323d982e774e5abcd9b61b5945c27c8c641d91a6b |
import os
import platform
import time
import json
import tempfile
import random
import string
from pwd import getpwuid
from sos.presets import (NO_PRESET, GENERIC_PRESETS, PRESETS_PATH,
PresetDefaults, DESC, NOTE, OPTS)
from sos.policies.package_managers import PackageManager
from sos.utilities import (ImporterHelper, import_module, get_human_readable,
bold)
from sos.report.plugins import IndependentPlugin, ExperimentalPlugin
from sos.options import SoSOptions
from sos import _sos as _
from textwrap import fill
def import_policy(name):
    """Import the distro policy module ``sos.policies.distros.<name>``.

    Returns the module's Policy classes, or None when no such module
    exists.
    """
    try:
        return import_module("sos.policies.distros.%s" % name, Policy)
    except ImportError:
        return None
def load(cache={}, sysroot=None, init=None, probe_runtime=True,
         remote_exec=None, remote_check=''):
    """Detect and instantiate the Policy for the current system.

    The mutable default ``cache`` is deliberate: it memoizes the loaded
    policy for the lifetime of the process, so repeated calls return the
    same Policy instance.
    """
    if 'policy' in cache:
        return cache.get('policy')
    import sos.policies.distros
    helper = ImporterHelper(sos.policies.distros)
    for module in helper.get_modules():
        # NOTE(review): import_policy() returns None on ImportError, which
        # would make this inner loop raise TypeError; there is also no
        # break, so a later matching policy overwrites an earlier match.
        for policy in import_policy(module):
            if policy.check(remote=remote_check):
                cache['policy'] = policy(sysroot=sysroot, init=init,
                                         probe_runtime=probe_runtime,
                                         remote_exec=remote_exec)
    # Fall back to the generic policy when no distro policy matched.
    if 'policy' not in cache:
        cache['policy'] = GenericPolicy()
    return cache['policy']
class Policy():
"""Policies represent distributions that sos supports, and define the way
in which sos behaves on those distributions. A policy should define at
minimum a way to identify the distribution, and a package manager to allow
for package based plugin enablement.
Policies also control preferred ContainerRuntime()'s, upload support to
default locations for distribution vendors, disclaimer text, and default
presets supported by that distribution or vendor's products.
Every Policy will also need at least one "tagging class" for plugins.
:param sysroot: Set the sysroot for the system, if not /
:type sysroot: ``str`` or ``None``
:param probe_runtime: Should the Policy try to load a ContainerRuntime
:type probe_runtime: ``bool``
:cvar distro: The name of the distribution the Policy represents
:vartype distro: ``str``
:cvar vendor: The name of the vendor producing the distribution
:vartype vendor: ``str``
:cvar vendor_urls: List of URLs for the vendor's website, or support portal
:vartype vendor_urls: ``list`` of ``tuples`` formatted
``(``description``, ``url``)``
:cvar vendor_text: Additional text to add to the banner message
:vartype vendor_text: ``str``
:cvar name_pattern: The naming pattern to be used for naming archives
generated by sos. Values of `legacy`, and `friendly`
are preset patterns. May also be set to an explicit
custom pattern, see `get_archive_name()`
:vartype name_pattern: ``str``
"""
msg = _("""\
This command will collect system configuration and diagnostic information \
from this %(distro)s system.
For more information on %(vendor)s visit:
%(vendor_urls)s
The generated archive may contain data considered sensitive and its content \
should be reviewed by the originating organization before being passed to \
any third party.
%(changes_text)s
%(vendor_text)s
""")
distro = "Unknown"
vendor = "Unknown"
vendor_urls = [('Example URL', "http://www.example.com/")]
vendor_text = ""
PATH = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
default_scl_prefix = ""
name_pattern = 'legacy'
presets = {"": PresetDefaults()}
presets_path = PRESETS_PATH
_in_container = False
def __init__(self, sysroot=None, probe_runtime=True, remote_exec=None):
    """Subclasses that choose to override this initializer should call
    super() to ensure that they get the required platform bits attached.
    super(SubClass, self).__init__(). Policies that require runtime
    tests to construct PATH must call self.set_exec_path() after
    modifying PATH in their own initializer."""
    # Cache system/hostname/release/machine from platform.uname().
    self._parse_uname()
    self.case_id = None
    self.probe_runtime = probe_runtime
    self.package_manager = PackageManager()
    # Plugin tagging classes this policy will execute; distro subclasses
    # append their own tagging classes to this list.
    self.valid_subclasses = [IndependentPlugin]
    self.remote_exec = remote_exec
    # Only reset $PATH when executing locally; remote execution relies on
    # the remote host's environment.
    if not self.remote_exec:
        self.set_exec_path()
    self.sysroot = sysroot
    self.register_presets(GENERIC_PRESETS)
def check(self, remote=''):
    """
    This function is responsible for determining if the underlying system
    is supported by this policy.
    If `remote` is provided, it should be the contents of os-release from
    a remote host, or a similar vendor-specific file that can be used in
    place of a locally available file.
    :returns: ``True`` if the Policy should be loaded, else ``False``
    :rtype: ``bool``
    """
    # The base policy matches nothing; each distro subclass overrides
    # this with a real detection test.
    return False
@property
def forbidden_paths(self):
"""This property is used to determine the list of forbidden paths
set by the policy. Note that this property will construct a
*cumulative* list based on all subclasses of a given policy.
:returns: All patterns of policy forbidden paths
:rtype: ``list``
"""
if not hasattr(self, '_forbidden_paths'):
self._forbidden_paths = []
for cls in self.__class__.__mro__:
if hasattr(cls, 'set_forbidden_paths'):
self._forbidden_paths.extend(cls.set_forbidden_paths())
return list(set(self._forbidden_paths))
@classmethod
def set_forbidden_paths(cls):
"""Use this to *append* policy-specifc forbidden paths that apply to
all plugins. Setting this classmethod on an invidual policy will *not*
override subclass-specific paths
"""
return [
'*.pyc',
'*.pyo',
'*.swp'
]
def in_container(self):
"""Are we running inside a container?
:returns: ``True`` if in a container, else ``False``
:rtype: ``bool``
"""
return self._in_container
def dist_version(self):
"""
Return the OS version
"""
pass
def get_preferred_archive(self):
"""
Return the class object of the prefered archive format for this
platform
"""
from sos.archive import TarFileArchive
return TarFileArchive
def get_archive_name(self):
"""
This function should return the filename of the archive without the
extension.
This uses the policy's `name_pattern` attribute to determine the name.
There are two pre-defined naming patterns - `legacy` and `friendly`
that give names like the following:
* legacy - `sosreport-tux.123456-20171224185433`
* friendly - `sosreport-tux-mylabel-123456-2017-12-24-ezcfcop.tar.xz`
A custom name_pattern can be used by a policy provided that it
defines name_pattern using a format() style string substitution.
Usable substitutions are:
* name - the short hostname of the system
* label - the label given by --label
* case - the case id given by --case-id or --ticker-number
* rand - a random string of 7 alpha characters
Note that if a datestamp is needed, the substring should be set
in `name_pattern` in the format accepted by ``strftime()``.
:returns: A name to be used for the archive, as expanded from
the Policy `name_pattern`
:rtype: ``str``
"""
name = self.get_local_name().split('.')[0]
case = self.case_id
label = self.commons['cmdlineopts'].label
date = ''
rand = ''.join(random.choice(string.ascii_lowercase) for x in range(7))
if self.name_pattern == 'legacy':
nstr = "sosreport-{name}{case}{date}"
case = '.' + case if case else ''
date = '-%Y%m%d%H%M%S'
elif self.name_pattern == 'friendly':
nstr = "sosreport-{name}{label}{case}{date}-{rand}"
case = '-' + case if case else ''
label = '-' + label if label else ''
date = '-%Y-%m-%d'
else:
nstr = self.name_pattern
nstr = nstr.format(
name=name,
label=label,
case=case,
date=date,
rand=rand
)
return self.sanitize_filename(time.strftime(nstr))
# for some specific binaries like "xz", we need to determine package
# providing it; that is policy specific. By default return the binary
# name itself until particular policy overwrites it
def _get_pkg_name_for_binary(self, binary):
return binary
def get_tmp_dir(self, opt_tmp_dir):
    """Return the user-requested temporary directory, falling back to
    the system default when none was given."""
    return opt_tmp_dir or tempfile.gettempdir()
def get_default_scl_prefix(self):
return self.default_scl_prefix
def match_plugin(self, plugin_classes):
"""Determine what subclass of a Plugin should be used based on the
tagging classes assigned to the Plugin
:param plugin_classes: The classes that the Plugin subclasses
:type plugin_classes: ``list``
:returns: The first subclass that matches one of the Policy's
`valid_subclasses`
:rtype: A tagging class for Plugins
"""
if len(plugin_classes) > 1:
for p in plugin_classes:
# Give preference to the first listed tagging class
# so that e.g. UbuntuPlugin is chosen over DebianPlugin
# on an Ubuntu installation.
if issubclass(p, self.valid_subclasses[0]):
return p
return plugin_classes[0]
def validate_plugin(self, plugin_class, experimental=False):
"""
Verifies that the plugin_class should execute under this policy
:param plugin_class: The tagging class being checked
:type plugin_class: A Plugin() tagging class
:returns: ``True`` if the `plugin_class` is allowed by the policy
:rtype: ``bool``
"""
valid_subclasses = [IndependentPlugin] + self.valid_subclasses
if experimental:
valid_subclasses += [ExperimentalPlugin]
return any(issubclass(plugin_class, class_) for
class_ in valid_subclasses)
def pre_work(self):
"""
This function is called prior to collection.
"""
pass
def post_work(self):
"""
This function is called after the sosreport has been generated.
"""
pass
def pkg_by_name(self, pkg):
"""Wrapper to retrieve a package from the Policy's package manager
:param pkg: The name of the package
:type pkg: ``str``
:returns: The first package that matches `pkg`
:rtype: ``str``
"""
return self.package_manager.pkg_by_name(pkg)
def _parse_uname(self):
    # Cache the local platform identity for archive naming and policy
    # checks. `processor` is unpacked but deliberately unused.
    (system, node, release,
     version, machine, processor) = platform.uname()
    self.system = system
    self.hostname = node
    self.release = release
    # NOTE(review): assumes the second word of the kernel version string
    # is "SMP" on SMP kernels - Linux-specific and brittle; confirm for
    # other platforms.
    self.smp = version.split()[1] == "SMP"
    self.machine = machine
def set_commons(self, commons):
"""Set common host data for the Policy to reference
"""
self.commons = commons
def _set_PATH(self, path):
os.environ['PATH'] = path
def set_exec_path(self):
self._set_PATH(self.PATH)
def is_root(self):
"""This method should return true if the user calling the script is
considered to be a superuser
:returns: ``True`` if user is superuser, else ``False``
:rtype: ``bool``
"""
return (os.getuid() == 0)
def get_preferred_hash_name(self):
"""Returns the string name of the hashlib-supported checksum algorithm
to use"""
return "sha256"
@classmethod
def display_help(self, section):
section.set_title('SoS Policies')
section.add_text(
'Policies help govern how SoS operates on across different distri'
'butions of Linux. They control aspects such as plugin enablement,'
' $PATH determination, how/which package managers are queried, '
'default upload specifications, and more.'
)
section.add_text(
"When SoS intializes most functions, for example %s and %s, one "
"of the first operations is to determine the correct policy to "
"load for the local system. Policies will determine the proper "
"package manager to use, any applicable container runtime(s), and "
"init systems so that SoS and report plugins can properly function"
" for collections. Generally speaking a single policy will map to"
" a single distribution; for example there are separate policies "
"for Debian, Ubuntu, RHEL, and Fedora."
% (bold('sos report'), bold('sos collect'))
)
section.add_text(
"It is currently not possible for users to directly control which "
"policy is loaded."
)
pols = {
'policies.cos': 'The Google Cloud-Optimized OS distribution',
'policies.debian': 'The Debian distribution',
'policies.redhat': ('Red Hat family distributions, not necessarily'
' including forks'),
'policies.ubuntu': 'Ubuntu/Canonical distributions'
}
seealso = section.add_section('See Also')
seealso.add_text(
"For more information on distribution policies, see below\n"
)
for pol in pols:
seealso.add_text("{:>8}{:<20}{:<30}".format(' ', pol, pols[pol]),
newline=False)
def display_results(self, archive, directory, checksum, archivestat=None,
map_file=None):
"""Display final information about a generated archive
:param archive: The name of the archive that was generated
:type archive: ``str``
:param directory: The build directory for sos if --build was used
:type directory: ``str``
:param checksum: The checksum of the archive
:type checksum: ``str``
:param archivestat: stat() information for the archive
:type archivestat: `os.stat_result`
:param map_file: If sos clean was invoked, the location of the mapping
file for this run
:type map_file: ``str``
"""
# Logging is already shutdown and all terminal output must use the
# print() call.
# make sure a report exists
if not archive and not directory:
return False
self._print()
if map_file:
self._print(_("A mapping of obfuscated elements is available at"
"\n\t%s\n" % map_file))
if archive:
self._print(_("Your sosreport has been generated and saved "
"in:\n\t%s\n") % archive, always=True)
self._print(_(" Size\t%s") %
get_human_readable(archivestat.st_size))
self._print(_(" Owner\t%s") %
getpwuid(archivestat.st_uid).pw_name)
else:
self._print(_("Your sosreport build tree has been generated "
"in:\n\t%s\n") % directory, always=True)
if checksum:
self._print(" " + self.get_preferred_hash_name() + "\t" + checksum)
self._print()
self._print(_("Please send this file to your support "
"representative."))
self._print()
def _print(self, msg=None, always=False):
    """Print ``msg`` (or a blank line when ``msg`` is falsy) unless sos
    is running in quiet mode; ``always=True`` bypasses the quiet check."""
    if not always and self.commons['cmdlineopts'].quiet:
        return
    if msg:
        print(msg)
    else:
        print()
def get_msg(self):
"""This method is used to prepare the preamble text to display to
the user in non-batch mode. If your policy sets self.distro that
text will be substituted accordingly. You can also override this
method to do something more complicated.
:returns: Formatted banner message string
:rtype: ``str``
"""
if self.commons['cmdlineopts'].allow_system_changes:
changes_text = "Changes CAN be made to system configuration."
else:
changes_text = "No changes will be made to system configuration."
width = 72
_msg = self.msg % {'distro': self.distro, 'vendor': self.vendor,
'vendor_urls': self._fmt_vendor_urls(),
'vendor_text': self.vendor_text,
'tmpdir': self.commons['tmpdir'],
'changes_text': changes_text}
_fmt = ""
for line in _msg.splitlines():
_fmt = _fmt + fill(line, width, replace_whitespace=False) + '\n'
return _fmt
def _fmt_vendor_urls(self):
    """Formats all items in the ``vendor_urls`` class attr into a usable
    string for the banner message.

    :returns: Formatted string of URLS
    :rtype: ``str``
    """
    # Pad every description to the longest one so the URLs line up.
    # Use a generator expression - building an intermediate list for
    # max() is unnecessary.
    width = max(len(v[0]) for v in self.vendor_urls)
    return "\n".join("\t{desc:<{width}} : {url}".format(
        desc=u[0], width=width, url=u[1])
        for u in self.vendor_urls
    )
def register_presets(self, presets, replace=False):
"""Add new presets to this policy object.
Merges the presets dictionary ``presets`` into this ``Policy``
object, or replaces the current presets if ``replace`` is
``True``.
``presets`` should be a dictionary mapping ``str`` preset names
to ``<class PresetDefaults>`` objects specifying the command
line defaults.
:param presets: dictionary of presets to add or replace
:param replace: replace presets rather than merge new presets.
"""
if replace:
self.presets = {}
self.presets.update(presets)
def find_preset(self, preset):
    """Find a preset profile matching the specified preset string.

    :param preset: a string containing a preset profile name.
    :returns: a matching PresetDefaults, or None when no preset matches.
    """
    # FIXME: allow fuzzy matching?
    # The original linearly scanned the keys comparing for equality,
    # which is just a dict lookup in disguise.
    return self.presets.get(preset)
def probe_preset(self):
"""Return a ``PresetDefaults`` object matching the runing host.
Stub method to be implemented by derived policy classes.
:returns: a ``PresetDefaults`` object.
"""
return self.presets[NO_PRESET]
def load_presets(self, presets_path=None):
"""Load presets from disk.
Read JSON formatted preset data from the specified path,
or the default location at ``/var/lib/sos/presets``.
:param presets_path: a directory containing JSON presets.
"""
presets_path = presets_path or self.presets_path
if not os.path.exists(presets_path):
return
for preset_path in os.listdir(presets_path):
preset_path = os.path.join(presets_path, preset_path)
with open(preset_path) as pf:
try:
preset_data = json.load(pf)
except ValueError:
continue
for preset in preset_data.keys():
pd = PresetDefaults(preset, opts=SoSOptions())
data = preset_data[preset]
pd.desc = data[DESC] if DESC in data else ""
pd.note = data[NOTE] if NOTE in data else ""
if OPTS in data:
for arg in data[OPTS]:
setattr(pd.opts, arg, data[OPTS][arg])
pd.builtin = False
self.presets[preset] = pd
def add_preset(self, name=None, desc=None, note=None, opts=SoSOptions()):
"""Add a new on-disk preset and write it to the configured
presets path.
:param preset: the new PresetDefaults to add
"""
presets_path = self.presets_path
if not name:
raise ValueError("Preset name cannot be empty")
if name in self.presets.keys():
raise ValueError("A preset with name '%s' already exists" % name)
preset = PresetDefaults(name=name, desc=desc, note=note, opts=opts)
preset.builtin = False
self.presets[preset.name] = preset
preset.write(presets_path)
def del_preset(self, name=""):
if not name or name not in self.presets.keys():
raise ValueError("Unknown profile: '%s'" % name)
preset = self.presets[name]
if preset.builtin:
raise ValueError("Cannot delete built-in preset '%s'" %
preset.name)
preset.delete(self.presets_path)
self.presets.pop(name)
class GenericPolicy(Policy):
    """This Policy will be returned if no other policy can be loaded. This
    should allow for IndependentPlugins to be executed on any system"""

    def get_msg(self):
        # Minimal banner: only the uname() system name is known here.
        # NOTE(review): the inherited Policy.msg template also references
        # %(vendor)s, %(vendor_urls)s, %(changes_text)s and %(vendor_text)s,
        # so this %-formatting would raise KeyError if ever invoked - verify.
        return self.msg % {'distro': self.system}
# vim: set et ts=4 sw=4 :
| sosreport/sos | sos/policies/__init__.py | Python | gpl-2.0 | 21,922 | [
"VisIt"
] | 3ac29f575671884f81b57bfc1759939e946d0577937f929669ac08f27232fe82 |
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1453357629.174176
__CHEETAH_genTimestamp__ = 'Thu Jan 21 15:27:09 2016'
__CHEETAH_src__ = '/home/babel/Build/Test/OpenPLi5/openpli5.0/build/tmp/work/tmnanoseplus-oe-linux/enigma2-plugin-extensions-openwebif/1+gitAUTOINC+186ea358f6-r0/git/plugin/controllers/views/web/mediaplayerfindfile.tmpl'
__CHEETAH_srcLastModified__ = 'Thu Jan 21 15:27:08 2016'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class mediaplayerfindfile(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(mediaplayerfindfile, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
_orig_filter_24877621 = _filter
filterName = u'WebSafe'
if self._CHEETAH__filters.has_key("WebSafe"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2findfile>
''')
for file in VFFSL(SL,"files",True): # generated from line 4, col 2
write(u'''\t<e2file>
\t\t<e2name>''')
_v = VFFSL(SL,"str",False)(VFFSL(SL,"file.name",True)) # u'$str($file.name)' on line 6, col 11
if _v is not None: write(_filter(_v, rawExpr=u'$str($file.name)')) # from line 6, col 11.
write(u'''</e2name>
\t\t<e2path>''')
_v = VFFSL(SL,"str",False)(VFFSL(SL,"file.path",True)) # u'$str($file.path)' on line 7, col 11
if _v is not None: write(_filter(_v, rawExpr=u'$str($file.path)')) # from line 7, col 11.
write(u'''</e2path>
\t</e2file>
''')
write(u'''</e2findfile>
''')
_filter = self._CHEETAH__currentFilter = _orig_filter_24877621
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_mediaplayerfindfile= 'respond'
## END CLASS DEFINITION
# NOTE(review): boilerplate emitted by the Cheetah template compiler.  The
# hasattr guard grafts the runtime plumbing (respond dispatch, searchList
# handling, filters) onto the generated class only once, even if this module
# is imported repeatedly.
if not hasattr(mediaplayerfindfile, '_initCheetahAttributes'):
    templateAPIClass = getattr(mediaplayerfindfile, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(mediaplayerfindfile)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    # Render the template once and write the result to stdout.
    CmdLineIface(templateObj=mediaplayerfindfile()).run()
| MOA-2011/e2openplugin-OpenWebif | plugin/controllers/views/web/mediaplayerfindfile.py | Python | gpl-2.0 | 5,500 | [
"VisIt"
] | 7081495685e9023a13c450b49f2c5d30a78dc7c1328400fe0d6f449114f9fcab |
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''Wigner rotation D-matrix for real spherical harmonics'''
from math import sqrt
from functools import reduce
import numpy
from scipy.special import factorial
from pyscf.symm import sph
def Dmatrix(l, alpha, beta, gamma, reorder_p=False):
    '''Wigner rotation matrix D_{mm'} = <lm|R(alpha,beta,gamma)|lm'> for the
    real spherical harmonics; alpha, beta, gamma are Euler angles in the
    z-y-z convention.

    Kwargs:
        reorder_p (bool): Whether to put the p functions in the (x,y,z) order.
    '''
    if l == 0:
        return numpy.eye(1)

    m = numpy.arange(-l, l + 1)
    # Complex D-matrix: z-rotation phase factors sandwich the small-d matrix
    # of the middle y-rotation (broadcast row/column phases elementwise).
    D = (numpy.exp(-1j * alpha * m)[:, None]
         * dmatrix(l, beta, reorder_p=False)
         * numpy.exp(-1j * gamma * m)[None, :])
    # Convert to the real-spherical-harmonic representation.
    D = _dmat_to_real(l, D, reorder_p=False)
    if reorder_p and l == 1:
        D = D[[2, 0, 1]][:, [2, 0, 1]]
    return D
def _dmat_to_real(l, d, reorder_p=False):
    '''Transform a D-matrix given in the pure (complex) spherical-harmonic
    basis into the representation used by the real spherical harmonics.

    Kwargs:
        reorder_p (bool): Whether to put the p functions in the (x,y,z) order.
    '''
    # With U the unitary mapping complex harmonics to real harmonics, the
    # real representation is U^dagger * d * U; the product is real-valued
    # by construction, so only the real part is returned.
    u = sph.sph_pure2real(l, reorder_p)
    return u.conj().T.dot(d).dot(u).real
def dmatrix(l, beta, reorder_p=False):
    '''Wigner small-d matrix d^l_{m m'}(beta) (z-y-z convention).

    Kwargs:
        reorder_p (bool): For l=1 only, return the matrix in the Cartesian
            (x, y, z) order instead of the m = -1, 0, +1 order.
    '''
    c = numpy.cos(beta / 2)
    s = numpy.sin(beta / 2)
    if l == 0:
        return numpy.eye(1)

    if l == 1:
        mat = numpy.array(((c**2        , sqrt(2)*c*s , s**2       ),
                           (-sqrt(2)*c*s, c**2 - s**2 , sqrt(2)*c*s),
                           (s**2        , -sqrt(2)*c*s, c**2       )))
        if reorder_p:
            mat = mat[[2, 0, 1]][:, [2, 0, 1]]
        return mat

    if l == 2:
        # Closed-form 5x5 matrix in powers of cos(beta/2) and sin(beta/2).
        c3s = c**3 * s
        s3c = s**3 * c
        c2s2 = (c * s)**2
        c4 = c**4
        s4 = s**4
        s631 = sqrt(6) * (c3s - s3c)
        s622 = sqrt(6) * c2s2
        c4s2 = c4 - 3 * c2s2
        c2s4 = 3 * c2s2 - s4
        c4s4 = c4 - 4 * c2s2 + s4
        return numpy.array((( c4    , 2*c3s, s622, 2*s3c, s4   ),
                            (-2*c3s , c4s2 , s631, c2s4 , 2*s3c),
                            ( s622  ,-s631 , c4s4, s631 , s622 ),
                            (-2*s3c , c2s4 ,-s631, c4s2 , 2*c3s),
                            ( s4    ,-2*s3c, s622,-2*c3s, c4   )))

    # General l: Wigner's explicit finite sum over k, evaluated with
    # precomputed power and factorial tables.
    fac = factorial(numpy.arange(2 * l + 1))
    cpow = c ** numpy.arange(2 * l + 1)
    spow = s ** numpy.arange(2 * l + 1)
    mat = numpy.zeros((2 * l + 1, 2 * l + 1))
    for i in range(2 * l + 1):
        m1 = i - l
        for j in range(2 * l + 1):
            m2 = j - l
            # Valid k keep every factorial argument non-negative; the range
            # may be empty, in which case the entry is zero.
            k = numpy.arange(max(m2 - m1, 0), min(l + m2, l - m1) + 1)
            terms = (cpow[2*l + m2 - m1 - 2*k] * spow[m1 - m2 + 2*k]
                     / (fac[l + m2 - k] * fac[k]
                        * fac[m1 - m2 + k] * fac[l - m1 - k]))
            signs = 1 - 2 * ((m1 + m2 + k) & 0b1)  # (-1)**(m1+m2+k)
            mat[i, j] = (signs * terms).sum()
    m = numpy.arange(-l, l + 1)
    scale = numpy.sqrt(fac[l + m] * fac[l - m])
    mat *= numpy.outer(scale, scale)
    return mat
def get_euler_angles(c1, c2):
    '''Find the three Euler angles (alpha, beta, gamma in z-y-z convention)
    that rotate coordinates c1 to coordinates c2.

    When c1 and c2 are two 3x3 orthonormal frames (rows are the x, y, z
    axes), the returned angles satisfy

    yp  = numpy.einsum('j,kj->k', c1[1], geom.rotation_mat(c1[2], beta))
    tmp = numpy.einsum('ij,kj->ik', c1 , geom.rotation_mat(c1[2], alpha))
    tmp = numpy.einsum('ij,kj->ik', tmp, geom.rotation_mat(yp , beta ))
    c2  = numpy.einsum('ij,kj->ik', tmp, geom.rotation_mat(c2[2], gamma))

    (For backward compatibility) if c1 and c2 are two points in the real
    space, the Euler angles define the rotation that transforms the old
    coordinates to the new coordinates (new_x, new_y, new_z) in which c1 is
    identical to c2:

    tmp = numpy.einsum('j,kj->k', c1 , geom.rotation_mat((0,0,1), gamma))
    tmp = numpy.einsum('j,kj->k', tmp, geom.rotation_mat((0,1,0), beta) )
    c2  = numpy.einsum('j,kj->k', tmp, geom.rotation_mat((0,0,1), alpha))
    '''
    c1 = numpy.asarray(c1)
    c2 = numpy.asarray(c2)
    # Dot products of (near-)unit vectors can drift slightly outside [-1, 1]
    # from floating-point rounding; clip before arccos so (anti)parallel
    # axes cannot produce NaN angles.
    def _acos(x):
        return numpy.arccos(numpy.clip(x, -1.0, 1.0))
    if c1.ndim == 2 and c2.ndim == 2:
        zz = c1[2].dot(c2[2])
        beta = _acos(zz)
        if abs(zz) < 1 - 1e-12:
            # Line of nodes: intersection of the two x-y planes.
            yp = numpy.cross(c1[2], c2[2])
            yp /= numpy.linalg.norm(yp)
        else:
            # z axes (anti)parallel: the line of nodes is degenerate, so
            # fall back to the y axis of the first frame.
            yp = c1[1]
        yy = yp.dot(c1[1])
        alpha = _acos(yy)
        # Fix the sign of each angle from the orientation of the cross
        # product relative to the corresponding z axis.
        if numpy.cross(c1[1], yp).dot(c1[2]) < 0:
            alpha = -alpha
        gamma = _acos(yp.dot(c2[1]))
        if numpy.cross(yp, c2[1]).dot(c2[2]) < 0:
            gamma = -gamma
    else:  # For backward compatibility, c1 and c2 are two points
        norm1 = numpy.linalg.norm(c1)
        norm2 = numpy.linalg.norm(c2)
        # A pure rotation preserves length; the two points must lie on the
        # same sphere.
        assert(abs(norm1 - norm2) < 1e-12)
        xy_norm = numpy.linalg.norm(c1[:2])
        if xy_norm > 1e-12:
            gamma = -_acos(c1[0] / xy_norm)
        else:
            # c1 on the z axis: its azimuth is undefined, take 0.
            gamma = 0
        xy_norm = numpy.linalg.norm(c2[:2])
        if xy_norm > 1e-12:
            alpha = _acos(c2[0] / xy_norm)
        else:
            # c2 on the z axis: its azimuth is undefined, take 0.
            alpha = 0
        # Difference of the two polar angles.
        beta = _acos(c2[2]/norm1) - _acos(c1[2]/norm2)
    return alpha, beta, gamma
| gkc1000/pyscf | pyscf/symm/Dmatrix.py | Python | apache-2.0 | 6,569 | [
"PySCF"
] | b885298aae7bfdcb99edb9e5b8d0e15b1b8e841cbba976b462ffb7d7179ff58d |
#!/usr/bin/env python
# coding: utf-8
# # Hartree-Fock methods
#
#
# ## Why Hartree-Fock? Derivation of Hartree-Fock equations in coordinate space
#
# Hartree-Fock (HF) theory is an algorithm for finding an approximative expression for the ground state of a given Hamiltonian. The basic ingredients are
# * Define a single-particle basis $\{\psi_{\alpha}\}$ so that
# $$
# \hat{h}^{\mathrm{HF}}\psi_{\alpha} = \varepsilon_{\alpha}\psi_{\alpha}
# $$
# with the Hartree-Fock Hamiltonian defined as
# $$
# \hat{h}^{\mathrm{HF}}=\hat{t}+\hat{u}_{\mathrm{ext}}+\hat{u}^{\mathrm{HF}}
# $$
# * The term $\hat{u}^{\mathrm{HF}}$ is a single-particle potential to be determined by the HF algorithm.
#
# * The HF algorithm means to choose $\hat{u}^{\mathrm{HF}}$ in order to have
# $$
# \langle \hat{H} \rangle = E^{\mathrm{HF}}= \langle \Phi_0 | \hat{H}|\Phi_0 \rangle
# $$
# that is to find a local minimum with a Slater determinant $\Phi_0$ being the ansatz for the ground state.
# * The variational principle ensures that $E^{\mathrm{HF}} \ge E_0$, with $E_0$ the exact ground state energy.
#
# We will show that the Hartree-Fock Hamiltonian $\hat{h}^{\mathrm{HF}}$ equals our definition of the operator $\hat{f}$ discussed in connection with the new definition of the normal-ordered Hamiltonian (see later lectures), that is we have, for a specific matrix element
# $$
# \langle p |\hat{h}^{\mathrm{HF}}| q \rangle =\langle p |\hat{f}| q \rangle=\langle p|\hat{t}+\hat{u}_{\mathrm{ext}}|q \rangle +\sum_{i\le F} \langle pi | \hat{V} | qi\rangle_{AS},
# $$
# meaning that
# $$
# \langle p|\hat{u}^{\mathrm{HF}}|q\rangle = \sum_{i\le F} \langle pi | \hat{V} | qi\rangle_{AS}.
# $$
# The so-called Hartree-Fock potential $\hat{u}^{\mathrm{HF}}$ brings an explicit medium dependence due to the summation over all single-particle states below the Fermi level $F$. It brings also in an explicit dependence on the two-body interaction (in nuclear physics we can also have complicated three- or higher-body forces). The two-body interaction, with its contribution from the other bystanding fermions, creates an effective mean field in which a given fermion moves, in addition to the external potential $\hat{u}_{\mathrm{ext}}$ which confines the motion of the fermion. For systems like nuclei, there is no external confining potential. Nuclei are examples of self-bound systems, where the binding arises due to the intrinsic nature of the strong force. For nuclear systems thus, there would be no external one-body potential in the Hartree-Fock Hamiltonian.
#
# ## Variational Calculus and Lagrangian Multipliers
#
# The calculus of variations involves
# problems where the quantity to be minimized or maximized is an integral.
#
# In the general case we have an integral of the type
# $$
# E[\Phi]= \int_a^b f(\Phi(x),\frac{\partial \Phi}{\partial x},x)dx,
# $$
# where $E$ is the quantity which is sought minimized or maximized.
# The problem is that although $f$ is a function of the variables $\Phi$, $\partial \Phi/\partial x$ and $x$, the exact dependence of
# $\Phi$ on $x$ is not known. This means again that even though the integral has fixed limits $a$ and $b$, the path of integration is
# not known. In our case the unknown quantities are the single-particle wave functions and we wish to choose an integration path which makes
# the functional $E[\Phi]$ stationary. This means that we want to find minima, or maxima or saddle points. In physics we search normally for minima.
# Our task is therefore to find the minimum of $E[\Phi]$ so that its variation $\delta E$ is zero subject to specific
# constraints. In our case the constraints appear as the integral which expresses the orthogonality of the single-particle wave functions.
# The constraints can be treated via the technique of Lagrangian multipliers
#
# Let us specialize to the expectation value of the energy for one particle in three-dimensions.
# This expectation value reads
# $$
# E=\int dxdydz \psi^*(x,y,z) \hat{H} \psi(x,y,z),
# $$
# with the constraint
# $$
# \int dxdydz \psi^*(x,y,z) \psi(x,y,z)=1,
# $$
# and a Hamiltonian
# $$
# \hat{H}=-\frac{1}{2}\nabla^2+V(x,y,z).
# $$
# We will, for the sake of notational convenience, skip the variables $x,y,z$ below, and write for example $V(x,y,z)=V$.
#
# The integral involving the kinetic energy can be written as, with the function $\psi$ vanishing
# strongly for large values of $x,y,z$ (given here by the limits $a$ and $b$),
# $$
# \int_a^b dxdydz \psi^* \left(-\frac{1}{2}\nabla^2\right) \psi dxdydz = \psi^*\nabla\psi|_a^b+\int_a^b dxdydz\frac{1}{2}\nabla\psi^*\nabla\psi.
# $$
# We will drop the limits $a$ and $b$ in the remaining discussion.
# Inserting this expression into the expectation value for the energy and taking the variational minimum we obtain
# $$
# \delta E = \delta \left\{\int dxdydz\left( \frac{1}{2}\nabla\psi^*\nabla\psi+V\psi^*\psi\right)\right\} = 0.
# $$
# The constraint appears in integral form as
# $$
# \int dxdydz \psi^* \psi=\mathrm{constant},
# $$
# and multiplying with a Lagrangian multiplier $\lambda$ and taking the variational minimum we obtain the final variational equation
# $$
# \delta \left\{\int dxdydz\left( \frac{1}{2}\nabla\psi^*\nabla\psi+V\psi^*\psi-\lambda\psi^*\psi\right)\right\} = 0.
# $$
# We introduce the function $f$
# $$
# f = \frac{1}{2}\nabla\psi^*\nabla\psi+V\psi^*\psi-\lambda\psi^*\psi=
# \frac{1}{2}(\psi^*_x\psi_x+\psi^*_y\psi_y+\psi^*_z\psi_z)+V\psi^*\psi-\lambda\psi^*\psi,
# $$
# where we have skipped the dependence on $x,y,z$ and introduced the shorthand $\psi_x$, $\psi_y$ and $\psi_z$ for the various derivatives.
#
# For $\psi^*$ the Euler-Lagrange equations yield
# $$
# \frac{\partial f}{\partial \psi^*}- \frac{\partial }{\partial x}\frac{\partial f}{\partial \psi^*_x}-\frac{\partial }{\partial y}\frac{\partial f}{\partial \psi^*_y}-\frac{\partial }{\partial z}\frac{\partial f}{\partial \psi^*_z}=0,
# $$
# which results in
# $$
# -\frac{1}{2}(\psi_{xx}+\psi_{yy}+\psi_{zz})+V\psi=\lambda \psi.
# $$
# We can then identify the Lagrangian multiplier as the energy of the system. The last equation is
# nothing but the standard
# Schroedinger equation and the variational approach discussed here provides
# a powerful method for obtaining approximate solutions of the wave function.
#
#
#
# ## Derivation of Hartree-Fock equations in coordinate space
#
# Let us denote the ground state energy by $E_0$. According to the
# variational principle we have
# $$
# E_0 \le E[\Phi] = \int \Phi^*\hat{H}\Phi d\mathbf{\tau}
# $$
# where $\Phi$ is a trial function which we assume to be normalized
# $$
# \int \Phi^*\Phi d\mathbf{\tau} = 1,
# $$
# where we have used the shorthand $d\mathbf{\tau}=dx_1dx_2\dots dx_A$.
#
#
#
#
# In the Hartree-Fock method the trial function is a Slater
# determinant which can be rewritten as
# $$
# \Psi(x_1,x_2,\dots,x_A,\alpha,\beta,\dots,\nu) = \frac{1}{\sqrt{A!}}\sum_{P} (-)^PP\psi_{\alpha}(x_1)
# \psi_{\beta}(x_2)\dots\psi_{\nu}(x_A)=\sqrt{A!}\hat{A}\Phi_H,
# $$
# where we have introduced the anti-symmetrization operator $\hat{A}$ defined by the
# summation over all possible permutations *p* of two fermions.
# It is defined as
# $$
# \hat{A} = \frac{1}{A!}\sum_{p} (-)^p\hat{P},
# $$
# with the the Hartree-function given by the simple product of all possible single-particle function
# $$
# \Phi_H(x_1,x_2,\dots,x_A,\alpha,\beta,\dots,\nu) =
# \psi_{\alpha}(x_1)
# \psi_{\beta}(x_2)\dots\psi_{\nu}(x_A).
# $$
# Our functional is written as
# $$
# E[\Phi] = \sum_{\mu=1}^A \int \psi_{\mu}^*(x_i)\hat{h}_0(x_i)\psi_{\mu}(x_i) dx_i
# + \frac{1}{2}\sum_{\mu=1}^A\sum_{\nu=1}^A
# \left[ \int \psi_{\mu}^*(x_i)\psi_{\nu}^*(x_j)\hat{v}(r_{ij})\psi_{\mu}(x_i)\psi_{\nu}(x_j)dx_idx_j- \int \psi_{\mu}^*(x_i)\psi_{\nu}^*(x_j)
# \hat{v}(r_{ij})\psi_{\nu}(x_i)\psi_{\mu}(x_j)dx_idx_j\right]
# $$
# The more compact version reads
# $$
# E[\Phi]
# = \sum_{\mu}^A \langle \mu | \hat{h}_0 | \mu\rangle+ \frac{1}{2}\sum_{\mu\nu}^A\left[\langle \mu\nu |\hat{v}|\mu\nu\rangle-\langle \nu\mu |\hat{v}|\mu\nu\rangle\right].
# $$
# Since the interaction is invariant under the interchange of two particles it means for example that we have
# $$
# \langle \mu\nu|\hat{v}|\mu\nu\rangle = \langle \nu\mu|\hat{v}|\nu\mu\rangle,
# $$
# or in the more general case
# $$
# \langle \mu\nu|\hat{v}|\sigma\tau\rangle = \langle \nu\mu|\hat{v}|\tau\sigma\rangle.
# $$
# The direct and exchange matrix elements can be brought together if we define the antisymmetrized matrix element
# $$
# \langle \mu\nu|\hat{v}|\mu\nu\rangle_{AS}= \langle \mu\nu|\hat{v}|\mu\nu\rangle-\langle \mu\nu|\hat{v}|\nu\mu\rangle,
# $$
# or for a general matrix element
# $$
# \langle \mu\nu|\hat{v}|\sigma\tau\rangle_{AS}= \langle \mu\nu|\hat{v}|\sigma\tau\rangle-\langle \mu\nu|\hat{v}|\tau\sigma\rangle.
# $$
# It has the symmetry property
# $$
# \langle \mu\nu|\hat{v}|\sigma\tau\rangle_{AS}= -\langle \mu\nu|\hat{v}|\tau\sigma\rangle_{AS}=-\langle \nu\mu|\hat{v}|\sigma\tau\rangle_{AS}.
# $$
# The antisymmetric matrix element is also hermitian, implying
# $$
# \langle \mu\nu|\hat{v}|\sigma\tau\rangle_{AS}= \langle \sigma\tau|\hat{v}|\mu\nu\rangle_{AS}.
# $$
# With these notations we rewrite the Hartree-Fock functional as
# <!-- Equation labels as ordinary links -->
# <div id="H2Expectation2"></div>
#
# $$
# \begin{equation}
# \int \Phi^*\hat{H_I}\Phi d\mathbf{\tau}
# = \frac{1}{2}\sum_{\mu=1}^A\sum_{\nu=1}^A \langle \mu\nu|\hat{v}|\mu\nu\rangle_{AS}. \label{H2Expectation2} \tag{1}
# \end{equation}
# $$
# Adding the contribution from the one-body operator $\hat{H}_0$ to
# ([1](#H2Expectation2)) we obtain the energy functional
# <!-- Equation labels as ordinary links -->
# <div id="FunctionalEPhi"></div>
#
# $$
# \begin{equation}
# E[\Phi]
# = \sum_{\mu=1}^A \langle \mu | h | \mu \rangle +
# \frac{1}{2}\sum_{{\mu}=1}^A\sum_{{\nu}=1}^A \langle \mu\nu|\hat{v}|\mu\nu\rangle_{AS}. \label{FunctionalEPhi} \tag{2}
# \end{equation}
# $$
# In our coordinate space derivations below we will spell out the Hartree-Fock equations in terms of their integrals.
#
#
#
#
# If we generalize the Euler-Lagrange equations to more variables
# and introduce $N^2$ Lagrange multipliers which we denote by
# $\epsilon_{\mu\nu}$, we can write the variational equation for the functional of $E$
# $$
# \delta E - \sum_{\mu\nu}^A \epsilon_{\mu\nu} \delta
# \int \psi_{\mu}^* \psi_{\nu} = 0.
# $$
# For the orthogonal wave functions $\psi_{i}$ this reduces to
# $$
# \delta E - \sum_{\mu=1}^A \epsilon_{\mu} \delta
# \int \psi_{\mu}^* \psi_{\mu} = 0.
# $$
# Variation with respect to the single-particle wave functions $\psi_{\mu}$ yields then
#
# $$
# \sum_{\mu=1}^A \int \psi_{\mu}^*\hat{h_0}(x_i)\delta\psi_{\mu}
# dx_i
# + \frac{1}{2}\sum_{{\mu}=1}^A\sum_{{\nu}=1}^A \left[ \int
# \psi_{\mu}^*\psi_{\nu}^*\hat{v}(r_{ij})\delta\psi_{\mu}\psi_{\nu} dx_idx_j- \int
# \psi_{\mu}^*\psi_{\nu}^*\hat{v}(r_{ij})\psi_{\nu}\delta\psi_{\mu}
# dx_idx_j \right]- \sum_{{\mu}=1}^A E_{\mu} \int \delta\psi_{\mu}^*
# \psi_{\mu}dx_i
# - \sum_{{\mu}=1}^A E_{\mu} \int \psi_{\mu}^*
# \delta\psi_{\mu}dx_i = 0.
# $$
# Although the variations $\delta\psi$ and $\delta\psi^*$ are not
# independent, they may in fact be treated as such, so that the
# terms dependent on either $\delta\psi$ and $\delta\psi^*$ individually
# may be set equal to zero. To see this, simply
# replace the arbitrary variation $\delta\psi$ by $i\delta\psi$, so that
# $\delta\psi^*$ is replaced by $-i\delta\psi^*$, and combine the two
# equations. We thus arrive at the Hartree-Fock equations
# <!-- Equation labels as ordinary links -->
# <div id="eq:hartreefockcoordinatespace"></div>
#
# $$
# \begin{equation}
# \left[ -\frac{1}{2}\nabla_i^2+ \sum_{\nu=1}^A\int \psi_{\nu}^*(x_j)\hat{v}(r_{ij})\psi_{\nu}(x_j)dx_j \right]\psi_{\mu}(x_i) - \left[ \sum_{{\nu}=1}^A \int\psi_{\nu}^*(x_j)\hat{v}(r_{ij})\psi_{\mu}(x_j) dx_j\right] \psi_{\nu}(x_i) = \epsilon_{\mu} \psi_{\mu}(x_i). \label{eq:hartreefockcoordinatespace} \tag{3}
# \end{equation}
# $$
# Notice that the integration $\int dx_j$ implies an
# integration over the spatial coordinates $\mathbf{r_j}$ and a summation
# over the spin-coordinate of fermion $j$. We note that the factor of $1/2$ in front of the sum involving the two-body interaction, has been removed. This is due to the fact that we need to vary both $\delta\psi_{\mu}^*$ and
# $\delta\psi_{\nu}^*$. Using the symmetry properties of the two-body interaction and interchanging $\mu$ and $\nu$
# as summation indices, we obtain two identical terms.
#
#
#
#
# The two first terms in the last equation are the one-body kinetic energy and the
# electron-nucleus potential. The third or *direct* term is the averaged electronic repulsion of the other
# electrons. As written, the
# term includes the *self-interaction* of
# electrons when $\mu=\nu$. The self-interaction is cancelled in the fourth
# term, or the *exchange* term. The exchange term results from our
# inclusion of the Pauli principle and the assumed determinantal form of
# the wave-function. Equation ([3](#eq:hartreefockcoordinatespace)), in addition to the kinetic energy and the attraction from the atomic nucleus that confines the motion of a single electron, represents now the motion of a single-particle modified by the two-body interaction. The additional contribution to the Schroedinger equation due to the two-body interaction, represents a mean field set up by all the other bystanding electrons, the latter given by the sum over all single-particle states occupied by $N$ electrons.
#
# The Hartree-Fock equation is an example of an integro-differential equation. These equations involve repeated calculations of integrals, in addition to the solution of a set of coupled differential equations.
# The Hartree-Fock equations can also be rewritten in terms of an eigenvalue problem. The solution of an eigenvalue problem represents often a more practical algorithm and the solution of coupled integro-differential equations.
# This alternative derivation of the Hartree-Fock equations is given below.
#
#
#
#
# ## Analysis of Hartree-Fock equations in coordinate space
#
# A theoretically convenient form of the
# Hartree-Fock equation is to regard the direct and exchange operator
# defined through
# $$
# V_{\mu}^{d}(x_i) = \int \psi_{\mu}^*(x_j)
# \hat{v}(r_{ij})\psi_{\mu}(x_j) dx_j
# $$
# and
# $$
# V_{\mu}^{ex}(x_i) g(x_i)
# = \left(\int \psi_{\mu}^*(x_j)
# \hat{v}(r_{ij})g(x_j) dx_j
# \right)\psi_{\mu}(x_i),
# $$
# respectively.
#
#
#
#
# The function $g(x_i)$ is an arbitrary function,
# and by the substitution $g(x_i) = \psi_{\nu}(x_i)$
# we get
# $$
# V_{\mu}^{ex}(x_i) \psi_{\nu}(x_i)
# = \left(\int \psi_{\mu}^*(x_j)
# \hat{v}(r_{ij})\psi_{\nu}(x_j)
# dx_j\right)\psi_{\mu}(x_i).
# $$
# We may then rewrite the Hartree-Fock equations as
# $$
# \hat{h}^{HF}(x_i) \psi_{\nu}(x_i) = \epsilon_{\nu}\psi_{\nu}(x_i),
# $$
# with
# $$
# \hat{h}^{HF}(x_i)= \hat{h}_0(x_i) + \sum_{\mu=1}^AV_{\mu}^{d}(x_i) -
# \sum_{\mu=1}^AV_{\mu}^{ex}(x_i),
# $$
# and where $\hat{h}_0(i)$ is the one-body part. The latter is normally chosen as a part which yields solutions in closed form. The harmonic oscillator is a classical example thereof.
# We normally rewrite the last equation as
# $$
# \hat{h}^{HF}(x_i)= \hat{h}_0(x_i) + \hat{u}^{HF}(x_i).
# $$
# ## Hartree-Fock by varying the coefficients of a wave function expansion
#
# Another possibility is to expand the single-particle functions in a known basis and vary the coefficients,
# that is, the new single-particle wave function is written as a linear expansion
# in terms of a fixed chosen orthogonal basis (for example the well-known harmonic oscillator functions or the hydrogen-like functions etc).
# We define our new Hartree-Fock single-particle basis by performing a unitary transformation
# on our previous basis (labelled with greek indices) as
# <!-- Equation labels as ordinary links -->
# <div id="eq:newbasis"></div>
#
# $$
# \begin{equation}
# \psi_p^{HF} = \sum_{\lambda} C_{p\lambda}\phi_{\lambda}. \label{eq:newbasis} \tag{4}
# \end{equation}
# $$
# In this case we vary the coefficients $C_{p\lambda}$. If the basis has infinitely many solutions, we need
# to truncate the above sum. We assume that the basis $\phi_{\lambda}$ is orthogonal.
#
#
#
#
# It is normal to choose a single-particle basis defined as the eigenfunctions
# of parts of the full Hamiltonian. The typical situation consists of the solutions of the one-body part of the Hamiltonian, that is we have
# $$
# \hat{h}_0\phi_{\lambda}=\epsilon_{\lambda}\phi_{\lambda}.
# $$
# The single-particle wave functions $\phi_{\lambda}(\mathbf{r})$, defined by the quantum numbers $\lambda$ and $\mathbf{r}$
# are defined as the overlap
# $$
# \phi_{\lambda}(\mathbf{r}) = \langle \mathbf{r} | \lambda \rangle .
# $$
# In deriving the Hartree-Fock equations, we will expand the single-particle functions in a known basis and vary the coefficients,
# that is, the new single-particle wave function is written as a linear expansion
# in terms of a fixed chosen orthogonal basis (for example the well-known harmonic oscillator functions or the hydrogen-like functions etc).
#
# We stated that a unitary transformation keeps the orthogonality. To see this consider first a basis of vectors $\mathbf{v}_i$,
# $$
# \mathbf{v}_i = \begin{bmatrix} v_{i1} \\ \dots \\ \dots \\v_{in} \end{bmatrix}
# $$
# We assume that the basis is orthogonal, that is
# $$
# \mathbf{v}_j^T\mathbf{v}_i = \delta_{ij}.
# $$
# An orthogonal or unitary transformation
# $$
# \mathbf{w}_i=\mathbf{U}\mathbf{v}_i,
# $$
# preserves the dot product and orthogonality since
# $$
# \mathbf{w}_j^T\mathbf{w}_i=(\mathbf{U}\mathbf{v}_j)^T\mathbf{U}\mathbf{v}_i=\mathbf{v}_j^T\mathbf{U}^T\mathbf{U}\mathbf{v}_i= \mathbf{v}_j^T\mathbf{v}_i = \delta_{ij}.
# $$
# This means that if the coefficients $C_{p\lambda}$ belong to a unitary or orthogonal trasformation (using the Dirac bra-ket notation)
# $$
# \vert p\rangle = \sum_{\lambda} C_{p\lambda}\vert\lambda\rangle,
# $$
# orthogonality is preserved, that is $\langle \alpha \vert \beta\rangle = \delta_{\alpha\beta}$
# and $\langle p \vert q\rangle = \delta_{pq}$.
#
# This property is extremely useful when we build up a basis of many-body Slater-determinant based states.
#
# **Note also that although a basis $\vert \alpha\rangle$ contains an infinity of states, for practical calculations we have always to make some truncations.**
#
#
#
#
#
# Before we develop the Hartree-Fock equations, there is another very useful property of determinants that we will use both in connection with Hartree-Fock calculations and later shell-model calculations.
#
# Consider the following determinant
# $$
# \left| \begin{array}{cc} \alpha_1b_{11}+\alpha_2b_{12}& a_{12}\\
# \alpha_1b_{21}+\alpha_2b_{22}&a_{22}\end{array} \right|=\alpha_1\left|\begin{array}{cc} b_{11}& a_{12}\\
# b_{21}&a_{22}\end{array} \right|+\alpha_2\left| \begin{array}{cc} b_{12}& a_{12}\\b_{22}&a_{22}\end{array} \right|
# $$
# We can generalize this to an $n\times n$ matrix and have
# $$
# \left| \begin{array}{cccccc} a_{11}& a_{12} & \dots & \sum_{k=1}^n c_k b_{1k} &\dots & a_{1n}\\
# a_{21}& a_{22} & \dots & \sum_{k=1}^n c_k b_{2k} &\dots & a_{2n}\\
# \dots & \dots & \dots & \dots & \dots & \dots \\
# \dots & \dots & \dots & \dots & \dots & \dots \\
# a_{n1}& a_{n2} & \dots & \sum_{k=1}^n c_k b_{nk} &\dots & a_{nn}\end{array} \right|=
# \sum_{k=1}^n c_k\left| \begin{array}{cccccc} a_{11}& a_{12} & \dots & b_{1k} &\dots & a_{1n}\\
# a_{21}& a_{22} & \dots & b_{2k} &\dots & a_{2n}\\
# \dots & \dots & \dots & \dots & \dots & \dots\\
# \dots & \dots & \dots & \dots & \dots & \dots\\
# a_{n1}& a_{n2} & \dots & b_{nk} &\dots & a_{nn}\end{array} \right| .
# $$
# This is a property we will use in our Hartree-Fock discussions.
#
#
#
#
# We can generalize the previous results, now
# with all elements $a_{ij}$ being given as functions of
# linear combinations of various coefficients $c$ and elements $b_{ij}$,
# $$
# \left| \begin{array}{cccccc} \sum_{k=1}^n b_{1k}c_{k1}& \sum_{k=1}^n b_{1k}c_{k2} & \dots & \sum_{k=1}^n b_{1k}c_{kj} &\dots & \sum_{k=1}^n b_{1k}c_{kn}\\
# \sum_{k=1}^n b_{2k}c_{k1}& \sum_{k=1}^n b_{2k}c_{k2} & \dots & \sum_{k=1}^n b_{2k}c_{kj} &\dots & \sum_{k=1}^n b_{2k}c_{kn}\\
# \dots & \dots & \dots & \dots & \dots & \dots \\
# \dots & \dots & \dots & \dots & \dots &\dots \\
# \sum_{k=1}^n b_{nk}c_{k1}& \sum_{k=1}^n b_{nk}c_{k2} & \dots & \sum_{k=1}^n b_{nk}c_{kj} &\dots & \sum_{k=1}^n b_{nk}c_{kn}\end{array} \right|=det(\mathbf{C})det(\mathbf{B}),
# $$
# where $det(\mathbf{C})$ and $det(\mathbf{B})$ are the determinants of $n\times n$ matrices
# with elements $c_{ij}$ and $b_{ij}$ respectively.
# This is a property we will use in our Hartree-Fock discussions. Convince yourself about the correctness of the above expression by setting $n=2$.
#
#
#
#
#
#
# With our definition of the new basis in terms of an orthogonal basis we have
# $$
# \psi_p(x) = \sum_{\lambda} C_{p\lambda}\phi_{\lambda}(x).
# $$
# If the coefficients $C_{p\lambda}$ belong to an orthogonal or unitary matrix, the new basis
# is also orthogonal.
# Our Slater determinant in the new basis $\psi_p(x)$ is written as
# $$
# \frac{1}{\sqrt{A!}}
# \left| \begin{array}{ccccc} \psi_{p}(x_1)& \psi_{p}(x_2)& \dots & \dots & \psi_{p}(x_A)\\
# \psi_{q}(x_1)&\psi_{q}(x_2)& \dots & \dots & \psi_{q}(x_A)\\
# \dots & \dots & \dots & \dots & \dots \\
# \dots & \dots & \dots & \dots & \dots \\
# \psi_{t}(x_1)&\psi_{t}(x_2)& \dots & \dots & \psi_{t}(x_A)\end{array} \right|=\frac{1}{\sqrt{A!}}
# \left| \begin{array}{ccccc} \sum_{\lambda} C_{p\lambda}\phi_{\lambda}(x_1)& \sum_{\lambda} C_{p\lambda}\phi_{\lambda}(x_2)& \dots & \dots & \sum_{\lambda} C_{p\lambda}\phi_{\lambda}(x_A)\\
# \sum_{\lambda} C_{q\lambda}\phi_{\lambda}(x_1)&\sum_{\lambda} C_{q\lambda}\phi_{\lambda}(x_2)& \dots & \dots & \sum_{\lambda} C_{q\lambda}\phi_{\lambda}(x_A)\\
# \dots & \dots & \dots & \dots & \dots \\
# \dots & \dots & \dots & \dots & \dots \\
# \sum_{\lambda} C_{t\lambda}\phi_{\lambda}(x_1)&\sum_{\lambda} C_{t\lambda}\phi_{\lambda}(x_2)& \dots & \dots & \sum_{\lambda} C_{t\lambda}\phi_{\lambda}(x_A)\end{array} \right|,
# $$
# which is nothing but $det(\mathbf{C})det(\Phi)$, with $det(\Phi)$ being the determinant given by the basis functions $\phi_{\lambda}(x)$.
#
#
#
# In our discussions hereafter we will use our definitions of single-particle states above and below the Fermi ($F$) level given by the labels
# $ijkl\dots \le F$ for so-called single-hole states and $abcd\dots > F$ for so-called particle states.
# For general single-particle states we employ the labels $pqrs\dots$.
#
#
#
#
# In Eq. ([2](#FunctionalEPhi)), restated here
# $$
# E[\Phi]
# = \sum_{\mu=1}^A \langle \mu | h | \mu \rangle +
# \frac{1}{2}\sum_{{\mu}=1}^A\sum_{{\nu}=1}^A \langle \mu\nu|\hat{v}|\mu\nu\rangle_{AS},
# $$
# we found the expression for the energy functional in terms of the basis function $\phi_{\lambda}(\mathbf{r})$. We then varied the above energy functional with respect to the basis functions $|\mu \rangle$.
# Now we are interested in defining a new basis defined in terms of
# a chosen basis as defined in Eq. ([4](#eq:newbasis)). We can then rewrite the energy functional as
# <!-- Equation labels as ordinary links -->
# <div id="FunctionalEPhi2"></div>
#
# $$
# \begin{equation}
# E[\Phi^{HF}]
# = \sum_{i=1}^A \langle i | h | i \rangle +
# \frac{1}{2}\sum_{ij=1}^A\langle ij|\hat{v}|ij\rangle_{AS}, \label{FunctionalEPhi2} \tag{5}
# \end{equation}
# $$
# where $\Phi^{HF}$ is the new Slater determinant defined by the new basis of Eq. ([4](#eq:newbasis)).
#
#
#
#
#
# Using Eq. ([4](#eq:newbasis)) we can rewrite Eq. ([5](#FunctionalEPhi2)) as
# <!-- Equation labels as ordinary links -->
# <div id="FunctionalEPhi3"></div>
#
# $$
# \begin{equation}
# E[\Psi]
# = \sum_{i=1}^A \sum_{\alpha\beta} C^*_{i\alpha}C_{i\beta}\langle \alpha | h | \beta \rangle +
# \frac{1}{2}\sum_{ij=1}^A\sum_{{\alpha\beta\gamma\delta}} C^*_{i\alpha}C^*_{j\beta}C_{i\gamma}C_{j\delta}\langle \alpha\beta|\hat{v}|\gamma\delta\rangle_{AS}. \label{FunctionalEPhi3} \tag{6}
# \end{equation}
# $$
# We wish now to minimize the above functional. We introduce again a set of Lagrange multipliers, noting that
# since $\langle i | j \rangle = \delta_{i,j}$ and $\langle \alpha | \beta \rangle = \delta_{\alpha,\beta}$,
# the coefficients $C_{i\gamma}$ obey the relation
# $$
# \langle i | j \rangle=\delta_{i,j}=\sum_{\alpha\beta} C^*_{i\alpha}C_{i\beta}\langle \alpha | \beta \rangle=
# \sum_{\alpha} C^*_{i\alpha}C_{i\alpha},
# $$
# which allows us to define a functional to be minimized that reads
# <!-- Equation labels as ordinary links -->
# <div id="_auto1"></div>
#
# $$
# \begin{equation}
# F[\Phi^{HF}]=E[\Phi^{HF}] - \sum_{i=1}^A\epsilon_i\sum_{\alpha} C^*_{i\alpha}C_{i\alpha}.
# \label{_auto1} \tag{7}
# \end{equation}
# $$
# Minimizing with respect to $C^*_{i\alpha}$, remembering that the equations for $C^*_{i\alpha}$ and $C_{i\alpha}$
# can be written as two independent equations, we obtain
# $$
# \frac{d}{dC^*_{i\alpha}}\left[ E[\Phi^{HF}] - \sum_{j}\epsilon_j\sum_{\alpha} C^*_{j\alpha}C_{j\alpha}\right]=0,
# $$
# which yields for every single-particle state $i$ and index $\alpha$ (recalling that the coefficients $C_{i\alpha}$ are matrix elements of a unitary (or orthogonal for a real symmetric matrix) matrix)
# the following Hartree-Fock equations
# $$
# \sum_{\beta} C_{i\beta}\langle \alpha | h | \beta \rangle+
# \sum_{j=1}^A\sum_{\beta\gamma\delta} C^*_{j\beta}C_{j\delta}C_{i\gamma}\langle \alpha\beta|\hat{v}|\gamma\delta\rangle_{AS}=\epsilon_i^{HF}C_{i\alpha}.
# $$
# We can rewrite this equation as (changing dummy variables)
# $$
# \sum_{\beta} \left\{\langle \alpha | h | \beta \rangle+
# \sum_{j}^A\sum_{\gamma\delta} C^*_{j\gamma}C_{j\delta}\langle \alpha\gamma|\hat{v}|\beta\delta\rangle_{AS}\right\}C_{i\beta}=\epsilon_i^{HF}C_{i\alpha}.
# $$
# Note that the sums over greek indices run over the number of basis set functions (in principle an infinite number).
#
#
#
#
#
# Defining
# $$
# h_{\alpha\beta}^{HF}=\langle \alpha | h | \beta \rangle+
# \sum_{j=1}^A\sum_{\gamma\delta} C^*_{j\gamma}C_{j\delta}\langle \alpha\gamma|\hat{v}|\beta\delta\rangle_{AS},
# $$
# we can rewrite the new equations as
# <!-- Equation labels as ordinary links -->
# <div id="eq:newhf"></div>
#
# $$
# \begin{equation}
# \sum_{\beta}h_{\alpha\beta}^{HF}C_{i\beta}=\epsilon_i^{HF}C_{i\alpha}. \label{eq:newhf} \tag{8}
# \end{equation}
# $$
# The latter is nothing but a standard eigenvalue problem. Compared with Eq. ([3](#eq:hartreefockcoordinatespace)),
# we see that we do not need to compute any integrals in an iterative procedure for solving the equations.
# It suffices to tabulate the matrix elements $\langle \alpha | h | \beta \rangle$ and $\langle \alpha\gamma|\hat{v}|\beta\delta\rangle_{AS}$ once and for all. Successive iterations require thus only a look-up in tables over one-body and two-body matrix elements. These details will be discussed below when we solve the Hartree-Fock equations numerically.
#
#
#
# ## Hartree-Fock algorithm
#
# Our Hartree-Fock matrix is thus
# $$
# \hat{h}_{\alpha\beta}^{HF}=\langle \alpha | \hat{h}_0 | \beta \rangle+
# \sum_{j=1}^A\sum_{\gamma\delta} C^*_{j\gamma}C_{j\delta}\langle \alpha\gamma|\hat{v}|\beta\delta\rangle_{AS}.
# $$
# The Hartree-Fock equations are solved in an iterative way, starting with a guess for the coefficients $C_{j\gamma}=\delta_{j,\gamma}$ and solving the equations by diagonalization till the new single-particle energies
# $\epsilon_i^{\mathrm{HF}}$ do not change anymore by a prefixed quantity.
#
#
#
#
# Normally we assume that the single-particle basis $|\beta\rangle$ forms an eigenbasis for the operator
# $\hat{h}_0$, meaning that the Hartree-Fock matrix becomes
# $$
# \hat{h}_{\alpha\beta}^{HF}=\epsilon_{\alpha}\delta_{\alpha,\beta}+
# \sum_{j=1}^A\sum_{\gamma\delta} C^*_{j\gamma}C_{j\delta}\langle \alpha\gamma|\hat{v}|\beta\delta\rangle_{AS}.
# $$
# The Hartree-Fock eigenvalue problem
# $$
# \sum_{\beta}\hat{h}_{\alpha\beta}^{HF}C_{i\beta}=\epsilon_i^{\mathrm{HF}}C_{i\alpha},
# $$
# can be written out in a more compact form as
# $$
# \hat{h}^{HF}\hat{C}=\epsilon^{\mathrm{HF}}\hat{C}.
# $$
# The Hartree-Fock equations are, in their simplest form, solved in an iterative way, starting with a guess for the
# coefficients $C_{i\alpha}$. We label the coefficients as $C_{i\alpha}^{(n)}$, where the subscript $n$ stands for iteration $n$.
# To set up the algorithm we can proceed as follows:
#
# * We start with a guess $C_{i\alpha}^{(0)}=\delta_{i,\alpha}$. Alternatively, we could have used random starting values as long as the vectors are normalized. Another possibility is to give states below the Fermi level a larger weight.
#
# * The Hartree-Fock matrix simplifies then to (assuming that the coefficients $C_{i\alpha} $ are real)
# $$
# \hat{h}_{\alpha\beta}^{HF}=\epsilon_{\alpha}\delta_{\alpha,\beta}+
# \sum_{j = 1}^A\sum_{\gamma\delta} C_{j\gamma}^{(0)}C_{j\delta}^{(0)}\langle \alpha\gamma|\hat{v}|\beta\delta\rangle_{AS}.
# $$
# Solving the Hartree-Fock eigenvalue problem yields then new eigenvectors $C_{i\alpha}^{(1)}$ and eigenvalues
# $\epsilon_i^{HF(1)}$.
# * With the new eigenvalues we can set up a new Hartree-Fock potential
# $$
# \sum_{j = 1}^A\sum_{\gamma\delta} C_{j\gamma}^{(1)}C_{j\delta}^{(1)}\langle \alpha\gamma|\hat{v}|\beta\delta\rangle_{AS}.
# $$
# The diagonalization with the new Hartree-Fock potential yields new eigenvectors and eigenvalues.
# This process is continued till for example
# $$
# \frac{\sum_{i} |\epsilon_i^{(n)}-\epsilon_i^{(n-1)}|}{m} \le \lambda,
# $$
# where $\lambda$ is a user prefixed quantity ($\lambda \sim 10^{-8}$ or smaller) and $i$ runs over all calculated single-particle
# energies and $m$ is the number of single-particle states.
#
#
#
# ## Analysis of Hartree-Fock equations and Koopman's theorem
#
# We can rewrite the ground state energy by adding and subtracting $\hat{u}^{HF}(x_i)$
# $$
# E_0^{HF} =\langle \Phi_0 | \hat{H} | \Phi_0\rangle =
# \sum_{i\le F}^A \langle i | \hat{h}_0 +\hat{u}^{HF}| i\rangle+ \frac{1}{2}\sum_{i\le F}^A\sum_{j \le F}^A\left[\langle ij |\hat{v}|ij \rangle-\langle ij|\hat{v}|ji\rangle\right]-\sum_{i\le F}^A \langle i |\hat{u}^{HF}| i\rangle,
# $$
# which results in
# $$
# E_0^{HF}
# = \sum_{i\le F}^A \varepsilon_i^{HF} + \frac{1}{2}\sum_{i\le F}^A\sum_{j \le F}^A\left[\langle ij |\hat{v}|ij \rangle-\langle ij|\hat{v}|ji\rangle\right]-\sum_{i\le F}^A \langle i |\hat{u}^{HF}| i\rangle.
# $$
# Our single-particle states $ijk\dots$ are now single-particle states obtained from the solution of the Hartree-Fock equations.
#
#
#
# Using our definition of the Hartree-Fock single-particle energies we obtain then the following expression for the total ground-state energy
# $$
# E_0^{HF}
# = \sum_{i\le F}^A \varepsilon_i - \frac{1}{2}\sum_{i\le F}^A\sum_{j \le F}^A\left[\langle ij |\hat{v}|ij \rangle-\langle ij|\hat{v}|ji\rangle\right].
# $$
# This form will be used in our discussion of Koopman's theorem.
#
#
#
# In the atomic physics case we have
# $$
# E[\Phi^{\mathrm{HF}}(N)]
# = \sum_{i=1}^N \langle i | \hat{h}_0 | i \rangle +
# \frac{1}{2}\sum_{ij=1}^N\langle ij|\hat{v}|ij\rangle_{AS},
# $$
# where $\Phi^{\mathrm{HF}}(N)$ is the new Slater determinant defined by the new basis of Eq. ([4](#eq:newbasis))
# for $N$ electrons (same $Z$). If we assume that the single-particle wave functions in the new basis do not change
# when we remove one electron or add one electron, we can then define the corresponding energy for the $N-1$ systems as
# $$
# E[\Phi^{\mathrm{HF}}(N-1)]
# = \sum_{i=1; i\ne k}^N \langle i | \hat{h}_0 | i \rangle +
# \frac{1}{2}\sum_{ij=1;i,j\ne k}^N\langle ij|\hat{v}|ij\rangle_{AS},
# $$
# where we have removed a single-particle state $k\le F$, that is a state below the Fermi level.
#
#
#
# Calculating the difference
# $$
# E[\Phi^{\mathrm{HF}}(N)]- E[\Phi^{\mathrm{HF}}(N-1)] = \langle k | \hat{h}_0 | k \rangle +
# \frac{1}{2}\sum_{i=1;i\ne k}^N\langle ik|\hat{v}|ik\rangle_{AS} + \frac{1}{2}\sum_{j=1;j\ne k}^N\langle kj|\hat{v}|kj\rangle_{AS},
# $$
# we obtain
# $$
# E[\Phi^{\mathrm{HF}}(N)]- E[\Phi^{\mathrm{HF}}(N-1)] = \langle k | \hat{h}_0 | k \rangle +\sum_{j=1}^N\langle kj|\hat{v}|kj\rangle_{AS}
# $$
# which is just our definition of the Hartree-Fock single-particle energy
# $$
# E[\Phi^{\mathrm{HF}}(N)]- E[\Phi^{\mathrm{HF}}(N-1)] = \epsilon_k^{\mathrm{HF}}
# $$
# Similarly, we can now compute the difference (we label the single-particle states above the Fermi level as $abcd > F$)
# $$
# E[\Phi^{\mathrm{HF}}(N+1)]- E[\Phi^{\mathrm{HF}}(N)]= \epsilon_a^{\mathrm{HF}}.
# $$
# These two equations can thus be used to compute the electron affinity or ionization energies, respectively.
# Koopman's theorem states that for example the ionization energy of a closed-shell system is given by the energy of the highest occupied single-particle state. If we assume that changing the number of electrons from $N$ to $N+1$ does not change the Hartree-Fock single-particle energies and eigenfunctions, then Koopman's theorem simply states that the ionization energy of an atom is given by the single-particle energy of the last bound state. In a similar way, we can also define the electron affinities.
#
#
#
#
# As an example, consider a simple model for atomic sodium, Na. Neutral sodium has eleven electrons,
# with the weakest bound one being confined to the $3s$ single-particle quantum numbers. The energy needed to remove an electron from neutral sodium is rather small, 5.1391 eV, a feature which pertains to all alkali metals.
# Having performed a Hartree-Fock calculation for neutral sodium would then allow us to compute the
# ionization energy by using the single-particle energy for the $3s$ states, namely $\epsilon_{3s}^{\mathrm{HF}}$.
#
# From these considerations, we see that Hartree-Fock theory allows us to make a connection between experimental
# observables (here ionization and affinity energies) and the underlying interactions between particles.
# In this sense, we are now linking the dynamics and structure of a many-body system with the laws of motion which govern the system. Our approach is a reductionistic one, meaning that we want to understand the laws of motion
# in terms of the particles or degrees of freedom which we believe are the fundamental ones. Our Slater determinant, being constructed as the product of various single-particle functions, follows this philosophy.
#
#
#
#
# With similar arguments as in atomic physics, we can now use Hartree-Fock theory to make a link
# between nuclear forces and separation energies. Changing to nuclear system, we define
# $$
# E[\Phi^{\mathrm{HF}}(A)]
# = \sum_{i=1}^A \langle i | \hat{h}_0 | i \rangle +
# \frac{1}{2}\sum_{ij=1}^A\langle ij|\hat{v}|ij\rangle_{AS},
# $$
# where $\Phi^{\mathrm{HF}}(A)$ is the new Slater determinant defined by the new basis of Eq. ([4](#eq:newbasis))
# for $A$ nucleons, where $A=N+Z$, with $N$ now being the number of neutrons and $Z$ the number of protons. If we assume again that the single-particle wave functions in the new basis do not change from a nucleus with $A$ nucleons to a nucleus with $A-1$ nucleons, we can then define the corresponding energy for the $A-1$ systems as
# $$
# E[\Phi^{\mathrm{HF}}(A-1)]
# = \sum_{i=1; i\ne k}^A \langle i | \hat{h}_0 | i \rangle +
# \frac{1}{2}\sum_{ij=1;i,j\ne k}^A\langle ij|\hat{v}|ij\rangle_{AS},
# $$
# where we have removed a single-particle state $k\le F$, that is a state below the Fermi level.
#
#
#
#
# Calculating the difference
# $$
# E[\Phi^{\mathrm{HF}}(A)]- E[\Phi^{\mathrm{HF}}(A-1)]
# = \langle k | \hat{h}_0 | k \rangle +
# \frac{1}{2}\sum_{i=1;i\ne k}^A\langle ik|\hat{v}|ik\rangle_{AS} + \frac{1}{2}\sum_{j=1;j\ne k}^A\langle kj|\hat{v}|kj\rangle_{AS},
# $$
# which becomes
# $$
# E[\Phi^{\mathrm{HF}}(A)]- E[\Phi^{\mathrm{HF}}(A-1)]
# = \langle k | \hat{h}_0 | k \rangle +\sum_{j=1}^A\langle kj|\hat{v}|kj\rangle_{AS}
# $$
# which is just our definition of the Hartree-Fock single-particle energy
# $$
# E[\Phi^{\mathrm{HF}}(A)]- E[\Phi^{\mathrm{HF}}(A-1)]
# = \epsilon_k^{\mathrm{HF}}
# $$
# Similarly, we can now compute the difference (recall that the single-particle states $abcd > F$)
# $$
# E[\Phi^{\mathrm{HF}}(A+1)]- E[\Phi^{\mathrm{HF}}(A)]= \epsilon_a^{\mathrm{HF}}.
# $$
# If we then recall that the binding energy differences
# $$
# BE(A)-BE(A-1) \hspace{0.5cm} \mathrm{and} \hspace{0.5cm} BE(A+1)-BE(A),
# $$
# define the separation energies, we see that the Hartree-Fock single-particle energies can be used to
# define separation energies. We have thus our first link between nuclear forces (included in the potential energy term) and an observable quantity defined by differences in binding energies.
#
#
#
#
# We have thus the following interpretations (if the single-particle fields do not change)
# $$
# BE(A)-BE(A-1)\approx E[\Phi^{\mathrm{HF}}(A)]- E[\Phi^{\mathrm{HF}}(A-1)]
# = \epsilon_k^{\mathrm{HF}},
# $$
# and
# $$
# BE(A+1)-BE(A)\approx E[\Phi^{\mathrm{HF}}(A+1)]- E[\Phi^{\mathrm{HF}}(A)] = \epsilon_a^{\mathrm{HF}}.
# $$
# If we use $^{16}\mbox{O}$ as our closed-shell nucleus, we could then interpret the separation energy
# $$
# BE(^{16}\mathrm{O})-BE(^{15}\mathrm{O})\approx \epsilon_{0p^{\nu}_{1/2}}^{\mathrm{HF}},
# $$
# and
# $$
# BE(^{16}\mathrm{O})-BE(^{15}\mathrm{N})\approx \epsilon_{0p^{\pi}_{1/2}}^{\mathrm{HF}}.
# $$
# Similarly, we could interpret
# $$
# BE(^{17}\mathrm{O})-BE(^{16}\mathrm{O})\approx \epsilon_{0d^{\nu}_{5/2}}^{\mathrm{HF}},
# $$
# and
# $$
# BE(^{17}\mathrm{F})-BE(^{16}\mathrm{O})\approx\epsilon_{0d^{\pi}_{5/2}}^{\mathrm{HF}}.
# $$
# We can continue like this for all $A\pm 1$ nuclei where $A$ is a good closed-shell (or subshell closure)
# nucleus. Examples are $^{22}\mbox{O}$, $^{24}\mbox{O}$, $^{40}\mbox{Ca}$, $^{48}\mbox{Ca}$, $^{52}\mbox{Ca}$, $^{54}\mbox{Ca}$, $^{56}\mbox{Ni}$,
# $^{68}\mbox{Ni}$, $^{78}\mbox{Ni}$, $^{90}\mbox{Zr}$, $^{88}\mbox{Sr}$, $^{100}\mbox{Sn}$, $^{132}\mbox{Sn}$ and $^{208}\mbox{Pb}$, to mention some possible cases.
#
#
#
#
# We can thus make our first interpretation of the separation energies in terms of the simplest
# possible many-body theory.
# If we also recall that the so-called energy gap for neutrons (or protons) is defined as
# $$
# \Delta S_n= 2BE(N,Z)-BE(N-1,Z)-BE(N+1,Z),
# $$
# for neutrons and the corresponding gap for protons
# $$
# \Delta S_p= 2BE(N,Z)-BE(N,Z-1)-BE(N,Z+1),
# $$
# we can define the neutron and proton energy gaps for $^{16}\mbox{O}$ as
# $$
# \Delta S_{\nu}=\epsilon_{0d^{\nu}_{5/2}}^{\mathrm{HF}}-\epsilon_{0p^{\nu}_{1/2}}^{\mathrm{HF}},
# $$
# and
# $$
# \Delta S_{\pi}=\epsilon_{0d^{\pi}_{5/2}}^{\mathrm{HF}}-\epsilon_{0p^{\pi}_{1/2}}^{\mathrm{HF}}.
# $$
# <!-- --- begin exercise --- -->
#
# ## Exercise 1: Derivation of Hartree-Fock equations
#
# Consider a Slater determinant built up of single-particle orbitals $\psi_{\lambda}$,
# with $\lambda = 1,2,\dots,N$.
#
# The unitary transformation
# $$
# \psi_a = \sum_{\lambda} C_{a\lambda}\phi_{\lambda},
# $$
# brings us into the new basis.
# The new basis has quantum numbers $a=1,2,\dots,N$.
#
#
# **a)**
# Show that the new basis is orthonormal.
#
# **b)**
# Show that the new Slater determinant constructed from the new single-particle wave functions can be
# written as the determinant based on the previous basis and the determinant of the matrix $C$.
#
# **c)**
# Show that the old and the new Slater determinants are equal up to a complex constant with absolute value unity.
#
# <!-- --- begin hint in exercise --- -->
#
# **Hint.**
# Use the fact that $C$ is a unitary matrix.
#
# <!-- --- end hint in exercise --- -->
#
#
#
#
# <!-- --- end exercise --- -->
#
#
#
#
# <!-- --- begin exercise --- -->
#
# ## Exercise 2: Derivation of Hartree-Fock equations
#
# Consider the Slater determinant
# $$
# \Phi_{0}=\frac{1}{\sqrt{n!}}\sum_{p}(-)^{p}P
# \prod_{i=1}^{n}\psi_{\alpha_{i}}(x_{i}).
# $$
# A small variation in this function is given by
# $$
# \delta\Phi_{0}=\frac{1}{\sqrt{n!}}\sum_{p}(-)^{p}P
# \psi_{\alpha_{1}}(x_{1})\psi_{\alpha_{2}}(x_{2})\dots
# \psi_{\alpha_{i-1}}(x_{i-1})(\delta\psi_{\alpha_{i}}(x_{i}))
# \psi_{\alpha_{i+1}}(x_{i+1})\dots\psi_{\alpha_{n}}(x_{n}).
# $$
# **a)**
# Show that
# $$
# \langle \delta\Phi_{0}|\sum_{i=1}^{n}\left\{t(x_{i})+u(x_{i})
# \right\}+\frac{1}{2}
# \sum_{i\neq j=1}^{n}v(x_{i},x_{j})|\Phi_{0}\rangle=\sum_{i=1}^{n}\langle \delta\psi_{\alpha_{i}}|\hat{t}+\hat{u}
# |\phi_{\alpha_{i}}\rangle
# +\sum_{i\neq j=1}^{n}\left\{\langle\delta\psi_{\alpha_{i}}
# \psi_{\alpha_{j}}|\hat{v}|\psi_{\alpha_{i}}\psi_{\alpha_{j}}\rangle-
# \langle\delta\psi_{\alpha_{i}}\psi_{\alpha_{j}}|\hat{v}
# |\psi_{\alpha_{j}}\psi_{\alpha_{i}}\rangle\right\}
# $$
# <!-- --- end exercise --- -->
#
#
#
#
# <!-- --- begin exercise --- -->
#
# ## Exercise 3: Developing a Hartree-Fock program
#
# Neutron drops are a powerful theoretical laboratory for testing,
# validating and improving nuclear structure models. Indeed, all
# approaches to nuclear structure, from ab initio theory to shell model
# to density functional theory are applicable in such systems. We will,
# therefore, use neutron drops as a test system for setting up a
# Hartree-Fock code. This program can later be extended to studies of
# the binding energy of nuclei like $^{16}$O or $^{40}$Ca. The
# single-particle energies obtained by solving the Hartree-Fock
# equations can then be directly related to experimental separation
# energies.
# Since Hartree-Fock theory is the starting point for
# several many-body techniques (density functional theory, random-phase
# approximation, shell-model etc), the aim here is to develop a computer
# program to solve the Hartree-Fock equations in a given single-particle basis,
# here the harmonic oscillator.
#
# The Hamiltonian for a system of $N$ neutron drops confined in a
# harmonic potential reads
# $$
# \hat{H} = \sum_{i=1}^{N} \frac{\hat{p}_{i}^{2}}{2m}+\sum_{i=1}^{N} \frac{1}{2} m\omega^{2} {r}_{i}^{2}+\sum_{i<j} \hat{V}_{ij},
# $$
# with $\hbar^{2}/2m = 20.73$ fm$^{2}$, $mc^{2} = 938.90590$ MeV, and
# $\hat{V}_{ij}$ is the two-body interaction potential whose
# matrix elements are precalculated
# and to be read in by you.
#
# The Hartree-Fock algorithm can be broken down as follows. We recall that our Hartree-Fock matrix is
# $$
# \hat{h}_{\alpha\beta}^{HF}=\langle \alpha \vert\hat{h}_0 \vert \beta \rangle+
# \sum_{j=1}^N\sum_{\gamma\delta} C^*_{j\gamma}C_{j\delta}\langle \alpha\gamma|V|\beta\delta\rangle_{AS}.
# $$
# Normally we assume that the single-particle basis $\vert\beta\rangle$
# forms an eigenbasis for the operator $\hat{h}_0$ (this is our case), meaning that the
# Hartree-Fock matrix becomes
# $$
# \hat{h}_{\alpha\beta}^{HF}=\epsilon_{\alpha}\delta_{\alpha,\beta}+
# \sum_{j=1}^N\sum_{\gamma\delta} C^*_{j\gamma}C_{j\delta}\langle \alpha\gamma|V|\beta\delta\rangle_{AS}.
# $$
# The Hartree-Fock eigenvalue problem
# $$
# \sum_{\beta}\hat{h}_{\alpha\beta}^{HF}C_{i\beta}=\epsilon_i^{\mathrm{HF}}C_{i\alpha},
# $$
# can be written out in a more compact form as
# $$
# \hat{h}^{HF}\hat{C}=\epsilon^{\mathrm{HF}}\hat{C}.
# $$
# The equations are often rewritten in terms of a so-called density matrix,
# which is defined as
# <!-- Equation labels as ordinary links -->
# <div id="_auto2"></div>
#
# $$
# \begin{equation}
# \rho_{\gamma\delta}=\sum_{i=1}^{N}\langle\gamma|i\rangle\langle i|\delta\rangle = \sum_{i=1}^{N}C_{i\gamma}C^*_{i\delta}.
# \label{_auto2} \tag{9}
# \end{equation}
# $$
# It means that we can rewrite the Hartree-Fock Hamiltonian as
# $$
# \hat{h}_{\alpha\beta}^{HF}=\epsilon_{\alpha}\delta_{\alpha,\beta}+
# \sum_{\gamma\delta} \rho_{\gamma\delta}\langle \alpha\gamma|V|\beta\delta\rangle_{AS}.
# $$
# It is convenient to use the density matrix since we can precalculate in every iteration the product of two eigenvector components $C$.
#
#
# Note that $\langle \alpha\vert\hat{h}_0\vert\beta \rangle$ denotes the
# matrix elements of the one-body part of the starting hamiltonian. For
# self-bound nuclei $\langle \alpha\vert\hat{h}_0\vert\beta \rangle$ is the
# kinetic energy, whereas for neutron drops, $\langle \alpha \vert \hat{h}_0 \vert \beta \rangle$ represents the harmonic oscillator hamiltonian since
# the system is confined in a harmonic trap. If we are working in a
# harmonic oscillator basis with the same $\omega$ as the trapping
# potential, then $\langle \alpha\vert\hat{h}_0 \vert \beta \rangle$ is
# diagonal.
#
#
# The python
# [program](https://github.com/CompPhysics/ManyBodyMethods/tree/master/doc/src/hfock/Code)
# shows how one can, in a brute force way read in matrix elements in
# $m$-scheme and compute the Hartree-Fock single-particle energies for
# four major shells. The interaction which has been used is the
# so-called N3LO interaction of [Machleidt and
# Entem](http://journals.aps.org/prc/abstract/10.1103/PhysRevC.68.041001)
# using the [Similarity Renormalization
# Group](http://journals.aps.org/prc/abstract/10.1103/PhysRevC.75.061001)
# approach method to renormalize the interaction, using an oscillator
# energy $\hbar\omega=10$ MeV.
#
# The nucleon-nucleon two-body matrix elements are in $m$-scheme and are fully anti-symmetrized. The Hartree-Fock programs uses the density matrix discussed above in order to compute the Hartree-Fock matrix.
# Here we display the Hartree-Fock part only, assuming that single-particle data and two-body matrix elements have already been read in.
# In[1]:
import numpy as np
from decimal import Decimal
# expectation value for the one body part, Harmonic oscillator in three dimensions
def onebody(i, n, l, homega=10.0):
    """Return the 3D harmonic-oscillator single-particle energy of orbital i.

    The energy is hbar*omega*(2n + l + 3/2).

    Parameters
    ----------
    i : int
        Index into the quantum-number lists ``n`` and ``l``.
    n : sequence of int
        Radial (nodal) quantum numbers for all orbitals.
    l : sequence of int
        Orbital angular momenta for all orbitals.
    homega : float, optional
        Oscillator energy hbar*omega in MeV.  Defaults to 10.0, the value
        used when the two-body interaction elements were precalculated.
    """
    return homega*(2*n[i] + l[i] + 1.5)
if __name__ == '__main__':
    Nparticles = 16
    # Read single-particle quantum numbers (index, n, l, 2j, 2m_j, 2t_z per
    # line) from file; spOrbitals counts the number of m-scheme orbitals.
    index = []
    n = []
    l = []
    j = []
    mj = []
    tz = []
    spOrbitals = 0
    with open("nucleispnumbers.dat", "r") as qnumfile:
        for line in qnumfile:
            nums = line.split()
            if len(nums) != 0:
                index.append(int(nums[0]))
                n.append(int(nums[1]))
                l.append(int(nums[2]))
                j.append(int(nums[3]))
                mj.append(int(nums[4]))
                tz.append(int(nums[5]))
                spOrbitals += 1
    # Read antisymmetrized two-body matrix elements <ab|v|cd>_AS into a
    # brute-force 4-dim array (1-based indices in the file, 0-based here).
    nninteraction = np.zeros([spOrbitals, spOrbitals, spOrbitals, spOrbitals])
    with open("nucleitwobody.dat", "r") as infile:
        for line in infile:
            number = line.split()
            a = int(number[0]) - 1
            b = int(number[1]) - 1
            c = int(number[2]) - 1
            d = int(number[3]) - 1
            # float() suffices: the target array is float64, so routing the
            # value through Decimal (as before) added no precision.
            nninteraction[a][b][c][d] = float(number[4])
    # One-body (harmonic-oscillator) part; diagonal in this basis.
    singleparticleH = np.zeros(spOrbitals)
    for i in range(spOrbitals):
        singleparticleH[i] = onebody(i, n, l)
    # Start HF iterations.  Initial guess: C = identity, i.e. the HF
    # orbitals coincide with the oscillator basis, occupying the lowest
    # Nparticles states.
    C = np.eye(spOrbitals)  # HF coefficients, one eigenvector per column
    # Density matrix rho_{gamma,delta} = sum_{i<=F} C_{gamma,i} C_{delta,i},
    # computed as a single matrix product over the occupied columns.
    DensityMatrix = np.dot(C[:, :Nparticles], C[:, :Nparticles].T)
    maxHFiter = 100
    epsilon = 1.0e-5
    difference = 1.0
    hf_count = 0
    oldenergies = np.zeros(spOrbitals)
    newenergies = np.zeros(spOrbitals)
    while hf_count < maxHFiter and difference > epsilon:
        print("############### Iteration %i ###############" % hf_count)
        HFmatrix = np.zeros([spOrbitals, spOrbitals])
        for alpha in range(spOrbitals):
            for beta in range(spOrbitals):
                # Symmetry screen for three-dimensional systems, including
                # isospin.  NOTE(review): the `and`-chain skips only when ALL
                # quantum numbers differ; an `or`-chain may have been intended,
                # but the extra terms summed here should vanish anyway if the
                # interaction respects these symmetries — confirm against the
                # matrix-element file before tightening.
                if l[alpha] != l[beta] and j[alpha] != j[beta] and mj[alpha] != mj[beta] and tz[alpha] != tz[beta]: continue
                # Fock term: contract the density matrix with the
                # antisymmetrized NN interaction in m-scheme.
                sumFockTerm = 0.0
                for gamma in range(spOrbitals):
                    for delta in range(spOrbitals):
                        if (mj[alpha]+mj[gamma]) != (mj[beta]+mj[delta]) and (tz[alpha]+tz[gamma]) != (tz[beta]+tz[delta]): continue
                        sumFockTerm += DensityMatrix[gamma][delta]*nninteraction[alpha][gamma][beta][delta]
                HFmatrix[alpha][beta] = sumFockTerm
                # Add the one-body term (plain harmonic oscillator) on the
                # diagonal.
                if beta == alpha: HFmatrix[alpha][alpha] += singleparticleH[alpha]
        # Diagonalize; eigh returns eigenvalues in ascending order, so the
        # first Nparticles columns of C are the occupied HF orbitals.
        spenergies, C = np.linalg.eigh(HFmatrix)
        # New density matrix in m-scheme from the occupied eigenvectors.
        DensityMatrix = np.dot(C[:, :Nparticles], C[:, :Nparticles].T)
        newenergies = spenergies
        # Convergence measure: average absolute change of the sp energies.
        difference = np.sum(np.abs(newenergies - oldenergies)) / spOrbitals
        oldenergies = newenergies
        print ("Single-particle energies, ordering may have changed ")
        for i in range(spOrbitals):
            print('{0:4d} {1:.4f}'.format(i, oldenergies[i]))
        hf_count += 1
# Running the program, one finds that the lowest-lying states for a nucleus like $^{16}\mbox{O}$, we see that the nucleon-nucleon force brings a natural spin-orbit splitting for the $0p$ states (or other states except the $s$-states).
# Since we are using the $m$-scheme for our calculations, we observe that there are several states with the same
# eigenvalues. The number of eigenvalues corresponds to the degeneracy $2j+1$ and is well respected in our calculations, as seen from the table here.
#
# The values of the lowest-lying states are ($\pi$ for protons and $\nu$ for neutrons)
# <table border="1">
# <thead>
# <tr><th align="center">Quantum numbers </th> <th align="center">Energy [MeV]</th> </tr>
# </thead>
# <tbody>
# <tr><td align="center"> $0s_{1/2}^{\pi}$ </td> <td align="center"> -40.4602 </td> </tr>
# <tr><td align="center"> $0s_{1/2}^{\pi}$ </td> <td align="center"> -40.4602 </td> </tr>
# <tr><td align="center"> $0s_{1/2}^{\nu}$ </td> <td align="center"> -40.6426 </td> </tr>
# <tr><td align="center"> $0s_{1/2}^{\nu}$ </td> <td align="center"> -40.6426 </td> </tr>
# <tr><td align="center"> $0p_{1/2}^{\pi}$ </td> <td align="center"> -6.7133 </td> </tr>
# <tr><td align="center"> $0p_{1/2}^{\pi}$ </td> <td align="center"> -6.7133 </td> </tr>
# <tr><td align="center"> $0p_{1/2}^{\nu}$ </td> <td align="center"> -6.8403 </td> </tr>
# <tr><td align="center"> $0p_{1/2}^{\nu}$ </td> <td align="center"> -6.8403 </td> </tr>
# <tr><td align="center"> $0p_{3/2}^{\pi}$ </td> <td align="center"> -11.5886 </td> </tr>
# <tr><td align="center"> $0p_{3/2}^{\pi}$ </td> <td align="center"> -11.5886 </td> </tr>
# <tr><td align="center"> $0p_{3/2}^{\pi}$ </td> <td align="center"> -11.5886 </td> </tr>
# <tr><td align="center"> $0p_{3/2}^{\pi}$ </td> <td align="center"> -11.5886 </td> </tr>
# <tr><td align="center"> $0p_{3/2}^{\nu}$ </td> <td align="center"> -11.7201 </td> </tr>
# <tr><td align="center"> $0p_{3/2}^{\nu}$ </td> <td align="center"> -11.7201 </td> </tr>
# <tr><td align="center"> $0p_{3/2}^{\nu}$ </td> <td align="center"> -11.7201 </td> </tr>
# <tr><td align="center"> $0p_{3/2}^{\nu}$ </td> <td align="center"> -11.7201 </td> </tr>
# <tr><td align="center"> $0d_{5/2}^{\pi}$ </td> <td align="center"> 18.7589 </td> </tr>
# <tr><td align="center"> $0d_{5/2}^{\nu}$ </td> <td align="center"> 18.8082 </td> </tr>
# </tbody>
# </table>
# We can use these results to attempt our first link with experimental data, namely to compute the shell gap or the separation energies. The shell gap for neutrons is given by
# $$
# \Delta S_n= 2BE(N,Z)-BE(N-1,Z)-BE(N+1,Z).
# $$
# For $^{16}\mbox{O}$ we have an experimental value for the shell gap of $11.51$ MeV for neutrons, while our Hartree-Fock calculations result in $25.65$ MeV. This means that correlations beyond a simple Hartree-Fock calculation with a two-body force play an important role in nuclear physics.
# The splitting between the $0p_{3/2}^{\nu}$ and the $0p_{1/2}^{\nu}$ state is 4.88 MeV, while the experimental value for the gap between the ground state $1/2^{-}$ and the first excited $3/2^{-}$ states is 6.08 MeV. The two-nucleon spin-orbit force plays a central role here. In our discussion of nuclear forces we will see how the spin-orbit force comes into play here.
#
# <!-- --- end exercise --- -->
#
#
# ## Hartree-Fock in second quantization and stability of HF solution
#
# We wish now to derive the Hartree-Fock equations using our second-quantized formalism and study the stability of the equations.
# Our ansatz for the ground state of the system is approximated as (this is our representation of a Slater determinant in second quantization)
# $$
# |\Phi_0\rangle = |c\rangle = a^{\dagger}_i a^{\dagger}_j \dots a^{\dagger}_l|0\rangle.
# $$
# We wish to determine $\hat{u}^{HF}$ so that
# $E_0^{HF}= \langle c|\hat{H}| c\rangle$ becomes a local minimum.
#
# In our analysis here we will need Thouless' theorem, which states that
# an arbitrary Slater determinant $|c'\rangle$ which is not orthogonal to a determinant
# $| c\rangle ={\displaystyle\prod_{i=1}^{n}}
# a_{\alpha_{i}}^{\dagger}|0\rangle$, can be written as
# $$
# |c'\rangle=exp\left\{\sum_{a>F}\sum_{i\le F}C_{ai}a_{a}^{\dagger}a_{i}\right\}| c\rangle
# $$
# Let us give a simple proof of Thouless' theorem. The theorem states that we can make a linear combination of particle-hole excitations with respect to a given reference state $\vert c\rangle$. With this linear combination, we can make a new Slater determinant $\vert c'\rangle $ which is not orthogonal to
# $\vert c\rangle$, that is
# $$
# \langle c|c'\rangle \ne 0.
# $$
# To show this we need some intermediate steps. The exponential product of two operators $\exp{\hat{A}}\times\exp{\hat{B}}$ is equal to $\exp{(\hat{A}+\hat{B})}$ only if the two operators commute, that is
# $$
# [\hat{A},\hat{B}] = 0.
# $$
# ## Thouless' theorem
#
#
# If the operators do not commute, we need to resort to the [Baker–Campbell–Hausdorff formula](http://www.encyclopediaofmath.org/index.php/Campbell%E2%80%93Hausdorff_formula). This relation states that
# $$
# \exp{\hat{C}}=\exp{\hat{A}}\exp{\hat{B}},
# $$
# with
# $$
# \hat{C}=\hat{A}+\hat{B}+\frac{1}{2}[\hat{A},\hat{B}]+\frac{1}{12}[[\hat{A},\hat{B}],\hat{B}]-\frac{1}{12}[[\hat{A},\hat{B}],\hat{A}]+\dots
# $$
# From these relations, we note that
# in our expression for $|c'\rangle$ we have commutators of the type
# $$
# [a_{a}^{\dagger}a_{i},a_{b}^{\dagger}a_{j}],
# $$
# and it is easy to convince oneself that these commutators, or higher powers thereof, are all zero. This means that we can write out our new representation of a Slater determinant as
# $$
# |c'\rangle=exp\left\{\sum_{a>F}\sum_{i\le F}C_{ai}a_{a}^{\dagger}a_{i}\right\}| c\rangle=\prod_{i}\left\{1+\sum_{a>F}C_{ai}a_{a}^{\dagger}a_{i}+\left(\sum_{a>F}C_{ai}a_{a}^{\dagger}a_{i}\right)^2+\dots\right\}| c\rangle
# $$
# We note that
# $$
# \prod_{i}\sum_{a>F}C_{ai}a_{a}^{\dagger}a_{i}\sum_{b>F}C_{bi}a_{b}^{\dagger}a_{i}| c\rangle =0,
# $$
# and all higher-order powers of these combinations of creation and annihilation operators disappear
# due to the fact that $(a_i)^n| c\rangle =0$ when $n > 1$. This allows us to rewrite the expression for $|c'\rangle $ as
# $$
# |c'\rangle=\prod_{i}\left\{1+\sum_{a>F}C_{ai}a_{a}^{\dagger}a_{i}\right\}| c\rangle,
# $$
# which we can rewrite as
# $$
# |c'\rangle=\prod_{i}\left\{1+\sum_{a>F}C_{ai}a_{a}^{\dagger}a_{i}\right\}| a^{\dagger}_{i_1} a^{\dagger}_{i_2} \dots a^{\dagger}_{i_n}|0\rangle.
# $$
# The last equation can be written as
# <!-- Equation labels as ordinary links -->
# <div id="_auto3"></div>
#
# $$
# \begin{equation}
# |c'\rangle=\prod_{i}\left\{1+\sum_{a>F}C_{ai}a_{a}^{\dagger}a_{i}\right\}| a^{\dagger}_{i_1} a^{\dagger}_{i_2} \dots a^{\dagger}_{i_n}|0\rangle=\left(1+\sum_{a>F}C_{ai_1}a_{a}^{\dagger}a_{i_1}\right)a^{\dagger}_{i_1}
# \label{_auto3} \tag{10}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto4"></div>
#
# $$
# \begin{equation}
# \times\left(1+\sum_{a>F}C_{ai_2}a_{a}^{\dagger}a_{i_2}\right)a^{\dagger}_{i_2} \dots |0\rangle=\prod_{i}\left(a^{\dagger}_{i}+\sum_{a>F}C_{ai}a_{a}^{\dagger}\right)|0\rangle.
# \label{_auto4} \tag{11}
# \end{equation}
# $$
# ## New operators
#
#
# If we define a new creation operator
# <!-- Equation labels as ordinary links -->
# <div id="eq:newb"></div>
#
# $$
# \begin{equation}
# b^{\dagger}_{i}=a^{\dagger}_{i}+\sum_{a>F}C_{ai}a_{a}^{\dagger}, \label{eq:newb} \tag{12}
# \end{equation}
# $$
# we have
# $$
# |c'\rangle=\prod_{i}b^{\dagger}_{i}|0\rangle=\prod_{i}\left(a^{\dagger}_{i}+\sum_{a>F}C_{ai}a_{a}^{\dagger}\right)|0\rangle,
# $$
# meaning that the new representation of the Slater determinant in second quantization, $|c'\rangle$, looks like our previous ones. However, this representation is not general enough since we have a restriction on the sum over single-particle states in Eq. ([12](#eq:newb)). The single-particle states have all to be above the Fermi level.
# The question then is whether we can construct a general representation of a Slater determinant with a creation operator
# $$
# \tilde{b}^{\dagger}_{i}=\sum_{p}f_{ip}a_{p}^{\dagger},
# $$
# where $f_{ip}$ is a matrix element of a unitary matrix which transforms our creation and annihilation operators
# $a^{\dagger}$ and $a$ to $\tilde{b}^{\dagger}$ and $\tilde{b}$. These new operators define a new representation of a Slater determinant as
# $$
# |\tilde{c}\rangle=\prod_{i}\tilde{b}^{\dagger}_{i}|0\rangle.
# $$
# ## Showing that $|\tilde{c}\rangle= |c'\rangle$
#
#
#
# We need to show that $|\tilde{c}\rangle= |c'\rangle$. We need also to assume that the new state
# is not orthogonal to $|c\rangle$, that is $\langle c| \tilde{c}\rangle \ne 0$. From this it follows that
# $$
# \langle c| \tilde{c}\rangle=\langle 0| a_{i_n}\dots a_{i_1}\left(\sum_{p=i_1}^{i_n}f_{i_1p}a_{p}^{\dagger} \right)\left(\sum_{q=i_1}^{i_n}f_{i_2q}a_{q}^{\dagger} \right)\dots \left(\sum_{t=i_1}^{i_n}f_{i_nt}a_{t}^{\dagger} \right)|0\rangle,
# $$
# which is nothing but the determinant $det(f_{ip})$ which we can, using the intermediate normalization condition,
# normalize to one, that is
# $$
# det(f_{ip})=1,
# $$
# meaning that $f$ has an inverse defined as (since we are dealing with orthogonal, and in our case unitary as well, transformations)
# $$
# \sum_{k} f_{ik}f^{-1}_{kj} = \delta_{ij},
# $$
# and
# $$
# \sum_{j} f^{-1}_{ij}f_{jk} = \delta_{ik}.
# $$
# Using these relations we can then define the linear combination of creation (and annihilation as well)
# operators as
# $$
# \sum_{i}f^{-1}_{ki}\tilde{b}^{\dagger}_{i}=\sum_{i}f^{-1}_{ki}\sum_{p=i_1}^{\infty}f_{ip}a_{p}^{\dagger}=a_{k}^{\dagger}+\sum_{i}\sum_{p=i_{n+1}}^{\infty}f^{-1}_{ki}f_{ip}a_{p}^{\dagger}.
# $$
# Defining
# $$
# c_{kp}=\sum_{i \le F}f^{-1}_{ki}f_{ip},
# $$
# we can redefine
# $$
# a_{k}^{\dagger}+\sum_{i}\sum_{p=i_{n+1}}^{\infty}f^{-1}_{ki}f_{ip}a_{p}^{\dagger}=a_{k}^{\dagger}+\sum_{p=i_{n+1}}^{\infty}c_{kp}a_{p}^{\dagger}=b_k^{\dagger},
# $$
# our starting point. We have shown that our general representation of a Slater determinant
# $$
# |\tilde{c}\rangle=\prod_{i}\tilde{b}^{\dagger}_{i}|0\rangle=|c'\rangle=\prod_{i}b^{\dagger}_{i}|0\rangle,
# $$
# with
# $$
# b_k^{\dagger}=a_{k}^{\dagger}+\sum_{p=i_{n+1}}^{\infty}c_{kp}a_{p}^{\dagger}.
# $$
# This means that we can actually write an ansatz for the ground state of the system as a linear combination of
# terms which contain the ansatz itself $|c\rangle$ with an admixture from an infinity of one-particle-one-hole states. The latter has important consequences when we wish to interpret the Hartree-Fock equations and their stability. We can rewrite the new representation as
# $$
# |c'\rangle = |c\rangle+|\delta c\rangle,
# $$
# where $|\delta c\rangle$ can now be interpreted as a small variation. If we approximate this term with
# contributions from one-particle-one-hole (*1p-1h*) states only, we arrive at
# $$
# |c'\rangle = \left(1+\sum_{ai}\delta C_{ai}a_{a}^{\dagger}a_i\right)|c\rangle.
# $$
# In our derivation of the Hartree-Fock equations we have shown that
# $$
# \langle \delta c| \hat{H} | c\rangle =0,
# $$
# which means that we have to satisfy
# $$
# \langle c|\sum_{ai}\delta C_{ai}\left\{a_{a}^{\dagger}a_i\right\} \hat{H} | c\rangle =0.
# $$
# With this as a background, we are now ready to study the stability of the Hartree-Fock equations.
#
#
#
# ## Hartree-Fock in second quantization and stability of HF solution
#
# The variational condition for deriving the Hartree-Fock equations guarantees only that the expectation value $\langle c | \hat{H} | c \rangle$ has an extreme value, not necessarily a minimum. To figure out whether the extreme value we have found is a minimum, we can use second quantization to analyze our results and find a criterion
# for the above expectation value to be a local minimum. We will use Thouless' theorem and show that
# $$
# \frac{\langle c' |\hat{H} | c'\rangle}{\langle c' |c'\rangle} \ge \langle c |\hat{H} | c\rangle= E_0,
# $$
# with
# $$
# {|c'\rangle} = {|c\rangle + |\delta c\rangle}.
# $$
# Using Thouless' theorem we can write out $|c'\rangle$ as
# <!-- Equation labels as ordinary links -->
# <div id="_auto5"></div>
#
# $$
# \begin{equation}
# {|c'\rangle}=\exp\left\{\sum_{a > F}\sum_{i \le F}\delta C_{ai}a_{a}^{\dagger}a_{i}\right\}| c\rangle
# \label{_auto5} \tag{13}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto6"></div>
#
# $$
# \begin{equation}
# =\left\{1+\sum_{a > F}\sum_{i \le F}\delta C_{ai}a_{a}^{\dagger}
# a_{i}+\frac{1}{2!}\sum_{ab > F}\sum_{ij \le F}\delta C_{ai}\delta C_{bj}a_{a}^{\dagger}a_{i}a_{b}^{\dagger}a_{j}+\dots\right\}
# \label{_auto6} \tag{14}
# \end{equation}
# $$
# where the amplitudes $\delta C$ are small.
#
#
# The norm of $|c'\rangle$ is given by (using the intermediate normalization condition $\langle c' |c\rangle=1$)
# $$
# \langle c' | c'\rangle = 1+\sum_{a>F}
# \sum_{i\le F}|\delta C_{ai}|^2+O(\delta C_{ai}^3).
# $$
# The expectation value for the energy is now given by (using the Hartree-Fock condition)
# $$
# \langle c'|\hat{H}|c'\rangle = \langle c|\hat{H}|c\rangle
# +\sum_{ai}\delta C_{ai}^{*}\langle c|\{a_{i}^{\dagger}a_{a}\}\hat{H}|c\rangle
# +\sum_{bj}\delta C_{bj}\langle c|\hat{H}\{a_{b}^{\dagger}a_{j}\}|c\rangle+
# $$
# $$
# \frac{1}{2!}\sum_{ab>F}
# \sum_{ij\le F}\delta C_{ai}\delta C_{bj}\langle c |\hat{H}a_{a}^{\dagger}a_{i}a_{b}^{\dagger}a_{j}|c\rangle+\frac{1}{2!}\sum_{ab>F}
# \sum_{ij\le F}\delta C_{ai}^*\delta C_{bj}^*\langle c|a_{j}^{\dagger}a_{b}a_{i}^{\dagger}a_{a}\hat{H}|c\rangle
# +\dots
# $$
# We have already calculated the second term on the right-hand side of the previous equation
# <!-- Equation labels as ordinary links -->
# <div id="_auto7"></div>
#
# $$
# \begin{equation}
# \langle c | \left(\{a^\dagger_i a_a\} \hat{H} \{a^\dagger_b a_j\} \right) | c\rangle=\sum_{pq} \sum_{ijab}\delta C_{ai}^*\delta C_{bj} \langle p|\hat{h}_0 |q\rangle
# \langle c | \left(\{a^{\dagger}_i a_a\}\{a^{\dagger}_pa_q\}
# \{a^{\dagger}_b a_j\} \right)| c\rangle
# \label{_auto7} \tag{15}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto8"></div>
#
# $$
# \begin{equation}
# +\frac{1}{4} \sum_{pqrs} \sum_{ijab}\delta C_{ai}^*\delta C_{bj} \langle pq| \hat{v}|rs\rangle
# \langle c | \left(\{a^\dagger_i a_a\}\{a^{\dagger}_p a^{\dagger}_q a_s a_r\} \{a^{\dagger}_b a_j\} \right)| c\rangle ,
# \label{_auto8} \tag{16}
# \end{equation}
# $$
# resulting in
# $$
# E_0\sum_{ai}|\delta C_{ai}|^2+\sum_{ai}|\delta C_{ai}|^2(\varepsilon_a-\varepsilon_i)-\sum_{ijab} \langle aj|\hat{v}| bi\rangle \delta C_{ai}^*\delta C_{bj}.
# $$
# $$
# \frac{1}{2!}\langle c |\left(\{a^\dagger_j a_b\} \{a^\dagger_i a_a\} \hat{V}_N \right) | c\rangle =
# \frac{1}{2!}\langle c |\left( \hat{V}_N \{a^\dagger_a a_i\} \{a^\dagger_b a_j\} \right)^{\dagger} | c\rangle
# $$
# which is nothing but
# $$
# \frac{1}{2!}\langle c | \left( \hat{V}_N \{a^\dagger_a a_i\} \{a^\dagger_b a_j\} \right) | c\rangle^*
# =\frac{1}{2} \sum_{ijab} (\langle ij|\hat{v}|ab\rangle)^*\delta C_{ai}^*\delta C_{bj}^*
# $$
# or
# $$
# \frac{1}{2} \sum_{ijab} (\langle ab|\hat{v}|ij\rangle)\delta C_{ai}^*\delta C_{bj}^*
# $$
# where we have used the relation
# $$
# \langle a |\hat{A} | b\rangle = (\langle b |\hat{A}^{\dagger} | a\rangle)^*
# $$
# due to the hermiticity of $\hat{H}$ and $\hat{V}$.
#
#
# We define two matrix elements
# $$
# A_{ai,bj}=-\langle aj|\hat{v}| bi\rangle
# $$
# and
# $$
# B_{ai,bj}=\langle ab|\hat{v}|ij\rangle
# $$
# both being anti-symmetrized.
#
#
#
# With these definitions we write out the energy as
# <!-- Equation labels as ordinary links -->
# <div id="_auto9"></div>
#
# $$
# \begin{equation}
# \langle c'|H|c'\rangle = \left(1+\sum_{ai}|\delta C_{ai}|^2\right)\langle c |H|c\rangle+\sum_{ai}|\delta C_{ai}|^2(\varepsilon_a^{HF}-\varepsilon_i^{HF})+\sum_{ijab}A_{ai,bj}\delta C_{ai}^*\delta C_{bj}+
# \label{_auto9} \tag{17}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto10"></div>
#
# $$
# \begin{equation}
# \frac{1}{2} \sum_{ijab} B_{ai,bj}^*\delta C_{ai}\delta C_{bj}+\frac{1}{2} \sum_{ijab} B_{ai,bj}\delta C_{ai}^*\delta C_{bj}^*
# +O(\delta C_{ai}^3),
# \label{_auto10} \tag{18}
# \end{equation}
# $$
# which can be rewritten as
# $$
# \langle c'|H|c'\rangle = \left(1+\sum_{ai}|\delta C_{ai}|^2\right)\langle c |H|c\rangle+\Delta E+O(\delta C_{ai}^3),
# $$
# and, skipping higher-order terms, we arrive at
# $$
# \frac{\langle c' |\hat{H} | c'\rangle}{\langle c' |c'\rangle} =E_0+\frac{\Delta E}{\left(1+\sum_{ai}|\delta C_{ai}|^2\right)}.
# $$
# We have defined
# $$
# \Delta E = \frac{1}{2} \langle \chi | \hat{M}| \chi \rangle
# $$
# with the vectors
# $$
# \chi = \left[ \delta C\hspace{0.2cm} \delta C^*\right]^T
# $$
# and the matrix
# $$
# \hat{M}=\left(\begin{array}{cc} \Delta + A & B \\ B^* & \Delta + A^*\end{array}\right),
# $$
# with $\Delta_{ai,bj} = (\varepsilon_a-\varepsilon_i)\delta_{ab}\delta_{ij}$.
#
#
#
# The condition
# $$
# \Delta E = \frac{1}{2} \langle \chi | \hat{M}| \chi \rangle \ge 0
# $$
# for an arbitrary vector
# $$
# \chi = \left[ \delta C\hspace{0.2cm} \delta C^*\right]^T
# $$
# means that all eigenvalues of the matrix have to be larger than or equal zero.
# A necessary (but no sufficient) condition is that the matrix elements (for all $ai$ )
# $$
# (\varepsilon_a-\varepsilon_i)\delta_{ab}\delta_{ij}+A_{ai,bj} \ge 0.
# $$
# This equation can be used as a first test of the stability of the Hartree-Fock equation.
| CompPhysics/ComputationalPhysics2 | doc/LectureNotes/_build/jupyter_execute/hartreefocktheory.py | Python | cc0-1.0 | 69,233 | [
"DIRAC"
] | 0d653d463fab22d7a9dc6a69390abac8fb65a480fbc92ad63af8df85e03a3025 |
#!/usr/bin/env python
#
# Copyright 2013-2015 Matthew Wall, Andrew Miles
# See the file LICENSE.txt for your full rights.
#
# Thanks to Andrew Miles for figuring out how to read history records
# and many station parameters.
# Thanks to Sebastian John for the te923tool written in C (v0.6.1):
# http://te923.fukz.org/
# Thanks to Mark Teel for the te923 implementation in wview:
# http://www.wviewweather.com/
# Thanks to mrbalky:
# http://www.mrbalky.com/tag/te923/
"""Classes and functions for interfacing with te923 weather stations.
These stations were made by Hideki and branded as Honeywell, Meade, IROX Pro X,
Mebus TE923, and TFA Nexus. They date back to at least 2007 and are still
sold (sparsely in the US, more commonly in Europe) as of 2013.
Apparently there are at least two different memory sizes. One version can
store about 200 records, a newer version can store about 3300 records.
The firmware version of each component can be read by talking to the station,
assuming that the component has a wireless connection to the station, of
course.
To force connection between station and sensors, press and hold DOWN button.
To reset all station parameters:
- press and hold SNOOZE and UP for 4 seconds
- press SET button; main unit will beep
- wait until beeping stops
- remove batteries and wait 10 seconds
- reinstall batteries
From the Meade TE9233W manual (TE923W-M_IM(ENG)_BK_010511.pdf):
Remote temperature/humidty sampling interval: 10 seconds
Remote temperature/humidity transmit interval: about 47 seconds
Indoor temperature/humidity sampling interval: 10 seconds
Indoor pressure sampling interval: 20 minutes
Rain counter transmitting interval: 183 seconds
Wind direction transmitting interval: 33 seconds
Wind/Gust speed display update interval: 33 seconds
Wind/Gust sampling interval: 11 seconds
UV transmitting interval: 300 seconds
Rain counter resolution: 0.03 in (0.6578 mm)
Battery status of each sensor is checked every hour
This implementation polls the station for data. Use the polling_interval to
control the frequency of polling. Default is 10 seconds.
The manual says that a single bucket tip is 0.03 inches. In reality, a single
bucket tip is between 0.02 and 0.03 in (0.508 to 0.762 mm). This driver uses
a value of 0.02589 in (0.6578 mm) per bucket tip.
The station has altitude, latitude, longitude, and time.
Setting the time does not persist. If you set the station time using weewx,
the station initially indicates that it is set to the new time, but then it
reverts.
Notes From/About Other Implementations
Apparently te923tool came first, then wview copied a bit from it. te923tool
provides more detail about the reason for invalid values, for example, values
out of range versus no link with sensors. However, these error states have not
yet been corroborated.
There are some disagreements between the wview and te923tool implementations.
From the te923tool:
- reading from usb in 8 byte chunks instead of all at once
- length of buffer is 35, but reads are 32-byte blocks
- windspeed and windgust state can never be -1
- index 29 in rain count, also in wind dir
From wview:
- wview does the 8-byte reads using interruptRead
- wview ignores the windchill value from the station
- wview treats the pressure reading as barometer (SLP), then calculates the
station pressure and altimeter pressure
Memory Map
0x020000 - Last sample:
[00] = Month (Bits 0-3), Weekday (1 = Monday) (Bits 7:4)
[01] = Day
[02] = Hour
[03] = Minute
[04] ... reading as below
0x020001 - Current readings:
[00] = Temp In Low BCD
[01] = Temp In High BCD (Bit 5 = 0.05 deg, Bit 7 = -ve)
[02] = Humidity In
[03] = Temp Channel 1 Low (No link = Xa)
[04] = Temp Channel 1 High (Bit 6 = 1, Bit 5 = 0.05 deg, Bit 7 = +ve)
[05] = Humidity Channel 1 (No link = Xa)
[06] = Temp Channel 2 Low (No link = Xa)
[07] = Temp Channel 2 High (Bit 6 = 1, Bit 5 = 0.05 deg, Bit 7 = +ve)
[08] = Humidity Channel 2 (No link = Xa)
[09] = Temp Channel 3 Low (No link = Xa)
[10] = Temp Channel 3 High (Bit 6 = 1, Bit 5 = 0.05 deg, Bit 7 = +ve)
[11] = Humidity Channel 3 (No link = Xa)
[12] = Temp Channel 4 Low (No link = Xa)
[13] = Temp Channel 4 High (Bit 6 = 1, Bit 5 = 0.05 deg, Bit 7 = +ve)
[14] = Humidity Channel 4 (No link = Xa)
[15] = Temp Channel 5 Low (No link = Xa)
[16] = Temp Channel 5 High (Bit 6 = 1, Bit 5 = 0.05 deg, Bit 7 = +ve)
[17] = Humidity Channel 5 (No link = Xa)
[18] = UV Low (No link = ff)
[19] = UV High (No link = ff)
[20] = Sea-Level Pressure Low
[21] = Sea-Level Pressure High
[22] = Forecast (Bits 0-2) Storm (Bit 3)
[23] = Wind Chill Low (No link = ff)
[24] = Wind Chill High (Bit 6 = 1, Bit 5 = 0.05 deg, Bit 7 = +ve, No link = ff)
[25] = Gust Low (No link = ff)
[26] = Gust High (No link = ff)
[27] = Wind Low (No link = ff)
[28] = Wind High (No link = ff)
[29] = Wind Dir (Bits 0-3)
[30] = Rain Low
[31] = Rain High
(1) Memory map values related to sensors use same coding as above
(2) Checksum are via subtraction: 0x100 - sum of all values, then add 0x100
until positive i.e. 0x100 - 0x70 - 0x80 - 0x28 = -0x18, 0x18 + 0x100 = 0xE8
SECTION 1: Date & Local location
0x000000 - Unknown - changes if date section is modified but still changes if
same data is written so not a checksum
0x000001 - Unknown (always 0)
0x000002 - Day (Reverse BCD) (Changes at midday!)
0x000003 - Unknown
0x000004 - Year (Reverse BCD)
0x000005 - Month (Bits 7:4), Weekday (Bits 3:1)
0x000006 - Latitude (degrees) (reverse BCD)
0x000007 - Latitude (minutes) (reverse BCD)
0x000008 - Longitude (degrees) (reverse BCD)
0x000009 - Longitude (minutes) (reverse BCD)
0x00000A - Bit 7 - Set if Latitude southerly
Bit 6 - Set if Longitude easterly
Bit 4 - Set if DST is always on
Bit 3 - Set if -ve TZ
Bits 0 & 1 - Set if half-hour TZ
0x00000B - Longitude (100 degrees) (Bits 7:4), DST zone (Bits 3:0)
0x00000C - City code (High) (Bits 7:4)
Language (Bits 3:0)
0 - English
1 - German
2 - French
3 - Italian
4 - Spanish
6 - Dutch
0x00000D - Timezone (hour) (Bits 7:4), City code (Low) (Bits 3:0)
0x00000E - Bit 2 - Set if 24hr time format
Bit 1 - Set if 12hr time format
0x00000F - Checksum of 00:0E
SECTION 2: Time Alarms
0x000010 - Weekday alarm (hour) (reverse BCD)
Bit 3 - Set if single alarm active
Bit 2 - Set if weekday-alarm active
0x000011 - Weekday alarm (minute) (reverse BCD)
0x000012 - Single alarm (hour) (reverse BCD) (Bit 3 - Set if pre-alarm active)
0x000013 - Single alarm (minute) (reverse BCD)
0x000014 - Bits 7-4: Pre-alarm (1-5 = 15,30,45,60 or 90 mins)
Bits 3-0: Snooze value
0x000015 - Checksum of 10:14
SECTION 3: Alternate Location
0x000016 - Latitude (degrees) (reverse BCD)
0x000017 - Latitude (minutes) (reverse BCD)
0x000018 - Longitude (degrees) (reverse BCD)
0x000019 - Longitude (minutes) (reverse BCD)
0x00001A - Bit 7 - Set if Latitude southerly
Bit 6 - Set if Longitude easterly
Bit 4 - Set if DST is always on
Bit 3 - Set if -ve TZ
Bits 0 & 1 - Set if half-hour TZ
0x00001B - Longitude (100 degrees) (Bits 7:4), DST zone (Bits 3:0)
0x00001C - City code (High) (Bits 7:4), Unknown (Bits 3:0)
0x00001D - Timezone (hour) (Bits 7:4), City code (Low) (Bits 3:0)
0x00001E - Checksum of 16:1D
SECTION 4: Temperature Alarms
0x00001F:20 - High Temp Alarm Value
0x000021:22 - Low Temp Alarm Value
0x000023 - Checksum of 1F:22
SECTION 5: Min/Max 1
0x000024:25 - Min In Temp
0x000026:27 - Max in Temp
0x000028 - Min In Humidity
0x000029 - Max In Humidity
0x00002A:2B - Min Channel 1 Temp
0x00002C:2D - Max Channel 1 Temp
0x00002E - Min Channel 1 Humidity
0x00002F - Max Channel 1 Humidity
0x000030:31 - Min Channel 2 Temp
0x000032:33 - Max Channel 2 Temp
0x000034 - Min Channel 2 Humidity
0x000035 - Max Channel 2 Humidity
0x000036:37 - Min Channel 3 Temp
0x000038:39 - Max Channel 3 Temp
0x00003A - Min Channel 3 Humidity
0x00003B - Max Channel 3 Humidity
0x00003C:3D - Min Channel 4 Temp
0x00003F - Checksum of 24:3E
SECTION 6: Min/Max 2
0x00003E,40 - Max Channel 4 Temp
0x000041 - Min Channel 4 Humidity
0x000042 - Max Channel 4 Humidity
0x000043:44 - Min Channel 5 Temp
0x000045:46 - Max Channel 5 Temp
0x000047 - Min Channel 5 Humidity
0x000048 - Max Channel 5 Humidity
0x000049 - ? Values rising/falling ?
Bit 5 : Chan 1 temp falling
Bit 2 : In temp falling
0x00004A:4B - 0xFF (Unused)
0x00004C - Battery status
Bit 7: Rain
Bit 6: Wind
Bit 5: UV
Bits 4:0: Channel 5:1
0x00004D:58 - 0xFF (Unused)
0x000059 - Checksum of 3E:58
SECTION 7: Altitude
0x00005A:5B - Altitude (Low:High)
0x00005C - Bit 3 - Set if altitude negative
Bit 2 - Pressure falling?
Bit 1 - Always set
0X00005D - Checksum of 5A:5C
0x00005E:5F - Unused (0xFF)
SECTION 8: Pressure 1
0x000060 - Month of last reading (Bits 0-3), Weekday (1 = Monday) (Bits 7:4)
0x000061 - Day of last reading
0x000062 - Hour of last reading
0x000063 - Minute of last reading
0x000064:65 - T -0 Hours
0x000066:67 - T -1 Hours
0x000068:69 - T -2 Hours
0x00006A:6B - T -3 Hours
0x00006C:6D - T -4 Hours
0x00006E:6F - T -5 Hours
0x000070:71 - T -6 Hours
0x000072:73 - T -7 Hours
0x000074:75 - T -8 Hours
0x000076:77 - T -9 Hours
0x000078:79 - T -10 Hours
0x00007B - Checksum of 60:7A
SECTION 9: Pressure 2
0x00007A,7C - T -11 Hours
0x00007D:7E - T -12 Hours
0x00007F:80 - T -13 Hours
0x000081:82 - T -14 Hours
0x000083:84 - T -15 Hours
0x000085:86 - T -16 Hours
0x000087:88 - T -17 Hours
0x000089:8A - T -18 Hours
0x00008B:8C - T -19 Hours
0x00008D:8E - T -20 Hours
0x00008f:90 - T -21 Hours
0x000091:92 - T -22 Hours
0x000093:94 - T -23 Hours
0x000095:96 - T -24 Hours
0x000097 - Checksum of 7C:96
SECTION 10: Versions
0x000098 - firmware versions (barometer)
0x000099 - firmware versions (uv)
0x00009A - firmware versions (rcc)
0x00009B - firmware versions (wind)
0x00009C - firmware versions (system)
0x00009D - Checksum of 98:9C
0x00009E:9F - 0xFF (Unused)
SECTION 11: Rain/Wind Alarms 1
0x0000A0 - Alarms
Bit2 - Set if rain alarm active
Bit 1 - Set if wind alarm active
Bit 0 - Set if gust alarm active
0x0000A1:A2 - Rain alarm value (High:Low) (BCD)
0x0000A3 - Unknown
0x0000A4:A5 - Wind speed alarm value
0x0000A6 - Unknown
0x0000A7:A8 - Gust alarm value
0x0000A9 - Checksum of A0:A8
SECTION 12: Rain/Wind Alarms 2
0x0000AA:AB - Max daily wind speed
0x0000AC:AD - Max daily gust speed
0x0000AE:AF - Rain bucket count (yesterday) (Low:High)
0x0000B0:B1 - Rain bucket count (week) (Low:High)
0x0000B2:B3 - Rain bucket count (month) (Low:High)
0x0000B4 - Checksum of AA:B3
0x0000B5:E0 - 0xFF (Unused)
SECTION 13: Unknown
0x0000E1:F9 - 0x15 (Unknown)
0x0000FA - Checksum of E1:F9
SECTION 14: Archiving
0x0000FB - Unknown
0x0000FC - Memory size (0 = 0x1fff, 2 = 0x20000)
0x0000FD - Number of records (High)
0x0000FE - Archive interval
1-11 = 5, 10, 20, 30, 60, 90, 120, 180, 240, 360, 1440 mins
0x0000FF - Number of records (Low)
0x000100 - Checksum of FB:FF
0x000101 - Start of historical records:
[00] = Month (Bits 0-3), Weekday (1 = Monday) (Bits 7:4)
[01] = Day
[02] = Hour
[03] = Minute
[04] = Temp In Low BCD
[05] = Temp In High BCD (Bit 5 = 0.05 deg, Bit 7 = -ve)
[06] = Humidity In
[07] = Temp Channel 1 Low (No link = Xa)
[08] = Temp Channel 1 High (Bit 6 = 1, Bit 5 = 0.05 deg, Bit 7 = +ve)
[09] = Humidity Channel 1 (No link = Xa)
[10] = Temp Channel 2 Low (No link = Xa)
[11] = Temp Channel 2 High (Bit 6 = 1, Bit 5 = 0.05 deg, Bit 7 = +ve)
[12] = Humidity Channel 2 (No link = Xa)
[13] = Temp Channel 3 Low (No link = Xa)
[14] = Temp Channel 3 High (Bit 6 = 1, Bit 5 = 0.05 deg, Bit 7 = +ve)
[15] = Checksum of bytes 0:14
[16] = Humidity Channel 3 (No link = Xa)
[17] = Temp Channel 4 Low (No link = Xa)
[18] = Temp Channel 4 High (Bit 6 = 1, Bit 5 = 0.05 deg, Bit 7 = +ve)
[19] = Humidity Channel 4 (No link = Xa)
[20] = Temp Channel 5 Low (No link = Xa)
[21] = Temp Channel 5 High (Bit 6 = 1, Bit 5 = 0.05 deg, Bit 7 = +ve)
[22] = Humidity Channel 5 (No link = Xa)
[23] = UV Low (No link = ff)
[24] = UV High (No link = ff)
[25] = Sea-Level Pressure Low
[26] = Sea-Level Pressure High
[27] = Forecast (Bits 0-2) Storm (Bit 3)
[28] = Wind Chill Low (No link = ff)
[29] = Wind Chill High (Bit 6 = 1, Bit 5 = 0.05 deg, Bit 7 = +ve, No link = ee)
[30] = Gust Low (No link = ff)
[31] = Gust High (No link = ff)
[32] = Wind Low (No link = ff)
[33] = Wind High (No link = ff)
[34] = Wind Dir (Bits 0-3)
[35] = Rain Low
[36] = Rain High
[37] = Checksum of bytes 16:36
USB Protocol
The station shows up on the USB as a HID. Control packet is 8 bytes.
Read from station:
0x05 (Length)
0xAF (Read)
Addr (Bit 17:16), Addr (Bits 15:8), Addr (Bits 7:0), CRC, Unused, Unused
Read acknowledge:
0x24 (Ack)
0xAF (Read)
Addr (Bit 17:16), Addr (Bits 15:8), Addr (Bits 7:0), CRC, Unused, Unused
Write to station:
0x07 (Length)
0xAE (Write)
Addr (Bit 17:16), Addr (Bits 15:8), Addr (Bits 7:0), Data1, Data2, Data3
... Data continue with 3 more packets of length 7 then ...
0x02 (Length), Data32, CRC, Unused, Unused, Unused, Unused, Unused, Unused
Reads returns 32 bytes. Write expects 32 bytes as well, but address must be
aligned to a memory-map section start address and will only write to that
section.
Schema Additions
The station emits more sensor data than the default schema (wview schema) can
handle. This driver includes a mapping between the sensor data and the wview
schema, plus additional fields. To use the default mapping with the wview
schema, these are the additional fields that must be added to the schema:
('extraTemp4', 'REAL'),
('extraHumid3', 'REAL'),
('extraHumid4', 'REAL'),
('extraBatteryStatus1', 'REAL'),
('extraBatteryStatus2', 'REAL'),
('extraBatteryStatus3', 'REAL'),
('extraBatteryStatus4', 'REAL'),
('windLinkStatus', 'REAL'),
('rainLinkStatus', 'REAL'),
('uvLinkStatus', 'REAL'),
('outLinkStatus', 'REAL'),
('extraLinkStatus1', 'REAL'),
('extraLinkStatus2', 'REAL'),
('extraLinkStatus3', 'REAL'),
('extraLinkStatus4', 'REAL'),
('forecast', 'REAL'),
('storm', 'REAL'),
"""
# TODO: figure out how to read gauge pressure instead of slp
# TODO: figure out how to clear station memory
# TODO: add option to reset rain total
# FIXME: set-date and sync-date do not work - something reverts the clock
# FIXME: is there any way to get rid of the bad header byte on first read?
from __future__ import with_statement
import syslog
import time
import usb
import weewx.drivers
import weewx.wxformulas
from weeutil.weeutil import timestamp_to_string
DRIVER_NAME = 'TE923'
DRIVER_VERSION = '0.24'
def loader(config_dict, engine): # @UnusedVariable
return TE923Driver(**config_dict[DRIVER_NAME])
def configurator_loader(config_dict): # @UnusedVariable
return TE923Configurator()
def confeditor_loader():
return TE923ConfEditor()
DEBUG_READ = 1
DEBUG_WRITE = 1
DEBUG_DECODE = 0
# map the station data to the default database schema, plus extensions
DEFAULT_MAP = {
'windLinkStatus': 'link_wind',
'windBatteryStatus': 'bat_wind',
'rainLinkStatus': 'link_rain',
'rainBatteryStatus': 'bat_rain',
'uvLinkStatus': 'link_uv',
'uvBatteryStatus': 'bat_uv',
'inTemp': 't_in',
'inHumidity': 'h_in',
'outTemp': 't_1',
'outHumidity': 'h_1',
'outTempBatteryStatus': 'bat_1',
'outLinkStatus': 'link_1',
'extraTemp1': 't_2',
'extraHumid1': 'h_2',
'extraBatteryStatus1': 'bat_2',
'extraLinkStatus1': 'link_2',
'extraTemp2': 't_3',
'extraHumid2': 'h_3',
'extraBatteryStatus2': 'bat_3',
'extraLinkStatus2': 'link_3',
'extraTemp3': 't_4',
'extraHumid3': 'h_4',
'extraBatteryStatus3': 'bat_4',
'extraLinkStatus3': 'link_4',
'extraTemp4': 't_5',
'extraHumid4': 'h_5',
'extraBatteryStatus4': 'bat_5',
'extraLinkStatus4': 'link_5'
}
def logmsg(dst, msg):
syslog.syslog(dst, 'te923: %s' % msg)
def logdbg(msg):
logmsg(syslog.LOG_DEBUG, msg)
def loginf(msg):
logmsg(syslog.LOG_INFO, msg)
def logcrt(msg):
logmsg(syslog.LOG_CRIT, msg)
def logerr(msg):
logmsg(syslog.LOG_ERR, msg)
class TE923ConfEditor(weewx.drivers.AbstractConfEditor):
@property
def default_stanza(self):
return """
[TE923]
# This section is for the Hideki TE923 series of weather stations.
# The station model, e.g., 'Meade TE923W' or 'TFA Nexus'
model = TE923
# The driver to use:
driver = weewx.drivers.te923
# The default configuration associates the channel 1 sensor with outTemp
# and outHumidity. To change this, or to associate other channels with
# specific columns in the database schema, use the following map.
#[[sensor_map]]
%s
""" % "\n".join([" # %s = %s" % (x, DEFAULT_MAP[x]) for x in DEFAULT_MAP])
class TE923Configurator(weewx.drivers.AbstractConfigurator):
LOCSTR = "CITY|USR,LONG_DEG,LONG_MIN,E|W,LAT_DEG,LAT_MIN,N|S,TZ,DST"
ALMSTR = "WEEKDAY,SINGLE,PRE_ALARM,SNOOZE,MAXTEMP,MINTEMP,RAIN,WIND,GUST"
idx_to_interval = {
1: "5 min", 2: "10 min", 3: "20 min", 4: "30 min", 5: "60 min",
6: "90 min", 7: "2 hour", 8: "3 hour", 9: "4 hour", 10: "6 hour",
11: "1 day"}
interval_to_idx = {
"5m": 1, "10m": 2, "20m": 3, "30m": 4, "60m": 5, "90m": 6,
"2h": 7, "3h": 8, "4h": 9, "6h": 10, "1d": 11}
forecast_dict = {
0: 'heavy snow',
1: 'light snow',
2: 'heavy rain',
3: 'light rain',
4: 'heavy clouds',
5: 'light clouds',
6: 'sunny',
}
dst_dict = {
0: ["NO", 'None'],
1: ["SA", 'Australian'],
2: ["SB", 'Brazilian'],
3: ["SC", 'Chilian'],
4: ["SE", 'European'],
5: ["SG", 'Eqyptian'],
6: ["SI", 'Cuban'],
7: ["SJ", 'Iraq and Syria'],
8: ["SK", 'Irkutsk and Moscow'],
9: ["SM", 'Uruguayan'],
10: ["SN", 'Nambian'],
11: ["SP", 'Paraguayan'],
12: ["SQ", 'Iranian'],
13: ["ST", 'Tasmanian'],
14: ["SU", 'American'],
15: ["SZ", 'New Zealand'],
}
city_dict = {
0: ["ADD", 3, 0, 9, 01, "N", 38, 44, "E", "Addis Ababa, Ethiopia"],
1: ["ADL", 9.5, 1, 34, 55, "S", 138, 36, "E", "Adelaide, Australia"],
2: ["AKR", 2, 4, 39, 55, "N", 32, 55, "E", "Ankara, Turkey"],
3: ["ALG", 1, 0, 36, 50, "N", 3, 0, "E", "Algiers, Algeria"],
4: ["AMS", 1, 4, 52, 22, "N", 4, 53, "E", "Amsterdam, Netherlands"],
5: ["ARN", 1, 4, 59, 17, "N", 18, 3, "E", "Stockholm Arlanda, Sweden"],
6: ["ASU", -3, 11, 25, 15, "S", 57, 40, "W", "Asuncion, Paraguay"],
7: ["ATH", 2, 4, 37, 58, "N", 23, 43, "E", "Athens, Greece"],
8: ["ATL", -5, 14, 33, 45, "N", 84, 23, "W", "Atlanta, Ga."],
9: ["AUS", -6, 14, 30, 16, "N", 97, 44, "W", "Austin, Tex."],
10: ["BBU", 2, 4, 44, 25, "N", 26, 7, "E", "Bucharest, Romania"],
11: ["BCN", 1, 4, 41, 23, "N", 2, 9, "E", "Barcelona, Spain"],
12: ["BEG", 1, 4, 44, 52, "N", 20, 32, "E", "Belgrade, Yugoslavia"],
13: ["BEJ", 8, 0, 39, 55, "N", 116, 25, "E", "Beijing, China"],
14: ["BER", 1, 4, 52, 30, "N", 13, 25, "E", "Berlin, Germany"],
15: ["BHM", -6, 14, 33, 30, "N", 86, 50, "W", "Birmingham, Ala."],
16: ["BHX", 0, 4, 52, 25, "N", 1, 55, "W", "Birmingham, England"],
17: ["BKK", 7, 0, 13, 45, "N", 100, 30, "E", "Bangkok, Thailand"],
18: ["BNA", -6, 14, 36, 10, "N", 86, 47, "W", "Nashville, Tenn."],
19: ["BNE", 10, 0, 27, 29, "S", 153, 8, "E", "Brisbane, Australia"],
20: ["BOD", 1, 4, 44, 50, "N", 0, 31, "W", "Bordeaux, France"],
21: ["BOG", -5, 0, 4, 32, "N", 74, 15, "W", "Bogota, Colombia"],
22: ["BOS", -5, 14, 42, 21, "N", 71, 5, "W", "Boston, Mass."],
23: ["BRE", 1, 4, 53, 5, "N", 8, 49, "E", "Bremen, Germany"],
24: ["BRU", 1, 4, 50, 52, "N", 4, 22, "E", "Brussels, Belgium"],
25: ["BUA", -3, 0, 34, 35, "S", 58, 22, "W", "Buenos Aires, Argentina"],
26: ["BUD", 1, 4, 47, 30, "N", 19, 5, "E", "Budapest, Hungary"],
27: ["BWI", -5, 14, 39, 18, "N", 76, 38, "W", "Baltimore, Md."],
28: ["CAI", 2, 5, 30, 2, "N", 31, 21, "E", "Cairo, Egypt"],
29: ["CCS", -4, 0, 10, 28, "N", 67, 2, "W", "Caracas, Venezuela"],
30: ["CCU", 5.5, 0, 22, 34, "N", 88, 24, "E", "Calcutta, India (as Kolkata)"],
31: ["CGX", -6, 14, 41, 50, "N", 87, 37, "W", "Chicago, IL"],
32: ["CLE", -5, 14, 41, 28, "N", 81, 37, "W", "Cleveland, Ohio"],
33: ["CMH", -5, 14, 40, 0, "N", 83, 1, "W", "Columbus, Ohio"],
34: ["COR", -3, 0, 31, 28, "S", 64, 10, "W", "Cordoba, Argentina"],
35: ["CPH", 1, 4, 55, 40, "N", 12, 34, "E", "Copenhagen, Denmark"],
36: ["CPT", 2, 0, 33, 55, "S", 18, 22, "E", "Cape Town, South Africa"],
37: ["CUU", -6, 14, 28, 37, "N", 106, 5, "W", "Chihuahua, Mexico"],
38: ["CVG", -5, 14, 39, 8, "N", 84, 30, "W", "Cincinnati, Ohio"],
39: ["DAL", -6, 14, 32, 46, "N", 96, 46, "W", "Dallas, Tex."],
40: ["DCA", -5, 14, 38, 53, "N", 77, 2, "W", "Washington, D.C."],
41: ["DEL", 5.5, 0, 28, 35, "N", 77, 12, "E", "New Delhi, India"],
42: ["DEN", -7, 14, 39, 45, "N", 105, 0, "W", "Denver, Colo."],
43: ["DKR", 0, 0, 14, 40, "N", 17, 28, "W", "Dakar, Senegal"],
44: ["DTW", -5, 14, 42, 20, "N", 83, 3, "W", "Detroit, Mich."],
45: ["DUB", 0, 4, 53, 20, "N", 6, 15, "W", "Dublin, Ireland"],
46: ["DUR", 2, 0, 29, 53, "S", 30, 53, "E", "Durban, South Africa"],
47: ["ELP", -7, 14, 31, 46, "N", 106, 29, "W", "El Paso, Tex."],
48: ["FIH", 1, 0, 4, 18, "S", 15, 17, "E", "Kinshasa, Congo"],
49: ["FRA", 1, 4, 50, 7, "N", 8, 41, "E", "Frankfurt, Germany"],
50: ["GLA", 0, 4, 55, 50, "N", 4, 15, "W", "Glasgow, Scotland"],
51: ["GUA", -6, 0, 14, 37, "N", 90, 31, "W", "Guatemala City, Guatemala"],
52: ["HAM", 1, 4, 53, 33, "N", 10, 2, "E", "Hamburg, Germany"],
53: ["HAV", -5, 6, 23, 8, "N", 82, 23, "W", "Havana, Cuba"],
54: ["HEL", 2, 4, 60, 10, "N", 25, 0, "E", "Helsinki, Finland"],
55: ["HKG", 8, 0, 22, 20, "N", 114, 11, "E", "Hong Kong, China"],
56: ["HOU", -6, 14, 29, 45, "N", 95, 21, "W", "Houston, Tex."],
57: ["IKT", 8, 8, 52, 30, "N", 104, 20, "E", "Irkutsk, Russia"],
58: ["IND", -5, 0, 39, 46, "N", 86, 10, "W", "Indianapolis, Ind."],
59: ["JAX", -5, 14, 30, 22, "N", 81, 40, "W", "Jacksonville, Fla."],
60: ["JKT", 7, 0, 6, 16, "S", 106, 48, "E", "Jakarta, Indonesia"],
61: ["JNB", 2, 0, 26, 12, "S", 28, 4, "E", "Johannesburg, South Africa"],
62: ["KIN", -5, 0, 17, 59, "N", 76, 49, "W", "Kingston, Jamaica"],
63: ["KIX", 9, 0, 34, 32, "N", 135, 30, "E", "Osaka, Japan"],
64: ["KUL", 8, 0, 3, 8, "N", 101, 42, "E", "Kuala Lumpur, Malaysia"],
65: ["LAS", -8, 14, 36, 10, "N", 115, 12, "W", "Las Vegas, Nev."],
66: ["LAX", -8, 14, 34, 3, "N", 118, 15, "W", "Los Angeles, Calif."],
67: ["LIM", -5, 0, 12, 0, "S", 77, 2, "W", "Lima, Peru"],
68: ["LIS", 0, 4, 38, 44, "N", 9, 9, "W", "Lisbon, Portugal"],
69: ["LON", 0, 4, 51, 32, "N", 0, 5, "W", "London, England"],
70: ["LPB", -4, 0, 16, 27, "S", 68, 22, "W", "La Paz, Bolivia"],
71: ["LPL", 0, 4, 53, 25, "N", 3, 0, "W", "Liverpool, England"],
72: ["LYO", 1, 4, 45, 45, "N", 4, 50, "E", "Lyon, France"],
73: ["MAD", 1, 4, 40, 26, "N", 3, 42, "W", "Madrid, Spain"],
74: ["MEL", 10, 1, 37, 47, "S", 144, 58, "E", "Melbourne, Australia"],
75: ["MEM", -6, 14, 35, 9, "N", 90, 3, "W", "Memphis, Tenn."],
76: ["MEX", -6, 14, 19, 26, "N", 99, 7, "W", "Mexico City, Mexico"],
77: ["MIA", -5, 14, 25, 46, "N", 80, 12, "W", "Miami, Fla."],
78: ["MIL", 1, 4, 45, 27, "N", 9, 10, "E", "Milan, Italy"],
79: ["MKE", -6, 14, 43, 2, "N", 87, 55, "W", "Milwaukee, Wis."],
80: ["MNL", 8, 0, 14, 35, "N", 120, 57, "E", "Manila, Philippines"],
81: ["MOW", 3, 8, 55, 45, "N", 37, 36, "E", "Moscow, Russia"],
82: ["MRS", 1, 4, 43, 20, "N", 5, 20, "E", "Marseille, France"],
83: ["MSP", -6, 14, 44, 59, "N", 93, 14, "W", "Minneapolis, Minn."],
84: ["MSY", -6, 14, 29, 57, "N", 90, 4, "W", "New Orleans, La."],
85: ["MUC", 1, 4, 48, 8, "N", 11, 35, "E", "Munich, Germany"],
86: ["MVD", -3, 9, 34, 53, "S", 56, 10, "W", "Montevideo, Uruguay"],
87: ["NAP", 1, 4, 40, 50, "N", 14, 15, "E", "Naples, Italy"],
88: ["NBO", 3, 0, 1, 25, "S", 36, 55, "E", "Nairobi, Kenya"],
89: ["NKG", 8, 0, 32, 3, "N", 118, 53, "E", "Nanjing (Nanking), China"],
90: ["NYC", -5, 14, 40, 47, "N", 73, 58, "W", "New York, N.Y."],
91: ["ODS", 2, 4, 46, 27, "N", 30, 48, "E", "Odessa, Ukraine"],
92: ["OKC", -6, 14, 35, 26, "N", 97, 28, "W", "Oklahoma City, Okla."],
93: ["OMA", -6, 14, 41, 15, "N", 95, 56, "W", "Omaha, Neb."],
94: ["OSL", 1, 4, 59, 57, "N", 10, 42, "E", "Oslo, Norway"],
95: ["PAR", 1, 4, 48, 48, "N", 2, 20, "E", "Paris, France"],
96: ["PDX", -8, 14, 45, 31, "N", 122, 41, "W", "Portland, Ore."],
97: ["PER", 8, 0, 31, 57, "S", 115, 52, "E", "Perth, Australia"],
98: ["PHL", -5, 14, 39, 57, "N", 75, 10, "W", "Philadelphia, Pa."],
99: ["PHX", -7, 0, 33, 29, "N", 112, 4, "W", "Phoenix, Ariz."],
100: ["PIT", -5, 14, 40, 27, "N", 79, 57, "W", "Pittsburgh, Pa."],
101: ["PRG", 1, 4, 50, 5, "N", 14, 26, "E", "Prague, Czech Republic"],
102: ["PTY", -5, 0, 8, 58, "N", 79, 32, "W", "Panama City, Panama"],
103: ["RGN", 6.5, 0, 16, 50, "N", 96, 0, "E", "Rangoon, Myanmar"],
104: ["RIO", -3, 2, 22, 57, "S", 43, 12, "W", "Rio de Janeiro, Brazil"],
105: ["RKV", 0, 0, 64, 4, "N", 21, 58, "W", "Reykjavik, Iceland"],
106: ["ROM", 1, 4, 41, 54, "N", 12, 27, "E", "Rome, Italy"],
107: ["SAN", -8, 14, 32, 42, "N", 117, 10, "W", "San Diego, Calif."],
108: ["SAT", -6, 14, 29, 23, "N", 98, 33, "W", "San Antonio, Tex."],
109: ["SCL", -4, 3, 33, 28, "S", 70, 45, "W", "Santiago, Chile"],
110: ["SEA", -8, 14, 47, 37, "N", 122, 20, "W", "Seattle, Wash."],
111: ["SFO", -8, 14, 37, 47, "N", 122, 26, "W", "San Francisco, Calif."],
112: ["SHA", 8, 0, 31, 10, "N", 121, 28, "E", "Shanghai, China"],
113: ["SIN", 8, 0, 1, 14, "N", 103, 55, "E", "Singapore, Singapore"],
114: ["SJC", -8, 14, 37, 20, "N", 121, 53, "W", "San Jose, Calif."],
115: ["SOF", 2, 4, 42, 40, "N", 23, 20, "E", "Sofia, Bulgaria"],
116: ["SPL", -3, 2, 23, 31, "S", 46, 31, "W", "Sao Paulo, Brazil"],
117: ["SSA", -3, 0, 12, 56, "S", 38, 27, "W", "Salvador, Brazil"],
118: ["STL", -6, 14, 38, 35, "N", 90, 12, "W", "St. Louis, Mo."],
119: ["SYD", 10, 1, 34, 0, "S", 151, 0, "E", "Sydney, Australia"],
120: ["TKO", 9, 0, 35, 40, "N", 139, 45, "E", "Tokyo, Japan"],
121: ["TPA", -5, 14, 27, 57, "N", 82, 27, "W", "Tampa, Fla."],
122: ["TRP", 2, 0, 32, 57, "N", 13, 12, "E", "Tripoli, Libya"],
123: ["USR", 0, 0, 0, 0, "N", 0, 0, "W", "User defined city"],
124: ["VAC", -8, 14, 49, 16, "N", 123, 7, "W", "Vancouver, Canada"],
125: ["VIE", 1, 4, 48, 14, "N", 16, 20, "E", "Vienna, Austria"],
126: ["WAW", 1, 4, 52, 14, "N", 21, 0, "E", "Warsaw, Poland"],
127: ["YMX", -5, 14, 45, 30, "N", 73, 35, "W", "Montreal, Que., Can."],
128: ["YOW", -5, 14, 45, 24, "N", 75, 43, "W", "Ottawa, Ont., Can."],
129: ["YTZ", -5, 14, 43, 40, "N", 79, 24, "W", "Toronto, Ont., Can."],
130: ["YVR", -8, 14, 49, 13, "N", 123, 6, "W", "Vancouver, B.C., Can."],
131: ["YYC", -7, 14, 51, 1, "N", 114, 1, "W", "Calgary, Alba., Can."],
132: ["ZRH", 1, 4, 47, 21, "N", 8, 31, "E", "Zurich, Switzerland"]
}
    @property
    def version(self):
        # version string reported by the configurator (module-level constant)
        return DRIVER_VERSION
    def add_options(self, parser):
        """Register TE923-specific command-line options on *parser*
        (an optparse parser), in addition to the base class options."""
        super(TE923Configurator, self).add_options(parser)
        parser.add_option("--info", dest="info", action="store_true",
                          help="display weather station configuration")
        parser.add_option("--current", dest="current", action="store_true",
                          help="get the current weather conditions")
        parser.add_option("--history", dest="nrecords", type=int, metavar="N",
                          help="display N history records")
        parser.add_option("--history-since", dest="recmin",
                          type=int, metavar="N",
                          help="display history records since N minutes ago")
        parser.add_option("--minmax", dest="minmax", action="store_true",
                          help="display historical min/max data")
        parser.add_option("--get-date", dest="getdate", action="store_true",
                          help="display station date")
        parser.add_option("--set-date", dest="setdate",
                          type=str, metavar="YEAR,MONTH,DAY",
                          help="set station date")
        parser.add_option("--sync-date", dest="syncdate", action="store_true",
                          help="set station date using system clock")
        parser.add_option("--get-location-local", dest="loc_local",
                          action="store_true",
                          help="display local location and timezone")
        parser.add_option("--set-location-local", dest="setloc_local",
                          type=str, metavar=self.LOCSTR,
                          help="set local location and timezone")
        parser.add_option("--get-location-alt", dest="loc_alt",
                          action="store_true",
                          help="display alternate location and timezone")
        parser.add_option("--set-location-alt", dest="setloc_alt",
                          type=str, metavar=self.LOCSTR,
                          help="set alternate location and timezone")
        parser.add_option("--get-altitude", dest="getalt", action="store_true",
                          help="display altitude")
        parser.add_option("--set-altitude", dest="setalt", type=int,
                          metavar="ALT", help="set altitude (meters)")
        parser.add_option("--get-alarms", dest="getalarms",
                          action="store_true", help="display alarms")
        parser.add_option("--set-alarms", dest="setalarms", type=str,
                          metavar=self.ALMSTR, help="set alarm state")
        parser.add_option("--get-interval", dest="getinterval",
                          action="store_true", help="display archive interval")
        parser.add_option("--set-interval", dest="setinterval",
                          type=str, metavar="INTERVAL",
                          help="set archive interval (minutes)")
        parser.add_option("--format", dest="format",
                          type=str, metavar="FORMAT", default='table',
                          help="formats include: table, dict")
    def do_options(self, options, parser, config_dict, prompt): # @UnusedVariable
        """Dispatch to exactly one action based on which option was given.

        Options default to None when not supplied, so the 'is not None'
        tests select the first action present on the command line."""
        if (options.format.lower() != 'table' and
            options.format.lower() != 'dict'):
            parser.error("Unknown format '%s'. Known formats include 'table' and 'dict'." % options.format)
        # the context manager opens/closes the USB connection around the action
        with TE923Station() as station:
            if options.info is not None:
                self.show_info(station, fmt=options.format)
            elif options.current is not None:
                self.show_current(station, fmt=options.format)
            elif options.nrecords is not None:
                self.show_history(station, count=options.nrecords,
                                  fmt=options.format)
            elif options.recmin is not None:
                ts = int(time.time()) - options.recmin * 60
                self.show_history(station, ts=ts, fmt=options.format)
            elif options.minmax is not None:
                self.show_minmax(station)
            elif options.getdate is not None:
                self.show_date(station)
            elif options.setdate is not None:
                self.set_date(station, options.setdate)
            elif options.syncdate:
                self.set_date(station, None)
            elif options.loc_local is not None:
                # slot 0 is the local location, slot 1 the alternate
                self.show_location(station, 0)
            elif options.setloc_local is not None:
                self.set_location(station, 0, options.setloc_local)
            elif options.loc_alt is not None:
                self.show_location(station, 1)
            elif options.setloc_alt is not None:
                self.set_location(station, 1, options.setloc_alt)
            elif options.getalt is not None:
                self.show_altitude(station)
            elif options.setalt is not None:
                self.set_altitude(station, options.setalt)
            elif options.getalarms is not None:
                self.show_alarms(station)
            elif options.setalarms is not None:
                self.set_alarms(station, options.setalarms)
            elif options.getinterval is not None:
                self.show_interval(station)
            elif options.setinterval is not None:
                self.set_interval(station, options.setinterval)
    @staticmethod
    def show_info(station, fmt='dict'):
        """Display the station configuration in the requested format."""
        print 'Querying the station for the configuration...'
        data = station.get_config()
        TE923Configurator.print_data(data, fmt)
    @staticmethod
    def show_current(station, fmt='dict'):
        """Display the current sensor readings in the requested format."""
        print 'Querying the station for current weather data...'
        data = station.get_readings()
        TE923Configurator.print_data(data, fmt)
    @staticmethod
    def show_history(station, ts=0, count=None, fmt='dict'):
        """Display history records newer than timestamp *ts*, at most
        *count* of them, in the requested format."""
        print "Querying the station for historical records..."
        for r in station.gen_records(ts, count):
            TE923Configurator.print_data(r, fmt)
    @staticmethod
    def show_minmax(station):
        """Display the station's stored min/max values and the 25-hour
        barometer history."""
        print "Querying the station for historical min/max data"
        data = station.get_minmax()
        print "Console Temperature Min : %s" % data['t_in_min']
        print "Console Temperature Max : %s" % data['t_in_max']
        print "Console Humidity Min    : %s" % data['h_in_min']
        print "Console Humidity Max    : %s" % data['h_in_max']
        # channels 1-5 are the remote temperature/humidity sensors
        for i in range(1, 6):
            print "Channel %d Temperature Min : %s" % (i, data['t_%d_min' % i])
            print "Channel %d Temperature Max : %s" % (i, data['t_%d_max' % i])
            print "Channel %d Humidity Min    : %s" % (i, data['h_%d_min' % i])
            print "Channel %d Humidity Max    : %s" % (i, data['h_%d_max' % i])
        print "Wind speed max since midnight : %s" % data['windspeed_max']
        print "Wind gust max since midnight  : %s" % data['windgust_max']
        print "Rain yesterday  : %s" % data['rain_yesterday']
        print "Rain this week  : %s" % data['rain_week']
        print "Rain this month : %s" % data['rain_month']
        print "Last Barometer reading : %s" % time.strftime(
            "%Y %b %d %H:%M", time.localtime(data['barometer_ts']))
        # one barometer sample per hour for the last 25 hours
        for i in range(25):
            print "   T-%02d Hours : %.1f" % (i, data['barometer_%d' % i])
    @staticmethod
    def show_date(station):
        """Display the station date as DD/MM/YYYY."""
        ts = station.get_date()
        tt = time.localtime(ts)
        print "Date: %02d/%02d/%d" % (tt[2], tt[1], tt[0])
        TE923Configurator.print_alignment()
    @staticmethod
    def set_date(station, datestr):
        """Set the station date from a 'YEAR,MONTH,DAY' string, or from
        the system clock when *datestr* is None.  Each field is validated
        before anything is written to the station."""
        if datestr is not None:
            date_list = datestr.split(',')
            if len(date_list) != 3:
                print "Bad date '%s', format is YEAR,MONTH,DAY" % datestr
                return
            if int(date_list[0]) < 2000 or int(date_list[0]) > 2099:
                print "Year must be between 2000 and 2099 inclusive"
                return
            if int(date_list[1]) < 1 or int(date_list[1]) > 12:
                print "Month must be between 1 and 12 inclusive"
                return
            if int(date_list[2]) < 1 or int(date_list[2]) > 31:
                print "Day must be between 1 and 31 inclusive"
                return
            tt = time.localtime()
            # NOTE(review): the target day is decremented by one when run
            # before noon -- presumably to compensate for station/computer
            # clock alignment (see print_alignment); confirm against hardware
            offset = 1 if tt[3] < 12 else 0
            ts = time.mktime((int(date_list[0]), int(date_list[1]), int(date_list[2]) - offset, 0, 0, 0, 0, 0, 0))
        else:
            ts = time.time()
        station.set_date(ts)
        TE923Configurator.print_alignment()
    def show_location(self, station, loc_type):
        """Display city, coordinates, and DST setting for location slot
        *loc_type* (0 = local, 1 = alternate)."""
        data = station.get_loc(loc_type)
        print "City     : %s (%s)" % (self.city_dict[data['city_time']][9],
                                      self.city_dict[data['city_time']][0])
        # degree symbol encoded for the (latin-1) console output
        degree_sign= u'\N{DEGREE SIGN}'.encode('iso-8859-1')
        print "Location : %03d%s%02d'%s %02d%s%02d'%s" % (
            data['long_deg'], degree_sign, data['long_min'], data['long_dir'],
            data['lat_deg'], degree_sign, data['lat_min'], data['lat_dir'])
        if data['dst_always_on']:
            print "DST      : Always on"
        else:
            print "DST      : %s (%s)" % (self.dst_dict[data['dst']][1],
                                          self.dst_dict[data['dst']][0])
def set_location(self, station, loc_type, location):
dst_on = 1
dst_index = 0
location_list = location.split(',')
if len(location_list) == 1 and location_list[0] != "USR":
city_index = None
for idx in range(len(self.city_dict)):
if self.city_dict[idx][0] == location_list[0]:
city_index = idx
break
if city_index is None:
print "City code '%s' not recognized - consult station manual for valid city codes" % location_list[0]
return
long_deg = self.city_dict[city_index][6]
long_min = self.city_dict[city_index][7]
long_dir = self.city_dict[city_index][8]
lat_deg = self.city_dict[city_index][3]
lat_min = self.city_dict[city_index][4]
lat_dir = self.city_dict[city_index][5]
tz_hr = int(self.city_dict[city_index][1])
tz_min = 0 if self.city_dict[city_index][1] == int(self.city_dict[city_index][1]) else 30
dst_on = 0
dst_index = self.city_dict[city_index][2]
elif len(location_list) == 9 and location_list[0] == "USR":
if int(location_list[1]) < 0 or int(location_list[1]) > 180:
print "Longitude degrees must be between 0 and 180 inclusive"
return
if int(location_list[2]) < 0 or int(location_list[2]) > 180:
print "Longitude minutes must be between 0 and 59 inclusive"
return
if location_list[3] != "E" and location_list[3] != "W":
print "Longitude direction must be E or W"
return
if int(location_list[4]) < 0 or int(location_list[4]) > 180:
print "Latitude degrees must be between 0 and 90 inclusive"
return
if int(location_list[5]) < 0 or int(location_list[5]) > 180:
print "Latitude minutes must be between 0 and 59 inclusive"
return
if location_list[6] != "N" and location_list[6] != "S":
print "Longitude direction must be N or S"
return
tz_list = location_list[7].split(':')
if len(tz_list) != 2:
print "Bad timezone '%s', format is HOUR:MINUTE" % location_list[7]
return
if int(tz_list[0]) < -12 or int(tz_list[0]) > 12:
print "Timezone hour must be between -12 and 12 inclusive"
return
if int(tz_list[1]) != 0 and int(tz_list[1]) != 30:
print "Timezone minute must be 0 or 30"
return
if location_list[8].lower() != 'on':
dst_on = 0
dst_index = None
for idx in range(16):
if self.dst_dict[idx][0] == location_list[8]:
dst_index = idx
break
if dst_index is None:
print "DST code '%s' not recognized - consult station manual for valid DST codes" % location_list[8]
return
else:
dst_on = 1
dst_index = 0
city_index = 123 # user-defined city
long_deg = int(location_list[1])
long_min = int(location_list[2])
long_dir = location_list[3]
lat_deg = int(location_list[4])
lat_min = int(location_list[5])
lat_dir = location_list[6]
tz_hr = int(tz_list[0])
tz_min = int(tz_list[1])
else:
print "Bad location '%s'" % location
print "Location format is: %s" % self.LOCSTR
return
station.set_loc(loc_type, city_index, dst_on, dst_index, tz_hr, tz_min,
lat_deg, lat_min, lat_dir,
long_deg, long_min, long_dir)
    @staticmethod
    def show_altitude(station):
        """Display the station altitude in meters."""
        altitude = station.get_alt()
        print "Altitude: %d meters" % altitude
    @staticmethod
    def set_altitude(station, altitude):
        """Set the station altitude in meters, after range validation."""
        if altitude < -200 or altitude > 5000:
            print "Altitude must be between -200 and 5000 inclusive"
            return
        station.set_alt(altitude)
    @staticmethod
    def show_alarms(station):
        """Display all station alarm settings and their enabled states."""
        data = station.get_alarms()
        print "Weekday alarm : %02d:%02d (%s)" % (
            data['weekday_hour'], data['weekday_min'], data['weekday_active'])
        print "Single  alarm : %02d:%02d (%s)" % (
            data['single_hour'], data['single_min'], data['single_active'])
        print "Pre-alarm     : %s (%s)" % (
            data['prealarm_period'], data['prealarm_active'])
        if data['snooze'] > 0:
            print "Snooze        : %d mins" % data['snooze']
        else:
            print "Snooze        : Invalid"
        print "Max Temperature Alarm : %s" % data['max_temp']
        print "Min Temperature Alarm : %s" % data['min_temp']
        print "Rain Alarm            : %d mm (%s)" % (
            data['rain'], data['rain_active'])
        print "Wind Speed Alarm      : %s (%s)" % (
            data['windspeed'], data['windspeed_active'])
        print "Wind Gust  Alarm      : %s (%s)" % (
            data['windgust'], data['windgust_active'])
@staticmethod
def set_alarms(station, alarm):
alarm_list = alarm.split(',')
if len(alarm_list) != 9:
print "Bad alarm '%s'" % alarm
print "Alarm format is: %s" % TE923Configurator.ALMSTR
return
weekday = alarm_list[0]
if weekday.lower() != 'off':
weekday_list = weekday.split(':')
if len(weekday_list) != 2:
print "Bad alarm '%s', expected HOUR:MINUTE or OFF" % weekday
return
if int(weekday_list[0]) < 0 or int(weekday_list[0]) > 23:
print "Alarm hours must be between 0 and 23 inclusive"
return
if int(weekday_list[1]) < 0 or int(weekday_list[1]) > 59:
print "Alarm minutes must be between 0 and 59 inclusive"
return
single = alarm_list[1]
if single.lower() != 'off':
single_list = single.split(':')
if len(single_list) != 2:
print "Bad alarm '%s', expected HOUR:MINUTE or OFF" % single
return
if int(single_list[0]) < 0 or int(single_list[0]) > 23:
print "Alarm hours must be between 0 and 23 inclusive"
return
if int(single_list[1]) < 0 or int(single_list[1]) > 59:
print "Alarm minutes must be between 0 and 59 inclusive"
return
if alarm_list[2].lower() != 'off' and alarm_list[2] not in ['15', '30', '45', '60', '90']:
print "Prealarm must be 15, 30, 45, 60, 90 or OFF"
return
if int(alarm_list[3]) < 1 or int(alarm_list[3]) > 15:
print "Snooze must be between 1 and 15 inclusive"
return
if float(alarm_list[4]) < -50 or float(alarm_list[4]) > 70:
print "Temperature alarm must be between -50 and 70 inclusive"
return
if float(alarm_list[5]) < -50 or float(alarm_list[5]) > 70:
print "Temperature alarm must be between -50 and 70 inclusive"
return
if alarm_list[6].lower() != 'off' and (int(alarm_list[6]) < 1 or int(alarm_list[6]) > 9999):
print "Rain alarm must be between 1 and 999 inclusive or OFF"
return
if alarm_list[7].lower() != 'off' and (float(alarm_list[7]) < 1 or float(alarm_list[7]) > 199):
print "Wind alarm must be between 1 and 199 inclusive or OFF"
return
if alarm_list[8].lower() != 'off' and (float(alarm_list[8]) < 1 or float(alarm_list[8]) > 199):
print "Wind alarm must be between 1 and 199 inclusive or OFF"
return
station.set_alarms(alarm_list[0], alarm_list[1], alarm_list[2],
alarm_list[3], alarm_list[4], alarm_list[5],
alarm_list[6], alarm_list[7], alarm_list[8])
print "Temperature alarms can only be modified via station controls"
    @staticmethod
    def show_interval(station):
        """Display the archive interval as a human-readable label."""
        idx = station.get_interval()
        print "Interval: %s" % TE923Configurator.idx_to_interval.get(idx, 'unknown')
@staticmethod
def set_interval(station, interval):
"""accept 30s|2h|1d format or raw minutes, but only known intervals"""
idx = TE923Configurator.interval_to_idx.get(interval)
if idx is None:
try:
ival = int(interval * 60)
for i in TE923Station.idx_to_interval_sec:
if ival == TE923Station.idx_to_interval_sec[i]:
idx = i
except ValueError:
pass
if idx is None:
print "Bad interval '%s'" % interval
print "Valid intervals are %s" % ','.join(TE923Configurator.interval_to_idx.keys())
return
station.set_interval(idx)
    @staticmethod
    def print_data(data, fmt):
        """Print *data* either as an aligned table or as a raw dict."""
        if fmt.lower() == 'table':
            TE923Configurator.print_table(data)
        else:
            print data
    @staticmethod
    def print_table(data):
        """Print the dict as 'key: value' lines, sorted, keys right-aligned."""
        for key in sorted(data):
            print "%s: %s" % (key.rjust(16), data[key])
    @staticmethod
    def print_alignment():
        """Warn that dates may be off by a day if the clocks are not aligned."""
        print "  If computer time is not aligned to station time then date"
        print "  may be incorrect by 1 day"
class TE923Driver(weewx.drivers.AbstractDevice):
    """Driver for Hideki TE923 stations."""
    def __init__(self, **stn_dict):
        """Initialize the station object.

        polling_interval: How often to poll the station, in seconds.
        [Optional. Default is 10]

        model: Which station model is this?
        [Optional. Default is 'TE923']
        """
        loginf('driver version is %s' % DRIVER_VERSION)
        global DEBUG_READ
        DEBUG_READ = int(stn_dict.get('debug_read', DEBUG_READ))
        global DEBUG_WRITE
        DEBUG_WRITE = int(stn_dict.get('debug_write', DEBUG_WRITE))
        global DEBUG_DECODE
        DEBUG_DECODE = int(stn_dict.get('debug_decode', DEBUG_DECODE))
        # rain counters are cumulative; remember last totals so per-period
        # rain can be derived for loop and archive records separately
        self._last_rain_loop = None
        self._last_rain_archive = None
        self._last_ts = None
        self.model = stn_dict.get('model', 'TE923')
        self.max_tries = int(stn_dict.get('max_tries', 5))
        self.retry_wait = int(stn_dict.get('retry_wait', 3))
        self.read_timeout = int(stn_dict.get('read_timeout', 10))
        self.polling_interval = int(stn_dict.get('polling_interval', 10))
        loginf('polling interval is %s' % str(self.polling_interval))
        # start from the default map, then apply any user overrides
        self.sensor_map = dict(DEFAULT_MAP)
        if 'sensor_map' in stn_dict:
            self.sensor_map.update(stn_dict['sensor_map'])
        loginf('sensor map is %s' % self.sensor_map)
        self.station = TE923Station(max_tries=self.max_tries,
                                    retry_wait=self.retry_wait,
                                    read_timeout=self.read_timeout)
        self.station.open()
        loginf('logger capacity %s records' % self.station.get_memory_size())
        ts = self.station.get_date()
        now = int(time.time())
        loginf('station time is %s, computer time is %s' % (ts, now))
    def closePort(self):
        # release the USB interface; idempotent via the None guard
        if self.station is not None:
            self.station.close()
            self.station = None
    @property
    def hardware_name(self):
        return self.model
#    @property
#    def archive_interval(self):
#        return self.station.get_interval_seconds()
    def genLoopPackets(self):
        """Generator of live loop packets, one per polling interval."""
        while True:
            data = self.station.get_readings()
            status = self.station.get_status()
            packet = self.data_to_packet(data, status=status,
                                         last_rain=self._last_rain_loop,
                                         sensor_map=self.sensor_map)
            self._last_rain_loop = packet['rainTotal']
            yield packet
            time.sleep(self.polling_interval)
    # same as genStartupRecords, but insert battery status on the last record.
    # when record_generation is hardware, this results in a full suit of sensor
    # data, but with the archive interval calculations done by the hardware.
#    def genArchiveRecords(self, since_ts=0):
#        for data in self.station.gen_records(since_ts):
#            # FIXME: insert battery status on the last record
#            packet = self.data_to_packet(data, status=None,
#                                         last_rain=self._last_rain_archive,
#                                         sensor_map=self.sensor_map)
#            self._last_rain_archive = packet['rainTotal']
#            if self._last_ts:
#                packet['interval'] = (packet['dateTime'] - self._last_ts) / 60
#                yield packet
#            self._last_ts = packet['dateTime']
    # there is no battery status for historical records.
    def genStartupRecords(self, since_ts=0):
        """Generator of archive records from the logger since *since_ts*.

        The interval of each record is derived from the gap between
        consecutive timestamps; records with a non-positive gap
        (duplicate timestamps) are skipped."""
        loginf("reading records from logger since %s" % since_ts)
        cnt = 0
        for data in self.station.gen_records(since_ts):
            packet = self.data_to_packet(data, status=None,
                                         last_rain=self._last_rain_archive,
                                         sensor_map=self.sensor_map)
            self._last_rain_archive = packet['rainTotal']
            if self._last_ts:
                packet['interval'] = (packet['dateTime'] - self._last_ts) / 60
                if packet['interval'] > 0:
                    cnt += 1
                    yield packet
                else:
                    loginf("skip packet with duplidate timestamp: %s" % packet)
            self._last_ts = packet['dateTime']
            # periodic progress logging for long logger reads
            if cnt % 50 == 0:
                loginf("read %s records from logger" % cnt)
        loginf("read %s records from logger" % cnt)
    @staticmethod
    def data_to_packet(data, status, last_rain, sensor_map):
        """convert raw data to format and units required by weewx

                    station      weewx (metric)
    temperature     degree C     degree C
    humidity        percent      percent
    uv index        unitless     unitless
    slp             mbar         mbar
    wind speed      mile/h       km/h
    wind gust       mile/h       km/h
    wind dir        degree       degree
    rain            mm           cm
    rain rate                    cm/h
    """
        packet = dict()
        packet['usUnits'] = weewx.METRIC
        packet['dateTime'] = data['dateTime']
        # include the link status - 0 indicates ok, 1 indicates no link
        data['link_wind'] = 0 if data['windspeed_state'] == STATE_OK else 1
        data['link_rain'] = 0 if data['rain_state'] == STATE_OK else 1
        data['link_uv'] = 0 if data['uv_state'] == STATE_OK else 1
        data['link_1'] = 0 if data['t_1_state'] == STATE_OK else 1
        data['link_2'] = 0 if data['t_2_state'] == STATE_OK else 1
        data['link_3'] = 0 if data['t_3_state'] == STATE_OK else 1
        data['link_4'] = 0 if data['t_4_state'] == STATE_OK else 1
        data['link_5'] = 0 if data['t_5_state'] == STATE_OK else 1
        # map extensible sensors to database fields
        for label in sensor_map:
            if sensor_map[label] in data:
                packet[label] = data[sensor_map[label]]
            elif status is not None and sensor_map[label] in status:
                packet[label] = int(status[sensor_map[label]])
        # handle unit converstions
        packet['windSpeed'] = data.get('windspeed')
        if packet['windSpeed'] is not None:
            packet['windSpeed'] *= 1.60934 # speed is mph; weewx wants km/h
        packet['windDir'] = data.get('winddir')
        if packet['windDir'] is not None:
            packet['windDir'] *= 22.5 # weewx wants degrees
        packet['windGust'] = data.get('windgust')
        if packet['windGust'] is not None:
            packet['windGust'] *= 1.60934 # speed is mph; weewx wants km/h
        packet['rainTotal'] = data['rain']
        if packet['rainTotal'] is not None:
            packet['rainTotal'] *= 0.06578 # weewx wants cm
        packet['rain'] = weewx.wxformulas.calculate_rain(
            packet['rainTotal'], last_rain)
        # some stations report uv
        packet['UV'] = data['uv']
        # station calculates windchill
        packet['windchill'] = data['windchill']
        # station reports baromter (SLP)
        packet['barometer'] = data['slp']
        # forecast and storm fields use the station's algorithms
        packet['forecast'] = data['forecast']
        packet['storm'] = data['storm']
        return packet
# sensor link/validity states returned by the decode_* functions
STATE_OK = 'ok'
STATE_INVALID = 'invalid'
STATE_NO_LINK = 'no_link'
def _fmt(buf):
if buf:
return ' '.join(["%02x" % x for x in buf])
return ''
def bcd2int(bcd):
    """Decode one packed-BCD byte: tens digit in the high nibble."""
    high = (bcd & 0xf0) >> 4
    low = bcd & 0x0f
    return high * 10 + low
def rev_bcd2int(bcd):
    """Decode one reversed-BCD byte: ones digit in the high nibble."""
    high = (bcd & 0xf0) >> 4
    low = bcd & 0x0f
    return high + low * 10
def int2bcd(num):
    """Pack a two-digit non-negative integer into one BCD byte."""
    tens, ones = divmod(num, 10)
    return tens * 0x10 + ones
def rev_int2bcd(num):
    """Pack a two-digit non-negative integer into one reversed-BCD byte
    (ones digit in the high nibble)."""
    tens, ones = divmod(num, 10)
    return ones * 0x10 + tens
def decode(buf):
    """Decode a raw station buffer into a dict of sensor readings."""
    data = dict()
    # console (channel 0) plus five remote temperature/humidity channels
    for channel in range(6):
        data.update(decode_th(buf, channel))
    # remaining decoders each contribute their own disjoint keys
    for decoder in (decode_uv, decode_pressure, decode_forecast,
                    decode_windchill, decode_wind, decode_rain):
        data.update(decoder(buf))
    return data
def decode_th(buf, i):
    """Decode temperature/humidity for channel *i* (0 is the console,
    1-5 are remote sensors) from the 3 bytes at offset i*3."""
    if i == 0:
        tlabel = 't_in'
        hlabel = 'h_in'
    else:
        tlabel = 't_%d' % i
        hlabel = 'h_%d' % i
    tstate = '%s_state' % tlabel
    hstate = '%s_state' % hlabel
    offset = i * 3
    if DEBUG_DECODE:
        logdbg("TH%d BUF[%02d]=%02x BUF[%02d]=%02x BUF[%02d]=%02x" %
               (i, 0 + offset, buf[0 + offset], 1 + offset, buf[1 + offset],
                2 + offset, buf[2 + offset]))
    data = dict()
    # remote channels (i != 0) carry an extra validity bit in byte 2
    data[tlabel], data[tstate] = decode_temp(buf[0 + offset], buf[1 + offset],
                                             i != 0)
    data[hlabel], data[hstate] = decode_humid(buf[2 + offset])
    if DEBUG_DECODE:
        logdbg("TH%d %s %s %s %s" % (i, data[tlabel], data[tstate],
                                     data[hlabel], data[hstate]))
    return data
def decode_temp(byte1, byte2, remote):
    """decode temperature. result is degree C."""
    low_nibble = byte1 & 0x0f
    if bcd2int(low_nibble) > 9:
        # a low nibble of 0x0a marks a lost sensor link; any other
        # non-decimal nibble is invalid data
        state = STATE_NO_LINK if low_nibble == 0x0a else STATE_INVALID
        return None, state
    # remote sensors carry a validity flag in bit 0x40 of the second byte
    if remote and not (byte2 & 0x40):
        return None, STATE_INVALID
    value = bcd2int(byte1) / 10.0 + bcd2int(byte2 & 0x0f) * 10.0
    if byte2 & 0x20:
        value += 0.05           # half-tick resolution bit
    if not (byte2 & 0x80):
        value = -value          # 0x80 clear means negative temperature
    return value, STATE_OK
def decode_humid(byte):
    """decode humidity. result is percentage."""
    nibble = byte & 0x0f
    if bcd2int(nibble) > 9:
        # 0x0a in the low nibble marks a lost sensor link
        state = STATE_NO_LINK if nibble == 0x0a else STATE_INVALID
        return None, state
    return bcd2int(byte), STATE_OK
# NB: te923tool does not include the 4-bit shift
def decode_uv(buf):
    """decode data from uv sensor"""
    data = dict()
    if DEBUG_DECODE:
        logdbg("UVX  BUF[18]=%02x BUF[19]=%02x" % (buf[18], buf[19]))
    # 0xaa/0x0a and 0xff/0xff are sentinel pairs meaning no sensor link
    if ((buf[18] == 0xaa and buf[19] == 0x0a) or
        (buf[18] == 0xff and buf[19] == 0xff)):
        data['uv_state'] = STATE_NO_LINK
        data['uv'] = None
    elif bcd2int(buf[18]) > 99 or bcd2int(buf[19]) > 99:
        data['uv_state'] = STATE_INVALID
        data['uv'] = None
    else:
        data['uv_state'] = STATE_OK
        # tenths in low nibble of byte 18, units in high nibble,
        # tens in low nibble of byte 19
        data['uv'] = bcd2int(buf[18] & 0x0f) / 10.0 \
            + bcd2int((buf[18] & 0xf0) >> 4) \
            + bcd2int(buf[19] & 0x0f) * 10.0
    if DEBUG_DECODE:
        logdbg("UVX  %s %s" % (data['uv'], data['uv_state']))
    return data
def decode_pressure(buf):
    """decode pressure data"""
    data = dict()
    if DEBUG_DECODE:
        logdbg("PRS  BUF[20]=%02x BUF[21]=%02x" % (buf[20], buf[21]))
    # 0xf in the high nibble of the high byte marks invalid data
    if buf[21] & 0xf0 == 0xf0:
        data['slp_state'] = STATE_INVALID
        data['slp'] = None
    else:
        data['slp_state'] = STATE_OK
        # raw little-endian 16-bit value scaled by 1/16 mbar per count
        data['slp'] = int(buf[21] * 0x100 + buf[20]) * 0.0625
    if DEBUG_DECODE:
        logdbg("PRS  %s %s" % (data['slp'], data['slp_state']))
    return data
# NB: te923tool divides speed/gust by 2.23694 (1 meter/sec = 2.23694 mile/hour)
# NB: wview does not divide speed/gust
# NB: wview multiplies winddir by 22.5, te923tool does not
def decode_wind(buf):
    """decode wind speed, gust, and direction"""
    data = dict()
    if DEBUG_DECODE:
        logdbg("WGS  BUF[25]=%02x BUF[26]=%02x" % (buf[25], buf[26]))
    data['windgust'], data['windgust_state'] = decode_ws(buf[25], buf[26])
    if DEBUG_DECODE:
        logdbg("WGS  %s %s" % (data['windgust'], data['windgust_state']))
    if DEBUG_DECODE:
        logdbg("WSP  BUF[27]=%02x BUF[28]=%02x" % (buf[27], buf[28]))
    data['windspeed'], data['windspeed_state'] = decode_ws(buf[27], buf[28])
    if DEBUG_DECODE:
        logdbg("WSP  %s %s" % (data['windspeed'], data['windspeed_state']))
    if DEBUG_DECODE:
        logdbg("WDR  BUF[29]=%02x" % buf[29])
    # direction shares the speed sensor's link state; raw value is a
    # 0-15 sector index (converted to degrees by the driver)
    data['winddir_state'] = data['windspeed_state']
    data['winddir'] = int(buf[29] & 0x0f)
    if DEBUG_DECODE:
        logdbg("WDR  %s %s" % (data['winddir'], data['winddir_state']))
    return data
def decode_ws(byte1, byte2):
    """decode wind speed, result is mph"""
    if bcd2int(byte1 & 0xf0) > 90 or bcd2int(byte1 & 0x0f) > 9:
        # 0xee/0x8e and 0xff/0xff are sentinel pairs meaning no link
        no_link = ((byte1 == 0xee and byte2 == 0x8e) or
                   (byte1 == 0xff and byte2 == 0xff))
        return None, (STATE_NO_LINK if no_link else STATE_INVALID)
    # bit 0x10 of the second byte adds a hundreds digit
    hundreds = 100 if byte2 & 0x10 else 0
    value = bcd2int(byte1) / 10.0 + bcd2int(byte2 & 0x0f) * 10.0 + hundreds
    return value, STATE_OK
# the rain counter is in the station, not the rain bucket. so if the link
# between rain bucket and station is lost, the station will miss rainfall and
# there is no way to know about it.
# FIXME: figure out how to detect link status between station and rain bucket
# NB: wview treats the raw rain count as millimeters
def decode_rain(buf):
    """rain counter is number of bucket tips, each tip is about 0.03 inches"""
    data = dict()
    if DEBUG_DECODE:
        logdbg("RAIN BUF[30]=%02x BUF[31]=%02x" % (buf[30], buf[31]))
    # no link-state sentinel exists for rain (see comment above); the
    # counter is always reported as OK
    data['rain_state'] = STATE_OK
    # little-endian 16-bit tip counter
    data['rain'] = int(buf[31] * 0x100 + buf[30])
    if DEBUG_DECODE:
        logdbg("RAIN %s %s" % (data['rain'], data['rain_state']))
    return data
def decode_windchill(buf):
    """Decode the station-computed windchill (degree C) from bytes 23-24."""
    data = dict()
    if DEBUG_DECODE:
        logdbg("WCL  BUF[23]=%02x BUF[24]=%02x" % (buf[23], buf[24]))
    if bcd2int(buf[23] & 0xf0) > 90 or bcd2int(buf[23] & 0x0f) > 9:
        # 0xee/0x8e and 0xff/0xff are sentinel pairs meaning no link
        if ((buf[23] == 0xee and buf[24] == 0x8e) or
                (buf[23] == 0xff and buf[24] == 0xff)):
            data['windchill_state'] = STATE_NO_LINK
        else:
            data['windchill_state'] = STATE_INVALID
        data['windchill'] = None
    elif buf[24] & 0x40 != 0x40:
        # validity bit clear
        data['windchill_state'] = STATE_INVALID
        data['windchill'] = None
    else:
        data['windchill_state'] = STATE_OK
        data['windchill'] = bcd2int(buf[23]) / 10.0 \
            + bcd2int(buf[24] & 0x0f) * 10.0
        if buf[24] & 0x20 == 0x20:
            data['windchill'] += 0.05   # half-tick resolution bit
        if buf[24] & 0x80 != 0x80:
            data['windchill'] *= -1     # 0x80 clear means negative
    if DEBUG_DECODE:
        logdbg("WCL  %s %s" % (data['windchill'], data['windchill_state']))
    return data
def decode_forecast(buf):
    """Decode the station's forecast and storm flags from byte 22."""
    data = dict()
    if DEBUG_DECODE:
        logdbg("STT  BUF[22]=%02x" % buf[22])
    # 0x0f in the low nibble means no forecast available
    if buf[22] & 0x0f == 0x0f:
        data['storm'] = None
        data['forecast'] = None
    else:
        # bit 0x08 is the storm flag; low 3 bits are the forecast code
        data['storm'] = 1 if buf[22] & 0x08 == 0x08 else 0
        data['forecast'] = int(buf[22] & 0x07)
    if DEBUG_DECODE:
        logdbg("STT  %s %s" % (data['storm'], data['forecast']))
    return data
class BadRead(weewx.WeeWxIOError):
    """Bogus data length, CRC, header block, or other read failure."""
class BadWrite(weewx.WeeWxIOError):
    """Bogus data length, header block, or other write failure."""
class BadHeader(weewx.WeeWxIOError):
    """Bad header byte in a station response."""
class TE923Station(object):
ENDPOINT_IN = 0x81
READ_LENGTH = 0x8
TIMEOUT = 1200
START_ADDRESS = 0x101
RECORD_SIZE = 0x26
idx_to_interval_sec = {
1: 300, 2: 600, 3: 1200, 4: 1800, 5: 3600, 6: 5400, 7: 7200,
8: 10800, 9: 14400, 10: 21600, 11: 86400}
    def __init__(self, vendor_id=0x1130, product_id=0x6801,
                 max_tries=10, retry_wait=5, read_timeout=5):
        """Remember connection parameters; the USB device is not opened
        until open() is called (or the context manager is entered)."""
        self.vendor_id = vendor_id
        self.product_id = product_id
        self.devh = None            # USB device handle, set by open()
        self.max_tries = max_tries
        self.retry_wait = retry_wait
        self.read_timeout = read_timeout
        # memory geometry, detected by read_memory_size() during open()
        self._num_rec = None
        self._num_blk = None
    def __enter__(self):
        # context manager support: open the USB connection on entry
        self.open()
        return self
    def __exit__(self, type_, value, traceback):  # @UnusedVariable
        # context manager support: always release the USB interface
        self.close()
    def open(self, interface=0):
        """Find and claim the station on the USB bus.

        Raises weewx.WeeWxIOError if the device cannot be found, opened,
        or claimed."""
        dev = self._find_dev(self.vendor_id, self.product_id)
        if not dev:
            logcrt("Cannot find USB device with VendorID=0x%04x ProductID=0x%04x" % (self.vendor_id, self.product_id))
            raise weewx.WeeWxIOError('Unable to find station on USB')
        self.devh = dev.open()
        if not self.devh:
            raise weewx.WeeWxIOError('Open USB device failed')
        # be sure kernel does not claim the interface
        try:
            self.devh.detachKernelDriver(interface)
        except (AttributeError, usb.USBError):
            pass
        # attempt to claim the interface
        try:
            self.devh.claimInterface(interface)
            self.devh.setAltInterface(interface)
        except usb.USBError, e:
            self.close()
            logcrt("Unable to claim USB interface %s: %s" % (interface, e))
            raise weewx.WeeWxIOError(e)
        # doing a reset seems to cause problems more often than it eliminates them
        # self.devh.reset()
        # figure out which type of memory this station has
        self.read_memory_size()
    def close(self):
        """Release the USB interface and drop the device handle."""
        try:
            self.devh.releaseInterface()
        except (ValueError, usb.USBError), e:
            # best-effort release; log but do not propagate
            logerr("release interface failed: %s" % e)
        self.devh = None
    @staticmethod
    def _find_dev(vendor_id, product_id):
        """Find the vendor and product ID on the USB.

        Returns the PyUSB device object, or None if not present."""
        for bus in usb.busses():
            for dev in bus.devices:
                if dev.idVendor == vendor_id and dev.idProduct == product_id:
                    loginf('Found device on USB bus=%s device=%s' %
                           (bus.dirname, dev.filename))
                    return dev
        return None
    def _raw_read(self, addr):
        """Read one 34-byte chunk (header byte + 32 data bytes + checksum)
        from station memory at *addr*.

        Raises BadRead on timeout, short/invalid response or bad checksum,
        and BadHeader on a bad header byte."""
        # request: command 0x05, 3-byte little-endian-ish address split
        # across bytes 2-4, XOR checksum in byte 5
        reqbuf = [0x05, 0xAF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
        reqbuf[4] = addr / 0x10000
        reqbuf[3] = (addr - (reqbuf[4] * 0x10000)) / 0x100
        reqbuf[2] = addr - (reqbuf[4] * 0x10000) - (reqbuf[3] * 0x100)
        reqbuf[5] = (reqbuf[1] ^ reqbuf[2] ^ reqbuf[3] ^ reqbuf[4])
        ret = self.devh.controlMsg(requestType=0x21,
                                   request=usb.REQ_SET_CONFIGURATION,
                                   value=0x0200,
                                   index=0x0000,
                                   buffer=reqbuf,
                                   timeout=self.TIMEOUT)
        if ret != 8:
            raise BadRead('Unexpected response to data request: %s != 8' % ret)
        # sleeping does not seem to have any effect on the reads
        # time.sleep(0.1)  # te923tool is 0.3
        # collect the reply in 8-byte interrupt transfers; first byte of
        # each transfer is the count of valid payload bytes that follow
        start_ts = time.time()
        rbuf = []
        while time.time() - start_ts < self.read_timeout:
            try:
                buf = self.devh.interruptRead(
                    self.ENDPOINT_IN, self.READ_LENGTH, self.TIMEOUT)
                if buf:
                    nbytes = buf[0]
                    if nbytes > 7 or nbytes > len(buf) - 1:
                        raise BadRead("Bogus length during read: %d" % nbytes)
                    rbuf.extend(buf[1:1 + nbytes])
                if len(rbuf) >= 34:
                    break
            except usb.USBError, e:
                errmsg = repr(e)
                if not ('No data available' in errmsg or 'No error' in errmsg):
                    raise
            # sleeping seems to have no effect on the reads
            # time.sleep(0.009)  # te923tool is 0.15
        else:
            logdbg("timeout while reading: ignoring bytes: %s" % _fmt(rbuf))
            raise BadRead("Timeout after %d bytes" % len(rbuf))
        # Send acknowledgement whether or not it was a good read
        reqbuf = [0x24, 0xAF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
        reqbuf[4] = addr / 0x10000
        reqbuf[3] = (addr - (reqbuf[4] * 0x10000)) / 0x100
        reqbuf[2] = addr - (reqbuf[4] * 0x10000) - (reqbuf[3] * 0x100)
        reqbuf[5] = (reqbuf[1] ^ reqbuf[2] ^ reqbuf[3] ^ reqbuf[4])
        ret = self.devh.controlMsg(requestType=0x21,
                                   request=usb.REQ_SET_CONFIGURATION,
                                   value=0x0200,
                                   index=0x0000,
                                   buffer=reqbuf,
                                   timeout=self.TIMEOUT)
        # now check what we got
        if len(rbuf) < 34:
            raise BadRead("Not enough bytes: %d < 34" % len(rbuf))
        # there must be a header byte...
        if rbuf[0] != 0x5a:
            raise BadHeader("Bad header byte: %02x != %02x" % (rbuf[0], 0x5a))
        # ...and the last byte must be a valid crc
        crc = 0x00
        for x in rbuf[:33]:
            crc = crc ^ x
        if crc != rbuf[33]:
            raise BadRead("Bad crc: %02x != %02x" % (crc, rbuf[33]))
        # early versions of this driver used to get long reads, but these
        # might not happen any more.  log it then try to use the data anyway.
        if len(rbuf) != 34:
            loginf("read: wrong number of bytes: %d != 34" % len(rbuf))
        return rbuf
    def _raw_write(self, addr, buf):
        """Write 32 bytes from *buf* to station memory at *addr*.

        The write message (command + 3-byte address + 32 data bytes + XOR
        checksum) is sent as six 8-byte control transfers, then a 1-byte
        acknowledgement (0x5a) is awaited.  Raises BadWrite/BadHeader on
        failure."""
        wbuf = [0] * 38
        wbuf[0] = 0xAE
        wbuf[3] = addr / 0x10000
        wbuf[2] = (addr - (wbuf[3] * 0x10000)) / 0x100
        wbuf[1] = addr - (wbuf[3] * 0x10000) - (wbuf[2] * 0x100)
        crc = wbuf[0] ^ wbuf[1] ^ wbuf[2] ^ wbuf[3]
        for i in range(32):
            wbuf[i + 4] = buf[i]
            crc = crc ^ buf[i]
        wbuf[36] = crc
        # send the 37 meaningful bytes as 5 chunks of 7 plus a final 2
        for i in range(6):
            if i == 5:
                reqbuf = [0x2,
                          wbuf[i * 7], wbuf[1 + i * 7],
                          0x00, 0x00, 0x00, 0x00, 0x00]
            else:
                reqbuf = [0x7,
                          wbuf[i * 7], wbuf[1 + i * 7], wbuf[2 + i * 7],
                          wbuf[3 + i * 7], wbuf[4 + i * 7], wbuf[5 + i * 7],
                          wbuf[6 + i * 7]]
            if DEBUG_WRITE:
                logdbg("write: %s" % _fmt(reqbuf))
            ret = self.devh.controlMsg(requestType=0x21,
                                       request=usb.REQ_SET_CONFIGURATION,
                                       value=0x0200,
                                       index=0x0000,
                                       buffer=reqbuf,
                                       timeout=self.TIMEOUT)
            if ret != 8:
                raise BadWrite('Unexpected response: %s != 8' % ret)
        # Wait for acknowledgement
        time.sleep(0.1)
        start_ts = time.time()
        rbuf = []
        while time.time() - start_ts < 5:
            try:
                tmpbuf = self.devh.interruptRead(
                    self.ENDPOINT_IN, self.READ_LENGTH, self.TIMEOUT)
                if tmpbuf:
                    nbytes = tmpbuf[0]
                    if nbytes > 7 or nbytes > len(tmpbuf) - 1:
                        raise BadRead("Bogus length during read: %d" % nbytes)
                    rbuf.extend(tmpbuf[1:1 + nbytes])
                if len(rbuf) >= 1:
                    break
            except usb.USBError, e:
                errmsg = repr(e)
                if not ('No data available' in errmsg or 'No error' in errmsg):
                    raise
            time.sleep(0.009)
        else:
            raise BadWrite("Timeout after %d bytes" % len(rbuf))
        if len(rbuf) != 1:
            loginf("write: ack got wrong number of bytes: %d != 1" % len(rbuf))
        if len(rbuf) == 0:
            raise BadWrite("Bad ack: zero length response")
        elif rbuf[0] != 0x5a:
            raise BadHeader("Bad header byte: %02x != %02x" % (rbuf[0], 0x5a))
def _read(self, addr):
"""raw_read returns the entire 34-byte chunk, i.e., one header byte,
32 data bytes, one checksum byte. this function simply returns it."""
# FIXME: strip the header and checksum so that we return only the
# 32 bytes of data. this will require shifting every index
# pretty much everywhere else in this code.
if DEBUG_READ:
logdbg("read: address 0x%06x" % addr)
for cnt in range(self.max_tries):
try:
buf = self._raw_read(addr)
if DEBUG_READ:
logdbg("read: %s" % _fmt(buf))
return buf
except (BadRead, BadHeader, usb.USBError), e:
logerr("Failed attempt %d of %d to read data: %s" %
(cnt + 1, self.max_tries, e))
logdbg("Waiting %d seconds before retry" % self.retry_wait)
time.sleep(self.retry_wait)
else:
raise weewx.RetriesExceeded("Read failed after %d tries" %
self.max_tries)
def _write(self, addr, buf):
if DEBUG_WRITE:
logdbg("write: address 0x%06x: %s" % (addr, _fmt(buf)))
for cnt in range(self.max_tries):
try:
self._raw_write(addr, buf)
return
except (BadWrite, BadHeader, usb.USBError), e:
logerr("Failed attempt %d of %d to write data: %s" %
(cnt + 1, self.max_tries, e))
logdbg("Waiting %d seconds before retry" % self.retry_wait)
time.sleep(self.retry_wait)
else:
raise weewx.RetriesExceeded("Write failed after %d tries" %
self.max_tries)
def read_memory_size(self):
buf = self._read(0xfc)
if DEBUG_DECODE:
logdbg("MEM BUF[1]=%s" % buf[1])
if buf[1] == 0:
self._num_rec = 208
self._num_blk = 256
logdbg("detected small memory size")
elif buf[1] == 2:
self._num_rec = 3442
self._num_blk = 4096
logdbg("detected large memory size")
else:
msg = "Unrecognised memory size '%s'" % buf[1]
logerr(msg)
raise weewx.WeeWxIOError(msg)
def get_memory_size(self):
return self._num_rec
def gen_blocks(self, count=None):
"""generator that returns consecutive blocks of station memory"""
if not count:
count = self._num_blk
for x in range(0, count * 32, 32):
buf = self._read(x)
yield x, buf
def dump_memory(self):
for i in range(8):
buf = self._read(i * 32)
for j in range(4):
loginf("%02x : %02x %02x %02x %02x %02x %02x %02x %02x" %
(i * 32 + j * 8, buf[1 + j * 8], buf[2 + j * 8],
buf[3 + j * 8], buf[4 + j * 8], buf[5 + j * 8],
buf[6 + j * 8], buf[7 + j * 8], buf[8 + j * 8]))
def get_config(self):
data = dict()
data.update(self.get_versions())
data.update(self.get_status())
data['latitude'], data['longitude'] = self.get_location()
data['altitude'] = self.get_altitude()
return data
def get_versions(self):
data = dict()
buf = self._read(0x98)
if DEBUG_DECODE:
logdbg("VER BUF[1]=%s BUF[2]=%s BUF[3]=%s BUF[4]=%s BUF[5]=%s" %
(buf[1], buf[2], buf[3], buf[4], buf[5]))
data['version_bar'] = buf[1]
data['version_uv'] = buf[2]
data['version_rcc'] = buf[3]
data['version_wind'] = buf[4]
data['version_sys'] = buf[5]
if DEBUG_DECODE:
logdbg("VER bar=%s uv=%s rcc=%s wind=%s sys=%s" %
(data['version_bar'], data['version_uv'],
data['version_rcc'], data['version_wind'],
data['version_sys']))
return data
def get_status(self):
# map the battery status flags. 0 indicates ok, 1 indicates failure.
# FIXME: i get 1 for uv even when no uv link
# FIXME: i get 0 for th3, th4, th5 even when no link
status = dict()
buf = self._read(0x4c)
if DEBUG_DECODE:
logdbg("BAT BUF[1]=%02x" % buf[1])
status['bat_rain'] = 0 if buf[1] & 0x80 == 0x80 else 1
status['bat_wind'] = 0 if buf[1] & 0x40 == 0x40 else 1
status['bat_uv'] = 0 if buf[1] & 0x20 == 0x20 else 1
status['bat_5'] = 0 if buf[1] & 0x10 == 0x10 else 1
status['bat_4'] = 0 if buf[1] & 0x08 == 0x08 else 1
status['bat_3'] = 0 if buf[1] & 0x04 == 0x04 else 1
status['bat_2'] = 0 if buf[1] & 0x02 == 0x02 else 1
status['bat_1'] = 0 if buf[1] & 0x01 == 0x01 else 1
if DEBUG_DECODE:
logdbg("BAT rain=%s wind=%s uv=%s th5=%s th4=%s th3=%s th2=%s th1=%s" %
(status['bat_rain'], status['bat_wind'], status['bat_uv'],
status['bat_5'], status['bat_4'], status['bat_3'],
status['bat_2'], status['bat_1']))
return status
# FIXME: is this any different than get_alt?
def get_altitude(self):
buf = self._read(0x5a)
if DEBUG_DECODE:
logdbg("ALT BUF[1]=%02x BUF[2]=%02x BUF[3]=%02x" %
(buf[1], buf[2], buf[3]))
altitude = buf[2] * 0x100 + buf[1]
if buf[3] & 0x8 == 0x8:
altitude *= -1
if DEBUG_DECODE:
logdbg("ALT %s" % altitude)
return altitude
# FIXME: is this any different than get_loc?
def get_location(self):
buf = self._read(0x06)
if DEBUG_DECODE:
logdbg("LOC BUF[1]=%02x BUF[2]=%02x BUF[3]=%02x BUF[4]=%02x BUF[5]=%02x BUF[6]=%02x" % (buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]))
latitude = float(rev_bcd2int(buf[1])) + (float(rev_bcd2int(buf[2])) / 60)
if buf[5] & 0x80 == 0x80:
latitude *= -1
longitude = float((buf[6] & 0xf0) / 0x10 * 100) + float(rev_bcd2int(buf[3])) + (float(rev_bcd2int(buf[4])) / 60)
if buf[5] & 0x40 == 0x00:
longitude *= -1
if DEBUG_DECODE:
logdbg("LOC %s %s" % (latitude, longitude))
return latitude, longitude
def get_readings(self):
"""get sensor readings from the station, return as dictionary"""
buf = self._read(0x020001)
data = decode(buf[1:])
data['dateTime'] = int(time.time() + 0.5)
return data
def _get_next_index(self):
"""get the index of the next history record"""
buf = self._read(0xfb)
if DEBUG_DECODE:
logdbg("HIS BUF[3]=%02x BUF[5]=%02x" % (buf[3], buf[5]))
record_index = buf[3] * 0x100 + buf[5]
logdbg("record_index=%s" % record_index)
if record_index > self._num_rec:
msg = "record index of %d exceeds memory size of %d records" % (
record_index, self._num_rec)
logerr(msg)
raise weewx.WeeWxIOError(msg)
return record_index
def _get_starting_addr(self, requested):
"""calculate the oldest and latest addresses"""
count = requested
if count is None:
count = self._num_rec
elif count > self._num_rec:
count = self._num_rec
loginf("too many records requested (%d), using %d instead" %
(requested, count))
idx = self._get_next_index()
if idx < 1:
idx += self._num_rec
latest_addr = self.START_ADDRESS + (idx - 1) * self.RECORD_SIZE
oldest_addr = latest_addr - (count - 1) * self.RECORD_SIZE
logdbg("count=%s oldest_addr=0x%06x latest_addr=0x%06x" %
(count, oldest_addr, latest_addr))
return oldest_addr, count
def gen_records(self, since_ts=0, requested=None):
"""return requested records from station from oldest to newest. If
since_ts is specified, then all records since that time. If requested
is specified, then at most that many most recent records. If both
are specified then at most requested records newer than the timestamp.
Each historical record is 38 bytes (0x26) long. Records start at
memory address 0x101 (257). The index of the record after the latest
is at address 0xfc:0xff (253:255), indicating the offset from the
starting address.
On small memory stations, the last 32 bytes of memory are never used.
On large memory stations, the last 20 bytes of memory are never used.
"""
logdbg("gen_records: since_ts=%s requested=%s" % (since_ts, requested))
# we need the current year and month since station does not track year
start_ts = time.time()
tt = time.localtime(start_ts)
# get the archive interval for use in calculations later
arcint = self.get_interval_seconds()
# if nothing specified, get everything since time began
if since_ts is None:
since_ts = 0
# if no count specified, use interval to estimate number of records
if requested is None:
requested = int((start_ts - since_ts) / arcint)
requested += 1 # safety margin
# get the starting address for what we want to read, plus actual count
oldest_addr, count = self._get_starting_addr(requested)
# inner loop reads records, outer loop catches any added while reading
more_records = True
while more_records:
n = 0
while n < count:
addr = oldest_addr + n * self.RECORD_SIZE
if addr < self.START_ADDRESS:
addr += self._num_rec * self.RECORD_SIZE
record = self.get_record(addr, tt.tm_year, tt.tm_mon)
n += 1
msg = "record %d of %d addr=0x%06x" % (n, count, addr)
if record and record['dateTime'] > since_ts:
msg += " %s" % timestamp_to_string(record['dateTime'])
logdbg("gen_records: yield %s" % msg)
yield record
else:
if record:
msg += " since_ts=%d %s" % (
since_ts, timestamp_to_string(record['dateTime']))
logdbg("gen_records: skip %s" % msg)
# insert a sleep to simulate slow reads
# time.sleep(5)
# see if reading has taken so much time that more records have
# arrived. read whatever records have come in since the read began.
now = time.time()
if now - start_ts > arcint:
newreq = int((now - start_ts) / arcint)
newreq += 1 # safety margin
logdbg("gen_records: reading %d more records" % newreq)
oldest_addr, count = self._get_starting_addr(newreq)
start_ts = now
else:
more_records = False
def get_record(self, addr, now_year, now_month):
"""Return a single record from station."""
logdbg("get_record at address 0x%06x (year=%s month=%s)" %
(addr, now_year, now_month))
buf = self._read(addr)
if DEBUG_DECODE:
logdbg("REC %02x %02x %02x %02x" %
(buf[1], buf[2], buf[3], buf[4]))
if buf[1] == 0xff:
logdbg("get_record: no data at address 0x%06x" % addr)
return None
year = now_year
month = buf[1] & 0x0f
if month > now_month:
year -= 1
day = bcd2int(buf[2])
hour = bcd2int(buf[3])
minute = bcd2int(buf[4])
ts = time.mktime((year, month, day, hour, minute, 0, 0, 0, -1))
if DEBUG_DECODE:
logdbg("REC %d/%02d/%02d %02d:%02d = %d" %
(year, month, day, hour, minute, ts))
tmpbuf = buf[5:16]
buf = self._read(addr + 0x10)
tmpbuf.extend(buf[1:22])
data = decode(tmpbuf)
data['dateTime'] = int(ts)
logdbg("get_record: found record %s" % data)
return data
def _read_minmax(self):
buf = self._read(0x24)
tmpbuf = self._read(0x40)
buf[28:37] = tmpbuf[1:10]
tmpbuf = self._read(0xaa)
buf[37:47] = tmpbuf[1:11]
tmpbuf = self._read(0x60)
buf[47:74] = tmpbuf[1:28]
tmpbuf = self._read(0x7c)
buf[74:101] = tmpbuf[1:28]
return buf
def get_minmax(self):
buf = self._read_minmax()
data = dict()
data['t_in_min'], _ = decode_temp(buf[1], buf[2], 0)
data['t_in_max'], _ = decode_temp(buf[3], buf[4], 0)
data['h_in_min'], _ = decode_humid(buf[5])
data['h_in_max'], _ = decode_humid(buf[6])
for i in range(5):
label = 't_%d_%%s' % (i + 1)
data[label % 'min'], _ = decode_temp(buf[7+i*6], buf[8 +i*6], 1)
data[label % 'max'], _ = decode_temp(buf[9+i*6], buf[10+i*6], 1)
label = 'h_%d_%%s' % (i + 1)
data[label % 'min'], _ = decode_humid(buf[11+i*6])
data[label % 'max'], _ = decode_humid(buf[12+i*6])
data['windspeed_max'], _ = decode_ws(buf[37], buf[38])
data['windgust_max'], _ = decode_ws(buf[39], buf[40])
data['rain_yesterday'] = (buf[42] * 0x100 + buf[41]) * 0.6578
data['rain_week'] = (buf[44] * 0x100 + buf[43]) * 0.6578
data['rain_month'] = (buf[46] * 0x100 + buf[45]) * 0.6578
tt = time.localtime()
offset = 1 if tt[3] < 12 else 0
month = bcd2int(buf[47] & 0xf)
day = bcd2int(buf[48])
hour = bcd2int(buf[49])
minute = bcd2int(buf[50])
year = tt.tm_year
if month > tt.tm_mon:
year -= 1
ts = time.mktime((year, month, day - offset, hour, minute, 0, 0, 0, 0))
data['barometer_ts'] = ts
for i in range(25):
data['barometer_%d' % i] = (buf[52+i*2]*0x100 + buf[51+i*2])*0.0625
return data
def _read_date(self):
buf = self._read(0x0)
return buf[1:33]
def _write_date(self, buf):
self._write(0x0, buf)
def get_date(self):
tt = time.localtime()
offset = 1 if tt[3] < 12 else 0
buf = self._read_date()
day = rev_bcd2int(buf[2])
month = (buf[5] & 0xF0) / 0x10
year = rev_bcd2int(buf[4]) + 2000
ts = time.mktime((year, month, day + offset, 0, 0, 0, 0, 0, 0))
return ts
def set_date(self, ts):
tt = time.localtime(ts)
buf = self._read_date()
buf[2] = rev_int2bcd(tt[2])
buf[4] = rev_int2bcd(tt[0] - 2000)
buf[5] = tt[1] * 0x10 + (tt[6] + 1) * 2 + (buf[5] & 1)
buf[15] = self._checksum(buf[0:15])
self._write_date(buf)
def _read_loc(self, loc_type):
addr = 0x0 if loc_type == 0 else 0x16
buf = self._read(addr)
return buf[1:33]
def _write_loc(self, loc_type, buf):
addr = 0x0 if loc_type == 0 else 0x16
self._write(addr, buf)
def get_loc(self, loc_type):
buf = self._read_loc(loc_type)
offset = 6 if loc_type == 0 else 0
data = dict()
data['city_time'] = (buf[6 + offset] & 0xF0) + (buf[7 + offset] & 0xF)
data['lat_deg'] = rev_bcd2int(buf[0 + offset])
data['lat_min'] = rev_bcd2int(buf[1 + offset])
data['lat_dir'] = "S" if buf[4 + offset] & 0x80 == 0x80 else "N"
data['long_deg'] = (buf[5 + offset] & 0xF0) / 0x10 * 100 + rev_bcd2int(buf[2 + offset])
data['long_min'] = rev_bcd2int(buf[3 + offset])
data['long_dir'] = "E" if buf[4 + offset] & 0x40 == 0x40 else "W"
data['tz_hr'] = (buf[7 + offset] & 0xF0) / 0x10
if buf[4 + offset] & 0x8 == 0x8:
data['tz_hr'] *= -1
data['tz_min'] = 30 if buf[4 + offset] & 0x3 == 0x3 else 0
if buf[4 + offset] & 0x10 == 0x10:
data['dst_always_on'] = True
else:
data['dst_always_on'] = False
data['dst'] = buf[5 + offset] & 0xf
return data
def set_loc(self, loc_type, city_index, dst_on, dst_index, tz_hr, tz_min,
lat_deg, lat_min, lat_dir, long_deg, long_min, long_dir):
buf = self._read_loc(loc_type)
offset = 6 if loc_type == 0 else 0
buf[0 + offset] = rev_int2bcd(lat_deg)
buf[1 + offset] = rev_int2bcd(lat_min)
buf[2 + offset] = rev_int2bcd(long_deg % 100)
buf[3 + offset] = rev_int2bcd(long_min)
buf[4 + offset] = (lat_dir == "S") * 0x80 + (long_dir == "E") * 0x40 + (tz_hr < 0) + dst_on * 0x10 * 0x8 + (tz_min == 30) * 3
buf[5 + offset] = (long_deg > 99) * 0x10 + dst_index
buf[6 + offset] = (buf[28] & 0x0F) + int(city_index / 0x10) * 0x10
buf[7 + offset] = city_index % 0x10 + abs(tz_hr) * 0x10
if loc_type == 0:
buf[15] = self._checksum(buf[0:15])
else:
buf[8] = self._checksum(buf[0:8])
self._write_loc(loc_type, buf)
def _read_alt(self):
buf = self._read(0x5a)
return buf[1:33]
def _write_alt(self, buf):
self._write(0x5a, buf)
def get_alt(self):
buf = self._read_alt()
altitude = buf[1] * 0x100 + buf[0]
if buf[3] & 0x8 == 0x8:
altitude *= -1
return altitude
def set_alt(self, altitude):
buf = self._read_alt()
buf[0] = abs(altitude) & 0xff
buf[1] = abs(altitude) / 0x100
buf[2] = buf[2] & 0x7 + (altitude < 0) * 0x8
buf[3] = self._checksum(buf[0:3])
self._write_alt(buf)
def _read_alarms(self):
buf = self._read(0x10)
tmpbuf = self._read(0x1F)
buf[33:65] = tmpbuf[1:33]
tmpbuf = self._read(0xA0)
buf[65:97] = tmpbuf[1:33]
return buf[1:97]
def _write_alarms(self, buf):
self._write(0x10, buf[0:32])
self._write(0x1F, buf[32:64])
self._write(0xA0, buf[64:96])
def get_alarms(self):
buf = self._read_alarms()
data = dict()
data['weekday_active'] = buf[0] & 0x4 == 0x4
data['single_active'] = buf[0] & 0x8 == 0x8
data['prealarm_active'] = buf[2] & 0x8 == 0x8
data['weekday_hour'] = rev_bcd2int(buf[0] & 0xF1)
data['weekday_min'] = rev_bcd2int(buf[1])
data['single_hour'] = rev_bcd2int(buf[2] & 0xF1)
data['single_min'] = rev_bcd2int(buf[3])
data['prealarm_period'] = (buf[4] & 0xF0) / 0x10
data['snooze'] = buf[4] & 0xF
data['max_temp'], _ = decode_temp(buf[32], buf[33], 0)
data['min_temp'], _ = decode_temp(buf[34], buf[35], 0)
data['rain_active'] = buf[64] & 0x4 == 0x4
data['windspeed_active'] = buf[64] & 0x2 == 0x2
data['windgust_active'] = buf[64] & 0x1 == 0x1
data['rain'] = bcd2int(buf[66]) * 100 + bcd2int(buf[65])
data['windspeed'], _ = decode_ws(buf[68], buf[69])
data['windgust'], _ = decode_ws(buf[71], buf[72])
return data
def set_alarms(self, weekday, single, prealarm, snooze,
maxtemp, mintemp, rain, wind, gust):
buf = self._read_alarms()
if weekday.lower() != 'off':
weekday_list = weekday.split(':')
buf[0] = rev_int2bcd(int(weekday_list[0])) | 0x4
buf[1] = rev_int2bcd(int(weekday_list[1]))
else:
buf[0] &= 0xFB
if single.lower() != 'off':
single_list = single.split(':')
buf[2] = rev_int2bcd(int(single_list[0]))
buf[3] = rev_int2bcd(int(single_list[1]))
buf[0] |= 0x8
else:
buf[0] &= 0xF7
if (prealarm.lower() != 'off' and
(weekday.lower() != 'off' or single.lower() != 'off')):
if int(prealarm) == 15:
buf[4] = 0x10
elif int(prealarm) == 30:
buf[4] = 0x20
elif int(prealarm) == 45:
buf[4] = 0x30
elif int(prealarm) == 60:
buf[4] = 0x40
elif int(prealarm) == 90:
buf[4] = 0x50
buf[2] |= 0x8
else:
buf[2] &= 0xF7
buf[4] = (buf[4] & 0xF0) + int(snooze)
buf[5] = self._checksum(buf[0:5])
buf[32] = int2bcd(int(abs(float(maxtemp)) * 10) % 100)
buf[33] = int2bcd(int(abs(float(maxtemp)) / 10))
if float(maxtemp) >= 0:
buf[33] |= 0x80
if (abs(float(maxtemp)) * 100) % 10 == 5:
buf[33] |= 0x20
buf[34] = int2bcd(int(abs(float(mintemp)) * 10) % 100)
buf[35] = int2bcd(int(abs(float(mintemp)) / 10))
if float(mintemp) >= 0:
buf[35] |= 0x80
if (abs(float(mintemp)) * 100) % 10 == 5:
buf[35] |= 0x20
buf[36] = self._checksum(buf[32:36])
if rain.lower() != 'off':
buf[65] = int2bcd(int(rain) % 100)
buf[66] = int2bcd(int(int(rain) / 100))
buf[64] |= 0x4
else:
buf[64] = buf[64] & 0xFB
if wind.lower() != 'off':
buf[68] = int2bcd(int(float(wind) * 10) % 100)
buf[69] = int2bcd(int(float(wind) / 10))
buf[64] |= 0x2
else:
buf[64] = buf[64] & 0xFD
if gust.lower() != 'off':
buf[71] = int2bcd(int(float(gust) * 10) % 100)
buf[72] = int2bcd(int(float(gust) / 10))
buf[64] |= 0x1
else:
buf[64] |= 0xFE
buf[73] = self._checksum(buf[64:73])
self._write_alarms(buf)
def get_interval(self):
buf = self._read(0xFE)
return buf[1]
def get_interval_seconds(self):
idx = self.get_interval()
interval = self.idx_to_interval_sec.get(idx)
if interval is None:
msg = "Unrecognized archive interval '%s'" % idx
logerr(msg)
raise weewx.WeeWxIOError(msg)
return interval
def set_interval(self, idx):
buf = self._read(0xFE)
buf = buf[1:33]
buf[0] = idx
self._write(0xFE, buf)
@staticmethod
def _checksum(buf):
crc = 0x100
for i in range(len(buf)):
crc -= buf[i]
if crc < 0:
crc += 0x100
return crc
# define a main entry point for basic testing of the station without weewx
# engine and service overhead. invoke this as follows from the weewx root dir:
#
# PYTHONPATH=bin python bin/weewx/drivers/te923.py
#
# by default, output matches that of te923tool
# te923con display current weather readings
# te923con -d dump 208 memory records
# te923con -s display station status
#
# date; PYTHONPATH=bin python bin/user/te923.py --records 0 > c; date
# 91s
# Thu Dec 10 00:12:59 EST 2015
# Thu Dec 10 00:14:30 EST 2015
# date; PYTHONPATH=bin python bin/weewx/drivers/te923.py --records 0 > b; date
# 531s
# Tue Nov 26 10:37:36 EST 2013
# Tue Nov 26 10:46:27 EST 2013
# date; /home/mwall/src/te923tool-0.6.1/te923con -d > a; date
# 53s
# Tue Nov 26 10:46:52 EST 2013
# Tue Nov 26 10:47:45 EST 2013
if __name__ == '__main__':
import optparse
FMT_TE923TOOL = 'te923tool'
FMT_DICT = 'dict'
FMT_TABLE = 'table'
usage = """%prog [options] [--debug] [--help]"""
def main():
syslog.openlog('wee_te923', syslog.LOG_PID | syslog.LOG_CONS)
parser = optparse.OptionParser(usage=usage)
parser.add_option('--version', dest='version', action='store_true',
help='display driver version')
parser.add_option('--debug', dest='debug', action='store_true',
help='display diagnostic information while running')
parser.add_option('--status', dest='status', action='store_true',
help='display station status')
parser.add_option('--readings', dest='readings', action='store_true',
help='display sensor readings')
parser.add_option("--records", dest="records", type=int, metavar="N",
help="display N station records, oldest to newest")
parser.add_option('--blocks', dest='blocks', type=int, metavar="N",
help='display N 32-byte blocks of station memory')
parser.add_option("--format", dest="format", type=str,metavar="FORMAT",
default=FMT_TE923TOOL,
help="format for output: te923tool, table, or dict")
(options, _) = parser.parse_args()
if options.version:
print "te923 driver version %s" % DRIVER_VERSION
exit(1)
if options.debug is not None:
syslog.setlogmask(syslog.LOG_UPTO(syslog.LOG_DEBUG))
else:
syslog.setlogmask(syslog.LOG_UPTO(syslog.LOG_INFO))
if (options.format.lower() != FMT_TE923TOOL and
options.format.lower() != FMT_TABLE and
options.format.lower() != FMT_DICT):
print "Unknown format '%s'. Known formats include: %s" % (
options.format, ','.join([FMT_TE923TOOL, FMT_TABLE, FMT_DICT]))
exit(1)
with TE923Station() as station:
if options.status:
data = station.get_versions()
data.update(station.get_status())
if options.format.lower() == FMT_TE923TOOL:
print_status(data)
else:
print_data(data, options.format)
if options.readings:
data = station.get_readings()
if options.format.lower() == FMT_TE923TOOL:
print_readings(data)
else:
print_data(data, options.format)
if options.records is not None:
for data in station.gen_records(requested=options.records):
if options.format.lower() == FMT_TE923TOOL:
print_readings(data)
else:
print_data(data, options.format)
if options.blocks is not None:
for ptr, block in station.gen_blocks(count=options.blocks):
print_hex(ptr, block)
def print_data(data, fmt):
if fmt.lower() == FMT_TABLE:
print_table(data)
else:
print data
def print_hex(ptr, data):
print "0x%06x %s" % (ptr, _fmt(data))
def print_table(data):
"""output entire dictionary contents in two columns"""
for key in sorted(data):
print "%s: %s" % (key.rjust(16), data[key])
def print_status(data):
"""output status fields in te923tool format"""
print "0x%x:0x%x:0x%x:0x%x:0x%x:%d:%d:%d:%d:%d:%d:%d:%d" % (
data['version_sys'], data['version_bar'], data['version_uv'],
data['version_rcc'], data['version_wind'],
data['bat_rain'], data['bat_uv'], data['bat_wind'], data['bat_5'],
data['bat_4'], data['bat_3'], data['bat_2'], data['bat_1'])
def print_readings(data):
"""output sensor readings in te923tool format"""
output = [str(data['dateTime'])]
output.append(getvalue(data, 't_in', '%0.2f'))
output.append(getvalue(data, 'h_in', '%d'))
for i in range(1, 6):
output.append(getvalue(data, 't_%d' % i, '%0.2f'))
output.append(getvalue(data, 'h_%d' % i, '%d'))
output.append(getvalue(data, 'slp', '%0.1f'))
output.append(getvalue(data, 'uv', '%0.1f'))
output.append(getvalue(data, 'forecast', '%d'))
output.append(getvalue(data, 'storm', '%d'))
output.append(getvalue(data, 'winddir', '%d'))
output.append(getvalue(data, 'windspeed', '%0.1f'))
output.append(getvalue(data, 'windgust', '%0.1f'))
output.append(getvalue(data, 'windchill', '%0.1f'))
output.append(getvalue(data, 'rain', '%d'))
print ':'.join(output)
def getvalue(data, label, fmt):
if label + '_state' in data:
if data[label + '_state'] == STATE_OK:
return fmt % data[label]
else:
return data[label + '_state']
else:
if data[label] is None:
return 'x'
else:
return fmt % data[label]
if __name__ == '__main__':
main()
| paolobenve/weewx | bin/weewx/drivers/te923.py | Python | gpl-3.0 | 99,426 | [
"COLUMBUS"
] | 7cccb97c89363378682606c5677f7921b0a78c50d98abbec4512bf61a01895f3 |
'''
base64 ±àÂëÌåϵÓÃÓÚ½«ÈÎÒâ¶þ½øÖÆÊý¾Ýת»»Îª´¿Îı¾.
Ëü½«Ò»¸ö 3 ×ֽڵĶþ½øÖÆ×Ö½Ú×éת»»Îª 4 ¸öÎı¾×Ö·û×é´¢´æ, ¶øÇҹ涨ֻÔÊÐíÒÔϼ¯ºÏÖеÄ×Ö·û³öÏÖ:
ABCDEFGHIJKLMNOPQRSTUVWXYZ
abcdefghijklmnopqrstuvwxyz
0123456789+/
ÁíÍâ, = ÓÃÓÚÌî³äÊý¾ÝÁ÷µÄĩβ.
ÏÂÀý չʾÁËÈçºÎʹÓà encode ºÍ decode º¯Êý²Ù×÷Îļþ¶ÔÏó.
'''
import base64
MESSAGE = "life of brian"
file = open("out.txt", "w")
file.write(MESSAGE)
file.close()
base64.encode(open("out.txt"), open("out.b64", "w"))
base64.decode(open("out.b64"), open("out.txt", "w"))
print "original:", repr(MESSAGE)
print "encoded message:", repr(open("out.b64").read())
print "decoded message:", repr(open("out.txt").read()) | iamweilee/pylearn | base64-example-1.py | Python | mit | 657 | [
"Brian"
] | 315587f58a0589006aafb72c9aaa09dbbf3b2c9b87b4e91f33e1d3609e53f06f |
"""
plotter.py: plot functions of the results
Copyright (C) 2017 Hanjie Pan
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Correspondence concerning LEAP should be addressed as follows:
Email: hanjie [Dot] pan [At] epfl [Dot] ch
Postal address: EPFL-IC-LCAV
Station 14
1015 Lausanne
Switzerland
"""
from __future__ import division
import re
import os
import subprocess
import numpy as np
from astropy import units
from astropy.coordinates import SkyCoord
from utils import planar_distance, UVW2J2000
import matplotlib
if os.environ.get('DISPLAY') is None:
matplotlib.use('Agg')
import matplotlib.colors as mcolors
try:
which_latex = subprocess.check_output(['which', 'latex'])
os.environ['PATH'] = \
os.environ['PATH'] + ':' + \
os.path.dirname(which_latex.decode('utf-8').rstrip('\n'))
use_latex = True
except subprocess.CalledProcessError:
use_latex = False
if use_latex:
from matplotlib import rcParams
rcParams['text.usetex'] = True
rcParams['text.latex.unicode'] = True
rcParams['text.latex.preamble'] = [r"\usepackage{bm}"]
import matplotlib.pyplot as plt
import seaborn as sns
from plotly.offline import plot
import plotly.graph_objs as go
import plotly
sns.set_style('ticks',
{
'xtick.major.size': 3.5,
'xtick.minor.size': 2,
'ytick.major.size': 3.5,
'ytick.minor.size': 2,
'axes.linewidth': 0.8
}
)
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=-1):
if n == -1:
n = cmap.N
new_cmap = mcolors.LinearSegmentedColormap.from_list(
'trunc({name},{a:.2f},{b:.2f})'.format(name=cmap.name, a=minval, b=maxval),
cmap(np.linspace(minval, maxval, n)))
return new_cmap
def planar_plot_diracs_zoom_J2000(
x_plt_grid, y_plt_grid, zoom_box=None,
RA_focus_rad=0, DEC_focus_rad=0,
x_ref=None, y_ref=None, amplitude_ref=None, marker_ref='^',
x_recon=None, y_recon=None, amplitude_recon=None, marker_recon='*',
max_amp_ref=None, max_amp=None, cmap='magma_r',
background_img=None, marker_scale=1,
marker_alpha=0.6, legend_marker_scale=0.7,
save_fig=False, file_name='sph_recon_2d_dirac',
reverse_xaxis=True,
label_ref_sol='ground truth', label_recon='reconstruction', legend_loc=0,
file_format='pdf', dpi=300, close_fig=True, has_title=True, title_str=None):
"""
zoom-in plot of the reconstructed point sources in the J2000 coordinates
:param x_plt_grid: plotting grid on the horizontal axis
:param y_plt_grid: plotting grid on the vertical axis
:param zoom_box: the box area to zoom-in. It's a list of 4 element:
[lower_x, lower_y, width, height]
:param RA_focus_rad: telescope focus in radian (right ascension)
:param DEC_focus_rad: telescope focus in radian (declination)
:param x_ref: ground truth RA of the sources in UVW
:param y_ref: ground truth DEC of the sources in UVW
:param amplitude_ref: ground truth intensities of the sources
:param x_recon: reconstructed RA of the sources in UVW
:param y_recon: reconstructed DEC of the sources in UVW
:param amplitude_recon: reconstructed intensities of the sources
:param max_amp_ref: maximum source intensity (used for noramlization of marker size)
:param max_amp: maximum source intensity (used for noramlization of marker size)
:param cmap: colormap
:param background_img: background image
:param marker_scale: prescaling factor for marker size
:param marker_alpha: alpha (transparency) for the marker
:param save_fig: whether to save the figure or not
:param file_name: figure file name
:param reverse_xaxis: whether to reverse the horizontal (RA) axis or not
:param label_ref_sol: reference solution label name
:param label_recon: reconstruction label name
:param legend_loc: location of the legend. Default 'best'
:param file_format: figure file format
:param dpi: dpi for the saved figure file
:param close_fig: close figure or not
:param has_title: whether to use a figure title or not.
:param title_str: title string. If has_title is true, the default title is
the reconstruction error.
:return:
"""
# decide the plotting grid and background image based on the zoom-in box
if zoom_box is None or background_img is None:
planar_plot_diracs_J2000(
x_plt_grid=x_plt_grid, y_plt_grid=y_plt_grid,
RA_focus_rad=RA_focus_rad, DEC_focus_rad=DEC_focus_rad,
x_ref=x_ref, y_ref=y_ref,
amplitude_ref=amplitude_ref, marker_ref=marker_ref,
x_recon=x_recon, y_recon=y_recon,
amplitude_recon=amplitude_recon, marker_recon=marker_recon,
max_amp_ref=max_amp_ref, max_amp=max_amp, cmap=cmap,
background_img=background_img,
marker_scale=marker_scale,
legend_marker_scale=legend_marker_scale,
marker_alpha=marker_alpha, save_fig=save_fig,
file_name=file_name, reverse_xaxis=reverse_xaxis,
label_ref_sol=label_ref_sol, label_recon=label_recon,
legend_loc=legend_loc, file_format=file_format, dpi=dpi,
close_fig=close_fig, has_title=has_title, title_str=title_str
)
else:
img_sz0, img_sz1 = x_plt_grid.shape
pixel_idx_row_lower = int(img_sz0 * zoom_box[1])
pixel_idx_col_left = int(img_sz1 * zoom_box[0])
pixel_idx_row_upper = int(img_sz0 * (zoom_box[1] + zoom_box[3]))
pixel_idx_col_right = int(img_sz1 * (zoom_box[0] + zoom_box[2]))
x_plt_grid_zoom = \
x_plt_grid[pixel_idx_row_lower:pixel_idx_row_upper,
pixel_idx_col_left:pixel_idx_col_right]
y_plt_grid_zoom = \
y_plt_grid[pixel_idx_row_lower:pixel_idx_row_upper,
pixel_idx_col_left:pixel_idx_col_right]
background_img_zoom = \
background_img[pixel_idx_row_lower:pixel_idx_row_upper,
pixel_idx_col_left:pixel_idx_col_right]
planar_plot_diracs_J2000(
x_plt_grid=x_plt_grid_zoom, y_plt_grid=y_plt_grid_zoom,
RA_focus_rad=RA_focus_rad, DEC_focus_rad=DEC_focus_rad,
x_ref=x_ref, y_ref=y_ref,
amplitude_ref=amplitude_ref, marker_ref=marker_ref,
x_recon=x_recon, y_recon=y_recon,
amplitude_recon=amplitude_recon, marker_recon=marker_recon,
max_amp_ref=max_amp_ref, max_amp=max_amp, cmap=cmap,
background_img=background_img_zoom,
marker_scale=marker_scale,
legend_marker_scale=legend_marker_scale,
marker_alpha=marker_alpha, save_fig=save_fig,
file_name=file_name, reverse_xaxis=reverse_xaxis,
label_ref_sol=label_ref_sol, label_recon=label_recon,
legend_loc=legend_loc, file_format=file_format, dpi=dpi,
close_fig=close_fig, has_title=has_title, title_str=title_str
)
def planar_plot_diracs_J2000(
        x_plt_grid, y_plt_grid,
        RA_focus_rad=0, DEC_focus_rad=0,
        x_ref=None, y_ref=None, amplitude_ref=None, marker_ref='^',
        x_recon=None, y_recon=None, amplitude_recon=None, marker_recon='*',
        max_amp_ref=None, max_amp=None, cmap='magma_r',
        background_img=None, marker_scale=1,
        marker_alpha=0.6, legend_marker_scale=0.7,
        save_fig=False, file_name='sph_recon_2d_dirac',
        reverse_xaxis=True,
        label_ref_sol='ground truth', label_recon='reconstruction', legend_loc=0,
        file_format='pdf', dpi=300, close_fig=True, has_title=True, title_str=None):
    """
    Plot ground-truth and/or reconstructed point sources (Dirac deltas) on a
    planar grid, with the axes labelled in J2000 equatorial coordinates
    (RA in h/m/s, DEC in degree/arcmin).

    :param x_plt_grid: 2D grid of x coordinates of the background image,
        in radian (UVW / local tangent-plane frame)
    :param y_plt_grid: 2D grid of y coordinates, in radian
    :param RA_focus_rad: right ascension of the pointing centre (radian)
    :param DEC_focus_rad: declination of the pointing centre (radian)
    :param x_ref: ground-truth source x coordinates (drawn only if x_ref,
        y_ref and amplitude_ref are all given)
    :param y_ref: ground-truth source y coordinates
    :param amplitude_ref: ground-truth source amplitudes (sets marker size)
    :param x_recon: reconstructed source x coordinates (drawn only if
        x_recon, y_recon and amplitude_recon are all given)
    :param y_recon: reconstructed source y coordinates
    :param amplitude_recon: reconstructed source amplitudes
    :param max_amp_ref: optional normalisation for ground-truth marker sizes
        (defaults to max(amplitude_ref))
    :param max_amp: optional normalisation for reconstruction marker sizes
        (defaults to max(amplitude_recon))
    :param background_img: optional 2D image rendered behind the markers
    :param save_fig: whether to save figure or not
    :param file_name: figure file name (basename)
    :param file_format: format of the saved figure file
    :return: None (draws on the current matplotlib figure)
    """
    # A set of points is only plotted when location AND amplitude are given.
    if y_ref is not None and x_ref is not None and amplitude_ref is not None:
        ref_pt_available = True
    else:
        ref_pt_available = False
    if y_recon is not None and x_recon is not None and amplitude_recon is not None:
        recon_pt_available = True
    else:
        recon_pt_available = False
    # convert UVW coordinates to J2000 in [arcmin] (rad -> deg -> arcmin)
    x_plt_grid_J2000 = x_plt_grid * 180 / np.pi * 60
    y_plt_grid_J2000 = y_plt_grid * 180 / np.pi * 60
    if ref_pt_available:
        # UVW2J2000 appears to return Cartesian J2000 components; RA/DEC are
        # recovered from them below -- see UVW2J2000 for the exact contract.
        x_ref_J2000, y_ref_J2000, z_ref_J2000 = UVW2J2000(
            RA_focus_rad, DEC_focus_rad,
            x_ref, y_ref, convert_dms=False
        )[:3]
        RA_ref_J2000 = np.arctan2(y_ref_J2000, x_ref_J2000)
        DEC_ref_J2000 = np.arcsin(z_ref_J2000)
    if recon_pt_available:
        x_recon_J2000, y_recon_J2000, z_recon_J2000 = UVW2J2000(
            RA_focus_rad, DEC_focus_rad,
            x_recon, y_recon, convert_dms=False
        )[:3]
        RA_recon_J2000 = np.arctan2(y_recon_J2000, x_recon_J2000)
        DEC_recon_J2000 = np.arcsin(z_recon_J2000)
    # plot
    if background_img is not None:
        # shift the axes slightly right/up to make room for the tick labels
        ax = plt.figure(figsize=(5.5, 4), dpi=dpi).add_subplot(111)
        pos_original = ax.get_position()
        pos_new = [pos_original.x0 + 0.06, pos_original.y0 + 0.01,
                   pos_original.width, pos_original.height]
        ax.set_position(pos_new)
        plt.pcolormesh(x_plt_grid_J2000, y_plt_grid_J2000, background_img,
                       shading='gouraud', cmap=cmap)
    if ref_pt_available:
        # marker area is proportional to the (normalised) source amplitude
        if max_amp_ref is not None:
            amplitude_ref_rescaled = amplitude_ref / max_amp_ref
        else:
            amplitude_ref_rescaled = amplitude_ref / np.max(amplitude_ref)
        plt.scatter(RA_ref_J2000 * 180 / np.pi * 60,
                    DEC_ref_J2000 * 180 / np.pi * 60,
                    s=amplitude_ref_rescaled * 200 * marker_scale,  # 350 for '^'
                    marker=marker_ref, edgecolors='k', linewidths=0.5,
                    alpha=marker_alpha, c='w',
                    label=label_ref_sol)
    if recon_pt_available:
        if max_amp is not None:
            amplitude_rescaled = amplitude_recon / max_amp
        else:
            amplitude_rescaled = amplitude_recon / np.max(amplitude_recon)
        plt.scatter(RA_recon_J2000 * 180 / np.pi * 60,
                    DEC_recon_J2000 * 180 / np.pi * 60,
                    s=amplitude_rescaled * 600 * marker_scale,
                    marker=marker_recon, edgecolors='k', linewidths=0.5, alpha=marker_alpha,
                    c=np.tile([0.996, 0.410, 0.703], (x_recon.size, 1)),
                    label=label_recon)
    # default title: average reconstruction error formatted as d/m/s
    if has_title and ref_pt_available and recon_pt_available and title_str is None:
        dist_recon = planar_distance(x_ref, y_ref, x_recon, y_recon)[0]
        # in degree, minute, and second representation
        dist_recon_dms = SkyCoord(
            ra=0, dec=dist_recon, unit=units.radian
        ).to_string('dms').split(' ')[1]
        dist_recon_dms = list(filter(None, re.split('[dms]+', dist_recon_dms)))
        dist_recon_dms = (
            '{0}' + u'\u00B0' + '{1}' + u'\u2032' + '{2:.2f}' + u'\u2033'
        ).format(dist_recon_dms[0], dist_recon_dms[1], float(dist_recon_dms[2]))
        plt.title(u'average error = {0}'.format(dist_recon_dms), fontsize=11)
    elif has_title and title_str is not None:
        plt.title(title_str, fontsize=11)
    else:
        plt.title(u'', fontsize=11)
    if ref_pt_available or recon_pt_available:
        plt.legend(scatterpoints=1, loc=legend_loc, fontsize=9,
                   ncol=1, markerscale=legend_marker_scale,
                   handletextpad=0.1, columnspacing=0.1,
                   labelspacing=0.1, framealpha=0.5, frameon=True)
    plt.axis('image')
    plt.xlim((np.min(x_plt_grid_J2000), np.max(x_plt_grid_J2000)))
    plt.ylim((np.min(y_plt_grid_J2000), np.max(y_plt_grid_J2000)))
    plt.xlabel('RA (J2000)')
    plt.ylabel('DEC (J2000)')
    # astronomy convention: RA increases towards the left
    if reverse_xaxis:
        plt.gca().invert_xaxis()
    # extract labels to convert to hmsdms format; the outermost ticks are
    # dropped because they may fall outside the plotted area
    x_tick_loc, _ = plt.xticks()
    y_tick_loc, _ = plt.yticks()
    x_tick_loc = x_tick_loc[1:-1]
    y_tick_loc = y_tick_loc[1:-1]
    # evaluate a uniform grid of the same size
    x_tick_loc = np.linspace(start=x_tick_loc[0], stop=x_tick_loc[-1],
                             num=x_tick_loc.size, endpoint=True)
    y_tick_loc = np.linspace(start=y_tick_loc[0], stop=y_tick_loc[-1],
                             num=y_tick_loc.size, endpoint=True)
    # x (RA) labels: full 'XhYmZs' only on the first tick, 'YmZs' afterwards
    xlabels_hms_all = []
    for label_idx, xlabels_original_loop in enumerate(x_tick_loc):
        xlabels_original_loop = float(xlabels_original_loop)
        xlabels_hms = SkyCoord(
            ra=xlabels_original_loop, dec=0, unit=units.arcmin
        ).to_string('hmsdms').split(' ')[0]
        xlabels_hms = list(filter(None, re.split('[hms]+', xlabels_hms)))
        if label_idx == 0:
            xlabels_hms = (
                u'{0:.0f}h{1:.0f}m{2:.0f}s'
            ).format(float(xlabels_hms[0]),
                     float(xlabels_hms[1]),
                     float(xlabels_hms[2]))
        else:
            xlabels_hms = (
                u'{0:.0f}m{1:.0f}s'
            ).format(float(xlabels_hms[1]),
                     float(xlabels_hms[2]))
        xlabels_hms_all.append(xlabels_hms)
    # y (DEC) labels: degrees and (fractional seconds folded into) minutes
    ylabels_dms_all = []
    for label_idx, ylabels_original_loop in enumerate(y_tick_loc):
        ylabels_original_loop = float(ylabels_original_loop)
        ylabels_dms = SkyCoord(
            ra=0, dec=ylabels_original_loop, unit=units.arcmin
        ).to_string('hmsdms').split(' ')[1]
        ylabels_dms = list(filter(None, re.split('[dms]+', ylabels_dms)))
        ylabels_dms = (u'{0:.0f}\u00B0{1:.0f}\u2032').format(
            float(ylabels_dms[0]), float(ylabels_dms[1]) + float(ylabels_dms[2]) / 60.
        )
        ylabels_dms_all.append(ylabels_dms)
    plt.axis('image')
    plt.xlim((np.min(x_plt_grid_J2000), np.max(x_plt_grid_J2000)))
    plt.ylim((np.min(y_plt_grid_J2000), np.max(y_plt_grid_J2000)))
    plt.xticks(x_tick_loc)
    plt.yticks(y_tick_loc)
    plt.gca().set_xticklabels(xlabels_hms_all, fontsize=9)
    plt.gca().set_yticklabels(ylabels_dms_all, fontsize=9)
    if reverse_xaxis:
        plt.gca().invert_xaxis()
    if save_fig:
        # NOTE(review): savefig's positional parameter is 'fname'; passing
        # 'filename=' relies on kwargs passthrough in older matplotlib and
        # breaks on newer releases -- TODO confirm against the pinned version.
        plt.savefig(filename=(file_name + '.' + file_format), format=file_format,
                    dpi=dpi, transparent=True)
    if close_fig:
        plt.close()
def planar_plot_diracs(
        x_plt_grid, y_plt_grid,
        x_ref=None, y_ref=None, amplitude_ref=None,
        x_recon=None, y_recon=None, amplitude_recon=None,
        max_amp_ref=None, max_amp=None, cmap='magma_r',
        background_img=None, marker_scale=1, marker_alpha=0.6,
        save_fig=False, file_name='sph_recon_2d_dirac',
        xticklabels=None, yticklabels=None, reverse_xaxis=True,
        label_ref_sol='ground truth', label_recon='reconstruction', legend_loc=0,
        file_format='pdf', dpi=300, close_fig=True, has_title=True, title_str=None):
    """
    Plot ground-truth and/or reconstructed point sources (Dirac deltas) on a
    planar grid, with the axes in degrees (optionally relabelled via
    ``xticklabels``/``yticklabels``).

    :param x_plt_grid: 2D grid of x coordinates of the background image (radian)
    :param y_plt_grid: 2D grid of y coordinates (radian)
    :param x_ref: ground-truth source x coordinates (drawn only if x_ref,
        y_ref and amplitude_ref are all given)
    :param y_ref: ground-truth source y coordinates
    :param amplitude_ref: ground-truth source amplitudes (sets marker size)
    :param x_recon: reconstructed source x coordinates (drawn only if
        x_recon, y_recon and amplitude_recon are all given)
    :param y_recon: reconstructed source y coordinates
    :param amplitude_recon: reconstructed source amplitudes
    :param max_amp_ref: optional normalisation for ground-truth marker sizes
    :param max_amp: optional normalisation for reconstruction marker sizes
    :param background_img: optional 2D image rendered behind the markers
    :param xticklabels: optional explicit x tick labels (tick count is
        adjusted to match)
    :param yticklabels: optional explicit y tick labels
    :param save_fig: whether to save figure or not
    :param file_name: figure file name (basename)
    :param file_format: format of the saved figure file
    :return: None (draws on the current matplotlib figure)
    """
    # A set of points is only plotted when location AND amplitude are given.
    if y_ref is not None and x_ref is not None and amplitude_ref is not None:
        ref_pt_available = True
    else:
        ref_pt_available = False
    if y_recon is not None and x_recon is not None and amplitude_recon is not None:
        recon_pt_available = True
    else:
        recon_pt_available = False
    # plot
    x_plt_grid_degree = np.degrees(x_plt_grid)
    y_plt_grid_degree = np.degrees(y_plt_grid)
    if background_img is not None:
        # cmap = sns.cubehelix_palette(dark=0.95, light=0.1, reverse=True,
        # start=1, rot=-0.6, as_cmap=True)
        # cmap = sns.cubehelix_palette(dark=0.95, light=0.1, reverse=True,
        # start=0.3, rot=-0.6, as_cmap=True)
        # cmap = 'cubehelix_r'  # 'Spectral_r' # 'BuPu'
        # move the plotting area slight up
        ax = plt.figure(figsize=(5, 4), dpi=dpi).add_subplot(111)
        pos_original = ax.get_position()
        pos_new = [pos_original.x0, pos_original.y0 + 0.01,
                   pos_original.width, pos_original.height]
        ax.set_position(pos_new)
        plt.pcolormesh(x_plt_grid_degree, y_plt_grid_degree, background_img,
                       shading='gouraud', cmap=cmap)
    if ref_pt_available:
        # marker area is proportional to the (normalised) source amplitude
        if max_amp_ref is not None:
            amplitude_ref_rescaled = amplitude_ref / max_amp_ref
        else:
            amplitude_ref_rescaled = amplitude_ref / np.max(amplitude_ref)
        plt.scatter(np.degrees(x_ref), np.degrees(y_ref),
                    s=amplitude_ref_rescaled * 350 * marker_scale,
                    marker='^', edgecolors='k', linewidths=0.5, alpha=marker_alpha, c='w',
                    label=label_ref_sol)
    if recon_pt_available:
        if max_amp is not None:
            amplitude_rescaled = amplitude_recon / max_amp
        else:
            amplitude_rescaled = amplitude_recon / np.max(amplitude_recon)
        plt.scatter(np.degrees(x_recon), np.degrees(y_recon),
                    s=amplitude_rescaled * 600 * marker_scale,
                    marker='*', edgecolors='k', linewidths=0.5, alpha=marker_alpha,
                    c=np.tile([0.996, 0.410, 0.703], (x_recon.size, 1)),
                    label=label_recon)
    # default title: average reconstruction error formatted as d/m/s
    if has_title and ref_pt_available and recon_pt_available and title_str is None:
        dist_recon = planar_distance(x_ref, y_ref, x_recon, y_recon)[0]
        # in degree, minute, and second representation
        dist_recon_dms = SkyCoord(
            ra=0, dec=dist_recon, unit=units.radian
        ).to_string('dms').split(' ')[1]
        dist_recon_dms = list(filter(None, re.split('[dms]+', dist_recon_dms)))
        dist_recon_dms = (
            '{0}' + u'\u00B0' + '{1}' + u'\u2032' + '{2:.2f}' + u'\u2033'
        ).format(dist_recon_dms[0], dist_recon_dms[1], float(dist_recon_dms[2]))
        plt.title(u'average error = {0}'.format(dist_recon_dms), fontsize=11)
    elif has_title and title_str is not None:
        plt.title(title_str, fontsize=11)
    else:
        plt.title(u'', fontsize=11)
    if ref_pt_available or recon_pt_available:
        plt.legend(scatterpoints=1, loc=legend_loc, fontsize=9,
                   ncol=1, markerscale=0.7,
                   handletextpad=0.1, columnspacing=0.1,
                   labelspacing=0.1, framealpha=0.5, frameon=True)
    plt.axis('image')
    plt.xlim((np.min(x_plt_grid_degree), np.max(x_plt_grid_degree)))
    plt.ylim((np.min(y_plt_grid_degree), np.max(y_plt_grid_degree)))
    if xticklabels is not None:
        # set the number of ticks to match the length of the labels
        ''' from matplotlib documentation: "the number of ticks <= nbins +1" '''
        plt.gca().locator_params(axis='x', nbins=len(xticklabels) - 1)
        plt.gca().set_xticklabels(xticklabels, fontsize=9)
    if yticklabels is not None:
        # set the number of ticks to match the length of the labels
        ''' from matplotlib documentation: "the number of ticks <= nbins +1" '''
        plt.gca().locator_params(axis='y', nbins=len(yticklabels) - 1)
        plt.gca().set_yticklabels(yticklabels, fontsize=9)
    plt.xlabel('RA (J2000)')
    plt.ylabel('DEC (J2000)')
    # astronomy convention: RA increases towards the left
    if reverse_xaxis:
        plt.gca().invert_xaxis()
    if save_fig:
        # NOTE(review): savefig's positional parameter is 'fname'; 'filename='
        # relies on kwargs passthrough in older matplotlib -- TODO confirm.
        plt.savefig(filename=(file_name + '.' + file_format), format=file_format,
                    dpi=dpi, transparent=True)
    if close_fig:
        plt.close()
def plot_phase_transition_2dirac(metric_mtx, sep_seq, snr_seq,
                                 save_fig, fig_format, file_name,
                                 fig_title='', dpi=300, cmap=None,
                                 color_bar_min=0, color_bar_max=1,
                                 close_fig=True, plt_line=False):
    """
    Plot the phase transition diagram for the reconstructions of two Dirac
    deltas (separation on the y-axis, SNR on the x-axis, performance metric
    as colour).

    :param metric_mtx: a matrix of the aggregated performance. Here the row
        indices correspond to different separations between two Dirac deltas.
        The column indices correspond to different noise levels.
    :param sep_seq: a sequence that specifies the separation between two
        Dirac deltas (radian); rendered as d/m/s tick labels
    :param snr_seq: a sequence of different SNRs tested (dB)
    :param save_fig: whether to save figure or not.
    :param fig_format: file format for the saved figure.
    :param file_name: file name
    :param fig_title: title of the figure
    :param dpi: resolution used when saving the figure
    :param cmap: colormap (defaults to a seaborn cubehelix palette)
    :param color_bar_min: minimum value for the colorbar
    :param color_bar_max: maximum value for the colorbar
    :param close_fig: whether to close the figure after saving
    :param plt_line: overlay a linear fit to the 50%-success-rate boundary
    :return: None (draws on the current matplotlib figure)
    """
    fig = plt.figure(figsize=(5, 3), dpi=90)
    ax = plt.axes([0.19, 0.17, 0.72, 0.72])
    if cmap is None:
        cmap = sns.cubehelix_palette(dark=0.95, light=0.1,
                                     start=0, rot=-0.6, as_cmap=True)
    p_hd = ax.matshow(metric_mtx, cmap=cmap, alpha=1,
                      vmin=color_bar_min, vmax=color_bar_max)
    ax.grid(False)
    # the line that shows at least 50% success rate
    if plt_line:
        # for each SNR column, first separation index reaching >= 0.5;
        # assumes at least one row per column satisfies the threshold
        mask = (metric_mtx >= 0.5).astype('int')
        line_y = np.array([np.where(mask[:, loop])[0][0]
                           for loop in range(mask.shape[1])])
        line_x = np.arange(mask.shape[1])
        # degree-1 least-squares fit to the boundary, evaluated densely
        fitting_coef = np.polyfit(line_x, line_y, deg=1)
        x_inter = np.linspace(line_x.min(), line_x.max(), num=100)
        y_inter = np.zeros(x_inter.shape)
        for power_of_x, coef in enumerate(fitting_coef[::-1]):
            y_inter += coef * x_inter ** power_of_x
        plt.plot(line_x, line_y, linestyle='', linewidth=2,
                 color=[0, 0, 1], marker='o', ms=2.5)
        plt.plot(x_inter, y_inter, linestyle=':', linewidth=1.5,
                 color=[1, 1, 0], marker='')
    ax.xaxis.set_ticks_position('bottom')
    ax.set_xticks(np.arange(snr_seq.size))
    ax.set_xticklabels(['{:g}'.format(snr_loop) for snr_loop in snr_seq])
    ax.set_yticks(np.arange(sep_seq.size))
    # build d/m/s y labels, dropping leading units that would read as zero
    ytick_str = []
    for sep_loop in sep_seq:
        use_degree = True if np.degrees(sep_loop) >= 1 else False
        use_miniute = True if np.degrees(sep_loop) * 60 >= 1 else False
        use_second = True if np.degrees(sep_loop) * 3600 >= 1 else False
        # in degree, minute, and second representation
        sep_loop_dms = SkyCoord(
            ra=0, dec=sep_loop, unit=units.radian
        ).to_string('dms').split(' ')[1]
        sep_loop_dms = list(filter(None, re.split('[dms]+', sep_loop_dms)))
        if use_degree:
            sep_loop_dms = (
                '{0}' + '\u00B0' + '{1}' + '\u2032' + '{2:.0f}' + '\u2033'
            ).format(sep_loop_dms[0].lstrip('0'),
                     sep_loop_dms[1].lstrip('0'),
                     float(sep_loop_dms[2]))
        elif use_miniute:
            sep_loop_dms = (
                '{0}' + '\u2032' + '{1:.0f}' + '\u2033'
            ).format(sep_loop_dms[1].lstrip('0'),
                     float(sep_loop_dms[2]))
        elif use_second:
            sep_loop_dms = (
                '{0:.0f}' + '\u2033'
            ).format(float(sep_loop_dms[2]))
        ytick_str.append(sep_loop_dms)
    ax.set_yticklabels(ytick_str)
    plt.xlabel('SNR (dB)')
    plt.ylabel('source separation')
    ax.set_title(fig_title, position=(0.5, 1.01), fontsize=11)
    p_hdc = fig.colorbar(p_hd, orientation='vertical', use_gridspec=False,
                         anchor=(0, 0.5), shrink=1, spacing='proportional')
    p_hdc.ax.tick_params(labelsize=8.5)
    p_hdc.update_ticks()
    ax.set_aspect('auto')
    if save_fig:
        plt.savefig(file_name, format=fig_format, dpi=dpi, transparent=True)
    if close_fig:
        plt.close()
def planar_plot_diracs_plotly(x_plt, y_plt, img_lsq,
                              y_ref=None, x_ref=None, amplitude_ref=None,
                              y_recon=None, x_recon=None, amplitude_recon=None,
                              file_name='planar_recon_2d_dirac.html',
                              open_browser=False):
    """
    Interactive (plotly) version of the Dirac reconstruction plot: the
    least-squares image is shown as a surface, with ground-truth and
    reconstructed sources overlaid as hoverable scatter markers.

    :param x_plt: grid of x coordinates of the image (radian)
    :param y_plt: grid of y coordinates of the image (radian)
    :param img_lsq: least-squares image (may be complex; real part is shown)
    :param y_ref, x_ref, amplitude_ref: ground-truth source coordinates and
        amplitudes; all three must be given for them to be drawn.  Scalars
        and sequences are both accepted.
    :param y_recon, x_recon, amplitude_recon: reconstructed source
        coordinates and amplitudes (same convention)
    :param file_name: output HTML file name
    :param open_browser: whether to open the generated file in a browser
    :return: None (writes an HTML file)
    """
    plotly.offline.init_notebook_mode()
    surfacecolor = np.real(img_lsq)  # for plotting purposes
    # A set of points is only plotted when location AND amplitude are given.
    if y_ref is not None and x_ref is not None and amplitude_ref is not None:
        ref_pt_available = True
    else:
        ref_pt_available = False
    if y_recon is not None and x_recon is not None and amplitude_recon is not None:
        recon_pt_available = True
    else:
        recon_pt_available = False
    trace1 = go.Surface(x=np.degrees(x_plt), y=np.degrees(y_plt),
                        surfacecolor=surfacecolor,
                        opacity=1, colorscale='Portland', hoverinfo='none')
    trace1['contours']['x']['highlightwidth'] = 1
    trace1['contours']['y']['highlightwidth'] = 1
    # trace1['contours']['z']['highlightwidth'] = 1
    np.set_printoptions(precision=3, formatter={'float': '{: 0.2f}'.format})
    if ref_pt_available:
        # hover text per ground-truth source; scalar and sequence inputs are
        # handled by separate branches below
        if hasattr(y_ref, '__iter__'):  # <= not a scalar
            text_str2 = []
            for count, y0 in enumerate(y_ref):
                # NOTE(review): amplitude_ref.shape[1] assumes a 2D array
                # with a column dimension -- TODO confirm caller contract
                if amplitude_ref.shape[1] > 1:
                    text_str2.append((
                        u'({0:.2f}\N{DEGREE SIGN}, ' +
                        u'{1:.2f}\N{DEGREE SIGN}), </br>' +
                        u'intensity: {2}').format(np.degrees(y0),
                                                  np.degrees(x_ref[count]),
                                                  amplitude_ref.squeeze()[count])
                    )
                else:
                    text_str2.append((
                        u'({0:.2f}\N{DEGREE SIGN}, ' +
                        u'{1:.2f}\N{DEGREE SIGN}), </br>' +
                        u'intensity: {2:.2f}').format(np.degrees(y0),
                                                      np.degrees(x_ref[count]),
                                                      amplitude_ref.squeeze()[count])
                    )
            trace2 = go.Scatter(mode='markers', name='ground truth',
                                x=np.degrees(x_ref),
                                y=np.degrees(y_ref),
                                text=text_str2,
                                hoverinfo='name+text',
                                marker=dict(size=6, symbol='circle', opacity=0.6,
                                            line=dict(
                                                color='rgb(0, 0, 0)',
                                                width=1
                                            ),
                                            color='rgb(255, 255, 255)'))
        else:
            if amplitude_ref.shape[1] > 1:
                text_str2 = [(u'({0:.2f}\N{DEGREE SIGN}, ' +
                              u'{1:.2f}\N{DEGREE SIGN}) </br>' +
                              u'intensity: {2}').format(np.degrees(y_ref),
                                                        np.degrees(x_ref),
                                                        amplitude_ref)]
            else:
                text_str2 = [(u'({0:.2f}\N{DEGREE SIGN}, ' +
                              u'{1:.2f}\N{DEGREE SIGN}) </br>' +
                              u'intensity: {2:.2f}').format(np.degrees(y_ref),
                                                            np.degrees(x_ref),
                                                            amplitude_ref)]
            trace2 = go.Scatter(mode='markers', name='ground truth',
                                x=[np.degrees(x_ref)],
                                y=[np.degrees(y_ref)],
                                text=text_str2,
                                hoverinfo='name+text',
                                marker=dict(size=6, symbol='circle', opacity=0.6,
                                            line=dict(
                                                color='rgb(0, 0, 0)',
                                                width=1
                                            ),
                                            color='rgb(255, 255, 255)'))
    if recon_pt_available:
        # hover text per reconstructed source (diamond markers, pink)
        if hasattr(y_recon, '__iter__'):
            text_str3 = []
            for count, y0 in enumerate(y_recon):
                if amplitude_recon.shape[1] > 1:
                    text_str3.append((
                        u'({0:.2f}\N{DEGREE SIGN}, ' +
                        u'{1:.2f}\N{DEGREE SIGN}) </br>' +
                        u'intensity: {2}').format(np.degrees(y0),
                                                  np.degrees(x_recon[count]),
                                                  amplitude_recon.squeeze()[count])
                    )
                else:
                    text_str3.append((
                        u'({0:.2f}\N{DEGREE SIGN}, ' +
                        u'{1:.2f}\N{DEGREE SIGN}) </br>' +
                        u'intensity: {2:.2f}').format(np.degrees(y0),
                                                      np.degrees(x_recon[count]),
                                                      np.squeeze(amplitude_recon, axis=1)[count])
                    )
            trace3 = go.Scatter(mode='markers', name='reconstruction',
                                x=np.degrees(x_recon), y=np.degrees(y_recon),
                                text=text_str3,
                                hoverinfo='name+text',
                                marker=dict(size=6, symbol='diamond', opacity=0.6,
                                            line=dict(
                                                color='rgb(0, 0, 0)',
                                                width=1
                                            ),
                                            color='rgb(255, 105, 180)'))
        else:
            if amplitude_recon.shape[1] > 1:
                text_str3 = [(u'({0:.2f}\N{DEGREE SIGN}, '
                              u'{1:.2f}\N{DEGREE SIGN}) </br>' +
                              u'intensity: {2}').format(np.degrees(y_recon),
                                                        np.degrees(x_recon),
                                                        amplitude_recon)]
            else:
                text_str3 = [(u'({0:.2f}\N{DEGREE SIGN}, '
                              u'{1:.2f}\N{DEGREE SIGN}) </br>' +
                              u'intensity: {2:.2f}').format(np.degrees(y_recon),
                                                            np.degrees(x_recon),
                                                            amplitude_recon)]
            trace3 = go.Scatter(mode='markers', name='reconstruction',
                                x=[np.degrees(x_recon)],
                                y=[np.degrees(y_recon)],
                                text=text_str3,
                                hoverinfo='name+text',
                                marker=dict(size=6, symbol='diamond', opacity=0.6,
                                            line=dict(
                                                color='rgb(0, 0, 0)',
                                                width=1
                                            ),
                                            color='rgb(255, 105, 180)'))
    # assemble only the traces that were actually built
    if ref_pt_available and recon_pt_available:
        data = go.Data([trace1, trace2, trace3])
    elif ref_pt_available and not recon_pt_available:
        data = go.Data([trace1, trace2])
    elif not ref_pt_available and recon_pt_available:
        data = go.Data([trace1, trace3])
    else:
        data = go.Data([trace1])
    # title shows the average localisation error when both sets are present
    if ref_pt_available and recon_pt_available:
        dist_recon = planar_distance(x_ref, y_ref, x_recon, y_recon)[0]
        layout = go.Layout(title=u'average error = {0:.2f}\N{DEGREE SIGN}'.format(np.degrees(dist_recon)),
                           titlefont={'family': 'Open Sans, verdana, arial, sans-serif',
                                      'size': 14,
                                      'color': '#000000'},
                           autosize=False, width=670, height=550, showlegend=True,
                           margin=go.Margin(l=45, r=45, b=55, t=45)
                           )
    else:
        layout = go.Layout(title=u'',
                           titlefont={'family': 'Open Sans, verdana, arial, sans-serif',
                                      'size': 14,
                                      'color': '#000000'},
                           autosize=False, width=670, height=550, showlegend=True,
                           margin=go.Margin(l=45, r=45, b=55, t=45)
                           )
    if ref_pt_available or recon_pt_available:
        layout['legend']['xanchor'] = 'center'
        layout['legend']['yanchor'] = 'top'
        layout['legend']['x'] = 0.5
    # top-down camera so the surface reads as a 2D image
    layout['scene']['camera']['eye'] = {'x': 0, 'y': 0}
    fig = go.Figure(data=data, layout=layout)
    plot(fig, filename=file_name, auto_open=open_browser)
| hanjiepan/LEAP | plotter.py | Python | gpl-3.0 | 35,130 | [
"DIRAC"
] | 0d615f9ccc7545cb361840c872fc9b2d17a8f1dcf2da9cdcb565a6c5dd54c3be |
#!/usr/bin/env python
# VTK regression test: renders three translucent props -- one using property
# opacity, one using lookup-table alpha, one using texture alpha -- with depth
# peeling enabled, then reports whether depth peeling was actually used.
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Shared cone geometry for the first two actors.
cone = vtk.vtkConeSource()
cone.SetHeight(3.0)
cone.SetRadius(1.0)
cone.SetResolution(10)
coneMapper = vtk.vtkPolyDataMapper()
coneMapper.SetInputConnection(cone.GetOutputPort())
# Actor for opacity as a property value.
coneActor = vtk.vtkActor()
coneActor.SetMapper(coneMapper)
coneActor.GetProperty().SetOpacity(0.5)
# Actor for opacity thru LUT.
elevation = vtk.vtkElevationFilter()
elevation.SetInputConnection(cone.GetOutputPort())
coneMapper2 = vtk.vtkPolyDataMapper()
coneMapper2.SetInputConnection(elevation.GetOutputPort())
# Constant-hue red LUT whose alpha ramps from 0.9 down to 0.1 over the
# elevation scalar range.
lut = vtk.vtkLookupTable()
lut.SetAlphaRange(0.9, 0.1)
lut.SetHueRange(0, 0)
lut.SetSaturationRange(1, 1)
lut.SetValueRange(1, 1)
coneMapper2.SetLookupTable(lut)
coneMapper2.SetScalarModeToUsePointData()
coneMapper2.SetScalarVisibility(1)
coneMapper2.InterpolateScalarsBeforeMappingOn()
coneActorLUT = vtk.vtkActor()
coneActorLUT.SetMapper(coneMapper2)
coneActorLUT.SetPosition(0.1, 1.0, 0)
coneActorLUT.GetProperty().SetOpacity(0.99)
# Actor for opacity thru texture.
# NOTE(review): despite the 'cone*' names, this third prop is a textured
# sphere (vtkTexturedSphereSource), mapped with an RGBA PNG from VTK_DATA_ROOT.
reader = vtk.vtkPNGReader()
reader.SetFileName(VTK_DATA_ROOT + "/Data/alphachannel.png")
reader.Update()
sphere = vtk.vtkTexturedSphereSource()
texture = vtk.vtkTexture()
texture.SetInputConnection(reader.GetOutputPort())
coneMapper3 = vtk.vtkPolyDataMapper()
coneMapper3.SetInputConnection(sphere.GetOutputPort())
coneActorTexture = vtk.vtkActor()
coneActorTexture.SetTexture(texture)
coneActorTexture.SetMapper(coneMapper3)
coneActorTexture.SetPosition(0, -1.0, 0)
coneActorTexture.GetProperty().SetColor(0.5, 0.5, 1)
coneActorTexture.GetProperty().SetOpacity(0.99)
ren1 = vtk.vtkRenderer()
ren1.AddActor(coneActor)
ren1.AddActor(coneActorLUT)
ren1.AddActor(coneActorTexture)
ren1.SetBackground(0.1, 0.2, 0.4)
ren1.SetUseDepthPeeling(1)
# 20 layers of translucency
ren1.SetMaximumNumberOfPeels(20)
# 2 out of 1000 pixels
ren1.SetOcclusionRatio(0.002)
renWin = vtk.vtkRenderWindow()
# Depth peeling requires no multisampling and an alpha channel in the window.
renWin.SetMultiSamples(0)
renWin.SetAlphaBitPlanes(1)
renWin.AddRenderer(ren1)
renWin.SetSize(300, 300)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
style = vtk.vtkInteractorStyleTrackballCamera()
iren.SetInteractorStyle(style)
iren.Initialize()
camera = ren1.GetActiveCamera()
camera.SetPosition(9, -1, 3)
camera.SetViewAngle(30)
camera.SetViewUp(0.05, 0.96, 0.24)
camera.SetFocalPoint(0, 0.25, 0)
ren1.ResetCameraClippingRange()
renWin.Render()
# Depth peeling silently falls back to alpha blending when the OpenGL
# context cannot support it; report which path was taken.
print(ren1.GetLastRenderingUsedDepthPeeling())
if (ren1.GetLastRenderingUsedDepthPeeling()):
    print("depth peeling was used")
else:
    print("depth peeling was not used (alpha blending instead)")
#iren.Start()
| HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/VTK/Rendering/Core/Testing/Python/TestOpacity2.py | Python | gpl-3.0 | 2,727 | [
"VTK"
] | c5cab511f0dc934790c18c6108911b22c8c7cd529e1c2981d069bf37606600a6 |
from setuptools import setup

# Single source of truth for the release version; the git tag and README URL
# are derived from it so they cannot drift out of sync.
VERSION = '2.0.2'
VERSION_TAG = 'v%s' % VERSION
README_URL = ('https://github.com/briancline/crm114-python'
              '/blob/%s/README.md' % VERSION_TAG)

# Packaging metadata for the crm114 wrapper.  Fixed: keyword arguments used
# inconsistent spacing around '=' (PEP 8 E251: no spaces around '=' for
# keyword arguments); now uniform throughout the call.
setup(
    name='crm114',
    version=VERSION,
    author='Brian Cline',
    author_email='brian.cline@gmail.com',
    description=('Python wrapper classes for the CRM-114 Discriminator '
                 '(http://crm114.sourceforge.net/)'),
    license='MIT',
    keywords='crm114 text analysis classifier kubrick',
    url='http://packages.python.org/crm114',
    packages=['crm114'],
    long_description='See README.md for full details, or %s.' % README_URL,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: MIT License',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'Natural Language :: English',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Topic :: Adaptive Technologies',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Scientific/Engineering :: Information Analysis',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Text Processing',
    ],
)
| briancline/crm114-python | setup.py | Python | mit | 1,333 | [
"Brian"
] | 1c2698c2bbaa7bbe1a3e99d5557069adcc3d41fb37a9a1705b9e0c045bb68aea |
################################################################################
# #
# Copyright (C) 2010-2017 The ESPResSo project #
# #
# This file is part of ESPResSo. #
# #
# ESPResSo is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# ESPResSo is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
# #
# Active Matter: Enhanced Diffusion Tutorial #
# #
################################################################################
# NOTE(review): this is a tutorial EXERCISE template -- the bare 'for ...:'
# and '...' lines are deliberate placeholders for the student to fill in, so
# the file is intentionally not runnable as-is.
from __future__ import print_function
import numpy as np
import os
import sys
import time
from espressomd.observables import ParticlePositions
from espressomd.correlators import Correlator
# create an output folder
outdir = "./RESULTS_ENHANCED_DIFFUSION/"
try:
    os.makedirs(outdir)
# NOTE(review): bare 'except:' also swallows unrelated OSErrors (e.g.
# permission denied); 'except OSError:' would be safer in the solution.
except:
    print("INFO: Directory \"{}\" exists".format(outdir))
################################################################################
# Read in the active velocity from the command prompt
if len(sys.argv) != 2:
    print("Usage:", sys.argv[0], "<vel> (0 <= vel < 10.0)")
    exit()
vel = float(sys.argv[1])
# Set the basic simulation parameters
sampsteps = 5000
samplength = 1000
tstep = 0.01
## Exercise 2 ##
# Why can we get away with such a small box?
# Could it be even smaller?
# NOTE(review): 'espressomd' itself is never imported (only submodules are);
# the solution presumably adds 'import espressomd' -- verify.
system = espressomd.System(box_l=[10.0, 10.0, 10.0])
system.cell_system.skin = 0.3
system.time_step = tstep
################################################################################
#
# To obtain accurate statistics, you will need to run the simulation
# several times, which is accomplished by this loop. Do not increase
# this number too much, as it will slow down the simulation.
#
################################################################################
## Exercise 4 ##
# Once you have tested the routine for a single , then
# make it such that you can loop over the run parameter
# and repeat the simulation 5 times.
# Placeholder loop header: the student supplies e.g. 'for run in range(5):'.
for ...:
    # Set up a random seed (a new one for each run)
    ## Exercise 1 ##
    # Explain the choice of the random seed
    system.seed = np.random.randint(0, 2**31 - 1)
    # Use the Langevin thermostat (no hydrodynamics)
    system.thermostat.set_langevin(kT=1.0, gamma=1.0)
    # Place a single active particle (that can rotate freely! rotation=[1,1,1])
    system.part.add(pos=[5.0, 5.0, 5.0], swimming={
                    'v_swim': vel}, rotation=[1, 1, 1])
    # Initialize the mean squared displacement (MSD) correlator
    tmax = tstep * sampsteps
    pos_id = ParticlePositions(ids=[0])
    msd = Correlator(obs1=pos_id,
                     corr_operation="square_distance_componentwise",
                     dt=tstep,
                     tau_max=tmax,
                     tau_lin=16)
    system.auto_update_correlators.add(msd)
    ## Exercise 3 ##
    # Construct the auto-correlators for the VACF and AVACF,
    # using the example of the MSD
    # Initialize the velocity auto-correlation function (VACF) correlator
    ...
    # Initialize the angular velocity auto-correlation function (AVACF) correlator
    ...
    # Integrate 5,000,000 steps. This can be done in one go as well.
    for i in range(sampsteps):
        system.integrator.run(samplength)
    # Finalize the correlators and write to disk
    system.auto_update_correlators.remove(msd)
    msd.finalize()
    np.savetxt("{}/msd_{}_{}.dat".format(outdir, vel, run), msd.result())
    # Student fills in: finalize + save the VACF and AVACF correlators.
    ...
    ...
| KonradBreitsprecher/espresso | doc/tutorials/06-active_matter/EXERCISES/enhanced_diffusion.py | Python | gpl-3.0 | 4,875 | [
"ESPResSo"
] | 2449413b2cf3ff276d398df6cec9df373885969fa1a3c970ad48f641ab5907b3 |
"""Translates Python code into Python's built-in AST.
"""
import ast
from py2c.abc.worker import Worker
from py2c.processing import ProcessingError
from py2c.tree.visitors import RecursiveNodeTransformer
from py2c.utils import get_temp_variable_name
__all__ = ["SourceToASTTranslationError", "SourceToAST"]
# -----------------------------------------------------------------------------
# Exceptions
# -----------------------------------------------------------------------------
class SourceToASTTranslationError(ProcessingError):
    """Fatal error(s) occurred while translating source-code to an AST."""

    # Every instance carries the same diagnostic message.
    _MESSAGE = "Couldn't convert source-code to AST."

    def __init__(self):
        super().__init__(self._MESSAGE)
# -----------------------------------------------------------------------------
# Translator
# -----------------------------------------------------------------------------
class SourceToAST(Worker, RecursiveNodeTransformer):
    """Translates Python source code into a simplified built-in AST."""

    def __init__(self):
        # Both bases need explicit initialization; the transformer is told
        # what counts as a node (ast.AST) and how to enumerate its children.
        Worker.__init__(self)
        RecursiveNodeTransformer.__init__(self, ast.AST, ast.iter_fields)

    def work(self, code):
        """Parse *code* into a Python AST and run the simplifying visitor.

        Raises SourceToASTTranslationError (chained to the parse failure)
        when *code* cannot be parsed.
        """
        try:
            tree = ast.parse(code)
        except Exception as err:
            raise SourceToASTTranslationError() from err
        return self.visit(tree)
| pradyunsg/Py2C | py2c/processing/to_ast.py | Python | bsd-3-clause | 1,431 | [
"VisIt"
] | 783b75a5a2b7a9c60f3c1309a6da60f12361c10e0c85d46874400f84ac21b02a |
"""
=============================
Generic SpectralModel wrapper
=============================
.. moduleauthor:: Adam Ginsburg <adam.g.ginsburg@gmail.com>
"""
import numpy as np
from pyspeckit.mpfit import mpfit,mpfitException
from pyspeckit.spectrum.parinfo import ParinfoList,Parinfo
import copy
import matplotlib.cbook as mpcb
import fitter
from . import mpfit_messages
from pyspeckit.specwarnings import warn
# Python >= 2.7 ships OrderedDict in the stdlib; on older interpreters fall
# back to the third-party 'ordereddict' backport.  Fixed: the original had two
# 'except ImportError' clauses on a single 'try' -- the second clause cannot
# catch an ImportError raised *inside* the first handler, so a missing
# backport crashed the import instead of emitting the warning below.
try:
    from collections import OrderedDict
except ImportError:
    try:
        from ordereddict import OrderedDict
    except ImportError:
        warn( "OrderedDict is required for modeling. If you have python <2.7, install the ordereddict module." )
class SpectralModel(fitter.SimpleFitter):
"""
A wrapper class for a spectra model. Includes internal functions to
generate multi-component models, annotations, integrals, and individual
components. The declaration can be complex, since you should name
individual variables, set limits on them, set the units the fit will be
performed in, and set the annotations to be used. Check out some
of the hyperfine codes (hcn, n2hp) for examples.
"""
    def __init__(self, modelfunc, npars,
            shortvarnames=("A","\\Delta x","\\sigma"), multisingle='multi',
            fitunits=None,
            centroid_par=None,
            fwhm_func=None,
            fwhm_pars=None,
            integral_func=None,
            use_lmfit=False, **kwargs):
        """Spectral Model Initialization

        Create a Spectral Model class for data fitting

        Parameters
        ----------
        modelfunc : function
            the model function to be fitted.  Should take an X-axis
            (spectroscopic axis) as an input followed by input parameters.
            Returns an array with the same shape as the input X-axis
        npars : int
            number of parameters required by the model
        shortvarnames : list
            TeX names of the variables, used when annotating fits
        multisingle : str
            'multi' if the model describes multiple peaks (no background
            fitted) or 'single' for a single peak (a background may be fit)
        fitunits : str (optional)
            convert the X-axis to these units before passing to the model
        centroid_par : str (optional)
            name of the parameter that represents the profile centroid
        fwhm_func : function (optional)
            function computing the FWHM from the fitted parameters
        fwhm_pars : list (optional)
            names of the parameters ``fwhm_func`` operates on
        integral_func : function (optional)
            analytic integral of the model profile
        use_lmfit : bool
            use lmfit instead of mpfit for the optimization
        **kwargs
            parameter-specification keywords (parnames, parvalues, parlimits,
            parlimited, parfixed, parerror, partied, parsteps, npeaks, ...)
            consumed by ``_make_parinfo``; any keywords it does not consume
            are stored and forwarded to ``modelfunc`` on every evaluation.
        """
        self.modelfunc = modelfunc
        # Append the model function's docstring to the instance docstring so
        # interactive help shows the specific model's documentation.
        if self.__doc__ is None:
            self.__doc__ = modelfunc.__doc__
        elif modelfunc.__doc__ is not None:
            self.__doc__ += modelfunc.__doc__
        self.npars = npars
        self.default_npars = npars
        self.multisingle = multisingle
        self.fitunits = fitunits
        # this needs to be set once only
        self.shortvarnames = shortvarnames
        # _make_parinfo reads self.default_parinfo, so it must exist (even as
        # None) before the call; the call also strips the par-spec kwargs,
        # leaving only keywords destined for modelfunc.
        self.default_parinfo = None
        self.default_parinfo, kwargs = self._make_parinfo(**kwargs)
        self.parinfo = copy.copy(self.default_parinfo)
        self.modelfunc_kwargs = kwargs
        self.use_lmfit = use_lmfit
        # default name of parameter that represents the profile centroid
        self.centroid_par = centroid_par
        # FWHM function and parameters
        self.fwhm_func = fwhm_func
        self.fwhm_pars = fwhm_pars
        # analytic integral function
        self.integral_func = integral_func
def _make_parinfo(self, params=None, parnames=None, parvalues=None,
        parlimits=None, parlimited=None, parfixed=None, parerror=None,
        partied=None, fitunits=None, parsteps=None, npeaks=1,
        parinfo=None,
        names=None, values=None, limits=None,
        limited=None, fixed=None, error=None, tied=None, steps=None,
        negamp=None,
        limitedmin=None, limitedmax=None,
        minpars=None, maxpars=None,
        vheight=False,
        debug=False,
        **kwargs):
    """
    Generate a `ParinfoList` that matches the inputs.

    This code is complicated - it can take inputs in a variety of different
    forms with different priority.  It will return a `ParinfoList` (and
    therefore must have values within parameter ranges).

    Input priority (highest first): the ``par*``-prefixed keywords, then
    their short aliases (``names``, ``values``, ...), then
    ``limitedmin``/``limitedmax``/``minpars``/``maxpars``, then
    ``self.default_parinfo``.  Unconsumed keywords are returned in
    ``kwargs`` so they can be forwarded to the fitter.

    Returns
    -------
    (ParinfoList, dict)
        The assembled parameter list and the leftover keyword arguments.
    """
    # for backwards compatibility - partied = tied, etc.: copy each short
    # alias (e.g. `tied`) into its `par`-prefixed twin when the alias is set
    for varname in str.split("parnames,parvalues,parsteps,parlimits,parlimited,parfixed,parerror,partied",","):
        shortvarname = varname.replace("par","")
        if locals()[shortvarname] is not None:
            # HACK! locals() failed for unclear reasons...
            exec("%s = %s" % (varname,shortvarname))
    if params is not None and parvalues is not None:
        raise ValueError("parvalues and params both specified; they're redundant so that's not allowed.")
    elif params is not None and parvalues is None:
        parvalues = params
    if parnames is not None:
        self.parnames = parnames
    elif parnames is None and self.parnames is not None:
        parnames = self.parnames
    elif self.default_parinfo is not None and parnames is None:
        parnames = [p['parname'] for p in self.default_parinfo]
    # `limited` is a per-parameter pair (lower-bounded?, upper-bounded?)
    if limitedmin is not None:
        if limitedmax is not None:
            parlimited = zip(limitedmin,limitedmax)
        else:
            parlimited = zip(limitedmin,(False,)*len(parnames))
    elif limitedmax is not None:
        parlimited = zip((False,)*len(parnames),limitedmax)
    elif self.default_parinfo is not None and parlimited is None:
        parlimited = [p['limited'] for p in self.default_parinfo]
    # `limits` is the matching pair of (lower bound, upper bound) values
    if minpars is not None:
        if maxpars is not None:
            parlimits = zip(minpars,maxpars)
        else:
            parlimits = zip(minpars,(False,)*len(parnames))
    elif maxpars is not None:
        parlimits = zip((False,)*len(parnames),maxpars)
    elif self.default_parinfo is not None and parlimits is None:
        parlimits = [p['limits'] for p in self.default_parinfo]
    self.npeaks = npeaks
    # the height / parvalue popping needs to be done before the temp_pardict is set in order to make sure
    # that the height guess isn't assigned to the amplitude
    self.vheight = vheight
    if vheight and len(self.parinfo) == self.default_npars and len(parvalues) == self.default_npars + 1:
        # if the right number of parameters are passed, the first is the height
        self.parinfo = [ {'n':0, 'value':parvalues.pop(0), 'limits':(0,0),
            'limited': (False,False), 'fixed':False, 'parname':'HEIGHT',
            'error': 0, 'tied':"" } ]
    elif vheight and len(self.parinfo) == self.default_npars and len(parvalues) == self.default_npars:
        # if you're one par short, guess zero
        self.parinfo = [ {'n':0, 'value': 0, 'limits':(0,0),
            'limited': (False,False), 'fixed':False, 'parname':'HEIGHT',
            'error': 0, 'tied':"" } ]
    elif vheight and len(self.parinfo) == self.default_npars+1 and len(parvalues) == self.default_npars+1:
        # the right numbers are passed *AND* there is already a height param
        self.parinfo = [ {'n':0, 'value':parvalues.pop(0), 'limits':(0,0),
            'limited': (False,False), 'fixed':False, 'parname':'HEIGHT',
            'error': 0, 'tied':"" } ]
        #heightparnum = (i for i,s in self.parinfo if 'HEIGHT' in s['parname'])
        #for hpn in heightparnum:
        #    self.parinfo[hpn]['value'] = parvalues[0]
    elif vheight:
        raise ValueError('VHEIGHT is specified but a case was found that did not allow it to be included.')
    else:
        self.parinfo = []
    if debug: print "After VHEIGHT parse len(parinfo): %i vheight: %s" % (len(self.parinfo), vheight)
    # this is a clever way to turn the parameter lists into a dict of lists
    # clever = hard to read; any list left as None becomes a bool-zeros array
    temp_pardict = OrderedDict([(varname, np.zeros(self.npars*self.npeaks, dtype='bool'))
        if locals()[varname] is None else (varname, list(locals()[varname]) )
        for varname in str.split("parnames,parvalues,parsteps,parlimits,parlimited,parfixed,parerror,partied",",")])
    temp_pardict['parlimits'] = parlimits if parlimits is not None else [(0,0)] * (self.npars*self.npeaks)
    temp_pardict['parlimited'] = parlimited if parlimited is not None else [(False,False)] * (self.npars*self.npeaks)
    # replicate single-peak inputs across all peaks
    # (note: relies on Python 2 integer division)
    for k,v in temp_pardict.iteritems():
        if (self.npars*self.npeaks) / len(v) > 1:
            temp_pardict[k] = list(v) * ((self.npars*self.npeaks) / len(v))
    # generate the parinfo dict
    # note that 'tied' must be a blank string (i.e. ""), not False, if it is not set
    # parlimited, parfixed, and parlimits are all two-element items (tuples or lists)
    self.parinfo += [ {'n':ii+self.npars*jj+vheight,
        'value':float(temp_pardict['parvalues'][ii+self.npars*jj]),
        'step':temp_pardict['parsteps'][ii+self.npars*jj],
        'limits':temp_pardict['parlimits'][ii+self.npars*jj],
        'limited':temp_pardict['parlimited'][ii+self.npars*jj],
        'fixed':temp_pardict['parfixed'][ii+self.npars*jj],
        'parname':temp_pardict['parnames'][ii].upper()+"%0i" % jj,
        'error':float(temp_pardict['parerror'][ii+self.npars*jj]),
        'tied':temp_pardict['partied'][ii+self.npars*jj] if temp_pardict['partied'][ii+self.npars*jj] else ""}
        for jj in xrange(self.npeaks)
        for ii in xrange(self.npars) ] # order matters!
    if debug: print "After Generation step len(parinfo): %i vheight: %s" % (len(self.parinfo), vheight)
    if debug > True: import pdb; pdb.set_trace()
    # special keyword to specify emission/absorption lines:
    # negamp=True forces amplitudes <= 0, negamp=False forces them >= 0
    if negamp is not None:
        if negamp:
            for p in self.parinfo:
                if 'AMP' in p['parname']:
                    p['limited'] = (p['limited'][0], True)
                    p['limits'] = (p['limits'][0], 0)
        else:
            for p in self.parinfo:
                if 'AMP' in p['parname']:
                    p['limited'] = (True, p['limited'][1])
                    p['limits'] = (0, p['limits'][1])
    # This is effectively an override of all that junk above (3/11/2012)
    # Much of it is probably unnecessary, but it was easier to do this than
    # rewrite the above
    self.parinfo = ParinfoList([Parinfo(p) for p in self.parinfo])
    # New feature: scaleability -- amplitude-like parameters scale with the data
    for par in self.parinfo:
        if par.parname.lower().strip('0123456789') in ('amplitude','amp'):
            par.scaleable = True
    return self.parinfo, kwargs
def n_modelfunc(self, pars=None, debug=False, **kwargs):
    """
    Simple wrapper to deal with N independent peaks for a given spectral model.

    Parameters
    ----------
    pars : ParinfoList, lmfit Parameters, or plain sequence (optional)
        Parameter values; defaults to the current ``self.parinfo``.
    kwargs :
        Forwarded to ``self.modelfunc`` for each peak.

    Returns
    -------
    A function of the X axis that evaluates the summed model.
    """
    if pars is None:
        pars = self.parinfo
    elif not isinstance(pars, ParinfoList):
        # try to interpret `pars` as lmfit Parameters; on failure, fall
        # through and treat it as a plain sequence of values below
        try:
            partemp = copy.copy(self.parinfo)
            partemp._from_Parameters(pars)
            pars = partemp
        except AttributeError:
            if debug:
                print "Reading pars as LMPar failed."
                if debug > 1:
                    import pdb; pdb.set_trace()
            pass
    if hasattr(pars,'values'):
        # important to treat as Dictionary, since lmfit params & parinfo both have .items
        parnames,parvals = zip(*pars.items())
        parnames = [p.lower() for p in parnames]
        parvals = [p.value for p in parvals]
    else:
        parvals = list(pars)
    if debug: print "pars to n_modelfunc: ",pars
    def L(x):
        # start from zero (plus the optional constant baseline "height")
        v = np.zeros(len(x))
        if self.vheight: v += parvals[0]
        # use len(pars) instead of self.npeaks because we want this to work
        # independent of the current best fit
        # (note: relies on Python 2 integer division)
        for jj in xrange((len(parvals)-self.vheight)/self.npars):
            lower_parind = jj*self.npars+self.vheight
            upper_parind = (jj+1)*self.npars+self.vheight
            v += self.modelfunc(x, *parvals[lower_parind:upper_parind], **kwargs)
        return v
    return L
def mpfitfun(self,x,y,err=None):
    """
    Wrapper function to compute the fit residuals in an mpfit-friendly format.

    Returns a closure ``f(p, fjac=None) -> [status, residuals]`` where the
    residuals are optionally weighted by ``err``.
    """
    evaluate = lambda p: self.n_modelfunc(p, **self.modelfunc_kwargs)(x)
    if err is None:
        def residuals(p, fjac=None):
            # unweighted residuals; leading 0 is mpfit's "success" status
            return [0, y - evaluate(p)]
    else:
        def residuals(p, fjac=None):
            # error-weighted residuals
            return [0, (y - evaluate(p)) / err]
    return residuals
def __call__(self, *args, **kwargs):
    """Run a fit, dispatching to the lmfit- or mpfit-based backend."""
    # a per-call 'use_lmfit' keyword overrides the instance default
    use_lmfit = kwargs.pop('use_lmfit', self.use_lmfit)
    if use_lmfit:
        return self.lmfitter(*args, **kwargs)
    # 'single' (a background may be fit) and 'multi' (no background) modes
    # currently share the same mpfit-based fitter
    if self.multisingle in ('single', 'multi'):
        return self.fitter(*args, **kwargs)
def lmfitfun(self,x,y,err=None,debug=False):
    """
    Wrapper function to compute the fit residuals in an lmfit-friendly format.

    Parameters
    ----------
    x, y : ndarray
        Abscissa and data values
    err : ndarray (optional)
        If given, residuals are divided by the errors
    debug : bool
        Print the parameters on each evaluation

    Returns
    -------
    A function ``f(p)`` returning the (optionally weighted) residual array.
    """
    def f(p):
        #pars = [par.value for par in p.values()]
        kwargs = {}
        kwargs.update(self.modelfunc_kwargs)
        if debug: print p,kwargs.keys()
        if err is None:
            return (y-self.n_modelfunc(p,**kwargs)(x))
        else:
            return (y-self.n_modelfunc(p,**kwargs)(x))/err
    return f
def lmfitter(self, xax, data, err=None, parinfo=None, quiet=True, debug=False, **kwargs):
    """
    Use lmfit instead of mpfit to do the fitting

    Parameters
    ----------
    xax : SpectroscopicAxis
        The X-axis of the spectrum
    data : ndarray
        The data to fit
    err : ndarray (optional)
        The error on the data.  If unspecified, will be uniform unity
    parinfo : ParinfoList
        The guesses, parameter limits, etc.  See
        `pyspeckit.spectrum.parinfo` for details
    quiet : bool
        If false, print out some messages about the fitting

    Returns
    -------
    (best-fit values, model array, parameter errors, chi^2)
    """
    try:
        import lmfit
    except ImportError as e:
        raise ImportError( "Could not import lmfit, try using mpfit instead." )
    self.xax = xax # the 'stored' xax is just a link to the original
    if hasattr(xax,'convert_to_unit') and self.fitunits is not None:
        # some models will depend on the input units.  For these, pass in an X-axis in those units
        # (gaussian, voigt, lorentz profiles should not depend on units.  Ammonia, formaldehyde,
        # H-alpha, etc. should)
        xax = copy.copy(xax)
        xax.convert_to_unit(self.fitunits, quiet=quiet)
    elif self.fitunits is not None:
        raise TypeError("X axis does not have a convert method")
    # mask out non-finite data by giving it infinite error and zero value
    # NOTE(review): if err is None and data contains NaN/inf, the item
    # assignment below raises TypeError -- confirm whether err can be None here
    if np.any(np.isnan(data)) or np.any(np.isinf(data)):
        err[np.isnan(data) + np.isinf(data)] = np.inf
        data[np.isnan(data) + np.isinf(data)] = 0
    if parinfo is None:
        parinfo, kwargs = self._make_parinfo(debug=debug, **kwargs)
    if debug:
        print parinfo
    # translate the ParinfoList into lmfit Parameters
    LMParams = parinfo.as_Parameters()
    if debug:
        print "LMParams: ","\n".join([repr(p) for p in LMParams.values()])
        print "parinfo: ",parinfo
    minimizer = lmfit.minimize(self.lmfitfun(xax,np.array(data),err,debug=debug),LMParams,**kwargs)
    if not quiet:
        print "There were %i function evaluations" % (minimizer.nfev)
    #modelpars = [p.value for p in parinfo.values()]
    #modelerrs = [p.stderr for p in parinfo.values() if p.stderr is not None else 0]
    # copy the fitted values back into self.parinfo
    self.LMParams = LMParams
    self.parinfo._from_Parameters(LMParams)
    if debug:
        print LMParams
        print parinfo
    self.mp = minimizer
    self.mpp = self.parinfo.values
    self.mpperr = self.parinfo.errors
    self.mppnames = self.parinfo.names
    modelkwargs = {}
    modelkwargs.update(self.modelfunc_kwargs)
    self.model = self.n_modelfunc(self.parinfo, **modelkwargs)(xax)
    # prefer lmfit's chi^2 if available, otherwise compute it directly
    if hasattr(minimizer,'chisqr'):
        chi2 = minimizer.chisqr
    else:
        try:
            chi2 = (((data-self.model)/err)**2).sum()
        except TypeError:
            chi2 = ((data-self.model)**2).sum()
    if np.isnan(chi2):
        warn( "Warning: chi^2 is nan" )
    if hasattr(self.mp,'ier') and self.mp.ier not in [1,2,3,4]:
        print "Fitter failed: %s, %s" % (self.mp.message, self.mp.lmdif_message)
    return self.mpp,self.model,self.mpperr,chi2
def fitter(self, xax, data, err=None, quiet=True, veryverbose=False,
        debug=False, parinfo=None, **kwargs):
    """
    Run the fitter using mpfit.

    kwargs will be passed to _make_parinfo and mpfit.

    Parameters
    ----------
    xax : SpectroscopicAxis
        The X-axis of the spectrum
    data : ndarray
        The data to fit
    err : ndarray (optional)
        The error on the data.  If unspecified, will be uniform unity
    parinfo : ParinfoList
        The guesses, parameter limits, etc.  See
        `pyspeckit.spectrum.parinfo` for details
    quiet : bool
        pass to mpfit.  If False, will print out the parameter values for
        each iteration of the fitter
    veryverbose : bool
        print out a variety of mpfit output parameters
    debug : bool
        raise an exception (rather than a warning) if chi^2 is nan

    Returns
    -------
    (best-fit values, model array, parameter errors, chi^2)
    """
    if parinfo is None:
        parinfo, kwargs = self._make_parinfo(debug=debug, **kwargs)
    else:
        if debug: print "Using user-specified parinfo dict"
        # clean out disallowed kwargs (don't want to pass them to mpfit)
        #throwaway, kwargs = self._make_parinfo(debug=debug, **kwargs)
    self.xax = xax # the 'stored' xax is just a link to the original
    if hasattr(xax,'convert_to_unit') and self.fitunits is not None:
        # some models will depend on the input units.  For these, pass in an X-axis in those units
        # (gaussian, voigt, lorentz profiles should not depend on units.  Ammonia, formaldehyde,
        # H-alpha, etc. should)
        xax = copy.copy(xax)
        xax.convert_to_unit(self.fitunits, quiet=quiet)
    elif self.fitunits is not None:
        raise TypeError("X axis does not have a convert method")
    # mask out non-finite data by giving it infinite error and zero value
    # NOTE(review): if err is None and data contains NaN/inf, the item
    # assignment below raises TypeError -- confirm whether err can be None here
    if np.any(np.isnan(data)) or np.any(np.isinf(data)):
        err[np.isnan(data) + np.isinf(data)] = np.inf
        data[np.isnan(data) + np.isinf(data)] = 0
    if debug:
        for p in parinfo: print p
        print "\n".join(["%s %i: tied: %s value: %s" % (p['parname'],p['n'],p['tied'],p['value']) for p in parinfo])
    mp = mpfit(self.mpfitfun(xax,data,err),parinfo=parinfo,quiet=quiet,**kwargs)
    mpp = mp.params
    if mp.perror is not None: mpperr = mp.perror
    else: mpperr = mpp*0
    chi2 = mp.fnorm
    # status 0 indicates an mpfit failure
    if mp.status == 0:
        if "parameters are not within PARINFO limits" in mp.errmsg:
            print parinfo
        raise mpfitException(mp.errmsg)
    # copy the fitted values and errors back into self.parinfo
    for i,(p,e) in enumerate(zip(mpp,mpperr)):
        self.parinfo[i]['value'] = p
        self.parinfo[i]['error'] = e
    if veryverbose:
        print "Fit status: ",mp.status
        print "Fit error message: ",mp.errmsg
        print "Fit message: ",mpfit_messages[mp.status]
        for i,p in enumerate(mpp):
            print self.parinfo[i]['parname'],p," +/- ",mpperr[i]
        print "Chi2: ",mp.fnorm," Reduced Chi2: ",mp.fnorm/len(data)," DOF:",len(data)-len(mpp)
    self.mp = mp
    self.mpp = self.parinfo.values
    self.mpperr = self.parinfo.errors
    self.mppnames = self.parinfo.names
    self.model = self.n_modelfunc(self.parinfo,**self.modelfunc_kwargs)(xax)
    if debug:
        print "Modelpars: ",self.mpp
    if np.isnan(chi2):
        if debug:
            raise ValueError("Error: chi^2 is nan")
        else:
            print "Warning: chi^2 is nan"
    return mpp,self.model,mpperr,chi2
def slope(self, xinp):
    """
    Find the local slope of the model at location x
    (x must be in xax's units)

    Returns the mean of the two first-differences bracketing the requested
    pixel, 0 if that value is not finite, or None if no model exists yet.
    """
    if hasattr(self, 'model'):
        first_differences = np.diff(self.model)
        # translate the requested x position into a pixel index
        pixel = self.xax.x_to_pix(xinp)
        local_slope = np.average(first_differences[pixel - 1:pixel + 1])
        return local_slope if np.isfinite(local_slope) else 0
def annotations(self, shortvarnames=None, debug=False):
    """
    Return a list of TeX-formatted labels

    The values and errors are formatted so that only the significant digits
    are displayed.  Rounding is performed using the decimal package.

    Parameters
    ----------
    shortvarnames : list
        A list of variable names (tex is allowed) to include in the
        annotations.  Defaults to self.shortvarnames

    Examples
    --------
    >>> # Annotate a Gaussian
    >>> sp.specfit.annotate(shortvarnames=['A','\\Delta x','\\sigma'])
    """
    from decimal import Decimal # for formatting
    svn = self.shortvarnames if shortvarnames is None else shortvarnames
    # if pars need to be replicated....
    if len(svn) < self.npeaks*self.npars:
        svn = svn * self.npeaks
    parvals = self.parinfo.values
    parerrs = self.parinfo.errors
    # one (value, error, name, fixed?, peak-number) tuple per parameter;
    # the vheight offset skips the baseline parameter if present
    loop_list = [(parvals[ii+jj*self.npars+self.vheight],
        parerrs[ii+jj*self.npars+self.vheight],
        svn[ii+jj*self.npars],
        self.parinfo.fixed[ii+jj*self.npars+self.vheight],
        jj)
        for jj in range(self.npeaks) for ii in range(self.npars)]
    label_list = []
    for (value, error, varname, fixed, varnumber) in loop_list:
        if debug: print(value, error, varname, fixed, varnumber)
        # fixed (or error-free) parameters are shown without an error term
        if fixed or error==0:
            label = ("$%s(%i)$=%8s" % (varname,varnumber,
                Decimal("%g" % value).quantize( Decimal("%0.6g" % (value)) )))
        else:
            # quantize the value to the precision of the smaller of
            # |value| and |error| so insignificant digits are dropped
            label = ("$%s(%i)$=%8s $\\pm$ %8s" % (varname,varnumber,
                Decimal("%g" % value).quantize( Decimal("%0.2g" % (min(np.abs([value,error])))) ),
                Decimal("%g" % error).quantize(Decimal("%0.2g" % (error))),))
        label_list.append(label)
    labels = tuple(mpcb.flatten(label_list))
    return labels
def components(self, xarr, pars, **kwargs):
    """
    Return a numpy ndarray of shape [npeaks x modelshape] of the
    independent components of the fits

    Parameters
    ----------
    xarr : ndarray
        The X axis on which to evaluate each component
    pars : sequence
        Flat list of parameter values, npars per peak
    """
    # evaluate each peak separately with its own parameter slice
    # (note: items()+items() is a Python 2 dict-merge idiom)
    modelcomponents = np.array(
        [self.modelfunc(xarr,
            *pars[i*self.npars:(i+1)*self.npars],
            **dict(self.modelfunc_kwargs.items()+kwargs.items()))
         for i in range(self.npeaks)])
    # models that return multiple sub-components per peak come back 3-D;
    # flatten the first two axes into a single component axis
    if len(modelcomponents.shape) == 3:
        newshape = [modelcomponents.shape[0]*modelcomponents.shape[1], modelcomponents.shape[2]]
        modelcomponents = np.reshape(modelcomponents, newshape)
    return modelcomponents
def integral(self, modelpars, dx=None, **kwargs):
    """
    Extremely simple integrator:
    IGNORES modelpars;
    just sums self.model (scaled by dx when given)
    """
    if dx is None:
        return self.model.sum()
    return (self.model * dx).sum()
def analytic_integral(self, modelpars=None, npeaks=None, npars=None):
    """
    Placeholder for analytic integrals; these must be defined for individual
    models via ``self.integral_func``.

    Parameters
    ----------
    modelpars : sequence (optional)
        Flat parameter list, ``npars`` values per peak; defaults to the
        current best-fit values.
    npeaks, npars : int (optional)
        Override the instance's peak/parameter counts.

    Returns
    -------
    The sum of the per-peak analytic integrals.

    Raises
    ------
    NotImplementedError
        If no ``integral_func`` was supplied for this model.
    """
    if self.integral_func is None:
        raise NotImplementedError("Analytic integrals must be implemented independently for each model type")
    # all of these parameters are allowed to be overwritten
    if modelpars is None:
        modelpars = self.parinfo.values
    if npeaks is None:
        npeaks = self.npeaks
    if npars is None:
        npars = self.npars
    # sum the analytic integral of each peak's parameter slice
    # (range replaces the Python-2-only xrange; equivalent here)
    return np.sum([
        self.integral_func(modelpars[npars*ii:npars*(1+ii)])
        for ii in range(npeaks)])
def component_integrals(self, xarr, dx=None):
    """
    Compute the integrals of each component (component sum times the pixel
    spacing ``dx``, which defaults to 1).
    """
    spacing = 1 if dx is None else dx
    comps = self.components(xarr, self.parinfo.values)
    return [component.sum() * spacing for component in comps]
def analytic_fwhm(self, parinfo=None):
    """
    Return the FWHMa of the model components *if* a fwhm_func has been
    defined.

    Parameters
    ----------
    parinfo : ParinfoList or dict-like (optional)
        Parameter set to evaluate, indexed by upper-case parameter names
        (e.g. ``WIDTH0``); defaults to the current best fit.

    Returns
    -------
    list of per-peak FWHM values, as computed by ``self.fwhm_func`` from
    the parameters named in ``self.fwhm_pars``.

    Raises
    ------
    TypeError
        If no fwhm_func/fwhm_pars were defined for this model.
    """
    if self.fwhm_func is None and self.fwhm_pars is None:
        raise TypeError("fwhm_func not implemented for model %s" % self.__name__)
    if parinfo is None:
        parinfo = self.parinfo
    # BUGFIX: previously self.parinfo was read even when an explicit parinfo
    # argument was supplied; the requested parameter set is now honored.
    # (range replaces the Python-2-only xrange; equivalent here.)
    fwhm = [self.fwhm_func(
        *[parinfo[str.upper(p + '%i' % n)] for p in self.fwhm_pars])
        for n in range(self.npeaks)]
    return fwhm
def analytic_centroids(self, centroidpar=None):
    """
    Return the *analytic* centroids of the model components

    Parameters
    ----------
    centroidpar : None or string
        The name of the parameter in the fit that represents the centroid
        *some models have default centroid parameters - these will be used
        if centroidpar is unspecified*

    Returns
    -------
    List of the centroid values (even if there's only 1)
    """
    if centroidpar is None:
        centroidpar = self.centroid_par
    # match parameters whose (numbered) name contains the centroid name
    target = str.upper(centroidpar)
    return [par.value for par in self.parinfo if target in par.parname]
def computed_centroid(self, xarr=None):
    """
    Return the *computed* centroid of the model

    Parameters
    ----------
    xarr : None or np.ndarray
        The X coordinates of the model over which the centroid should be
        computed.  If unspecified, the centroid will be in pixel units
    """
    coords = np.arange(self.model.size) if xarr is None else xarr
    # model-weighted mean coordinate
    weighted_sum = (self.model * coords).sum()
    return weighted_sum / self.model.sum()
def logp(self, xarr, data, error, pars=None):
    """
    Return the log probability of the model.

    This is the Gaussian log-likelihood up to an additive constant:
    -sum((data - model)^2 / (2 error^2)).
    """
    params = self.parinfo if pars is None else pars
    model = self.n_modelfunc(params, **self.modelfunc_kwargs)(xarr)
    residual = np.abs(data - model)
    per_point = -residual ** 2 / (2. * error ** 2)
    return np.sum(per_point)
def get_emcee_sampler(self, xarr, data, error, **kwargs):
    """
    Get an emcee walker for the data & model

    Parameters
    ----------
    xarr : pyspeckit.units.SpectroscopicAxis
    data : np.ndarray
    error : np.ndarray

    Examples
    --------
    >>> import pyspeckit
    >>> x = pyspeckit.units.SpectroscopicAxis(np.linspace(-10,10,50), unit='km/s')
    >>> e = np.random.randn(50)
    >>> d = np.exp(-np.asarray(x)**2/2.)*5 + e
    >>> sp = pyspeckit.Spectrum(data=d, xarr=x, error=np.ones(50)*e.std())
    >>> sp.specfit(fittype='gaussian')
    >>> emcee_sampler = sp.specfit.fitter.get_emcee_sampler(sp.xarr, sp.data, sp.error)
    >>> p0 = sp.specfit.parinfo
    >>> emcee_sampler.run_mcmc(p0,100)
    """
    # silently degrade to None if emcee is unavailable
    try:
        import emcee
    except ImportError:
        return
    def probfunc(pars):
        return self.logp(xarr, data, error, pars=pars)
    # NOTE: deliberately disabled -- the two lines after this raise are
    # unreachable until the MH sampler support is implemented
    raise NotImplementedError("emcee's metropolis-hastings sampler is not implemented; use pymc")
    sampler = emcee.MHSampler(self.npars*self.npeaks+self.vheight, probfunc, **kwargs)
    return sampler
def get_emcee_ensemblesampler(self, xarr, data, error, nwalkers, **kwargs):
    """
    Get an emcee walker ensemble for the data & model

    Parameters
    ----------
    data : np.ndarray
    error : np.ndarray
    nwalkers : int
        Number of walkers to use

    Examples
    --------
    >>> import pyspeckit
    >>> x = pyspeckit.units.SpectroscopicAxis(np.linspace(-10,10,50), unit='km/s')
    >>> e = np.random.randn(50)
    >>> d = np.exp(-np.asarray(x)**2/2.)*5 + e
    >>> sp = pyspeckit.Spectrum(data=d, xarr=x, error=np.ones(50)*e.std())
    >>> sp.specfit(fittype='gaussian')
    >>> nwalkers = sp.specfit.fitter.npars * 2
    >>> emcee_ensemble = sp.specfit.fitter.get_emcee_ensemblesampler(sp.xarr, sp.data, sp.error, nwalkers)
    >>> p0 = np.array([sp.specfit.parinfo.values] * nwalkers)
    >>> p0 *= np.random.randn(*p0.shape) / 10. + 1.0
    >>> pos,logprob,state = emcee_ensemble.run_mcmc(p0,100)
    """
    # silently degrade to None if emcee is unavailable
    try:
        import emcee
    except ImportError:
        return
    def probfunc(pars):
        return self.logp(xarr, data, error, pars=pars)
    # dimensionality = all peak parameters plus the optional baseline height
    sampler = emcee.EnsembleSampler(nwalkers, self.npars*self.npeaks+self.vheight, probfunc, **kwargs)
    return sampler
def get_pymc(self, xarr, data, error, use_fitted_values=False, inf=np.inf,
        use_adaptive=False, return_dict=False, **kwargs):
    """
    Create a pymc MCMC sampler.  Defaults to 'uninformative' priors

    Parameters
    ----------
    data : np.ndarray
    error : np.ndarray
    use_fitted_values : bool
        Each parameter with a measured error will have a prior defined by
        the Normal distribution with sigma = par.error and mean = par.value

    Examples
    --------
    >>> x = pyspeckit.units.SpectroscopicAxis(np.linspace(-10,10,50), unit='km/s')
    >>> e = np.random.randn(50)
    >>> d = np.exp(-np.asarray(x)**2/2.)*5 + e
    >>> sp = pyspeckit.Spectrum(data=d, xarr=x, error=np.ones(50)*e.std())
    >>> sp.specfit(fittype='gaussian')
    >>> MCuninformed = sp.specfit.fitter.get_pymc(sp.xarr, sp.data, sp.error)
    >>> MCwithpriors = sp.specfit.fitter.get_pymc(sp.xarr, sp.data, sp.error, use_fitted_values=True)
    >>> MCuninformed.sample(1000)
    >>> MCuninformed.stats()['AMPLITUDE0']
    >>> # WARNING: This will fail because width cannot be set <0, but it may randomly reach that...
    >>> # How do you define a likelihood distribution with a lower limit?!
    >>> MCwithpriors.sample(1000)
    >>> MCwithpriors.stats()['AMPLITUDE0']
    """
    old_errsettings = np.geterr()
    try:
        import pymc
    finally:
        # pymc breaks error settings
        np.seterr(**old_errsettings)
    #def lowerlimit_like(x,lolim):
    #    "lower limit (log likelihood - set very positive for unacceptable values)"
    #    return (x>=lolim) / 1e10
    #def upperlimit_like(x,uplim):
    #    "upper limit"
    #    return (x<=uplim) / 1e10
    #LoLim = pymc.distributions.stochastic_from_dist('lolim', logp=lowerlimit_like, dtype=np.float, mv=False)
    #UpLim = pymc.distributions.stochastic_from_dist('uplim', logp=upperlimit_like, dtype=np.float, mv=False)
    funcdict = {}
    # very, very worrisome: pymc changes the values of parinfo
    parcopy = copy.deepcopy(self.parinfo)
    # build one prior per parameter
    for par in parcopy:
        lolim = par.limits[0] if par.limited[0] else -inf
        uplim = par.limits[1] if par.limited[1] else inf
        if par.fixed:
            # fixed parameters get a zero-width uniform prior
            funcdict[par.parname] = pymc.distributions.Uniform(par.parname, par.value, par.value, value=par.value)
        elif use_fitted_values:
            if par.error > 0:
                if any(par.limited):
                    try:
                        funcdict[par.parname] = pymc.distributions.TruncatedNormal(par.parname, par.value, 1./par.error**2, lolim, uplim)
                    except AttributeError:
                        # old versions used this?
                        funcdict[par.parname] = pymc.distributions.TruncNorm(par.parname, par.value, 1./par.error**2, lolim, uplim)
                else:
                    funcdict[par.parname] = pymc.distributions.Normal(par.parname, par.value, 1./par.error**2)
            else:
                if any(par.limited):
                    funcdict[par.parname] = pymc.distributions.Uniform(par.parname, lolim, uplim, value=par.value)
                else:
                    funcdict[par.parname] = pymc.distributions.Uninformative(par.parname, value=par.value)
        elif any(par.limited):
            # Uniform cannot take infinite bounds, so substitute +/-1e10
            lolim = par.limits[0] if par.limited[0] else -1e10
            uplim = par.limits[1] if par.limited[1] else 1e10
            funcdict[par.parname] = pymc.distributions.Uniform(par.parname, lower=lolim, upper=uplim, value=par.value)
        else:
            funcdict[par.parname] = pymc.distributions.Uninformative(par.parname, value=par.value)
    d = dict(funcdict)
    def modelfunc(xarr, pars=parcopy, **kwargs):
        # push the sampled values into the copied parinfo, then evaluate
        for k,v in kwargs.iteritems():
            if k in pars.keys():
                pars[k].value = v
        return self.n_modelfunc(pars, **self.modelfunc_kwargs)(xarr)
    funcdict['xarr'] = xarr
    funcdet=pymc.Deterministic(name='f',eval=modelfunc,parents=funcdict,doc="The model function")
    d['f'] = funcdet
    # Gaussian likelihood of the observed data given the model
    datamodel = pymc.distributions.Normal('data',mu=funcdet,tau=1/np.asarray(error)**2,observed=True,value=np.asarray(data))
    d['data']=datamodel
    if return_dict:
        return d
    mc = pymc.MCMC(d)
    if use_adaptive:
        mc.use_step_method(pymc.AdaptiveMetropolis,[d[p] for p in self.parinfo.names])
    return mc
# Adapter that exposes an `astropy.models` model through the SpectralModel
# fitting interface.  Single-peak only; no baseline ("vheight") support.
class AstropyModel(SpectralModel):
    def __init__(self, model, shortvarnames=None, **kwargs):
        """
        Override the SpectralModel initialization
        """
        # NOTE(review): this likely meant hasattr(self, '__doc__') -- as
        # written the module-level __doc__ *object* is passed as the
        # attribute name, which raises TypeError if the module docstring is
        # None.  Confirm intent before changing.
        if hasattr(self,__doc__): # how do you extend a docstring really?
            self.__doc__ += SpectralModel.__doc__
        if shortvarnames is None:
            shortvarnames = model.param_names
        # the astropy model itself doubles as the modelfunc; npars is its
        # parameter count
        super(AstropyModel,self).__init__(model, len(model.parameters),
            shortvarnames=shortvarnames,
            multisingle='multi',
            model=model,
            **kwargs)
        self.mp = None
        self.vheight = False  # no additive baseline parameter
        self.npeaks = 1  # astropy models are fit as a single component

    def _make_parinfo(self, model=None):
        # Build a ParinfoList directly from the astropy model's parameters.
        self.parinfo = ParinfoList([
            Parinfo(parname=name,value=value)
            for name,value in zip(model.param_names,model.parameters)])
        return self.parinfo, {}

    def _parse_parinfo(self, parinfo):
        """
        Parse a ParinfoList into astropy.models parameters
        """
        if len(parinfo) > self.npars:
            if len(parinfo) % self.npars != 0:
                raise ValueError("Need to have an integer number of models")
            else:
                # NOTE(review): this branch is identical to the single-model
                # case below; multi-model support appears unfinished.
                self.modelfunc.param_names = parinfo.names
                self.modelfunc.parameters = parinfo.values
        else:
            self.modelfunc.param_names = parinfo.names
            self.modelfunc.parameters = parinfo.values

    def fitter(self, xax, data, err=None, quiet=True, veryverbose=False,
            debug=False, parinfo=None, params=None, npeaks=None, **kwargs):
        """
        Fit the wrapped astropy model with NonLinearLSQFitter.

        Returns (best-fit parameters, model array, parameter errors, chi^2),
        matching the SpectralModel.fitter contract.
        """
        import astropy.models as models
        if npeaks is not None and npeaks > 1:
            raise NotImplementedError("Astropy models cannot be used to fit multiple peaks yet")
        if parinfo is not None:
            self._parse_parinfo(parinfo)
        if params is not None:
            self.modelfunc.parameters = params
        self.astropy_fitter = models.fitting.NonLinearLSQFitter(self.modelfunc)
        # weights = 1/err^2 gives chi^2-style weighting
        if err is None:
            self.astropy_fitter(xax, data, **kwargs)
        else:
            self.astropy_fitter(xax, data, weights=1./err**2, **kwargs)
        mpp = self.astropy_fitter.fitpars
        cov = self.astropy_fitter.covar
        if cov is None:
            mpperr = np.zeros(len(mpp))
        else:
            # NOTE(review): the covariance diagonal is the parameter
            # *variance*; other fitters report standard deviations (no sqrt
            # is taken here) -- confirm which convention callers expect.
            mpperr = cov.diagonal()
        self.model = self.astropy_fitter.model(xax)
        if err is None:
            chi2 = ((data-self.model)**2).sum()
        else:
            chi2 = ((data-self.model)**2/err**2).sum()
        # update object paramters
        self.modelfunc.parameters = mpp
        self._make_parinfo(self.modelfunc)
        return mpp,self.model,mpperr,chi2

    def n_modelfunc(self, pars=None, debug=False, **kwargs):
        """
        Only deals with single-peak functions
        """
        try:
            self._parse_parinfo(pars)
        except AttributeError:
            # pars was a plain value sequence rather than a ParinfoList
            self.modelfunc.parameters = pars
        return self.modelfunc
"Gaussian"
] | c1a00eb5514d192248ad1b648aa46337b47413c8a045ef9adee56423790e83c7 |
# Copyright 2000 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
This module provides code to access resources at ExPASy over the WWW.
http://www.expasy.ch/
Functions:
get_prodoc_entry Interface to the get-prodoc-entry CGI script.
get_prosite_entry Interface to the get-prosite-entry CGI script.
get_prosite_raw Interface to the get-prosite-raw CGI script.
get_sprot_raw Interface to the get-sprot-raw CGI script.
sprot_search_ful Interface to the sprot-search-ful CGI script.
sprot_search_de Interface to the sprot-search-de CGI script.
"""
import urllib
def get_prodoc_entry(id, cgi='http://www.expasy.ch/cgi-bin/get-prodoc-entry'):
    """get_prodoc_entry(id,
    cgi='http://www.expasy.ch/cgi-bin/get-prodoc-entry') -> handle

    Fetch a PRODOC entry from ExPASy and return it as an HTML handle.

    For a non-existing key XXX, ExPASy returns an HTML-formatted page
    containing this line:
    'There is no PROSITE documentation entry XXX. Please try again.'
    """
    # Query ExPASy directly; the entry id is the sole query parameter.
    return urllib.urlopen("%s?%s" % (cgi, id))
def get_prosite_entry(id,
                      cgi='http://www.expasy.ch/cgi-bin/get-prosite-entry'):
    """get_prosite_entry(id,
    cgi='http://www.expasy.ch/cgi-bin/get-prosite-entry') -> handle

    Fetch a PROSITE entry from ExPASy and return it as an HTML handle.

    For a non-existing key XXX, ExPASy returns an HTML-formatted page
    containing this line:
    'There is currently no PROSITE entry for XXX. Please try again.'
    """
    # Query ExPASy directly; the entry id is the sole query parameter.
    return urllib.urlopen("%s?%s" % (cgi, id))
def get_prosite_raw(id, cgi='http://www.expasy.ch/cgi-bin/get-prosite-raw.pl'):
    """get_prosite_raw(id,
    cgi='http://www.expasy.ch/cgi-bin/get-prosite-raw.pl')
    -> handle

    Fetch a raw (plain-text) PROSITE or PRODOC entry from ExPASy.

    For a non-existing key, ExPASy returns nothing.
    """
    # Query ExPASy directly; the entry id is the sole query parameter.
    return urllib.urlopen("%s?%s" % (cgi, id))
def get_sprot_raw(id, cgi=None):
    """Get a handle to a raw SwissProt entry at ExPASy.

    For an ID of XXX, fetches http://www.uniprot.org/uniprot/XXX.txt
    (as per the http://www.expasy.ch/expasy_urls.html documentation).

    For a non-existing key XXX, ExPASy returns an HTML Error 404 page.

    The historical cgi argument is deprecated and ignored: prior to
    November 2009 the URL http://www.expasy.ch/cgi-bin/get-sprot-raw.pl?XXX
    was used, but at the time of writting that returns FASTA format instead
    (probably an ExPASy/UniProt oversight), and under the new URL scheme we
    cannot just append "?XXX" to a cgi argument.
    """
    if cgi:
        # warn (once per location) that the cgi override no longer works
        import warnings
        import Bio
        warnings.warn("The cgi argument in get_sprot_raw is not "
                      "supported anymore", Bio.BiopythonDeprecationWarning)
    return urllib.urlopen("http://www.uniprot.org/uniprot/%s.txt" % id)
def sprot_search_ful(text, make_wild=None, swissprot=1, trembl=None,
                     cgi='http://www.expasy.ch/cgi-bin/sprot-search-ful'):
    """sprot_search_ful(text, make_wild=None, swissprot=1, trembl=None,
    cgi='http://www.expasy.ch/cgi-bin/sprot-search-ful') -> handle

    Search SwissProt by full text; returns a handle to the results page.
    """
    variables = {'SEARCH' : text}
    # map each enabled option onto its CGI flag
    for enabled, key in ((make_wild, 'makeWild'),
                         (swissprot, 'S'),
                         (trembl, 'T')):
        if enabled:
            variables[key] = 'on'
    options = urllib.urlencode(variables)
    return urllib.urlopen("%s?%s" % (cgi, options))
def sprot_search_de(text, swissprot=1, trembl=None,
                    cgi='http://www.expasy.ch/cgi-bin/sprot-search-de'):
    """sprot_search_de(text, swissprot=1, trembl=None,
    cgi='http://www.expasy.ch/cgi-bin/sprot-search-de') -> handle

    Search SwissProt by name, description, gene name, species, or
    organelle; returns a handle to the results page.
    """
    variables = {'SEARCH' : text}
    # map each enabled database onto its CGI flag
    for enabled, key in ((swissprot, 'S'), (trembl, 'T')):
        if enabled:
            variables[key] = 'on'
    options = urllib.urlencode(variables)
    return urllib.urlopen("%s?%s" % (cgi, options))
| BlogomaticProject/Blogomatic | opt/blog-o-matic/usr/lib/python/Bio/ExPASy/__init__.py | Python | gpl-2.0 | 4,374 | [
"Biopython"
] | 41f904c3418be47a5061ce29ca7a381b90a32d7a13e93044dcb152714323f159 |
import webapp2
import cgi
import datetime
import import_path
from models.tracker.Identifiers import *
from models.tracker.Visits import *
class TrackHandler(webapp2.RequestHandler):
    """Records a visit for a tracking token, then redirects the client."""

    def get(self):
        """Handle GET: validate the token, log the visit, and redirect."""
        token = cgi.escape(self.request.get('t'))
        if token == None or len(token) == 0:
            self.response.out.write("no token found")
            return
        # resolve the token to its tracking identifier
        identifier = IdentifierModel().GetByToken(token)
        # record this hit (IP + user agent) under the identifier
        visit = VisitModel(parent=identifier)
        visit.Ip = self.request.remote_addr
        visit.Agent = str(self.request.headers['User-Agent'])
        visit.Save()
        # send the visitor on to the configured URL, or a 1x1 gif otherwise
        target = identifier.RedirectUrl
        if target != None and len(target) > 0:
            self.redirect(str(target))
        else:
            self.redirect('/1p.gif')
| cipicip/appengine | ccutilapp/handlers/tracker/TrackHandler.py | Python | apache-2.0 | 715 | [
"VisIt"
] | 6059f8cbb8486894f978ddb40550c90a09eb0a79826d1120316a33743ffb5e29 |
import os
import tempfile
import unittest
from pyepw.epw import EPW
class TestReadEPW(unittest.TestCase):
    """Round-trip test: read an EPW weather file, save it, and re-read it."""

    def setUp(self):
        # mkstemp returns an open OS-level file descriptor; close it
        # immediately so the test does not leak a descriptor (and so the
        # file can be freely rewritten/removed on all platforms). The
        # file itself is kept and deleted in tearDown.
        self.fd, self.path = tempfile.mkstemp()
        os.close(self.fd)

    def tearDown(self):
        os.remove(self.path)

    def test_read_epw(self):
        # Parse a known-good TMY3 file, write it back out, and make sure
        # the saved copy parses again without raising.
        epw = EPW()
        epw.read(r"tests/data/USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw")
        epw.save(self.path)
        epw2 = EPW()
        epw2.read(self.path)
| rbuffat/pyepw | tests/test_read.py | Python | apache-2.0 | 439 | [
"EPW"
] | 4d1f9827b23695fe1a8e42ee84fe6141f0fa5aae7fb222a84352d0b17ac46fbd |
# Install third-party packages
# Read/write files and directories
import os
# Read command line arguments
import argparse
# Numeric Python
import numpy
# Read JSON file format
import json
# Install imagaing packages
import skimage
from skimage import io
# Ignore warnings so they won't be displayed
import warnings
warnings.filterwarnings('ignore')
# Metadata files specify which channels were used for imaging.
# Maps the numeric channel id found in metadata.svd to the excitation
# wavelength (nm) label used when naming output files.
channels = {
    '1': '470',
    '2': '660',
    '3': '750',
    '4': '800',
    '5': 'ChannelError'
}

# The file extension indicates which type of image the file holds.
# Maps the snapshot file extension to a readable image-type label
# used in output file names.
image_types = {
    'ssr': 'RGB',
    'ssa': 'Monochrome',
    'ssm': 'Side-by-Side'
}

# In an advanced mode the user can acquire images using a
# Liquid Crystal Tunable Filter (LCTF). In this mode one image is acquired
# per emission filter below; 'Target', 'Tissue', and 'Food' are computed by
# the unmixing algorithm on the system.
LCTF_channels = ['520',
                 '530',
                 '540',
                 '550',
                 '560',
                 '570',
                 '580',
                 '590',
                 '600',
                 '610',
                 '620',
                 'Target',
                 'Tissue',
                 'Food']
def str2bool(v):
    """Parse a truthy/falsy command-line string into a bool.

    Accepts yes/true/t/y/1 and no/false/f/n/0 (case-insensitive);
    raises argparse.ArgumentTypeError for anything else.
    """
    normalized = v.lower()
    if normalized in ('yes', 'true', 't', 'y', '1'):
        return True
    if normalized in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
# This is the main function to read the image files in a directory
def read_solaris_image_set(directory, file_name, lctf_channel=False):
    """Read one Solaris snapshot image plus its metadata.

    directory    -- snapshot (or LCTF channel) directory holding the image
    file_name    -- image file name, e.g. 'Snapshot....ssr' / '....ssa'
    lctf_channel -- True when 'directory' is an LCTF channel subdirectory,
                    whose metadata.svd lives one level up

    Returns [image_array, image_info] for '.ssr'/'.ssa' files.
    Returns None (implicitly) for '.ssm' side-by-side files, which carry
    no unique data.

    NOTE(review): relies on the module-level 'height' set in __main__ and
    assumes the image is square (height x height) -- confirm for non-1024
    acquisitions.
    """
    # Read snapshot metadata; LCTF channels store it in the parent directory
    # ('..' moves up one directory).
    if lctf_channel:
        snapshot_metadata = os.path.join(directory, '..', 'metadata.svd')
    else:
        snapshot_metadata = os.path.join(directory, 'metadata.svd')
    with open(snapshot_metadata) as metadata_file:
        snapshot_metadata = json.load(metadata_file)

    # Extract extra information about the image from the metadata file.
    current_channel_num = str(snapshot_metadata['Channel'])
    current_channel = channels[current_channel_num]
    snaphot_name = snapshot_metadata['DataName']

    # Construct the full path of the image file.
    current_full_file = os.path.join(directory, file_name)
    # The file extension identifies the image type (ssr/ssa/ssm).
    field_name = file_name.split('.')[1]

    # Store all the image information in a single dictionary.
    image_info = {
        'channel_num': current_channel_num,
        'channel_name': current_channel,
        'snapshot_name': snaphot_name,
        'field_name': field_name
    }
    # Print debug information about the current file.
    print('Reading: {}\n\t{}'.format(current_full_file, image_info))

    # Read the image data as long as it is not a side-by-side image.
    if field_name != 'ssm':
        # numpy.fromfile opens the file by path itself; the previous
        # redundant open() wrapper (which also shadowed the builtin
        # 'file') was removed.
        if field_name == 'ssr':
            # 8-bit color image
            byte_array = numpy.fromfile(current_full_file, dtype='uint8')
        else:
            # 16-bit monochrome fluorescent image (.ssa)
            byte_array = numpy.fromfile(current_full_file, dtype='uint16')
        # Reconstruct the square image from the flat byte array.
        if field_name == 'ssr':
            # Color image (R G B)
            reconstructed_im = numpy.reshape(byte_array, [height, height, 3])
        else:
            # Monochrome 16-bit image
            reconstructed_im = numpy.reshape(byte_array, [height, height])
            # Flip fluorescent image (up-down).
            reconstructed_im = numpy.flipud(reconstructed_im)
        # Rotate image -90 degrees.
        reconstructed_im = numpy.rot90(reconstructed_im, -1)
        return [reconstructed_im, image_info]
# If the group file is used, we want to include the group names
# in the output file names.
def read_all_file_with_group(study_data, input_dir, output_dir, channels=channels, image_types=image_types, LCTF_channels=LCTF_channels):
    """Read/export every snapshot image for a study that has group metadata.

    study_data -- parsed groups.svd contents (list of group dicts with
                  'Name' and 'SubjectNames' keys)
    input_dir  -- study directory containing one folder per timepoint
    output_dir -- directory where .tif files are written (when write_files)

    Returns a nested dict:
        images[group][timepoint][snapshot][(lctf_channel)][image_type] -> array

    NOTE(review): depends on the module-level globals 'search_term' and
    'write_files' set in __main__. (The unused 'solaris_dirs' accumulator
    from the original was removed.)
    """
    # Nested dictionary holding every decoded image array.
    solaris_images = {}
    # The group file lists the experiments; loop through all of them.
    for group in study_data:
        group_name = group['Name']
        # Create a sub-dictionary for the group.
        solaris_images[group_name] = {}
        print('{}'.format(group_name))
        # Within each group/experiment there can be multiple subjects/timepoints.
        for time_point in group['SubjectNames']:
            solaris_images[group_name][time_point] = {}
            print('\t{}'.format(time_point))
            timepoint_dir = os.path.join(input_dir, time_point)
            # Each time point can hold multiple snapshots, one per directory.
            snapshot_dirs = os.listdir(timepoint_dir)
            for snapshot_dir in snapshot_dirs:
                # Only process directories with the search term (e.g. "Snapshot").
                if search_term in snapshot_dir:
                    solaris_images[group_name][time_point][snapshot_dir] = {}
                    # Spectrally-unmixed snapshots contain one subdirectory
                    # per LCTF emission wavelength.
                    if 'Unmixed' in snapshot_dir:
                        channel_dirs = os.listdir(os.path.join(timepoint_dir, snapshot_dir))
                        for each_channel in channel_dirs:
                            # Only recognized LCTF channel names are read.
                            if each_channel in LCTF_channels:
                                solaris_images[group_name][time_point][snapshot_dir][each_channel] = {}
                                full_snapshot_dir = os.path.join(input_dir, time_point, snapshot_dir, each_channel)
                                snapshot_files = os.listdir(full_snapshot_dir)
                                file_matches = [s for s in snapshot_files if search_term in s]
                                for image_file in file_matches:
                                    # Skip side-by-side (.ssm) images.
                                    if '.ssm' not in image_file:
                                        [reconstructed_im, image_info] = read_solaris_image_set(full_snapshot_dir, image_file, True)
                                        if write_files:
                                            # Construct the output file name.
                                            output_filename = '{}_{}_{}_LCTF{}_{}'.format(group_name,
                                                                                          time_point,
                                                                                          image_types[image_info['field_name']],
                                                                                          each_channel,
                                                                                          image_info['snapshot_name'])
                                            # Remove unsafe characters from the file name.
                                            safe_filename = "".join([c for c in output_filename if c.isalpha() or c.isdigit() or c==' ' or c=='_']).rstrip()
                                            skimage.io.imsave( os.path.join(output_dir, '{}.tif'.format(safe_filename)), reconstructed_im)
                                        # Store the image array in the dictionary.
                                        solaris_images[group_name][time_point][snapshot_dir][each_channel][image_types[image_info['field_name']]] = reconstructed_im
                    # Not a spectrally unmixed image set.
                    else:
                        full_snapshot_dir = os.path.join(input_dir, time_point, snapshot_dir)
                        snapshot_files = os.listdir(full_snapshot_dir)
                        file_matches = [s for s in snapshot_files if search_term in s]
                        for image_file in file_matches:
                            # Skip side-by-side (.ssm) images.
                            if '.ssm' not in image_file:
                                reconstructed_im, image_info = read_solaris_image_set(full_snapshot_dir, image_file)
                                if write_files:
                                    # Construct the output file name.
                                    output_filename = '{}_{}_{}_{}_{}'.format(group_name,
                                                                              time_point,
                                                                              image_types[image_info['field_name']],
                                                                              image_info['channel_name'],
                                                                              image_info['snapshot_name'])
                                    # Remove unsafe characters from the file name.
                                    safe_filename = "".join([c for c in output_filename if c.isalpha() or c.isdigit() or c==' ' or c=='_']).rstrip()
                                    skimage.io.imsave( os.path.join(output_dir, '{}.tif'.format(safe_filename)), reconstructed_im)
                                # Store the image array in the dictionary.
                                solaris_images[group_name][time_point][snapshot_dir][image_types[image_info['field_name']]] = reconstructed_im
    return solaris_images
# If the group file is NOT used, we can still read the image data,
# but the process (and output naming) is slightly different.
def read_all_file_without_group(input_dir, output_dir, channels=channels, image_types=image_types, LCTF_channels=LCTF_channels):
    """Read/export every snapshot image for a study without group metadata.

    input_dir  -- study directory containing one folder per timepoint
    output_dir -- directory where .tif files are written (when write_files)

    Returns a nested dict:
        images[timepoint][snapshot][(lctf_channel)][image_type] -> array

    NOTE(review): depends on the module-level globals 'search_term' and
    'write_files' set in __main__. (The unused 'solaris_dirs' accumulator
    from the original was removed.)
    """
    # Nested dictionary holding every decoded image array.
    solaris_images = {}
    # Every sub-directory of the input directory is treated as a timepoint.
    all_timepoints = os.listdir(input_dir)
    for time_point in all_timepoints:
        print('\t{}'.format(time_point))
        solaris_images[time_point] = {}
        timepoint_dir = os.path.join(input_dir, time_point)
        # Skip plain files; only directories hold snapshots.
        if os.path.isdir(timepoint_dir):
            snapshot_dirs = os.listdir(timepoint_dir)
            for snapshot_dir in snapshot_dirs:
                # Only process directories with the search term (e.g. "Snapshot").
                if search_term in snapshot_dir:
                    solaris_images[time_point][snapshot_dir] = {}
                    # Spectrally-unmixed snapshots contain one subdirectory
                    # per LCTF emission wavelength.
                    if 'Unmixed' in snapshot_dir:
                        channel_dirs = os.listdir(os.path.join(timepoint_dir, snapshot_dir))
                        for each_channel in channel_dirs:
                            if each_channel in LCTF_channels:
                                solaris_images[time_point][snapshot_dir][each_channel] = {}
                                full_snapshot_dir = os.path.join(input_dir, time_point, snapshot_dir, each_channel)
                                snapshot_files = os.listdir(full_snapshot_dir)
                                file_matches = [s for s in snapshot_files if search_term in s]
                                for image_file in file_matches:
                                    # Skip side-by-side (.ssm) images.
                                    if '.ssm' not in image_file:
                                        [reconstructed_im, image_info] = read_solaris_image_set(full_snapshot_dir, image_file, True)
                                        if write_files:
                                            # Construct the output file name.
                                            output_filename = '{}_{}_LCTF{}_{}'.format(time_point,
                                                                                       image_types[image_info['field_name']],
                                                                                       each_channel,
                                                                                       image_info['snapshot_name'])
                                            # Remove unsafe characters from the file name.
                                            safe_filename = "".join([c for c in output_filename if c.isalpha() or c.isdigit() or c==' ' or c=='_']).rstrip()
                                            skimage.io.imsave( os.path.join(output_dir, '{}.tif'.format(safe_filename)), reconstructed_im)
                                        # Store the image array in the dictionary.
                                        solaris_images[time_point][snapshot_dir][each_channel][image_types[image_info['field_name']]] = reconstructed_im
                    # Not a spectrally unmixed image set.
                    else:
                        full_snapshot_dir = os.path.join(input_dir, time_point, snapshot_dir)
                        snapshot_files = os.listdir(full_snapshot_dir)
                        file_matches = [s for s in snapshot_files if search_term in s]
                        for image_file in file_matches:
                            # Skip side-by-side (.ssm) images.
                            if '.ssm' not in image_file:
                                reconstructed_im, image_info = read_solaris_image_set(full_snapshot_dir, image_file)
                                if write_files:
                                    # Construct the output file name.
                                    output_filename = '{}_{}_{}_{}'.format(time_point,
                                                                           image_types[image_info['field_name']],
                                                                           image_info['channel_name'],
                                                                           image_info['snapshot_name'])
                                    # Remove unsafe characters from the file name.
                                    safe_filename = "".join([c for c in output_filename if c.isalpha() or c.isdigit() or c==' ' or c=='_']).rstrip()
                                    skimage.io.imsave( os.path.join(output_dir, '{}.tif'.format(safe_filename)), reconstructed_im)
                                # Store the image array in the dictionary.
                                solaris_images[time_point][snapshot_dir][image_types[image_info['field_name']]] = reconstructed_im
    return solaris_images
# ********************** MAIN function ********************** #
if __name__ == "__main__":
    # Command-line interface for batch exporting Solaris snapshots.
    parser = argparse.ArgumentParser(description='Batch process Solaris images.')
    parser.add_argument('experiment', type=str,
                        help='The directory for the experiment to batch convert (in quotes if spaces)')
    parser.add_argument('--size', dest='im_size', type=int, default=1024,
                        help='image dimension. Default 1024. Other options: 512, 256')
    parser.add_argument('--search_file', dest='search_term', type=str, default='Snapshot',
                        help='File search term. Default: \'Snapshot\'')
    parser.add_argument('--write', dest='write_files', type=str2bool, default=True,
                        help='Write output files. Default: True')
    args = parser.parse_args()

    ## MODIFY HERE ##
    # Site-specific root directories for acquired data and exports.
    input_root_dir = 'D:\\\\SolarisData\\Research\\'
    output_root_dir = 'D:\\\\ExportData\\'
    ## STOP MODIFY ##

    cur_experiment_dir = args.experiment
    # If testing, write_files can be set to False; this is slightly faster
    # because nothing is written to disk.
    write_files = args.write_files
    # All image files are assumed to contain search_term in their name.
    search_term = args.search_term
    # The Solaris allows three image sizes; 1024x1024 is typical.
    # NOTE: the readers assume square images (width == height).
    height = args.im_size
    width = args.im_size

    input_dir = os.path.join(input_root_dir, cur_experiment_dir)
    output_dir = os.path.join(output_root_dir, cur_experiment_dir)
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)

    # The group file (groups.svd) stores the names of experiments, but it
    # is not always present or populated.
    groups_file = os.path.join(input_dir, 'groups.svd')
    # Read it if present; an empty list means "no groups" and the
    # group-less reader is used instead.
    use_group_meta = False
    if os.path.isfile(groups_file):
        with open(groups_file) as data_file:
            study_data = json.load(data_file)
        if study_data!=[]:
            use_group_meta = True

    if use_group_meta:
        output_images = read_all_file_with_group(study_data, input_dir, output_dir)
    else:
        output_images = read_all_file_without_group(input_dir, output_dir)
| ethanlarochelle/solaris-tools | CLI/cli_solaris_batch_export.py | Python | gpl-3.0 | 20,729 | [
"CRYSTAL"
] | 7fcd88825f0085ac75276a5617996da4bc98831720e9bb25b556365848fe2ab3 |
# -*- coding: utf-8 -*-
import re
import math
# Extract the features of a document (here: its distinct words).
def getwords(doc):
    """Return a dict mapping each distinct word in doc (lowercased,
    length 3-19) to 1, for use as classifier features."""
    # BUGFIX: split on runs of non-word characters. The original '\W*'
    # pattern also matches the empty string, which under Python >= 3.7
    # splits the text into single characters, so no word survives the
    # length filter below (a known erratum of this book example).
    splitter = re.compile(r'\W+')
    # Split the document into words on non-letter characters.
    words = [s.lower() for s in splitter.split(doc)
             if len(s) > 2 and len(s) < 20]
    return dict([(w, 1) for w in words])
# Sample-training helper so the fixed corpus need not be retyped each time.
def sampletrain(cl):
    """Feed a small fixed good/bad training corpus into classifier cl."""
    examples = [
        ('Nobody owns the water', 'good'),
        ('the quick rabbit jumps fences', 'good'),
        ('buy pharmaceuticals now', 'bad'),
        ('make quick money at the online casino', 'bad'),
        ('the quick brown fox jumps', 'good'),
    ]
    for item, cat in examples:
        cl.train(item, cat)
# Base class shared by all the classifiers.
class classifier:
    """Tracks feature/category counts and derives feature probabilities.

    getfeatures -- callable mapping an item to a dict of features
    filename    -- reserved for persistence (unused here)
    """

    def __init__(self, getfeatures, filename=None):
        # fc[feature][category] -> number of times the combination was seen
        self.fc = {}
        # cc[category] -> number of items trained for that category
        self.cc = {}
        self.getfeatures = getfeatures

    def incf(self, f, cat):
        """Increment the count for feature f in category cat."""
        counts = self.fc.setdefault(f, {})
        counts[cat] = counts.get(cat, 0) + 1

    def incc(self, cat):
        """Increment the item count for category cat."""
        self.cc[cat] = self.cc.get(cat, 0) + 1

    def fcount(self, f, cat):
        """Times feature f appeared in category cat (as a float)."""
        return float(self.fc.get(f, {}).get(cat, 0.0))

    def catcount(self, cat):
        """Number of items in category cat (as a float)."""
        return float(self.cc.get(cat, 0.0))

    def totalcount(self):
        """Total number of trained items across all categories."""
        return sum(self.cc.values())

    def categories(self):
        """All known category names."""
        return self.cc.keys()

    def train(self, item, cat):
        """Record item as an example of category cat."""
        for feature in self.getfeatures(item):
            self.incf(feature, cat)
        self.incc(cat)

    def fprob(self, f, cat):
        """Pr(feature | category): frequency of f within cat's items."""
        total = self.catcount(cat)
        if total == 0:
            return 0
        return self.fcount(f, cat) / total

    def weightedprob(self, f, cat, prf, weight=1.0, ap=0.5):
        """Weighted average of prf(f, cat) and an assumed prior ap,
        useful when training data for a feature is still sparse."""
        basicprob = prf(f, cat)
        # How often the feature has appeared across all categories.
        totals = sum(self.fcount(f, c) for c in self.categories())
        return ((weight * ap) + (totals * basicprob)) / (weight + totals)
# Naive Bayes classifier, built on the base classifier above.
# "Naive" because it assumes the combined feature probabilities are
# mutually independent.
class naivebayes(classifier):
    """Naive Bayes document classifier."""

    def __init__(self, getfeatures):
        classifier.__init__(self, getfeatures)
        # Minimum ratio by which the best category must beat the others.
        self.thresholds = {}

    def docprob(self, item, cat):
        """Pr(Document | Category): product of per-feature probabilities."""
        features = self.getfeatures(item)
        p = 1
        for f in features:
            p *= self.weightedprob(f, cat, self.fprob)
        return p

    def prob(self, item, cat):
        """Unnormalized Pr(Category | Document) via Bayes' theorem."""
        catprob = self.catcount(cat) / self.totalcount()
        docprob = self.docprob(item, cat)
        return catprob * docprob

    def setthresholds(self, cat, t):
        """Set the acceptance threshold for category cat."""
        self.thresholds[cat] = t

    def getthresholds(self, cat):
        """Threshold for cat; defaults to 1.0 when unset."""
        if cat not in self.thresholds:
            return 1.0
        return self.thresholds[cat]

    def classify(self, item, default=None):
        """Return the most probable category for item, or default when no
        category clearly wins."""
        probs = {}
        # Find the category with the highest probability.
        # BUGFIX: 'best' was previously left unbound when every category
        # had zero probability (or no categories existed), raising
        # UnboundLocalError; it now falls back to 'default'.
        best = default
        maxprob = 0.0
        for cat in self.categories():
            probs[cat] = self.prob(item, cat)
            if probs[cat] > maxprob:
                maxprob = probs[cat]
                best = cat
        if best not in probs:
            return default
        # Make sure the winner beats every runner-up by its threshold.
        for cat in probs:
            if cat == best:
                continue
            if probs[cat] * self.getthresholds(best) > probs[best]:
                return default
        return best
# Fisher-method classifier: combines per-feature category probabilities
# with Fisher's method (chi-squared combination of p-values).
class fisherclassifier(classifier):
    """Classifier using the Fisher probability-combination method."""

    def __init__(self, getfeatures):
        classifier.__init__(self, getfeatures)
        # Per-category lower bounds on the Fisher score.
        self.minimums = {}

    def cprob(self, f, cat):
        """Pr(category | feature), normalized over all categories."""
        clf = self.fprob(f, cat)
        if clf == 0:
            return 0
        # Sum of this feature's frequencies across every category.
        freqsum = sum([self.fprob(f, c) for c in self.categories()])
        p = clf / freqsum
        return p

    def fisherprob(self, item, cat):
        """Combine the per-feature probabilities with Fisher's method."""
        p = 1
        features = self.getfeatures(item)
        for f in features:
            p *= (self.weightedprob(f, cat, self.cprob))
        # Take the natural log and multiply by -2 ...
        fscore = -2 * math.log(p)
        # ... then feed it through the inverse chi-squared function.
        return self.invchi2(fscore, len(features) * 2)

    def invchi2(self, chi, df):
        """Inverse chi-squared function. If the probabilities are
        independent and random, fscore follows a chi-squared distribution.

        BUGFIX: use floor division so df/2 stays an integer under
        Python 3 (behavior is identical under Python 2 for even df,
        and df is always even here).
        """
        m = chi / 2.0
        total = term = math.exp(-m)
        for i in range(1, df // 2):
            term *= m / i
            total += term
        return min(total, 1.0)

    def setminimum(self, cat, min):
        """Set the minimum Fisher score required to pick category cat."""
        self.minimums[cat] = min

    def getminimum(self, cat):
        """Minimum score for cat; defaults to 0 when unset."""
        if cat not in self.minimums:
            return 0
        return self.minimums[cat]

    def classify(self, item, default=None):
        """Return the category with the best Fisher score above its
        minimum, or default when none qualifies."""
        best = default
        topscore = 0.0
        for c in self.categories():
            p = self.fisherprob(item, c)
            # Only accept scores above the category's lower bound.
            if p > self.getminimum(c) and p > topscore:
                best = c
                topscore = p
        return best
# Module-level demo code (Python 2 print syntax; toggled with if 0/if 1).
# Demo the naive Bayes classifier (disabled).
if 0:
    cl=naivebayes(getwords)
    sampletrain(cl)
    print cl.classify('quick rabbit',default='unknown')
    print cl.classify('quick money',default='unknown')
    # Require 'bad' to beat other categories by 3x before it is chosen.
    cl.setthresholds('bad',3.0)
    print cl.classify('quick money',default='unknown')
    # Retraining on the same corpus sharpens the probabilities.
    for i in range(10):
        sampletrain(cl)
    print cl.classify('quick money',default='unknown')
# Demo the Fisher-method classifier.
if 1:
    cl=fisherclassifier(getwords)
    sampletrain(cl)
    print cl.classify('quick rabbit')
    print cl.classify('quick money')
    # Raise the score needed before 'bad' is selected.
    cl.setminimum('bad',0.8)
    print cl.classify('quick money')
    cl.setminimum('good',0.4)
    print cl.classify('quick money')
    print cl.fisherprob('quick money','good')
| Windriver/codelab | programming-collective-intelligence/docfilter/docclass.py | Python | apache-2.0 | 7,071 | [
"CASINO"
] | 5c1e7bdfe635557ea6697cf14e36371652df8b80c8d8e3f8cbeae386817f7158 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import add_days, getdate, cint
from frappe import throw, _
from erpnext.utilities.transaction_base import TransactionBase, delete_events
from erpnext.stock.utils import get_valid_serial_nos
class MaintenanceSchedule(TransactionBase):
	"""Maintenance Schedule: plans periodic maintenance visits per item.

	Generates per-visit schedule rows, creates calendar Events for the
	assigned sales persons on submit, and keeps the serial nos' AMC
	(annual maintenance contract) expiry dates in sync.
	"""

	def get_item_details(self, item_code):
		"""Return {'item_name', 'description'} for the given item_code."""
		item = frappe.db.sql("""select item_name, description from `tabItem`
			where name=%s""", (item_code), as_dict=1)
		ret = {
			'item_name': item and item[0]['item_name'] or '',
			'description' : item and item[0]['description'] or ''
		}
		return ret

	def generate_schedule(self):
		"""Rebuild maintenance_schedule_detail rows from the item rows."""
		self.set('maintenance_schedule_detail', [])
		frappe.db.sql("""delete from `tabMaintenance Schedule Detail`
			where parent=%s""", (self.name))
		count = 1
		for d in self.get('item_maintenance_detail'):
			self.validate_maintenance_detail()
			# One scheduled date per planned visit for this item row.
			s_list = self.create_schedule_list(d.start_date, d.end_date, d.no_of_visits, d.sales_person)
			for i in range(d.no_of_visits):
				child = self.append('maintenance_schedule_detail')
				child.item_code = d.item_code
				child.item_name = d.item_name
				child.scheduled_date = s_list[i].strftime('%Y-%m-%d')
				if d.serial_no:
					child.serial_no = d.serial_no
				child.idx = count
				count = count + 1
				child.sales_person = d.sales_person
		self.save()

	def on_submit(self):
		"""Validate, update serial-no AMC dates and create calendar Events
		for each sales person's scheduled visits."""
		if not self.get('maintenance_schedule_detail'):
			throw(_("Please click on 'Generate Schedule' to get schedule"))
		self.check_serial_no_added()
		self.validate_schedule()

		email_map = {}
		for d in self.get('item_maintenance_detail'):
			if d.serial_no:
				serial_nos = get_valid_serial_nos(d.serial_no)
				self.validate_serial_no(serial_nos, d.start_date)
				self.update_amc_date(serial_nos, d.end_date)

			# Cache each sales person's email to avoid repeated lookups.
			if d.sales_person not in email_map:
				sp = frappe.get_doc("Sales Person", d.sales_person)
				email_map[d.sales_person] = sp.get_email_id()

			scheduled_date = frappe.db.sql("""select scheduled_date from
				`tabMaintenance Schedule Detail` where sales_person=%s and item_code=%s and
				parent=%s""", (d.sales_person, d.item_code, self.name), as_dict=1)

			for key in scheduled_date:
				if email_map[d.sales_person]:
					description = "Reference: %s, Item Code: %s and Customer: %s" % \
						(self.name, d.item_code, self.customer)
					frappe.get_doc({
						"doctype": "Event",
						"owner": email_map[d.sales_person] or self.owner,
						"subject": description,
						"description": description,
						"starts_on": key["scheduled_date"] + " 10:00:00",
						"event_type": "Private",
						"ref_type": self.doctype,
						"ref_name": self.name
					}).insert(ignore_permissions=1)

		frappe.db.set(self, 'status', 'Submitted')

	def create_schedule_list(self, start_date, end_date, no_of_visit, sales_person):
		"""Spread no_of_visit dates evenly between start_date and end_date,
		shifting each off the sales person's holidays."""
		schedule_list = []
		start_date_copy = start_date
		date_diff = (getdate(end_date) - getdate(start_date)).days
		add_by = date_diff / no_of_visit

		for visit in range(cint(no_of_visit)):
			if (getdate(start_date_copy) < getdate(end_date)):
				start_date_copy = add_days(start_date_copy, add_by)
				if len(schedule_list) < no_of_visit:
					schedule_date = self.validate_schedule_date_for_holiday_list(getdate(start_date_copy),
						sales_person)
					# Never schedule past the contract end date.
					if schedule_date > getdate(end_date):
						schedule_date = getdate(end_date)
					schedule_list.append(schedule_date)

		return schedule_list

	def validate_schedule_date_for_holiday_list(self, schedule_date, sales_person):
		"""Move schedule_date back one day at a time until it no longer
		falls on a holiday (employee holiday list, else the default list)."""
		from erpnext.accounts.utils import get_fiscal_year

		validated = False
		fy_details = ""

		try:
			fy_details = get_fiscal_year(date=schedule_date, verbose=0)
		except Exception:
			pass

		if fy_details and fy_details[0]:
			# Check the holiday list linked via the employee master.
			holiday_list = frappe.db.sql_list("""select h.holiday_date from `tabEmployee` emp,
				`tabSales Person` sp, `tabHoliday` h, `tabHoliday List` hl
				where sp.name=%s and emp.name=sp.employee
				and hl.name=emp.holiday_list and
				h.parent=hl.name and
				hl.fiscal_year=%s""", (sales_person, fy_details[0]))

			if not holiday_list:
				# Fall back to the global default holiday list.
				holiday_list = frappe.db.sql("""select h.holiday_date from
					`tabHoliday` h, `tabHoliday List` hl
					where h.parent=hl.name and ifnull(hl.is_default, 0) = 1
					and hl.fiscal_year=%s""", fy_details[0])

			while not validated and holiday_list:
				if schedule_date in holiday_list:
					schedule_date = add_days(schedule_date, -1)
				else:
					validated = True

		return schedule_date

	def validate_dates_with_periodicity(self):
		"""Ensure each item's date span is long enough for its periodicity."""
		for d in self.get("item_maintenance_detail"):
			if d.start_date and d.end_date and d.periodicity:
				date_diff = (getdate(d.end_date) - getdate(d.start_date)).days + 1
				days_in_period = {
					"Weekly": 7,
					"Monthly": 30,
					"Quarterly": 90,
					"Half Yearly": 180,
					"Yearly": 365
				}

				if date_diff < days_in_period[d.periodicity]:
					throw(_("Row {0}: To set {1} periodicity, difference between from and to date \
						must be greater than or equal to {2}")
						.format(d.idx, d.periodicity, days_in_period[d.periodicity]))

	def validate_maintenance_detail(self):
		"""Require item, dates, visit count and incharge on every item row."""
		if not self.get('item_maintenance_detail'):
			throw(_("Please enter Maintaince Details first"))

		for d in self.get('item_maintenance_detail'):
			if not d.item_code:
				throw(_("Please select item code"))
			elif not d.start_date or not d.end_date:
				# NOTE: .format moved outside _() so the message stays translatable.
				throw(_("Please select Start Date and End Date for Item {0}").format(d.item_code))
			elif not d.no_of_visits:
				throw(_("Please mention no of visits required"))
			elif not d.sales_person:
				throw(_("Please select Incharge Person's name"))

			if getdate(d.start_date) >= getdate(d.end_date):
				throw(_("Start date should be less than end date for Item {0}").format(d.item_code))

	def validate_sales_order(self):
		"""Block duplicate submitted schedules against the same Sales Order."""
		for d in self.get('item_maintenance_detail'):
			if d.prevdoc_docname:
				chk = frappe.db.sql("""select ms.name from `tabMaintenance Schedule` ms,
					`tabMaintenance Schedule Item` msi where msi.parent=ms.name and
					msi.prevdoc_docname=%s and ms.docstatus=1""", d.prevdoc_docname)
				if chk:
					# BUGFIX: second placeholder was {0}, repeating the schedule
					# name instead of showing the Sales Order reference.
					throw(_("Maintenance Schedule {0} exists against {1}").format(chk[0][0], d.prevdoc_docname))

	def validate(self):
		"""Run all row-level validations before save."""
		self.validate_maintenance_detail()
		self.validate_dates_with_periodicity()
		self.validate_sales_order()

	def on_update(self):
		frappe.db.set(self, 'status', 'Draft')

	def update_amc_date(self, serial_nos, amc_expiry_date=None):
		"""Set (or clear) the AMC expiry date on the given serial nos."""
		for serial_no in serial_nos:
			serial_no_doc = frappe.get_doc("Serial No", serial_no)
			serial_no_doc.amc_expiry_date = amc_expiry_date
			serial_no_doc.save()

	def validate_serial_no(self, serial_nos, amc_start_date):
		"""Reject serial nos still under warranty/AMC, or delivered after
		the proposed maintenance start date."""
		for serial_no in serial_nos:
			sr_details = frappe.db.get_value("Serial No", serial_no,
				["warranty_expiry_date", "amc_expiry_date", "status", "delivery_date"], as_dict=1)

			if sr_details.warranty_expiry_date and sr_details.warranty_expiry_date>=amc_start_date:
				throw(_("Serial No {0} is under warranty upto {1}").format(serial_no, sr_details.warranty_expiry_date))

			if sr_details.amc_expiry_date and sr_details.amc_expiry_date >= amc_start_date:
				# BUGFIX: the message referenced sr_details.amc_start_date,
				# a field that was never fetched (always None); the contract
				# end date is amc_expiry_date.
				throw(_("Serial No {0} is under maintenance contract upto {1}").format(serial_no, sr_details.amc_expiry_date))

			if sr_details.status=="Delivered" and sr_details.delivery_date and \
				sr_details.delivery_date >= amc_start_date:
				throw(_("Maintenance start date can not be before delivery date for Serial No {0}").format(serial_no))

	def validate_schedule(self):
		"""Ensure every item row has generated schedule rows."""
		item_lst1 =[]
		item_lst2 =[]
		for d in self.get('item_maintenance_detail'):
			if d.item_code not in item_lst1:
				item_lst1.append(d.item_code)

		for m in self.get('maintenance_schedule_detail'):
			if m.item_code not in item_lst2:
				item_lst2.append(m.item_code)

		if len(item_lst1) != len(item_lst2):
			throw(_("Maintenance Schedule is not generated for all the items. Please click on 'Generate Schedule'"))
		else:
			for x in item_lst1:
				if x not in item_lst2:
					throw(_("Please click on 'Generate Schedule'"))

	def check_serial_no_added(self):
		"""If an item row carries serial nos, every generated schedule row
		for that item must carry them too (i.e. schedule was regenerated)."""
		serial_present =[]
		for d in self.get('item_maintenance_detail'):
			if d.serial_no:
				serial_present.append(d.item_code)

		for m in self.get('maintenance_schedule_detail'):
			if serial_present:
				if m.item_code in serial_present and not m.serial_no:
					throw(_("Please click on 'Generate Schedule' to fetch Serial No added for Item {0}").format(m.item_code))

	def on_cancel(self):
		"""Clear AMC dates set on submit and remove the calendar events."""
		for d in self.get('item_maintenance_detail'):
			if d.serial_no:
				serial_nos = get_valid_serial_nos(d.serial_no)
				self.update_amc_date(serial_nos)
		frappe.db.set(self, 'status', 'Cancelled')
		delete_events(self.doctype, self.name)

	def on_trash(self):
		delete_events(self.doctype, self.name)
@frappe.whitelist()
def make_maintenance_visit(source_name, target_doc=None):
	"""Map a submitted Maintenance Schedule into a new Maintenance Visit.

	source_name -- name of the source Maintenance Schedule
	target_doc  -- optional existing target document to merge into
	Returns the mapped (unsaved) Maintenance Visit document.
	"""
	from frappe.model.mapper import get_mapped_doc

	def update_status(source, target, parent):
		# Visits created from a schedule are always of type "Scheduled".
		target.maintenance_type = "Scheduled"

	doclist = get_mapped_doc("Maintenance Schedule", source_name, {
		"Maintenance Schedule": {
			"doctype": "Maintenance Visit",
			"field_map": {
				"name": "maintenance_schedule"
			},
			# Only submitted schedules may be converted to visits.
			"validation": {
				"docstatus": ["=", 1]
			},
			"postprocess": update_status
		},
		"Maintenance Schedule Item": {
			"doctype": "Maintenance Visit Purpose",
			"field_map": {
				"parent": "prevdoc_docname",
				"parenttype": "prevdoc_doctype",
				"sales_person": "service_person"
			}
		}
	}, target_doc)

	return doclist
| gangadharkadam/sterp | erpnext/support/doctype/maintenance_schedule/maintenance_schedule.py | Python | agpl-3.0 | 9,700 | [
"VisIt"
] | a1a5b326e7f8059858ff06199d3023cd38e631195d9a4adbb1ac0febd3452827 |
import theano.tensor as t
from numpy.random import RandomState
def addLoggingParams (parser) :
    '''Setup common logging and profiler options.'''
    # All three are plain string options; register them from a spec table.
    textOptions = (
        ('--log', 'logfile', None, 'Specify log output file.'),
        ('--level', 'level', 'INFO', 'Log Level.'),
        ('--prof', 'profile', 'Application-Profiler.xml',
         'Specify profile output file.'),
    )
    for flag, dest, default, helpText in textOptions :
        parser.add_argument(flag, dest=dest, type=str, default=default,
                            help=helpText)
def addDebuggingParams (parser) :
    '''Setup common debugging options.'''
    # store_true: defaults to False unless --debug is supplied.
    parser.add_argument('--debug', action='store_true', dest='debug',
                        default=False,
                        help='Dump debugging information while processing.')
def addEarlyStop (parser) :
    '''Setup common early stoppage parameters.'''
    import numpy as np
    parser.add_argument('--limit', dest='limit', default=2, type=int,
                        help='Number of runs between validation checks.')
    parser.add_argument('--stop', dest='stop', default=5, type=int,
                        help='Number of inferior validation checks to end.')
    # np.inf effectively disables the epoch cap unless the user sets one.
    parser.add_argument('--epoch', dest='epoch', default=np.inf, type=float,
                        help='Maximum number of runs per Layer/Network.')
def addSynapseLoad(parser, multiLoad=False) :
    '''Setup parser for loading synapses from disk to initialize a network.'''
    # multiLoad switches --syn between a single path and a list of paths.
    if multiLoad :
        parser.add_argument('--syn', dest='synapse', type=str, default=[],
                            nargs='*', help='Load one or more saved networks.')
    else :
        parser.add_argument('--syn', dest='synapse', type=str, default=None,
                            help='Load from a previously saved network.')
def addSupDataParams (parser, base, multiLoad=False) :
    '''Setup common dataset parameters for supervised learning.'''
    parser.add_argument('--batch', dest='batchSize', default=100, type=int,
                        help='Batch size for training and test sets.')
    parser.add_argument('--holdout', dest='holdout', default=.05, type=float,
                        help='Percent of data to be held out for testing.')
    parser.add_argument('--base', dest='base', default='./' + base, type=str,
                        help='Base name of the network output and temp files.')
    # Delegate the optional synapse-restore flag.
    addSynapseLoad(parser, multiLoad=multiLoad)
    parser.add_argument('data', help='Directory or pkl.gz file for the ' +
                                     'training and test sets.')
def addUnsupDataParams (parser, base, multiLoad=False) :
    '''Setup common dataset parameters for unsupervised learning.

    Adds all supervised dataset options plus the target-matching options.
    '''
    addSupDataParams(parser, base, multiLoad)
    # argparse's builtin bool() would treat any non-empty string as True,
    # so parse the literal string 'True' explicitly.
    boolCheck = lambda x: True if 'True' == x else False
    parser.add_argument('--greedyNetwork', dest='greedyNet',
                        type=boolCheck, default=True,
                        help='Activate Greedy Network Reconstruction.')
    parser.add_argument('--target', dest='targetDir', type=str, required=True,
                        help='Directory with target data to match.')
    # BUGFIX: the help text was copy-pasted from --target; this option is
    # an integer cap, not a directory.
    parser.add_argument('--maxTarget', dest='maxTarget', type=int, default=100,
                        help='Maximum number of target examples to use.')
def addSupConvolutionalParams(parser) :
'''Setup common ConvolutionalLayer options.'''
parser.add_argument('--kernel', dest='kernel', type=int, nargs='+',
default=[],
help='Number of kernels on Convolutional Layers.')
parser.add_argument('--kernelSize', dest='kernelSize', type=int, nargs='+',
default=[],
help='Size of kernels on Convolutional Layers.')
parser.add_argument('--downsample', dest='downsample', type=int, nargs='+',
default=[],
help='Downsample factor on Convolutional Layers.')
parser.add_argument('--learnC', dest='learnC', type=float, nargs='+',
default=[],
help='Rate of learning on Convolutional Layers.')
parser.add_argument('--momentumC', dest='momentumC', type=float, nargs='+',
default=[],
help='Rate of momentum on Convolutional Layers.')
parser.add_argument('--dropoutC', dest='dropoutC', type=float, nargs='+',
default=[],
help='Dropout amount on Convolutional Layers.')
parser.add_argument('--regTypeC', dest='regTypeC', type=str,
default='L2',
help='Type of regularization on Convolutional Layers.')
parser.add_argument('--regValueC', dest='regValueC', type=float,
default=.00001,
help='Rate of regularization on Convolutional Layers.')
def addUnsupConvolutionalParams(parser) :
'''Setup common ConvolutionalAE options.'''
addSupConvolutionalParams(parser)
parser.add_argument('--sparseC', dest='sparseC', type=bool, nargs='+',
default=[],
help='Force the output to be sparse for stronger '
'pattern extraction on Convolutional Layers.')
def addSupContiguousParams(parser) :
'''Setup common ContiguousLayer options.'''
parser.add_argument('--neuron', dest='neuron', type=int, nargs='+',
default=[],
help='Number of neurons on Fully-Connected Layers.')
parser.add_argument('--learnF', dest='learnF', type=float, nargs='+',
default=[],
help='Rate of learning on Fully-Connected Layers.')
parser.add_argument('--momentumF', dest='momentumF', type=float, nargs='+',
default=[],
help='Rate of momentum on Fully-Connected Layers.')
parser.add_argument('--dropoutF', dest='dropoutF', type=float, nargs='+',
default=[],
help='Dropout amount on Fully-Connected Layer.')
parser.add_argument('--regTypeF', dest='regTypeF', type=str,
default='L2', help='Type of regularization on ' \
'Fully-Connected Layers.')
parser.add_argument('--regValueF', dest='regValueF', type=float,
default=.00001, help='Rate of regularization on ' \
'Fully-Connected Layers.')
def addUnsupContiguousParams(parser) :
'''Setup common ContiguousAE options.'''
addSupContiguousParams(parser)
parser.add_argument('--sparseF', dest='sparseF', type=bool, nargs='+',
default=[],
help='Force the output to be sparse for stronger '
'pattern extraction on Fully-Connected Layers.')
| mbojrab/playbox | trunk/modules/python/builder/args.py | Python | mit | 6,964 | [
"NEURON"
] | 15eff6b5d47bd4724e2fa05a61f3782a3359a89c95b68910b995e6e78194980f |
# -*- mode: python; indent-tabs-mode: nil; tab-width: 4 -*-
# vim: set tabstop=4 shiftwidth=4 expandtab:
#
# Copyright (C) 2001-2005 Ichiro Fujinaga, Michael Droettboom, Karl MacMillan
# 2007-2010 Christoph Dalitz and Uma Kompella
# 2014 Christoph Dalitz
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from gamera.plugin import *
from gamera.args import NoneDefault
import _threshold
class threshold(PluginFunction):
"""
Creates a binary image by splitting along a given global threshold value.
Pixels that are greater than the given value become white.
Pixels less than or equal to the given value become black.
*storage_format* (optional)
specifies the compression type for the result:
DENSE (0)
no compression
RLE (1)
run-length encoding compression.
"""
self_type = ImageType([GREYSCALE, GREY16, FLOAT])
args = Args([Int("threshold"), Choice("storage format", ['dense', 'rle'])])
return_type = ImageType([ONEBIT], "output")
doc_examples = [(GREYSCALE, 128)]
def __call__(image, threshold, storage_format = 0):
return _threshold.threshold(image, threshold, storage_format)
__call__ = staticmethod(__call__)
class otsu_find_threshold(PluginFunction):
"""
Finds a threshold point using the Otsu algorithm. Reference:
N. Otsu: *A Threshold Selection Method from Grey-Level
Histograms.* IEEE Transactions on Systems, Man, and Cybernetics
(9), pp. 62-66 (1979)
"""
self_type = ImageType([GREYSCALE])
return_type = Int("threshold_point")
doc_examples = [(GREYSCALE,)]
class otsu_threshold(PluginFunction):
"""
Creates a binary image by splitting along a threshold value
determined using the Otsu algorithm.
Equivalent to ``image.threshold(image.otsu_find_threshold())``.
*storage_format* (optional)
specifies the compression type for the result:
DENSE (0)
no compression
RLE (1)
run-length encoding compression
"""
self_type = ImageType([GREYSCALE])
args = Args(Choice("storage format", ['dense', 'rle']))
return_type = ImageType([ONEBIT], "output")
doc_examples = [(GREYSCALE,)]
def __call__(image, storage_format = 0):
return _threshold.otsu_threshold(image, storage_format)
__call__ = staticmethod(__call__)
class tsai_moment_preserving_find_threshold(PluginFunction):
"""
Finds a threshold point using the Tsai Moment Preserving threshold
algorithm. Reference:
W.H. Tsai: *Moment-Preserving Thresholding: A New Approach.*
Computer Vision Graphics and Image Processing (29), pp. 377-393
(1985)
"""
self_type = ImageType([GREYSCALE])
return_type = Int("threshold_point")
doc_examples = [(GREYSCALE,)]
author = "Uma Kompella"
class tsai_moment_preserving_threshold(PluginFunction):
"""
Creates a binary image by splitting along a threshold value
determined using the Tsai Moment Preserving Threshold algorithm.
Equivalent to
``image.threshold(image.tsai_moment_preserving_find_threshold())``.
*storage_format* (optional)
specifies the compression type for the result:
DENSE (0)
no compression
RLE (1)
run-length encoding compression
"""
self_type = ImageType([GREYSCALE])
args = Args(Choice("storage format", ['dense', 'rle']))
return_type = ImageType([ONEBIT], "output")
doc_examples = [(GREYSCALE,)]
author = "Uma Kompella"
def __call__(image, storage_format = 0):
return _threshold.tsai_moment_preserving_threshold(image, storage_format)
__call__ = staticmethod(__call__)
class abutaleb_threshold(PluginFunction):
"""
Creates a binary image by using the Abutaleb locally-adaptive
thresholding algorithm.
*storage_format* (optional)
specifies the compression type for the result:
DENSE (0)
no compression
RLE (1)
run-length encoding compression
"""
self_type = ImageType([GREYSCALE])
args = Args(Choice("storage format", ['dense', 'rle']))
return_type = ImageType([ONEBIT], "output")
doc_examples = [(GREYSCALE,)]
def __call__(image, storage_format = 0):
return _threshold.abutaleb_threshold(image, storage_format)
__call__ = staticmethod(__call__)
class bernsen_threshold(PluginFunction):
"""
Creates a binary image by using the Bernsen algorithm.
Each point is thresholded by the mean between the maximum and minimum
value in the surrounding region of size *region_size*. When the difference
between maximum and minimum is below *contrast_limit* the pixel is set
to black in case of *doubt_to_black* = ``True``, otherwise to white.
Reference: J. Bernsen: *Dynamic thresholding of grey-level images.*
Proc. 8th International Conference on Pattern Recognition (ICPR8),
pp. 1251-1255, 1986.
*storage_format*
specifies the compression type for the result:
DENSE (0)
no compression
RLE (1)
run-length encoding compression
*region_size*
The size of each region in which to calculate a threshold
*contrast_limit*
The minimum amount of contrast required to threshold.
*doubt_to_black*
When ``True``, 'doubtful' values are set to black, otherwise to white.
"""
self_type = ImageType([GREYSCALE])
args = Args([Choice("storage format", ['dense', 'rle']),
Int("region size", range=(1, 50), default=11),
Int("contrast limit", range=(0, 255), default=80),
Check("doubt_to_black", default=False)])
return_type = ImageType([ONEBIT], "output")
doc_examples = [(GREYSCALE,)]
def __call__(image, storage_format = 0, region_size = 11,
contrast_limit = 80, doubt_to_black = False):
return _threshold.bernsen_threshold(image, storage_format, region_size, contrast_limit, doubt_to_black)
__call__ = staticmethod(__call__)
class djvu_threshold(PluginFunction):
"""
Creates a binary image by using the DjVu thresholding algorithm.
See Section 5.1 in:
Bottou, L., P. Haffner, P. G. Howard, P. Simard, Y. Bengio and
Y. LeCun. 1998. High Quality Document Image Compression with
DjVu. AT&T Labs, Lincroft, NJ.
http://research.microsoft.com/~patrice/PDF/jei.pdf
This implementation features an additional extension to the
algorithm described above. Once the background and foreground
colors are determined for each block, the image is thresholded by
interpolating the foreground and background colors between the
blocks. This prevents "blockiness" along boundaries of strong
color change.
*smoothness*
The amount of effect that parent blocks have on their children
blocks. Higher values will result in more smoothness between
blocks. Expressed as a percentage between 0.0 and 1.0.
*max_block_size*
The size of the largest block to determine a threshold.
*min_block_size*
The size of the smallest block to determine a threshold.
*block_factor*
The number of child blocks (in each direction) per parent block.
For instance, a *block_factor* of 2 results in 4 children per
parent.
"""
self_type = ImageType([RGB])
args = Args([Float("smoothness", default=0.2, range=(0.0, 1.0)),
Int("max_block_size", default=512),
Int("min_block_size", default=64),
Int("block_factor", default=2, range=(1, 8))])
return_type = ImageType([ONEBIT], "output")
def __call__(image, smoothness=0.2, max_block_size=512, min_block_size=64,
block_factor=2):
return _threshold.djvu_threshold(image, smoothness, max_block_size,
min_block_size, block_factor)
__call__ = staticmethod(__call__)
doc_examples = [(RGB, 0.5, 512, 64, 2)]
class soft_threshold(PluginFunction):
"""
Does a greyscale transformation that \"smears out\" the threshold *t* by a
choosable amount *sigma*. This has the effect of a \"soft\" thresholding.
Each grey value *x* is transformed to *F(x,t,sigma)*, where *F*
is the CDF probability distribution with mean *t* and variance
*sigma^2*. The parameter *dist* determines the type of probability
distribution: 0 = logistic, 1 = normal (gaussian), 2 = uniform.
As the choice *sigma* = 0 is useless (it is the same as normal
thresholding), this special value is reserved for an automatic selection
of *sigma* with soft_threshold_find_sigma__.
.. __: #soft-threshold-find-sigma
When *t* is not given, it is automatically computed with
otsu_find_threshold__.
.. __: #otsu-find-threshold
Reference: C. Dalitz: `\"Soft Thresholding for Visual Image Enhancement.\"`__
Technischer Bericht Nr. 2014-01, Hochschule Niederrhein,
Fachbereich Elektrotechnik und Informatik, 2014
.. __: http://www.hsnr.de/fileadmin/dateien/fb03/gruppen/technische-berichte/fb03-tb-2014-01.pdf
"""
self_type = ImageType([GREYSCALE])
args = Args([Int("t", default=NoneDefault), Float("sigma", default=0.0), Choice("dist", ["logistic","normal","uniform"], default=0)])
return_type = ImageType([GREYSCALE], "output")
author = "Christoph Dalitz"
def __call__(image, t=None, sigma=0.0, dist=0):
if t is None:
t = image.otsu_find_threshold()
return _threshold.soft_threshold(image, t, sigma, dist)
__call__ = staticmethod(__call__)
doc_examples = [(GREYSCALE, 128, 25)]
class soft_threshold_find_sigma(PluginFunction):
"""
For the CDF probability distribution given by *dist*
(0 = logistic, 1 = normal (gaussian), 2 = uniform), sigma is
determined such that *F(m,t,sigma)* = 0.99, where *m* is the mean grey
value of all pixels with a grey value greater than *t*.
Reference: C. Dalitz: `\"Soft Thresholding for Visual Image Enhancement.\"`__
Technischer Bericht Nr. 2014-01, Hochschule Niederrhein,
Fachbereich Elektrotechnik und Informatik, 2014
.. __: http://www.hsnr.de/fileadmin/dateien/fb03/gruppen/technische-berichte/fb03-tb-2014-01.pdf
"""
self_type = ImageType([GREYSCALE])
args = Args([Int("t", default=NoneDefault), Choice("dist", ["logistic","normal","uniform"], default=0)])
return_type = Float("sigma")
author = "Christoph Dalitz"
def __call__(image, t, dist=0):
return _threshold.soft_threshold_find_sigma(image, t, dist)
__call__ = staticmethod(__call__)
class ThresholdModule(PluginModule):
"""
This module provides functions that convert images between different
pixel types.
"""
category = "Binarization"
cpp_headers = ["threshold.hpp"]
functions = [threshold, otsu_find_threshold, otsu_threshold,
tsai_moment_preserving_find_threshold,
tsai_moment_preserving_threshold, abutaleb_threshold,
bernsen_threshold, djvu_threshold,
soft_threshold, soft_threshold_find_sigma]
author = "Michael Droettboom and Karl MacMillan"
url = "http://gamera.sourceforge.net/"
module = ThresholdModule()
| hsnr-gamera/gamera | gamera/plugins/threshold.py | Python | gpl-2.0 | 11,906 | [
"Gaussian"
] | b7443fb76e1d6d102b0726cf5369413037b73ea93ce8ca57be67c1e4b4934ce7 |
import pyglet
import socket
import time
import random
#DONE: grab implementation
#DONE: Startup display
#DONE: battery system
#DONE: life-support
#DONE: reactor
#TODO: death
#Copyright 2014 Antonius Frie
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#ship systems
#lasers, targeting, engines, reactor, comms, grabber, locking, battery, drill, absorber, hydroponics
LASERS, TARGETING, ENGINES, REACTOR, COMMS, GRABBER, LOCKING, BATTERY, DRILL, ABSORBER, HYDROPONICS = 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10
LEN = 11
levels = []#upgrade-level
heat = []#system heat
letters = ["w", "t", "e", "r", "c", "g", "l", "b", "d", "a", "h"]#the letters that that will be displayed to reference each system
active = []#whether a system is active (True) or inactive (False)
colors = [(255, 204, 212, 255), (204, 204, 255, 255), (255, 221, 204, 255), (204, 255, 230, 255), (232, 232, 204, 255), (204, 204, 204, 255), (204, 204, 204, 255), (204, 255, 255, 255), (221, 213, 204, 255), (221, 213, 204, 255), (221, 255, 204, 255)]#the colors that will be used in the user interface
powerreq = [5, 0, 2, 7, 0, 1, 1, 0, 5, 5, 7]
for i in range(LEN): levels.append(0); heat.append(0.0); active.append(False)
def heatup(system, amount):
"""General function for handling system heatup"""
heat[system] += amount
if heat[system] > 9.9:
heat[system] = 9.9
def lasers():
pass
def targeting(arg=False):
global target
if arg != False:
#active[TARGETING] = False
if arg in oids:
nr = oids.index(arg)
if oposition[nr][0] == position[0] and oposition[nr][1] == position[1]:
target = nr
hudprint("Targeting: Targeted.")
else:
hudprint("Targeting: Ship is not at your position.")
elif arg == "":
active[TARGETING] = False
else:
hudprint("Targeting: No such ship.")
elif (not waitingfunc) and target == None:
prompt("target: ", targeting)
elif target != None:
if oposition[target][0] == position[0] and oposition[target][1] == position[1]:
pass
else:
target = None
active[TARGETING] = False
hudprint("Targeting: Targeted ship lost.")
def engines(arg=False):
global position
global environment
if heat[ENGINES] < 5:
if arg != False:
maxrange = levels[ENGINES]+1
active[ENGINES] = False
try:
numbers = arg.split(",")
x = int(numbers[0])
y = int(numbers[1])
except:
return None
if not (x <= maxrange and y <= maxrange):
x = maxrange
y = maxrange
newx = position[0]+x
newy = position[1]+y
if newx < 0:
newx = 0
elif newx > 20:
newx = 20
if newy < 0:
newy = 0
elif newy > 20:
newy = 20
position = (newx, newy)
heatup(ENGINES, 10)
environment = get_surr(position)
cargo[FUEL] -= 1
elif not waitingfunc:
prompt("direction: ", engines)
def reactor():
global cargo
if heat[REACTOR] < 5:
rnumber = random.random()
if cargo[GAS] >= 1:
cargo[GAS] -= 1
if rnumber < 0.5:
cargo[FUEL] += int(1+0.4*levels[REACTOR])
else:
cargo[ORGANICS] += int(1+0.4*levels[REACTOR])
heatup(REACTOR, 7)
else:
hudprint("Reactor: No gas to convert.")
active[REACTOR] = False
def comms():
pass
def grabber(arg=False):
global cargo
if heat[GRABBER] < 5:
if arg != False:
if arg == "":
active[GRABBER] = False
return None
am = arg.split(",")
if target != None:
if len(am) == 2:
try:
amount = int(am[0])
except:
hudprint("Grabber: An integer number is required.")
return None
if am[1] in materials:
material = materials.index(am[1])
postex("grab", oids[target], amount, material)
active[GRABBER] = False
else:
hudprint("Grabber: Unknown material.")
else:
hudprint("Grabber: Invalid format")
else:
hudprint("Grabber: No target.")
elif not waitingfunc:
prompt("amount, material: ", grabber)
def locking():
pass
def battery():
pass
def drill():
if environment == "asteroids" and heat[DRILL] < 5:
cargo[ORE] += levels[DRILL]+1
cargo[MINERALS] += int((levels[DRILL]+1)/3)
if random.random() < levels[DRILL]*0.01:
cargo[CRYSTAL] += 1
heatup(DRILL, 5)
def absorber():
if environment == "nebula" and heat[ABSORBER] < 5:
cargo[GAS] += levels[ABSORBER]+1
heatup(ABSORBER, 5)
def hydroponics():
if heat[HYDROPONICS] < 5:
active[HYDROPONICS] = False
if cargo[ORGANICS] < 1:
hudprint("Hydroponics: No organics.")
elif cargo[WASTE] < 1:
hudprint("Hydroponics: No waste material.")
elif cargo[MINERALS] < 1:
hudprint("Hydroponics: No minerals.")
else:
cargo[ORGANICS] -= 1
cargo[WASTE] -= 1
cargo[MINERALS] -= 1
cargo[OXYGEN] += int(1+0.2*levels[HYDROPONICS])
cargo[FOOD] += int(1+0.2*levels[HYDROPONICS])
heatup(HYDROPONICS, 7)
active[HYDROPONICS] = False
functions = [lasers, targeting, engines, reactor, comms, grabber, locking, battery, drill, absorber, hydroponics]
#materials
ORE, MINERALS, CRYSTAL, GAS, ORGANICS, FUEL, OXYGEN, FOOD, WASTE = 0, 1, 2, 3, 4, 5, 6, 7, 8
materials = ["ore", "minerals", "crystal", "gas", "organics", "fuel", "oxygen", "food", "waste"]
#definition of upgrades
neededmaterials = [[(10, ORE), (50, ORE), (150, ORE), (250, ORE), (5, CRYSTAL), (300, ORE), (375, ORE), (10, CRYSTAL), (400, ORE)]]*11
#system-related functions
def upgradesystem(system=None):
if system:
if system in letters:
system = letters.index(system)
if levels[system] < 9:
nms = neededmaterials[system][levels[system]]
neededmaterial = nms[1]
amount = nms[0]
if cargo[neededmaterial] < amount:
hudprint("Upgrade: Not enough materials to upgrade.")
else:
cargo[neededmaterial] -= amount
levels[system] += 1
else:
hudprint("Upgrade: This system is already fully upgraded.")
else:
prompt("system: ", upgradesystem)
#ship properties
position = (0, 0)
environment = "asteroids"#environments are: nothing, gas clouds, asteroids etc.; it will be received from the server after a jump
cargo = [0, 0, 0, 0, 0, 0, 0, 0, 0]
target = None
power = 0
#other ships' properties
oids = []
olevels = []
oheat = []
oactive = []
ocargo = []
oposition = []
#targetted ship
def scan():
if target != None:
tcargo = ocargo[target]
text = "Cargo of "+oids[target]+": "
for i in range(9):
text += str(tcargo[i])+" "+materials[i]+", "
text = text[:-2]
hudprint(text)
else:
text = "Other ships: "
for i in range(len(oids)):
text += oids[i]+", "
text = text[:-2]
hudprint(text)
window = pyglet.window.Window()
pyglet.font.add_file('whitrabt.ttf')
gamedisplays = pyglet.graphics.Batch()
systemdoc = pyglet.text.document.FormattedDocument(text="0w0\n0s0\n0e0")
systemdoc.set_style(0, len(systemdoc.text), {'font_name':"White Rabbit", 'font_size':32, 'color':(255, 255, 255, 255)})
systemdisplay = pyglet.text.layout.TextLayout(systemdoc, width=100, multiline=True, batch=gamedisplays)
systemdisplay.anchor_x = 'left'
systemdisplay.anchor_y = 'top'
systemdisplay.x = 20
systemdisplay.y = window.height-20
cargodoc = pyglet.text.document.FormattedDocument(text="0xORE")
cargodoc.set_style(0, len(cargodoc.text), {'font_name':"White Rabbit", 'font_size':20, 'color':(255, 255, 255, 255)})
cargodisplay = pyglet.text.layout.TextLayout(cargodoc, width=600, multiline=True, batch=gamedisplays)
cargodisplay.anchor_x = 'left'
cargodisplay.anchor_y = 'bottom'
cargodisplay.x = 20
cargodisplay.y = 20
positiondisplay = pyglet.text.Label(text=str(position), font_name="White Rabbit", font_size=20, x=window.width-20, y=window.height-20, anchor_x="right", anchor_y="top", batch=gamedisplays)
promptdisplay = pyglet.text.Label(text="", font_name="White Rabbit", font_size=20, multiline=False, x=140, y=window.height-60, width=480, batch=gamedisplays)
promptinput = ""
waitingfunc = None
def prompt(text, func):
global promptinput
global waitingfunc
promptdisplay.text = text
promptinput = ""
waitingfunc = func
def promptcallback(text):
global promptinput
global waitingfunc
if not waitingfunc:
return
elif text == "\r":
f = waitingfunc
waitingfunc = None
p = promptinput
promptinput = ""
promptdisplay.text = ""
f(p)
else:
promptinput += text
promptdisplay.text += text
hud = pyglet.text.Label(font_name="White Rabbit", font_size=20, x=140, y=window.height-80, width=480, multiline=True, anchor_x='left', anchor_y='top', batch=gamedisplays)
hud.text = "Welcome to your spaceship. This is the unified spaceship control interface. Press Enter to dismiss this message."
hudqueue = []
def hudprint(text=0):
global hud
global hudqueue
if text == 0:
if hudqueue != []:
hud.text = hudqueue.pop()
else:
hud.text = ""
else:
if hud.text == "":
hud.text = text
else:
hudqueue.insert(0, text)
centerlabel = pyglet.text.Label(text = "CONFEDERATION", font_name="White Rabbit", font_size=48, x=window.width/2, y= window.height/2, anchor_x='center', anchor_y='center')
gamerunning = False
startinput = ""
startupstate = 0
def lifesupport(dt):
if cargo[FOOD] == 0 or cargo[OXYGEN] == 0:
quitgame(dead=True)
else:
if cargo[FOOD] == 1:
hudprint("Low on food.")
if cargo[OXYGEN] == 1:
hudprint("Low on oxygen.")
cargo[FOOD] -= 1
cargo[OXYGEN] -= 1
cargo[WASTE] += 1
@window.event
def on_draw():
window.clear()
if gamerunning:
gamedisplays.draw()
else:
if startupstate < 4:
centerlabel.draw()
elif startupstate == 4:
youdied.blit(0, 0)
elif startupstate == 5:
thankyou.blit(0, 0)
#systemdisplay.draw()
#cargodisplay.draw()
#positiondisplay.draw()
#promptdisplay.draw()
#hud.draw()
@window.event
def on_key_press(symbol, modifiers):
global waitingfunc
global promptinput
global promptdisplay
if gamerunning:
if not waitingfunc:
keys = [pyglet.window.key.W, pyglet.window.key.T, pyglet.window.key.E, pyglet.window.key.R, pyglet.window.key.C, pyglet.window.key.G, pyglet.window.key.L, pyglet.window.key.B, pyglet.window.key.D, pyglet.window.key.A, pyglet.window.key.H]
if symbol in keys:
n = keys.index(symbol)
if active[n]:
active[n] = False
else:
active[n] = True
else:
if symbol == pyglet.window.key.ENTER:
hudprint()
updatetext()
elif symbol == pyglet.window.key.BACKSPACE:
waitingfunc = None
promptinput = ""
promptdisplay.text = ""
else:
if symbol == pyglet.window.key.BACKSPACE or symbol == pyglet.window.key.ENTER:
startinputhandler(symbol)
@window.event
def on_text(text):
if gamerunning:
if waitingfunc:
promptcallback(text)
else:
if text == "u":
upgradesystem()
elif text == "s":
scan()
elif text == "q":
quitgame()
else:
startinputhandler(text)
def startinputhandler(symbol):
global startinput
global startupstate
global centerlabel
global ip
global name
global gamerunning
global youdied
global thankyou
if symbol == pyglet.window.key.BACKSPACE:
if startupstate > 0:
if len(startinput) > 4:
startinput = startinput[:-1]
centerlabel.text = startinput+"_"
elif symbol == pyglet.window.key.ENTER:
startupstate += 1
startinput = startinput[4:]
if startupstate == 1:
centerlabel.font_size = 36
centerlabel.text = "IP: _"
startinput = "IP: "
elif startupstate == 2:
ip = startinput
centerlabel.text = "ID: _"
startinput = "ID: "
elif startupstate == 3:
name = startinput
gamerunning = True
connecttoserver()
pyglet.clock.schedule_interval(update, 1/30.)
pyglet.clock.schedule_interval(post, 1/2.)
pyglet.clock.schedule_interval(get, 1/2.)
pyglet.clock.schedule_interval(getex, 1)
pyglet.clock.schedule_interval(lifesupport, 60)
elif startupstate == 4:
pyglet.clock.unschedule(update)
pyglet.clock.unschedule(post)
pyglet.clock.unschedule(get)
pyglet.clock.unschedule(getex)
pyglet.clock.unschedule(lifesupport)
window.clear()
youdied.blit(0, 0)
elif startupstate == 5:
thankyou = pyglet.image.load("thankyou.png")
window.clear()
thankyou.blit(0, 0)
elif startupstate == 6:
quitgame(True)
else:
if startupstate > 0:
if len(symbol) == 1:
if (ord(symbol) >= 65 and ord(symbol) < 128) or symbol in ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "."]:
startinput += symbol
centerlabel.text = startinput+"_"
def quitgame(final=False, dead=False):
global gamerunning
global youdied
if not final:
gamerunning = False
for i in range(1000):
try:
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((ip, 2001))
client.send("DELETE "+name)
break
except:
if client:
client.close()
client.close()
startinputhandler(pyglet.window.key.ENTER)
if dead:
youdied = pyglet.image.load("death.png")
else:
youdied = pyglet.image.load("earth.png")
else:
pyglet.app.exit()
youdied = pyglet.image.load("earth.png")
def updatetext():
text = ""
for i in range(LEN):
text += str(levels[i])+letters[i]+str(int(heat[i]))+"\n"
systemdoc.text = text
for i in range(LEN):
if active[i]:
systemdoc.set_style(i*4, i*4+4, {'color':colors[i]})
else:
systemdoc.set_style(i*4, i*4+4, {'color':(64, 64, 64, 255)})
text = ""
for i in range(len(cargo)):
text += str(cargo[i])+" "+materials[i]+", "
text = text[:-2]
cargodoc.text = text
positiondisplay.text = environment.capitalize()+" "+str(position[0])+","+str(position[1])
def update(dt):
global target
if not active[TARGETING]:
target = None
power = 9
for i in range(LEN):
heat[i] -= (levels[i]+1)*0.5*dt
if heat[i] < 0:
heat[i] = 0
if active[i]:
if power >= powerreq[i]:
functions[i]()
power -= powerreq[i]
else:
active[i] = False
levels[BATTERY] = 9-power
updatetext()
def post(dt):
req = "POST "+name
req += ";"+",".join([str(x) for x in levels])
req += ";"+",".join([str(x) for x in heat])
req += ";"+",".join([str(x) for x in active])
req += ";"+",".join([str(x) for x in cargo])
req += ";"+",".join([str(x) for x in position])
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
client.connect((ip, 2001))
client.send(req)
client.close()
except:
if client:
client.close()
def get(dt):
try:
req = "GET"
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((ip, 2001))
client.send(req)
answer = client.recv(10000)
client.close()
if answer[:2] == "OK":
shipdata = answer[3:].split("/")
shipdata = [x.split(";") for x in shipdata]
for ship in shipdata:
if ship[0] == name:
continue
if ship[0] in oids:
nr = oids.index(ship[0])
olevels[nr] = [int(x) for x in ship[1].split(",")]
oheat[nr] = [float(x) for x in ship[2].split(",")]
oactive[nr] = [bools(x) for x in ship[3].split(",")]
ocargo[nr] = [int(x) for x in ship[4].split(",")]
oposition[nr] = [int(x) for x in ship[5].split(",")]
else:
oids.append(ship[0])
olevels.append([int(x) for x in ship[1].split(",")])
oheat.append([float(x) for x in ship[2].split(",")])
oactive.append([bools(x) for x in ship[3].split(",")])
ocargo.append([int(x) for x in ship[4].split(",")])
oposition.append([int(x) for x in ship[5].split(",")])
elif answer[:2] == "EX":
if answer[3:7] == "GRAB":
data = answer[8:].split(",")
amount = int(data[0])
material = int(data[1])
other = int(data[2])
if cargo[material] < amount:
amount = 0
cargo[material] -= amount
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
req = "GROK"
client.connect((ip, 2001))
client.send("GROK")
except:
if client:
client.close()
def getex(dt):
try:
req = "XGET "+name
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((ip, 2001))
client.send(req)
answer = client.recv(10000)
client.close()
if answer[:2] == "OK":
reqs = answer[3:].split(";")
for req in reqs:
req = req.split(",")
if req[0] == name:
if req[1] == "GRAB":
amount = int(req[2])
material = int(req[3])
other = req[4]
if cargo[material] < amount:
amount = 0
cargo[material] -= amount
postex("grok", other, amount, material)
elif req[1] == "GROK":
amount = int(req[2])
material = int(req[3])
cargo[material] += amount
except:
if client:
client.close()
def postex(extype, *args):
if extype == "grok":
other, amount, material = args
req = "XPOST "+other+",GROK,"+str(amount)+","+str(material)
elif extype == "grab":
other, amount, material = args
req = "XPOST "+other+",GRAB,"+str(amount)+","+str(material)+","+name
try:
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((ip, 2001))
client.send(req)
answer = client.recv(1000)
client.close()
except:
if client:
client.close()
def get_surr(pos):
try:
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((ip, 2001))
req = "GS "
req += str(position[0])+","+str(position[1])
client.send(req)
answer = client.recv(1000)
client.close()
if answer[:2] == "OK":
surr = answer[3:]
if surr == "asteroids" or surr == "nebula":
return surr
else:
return "nothing"
else:
return "nothing"
except:
if client:
client.close()
return "nothing"
def bools(string):
if string=="True":
return True
else:
return False
updatetext()
def connecttoserver():
    """Log this ship in and populate the global ship state from the reply.

    Retries up to 1000 times.  An "OK" reply carries five ';'-separated
    sections: levels, heat, active flags, cargo, and position; these are
    parsed into the module globals below.  "FAILED" aborts the program;
    "ID CREATED" means the server just registered us, so we retry.
    """
    global levels
    global heat
    global active
    global cargo
    global position
    global environment
    for i in range(1000):
        try:
            client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            client.connect((ip, 2001))
            client.send("LOGIN "+name)
            answer = client.recv(1000)
            #print answer
            if answer[:2] == "OK":
                print "ok"
                # Ship state payload: levels;heat;active;cargo;position
                shipdata = answer[3:].split(";")
                levels = [int(x) for x in shipdata[0].split(",")]
                heat = [float(x) for x in shipdata[1].split(",")]
                active = [bools(x) for x in shipdata[2].split(",")]
                cargo = [int(x) for x in shipdata[3].split(",")]
                position = shipdata[4].split(",")
                position = (int(position[0]), int(position[1]))
                environment = get_surr(position)
                break
            elif answer[:6] == "FAILED":
                client.close()
                exit()
            elif answer[:10] == "ID CREATED":
                # Server just created our id; reconnect to complete login.
                client.close()
                continue
        except:
            if i < 999:
                client.close()
                continue
            else:
                # Out of retries: give up and terminate.
                print i
                client.close()
                exit()
    client.close()
pyglet.app.run()  # enter pyglet's event loop; blocks until the window closes
| metaplinius/confederation | main.py | Python | gpl-3.0 | 22,951 | [
"CRYSTAL"
] | 4664a3ffe090444567a477d4edb8845698e9fd0c63aa16802952e236e7a83ade |
############################
# General options/settings #
############################
PING_WAIT = 300 # Seconds
PING_MIN_WAIT = 30 # How long !start has to wait after a !ping
MINIMUM_WAIT = 60
EXTRA_WAIT = 20
MAXIMUM_WAITED = 3 # limit for amount of !wait's
STATS_RATE_LIMIT = 15
VOTES_RATE_LIMIT = 15
ADMINS_RATE_LIMIT = 300
MAX_PLAYERS = 40
NIGHT_TIME_LIMIT = 120
NIGHT_TIME_WARN = 90 # should be less than NIGHT_TIME_LIMIT
MIDNIGHT_TIME_LIMIT = 60
MIDNIGHT_TIME_WARN = 55 # should be less than MIDNIGHT_TIME_LIMIT
DAY_TIME_LIMIT_WARN = 600
DAY_TIME_LIMIT_CHANGE = 120 # seconds after DAY_TIME_LIMIT_WARN has passed
# May only be set if the above are also set
SHORT_DAY_PLAYERS = 6 # Number of players left to have a short day
SHORT_DAY_LIMIT_WARN = 400
SHORT_DAY_LIMIT_CHANGE = 120
KILL_IDLE_TIME = 300
WARN_IDLE_TIME = 180
PART_GRACE_TIME = 12
QUIT_GRACE_TIME = 30
# controls how many people it does in one /msg; only works for messages that are the same
MAX_PRIVMSG_TARGETS = 2
LEAVE_STASIS_PENALTY = 1
IDLE_STASIS_PENALTY = 0
PART_STASIS_PENALTY = 1
GOAT_HERDER = True
SELF_LYNCH_ALLOWED = True
CARE_BOLD = True
CARE_COLOR = True
KILL_COLOR = False
KILL_BOLD = False
LOG_FILENAME = "game.log"
BARE_LOG_FILENAME = "barelog.log"
#################################################################################################################
# ROLE INDEX: PLAYERS SEER|WOLF|CURSED|DRUNK|HARLOT|TRAITOR|GUNNER|CROW|GUARD|DETECTIVE ##
# WITCH|CUPID|THIEF|GIRL|HUNTER|ANCIENT|SHAMAN|IDIOT|SCAPEGOAT|PIPER ##
# WHITEWOLF|CROW|KID|ANGEL|FATHER|BBW|SISTER ##
#################################################################################################################
ROLES_GUIDE = { 4 : (1,1,0,0,0,0,0,0,0,0, ##
0,0,0,0,0,0,0,0,0,0, ##
0,0,0,0,0,0,0), ##
6 : (1,1,1,1,0,0,0,0,0,0, ##
1,0,0,0,0,0,0,0,0,0, ##
0,0,0,0,0,0,2), ##
8 : (1,2,1,1,1,0,0,0,0,0, ##
1,0,0,0,0,0,0,0,0,0, ##
0,0,0,0,0,0,2), ##
10 : (1,2,1,1,1,1,1,0,0,0, ##
0,0,0,0,0,0,0,0,0,0, ##
0,0,0,0,0,0,0), ##
12 : (1,2,1,1,1,1,1,1,0,1, ##
0,0,0,0,0,0,0,0,0,0, ##
0,0,0,0,0,0,0), ##
15 : (1,3,1,1,1,1,1,1,0,1, ##
0,0,0,0,0,0,0,0,0,0, ##
0,0,0,0,0,0,0), ##
17 : (1,3,1,1,1,1,1,1,1,1, ##
0,0,0,0,0,0,0,0,0,0, ##
0,0,0,0,0,0,0), ##
22 : (1,4,1,1,1,1,1,1,1,1, ##
0,0,0,0,0,0,0,0,0,0, ##
0,0,0,0,0,0,0), ##
25 : (1,4,2,1,1,1,1,1,1,1, ##
0,0,0,0,0,0,0,0,0,0, ##
0,0,0,0,0,0,0), ##
29 : (1,5,2,1,1,1,1,1,1,1, ##
0,0,0,0,0,0,0,0,0,0, ##
0,0,0,0,0,0,0), ##
None : (0,0,0,0,0,0,0,0,0,0, ##
0,0,0,0,0,0,0,0,0,0, ##
0,0,0,0,0,0,0)} ##
#################################################################################################################
# Notes: ##
#################################################################################################################
GAME_MODES = {}
AWAY = [] # cloaks of people who are away.
SIMPLE_NOTIFY = [] # cloaks of people who !simple, who want everything /notice'd
ROLE_INDICES = {0 : "seer", # ok
1 : "wolf", # ok
2 : "cursed villager", # ok
3 : "village drunk", # ok
4 : "harlot", # ok
5 : "traitor", # ok
6 : "gunner", # ok
7 : "werecrow", # ok
8 : "guardian angel", # ok
9 : "detective", # ok
10 : "witch", # ok
11 : "cupid", # TODO
12 : "thief", # TODO
13 : "little girl", # ok
14 : "hunter", # TODO
15 : "ancient", # TODO
16 : "shaman", # ok
17 : "village idiot", # TODO
18 : "scapegoat", # TODO
19 : "pied piper", # TODO
20 : "white wolf", # TODO
21 : "crow", # ok
22 : "wild kid", # TODO
23 : "angel", # ok
24 : "wolf father", # TODO
25 : "big bad wolf", # TODO
26 : "sister" # ok
}
# TODO: Code roles
# Villager : default role, tries to survive with the restof the village
# 0: Seer: Can scrye at night to discover the identity of a villager
CHATTY_SEER_CHANCE = 1/2
# 1: Wolf: Meets other wolves at night to eat a villager
# 2: Cursed: Seen as a wolf by the seer but as an innocent by the detective
# 3: Drunk villager: no power
# 4: Harlot: can visit someone at night and prevent their power
# 5: Traitor: appears as a villager, but sided with the wolves
# 6: Gunner: has a gun, may kill the wolves at night, may lose his gun upon death
SHOTS_MULTIPLIER = .12 # ceil(shots_multiplier * len_players) = bullets given
DRUNK_SHOTS_MULTIPLIER = 3
# HIT MISS SUICIDE
GUN_CHANCES = ( 5/7 , 1/7 , 1/7 )
DRUNK_GUN_CHANCES = ( 2/7 , 4/7 , 1/7 )
MANSLAUGHTER_CHANCE = 1/5 # ACCIDENTAL HEADSHOT (FATAL)
GUNNER_KILLS_WOLF_AT_NIGHT_CHANCE = 0
# 7: Werecrow: sided with the wolves, can visit a player at night to spy on him
# 8: Guardian angel: each night, can protect one player from the wolves. May (will?) die if he protects a wolf. No effect on the little girl.
GUARDIAN_ANGEL_DIES_CHANCE = 1/2
# 9: Detective: can check someone's true identity during the day, may reveal himself doing so.
DETECTIVE_REVEALED_CHANCE = 2/5
# 10: Witch: tries to eliminate the wolves, has two potions:
# life potion to save the werewolves target, death potion to kill someone during the night
# 11: Cupid: tries to kill the wolves ; at the beginning of the game, will choose two players and "marry" them
# The two lovers' fate are linked: if one dies, so does the other. They know each other.
# Their goal is to survive together. If they are on opposite side, their goal is to win against everyone.
# 12: Thief: will choose his role between the two that haven't been drawn.
# 13: Little girl: villager, can spy the wolves at night
LITTLE_GIRL_PEEK_CHANCE = 1/5
LITTLE_GIRL_SEEN_CHANCE = 1/6 #Double if peek is successful
# 14: Hunter: villager, revenge kills one player when he dies
# 15: Ancient: villager, he can survive the first wolf attack.
# If he is lynched or killed by the hunter or witch, every villager loses their power
# 16: Shaman: tries to kill the wolves ; has a short time during the night to listen to the spirits
# 17: Village idiot: villager. If the village decides to lynch him, he will be spared at the last moment, but will lose his right to vote.
# 18: Scapegoat: villager. In case of a tie, he will be killed by default.
# 19: Pied piper: His goal is to win alone. He must charm every player alive, up to two each night.
# 20: White Wolf: His goal is to win the game alone. The other werewolves think he is a normal wolf, but every other night,
# he may kill one werewolf of his choice.
# 21: Crow: villager ; each night he may curse one player which will have two votes against him the next morning.
# 22: Wild kid: villager. At the beginning of the game, chooses a role model. If that player dies, the kid becomes a werewolf.
# 23: Angel: his goal is to be eliminated on the first turn of the game. If he does, the game ends and he wins.
# If he fails, he becomes a powerless villager
# 24: Wolf-father: his goal is to eliminate all the innocents (non-werewolves). At night convenes with the wolves to eliminate a player
# Once per game, he can change the victim of the wolves into a wolf. The infected player keeps his power.
# 25: Big Bad Wolf: his goal is to kill the innocents (non wolves), each nights he convenes with the wolves.
# Each night as long as no other wolf is dead, he may kill an additional victim
# 26: Sister: villager. She knows the identity of her sister, whom she can trust.
INDEX_OF_ROLE = dict((v,k) for k,v in ROLE_INDICES.items())
NO_VICTIMS_MESSAGES = ("The body of a young penguin pet is found.",
"A pool of blood and wolf paw prints are found.",
"Traces of wolf fur are found.")
LYNCH_MESSAGES = ("The villagers, after much debate, finally decide on lynching \u0002{0}\u0002, who turned out to be... a \u0002{1}\u0002.",
"Under a lot of noise, the pitchfork-bearing villagers lynch \u0002{0}\u0002, who turned out to be... a \u0002{1}\u0002.",
"The mob drags a protesting \u0002{0}\u0002 to the hanging tree. S/He succumbs to the will of the horde, and is hanged. It is discovered (s)he was a \u0002{1}\u0002.",
"Resigned to his/her fate, \u0002{0}\u0002 is led to the gallows. After death, it is discovered (s)he was a \u0002{1}\u0002.",
"As s/he is about to be lynched, \u0002{0}\u0002, the \u0002{1}\u0002, throws a grenade at the mob. The grenade explodes early.")
import botconfig
RULES = (botconfig.CHANNEL + " channel rules: 1) Be nice to others. 2) Do not share information "+
"after death. 3) No bots allowed. 4) Do not play with clones.\n"+
"5) Do not quit unless you need to leave. 6) Keep it "+
"safe for work. 7) Do not paste PM's from the bot during the game. "+
"8) Use common sense. 9) Waiting for timeouts is discouraged.")
# Other settings:
START_WITH_DAY = False
WOLF_STEALS_GUN = False # at night, the wolf can steal steal the victim's bullets
OPT_IN_PING = False # instead of !away/!back, users can opt-in to be pinged
PING_IN = [] # cloaks of users who have opted in for ping
# True iff role `rol` exists in the current game and `plyr` holds it.
is_role = lambda plyr, rol: rol in ROLES and plyr in ROLES[rol]
def plural(role):
    """Return the plural of a role name, handling the irregular forms."""
    irregular = {"wolf": "wolves", "person": "people"}
    return irregular.get(role, role + "s")
def list_players():
    """Return a flat list of every nick currently holding any role."""
    return [nick for members in ROLES.values() for nick in members]
def list_players_and_roles():
    """Return a dict mapping each player's nick to their role name."""
    return {nick: role for role in ROLES.keys() for nick in ROLES[role]}
# Role currently held by `plyr`; raises KeyError if they are not playing.
get_role = lambda plyr: list_players_and_roles()[plyr]
def del_player(pname):
    """Drop `pname` from the role table (looks up their role first)."""
    ROLES[get_role(pname)].remove(pname)
class InvalidModeException(Exception): pass  # raised on malformed !fgame mode arguments
def game_mode(name):
    """Class-decorator factory: register the decorated class in GAME_MODES."""
    def register(mode_class):
        GAME_MODES[name] = mode_class
        return mode_class
    return register
# Plural option names accepted by "!fgame roles=..." mapped to the
# matching index inside the ROLES_GUIDE tuples.
CHANGEABLE_ROLES = { "seers" : INDEX_OF_ROLE["seer"],
                     "wolves" : INDEX_OF_ROLE["wolf"],
                     "cursed" : INDEX_OF_ROLE["cursed villager"],
                     "drunks" : INDEX_OF_ROLE["village drunk"],
                     "harlots" : INDEX_OF_ROLE["harlot"],
                     "traitors" : INDEX_OF_ROLE["traitor"],
                     "gunners" : INDEX_OF_ROLE["gunner"],
                     "werecrows" : INDEX_OF_ROLE["werecrow"],
                     "angels" : INDEX_OF_ROLE["guardian angel"],
                     "detectives" : INDEX_OF_ROLE["detective"]}
# TODO: implement game modes
@game_mode("roles")
class ChangedRolesMode(object):
    """Game mode with user-overridden role counts.

    Example: !fgame roles=wolves:1,seers:0,angels:1

    Parses the comma-separated "role:count" pairs, applies them on top of
    the all-zero template (ROLES_GUIDE[None]), and uses the result for
    every player-count bracket.  Raises InvalidModeException on any
    syntax error, unknown role, or non-integer count.
    """
    def __init__(self, arg):
        self.ROLES_GUIDE = ROLES_GUIDE.copy()
        lx = list(ROLES_GUIDE[None])
        pairs = arg.split(",")
        if not pairs:
            raise InvalidModeException("Invalid syntax for mode roles.")
        for pair in pairs:
            change = pair.split(":")
            if len(change) != 2:
                raise InvalidModeException("Invalid syntax for mode roles.")
            role, num = change
            try:
                num = int(num)
                try:
                    # Look up the tuple slot for this (plural) role name.
                    lx[CHANGEABLE_ROLES[role.lower()]] = num
                except KeyError:
                    raise InvalidModeException(("The role \u0002{0}\u0002 "+
                                                "is not valid.").format(role))
            except ValueError:
                raise InvalidModeException("A bad value was used in mode roles.")
        # Same custom tuple applies regardless of player count.
        for k in ROLES_GUIDE.keys():
            self.ROLES_GUIDE[k] = tuple(lx)
# Persistence
# Load saved settings
import sqlite3
# Single shared connection; check_same_thread=False because IRC handlers
# may run on other threads.
conn = sqlite3.connect("data.sqlite3", check_same_thread = False)
with conn:
    c = conn.cursor()
    c.execute('CREATE TABLE IF NOT EXISTS away (nick TEXT)') # whoops, i mean cloak, not nick
    c.execute('CREATE TABLE IF NOT EXISTS simple_role_notify (cloak TEXT)') # people who understand each role
    c.execute('SELECT * FROM away')
    for row in c:
        AWAY.append(row[0])
    c.execute('SELECT * FROM simple_role_notify')
    for row in c:
        SIMPLE_NOTIFY.append(row[0])
    # populate the roles table (rebuilt from scratch on every startup)
    c.execute('DROP TABLE IF EXISTS roles')
    c.execute('CREATE TABLE roles (id INTEGER PRIMARY KEY AUTOINCREMENT, role TEXT)')
    for x in ["villager"]+list(ROLE_INDICES.values()):
        c.execute("INSERT OR REPLACE INTO roles (role) VALUES (?)", (x,))
    c.execute(('CREATE TABLE IF NOT EXISTS rolestats (player TEXT, role TEXT, '+
        'teamwins SMALLINT, individualwins SMALLINT, totalgames SMALLINT, '+
        'UNIQUE(player, role))'))
    if OPT_IN_PING:
        c.execute('CREATE TABLE IF NOT EXISTS ping (cloak text)')
        c.execute('SELECT * FROM ping')
        for row in c:
            PING_IN.append(row[0])
def remove_away(clk):
    """Forget that cloak `clk` is marked away."""
    with conn:
        c = conn.cursor()
        c.execute('DELETE from away where nick=?', (clk,))

def add_away(clk):
    """Persist that cloak `clk` is away."""
    with conn:
        c = conn.cursor()
        c.execute('INSERT into away VALUES (?)', (clk,))

def remove_simple_rolemsg(clk):
    """Stop sending cloak `clk` the simplified role notices."""
    with conn:
        c = conn.cursor()
        c.execute('DELETE from simple_role_notify where cloak=?', (clk,))

def add_simple_rolemsg(clk):
    """Opt cloak `clk` into the simplified role notices."""
    with conn:
        c = conn.cursor()
        c.execute('INSERT into simple_role_notify VALUES (?)', (clk,))

def remove_ping(clk):
    """Remove cloak `clk` from the opt-in !ping list."""
    with conn:
        c = conn.cursor()
        c.execute('DELETE from ping where cloak=?', (clk,))

def add_ping(clk):
    """Add cloak `clk` to the opt-in !ping list."""
    with conn:
        c = conn.cursor()
        c.execute('INSERT into ping VALUES (?)', (clk,))
def update_role_stats(acc, role, won, iwon):
    """Record one finished game for (account, role) in the rolestats table.

    Increments the team-win counter if `won`, the individual-win counter
    if `iwon`, and always bumps the total-games counter, upserting the row.
    """
    with conn:
        c = conn.cursor()
        c.execute(("SELECT teamwins, individualwins, totalgames FROM rolestats "+
                   "WHERE player=? AND role=?"), (acc, role))
        row = c.fetchone()
        # Start from the stored counters, or zeros on first appearance.
        # (Removed a dead `wins, iwins, totalgames = 0, 0, 0` that was
        # immediately overwritten by this branch.)
        if row:
            wins, iwins, total = row
        else:
            wins, iwins, total = 0, 0, 0
        if won:
            wins += 1
        if iwon:
            iwins += 1
        total += 1
        c.execute("INSERT OR REPLACE INTO rolestats VALUES (?,?,?,?,?)",
                  (acc, role, wins, iwins, total))
| Epithumia/pyIRCbot | settings/mascarade.py | Python | bsd-2-clause | 17,931 | [
"VisIt"
] | aec17b96ab3831cec05d9870ca9ba8aa5171d1967ae33d6d3c28759103854fe1 |
from aiida.orm import Code, DataFactory, WorkflowFactory
from aiida.orm.workflow import Workflow
from aiida.orm.calculation.inline import make_inline
#from aiida.workflows.wf_gruneisen_pressure import WorkflowGruneisen
#from aiida.workflows.wf_phonon import WorkflowPhonon
#from aiida.orm.data.structure import StructureData
#from aiida.orm.data.array import ArrayData
from aiida.orm import load_workflow
import numpy as np
import StringIO
from phonopy import PhonopyQHA
from phonon_common import arrange_band_labels, get_data_info, get_file_from_numpy_array
WorkflowPhonon = WorkflowFactory('wf_phonon')
WorkflowGruneisen = WorkflowFactory('wf_gruneisen_pressure')
StructureData = DataFactory('structure')
ParameterData = DataFactory('parameter')
ArrayData = DataFactory('array')
# Normalize from unit formula to unit cell
def gcd(L):
    # Greatest common divisor of the element multiplicities in L, used to
    # reduce a formula-unit composition to the smallest integer ratio.
    # np.unique(..., return_counts=True)[1] gives each element's count.
    # NOTE(review): relies on the Python 2 builtin `reduce` and on
    # `fractions.gcd` (removed in Python 3.9) — consistent with this
    # codebase's py2 idioms (e.g. the StringIO import above).
    import fractions
    L = np.unique(L, return_counts=True)[1]
    return reduce(fractions.gcd, L)
def check_dos_stable(wf, tol=1e-6):
    """Decide whether a phonon DOS from a finished workflow is dynamically stable.

    Reads 'total_dos' and 'frequency' from the workflow's 'dos' result and
    compares the frequency-weighted spectral weight at negative (imaginary)
    frequencies against the positive-frequency weight.

    :param wf: object exposing get_result('dos') with get_array(...).
    :param tol: maximum allowed negative/positive weight ratio.
    :return: True if stable; False otherwise, including when the dos
        result is missing or unreadable (best-effort semantics preserved).
    """
    try:
        dos = wf.get_result('dos').get_array('total_dos')
        freq = wf.get_result('dos').get_array('frequency')
    except Exception:  # narrowed from bare except: (no longer hides SystemExit)
        return False

    mask_neg = np.ma.masked_less(freq, 0.0).mask
    mask_pos = np.ma.masked_greater(freq, 0.0).mask

    if not mask_neg.any():  # no imaginary modes at all -> stable
        return True
    if not mask_pos.any():  # no real modes at all -> unstable
        return False

    # Frequency-weighted DOS integrals on each side of zero.
    int_neg = -np.trapz(np.multiply(dos[mask_neg], freq[mask_neg]), x=freq[mask_neg])
    int_pos = np.trapz(np.multiply(dos[mask_pos], freq[mask_pos]), x=freq[mask_pos])

    if int_neg / int_pos > tol:
        return False
    else:
        return True
def qha_prediction(wf, interval, min, max, use_all_data=True):
    """Predict the stress window needed for a converged QHA calculation.

    Gathers every finished phonon sub-workflow of `wf`, builds a PhonopyQHA
    from their volumes/energies/thermal properties, fits stress as a
    function of equilibrium volume, and returns a widened
    (min_stress, max_stress) tuple, or None if fewer than 5 usable points.

    NOTE(review): `min`/`max` shadow the builtins but are part of the
    public signature and are left unchanged.
    """
    # max = wf.get_attribute('max')
    # min = wf.get_attribute('min')
    wf_complete_list = []
    for step_name in ['pressure_expansions', 'collect_data']:
        if wf.get_step(step_name):
            wf_complete_list += list(wf.get_step(step_name).get_sub_workflows())
    # Also include the phonon runs launched from the initial Gruneisen step.
    wf_complete_list += list(wf.get_step('start').get_sub_workflows()[0].get_step('start').get_sub_workflows())
    if use_all_data:
        # check data is stable
        good = [wf_test.get_attribute('pressure') for wf_test in wf_complete_list
                if check_dos_stable(wf_test, tol=1e-6)]
        good = np.sort(good)
        test_pressures = np.array(good)
        # Deduplicate pressures that agree to 4 decimals.
        test_pressures = test_pressures[np.unique(np.round(test_pressures, decimals=4),
                                                  return_index=True)[1]].tolist()
    else:
        test_pressures = np.arange(min, max, interval).tolist()
    volumes = []
    stresses = []
    electronic_energies = []
    temperatures = []
    fe_phonon = []
    entropy = []
    cv = []
    # NOTE(review): the `if True:` / `if False:` pair below keeps an
    # experimental alternative collection strategy disabled in place.
    if True:
        for wf_test in wf_complete_list:
            for pressure in test_pressures:
                if wf_test.get_state() == 'FINISHED':
                    if np.isclose(wf_test.get_attribute('pressure'), pressure, atol=interval / 4, rtol=0):
                        thermal_properties = wf_test.get_result('thermal_properties')
                        optimized_data = wf_test.get_result('optimized_structure_data')
                        final_structure = wf_test.get_result('final_structure')
                        electronic_energies.append(optimized_data.dict.energy)
                        volumes.append(final_structure.get_cell_volume())
                        stresses.append(pressure)
                        temperatures = thermal_properties.get_array('temperature')
                        fe_phonon.append(thermal_properties.get_array('free_energy'))
                        entropy.append(thermal_properties.get_array('entropy'))
                        cv.append(thermal_properties.get_array('cv'))
    if False:
        test_pressures = []
        for wf_test in wf_complete_list:
            if wf_test.get_state() != 'ERROR':
                repeated = False
                for p in test_pressures:
                    if np.isclose(wf_test.get_attribute('pressure'), p, atol=interval / 4, rtol=0):
                        repeated = True
                if not repeated:
                    test_pressures.append(wf_test.get_attribute('pressure'))
                    thermal_properties = wf_test.get_result('thermal_properties')
                    optimized_data = wf_test.get_result('optimized_structure_data')
                    final_structure = wf_test.get_result('final_structure')
                    electronic_energies.append(optimized_data.dict.energy)
                    volumes.append(final_structure.get_cell_volume())
                    temperatures = thermal_properties.get_array('temperature')
                    fe_phonon.append(thermal_properties.get_array('free_energy'))
                    entropy.append(thermal_properties.get_array('entropy'))
                    cv.append(thermal_properties.get_array('cv'))
    if len(stresses) < 5:
        # raise Exception('Not enough points for QHA prediction')
        return None
    # Sort every per-volume series by increasing volume.
    sort_index = np.argsort(volumes)
    stresses = np.array(stresses)[sort_index]
    volumes = np.array(volumes)[sort_index]
    electronic_energies = np.array(electronic_energies)[sort_index]
    temperatures = np.array(temperatures)
    fe_phonon = np.array(fe_phonon).T[:, sort_index]
    entropy = np.array(entropy).T[:, sort_index]
    cv = np.array(cv).T[:, sort_index]
    # Calculate QHA properties
    phonopy_qha = PhonopyQHA(np.array(volumes),
                             np.array(electronic_energies),
                             eos="vinet",
                             temperatures=np.array(temperatures),
                             free_energy=np.array(fe_phonon),
                             cv=np.array(cv),
                             entropy=np.array(entropy),
                             # t_max=options.t_max,
                             verbose=False)
    # Get data
    volume_temperature = phonopy_qha.get_volume_temperature()
    from scipy.optimize import curve_fit, OptimizeWarning
    try:
        # Fit to an exponential equation
        def fitting_function(x, a, b, c):
            return np.exp(-b * (x + a)) + c
        p_b = 0.1
        p_c = -200
        p_a = -np.log(-p_c) / p_b - volumes[0]
        popt, pcov = curve_fit(fitting_function, volumes, stresses, p0=[p_a, p_b, p_c], maxfev=100000)
        min_stresses = fitting_function(volume_temperature, *popt)
    except OptimizeWarning:
        # Fall back to a quadratic stress(volume) fit.
        fit_vs = np.polyfit(volumes, stresses, 2)
        min_stresses = np.array([np.polyval(fit_vs, i) for i in volume_temperature])
    # if (np.max(min_stresses) - np.min(min_stresses)) < 1:
    #     return None
    # Widen the predicted window by `tolerance` of its span on each side.
    tolerance = 0.8
    addition = (np.max(min_stresses) - np.min(min_stresses)) * tolerance
    return np.min(min_stresses) - addition, np.max(min_stresses) + addition
def get_data_from_wf_phonon(wf):
    """Collect the results of a finished phonon workflow into a plain dict.

    Returns the electronic energy, external pressure, relaxed structure
    and its volume, the force-constant matrix, the phonopy input
    parameters, and a phonopy object rebuilt from them.
    """
    from phonon_common import get_phonon

    structure = wf.get_result('final_structure')
    force_constants = wf.get_result('force_constants').get_array('force_constants')
    phonopy_input = wf.get_parameter('phonopy_input')['parameters']

    return {
        'energy': wf.get_result('optimized_structure_data').dict.energy,
        'pressure': wf.get_attribute('pressure'),
        'structure': structure,
        'volume': structure.get_cell_volume(),
        'force_constants': force_constants,
        'phonopy_input': phonopy_input,
        'phonon': get_phonon(structure, force_constants, phonopy_input),
    }
def gruneisen_predict(wf_origin, wf_plus, wf_minus):
    """Predict a (min, max) thermal stress range from three phonon workflows.

    Builds a PhonopyGruneisen object from the equilibrium / expanded /
    compressed phonon runs and estimates the stress window spanned by
    thermal expansion up to 1000 K, shifted (if needed) so it brackets 0.

    Performance fix: the original re-ran get_data_from_wf_phonon() for
    every field it read (up to 10 calls), rebuilding the expensive phonopy
    object each time; each workflow is now extracted exactly once.
    """
    from phonopy import PhonopyGruneisen
    from phonon_common import thermal_expansion as check_expansion

    data_origin = get_data_from_wf_phonon(wf_origin)
    data_plus = get_data_from_wf_phonon(wf_plus)
    data_minus = get_data_from_wf_phonon(wf_minus)

    energies = [data_origin['energy'], data_plus['energy'], data_minus['energy']]
    stresses = [data_origin['pressure'], data_plus['pressure'], data_minus['pressure']]
    volumes = [data_origin['volume'], data_plus['volume'], data_minus['volume']]

    gruneisen = PhonopyGruneisen(data_origin['phonon'],  # equilibrium
                                 data_plus['phonon'],    # plus
                                 data_minus['phonon'])   # minus

    phonopy_input = data_origin['phonopy_input']
    gruneisen.set_mesh(phonopy_input['mesh'], is_gamma_center=False, is_mesh_symmetry=True)

    # Thermal expansion approximate prediction
    temperatures, min_volumes, min_stresses = check_expansion(volumes,
                                                              energies,
                                                              gruneisen,
                                                              stresses=stresses,
                                                              t_max=1000,
                                                              t_step=5)

    # Safety control: shift the window so it always brackets zero stress.
    if 0 < np.min(min_stresses):
        min_stresses -= abs(np.min(min_stresses))
    if 0 > np.max(min_stresses):
        min_stresses += abs(np.max(min_stresses))

    return np.min(min_stresses), np.max(min_stresses)
@make_inline
def calculate_qha_inline(**kwargs):
    """Inline calculation: run the quasi-harmonic approximation.

    Expects indexed kwargs 'final_structure_<i>', 'optimized_structure_data_<i>'
    and 'thermal_properties_<i>' for each volume point.  Builds a PhonopyQHA
    and returns {'qha_output': ArrayData} with thermal expansion, V(T),
    Cp(T), Gibbs energy, and the Helmholtz free-energy/volume fit data.
    """
    from phonopy import PhonopyQHA
    from phonon_common import get_helmholtz_volume_from_phonopy_qha
    import numpy as np

    # thermal_properties_list = [key for key, value in kwargs.items() if 'thermal_properties' in key.lower()]
    # optimized_structure_data_list = [key for key, value in kwargs.items() if 'optimized_structure_data' in key.lower()]
    structure_list = [key for key, value in kwargs.items() if 'final_structure' in key.lower()]

    volumes = []
    electronic_energies = []
    fe_phonon = []
    entropy = []
    cv = []
    # One (volume, energy, thermal-properties) triple per index i.
    for i in range(len(structure_list)):
        # volumes.append(kwargs.pop(key).get_cell_volume())
        volumes.append(kwargs.pop('final_structure_{}'.format(i)).get_cell_volume())
        electronic_energies.append(kwargs.pop('optimized_structure_data_{}'.format(i)).dict.energy)
        thermal_properties = kwargs.pop('thermal_properties_{}'.format(i))
        temperatures = thermal_properties.get_array('temperature')
        fe_phonon.append(thermal_properties.get_array('free_energy'))
        entropy.append(thermal_properties.get_array('entropy'))
        cv.append(thermal_properties.get_array('cv'))

    # Sort every per-volume series by increasing volume.
    sort_index = np.argsort(volumes)

    temperatures = np.array(temperatures)
    volumes = np.array(volumes)[sort_index]
    electronic_energies = np.array(electronic_energies)[sort_index]
    fe_phonon = np.array(fe_phonon).T[:, sort_index]
    entropy = np.array(entropy).T[:, sort_index]
    cv = np.array(cv).T[:, sort_index]

    # Calculate QHA
    phonopy_qha = PhonopyQHA(np.array(volumes),
                             np.array(electronic_energies),
                             eos="vinet",
                             temperatures=np.array(temperatures),
                             free_energy=np.array(fe_phonon),
                             cv=np.array(cv),
                             entropy=np.array(entropy),
                             # t_max=options.t_max,
                             verbose=False)

    # Get data
    free_energy_volume_fitting = get_helmholtz_volume_from_phonopy_qha(phonopy_qha)

    # NOTE(review): reaches into phonopy private attributes (_qha) to trim
    # temperatures to the converged range — may break on phonopy upgrades.
    qha_temperatures = phonopy_qha._qha._temperatures[:phonopy_qha._qha._max_t_index]
    helmholtz_volume = phonopy_qha.get_helmholtz_volume()
    thermal_expansion = phonopy_qha.get_thermal_expansion()
    volume_temperature = phonopy_qha.get_volume_temperature()
    heat_capacity_P_numerical = phonopy_qha.get_heat_capacity_P_numerical()
    volume_expansion = phonopy_qha.get_volume_expansion()
    gibbs_temperature = phonopy_qha.get_gibbs_temperature()

    qha_output = ArrayData()
    qha_output.set_array('temperatures', np.array(qha_temperatures))
    #qha_output.set_array('helmholtz_volume', np.array(helmholtz_volume))
    qha_output.set_array('thermal_expansion', np.array(thermal_expansion))
    qha_output.set_array('volume_temperature', np.array(volume_temperature))
    qha_output.set_array('heat_capacity_P_numerical', np.array(heat_capacity_P_numerical))
    qha_output.set_array('volume_expansion', np.array(volume_expansion))
    qha_output.set_array('gibbs_temperature', np.array(gibbs_temperature))
    qha_output.set_array('helmholtz_volume_points', np.array(free_energy_volume_fitting['points']))
    qha_output.set_array('helmholtz_volume_fit', np.array(free_energy_volume_fitting['fit']))
    qha_output.set_array('helmholtz_volume_minimum', np.array(free_energy_volume_fitting['minimum']))

    return {'qha_output': qha_output}
@make_inline
def create_volumes_inline(**kwargs):
    """Inline calculation: build isotropically scaled copies of a structure.

    kwargs: 'structure' (StructureData) and 'volumes' (ParameterData whose
    'relations' list holds linear scale factors).  Returns one StructureData
    per factor, keyed 'structure_<i>'.  Both the cell and the atomic
    positions are scaled by the same linear factor.
    """
    initial_structure = kwargs['structure']
    volume_relations = kwargs['volumes'].get_dict()['relations']
    structures = {}
    for i, vol in enumerate(volume_relations):
        cell = np.array(initial_structure.cell) * vol
        structure = StructureData(cell=cell)
        for site in initial_structure.sites:
            structure.append_atom(position=np.array(site.position) * vol, symbols=site.kind_name)
        structures["structure_{}".format(i)] = structure
    return structures
class Wf_qhaWorkflow(Workflow):
def __init__(self, **kwargs):
super(Wf_qhaWorkflow, self).__init__(**kwargs)
if 'expansion_method' in kwargs:
self._expansion_method = kwargs['expansion_method']
else:
self._expansion_method = 'pressure' # By default expansion method is pressure
if 'include_born' in kwargs:
self._include_born = kwargs['include_born']
else:
self._include_born = False # By default not include born
if 'manual' in kwargs:
self._manual = kwargs['manual']
else:
self._manual = False # By default automatic mode
if 'only_grune' in kwargs:
self._only_grune = kwargs['only_grune']
else:
self._only_grune = False # By default use only grune to determine all QHA volume expansions
if 'n_points' in kwargs:
self._n_points = kwargs['n_points']
else:
self._n_points = 10 # By default use 10 points in automatic mode
# Calculates the reference crystal structure (optimize it if requested)
    @Workflow.step
    def start(self):
        """Entry step: record options and dispatch to the chosen expansion mode.

        In manual mode jumps straight to the user-supplied pressure list;
        otherwise launches a Gruneisen pre-run and continues with either the
        Gruneisen-only or the iterative pressure-expansion strategy.
        """
        self.append_to_report('Starting workflow_workflow')
        self.append_to_report('Phonon calculation of base structure')
        # Persist the constructor options as workflow attributes so later
        # steps (and restarts) can read them.
        self.add_attribute('manual', self._manual)
        self.add_attribute('n_points', self._n_points)
        self.add_attribute('include_born', self._include_born)
        if self._manual:
            self.next(self.pressure_manual_expansions)
            return
        wf_parameters = self.get_parameters()
        # self.append_to_report('crystal: ' + wf_parameters['structure'].get_formula())
        wf = WorkflowGruneisen(params=wf_parameters,
                               constant_volume=False,
                               pre_optimize=True,
                               p_displacement=2,
                               pressure=0,
                               include_born=self._include_born)
        wf.store()
        #wf = load_workflow(332)
        self.attach_workflow(wf)
        wf.start()
        if self._only_grune:
            self.next(self.pressure_gruneisen)
            return
        if self._expansion_method == 'pressure':
            self.next(self.pressure_expansions)
        elif self._expansion_method == 'volume':
            # Volume-based expansion is not implemented; abort cleanly.
            self.append_to_report('Not yet implemented')
            self.next(self.exit)
        else:
            self.append_to_report('Error no method defined')
            self.next(self.exit)
# Direct manual stresses expanasions
    @Workflow.step
    def pressure_manual_expansions(self):
        """Manual mode: launch one phonon workflow per user-given pressure.

        Ensures 0 kbar is always included and stores the pressure spacing
        as the 'interval' attribute for the later QHA collection step.
        """
        self.append_to_report('Manual pressure expansion calculations')
        wf_parameters = self.get_parameters()
        test_pressures = wf_parameters['scan_pressures'] # in kbar
        if not 0 in test_pressures:
            test_pressures.append(0)
        # Use the smallest positive spacing as the matching interval; fall
        # back to the first pair's spacing if diffs are non-positive/zero.
        if np.min(np.diff(test_pressures)) > 1e-5:
            self.add_attribute('interval', np.min(np.diff(test_pressures)))
        else:
            self.add_attribute('interval', np.abs(test_pressures[1]-test_pressures[0]))
        # wfs_test = [821, 820]
        for i, pressure in enumerate(test_pressures):
            self.append_to_report('pressure: {}'.format(pressure))
            # Submit workflow
            wf = WorkflowPhonon(params=wf_parameters,
                                pressure=pressure,
                                optimize=True,
                                include_born=self.get_attribute('include_born'))
            wf.store()
            self.attach_workflow(wf)
            wf.start()
        self.next(self.qha_calculation)
# Auto expansion just using Gruneisen prediction
    @Workflow.step
    def pressure_gruneisen(self):
        """Gruneisen-only mode: scan n_points pressures across the predicted range.

        Takes the stress span predicted by the initial Gruneisen run and
        launches phonon workflows on a symmetric linear grid over it.
        """
        self.append_to_report('Trust Gruneisen expansion (For empirical potentials)')
        wf_parameters = self.get_parameters()
        prediction = self.get_step('start').get_sub_workflows()[0].get_result('thermal_expansion_prediction')
        stresses = prediction.get_array('stresses')
        n_points = self.get_attribute('n_points')
        # Symmetric grid around zero, spanning +/- the largest predicted stress.
        test_pressures = np.linspace(-1.0 * np.max(stresses), np.max(stresses), n_points)  # in kbar
        self.add_attribute('interval', test_pressures[1] - test_pressures[0])
        self.add_attribute('max', test_pressures[1])
        self.add_attribute('min', test_pressures[0])
        # wfs_test = [821, 820]
        for i, pressure in enumerate(test_pressures):
            self.append_to_report('pressure: {}'.format(pressure))
            # Submit workflow
            wf = WorkflowPhonon(params=wf_parameters,
                                pressure=pressure,
                                optimize=True,
                                include_born=self.get_attribute('include_born'))
            wf.store()
            self.attach_workflow(wf)
            wf.start()
        self.next(self.qha_calculation)
# Auto expansion by searching real DOS limits (hopping algorithm)
@Workflow.step
def pressure_expansions(self):
self.append_to_report('Pressure expansion calculations')
wf_parameters = self.get_parameters()
# structure = self.get_step(self.start).get_sub_workflows()[0].get_result('final_structure')
prediction = self.get_step('start').get_sub_workflows()[0].get_result('thermal_expansion_prediction')
stresses = prediction.get_array('stresses')
if np.isnan(stresses).any():
self.append_to_report('Gruneisen Prediction error')
exit()
test_pressures = [np.min(stresses), np.max(stresses)] # in kbar
total_range = test_pressures[1] - test_pressures[0]
interval = total_range/2
self.add_attribute('npoints', 5)
self.add_attribute('test_range', test_pressures)
self.add_attribute('total_range', total_range)
self.add_attribute('max', None)
self.add_attribute('min', None)
self.add_attribute('interval', interval)
self.add_attribute('clock', 1)
wfs_test = [344, 345]
for i, pressure in enumerate(test_pressures):
self.append_to_report('pressure: {}'.format(pressure))
# Submit workflow
wf = WorkflowPhonon(params=wf_parameters,
pressure=pressure,
optimize=True,
include_born=self.get_attribute('include_born'))
wf.store()
#wf = load_workflow(wfs_test[i])
self.attach_workflow(wf)
wf.start()
self.next(self.collect_data)
@Workflow.step
def collect_data(self):
self.append_to_report('--- collect step ------')
wf_parameters = self.get_parameters()
# self.get_step_calculations(self.optimize).latest('id')
n_points = self.get_attribute('n_points')
test_range = np.sort(self.get_attribute('test_range'))
total_range = self.get_attribute('total_range')
interval = self.get_attribute('interval')
clock = self.get_attribute('clock')
total_range = abs(test_range[1] - test_range[0])
max = self.get_attribute('max')
min = self.get_attribute('min')
wf_max = None
wf_min = None
self.append_to_report('test range {}'.format(test_range))
self.append_to_report('interval {}'.format(interval))
wf_origin = self.get_step('start').get_sub_workflows()[0].get_step('start').get_sub_workflows()[0]
wf_complete_list = list(self.get_step('pressure_expansions').get_sub_workflows())
if self.get_step('collect_data') is not None:
wf_complete_list += list(self.get_step('collect_data').get_sub_workflows())
# wf_min, wf_max = list(self.get_step('pressure_expansions').get_sub_workflows())[-2:]
for wf_test in wf_complete_list:
if np.isclose(wf_test.get_attribute('pressure'), test_range[0], atol=interval / 4, rtol=0):
# if wf_test.get_attribute('pressure') == test_range[0]:
wf_min = wf_test
if np.isclose(wf_test.get_attribute('pressure'), test_range[1], atol=interval / 4, rtol=0):
#if wf_test.get_attribute('pressure') == test_range[1]:
wf_max = wf_test
if wf_max is None or wf_min is None:
self.append_to_report('Something wrong with volumes: {}'.format(test_range))
self.next(self.exit)
return
ok_inf = check_dos_stable(wf_min, tol=1e-6)
ok_sup = check_dos_stable(wf_max, tol=1e-6)
self.append_to_report('DOS stable | inf:{} sup:{}'.format(ok_inf, ok_sup))
if not ok_sup or not ok_inf:
self.append_to_report('No-OK total_range {}, interval {}, e_points {}, n_points {}'.format(total_range,
interval,
total_range / interval,
n_points))
if total_range / interval < n_points:
interval *= 0.5
if not ok_sup:
test_range[1] -= interval
if not ok_inf:
test_range[0] += interval
if np.isclose(test_range[0], test_range[1], atol=interval/4, rtol=0) or interval < 0.001:
self.next(self.exit)
self.append_to_report('Stable range not found')
return
if ok_inf and ok_sup:
# if max is None:
# max = test_range[1]
# if min is None:
# min = test_range[0]
try:
min_stress, max_stress = qha_prediction(self, interval, min, max)
self.append_to_report('Using QHA prediction')
except:
min_stress, max_stress = gruneisen_predict(wf_origin, wf_min, wf_max)
self.append_to_report('Using Gruneisen prediction')
self.append_to_report('stresses prediction min:{} max:{}'.format(min_stress, max_stress))
if (max is None or
max > test_range[1] > max_stress or
# max < test_range[1] < max_stress or
max < max_stress and max < test_range[1]):
max = test_range[1]
if (min is None or
min < test_range[0] < min_stress or
# min > test_range[0] > min_stress
min > min_stress and min > test_range[0]):
min = test_range[0]
self.append_to_report('n_point estimation {}'.format(abs(max - min) / interval))
if abs(max - min) / interval > n_points:
self.append_to_report('Exit: min {}, max {}'.format(min, max))
self.next(self.complete)
return
# Adjust factor
acceptable_expansion_range = abs(max - min) * 0.5
if (abs(max - min) / interval > n_points and
max_stress < max < max_stress + acceptable_expansion_range and
min_stress > min > min_stress - acceptable_expansion_range):
self.append_to_report('Exit perfect: min {}, max {}'.format(min, max))
self.next(self.complete)
return
if abs(max_stress - test_range[1]) < interval and abs(test_range[0] - min_stress) < interval:
interval *= 0.5
## Safely measure if the test pressures becomes too close (prevents inactive loop, can be ommited)
# if min_stress > test_range[0] and max_stress < test_range[1] and total_range / interval < 3:
# interval *= 0.5
if abs(test_range[1] - test_range[0])/interval < 1:
interval *= 0.5
if max_stress > test_range[1]:
self.append_to_report('Increase max {} + {}'.format(test_range[1],
np.ceil(np.min([total_range/2, abs(max_stress - test_range[1])]) / interval) * interval))
test_range[1] += np.ceil(np.min([total_range/2, abs(max_stress - test_range[1])]) / interval) * interval
else:
self.append_to_report('Decrease max {} - {}'.format(test_range[1],
np.ceil(np.min([total_range / 2, abs(max_stress - test_range[1])]) / interval) * interval))
test_range[1] -= np.ceil(np.min([total_range / 2, abs(max_stress - test_range[1])]) / interval) * interval
if min_stress < test_range[0]:
self.append_to_report('Increase min {} - {}'.format(test_range[0],
np.ceil(np.min([total_range / 2, abs(test_range[0] - min_stress)]) / interval) * interval))
test_range[0] -= np.ceil(np.min([total_range/2, abs(test_range[0] - min_stress)]) / interval) * interval
else:
self.append_to_report('Decrease min {} + {}'.format(test_range[0],
np.ceil(np.min([total_range/2, abs(test_range[0] - min_stress)]) / interval) * interval))
test_range[0] += np.ceil(np.min([total_range/2, abs(test_range[0] - min_stress)]) / interval) * interval
total_range = abs(test_range[1] - test_range[0])
#total_range = abs(max - min)
self.add_attribute('max', max)
self.add_attribute('min', min)
self.add_attribute('test_range', test_range.tolist())
self.add_attribute('total_range', total_range)
self.add_attribute('interval', interval)
self.add_attribute('clock', clock)
test_pressures = [test_range[0], test_range[1]] # in kbar
# Be efficient
if min is not None and max is not None:
self.append_to_report('Info min {}, max {}, interval {}'.format(min, max, interval))
test_pressures += np.arange(min, max, interval).tolist()
# Remove self duplicates
test_pressures = np.array(test_pressures)
indexes = np.unique(np.round(test_pressures, decimals=4), return_index=True)[1]
test_pressures = test_pressures[indexes].tolist()
self.append_to_report('test_pressures {}'.format(test_pressures))
if len(test_pressures) > n_points * 2:
self.append_to_report('Safety exit (not converged): n_press {}'.format(len(test_pressures)))
self.next(self.complete)
return
# Remove duplicates
for wf_test in wf_complete_list:
for pressure in list(test_pressures):
#self.append_to_report('compare: {} {}'.format(wf_test.get_attribute('pressure'), pressure))
if np.isclose(wf_test.get_attribute('pressure'), pressure, atol=interval/4, rtol=0):
test_pressures.remove(pressure)
# self.append_to_report('IS close! -> remove {}'.format(pressure))
self.append_to_report('pressure list (no duplicates){}'.format(test_pressures))
for pressure in test_pressures:
# self.append_to_report('pressure: {}'.format(pressure))
# Submit workflow
wf = WorkflowPhonon(params=wf_parameters,
pressure=pressure,
optimize=True,
include_born=self.get_attribute('include_born'))
wf.store()
# wf = load_workflow(wfs_test[i])
self.attach_workflow(wf)
wf.start()
# self.append_to_report('Info min {}, max {}, n_points {} interval {}'.format(min, max, abs(max - min) / interval, interval))
if len(test_pressures) > n_points * 1.2:
self.append_to_report('Safety exit (not converged), n_points: {}'.format(len(test_pressures)))
self.next(self.complete)
return
self.next(self.collect_data)
@Workflow.step
def complete(self):
wf_parameters = self.get_parameters()
test_range = self.get_attribute('test_range')
# self.get_step_calculations(self.optimize).latest('id')
interval = self.get_attribute('interval')
max = self.get_attribute('max')
min = self.get_attribute('min')
n_points = int((max - min) / interval) + 1
test_pressures = [min + interval * i for i in range(n_points)]
self.append_to_report('final pressure list: {}'.format(test_pressures))
# Remove duplicates
wf_complete_list = list(self.get_step('pressure_expansions').get_sub_workflows())
wf_complete_list += list(self.get_step('collect_data').get_sub_workflows())
try:
wf_complete_list += list(self.get_step('complete').get_sub_workflows())
except:
self.append_to_report('First completion step, it is OK!')
# Remove duplicates
for wf_test in wf_complete_list:
for pressure in list(test_pressures):
try:
if wf_test.get_state() == 'ERROR':
wf_test.add_attribute('pressure', 'error')
else:
# self.append_to_report('compare: {} {}'.format(wf_test.get_attribute('pressure'), pressure))
if np.isclose(wf_test.get_attribute('pressure'), pressure, atol=interval/4., rtol=0):
# To make sure that the calculation did not fail and if it is the case give a second
# chance to finish correctly
test_pressures.remove(pressure)
# self.append_to_report('IS close! -> remove {}'.format(pressure))
except:
wf_test.add_attribute('pressure', 'error')
min_stress, max_stress = qha_prediction(self, interval, min, max)
self.append_to_report('Semi QHA prediction {} {}'.format(min_stress, max_stress))
for pressure in test_pressures:
self.append_to_report('pressure: {}'.format(pressure))
# Submit workflow
wf = WorkflowPhonon(params=wf_parameters,
pressure=pressure,
optimize=True,
include_born=self.get_attribute('include_born'))
wf.store()
# wf = load_workflow(wfs_test[i])
self.attach_workflow(wf)
wf.start()
if len(test_pressures):
self.append_to_report('Not yet completed, {} left'.format(len(test_pressures)))
# self.next(self.complete)
self.next(self.qha_calculation)
else:
self.next(self.qha_calculation)
@Workflow.step
def qha_calculation(self):
interval = self.get_attribute('interval')
if self.get_attribute('manual'):
test_pressures = self.get_parameter('scan_pressures') # in kbar
else:
max = self.get_attribute('max')
min = self.get_attribute('min')
n_points = int((max - min) / interval) + 1
test_pressures = [min + interval * i for i in range(n_points)]
min_stress, max_stress = qha_prediction(self, interval, min, max)
self.append_to_report('Final QHA prediction {} {}'.format(min_stress, max_stress))
# Workflow list
wf_complete_list = []
for step_name in ['pressure_expansions', 'collect_data', 'complete', 'pressure_manual_expansions',
'pressure_gruneisen']:
if self.get_step(step_name):
wf_complete_list += list(self.get_step(step_name).get_sub_workflows())
# Add phonon workflow at 0 pressure from gruneisen workflow if exists
try:
wf_complete_list += list(
self.get_step('start').get_sub_workflows()[0].get_step('start').get_sub_workflows())
except:
pass
inline_params = {}
for wf_test in wf_complete_list:
for i, pressure in enumerate(test_pressures):
if wf_test.get_state() == 'FINISHED':
if np.isclose(wf_test.get_attribute('pressure'), pressure, atol=interval / 4, rtol=0):
thermal_properties = wf_test.get_result('thermal_properties')
optimized_data = wf_test.get_result('optimized_structure_data')
final_structure = wf_test.get_result('final_structure')
inline_params.update({'thermal_properties_{}'.format(i): thermal_properties})
inline_params.update({'optimized_structure_data_{}'.format(i): optimized_data})
inline_params.update({'final_structure_{}'.format(i): final_structure})
qha_result = calculate_qha_inline(**inline_params)[1]
self.add_result('qha_output', qha_result['qha_output'])
# self.next(self.store_final_info)
self.next(self.qha_calculation_write_files)
@Workflow.step
def qha_calculation_write_files(self):
data_folder = self.current_folder.get_subfolder('DATA_FILES')
data_folder.create()
############################
# Get harmonic results
############################
try:
wf_zero = self.get_step('start').get_sub_workflows()[0].get_step('start').get_sub_workflows()[0]
except IndexError:
wf_complete_list = list(self.get_step('pressure_manual_expansions').get_sub_workflows())
for wf_test in wf_complete_list:
if np.isclose(wf_test.get_attribute('pressure'), 0, atol=1e-4, rtol=0):
wf_zero = wf_test
break
final_structure = wf_zero.get_result('final_structure')
norm_unitformula_to_unitcell = gcd([site.kind_name for site in final_structure.sites])
# Get data and write the files
thermal_properties = wf_zero.get_result('thermal_properties')
dos = wf_zero.get_result('dos')
band_structure = wf_zero.get_result('band_structure')
entropy = thermal_properties.get_array('entropy')
free_energy = thermal_properties.get_array('free_energy')
temperatures = thermal_properties.get_array('temperature')
cv = thermal_properties.get_array('cv')
# Normalize from unitcell to unitformula
free_energy /= norm_unitformula_to_unitcell
entropy /= norm_unitformula_to_unitcell
cv /= norm_unitformula_to_unitcell
# Density of states
freq_dos = dos.get_array('frequency')
total_dos = dos.get_array('total_dos')
partial_symbols = dos.get_array('partial_symbols')
partial_dos = dos.get_array('partial_dos')
# Check atom equivalences in partial DOS
delete_list = []
for i, dos_i in enumerate(partial_dos):
for j, dos_j in enumerate(partial_dos):
if i < j:
if np.allclose(dos_i, dos_j, rtol=1, atol=1e-8) and partial_symbols[i] == partial_symbols[j]:
dos_i += dos_j
delete_list.append(j)
partial_dos = np.delete(partial_dos, delete_list, 0).T
partial_symbols = np.delete(partial_symbols, delete_list)
data_folder.create_file_from_filelike(get_file_from_numpy_array(zip(freq_dos, total_dos)),
'total_dos')
data_folder.create_file_from_filelike(get_file_from_numpy_array(np.column_stack((freq_dos, partial_dos)),
text_list=['T'] + partial_symbols.tolist()),
'partial_dos')
# Thermal properties
data_folder.create_file_from_filelike(
get_file_from_numpy_array(np.column_stack((temperatures, entropy, free_energy, cv))), 'thermal_properties')
# Phonon band structure
band_array = []
for i, freq in enumerate(band_structure.get_array('frequencies')):
for j, q in enumerate(band_structure.get_array('q_path')[i]):
band_array.append([q] + freq[j].tolist())
band_array = np.array(band_array)
data_folder.create_file_from_filelike(get_file_from_numpy_array(band_array), 'phonon_band_structure')
x_labels, labels_e = arrange_band_labels(band_structure)
output = StringIO.StringIO()
for i, j in zip(x_labels, labels_e):
output.write(u'{0:12.8f} {1}\n'.format(i, j).encode('utf-8'))
output.seek(0)
data_folder.create_file_from_filelike(output, 'band_structure_labels')
self.append_to_report('Harmonic data written in files')
############################
# Get structure
############################
import pymatgen.io.cif as cif
pmg_structure = final_structure.get_pymatgen_structure()
cif.CifWriter(pmg_structure, symprec=0.1).write_file(data_folder.abspath + '/structure.cif')
# Save info data
info_data = StringIO.StringIO()
info_data.write(get_data_info(final_structure))
info_data.seek(0)
data_folder.create_file_from_filelike(info_data, 'info_data.html')
############################
# Get gruneisen results
############################
try:
wf_grune = self.get_step('start').get_sub_workflows()[0]
mesh = wf_grune.get_result('mesh')
freq_grune = mesh.get_array('frequencies')
param_grune = mesh.get_array('gruneisen')
data_folder.create_file_from_filelike(get_file_from_numpy_array(
np.column_stack((freq_grune.reshape(-1), param_grune.reshape(-1)))), 'gruneisen_mesh')
band_structure = wf_grune.get_result('band_structure')
q_tolerance = 1e-5
band_array = []
for i , freq in enumerate(band_structure.get_array('gruneisen')):
for j, q in enumerate(band_structure.get_array('q_path')[i]):
print 'q', q
if np.linalg.norm( band_structure.get_array('q_points')[i,j]) > q_tolerance:
band_array.append( [q] + freq[j].tolist())
# else:
# band_array.append( [np.nan] + freq[j].tolist())
band_array.append( [np.nan] + freq[0].tolist())
band_array = np.array(band_array)
data_folder.create_file_from_filelike(get_file_from_numpy_array(band_array), 'gruneisen_band_structure')
except IndexError:
self.append_to_report('Gruneisen calculation not available')
####################
# Get QHA results
####################
qha_output = self.get_result('qha_output')
#free_energy_volume_fitting = get_helmholtz_volume_from_phonopy_qha(phonopy_qha)
qha_temperatures = qha_output.get_array('temperatures')
# helmholtz_volume = phonopy_qha.get_helmholtz_volume()
thermal_expansion = qha_output.get_array('thermal_expansion')
volume_temperature = qha_output.get_array('volume_temperature')
heat_capacity_P_numerical = qha_output.get_array('heat_capacity_P_numerical')/norm_unitformula_to_unitcell
volume_expansion = qha_output.get_array('volume_expansion')
gibbs_temperature = qha_output.get_array('gibbs_temperature')
volumes = qha_output.get_array('helmholtz_volume_points')[0]
helmholtz_volume = qha_output.get_array('helmholtz_volume_points')[1]
volumes_fit = qha_output.get_array('helmholtz_volume_fit')[0]
helmholtz_volume_fit = qha_output.get_array('helmholtz_volume_fit')[1]
volumes_min = qha_output.get_array('helmholtz_volume_minimum')[0]
helmholtz_volume_min = qha_output.get_array('helmholtz_volume_minimum')[1]
data_folder.create_file_from_filelike(get_file_from_numpy_array(np.column_stack((volumes_fit, helmholtz_volume_fit.T))),
'free_energy_fit')
data_folder.create_file_from_filelike(get_file_from_numpy_array(np.column_stack((volumes, helmholtz_volume.T))),
'free_energy_points')
data_folder.create_file_from_filelike(get_file_from_numpy_array(zip(volumes_min, helmholtz_volume_min)),
'free_energy_min')
data_folder.create_file_from_filelike(get_file_from_numpy_array(zip(qha_temperatures, gibbs_temperature)),
'gibbs_temperature')
data_folder.create_file_from_filelike(get_file_from_numpy_array(zip(qha_temperatures, volume_expansion)),
'volume_expansion')
data_folder.create_file_from_filelike(get_file_from_numpy_array(zip(qha_temperatures, volume_temperature)),
'volume_temperature')
data_folder.create_file_from_filelike(get_file_from_numpy_array(zip(qha_temperatures, thermal_expansion)),
'thermal_expansion')
data_folder.create_file_from_filelike(get_file_from_numpy_array(zip(qha_temperatures, heat_capacity_P_numerical)),
'heat_capacity_P_numerical')
self.append_to_report('QHA properties calculated and written in files')
self.next(self.store_final_info)
@Workflow.step
def store_final_info(self):
from phonon_common import structure_to_poscar, get_FORCE_CONSTANTS_txt, get_file_from_txt
interval = self.get_attribute('interval')
if self.get_attribute('manual'):
test_pressures = self.get_parameter('scan_pressures') # in kbar
else:
max = self.get_attribute('max')
min = self.get_attribute('min')
n_points = int((max - min) / interval) + 1
test_pressures = [min + interval * i for i in range(n_points)]
min_stress, max_stress = qha_prediction(self, interval, min, max)
self.append_to_report('Final QHA prediction {} {}'.format(min_stress, max_stress))
# Workflow list
wf_complete_list = []
for step_name in ['pressure_expansions', 'collect_data', 'complete', 'pressure_manual_expansions',
'pressure_gruneisen']:
if self.get_step(step_name):
wf_complete_list += list(self.get_step(step_name).get_sub_workflows())
# Add phonon workflow at 0 pressure from gruneisen workflow if exists
try:
wf_complete_list += list(
self.get_step('start').get_sub_workflows()[0].get_step('start').get_sub_workflows())
except:
pass
final_list = []
for wf_test in wf_complete_list:
for i, pressure in enumerate(test_pressures):
if wf_test.get_state() == 'FINISHED':
if np.isclose(wf_test.get_attribute('pressure'), pressure, atol=interval / 4, rtol=0):
final_list.append(wf_test)
data_folder = self.current_folder.get_subfolder('DETAILS')
data_folder.create()
for i, wf_test in enumerate(final_list):
data_phonon_folder = data_folder.get_subfolder('phonon_{}'.format(i))
data_phonon_folder.create()
# Get optimized info data
pressure = wf_test.get_attribute('pressure')
energy = wf_test.get_result('optimized_structure_data').dict.energy
info_data = 'pressure: {}\nenergy: {}\n'.format(pressure, energy)
# Get data and write the files
thermal_properties = wf_test.get_result('thermal_properties')
dos = wf_test.get_result('dos')
# band_structure = wf_test.get_result('band_structure')
force_constants = wf_test.get_result('force_constants')
final_structure = wf_test.get_result('final_structure')
entropy = thermal_properties.get_array('entropy')
free_energy = thermal_properties.get_array('free_energy')
temperatures = thermal_properties.get_array('temperature')
cv = thermal_properties.get_array('cv')
# Density of states
freq_dos = dos.get_array('frequency')
total_dos = dos.get_array('total_dos')
# partial_symbols = dos.get_array('partial_symbols')
# partial_dos = dos.get_array('partial_dos')
# write files
self.append_to_report('data to be stored in: {}'.format(data_folder.abspath))
data_phonon_folder.create_file_from_filelike(get_file_from_txt(info_data),
'info_data')
data_phonon_folder.create_file_from_filelike(get_file_from_txt(get_FORCE_CONSTANTS_txt(force_constants)),
'FORCE_CONSTANTS')
data_phonon_folder.create_file_from_filelike(get_file_from_txt(structure_to_poscar(final_structure)),
'POSCAR')
data_phonon_folder.create_file_from_filelike(get_file_from_numpy_array(zip(freq_dos, total_dos)),
'total_dos')
data_phonon_folder.create_file_from_filelike(get_file_from_numpy_array(zip(temperatures, entropy)),
'gibbs_temperature')
data_phonon_folder.create_file_from_filelike(get_file_from_numpy_array(zip(temperatures, free_energy)),
'volume_temperature')
data_phonon_folder.create_file_from_filelike(get_file_from_numpy_array(zip(temperatures, cv)),
'volume_temperature')
self.append_to_report('stored data in: {}'.format(data_folder.abspath))
self.next(self.exit)
| abelcarreras/aiida_extensions | workflows/wf_qha.py | Python | mit | 48,077 | [
"CRYSTAL",
"phonopy",
"pymatgen"
] | eea1363afdad4fbaaf44626fd64165b9091059d4c2ed74ee2e9cb5eb8931dba4 |
import threading
import functools
try:
from Queue import Queue
except ImportError:
from queue import Queue
from os import getenv
from .client import JobClient
from .client import InputCachingJobClient
from .client import MessageJobClient
from .client import MessageCLIJobClient
from .interface import HttpPulsarInterface
from .interface import LocalPulsarInterface
from .object_client import ObjectStoreClient
from .transport import get_transport
from .util import TransferEventManager
from .destination import url_to_destination_params
from .amqp_exchange_factory import get_exchange
from logging import getLogger
log = getLogger(__name__)
DEFAULT_TRANSFER_THREADS = 2
def build_client_manager(**kwargs):
if 'job_manager' in kwargs:
return ClientManager(**kwargs) # TODO: Consider more separation here.
elif kwargs.get('amqp_url', None):
return MessageQueueClientManager(**kwargs)
else:
return ClientManager(**kwargs)
class ClientManager(object):
"""
Factory to create Pulsar clients, used to manage potential shared
state between multiple client connections.
"""
def __init__(self, **kwds):
if 'job_manager' in kwds:
self.job_manager_interface_class = LocalPulsarInterface
self.job_manager_interface_args = dict(job_manager=kwds['job_manager'], file_cache=kwds['file_cache'])
else:
self.job_manager_interface_class = HttpPulsarInterface
transport_type = kwds.get('transport', None)
transport = get_transport(transport_type)
self.job_manager_interface_args = dict(transport=transport)
cache = kwds.get('cache', None)
if cache is None:
cache = _environ_default_int('PULSAR_CACHE_TRANSFERS')
if cache:
log.info("Setting Pulsar client class to caching variant.")
self.client_cacher = ClientCacher(**kwds)
self.client_class = InputCachingJobClient
self.extra_client_kwds = {"client_cacher": self.client_cacher}
else:
log.info("Setting Pulsar client class to standard, non-caching variant.")
self.client_class = JobClient
self.extra_client_kwds = {}
def get_client(self, destination_params, job_id, **kwargs):
destination_params = _parse_destination_params(destination_params)
destination_params.update(**kwargs)
job_manager_interface_class = self.job_manager_interface_class
job_manager_interface_args = dict(destination_params=destination_params, **self.job_manager_interface_args)
job_manager_interface = job_manager_interface_class(**job_manager_interface_args)
return self.client_class(destination_params, job_id, job_manager_interface, **self.extra_client_kwds)
def shutdown(self, ensure_cleanup=False):
pass
try:
from galaxy.jobs.runners.util.cli import factory as cli_factory
except ImportError:
from pulsar.managers.util.cli import factory as cli_factory
class MessageQueueClientManager(object):
def __init__(self, **kwds):
self.url = kwds.get('amqp_url')
self.manager_name = kwds.get("manager", None) or "_default_"
self.exchange = get_exchange(self.url, self.manager_name, kwds)
self.status_cache = {}
self.callback_lock = threading.Lock()
self.callback_thread = None
self.active = True
def callback_wrapper(self, callback, body, message):
if not self.active:
log.debug("Obtained update message for inactive client manager, attempting requeue.")
try:
message.requeue()
log.debug("Requeue succeeded, will likely be handled next time consumer is enabled.")
except Exception:
log.debug("Requeue failed, message may be lost?")
return
try:
if "job_id" in body:
job_id = body["job_id"]
self.status_cache[job_id] = body
log.debug("Handling asynchronous status update from remote Pulsar.")
callback(body)
except Exception:
log.exception("Failure processing job status update message.")
except BaseException as e:
log.exception("Failure processing job status update message - BaseException type %s" % type(e))
finally:
message.ack()
def callback_consumer(self, callback_wrapper):
try:
self.exchange.consume("status_update", callback_wrapper, check=self)
except Exception:
log.exception("Exception while handling status update messages, this shouldn't really happen. Handler should be restarted.")
finally:
log.debug("Leaving Pulsar client status update thread, no additional Pulsar updates will be processed.")
def ensure_has_status_update_callback(self, callback):
with self.callback_lock:
if self.callback_thread is not None:
return
callback_wrapper = functools.partial(self.callback_wrapper, callback)
run = functools.partial(self.callback_consumer, callback_wrapper)
thread = threading.Thread(
name="pulsar_client_%s_status_update_callback" % self.manager_name,
target=run
)
thread.daemon = False # Lets not interrupt processing of this.
thread.start()
self.callback_thread = thread
def shutdown(self, ensure_cleanup=False):
self.active = False
if ensure_cleanup:
self.callback_thread.join()
def __nonzero__(self):
return self.active
def get_client(self, destination_params, job_id, **kwargs):
if job_id is None:
raise Exception("Cannot generate Pulsar client for empty job_id.")
destination_params = _parse_destination_params(destination_params)
destination_params.update(**kwargs)
if 'shell_plugin' in destination_params:
shell = cli_factory.get_shell(destination_params)
return MessageCLIJobClient(destination_params, job_id, self, shell)
else:
return MessageJobClient(destination_params, job_id, self)
class ObjectStoreClientManager(object):
def __init__(self, **kwds):
if 'object_store' in kwds:
self.interface_class = LocalPulsarInterface
self.interface_args = dict(object_store=kwds['object_store'])
else:
self.interface_class = HttpPulsarInterface
transport_type = kwds.get('transport', None)
transport = get_transport(transport_type)
self.interface_args = dict(transport=transport)
self.extra_client_kwds = {}
def get_client(self, client_params):
interface_class = self.interface_class
interface_args = dict(destination_params=client_params, **self.interface_args)
interface = interface_class(**interface_args)
return ObjectStoreClient(interface)
class ClientCacher(object):
def __init__(self, **kwds):
self.event_manager = TransferEventManager()
default_transfer_threads = _environ_default_int('PULSAR_CACHE_THREADS', DEFAULT_TRANSFER_THREADS)
num_transfer_threads = int(kwds.get('transfer_threads', default_transfer_threads))
self.__init_transfer_threads(num_transfer_threads)
def queue_transfer(self, client, path):
self.transfer_queue.put((client, path))
def acquire_event(self, input_path):
return self.event_manager.acquire_event(input_path)
def _transfer_worker(self):
while True:
transfer_info = self.transfer_queue.get()
try:
self.__perform_transfer(transfer_info)
except BaseException as e:
log.warn("Transfer failed.")
log.exception(e)
pass
self.transfer_queue.task_done()
def __perform_transfer(self, transfer_info):
(client, path) = transfer_info
event_holder = self.event_manager.acquire_event(path, force_clear=True)
failed = True
try:
client.cache_insert(path)
failed = False
finally:
event_holder.failed = failed
event_holder.release()
def __init_transfer_threads(self, num_transfer_threads):
self.num_transfer_threads = num_transfer_threads
self.transfer_queue = Queue()
for i in range(num_transfer_threads):
t = threading.Thread(target=self._transfer_worker)
t.daemon = True
t.start()
def _parse_destination_params(destination_params):
try:
unicode_type = unicode
except NameError:
unicode_type = str
if isinstance(destination_params, str) or isinstance(destination_params, unicode_type):
destination_params = url_to_destination_params(destination_params)
return destination_params
def _environ_default_int(variable, default="0"):
val = getenv(variable, default)
int_val = int(default)
if str(val).isdigit():
int_val = int(val)
return int_val
__all__ = [
'ClientManager',
'ObjectStoreClientManager',
'HttpPulsarInterface'
]
| jmchilton/pulsar | pulsar/client/manager.py | Python | apache-2.0 | 9,260 | [
"Galaxy"
] | 888605fbddc3e401d804861fd9576442e12e8991ecf0347226be276a703c835e |
# Copyright 2005-2008 by Frank Kauff & Cymon J. Cox. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Linked list functionality for use in Bio.Nexus.
Provides functionality of a linked list.
Each node has one (or none) predecessor, and an arbitrary number of successors.
Nodes can store arbitrary data in a NodeData class.
Subclassed by Nexus.Trees to store phylogenetic trees.
Bug reports to Frank Kauff (fkauff@biologie.uni-kl.de)
"""
class ChainException(Exception):
pass
class NodeException(Exception):
pass
class Chain(object):
"""Stores a list of nodes that are linked together."""
def __init__(self):
"""Initiates a node chain."""
self.chain = {}
self.id = -1
def _get_id(self):
"""Gets a new id for a node in the chain."""
self.id += 1
return self.id
def all_ids(self):
"""Return a list of all node ids."""
return list(self.chain.keys())
def add(self, node, prev=None):
"""Attaches node to another."""
if prev is not None and prev not in self.chain:
raise ChainException('Unknown predecessor: ' + str(prev))
else:
id = self._get_id()
node.set_id(id)
node.set_prev(prev)
if prev is not None:
self.chain[prev].add_succ(id)
self.chain[id] = node
return id
def collapse(self, id):
"""Deletes node from chain and relinks successors to predecessor."""
if id not in self.chain:
raise ChainException('Unknown ID: ' + str(id))
prev_id = self.chain[id].get_prev()
self.chain[prev_id].remove_succ(id)
succ_ids = self.chain[id].get_succ()
for i in succ_ids:
self.chain[i].set_prev(prev_id)
self.chain[prev_id].add_succ(succ_ids)
node = self.chain[id]
self.kill(id)
return node
def kill(self, id):
"""Kills a node from chain without caring to what it is connected."""
if id not in self.chain:
raise ChainException('Unknown ID: ' + str(id))
else:
del self.chain[id]
def unlink(self, id):
"""Disconnects node from his predecessor."""
if id not in self.chain:
raise ChainException('Unknown ID: ' + str(id))
else:
prev_id = self.chain[id].prev
if prev_id is not None:
self.chain[prev_id].succ.pop(self.chain[prev_id].succ.index(id))
self.chain[id].prev = None
return prev_id
def link(self, parent, child):
"""Connects son to parent."""
if child not in self.chain:
raise ChainException('Unknown ID: ' + str(child))
elif parent not in self.chain:
raise ChainException('Unknown ID: ' + str(parent))
else:
self.unlink(child)
self.chain[parent].succ.append(child)
self.chain[child].set_prev(parent)
def is_parent_of(self, parent, grandchild):
"""Check if grandchild is a subnode of parent."""
if grandchild == parent or grandchild in self.chain[parent].get_succ():
return True
else:
for sn in self.chain[parent].get_succ():
if self.is_parent_of(sn, grandchild):
return True
else:
return False
def trace(self, start, finish):
"""Returns a list of all node_ids between two nodes (excluding start, including end)."""
if start not in self.chain or finish not in self.chain:
raise NodeException('Unknown node.')
if not self.is_parent_of(start, finish) or start == finish:
return []
for sn in self.chain[start].get_succ():
if self.is_parent_of(sn, finish):
return [sn] + self.trace(sn, finish)
class Node(object):
    """A single node of a chain.

    Stores an id, an arbitrary data payload, the id of one predecessor
    and a list of successor ids.
    """
    def __init__(self, data=None):
        """Represents a node with one predecessor and multiple successors."""
        self.id = None    # assigned exactly once via set_id()
        self.data = data  # arbitrary payload
        self.prev = None  # id of the predecessor node
        self.succ = []    # ids of the successor nodes
    def set_id(self, id):
        """Sets the id of a node, if not set yet."""
        if self.id is not None:
            raise NodeException('Node id cannot be changed.')
        self.id = id
    def get_id(self):
        """Returns the node's id."""
        return self.id
    def get_succ(self):
        """Returns a list of the node's successors."""
        return self.succ
    def get_prev(self):
        """Returns the id of the node's predecessor."""
        return self.prev
    def add_succ(self, id):
        """Adds a node id (or a list of ids) to the node's successors."""
        # isinstance(..., list) replaces the dated type([]) comparison idiom.
        if isinstance(id, list):
            self.succ.extend(id)
        else:
            self.succ.append(id)
    def remove_succ(self, id):
        """Removes a node id from the node's successors."""
        self.succ.remove(id)
    def set_succ(self, new_succ):
        """Sets the node's successors."""
        if not isinstance(new_succ, list):
            raise NodeException('Node successor must be of list type.')
        self.succ = new_succ
    def set_prev(self, id):
        """Sets the node's predecessor."""
        self.prev = id
    def get_data(self):
        """Returns a node's data."""
        return self.data
    def set_data(self, data):
        """Sets a node's data."""
        self.data = data
| zjuchenyuan/BioWeb | Lib/Bio/Nexus/Nodes.py | Python | mit | 5,576 | [
"Biopython"
] | 243badbe8bb784f302cc9d078e6795b4981b0ab5504691cf5f9c40a23e21f2e1 |
# This file is part of Fedora Community.
# Copyright (C) 2008-2010 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from random import random

import tw2.core as twc
from tw2.jquery import jQuery, jquery_js
from tw2.jqplugins.flot import flot_css, flot_js
from tw2.excanvas import excanvas_js
class FlotWidget(twc.Widget):
    """ An attractive plotting widget.
    Using Flot, a pure Javascript plotting library for jQuery,
    this widget produces client-side graphical plots of arbitrary datasets
    on-the-fly.
    For detailed documentation on the flot API, visit the flot project
    homepage: http://code.google.com/p/flot
    """
    template = u"""
      % if w.label:
        <h3>${w.label}</h3>
      % endif
      <div id="${w.id}" style="width:${w.width};height:${w.height};">
      </div>
      <script>
        % if w.tooltips:
            function showTooltip(x, y, contents) {
                $('<div id="tooltip">' + contents + '</div>').css( {
                    position: 'absolute',
                    display: 'none',
                    top: y + 5,
                    left: x + 5,
                    border: '1px solid #fdd',
                    padding: '2px',
                    'background-color': '#fee',
                    opacity: 0.80
                }).appendTo("body").fadeIn(200);
            }
            var previousPoint = null;
            $("#${w.id}").bind("plothover", function (event, pos, item) {
                $("#x").text(pos.x.toFixed(2));
                $("#y").text(pos.y.toFixed(2));
                if (item) {
                    if (previousPoint != item.datapoint) {
                        previousPoint = item.datapoint;
                        $("#tooltip").remove();
                        var x = item.datapoint[0].toFixed(2),
                            y = item.datapoint[1].toFixed(2);
                        showTooltip(item.pageX, item.pageY, y);
                    }
                }
                else {
                    $("#tooltip").remove();
                    previousPoint = null;
                }
            });
        % endif
        $(document).ready(function(){
            if (!${w.data}) {
                $('#${w.id}').text('Data not ready for display \u2014 sorry!');
            } else {
                $.plot($('#${w.id}'), ${w.data}, ${w.options});
            }
        });
      </script>
    """
    inline_engine_name = 'mako'
    data = twc.Param("An array of data series", default=None)
    options = twc.Param("Plot options", default=None)
    height = twc.Param("The height of the graph", default='300px')
    width = twc.Param("The width of the graph", default='600px')
    label = twc.Param("Label for the graph", default='')
    tooltips = twc.Param("Enable onhover tooltips", default=False)
    resources = [jquery_js, flot_js, excanvas_js, flot_css]
    def prepare(self):
        """Fill in defaults (id, data, options) before the widget renders."""
        # BUG FIX: the parent hook was referenced but never invoked --
        # ``super(...).prepare`` without parentheses is a no-op attribute
        # access, so tw2's own prepare() step was silently skipped.
        super(FlotWidget, self).prepare()
        if not self.id:
            # Random suffix keeps multiple plots on one page distinct
            # (``random`` is provided by the module-level import).
            self.id = 'flot_%s' % str(int(random() * 999))
        if not self.data:
            self.data = []
        if not self.options:
            self.options = {}
| Fale/fedora-packages | fedoracommunity/widgets/flot.py | Python | agpl-3.0 | 3,776 | [
"VisIt"
] | ff1acd1b2c04785a6d8db2308f1b7514f9e1288e692ee957b2f39d37607ff1ef |
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Eric Larson <larson.eric.d@gmail.com>
# Guillaume Favelier <guillaume.favelier@gmail.com>
#
# License: Simplified BSD
import contextlib
from functools import partial
import os
import sys
import time
import traceback
import warnings
import numpy as np
from . import _Brain
from ..utils import _check_option, _show_help, _get_color_list, tight_layout
from ...externals.decorator import decorator
from ...source_space import vertex_to_mni
from ...utils import _ReuseCycle, warn, copy_doc
@decorator
def run_once(fun, *args, **kwargs):
    """Run the function only once."""
    # A marker attribute on the wrapped function records the first call;
    # every later invocation falls through and returns None.
    if hasattr(fun, "_has_run"):
        return None
    fun._has_run = True
    return fun(*args, **kwargs)
@decorator
def safe_event(fun, *args, **kwargs):
    """Protect against PyQt5 exiting on event-handling errors."""
    # Any exception raised by the handler is reported on stderr instead of
    # propagating into the Qt event loop.
    try:
        result = fun(*args, **kwargs)
    except Exception:
        traceback.print_exc(file=sys.stderr)
        return None
    return result
class MplCanvas(object):
    """Ultimately, this is a QWidget (as well as a FigureCanvasAgg, etc.).

    Hosts the matplotlib figure used to display picked time courses,
    either embedded in the main window or in its own window depending on
    ``time_viewer.separate_canvas``.
    """
    def __init__(self, time_viewer, width, height, dpi):
        # Local imports keep PyQt5/matplotlib GUI machinery out of module
        # import time.
        from PyQt5 import QtWidgets
        from matplotlib.figure import Figure
        from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
        # Separate canvas -> top-level window; otherwise embed in the
        # time viewer's main window.
        if time_viewer.separate_canvas:
            parent = None
        else:
            parent = time_viewer.window
        self.fig = Figure(figsize=(width, height), dpi=dpi)
        self.canvas = FigureCanvasQTAgg(self.fig)
        self.axes = self.fig.add_subplot(111)
        self.axes.set(xlabel='Time (sec)', ylabel='Activation (AU)')
        self.canvas.setParent(parent)
        # NOTE(review): setSizePolicy/updateGeometry are deliberately called
        # unbound on the class with the canvas as first argument -- presumably
        # to pick the QWidget implementation explicitly; confirm before
        # changing to bound calls.
        FigureCanvasQTAgg.setSizePolicy(
            self.canvas,
            QtWidgets.QSizePolicy.Expanding,
            QtWidgets.QSizePolicy.Expanding
        )
        FigureCanvasQTAgg.updateGeometry(self.canvas)
        # XXX eventually this should be called in the window resize callback
        tight_layout(fig=self.axes.figure)
        self.time_viewer = time_viewer
        self.time_func = time_viewer.time_call
        # Clicks and drags on the plot move the current time point (see
        # on_button_press below).
        for event in ('button_press', 'motion_notify'):
            self.canvas.mpl_connect(
                event + '_event', getattr(self, 'on_' + event))
    def plot(self, x, y, label, **kwargs):
        """Plot a curve."""
        line, = self.axes.plot(
            x, y, label=label, **kwargs)
        self.update_plot()
        return line
    def plot_time_line(self, x, label, **kwargs):
        """Plot the vertical line."""
        line = self.axes.axvline(x, label=label, **kwargs)
        self.update_plot()
        return line
    def update_plot(self):
        """Update the plot."""
        # Rebuild the legend so added/removed traces show up; colors follow
        # the brain's background/foreground theme.
        leg = self.axes.legend(
            prop={'family': 'monospace', 'size': 'small'},
            framealpha=0.5, handlelength=1.,
            facecolor=self.time_viewer.brain._bg_color)
        for text in leg.get_texts():
            text.set_color(self.time_viewer.brain._fg_color)
        self.canvas.draw()
    def set_color(self, bg_color, fg_color):
        """Set the widget colors (axes face, labels, spines, ticks)."""
        self.axes.set_facecolor(bg_color)
        self.axes.xaxis.label.set_color(fg_color)
        self.axes.yaxis.label.set_color(fg_color)
        self.axes.spines['top'].set_color(fg_color)
        self.axes.spines['bottom'].set_color(fg_color)
        self.axes.spines['left'].set_color(fg_color)
        self.axes.spines['right'].set_color(fg_color)
        self.axes.tick_params(axis='x', colors=fg_color)
        self.axes.tick_params(axis='y', colors=fg_color)
        self.fig.patch.set_facecolor(bg_color)
    def show(self):
        """Show the canvas."""
        self.canvas.show()
    def close(self):
        """Close the canvas."""
        self.canvas.close()
    def on_button_press(self, event):
        """Handle button presses."""
        # left click (and maybe drag) in progress in axes
        if (event.inaxes != self.axes or
                event.button != 1):
            return
        # xdata is in seconds, hence time_as_index=False.
        self.time_func(
            event.xdata, update_widget=True, time_as_index=False)
    on_motion_notify = on_button_press  # for now they can be the same
class IntSlider(object):
    """Slider callback that snaps the value to the nearest integer."""
    def __init__(self, plotter=None, callback=None, first_call=True):
        self.plotter = plotter
        self.callback = callback
        self.slider_rep = None        # VTK slider representation, set later
        self.first_call = first_call  # fire the callback on the first call?
        self._first_time = True
    def __call__(self, value):
        """Round the label of the slider."""
        rounded = int(round(value))
        if self.slider_rep is not None:
            self.slider_rep.SetValue(rounded)
        self.plotter.update()
        # The very first invocation only fires the callback when requested.
        fire = self.first_call if self._first_time else True
        if fire:
            self.callback(rounded)
        self._first_time = False
class TimeSlider(object):
    """Class to update the time slider.

    Translates slider motion into ``brain.set_time_point`` calls and keeps
    the slider's title and handle in sync with the brain's current time.
    """
    def __init__(self, plotter=None, brain=None, callback=None,
                 first_call=True):
        self.plotter = plotter
        self.brain = brain
        self.callback = callback  # extra hook run after each time change
        self.slider_rep = None    # VTK slider representation, set later
        self.first_call = first_call  # whether the initial call takes effect
        self._first_time = True
        # Only a callable time_label can be used to format the title below.
        self.time_label = None
        if self.brain is not None and callable(self.brain._data['time_label']):
            self.time_label = self.brain._data['time_label']
    def __call__(self, value, update_widget=False, time_as_index=True):
        """Update the time slider.

        ``value`` is a time index unless ``time_as_index`` is False, in
        which case it is a time (in the data's time units) and is converted
        first.  ``update_widget`` additionally moves the slider handle.
        """
        value = float(value)
        if not time_as_index:
            value = self.brain._to_time_index(value)
        # Skip the very first invocation unless first_call was requested.
        if not self._first_time or all([self._first_time, self.first_call]):
            self.brain.set_time_point(value)
            if self.callback is not None:
                self.callback()
        current_time = self.brain._current_time
        if self.slider_rep is not None:
            if self.time_label is not None:
                current_time = self.time_label(current_time)
            self.slider_rep.SetTitleText(current_time)
            if update_widget:
                self.slider_rep.SetValue(value)
        self.plotter.update()
        if self._first_time:
            self._first_time = False
class UpdateColorbarScale(object):
    """Callback that rescales the colorbar and syncs its sliders."""
    def __init__(self, plotter=None, brain=None):
        self.plotter = plotter
        self.brain = brain
        self.keys = ('fmin', 'fmid', 'fmax')
        self.reps = dict.fromkeys(self.keys)  # slider reps, filled in later
        self.fscale_slider_rep = None
    def __call__(self, value):
        """Update the colorbar sliders."""
        # Apply the rescale on the brain first, then mirror the resulting
        # fmin/fmid/fmax values onto any registered slider representations.
        self.brain._update_fscale(value)
        for key in self.keys:
            rep = self.reps[key]
            if rep is not None:
                rep.SetValue(self.brain._data[key])
        # The scale slider itself snaps back to neutral (1.0).
        if self.fscale_slider_rep is not None:
            self.fscale_slider_rep.SetValue(1.0)
        self.plotter.update()
class BumpColorbarPoints(object):
    """Class that ensure constraints over the colorbar points.

    Dragging one of the fmin/fmid/fmax sliders pushes the neighbouring
    values along so that fmin <= fmid <= fmax always holds.
    """
    def __init__(self, plotter=None, brain=None, name=None):
        self.plotter = plotter
        self.brain = brain
        self.name = name  # which of the three points this instance controls
        # Per-point LUT update used by the rate-limited path at the end of
        # __call__.
        self.callback = {
            "fmin": lambda fmin: brain.update_lut(fmin=fmin),
            "fmid": lambda fmid: brain.update_lut(fmid=fmid),
            "fmax": lambda fmax: brain.update_lut(fmax=fmax),
        }
        self.keys = ('fmin', 'fmid', 'fmax')
        self.reps = {key: None for key in self.keys}  # slider reps, set later
        self.last_update = time.time()
    def __call__(self, value):
        """Update the colorbar sliders."""
        vals = {key: self.brain._data[key] for key in self.keys}
        # Each branch moves the dragged point and pushes the neighbouring
        # points (and their slider widgets) out of its way.
        if self.name == "fmin" and self.reps["fmin"] is not None:
            if vals['fmax'] < value:
                vals['fmax'] = value
                self.reps['fmax'].SetValue(value)
            if vals['fmid'] < value:
                vals['fmid'] = value
                self.reps['fmid'].SetValue(value)
            self.reps['fmin'].SetValue(value)
        elif self.name == "fmid" and self.reps['fmid'] is not None:
            if vals['fmin'] > value:
                vals['fmin'] = value
                self.reps['fmin'].SetValue(value)
            if vals['fmax'] < value:
                vals['fmax'] = value
                self.reps['fmax'].SetValue(value)
            self.reps['fmid'].SetValue(value)
        elif self.name == "fmax" and self.reps['fmax'] is not None:
            if vals['fmin'] > value:
                vals['fmin'] = value
                self.reps['fmin'].SetValue(value)
            if vals['fmid'] > value:
                vals['fmid'] = value
                self.reps['fmid'].SetValue(value)
            self.reps['fmax'].SetValue(value)
        self.brain.update_lut(**vals)
        # Rate-limit the expensive per-point callback to ~60 Hz using
        # wall-clock time.
        if time.time() > self.last_update + 1. / 60.:
            self.callback[self.name](value)
            self.last_update = time.time()
        self.plotter.update()
class ShowView(object):
    """Callback that switches a subplot to a named camera view."""
    def __init__(self, plotter=None, brain=None, orientation=None,
                 row=None, col=None, hemi=None):
        self.plotter = plotter
        self.brain = brain
        self.orientation = orientation
        # Three-letter abbreviations ('lat', 'med', ...) are accepted too.
        self.short_orientation = [name[:3] for name in orientation]
        self.row = row
        self.col = col
        self.hemi = hemi
        self.slider_rep = None  # VTK slider representation, set later
    def __call__(self, value, update_widget=False):
        """Update the view."""
        self.brain.show_view(value, row=self.row, col=self.col,
                             hemi=self.hemi)
        if update_widget:
            # Full names are looked up directly, abbreviations via the
            # three-letter table.
            if len(value) > 3:
                table = self.orientation
            else:
                table = self.short_orientation
            idx = table.index(value)
            if self.slider_rep is not None:
                self.slider_rep.SetValue(idx)
                self.slider_rep.SetTitleText(self.orientation[idx])
        self.plotter.update()
class SmartSlider(object):
    """Class to manage a smart slider.

    Keeps a reference to its own slider representation so the widget can
    be synchronized directly when ``update_widget`` is requested.
    """
    def __init__(self, plotter=None, callback=None):
        self.plotter = plotter
        self.callback = callback
        self.slider_rep = None  # VTK slider representation, set later
    def __call__(self, value, update_widget=False):
        """Update the value."""
        self.callback(value)
        if update_widget and self.slider_rep is not None:
            self.slider_rep.SetValue(value)
        self.plotter.update()
class _TimeViewer(object):
"""Class to interact with _Brain."""
def __init__(self, brain, show_traces=False):
from ..backends._pyvista import _require_minimum_version
_require_minimum_version('0.24')
# shared configuration
self.brain = brain
self.orientation = [
'lateral',
'medial',
'rostral',
'caudal',
'dorsal',
'ventral',
'frontal',
'parietal'
]
self.default_smoothing_range = [0, 15]
# detect notebook
if brain._notebook:
self.notebook = True
self.configure_notebook()
return
else:
self.notebook = False
# Default configuration
self.playback = False
self.visibility = False
self.refresh_rate_ms = max(int(round(1000. / 60.)), 1)
self.default_scaling_range = [0.2, 2.0]
self.default_playback_speed_range = [0.01, 1]
self.default_playback_speed_value = 0.05
self.default_status_bar_msg = "Press ? for help"
self.act_data_smooth = {'lh': None, 'rh': None}
self.color_cycle = None
self.picked_points = {'lh': list(), 'rh': list()}
self._mouse_no_mvt = -1
self.icons = dict()
self.actions = dict()
self.keys = ('fmin', 'fmid', 'fmax')
self.slider_length = 0.02
self.slider_width = 0.04
self.slider_color = (0.43137255, 0.44313725, 0.45882353)
self.slider_tube_width = 0.04
self.slider_tube_color = (0.69803922, 0.70196078, 0.70980392)
# Direct access parameters:
self.brain.time_viewer = self
self.plotter = brain._renderer.plotter
self.main_menu = self.plotter.main_menu
self.window = self.plotter.app_window
self.tool_bar = self.window.addToolBar("toolbar")
self.status_bar = self.window.statusBar()
self.interactor = self.plotter.interactor
self.window.signal_close.connect(self.clean)
# Derived parameters:
self.playback_speed = self.default_playback_speed_value
_check_option('show_traces', type(show_traces), [bool, str])
if isinstance(show_traces, str) and show_traces == "separate":
self.show_traces = True
self.separate_canvas = True
else:
self.show_traces = show_traces
self.separate_canvas = False
self.load_icons()
self.interactor_stretch = 3
self.configure_time_label()
self.configure_sliders()
self.configure_scalar_bar()
self.configure_playback()
self.configure_point_picking()
self.configure_menu()
self.configure_tool_bar()
self.configure_status_bar()
# show everything at the end
self.toggle_interface()
with self.ensure_minimum_sizes():
self.brain.show()
@contextlib.contextmanager
def ensure_minimum_sizes(self):
sz = self.brain._size
adjust_mpl = self.show_traces and not self.separate_canvas
if not adjust_mpl:
yield
else:
self.mpl_canvas.canvas.setMinimumSize(
sz[0], int(round(sz[1] / self.interactor_stretch)))
try:
yield
finally:
self.mpl_canvas.canvas.setMinimumSize(0, 0)
def toggle_interface(self, value=None):
if value is None:
self.visibility = not self.visibility
else:
self.visibility = value
# update tool bar icon
if self.visibility:
self.actions["visibility"].setIcon(self.icons["visibility_on"])
else:
self.actions["visibility"].setIcon(self.icons["visibility_off"])
# manage sliders
for slider in self.plotter.slider_widgets:
slider_rep = slider.GetRepresentation()
if self.visibility:
slider_rep.VisibilityOn()
else:
slider_rep.VisibilityOff()
# manage time label
time_label = self.brain._data['time_label']
# if we actually have time points, we will show the slider so
# hide the time actor
have_ts = self.brain._times is not None and len(self.brain._times) > 1
if self.time_actor is not None:
if self.visibility and time_label is not None and not have_ts:
self.time_actor.SetInput(time_label(self.brain._current_time))
self.time_actor.VisibilityOn()
else:
self.time_actor.VisibilityOff()
self.plotter.update()
def _save_movie(self, filename, **kwargs):
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QCursor
def frame_callback(frame, n_frames):
if frame == n_frames:
# On the ImageIO step
self.status_msg.setText(
"Saving with ImageIO: %s"
% filename
)
self.status_msg.show()
self.status_progress.hide()
self.status_bar.layout().update()
else:
self.status_msg.setText(
"Rendering images (frame %d / %d) ..."
% (frame + 1, n_frames)
)
self.status_msg.show()
self.status_progress.show()
self.status_progress.setRange(0, n_frames - 1)
self.status_progress.setValue(frame)
self.status_progress.update()
self.status_progress.repaint()
self.status_msg.update()
self.status_msg.parent().update()
self.status_msg.repaint()
# temporarily hide interface
default_visibility = self.visibility
self.toggle_interface(value=False)
# set cursor to busy
default_cursor = self.interactor.cursor()
self.interactor.setCursor(QCursor(Qt.WaitCursor))
try:
self.brain.save_movie(
filename=filename,
time_dilation=(1. / self.playback_speed),
callback=frame_callback,
**kwargs
)
except (Exception, KeyboardInterrupt):
warn('Movie saving aborted:\n' + traceback.format_exc())
# restore visibility
self.toggle_interface(value=default_visibility)
# restore cursor
self.interactor.setCursor(default_cursor)
@copy_doc(_Brain.save_movie)
def save_movie(self, filename=None, **kwargs):
try:
from pyvista.plotting.qt_plotting import FileDialog
except ImportError:
from pyvistaqt.plotting import FileDialog
if filename is None:
self.status_msg.setText("Choose movie path ...")
self.status_msg.show()
self.status_progress.setValue(0)
def _clean(unused):
del unused
self.status_msg.hide()
self.status_progress.hide()
dialog = FileDialog(
self.plotter.app_window,
callback=partial(self._save_movie, **kwargs)
)
dialog.setDirectory(os.getcwd())
dialog.finished.connect(_clean)
return dialog
else:
self._save_movie(filename=filename, **kwargs)
return
def apply_auto_scaling(self):
self.brain.update_auto_scaling()
for key in ('fmin', 'fmid', 'fmax'):
self.reps[key].SetValue(self.brain._data[key])
self.plotter.update()
def restore_user_scaling(self):
self.brain.update_auto_scaling(restore=True)
for key in ('fmin', 'fmid', 'fmax'):
self.reps[key].SetValue(self.brain._data[key])
self.plotter.update()
def toggle_playback(self, value=None):
if value is None:
self.playback = not self.playback
else:
self.playback = value
# update tool bar icon
if self.playback:
self.actions["play"].setIcon(self.icons["pause"])
else:
self.actions["play"].setIcon(self.icons["play"])
if self.playback:
time_data = self.brain._data['time']
max_time = np.max(time_data)
if self.brain._current_time == max_time: # start over
self.brain.set_time_point(0) # first index
self._last_tick = time.time()
def set_playback_speed(self, speed):
self.playback_speed = speed
@safe_event
def play(self):
if self.playback:
try:
self._advance()
except Exception:
self.toggle_playback(value=False)
raise
def _advance(self):
this_time = time.time()
delta = this_time - self._last_tick
self._last_tick = time.time()
time_data = self.brain._data['time']
times = np.arange(self.brain._n_times)
time_shift = delta * self.playback_speed
max_time = np.max(time_data)
time_point = min(self.brain._current_time + time_shift, max_time)
# always use linear here -- this does not determine the data
# interpolation mode, it just finds where we are (in time) in
# terms of the time indices
idx = np.interp(time_point, time_data, times)
self.time_call(idx, update_widget=True)
if time_point == max_time:
self.toggle_playback(value=False)
def set_slider_style(self, slider, show_label=True, show_cap=False):
if slider is not None:
slider_rep = slider.GetRepresentation()
slider_rep.SetSliderLength(self.slider_length)
slider_rep.SetSliderWidth(self.slider_width)
slider_rep.SetTubeWidth(self.slider_tube_width)
slider_rep.GetSliderProperty().SetColor(self.slider_color)
slider_rep.GetTubeProperty().SetColor(self.slider_tube_color)
slider_rep.GetLabelProperty().SetShadow(False)
slider_rep.GetLabelProperty().SetBold(True)
slider_rep.GetLabelProperty().SetColor(self.brain._fg_color)
slider_rep.GetTitleProperty().ShallowCopy(
slider_rep.GetLabelProperty()
)
if not show_cap:
slider_rep.GetCapProperty().SetOpacity(0)
if not show_label:
slider_rep.ShowSliderLabelOff()
def configure_notebook(self):
from ._notebook import _NotebookInteractor
self.brain._renderer.figure.display = _NotebookInteractor(self)
def configure_time_label(self):
self.time_actor = self.brain._data.get('time_actor')
if self.time_actor is not None:
self.time_actor.SetPosition(0.5, 0.03)
self.time_actor.GetTextProperty().SetJustificationToCentered()
self.time_actor.GetTextProperty().BoldOn()
self.time_actor.VisibilityOff()
def configure_scalar_bar(self):
if self.brain._colorbar_added:
scalar_bar = self.plotter.scalar_bar
scalar_bar.SetOrientationToVertical()
scalar_bar.SetHeight(0.6)
scalar_bar.SetWidth(0.05)
scalar_bar.SetPosition(0.02, 0.2)
def configure_sliders(self):
rng = _get_range(self.brain)
# Orientation slider
# default: put orientation slider on the first view
if self.brain._hemi in ('split', 'both'):
self.plotter.subplot(0, 0)
# Use 'lh' as a reference for orientation for 'both'
if self.brain._hemi == 'both':
hemis_ref = ['lh']
else:
hemis_ref = self.brain._hemis
for hemi in hemis_ref:
if self.brain._hemi == 'split':
ci = 0 if hemi == 'lh' else 1
else:
ci = 0
for ri, view in enumerate(self.brain._views):
self.plotter.subplot(ri, ci)
self.orientation_call = ShowView(
plotter=self.plotter,
brain=self.brain,
orientation=self.orientation,
hemi=hemi,
row=ri,
col=ci,
)
orientation_slider = self.plotter.add_text_slider_widget(
self.orientation_call,
value=0,
data=self.orientation,
pointa=(0.82, 0.74),
pointb=(0.98, 0.74),
event_type='always'
)
self.orientation_call.slider_rep = \
orientation_slider.GetRepresentation()
self.set_slider_style(orientation_slider, show_label=False)
self.orientation_call(view, update_widget=True)
# necessary because show_view modified subplot
if self.brain._hemi in ('split', 'both'):
self.plotter.subplot(0, 0)
# Smoothing slider
self.smoothing_call = IntSlider(
plotter=self.plotter,
callback=self.brain.set_data_smoothing,
first_call=False,
)
smoothing_slider = self.plotter.add_slider_widget(
self.smoothing_call,
value=self.brain._data['smoothing_steps'],
rng=self.default_smoothing_range, title="smoothing",
pointa=(0.82, 0.90),
pointb=(0.98, 0.90)
)
self.smoothing_call.slider_rep = smoothing_slider.GetRepresentation()
# Time slider
max_time = len(self.brain._data['time']) - 1
# VTK on macOS bombs if we create these then hide them, so don't
# even create them
if max_time < 1:
self.time_call = None
time_slider = None
else:
self.time_call = TimeSlider(
plotter=self.plotter,
brain=self.brain,
first_call=False,
callback=self.plot_time_line,
)
time_slider = self.plotter.add_slider_widget(
self.time_call,
value=self.brain._data['time_idx'],
rng=[0, max_time],
pointa=(0.23, 0.1),
pointb=(0.77, 0.1),
event_type='always'
)
self.time_call.slider_rep = time_slider.GetRepresentation()
# configure properties of the time slider
time_slider.GetRepresentation().SetLabelFormat('idx=%0.1f')
current_time = self.brain._current_time
assert current_time is not None # should never be the case, float
time_label = self.brain._data['time_label']
if callable(time_label):
current_time = time_label(current_time)
else:
current_time = time_label
if time_slider is not None:
time_slider.GetRepresentation().SetTitleText(current_time)
if self.time_actor is not None:
self.time_actor.SetInput(current_time)
del current_time
# Playback speed slider
if time_slider is None:
self.playback_speed_call = None
playback_speed_slider = None
else:
self.playback_speed_call = SmartSlider(
plotter=self.plotter,
callback=self.set_playback_speed,
)
playback_speed_slider = self.plotter.add_slider_widget(
self.playback_speed_call,
value=self.default_playback_speed_value,
rng=self.default_playback_speed_range, title="speed",
pointa=(0.02, 0.1),
pointb=(0.18, 0.1),
event_type='always'
)
self.playback_speed_call.slider_rep = \
playback_speed_slider.GetRepresentation()
# Colormap slider
pointa = np.array((0.82, 0.26))
pointb = np.array((0.98, 0.26))
shift = np.array([0, 0.1])
# fmin
self.fmin_call = BumpColorbarPoints(
plotter=self.plotter,
brain=self.brain,
name="fmin"
)
fmin_slider = self.plotter.add_slider_widget(
self.fmin_call,
value=self.brain._data["fmin"],
rng=rng, title="clim",
pointa=pointa,
pointb=pointb,
event_type="always",
)
# fmid
self.fmid_call = BumpColorbarPoints(
plotter=self.plotter,
brain=self.brain,
name="fmid",
)
fmid_slider = self.plotter.add_slider_widget(
self.fmid_call,
value=self.brain._data["fmid"],
rng=rng, title="",
pointa=pointa + shift,
pointb=pointb + shift,
event_type="always",
)
# fmax
self.fmax_call = BumpColorbarPoints(
plotter=self.plotter,
brain=self.brain,
name="fmax",
)
fmax_slider = self.plotter.add_slider_widget(
self.fmax_call,
value=self.brain._data["fmax"],
rng=rng, title="",
pointa=pointa + 2 * shift,
pointb=pointb + 2 * shift,
event_type="always",
)
# fscale
self.fscale_call = UpdateColorbarScale(
plotter=self.plotter,
brain=self.brain,
)
fscale_slider = self.plotter.add_slider_widget(
self.fscale_call,
value=1.0,
rng=self.default_scaling_range, title="fscale",
pointa=(0.82, 0.10),
pointb=(0.98, 0.10)
)
self.fscale_call.fscale_slider_rep = fscale_slider.GetRepresentation()
# register colorbar slider representations
self.reps = {
"fmin": fmin_slider.GetRepresentation(),
"fmid": fmid_slider.GetRepresentation(),
"fmax": fmax_slider.GetRepresentation(),
}
self.fmin_call.reps = self.reps
self.fmid_call.reps = self.reps
self.fmax_call.reps = self.reps
self.fscale_call.reps = self.reps
# set the slider style
self.set_slider_style(smoothing_slider)
self.set_slider_style(fmin_slider)
self.set_slider_style(fmid_slider)
self.set_slider_style(fmax_slider)
self.set_slider_style(fscale_slider)
if time_slider is not None:
self.set_slider_style(playback_speed_slider)
self.set_slider_style(time_slider)
# store sliders for linking
self._time_slider = time_slider
self._playback_speed_slider = playback_speed_slider
def configure_playback(self):
self.plotter.add_callback(self.play, self.refresh_rate_ms)
def configure_point_picking(self):
if not self.show_traces:
return
from ..backends._pyvista import _update_picking_callback
# use a matplotlib canvas
self.color_cycle = _ReuseCycle(_get_color_list())
win = self.plotter.app_window
dpi = win.windowHandle().screen().logicalDotsPerInch()
w, h = win.geometry().width() / dpi, win.geometry().height() / dpi
h /= 3 # one third of the window
self.mpl_canvas = MplCanvas(self, w, h, dpi)
xlim = [np.min(self.brain._data['time']),
np.max(self.brain._data['time'])]
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=UserWarning)
self.mpl_canvas.axes.set(xlim=xlim)
vlayout = self.plotter.frame.layout()
if not self.separate_canvas:
vlayout.addWidget(self.mpl_canvas.canvas)
vlayout.setStretch(0, self.interactor_stretch)
vlayout.setStretch(1, 1)
self.mpl_canvas.set_color(
bg_color=self.brain._bg_color,
fg_color=self.brain._fg_color,
)
self.mpl_canvas.show()
# get brain data
for idx, hemi in enumerate(['lh', 'rh']):
hemi_data = self.brain._data.get(hemi)
if hemi_data is not None:
act_data = hemi_data['array']
if act_data.ndim == 3:
act_data = np.linalg.norm(act_data, axis=1)
smooth_mat = hemi_data.get('smooth_mat')
self.act_data_smooth[hemi] = (act_data, smooth_mat)
# simulate a picked renderer
if self.brain._hemi == 'split':
self.picked_renderer = self.plotter.renderers[idx]
else:
self.picked_renderer = self.plotter.renderers[0]
# initialize the default point
color = next(self.color_cycle)
ind = np.unravel_index(
np.argmax(self.act_data_smooth[hemi][0], axis=None),
self.act_data_smooth[hemi][0].shape
)
vertex_id = hemi_data['vertices'][ind[0]]
mesh = hemi_data['mesh']
line = self.plot_time_course(hemi, vertex_id, color)
self.add_point(hemi, mesh, vertex_id, line, color)
self.plot_time_line()
_update_picking_callback(
self.plotter,
self.on_mouse_move,
self.on_button_press,
self.on_button_release,
self.on_pick
)
def load_icons(self):
from PyQt5.QtGui import QIcon
_init_resources()
self.icons["help"] = QIcon(":/help.svg")
self.icons["play"] = QIcon(":/play.svg")
self.icons["pause"] = QIcon(":/pause.svg")
self.icons["scale"] = QIcon(":/scale.svg")
self.icons["clear"] = QIcon(":/clear.svg")
self.icons["movie"] = QIcon(":/movie.svg")
self.icons["restore"] = QIcon(":/restore.svg")
self.icons["screenshot"] = QIcon(":/screenshot.svg")
self.icons["visibility_on"] = QIcon(":/visibility_on.svg")
self.icons["visibility_off"] = QIcon(":/visibility_off.svg")
def configure_tool_bar(self):
self.actions["screenshot"] = self.tool_bar.addAction(
self.icons["screenshot"],
"Take a screenshot",
self.plotter._qt_screenshot
)
self.actions["movie"] = self.tool_bar.addAction(
self.icons["movie"],
"Save movie...",
self.save_movie
)
self.actions["visibility"] = self.tool_bar.addAction(
self.icons["visibility_on"],
"Toggle Visibility",
self.toggle_interface
)
self.actions["play"] = self.tool_bar.addAction(
self.icons["play"],
"Play/Pause",
self.toggle_playback
)
self.actions["scale"] = self.tool_bar.addAction(
self.icons["scale"],
"Auto-Scale",
self.apply_auto_scaling
)
self.actions["restore"] = self.tool_bar.addAction(
self.icons["restore"],
"Restore scaling",
self.restore_user_scaling
)
self.actions["clear"] = self.tool_bar.addAction(
self.icons["clear"],
"Clear traces",
self.clear_points
)
self.actions["help"] = self.tool_bar.addAction(
self.icons["help"],
"Help",
self.help
)
self.actions["movie"].setShortcut("ctrl+shift+s")
self.actions["visibility"].setShortcut("i")
self.actions["play"].setShortcut(" ")
self.actions["scale"].setShortcut("s")
self.actions["restore"].setShortcut("r")
self.actions["clear"].setShortcut("c")
self.actions["help"].setShortcut("?")
def configure_menu(self):
# remove default picking menu
to_remove = list()
for action in self.main_menu.actions():
if action.text() == "Tools":
to_remove.append(action)
for action in to_remove:
self.main_menu.removeAction(action)
# add help menu
menu = self.main_menu.addMenu('Help')
menu.addAction('Show MNE key bindings\t?', self.help)
def configure_status_bar(self):
from PyQt5.QtWidgets import QLabel, QProgressBar
self.status_msg = QLabel(self.default_status_bar_msg)
self.status_progress = QProgressBar()
self.status_bar.layout().addWidget(self.status_msg, 1)
self.status_bar.layout().addWidget(self.status_progress, 0)
self.status_progress.hide()
def on_mouse_move(self, vtk_picker, event):
if self._mouse_no_mvt:
self._mouse_no_mvt -= 1
def on_button_press(self, vtk_picker, event):
self._mouse_no_mvt = 2
def on_button_release(self, vtk_picker, event):
if self._mouse_no_mvt > 0:
x, y = vtk_picker.GetEventPosition()
# programmatically detect the picked renderer
self.picked_renderer = self.plotter.iren.FindPokedRenderer(x, y)
# trigger the pick
self.plotter.picker.Pick(x, y, 0, self.picked_renderer)
self._mouse_no_mvt = 0
def on_pick(self, vtk_picker, event):
cell_id = vtk_picker.GetCellId()
mesh = vtk_picker.GetDataSet()
if mesh is None or cell_id == -1:
return
if hasattr(mesh, "_is_point"):
self.remove_point(mesh)
elif self._mouse_no_mvt:
hemi = mesh._hemi
pos = vtk_picker.GetPickPosition()
vtk_cell = mesh.GetCell(cell_id)
cell = [vtk_cell.GetPointId(point_id) for point_id
in range(vtk_cell.GetNumberOfPoints())]
vertices = mesh.points[cell]
idx = np.argmin(abs(vertices - pos), axis=0)
vertex_id = cell[idx[0]]
if vertex_id not in self.picked_points[hemi]:
color = next(self.color_cycle)
# update associated time course
line = self.plot_time_course(hemi, vertex_id, color)
# add glyph at picked point
self.add_point(hemi, mesh, vertex_id, line, color)
def add_point(self, hemi, mesh, vertex_id, line, color):
from ..backends._pyvista import _sphere
center = mesh.GetPoints().GetPoint(vertex_id)
# from the picked renderer to the subplot coords
rindex = self.plotter.renderers.index(self.picked_renderer)
row, col = self.plotter.index_to_loc(rindex)
actors = list()
spheres = list()
for ri, view in enumerate(self.brain._views):
self.plotter.subplot(ri, col)
# Using _sphere() instead of renderer.sphere() for 2 reasons:
# 1) renderer.sphere() fails on Windows in a scenario where a lot
# of picking requests are done in a short span of time (could be
# mitigated with synchronization/delay?)
# 2) the glyph filter is used in renderer.sphere() but only one
# sphere is required in this function.
actor, sphere = _sphere(
plotter=self.plotter,
center=np.array(center),
color=color,
radius=4.0,
)
actors.append(actor)
spheres.append(sphere)
# add metadata for picking
for sphere in spheres:
sphere._is_point = True
sphere._hemi = hemi
sphere._line = line
sphere._actors = actors
sphere._color = color
sphere._vertex_id = vertex_id
self.picked_points[hemi].append(vertex_id)
# this is used for testing only
if hasattr(self, "_spheres"):
self._spheres += spheres
else:
self._spheres = spheres
    def remove_point(self, mesh):
        """Deselect the picked point represented by *mesh* (a sphere glyph):
        remove its trace, its vertex bookkeeping, its color and its actors."""
        mesh._line.remove()
        self.mpl_canvas.update_plot()
        self.picked_points[mesh._hemi].remove(mesh._vertex_id)
        with warnings.catch_warnings(record=True):
            # We intentionally ignore these in case we have traversed the
            # entire color cycle
            warnings.simplefilter('ignore')
            self.color_cycle.restore(mesh._color)
        self.plotter.remove_actor(mesh._actors)
        mesh._actors = None
    def clear_points(self):
        """Remove every currently picked point (both hemispheres)."""
        if hasattr(self, "_spheres"):
            for sphere in self._spheres:
                vertex_id = sphere._vertex_id
                hemi = sphere._hemi
                # several spheres share one vertex (one per view), so the
                # vertex may already have been removed by a sibling sphere
                if vertex_id in self.picked_points[hemi]:
                    self.remove_point(sphere)
            self._spheres.clear()
    def plot_time_course(self, hemi, vertex_id, color):
        """Plot the activation time course of *vertex_id* on the mpl canvas.

        Returns the created matplotlib line, or None when no canvas exists.
        The legend label includes the hemisphere and the vertex MNI
        coordinates.
        """
        if not hasattr(self, "mpl_canvas"):
            return
        time = self.brain._data['time'].copy()  # avoid circular ref
        hemi_str = 'L' if hemi == 'lh' else 'R'
        hemi_int = 0 if hemi == 'lh' else 1
        mni = vertex_to_mni(
            vertices=vertex_id,
            hemis=hemi_int,
            subject=self.brain._subject_id,
            subjects_dir=self.brain._subjects_dir
        )
        label = "{}:{} MNI: {}".format(
            hemi_str, str(vertex_id).ljust(6),
            ', '.join('%5.1f' % m for m in mni))
        act_data, smooth = self.act_data_smooth[hemi]
        if smooth is not None:
            # map source-space data onto this surface vertex via the
            # smoothing matrix row
            act_data = smooth[vertex_id].dot(act_data)[0]
        else:
            act_data = act_data[vertex_id].copy()
        line = self.mpl_canvas.plot(
            time,
            act_data,
            label=label,
            lw=1.,
            color=color
        )
        return line
    def plot_time_line(self):
        """Draw or refresh the vertical cursor marking the current time."""
        if not hasattr(self, "mpl_canvas"):
            return
        if isinstance(self.show_traces, bool) and self.show_traces:
            # add time information
            current_time = self.brain._current_time
            if not hasattr(self, "time_line"):
                # first call: create the cursor line
                self.time_line = self.mpl_canvas.plot_time_line(
                    x=current_time,
                    label='time',
                    color=self.brain._fg_color,
                    lw=1,
                )
            else:
                # subsequent calls: just move the existing line
                self.time_line.set_xdata(current_time)
                self.mpl_canvas.update_plot()
def help(self):
pairs = [
('?', 'Display help window'),
('i', 'Toggle interface'),
('s', 'Apply auto-scaling'),
('r', 'Restore original clim'),
('c', 'Clear all traces'),
('Space', 'Start/Pause playback'),
]
text1, text2 = zip(*pairs)
text1 = '\n'.join(text1)
text2 = '\n'.join(text2)
_show_help(
col1=text1,
col2=text2,
width=5,
height=2,
)
    @safe_event
    def clean(self):
        """Break all reference cycles between the viewer, the brain, the
        plotter and the Qt widgets so everything can be garbage collected."""
        # resolve the reference cycle
        self.clear_points()
        self.actions.clear()
        self.reps = None
        self._time_slider = None
        self._playback_speed_slider = None
        self.orientation_call.plotter = None
        self.orientation_call.brain = None
        self.orientation_call = None
        self.smoothing_call.plotter = None
        self.smoothing_call = None
        # time_call may be absent (static data) — see guard below
        if self.time_call is not None:
            self.time_call.plotter = None
            self.time_call.brain = None
            self.time_call = None
        self.playback_speed_call.plotter = None
        self.playback_speed_call = None
        self.fmin_call.plotter = None
        self.fmin_call.brain = None
        self.fmin_call = None
        self.fmid_call.plotter = None
        self.fmid_call.brain = None
        self.fmid_call = None
        self.fmax_call.plotter = None
        self.fmax_call.brain = None
        self.fmax_call = None
        self.fscale_call.plotter = None
        self.fscale_call.brain = None
        self.fscale_call = None
        self.brain.time_viewer = None
        self.brain = None
        self.plotter = None
        self.main_menu = None
        self.window = None
        self.tool_bar = None
        self.status_bar = None
        self.interactor = None
        if hasattr(self, "mpl_canvas"):
            self.mpl_canvas.close()
            self.mpl_canvas.axes.clear()
            self.mpl_canvas.fig.clear()
            self.mpl_canvas.time_viewer = None
            self.mpl_canvas.canvas = None
            self.mpl_canvas = None
        self.time_actor = None
        self.picked_renderer = None
        self.act_data_smooth["lh"] = None
        self.act_data_smooth["rh"] = None
        self.act_data_smooth = None
class _LinkViewer(object):
    """Class to link multiple _TimeViewer objects.

    Optionally synchronizes the current time, the playback speed, the
    play/pause action, the time-course canvases and the cameras across
    all linked viewers.
    """
    def __init__(self, brains, time=True, camera=False):
        self.brains = brains
        self.time_viewers = [brain.time_viewer for brain in brains]
        # check time infos
        times = [brain._times for brain in brains]
        if time and not all(np.allclose(x, times[0]) for x in times):
            warn('stc.times do not match, not linking time')
            time = False
        if camera:
            self.link_cameras()
        if time:
            # link time sliders
            self.link_sliders(
                name="_time_slider",
                callback=self.set_time_point,
                event_type="always"
            )
            # link playback speed sliders
            self.link_sliders(
                name="_playback_speed_slider",
                callback=self.set_playback_speed,
                event_type="always"
            )
            # link toggle to start/pause playback
            for time_viewer in self.time_viewers:
                time_viewer.actions["play"].triggered.disconnect()
                time_viewer.actions["play"].triggered.connect(
                    self.toggle_playback)
            # link time course canvas
            def _func(*args, **kwargs):
                for time_viewer in self.time_viewers:
                    time_viewer.time_call(*args, **kwargs)
            for time_viewer in self.time_viewers:
                if time_viewer.show_traces:
                    time_viewer.mpl_canvas.time_func = _func
    def set_time_point(self, value):
        # broadcast the new time point to every linked viewer
        for time_viewer in self.time_viewers:
            time_viewer.time_call(value, update_widget=True)
    def set_playback_speed(self, value):
        # broadcast the new playback speed to every linked viewer
        for time_viewer in self.time_viewers:
            time_viewer.playback_speed_call(value, update_widget=True)
    def toggle_playback(self):
        """Start or pause playback in every linked viewer, from a
        synchronized starting time."""
        leader = self.time_viewers[0]  # select a time_viewer as leader
        value = leader.time_call.slider_rep.GetValue()
        # synchronize starting points before playback
        self.set_time_point(value)
        for time_viewer in self.time_viewers:
            time_viewer.toggle_playback()
    def link_sliders(self, name, callback, event_type):
        """Re-route the slider attribute *name* of every viewer (when
        present) to the shared *callback*."""
        from ..backends._pyvista import _update_slider_callback
        for time_viewer in self.time_viewers:
            slider = getattr(time_viewer, name, None)
            if slider is not None:
                _update_slider_callback(
                    slider=slider,
                    callback=callback,
                    event_type=event_type
                )
    def link_cameras(self):
        """Share the leader's camera with every renderer and refresh all
        plotters on camera movement."""
        from ..backends._pyvista import _add_camera_callback
        def _update_camera(vtk_picker, event):
            for time_viewer in self.time_viewers:
                time_viewer.plotter.update()
        leader = self.time_viewers[0]  # select a time_viewer as leader
        camera = leader.plotter.camera
        _add_camera_callback(camera, _update_camera)
        for time_viewer in self.time_viewers:
            for renderer in time_viewer.plotter.renderers:
                renderer.camera = camera
def _get_range(brain):
val = np.abs(brain._current_act_data)
return [np.min(val), np.max(val)]
def _normalize(point, shape):
return (point[0] / shape[1], point[1] / shape[0])
@run_once
def _init_resources():
    # load the Qt icon resource file; @run_once guards against repeated
    # initialization within one process
    from ...icons import resources
    resources.qInitResources()
| Teekuningas/mne-python | mne/viz/_brain/_timeviewer.py | Python | bsd-3-clause | 46,789 | [
"VTK"
] | ceefeece0fd9de8bcedb66f9aa074017dddecfca3e75930fbdcc37d15214bdbd |
from __future__ import print_function
"""
The basic idea of this module is to provide the STEM class, which simulates
STEM images from atomic configurations. Note that these simulations are very
simplistic, they make the following assumptions.
1. Atoms do not lose/gain/share electrons
2. Atoms' electron clouds are spherically symmetrical
3. Total electron density is a approximated by the atomic number
4. Electron density is radially shaped like a gaussian function
Thus we can compute the electron density for an atomic system by treating
each atom as a gaussian with intensity = f(0) and sigma = covalent radius
"""
import numpy as np
from scipy.stats import norm
from ase.data import covalent_radii
from builtins import range
def get_atomic_electron_density(atom, voxels, resolution):
    """Compute one atom's electron-density contribution on a voxel grid.

    The squared distance from each voxel center to the atom's position is
    passed through a standard-normal pdf and scaled by the atomic number
    (Z approximates the total electron count, per the module docstring).

    Parameters
    ----------
    atom : object
        Must expose ``position`` (length-3 array) and ``number`` (int);
        typically an ``ase.Atom``.
    voxels : ndarray
        3-D array whose shape defines the voxel grid.
    resolution : sequence of 3 floats
        Voxel edge length along each axis.

    Returns
    -------
    ndarray
        Electron density, same shape as *voxels*.

    Notes
    -----
    The previous implementation also built a 3x3 matrix from the covalent
    radius but never used it (``norm.pdf`` runs with unit sigma); that
    dead code has been removed.  It also filled the grid with a pure-Python
    triple loop; this version broadcasts with NumPy for the same values.
    NOTE(review): feeding the *squared* distance to ``norm.pdf`` yields an
    exp(-r**4/2) profile, not a gaussian in r -- confirm this is intended.
    """
    im, jm, km = voxels.shape
    # voxel-center coordinate along each axis
    xs = (np.arange(im) + .5) * resolution[0]
    ys = (np.arange(jm) + .5) * resolution[1]
    zs = (np.arange(km) + .5) * resolution[2]
    q = atom.position
    # squared distance from every voxel center to the atom, broadcast to
    # an (im, jm, km) grid
    r = ((xs[:, None, None] - q[0]) ** 2 +
         (ys[None, :, None] - q[1]) ** 2 +
         (zs[None, None, :] - q[2]) ** 2)
    # put e density in voxel
    return norm.pdf(r) * atom.number
if __name__ == '__main__':
    # Demo: build a small Pt FCC cluster, accumulate the electron density
    # of every atom on a voxel grid, and show the projection along z.
    import matplotlib.pyplot as plt
    from ase import Atoms
    from ase.visualize import view
    from ase.cluster import FaceCenteredCubic
    # atoms = Atoms('Pt', [(0, 0, 0)])
    atoms = Atoms(
        FaceCenteredCubic('Pt', [[1, 0, 0], [1, 1, 0], [1, 1, 1]], (2, 3, 2)))
    # atoms = atoms[[atom.index for atom in atoms if atom.position[2]< 1.5]]
    view(atoms)
    # enlarge the cell so the density does not touch the boundary
    atoms.set_cell(atoms.get_cell() * 1.2)
    atoms.center()
    cell = atoms.get_cell()
    print(cell, len(atoms))
    resolution = .3 * np.ones(3)
    # number of voxels needed to cover the cell at this resolution
    c = np.diagonal(cell)
    v = tuple(np.int32(np.ceil(c / resolution)))
    voxels = np.zeros(v)
    ed = np.zeros(v)
    i = 0
    for atom in atoms:
        print(i)
        ed += get_atomic_electron_density(atom, voxels, resolution)
        i += 1
    print(ed[:, :, 0])
    plt.imshow(np.sum(ed, axis=2), cmap='viridis')
    plt.show()
| CJ-Wright/pyIID | pyiid/experiments/stem/__init__.py | Python | bsd-3-clause | 2,468 | [
"ASE",
"Gaussian"
] | 6e7bd68ce06504cb1304218582a57504bcfbf46f31ed2efa315cf7b71b0d7e06 |
"""
QAPI command marshaller generator
Copyright IBM, Corp. 2011
Copyright (C) 2014-2018 Red Hat, Inc.
Authors:
Anthony Liguori <aliguori@us.ibm.com>
Michael Roth <mdroth@linux.vnet.ibm.com>
Markus Armbruster <armbru@redhat.com>
This work is licensed under the terms of the GNU GPL, version 2.
See the COPYING file in the top-level directory.
"""
from qapi.common import *
from qapi.gen import QAPIGenCCode, QAPISchemaModularCVisitor, ifcontext
def gen_command_decl(name, arg_type, boxed, ret_type):
    """Generate the C declaration of the qmp_COMMAND() handler."""
    c_type = (ret_type and ret_type.c_type()) or 'void'
    params = build_params(arg_type, boxed, 'Error **errp')
    return mcgen('''
%(c_type)s qmp_%(c_name)s(%(params)s);
''',
                 c_type=c_type,
                 c_name=c_name(name),
                 params=params)
def gen_call(name, arg_type, boxed, ret_type):
    """Generate the C code that invokes qmp_COMMAND() from its marshaller
    (and, when there is a return value, marshals it to a QObject)."""
    ret = ''
    argstr = ''
    if boxed:
        # boxed commands receive the whole argument struct by pointer
        assert arg_type
        argstr = '&arg, '
    elif arg_type:
        assert not arg_type.variants
        # pass each member individually; optional members are preceded by
        # their has_ flag
        for memb in arg_type.members:
            if memb.optional:
                argstr += 'arg.has_%s, ' % c_name(memb.name)
            argstr += 'arg.%s, ' % c_name(memb.name)
    lhs = ''
    if ret_type:
        lhs = 'retval = '
    ret = mcgen('''
    %(lhs)sqmp_%(c_name)s(%(args)s&err);
    error_propagate(errp, err);
''',
                c_name=c_name(name), args=argstr, lhs=lhs)
    if ret_type:
        ret += mcgen('''
    if (err) {
        goto out;
    }
    qmp_marshal_output_%(c_name)s(retval, ret, errp);
''',
                     c_name=ret_type.c_name())
    return ret
def gen_marshal_output(ret_type):
    """Generate the C helper that converts a qmp_COMMAND() return value to
    a QObject with the qobject output visitor, then frees the value with
    the dealloc visitor."""
    return mcgen('''
static void qmp_marshal_output_%(c_name)s(%(c_type)s ret_in, QObject **ret_out, Error **errp)
{
    Visitor *v;
    v = qobject_output_visitor_new(ret_out);
    if (visit_type_%(c_name)s(v, "unused", &ret_in, errp)) {
        visit_complete(v, ret_out);
    }
    visit_free(v);
    v = qapi_dealloc_visitor_new();
    visit_type_%(c_name)s(v, "unused", &ret_in, NULL);
    visit_free(v);
}
''',
                 c_type=ret_type.c_type(), c_name=ret_type.c_name())
def build_marshal_proto(name):
    """Return the C prototype of the qmp_marshal_NAME() function."""
    template = 'void qmp_marshal_{0}(QDict *args, QObject **ret, Error **errp)'
    return template.format(c_name(name))
def gen_marshal_decl(name):
    """Generate the declaration of the qmp_marshal_NAME() function."""
    return mcgen('''
%(proto)s;
''',
                 proto=build_marshal_proto(name))
def gen_marshal(name, arg_type, boxed, ret_type):
    """Generate the qmp_marshal_NAME() definition: unpack the QDict
    arguments with the qobject input visitor, call the handler via
    gen_call(), then free the arguments with the dealloc visitor."""
    have_args = boxed or (arg_type and not arg_type.is_empty())
    ret = mcgen('''
%(proto)s
{
    Error *err = NULL;
    bool ok = false;
    Visitor *v;
''',
                proto=build_marshal_proto(name))
    if ret_type:
        ret += mcgen('''
    %(c_type)s retval;
''',
                     c_type=ret_type.c_type())
    if have_args:
        ret += mcgen('''
    %(c_name)s arg = {0};
''',
                     c_name=arg_type.c_name())
    ret += mcgen('''
    v = qobject_input_visitor_new(QOBJECT(args));
    if (!visit_start_struct(v, NULL, NULL, 0, errp)) {
        goto out;
    }
''')
    if have_args:
        ret += mcgen('''
    if (visit_type_%(c_arg_type)s_members(v, &arg, errp)) {
        ok = visit_check_struct(v, errp);
    }
''',
                     c_arg_type=arg_type.c_name())
    else:
        ret += mcgen('''
    ok = visit_check_struct(v, errp);
''')
    ret += mcgen('''
    visit_end_struct(v, NULL);
    if (!ok) {
        goto out;
    }
''')
    ret += gen_call(name, arg_type, boxed, ret_type)
    ret += mcgen('''
out:
    visit_free(v);
''')
    # free the argument struct's members with the dealloc visitor
    ret += mcgen('''
    v = qapi_dealloc_visitor_new();
    visit_start_struct(v, NULL, NULL, 0, NULL);
''')
    if have_args:
        ret += mcgen('''
    visit_type_%(c_arg_type)s_members(v, &arg, NULL);
''',
                     c_arg_type=arg_type.c_name())
    ret += mcgen('''
    visit_end_struct(v, NULL);
    visit_free(v);
''')
    ret += mcgen('''
}
''')
    return ret
def gen_register_command(name, success_response, allow_oob, allow_preconfig):
    """Generate the qmp_register_command() call for one command, with its
    QCO_* option flags."""
    flags = []
    if not success_response:
        flags.append('QCO_NO_SUCCESS_RESP')
    if allow_oob:
        flags.append('QCO_ALLOW_OOB')
    if allow_preconfig:
        flags.append('QCO_ALLOW_PRECONFIG')
    if not flags:
        flags = ['QCO_NO_OPTIONS']
    return mcgen('''
    qmp_register_command(cmds, "%(name)s",
                         qmp_marshal_%(c_name)s, %(opts)s);
''',
                 name=name, c_name=c_name(name),
                 opts=" | ".join(flags))
def gen_registry(registry, prefix):
    """Wrap the accumulated registration calls *registry* in a
    PREFIXqmp_init_marshal() function definition."""
    ret = mcgen('''
void %(c_prefix)sqmp_init_marshal(QmpCommandList *cmds)
{
    QTAILQ_INIT(cmds);
''',
                c_prefix=c_name(prefix, protect=False))
    ret += registry
    ret += mcgen('''
}
''')
    return ret
class QAPISchemaGenCommandVisitor(QAPISchemaModularCVisitor):
    # Walks the QAPI schema and emits, per module, the command handler
    # declarations and marshaller definitions, plus one 'init' module
    # containing the registry initializer.  (No class docstring on purpose:
    # __init__ forwards the module-level __doc__ as the blurb.)
    def __init__(self, prefix):
        super().__init__(
            prefix, 'qapi-commands',
            ' * Schema-defined QAPI/QMP commands', None, __doc__)
        # accumulates the qmp_register_command() calls across all modules
        self._regy = QAPIGenCCode(None)
        # per-.c-file set of return types whose output marshaller was
        # already emitted, to avoid duplicate definitions
        self._visited_ret_types = {}
    def _begin_user_module(self, name):
        self._visited_ret_types[self._genc] = set()
        commands = self._module_basename('qapi-commands', name)
        types = self._module_basename('qapi-types', name)
        visit = self._module_basename('qapi-visit', name)
        self._genc.add(mcgen('''
#include "qemu/osdep.h"
#include "qapi/visitor.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qobject-output-visitor.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/dealloc-visitor.h"
#include "qapi/error.h"
#include "%(visit)s.h"
#include "%(commands)s.h"
''',
                             commands=commands, visit=visit))
        self._genh.add(mcgen('''
#include "%(types)s.h"
''',
                             types=types))
    def visit_end(self):
        # emit the 'init' module with the collected registry
        self._add_system_module('init', ' * QAPI Commands initialization')
        self._genh.add(mcgen('''
#include "qapi/qmp/dispatch.h"
void %(c_prefix)sqmp_init_marshal(QmpCommandList *cmds);
''',
                             c_prefix=c_name(self._prefix, protect=False)))
        self._genc.preamble_add(mcgen('''
#include "qemu/osdep.h"
#include "%(prefix)sqapi-commands.h"
#include "%(prefix)sqapi-init-commands.h"
''',
                                      prefix=self._prefix))
        self._genc.add(gen_registry(self._regy.get_content(), self._prefix))
    def visit_command(self, name, info, ifcond, features,
                      arg_type, ret_type, gen, success_response, boxed,
                      allow_oob, allow_preconfig):
        if not gen:
            return
        # FIXME: If T is a user-defined type, the user is responsible
        # for making this work, i.e. to make T's condition the
        # conjunction of the T-returning commands' conditions.  If T
        # is a built-in type, this isn't possible: the
        # qmp_marshal_output_T() will be generated unconditionally.
        if ret_type and ret_type not in self._visited_ret_types[self._genc]:
            self._visited_ret_types[self._genc].add(ret_type)
            with ifcontext(ret_type.ifcond,
                           self._genh, self._genc, self._regy):
                self._genc.add(gen_marshal_output(ret_type))
        with ifcontext(ifcond, self._genh, self._genc, self._regy):
            self._genh.add(gen_command_decl(name, arg_type, boxed, ret_type))
            self._genh.add(gen_marshal_decl(name))
            self._genc.add(gen_marshal(name, arg_type, boxed, ret_type))
            self._regy.add(gen_register_command(name, success_response,
                                                allow_oob, allow_preconfig))
def gen_commands(schema, output_dir, prefix):
    """Generate the qapi-commands C sources for *schema* into *output_dir*."""
    vis = QAPISchemaGenCommandVisitor(prefix)
    schema.visit(vis)
    vis.write(output_dir)
| dslutz/qemu | scripts/qapi/commands.py | Python | gpl-2.0 | 7,790 | [
"VisIt"
] | c6f217169398683fb40dd4b5ba5568dcc04a73ba31bb877fea086b6ba9ae6af4 |
"""
DIRAC.WorkloadManagementSystem.Client package
"""
__RCSID__ = "$Id$"
| fstagni/DIRAC | WorkloadManagementSystem/Client/__init__.py | Python | gpl-3.0 | 77 | [
"DIRAC"
] | b0e58e1903cbdfae4f85c06136aef233c866a73609678eedf46a93500b4fc755 |
"""
A collection of utility functions and classes. Originally, many
(but not all) were from the Python Cookbook -- hence the name cbook.
This module is safe to import from anywhere within matplotlib;
it imports matplotlib only at runtime.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange, zip
from itertools import repeat
import collections
import datetime
import errno
from functools import reduce
import glob
import gzip
import io
import locale
import os
import re
import sys
import time
import traceback
import types
import warnings
from weakref import ref, WeakKeyDictionary
import numpy as np
import numpy.ma as ma
class MatplotlibDeprecationWarning(UserWarning):
    """
    A class for issuing deprecation warnings for Matplotlib users.

    In light of the fact that Python builtin DeprecationWarnings are ignored
    by default as of Python 2.7 (see link below), this class was put in to
    allow for the signaling of deprecation, but via UserWarnings which are not
    ignored by default.

    http://docs.python.org/dev/whatsnew/2.7.html#the-future-for-python-2-x
    """
    pass
# short internal alias used throughout matplotlib
mplDeprecation = MatplotlibDeprecationWarning
def _generate_deprecation_message(since, message='', name='',
alternative='', pending=False,
obj_type='attribute'):
if not message:
altmessage = ''
if pending:
message = (
'The %(func)s %(obj_type)s will be deprecated in a '
'future version.')
else:
message = (
'The %(func)s %(obj_type)s was deprecated in version '
'%(since)s.')
if alternative:
altmessage = ' Use %s instead.' % alternative
message = ((message % {
'func': name,
'name': name,
'alternative': alternative,
'obj_type': obj_type,
'since': since}) +
altmessage)
return message
def warn_deprecated(
        since, message='', name='', alternative='', pending=False,
        obj_type='attribute'):
    """
    Display a standardized deprecation warning.

    Parameters
    ----------
    since : str
        The release at which this API became deprecated.
    message : str, optional
        Overrides the default deprecation message.  The format specifiers
        `%(func)s`, `%(alternative)s` and `%(obj_type)s` may be used for
        the name of the function, an alternative to it, and a friendly
        name for the type of object being deprecated.
    name : str, optional
        The name of the deprecated object.
    alternative : str, optional
        A replacement the user may use instead; mentioned in the warning
        when provided.
    pending : bool, optional
        If True, announce a *future* deprecation instead of a current one.
    obj_type : str, optional
        Friendly name for the kind of object being deprecated.

    Examples
    --------
    Basic example::

        # To warn of the deprecation of "matplotlib.name_of_module"
        warn_deprecated('1.4.0', name='matplotlib.name_of_module',
                        obj_type='module')
    """
    text = _generate_deprecation_message(
        since, message, name, alternative, pending, obj_type)
    warnings.warn(text, mplDeprecation, stacklevel=1)
def deprecated(since, message='', name='', alternative='', pending=False,
               obj_type='function'):
    """
    Decorator to mark a function as deprecated.

    Parameters
    ----------
    since : str
        The release at which this API became deprecated.  This is required.
    message : str, optional
        Override the default deprecation message.  The format specifiers
        `%(func)s`, `%(alternative)s` and `%(obj_type)s` may be used for
        the name of the function, an alternative to it, and a friendly
        name for the type of object being deprecated.
    name : str, optional
        The name of the deprecated function; if not provided the name is
        automatically determined from the passed in function, though this
        is useful for renamed functions, where the new function is just
        assigned to the name of the deprecated function, e.g.::

            def new_function():
                ...
            oldFunction = new_function

    alternative : str, optional
        An alternative function the user may use in place of the
        deprecated function; mentioned in the warning when provided.
    pending : bool, optional
        If True, uses a PendingDeprecationWarning instead of a
        DeprecationWarning.
    obj_type : str, optional
        The object type being deprecated.

    Examples
    --------
    Basic example::

        @deprecated('1.4.0')
        def the_function_to_deprecate():
            pass
    """
    def deprecate(func, message=message, name=name, alternative=alternative,
                  pending=pending):
        import functools
        import textwrap
        # unwrap classmethod objects so we can wrap the underlying function
        if isinstance(func, classmethod):
            try:
                func = func.__func__
            except AttributeError:
                # classmethods in Python2.6 and below lack the __func__
                # attribute so we need to hack around to get it
                method = func.__get__(None, object)
                if hasattr(method, '__func__'):
                    func = method.__func__
                elif hasattr(method, 'im_func'):
                    func = method.im_func
                else:
                    # Nothing we can do really...  just return the original
                    # classmethod
                    return func
            is_classmethod = True
        else:
            is_classmethod = False
        if not name:
            name = func.__name__
        message = _generate_deprecation_message(
            since, message, name, alternative, pending, obj_type)
        @functools.wraps(func)
        def deprecated_func(*args, **kwargs):
            # warn on every call of the wrapped function
            warnings.warn(message, mplDeprecation, stacklevel=2)
            return func(*args, **kwargs)
        # prepend a Sphinx '.. deprecated::' note to the docstring
        old_doc = deprecated_func.__doc__
        if not old_doc:
            old_doc = ''
        old_doc = textwrap.dedent(old_doc).strip('\n')
        message = message.strip()
        new_doc = (('\n.. deprecated:: %(since)s'
                    '\n    %(message)s\n\n' %
                    {'since': since, 'message': message}) + old_doc)
        if not old_doc:
            # This is to prevent a spurious 'unexected unindent' warning from
            # docutils when the original docstring was blank.
            new_doc += r'\ '
        deprecated_func.__doc__ = new_doc
        if is_classmethod:
            deprecated_func = classmethod(deprecated_func)
        return deprecated_func
    return deprecate
# On some systems, locale.getpreferredencoding returns None,
# which can break unicode; and the sage project reports that
# some systems have incorrect locale specifications, e.g.,
# an encoding instead of a valid locale name. Another
# pathological case that has been reported is an empty string.
# On some systems, getpreferredencoding sets the locale, which has
# side effects. Passing False eliminates those side effects.
def unicode_safe(s):
    """Decode byte string *s* to unicode using the preferred locale
    encoding, falling back to the platform default when no usable
    encoding can be determined.  Non-bytes input is returned unchanged."""
    import matplotlib
    if isinstance(s, bytes):
        try:
            # some systems report an invalid or empty locale; treat any
            # failure as "no preferred encoding"
            preferredencoding = locale.getpreferredencoding(
                matplotlib.rcParams['axes.formatter.use_locale']).strip()
            if not preferredencoding:
                preferredencoding = None
        except (ValueError, ImportError, AttributeError):
            preferredencoding = None
        if preferredencoding is None:
            return six.text_type(s)
        else:
            return six.text_type(s, preferredencoding)
    return s
class converter(object):
    """
    Convert a string field to a Python value, mapping a designated
    missing-value token (default ``'Null'``) to *missingval*.
    Subclasses override ``__call__`` with real parsing.
    """
    def __init__(self, missing='Null', missingval=None):
        # token marking a missing field, and the value it converts to
        self.missing = missing
        self.missingval = missingval

    def __call__(self, s):
        # base class performs the identity conversion
        return self.missingval if s == self.missing else s

    def is_missing(self, s):
        # blank/whitespace-only fields also count as missing
        return not s.strip() or s == self.missing
class tostr(converter):
    'convert to string or None'
    def __init__(self, missing='Null', missingval=''):
        # unlike the base class, the default missing value is '' not None
        converter.__init__(self, missing=missing, missingval=missingval)
class todatetime(converter):
    'convert to a datetime or None'
    def __init__(self, fmt='%Y-%m-%d', missing='Null', missingval=None):
        'use a :func:`time.strptime` format string for conversion'
        converter.__init__(self, missing, missingval)
        self.fmt = fmt
    def __call__(self, s):
        """Parse *s* with the stored format; return missingval if missing."""
        if self.is_missing(s):
            return self.missingval
        tup = time.strptime(s, self.fmt)
        # keep year..second (first six fields of the struct_time)
        return datetime.datetime(*tup[:6])
class todate(converter):
    'convert to a date or None'
    def __init__(self, fmt='%Y-%m-%d', missing='Null', missingval=None):
        'use a :func:`time.strptime` format string for conversion'
        converter.__init__(self, missing, missingval)
        self.fmt = fmt
    def __call__(self, s):
        """Parse *s* with the stored format; return missingval if missing."""
        if self.is_missing(s):
            return self.missingval
        tup = time.strptime(s, self.fmt)
        # keep year, month, day only
        return datetime.date(*tup[:3])
class tofloat(converter):
    'convert to a float or None'
    def __init__(self, missing='Null', missingval=None):
        # NOTE(review): missingval is assigned directly instead of being
        # forwarded to converter.__init__ — equivalent in effect, but
        # inconsistent with the sibling converters
        converter.__init__(self, missing)
        self.missingval = missingval
    def __call__(self, s):
        if self.is_missing(s):
            return self.missingval
        return float(s)
class toint(converter):
    'convert to an int or None'
    def __init__(self, missing='Null', missingval=None):
        # Bug fix: *missingval* was previously accepted but silently
        # dropped (the base __init__ was called without it, so its default
        # of None was always used).  Forward it so toint(missingval=...)
        # behaves like the sibling converters.
        converter.__init__(self, missing, missingval)
    def __call__(self, s):
        """Parse *s* as an int; return missingval for missing fields."""
        if self.is_missing(s):
            return self.missingval
        return int(s)
class _BoundMethodProxy(object):
    '''
    Our own proxy object which enables weak references to bound and unbound
    methods and arbitrary callables.  Pulls information about the function,
    class, and instance out of a bound method.  Stores a weak reference to
    the instance to support garbage collection.

    @organization: IBM Corporation
    @copyright: Copyright (c) 2005, 2006 IBM Corporation
    @license: The BSD License

    Minor bugfixes by Michael Droettboom
    '''
    def __init__(self, cb):
        # hash of the original callable, so equal proxies hash alike
        self._hash = hash(cb)
        self._destroy_callbacks = []
        try:
            try:
                # bound method: keep a weak ref to the instance so we do
                # not keep it alive
                if six.PY3:
                    self.inst = ref(cb.__self__, self._destroy)
                else:
                    self.inst = ref(cb.im_self, self._destroy)
            except TypeError:
                self.inst = None
            if six.PY3:
                self.func = cb.__func__
                self.klass = cb.__self__.__class__
            else:
                self.func = cb.im_func
                self.klass = cb.im_class
        except AttributeError:
            # not a bound method: store the plain callable directly
            self.inst = None
            self.func = cb
            self.klass = None
    def add_destroy_callback(self, callback):
        # *callback* is invoked (itself wrapped in a proxy) when the weakly
        # referenced instance dies
        self._destroy_callbacks.append(_BoundMethodProxy(callback))
    def _destroy(self, wk):
        # weakref finalizer: notify all registered destroy callbacks
        for callback in self._destroy_callbacks:
            try:
                callback(self)
            except ReferenceError:
                pass
    def __getstate__(self):
        d = self.__dict__.copy()
        # de-weak reference inst
        inst = d['inst']
        if inst is not None:
            d['inst'] = inst()
        return d
    def __setstate__(self, statedict):
        self.__dict__ = statedict
        inst = statedict['inst']
        # turn inst back into a weakref
        if inst is not None:
            self.inst = ref(inst)
    def __call__(self, *args, **kwargs):
        '''
        Proxy for a call to the weak referenced object.  Take
        arbitrary params to pass to the callable.

        Raises `ReferenceError`: When the weak reference refers to
        a dead object
        '''
        if self.inst is not None and self.inst() is None:
            raise ReferenceError
        elif self.inst is not None:
            # build a new instance method with a strong reference to the
            # instance
            mtd = types.MethodType(self.func, self.inst())
        else:
            # not a bound method, just return the func
            mtd = self.func
        # invoke the callable and return the result
        return mtd(*args, **kwargs)
    def __eq__(self, other):
        '''
        Compare the held function and instance with that held by
        another proxy.
        '''
        try:
            if self.inst is None:
                return self.func == other.func and other.inst is None
            else:
                return self.func == other.func and self.inst() == other.inst()
        except Exception:
            return False
    def __ne__(self, other):
        '''
        Inverse of __eq__.
        '''
        return not self.__eq__(other)
    def __hash__(self):
        return self._hash
class CallbackRegistry(object):
    """
    Handle registering and disconnecting for a set of signals and
    callbacks:

        >>> def oneat(x):
        ...     print('eat', x)
        >>> def ondrink(x):
        ...     print('drink', x)

        >>> from matplotlib.cbook import CallbackRegistry
        >>> callbacks = CallbackRegistry()

        >>> id_eat = callbacks.connect('eat', oneat)
        >>> id_drink = callbacks.connect('drink', ondrink)

        >>> callbacks.process('drink', 123)
        drink 123
        >>> callbacks.process('eat', 456)
        eat 456
        >>> callbacks.process('be merry', 456)  # nothing will be called
        >>> callbacks.disconnect(id_eat)
        >>> callbacks.process('eat', 456)       # nothing will be called

    In practice, one should always disconnect all callbacks when they
    are no longer needed to avoid dangling references (and thus memory
    leaks).  However, real code in matplotlib rarely does so, and due
    to its design, it is rather difficult to place this kind of code.
    To get around this, and prevent this class of memory leaks, we
    instead store weak references to bound methods only, so when the
    destination object needs to die, the CallbackRegistry won't keep
    it alive.  The Python stdlib weakref module can not create weak
    references to bound methods directly, so we need to create a proxy
    object to handle weak references to bound methods (or regular free
    functions).  This technique was shared by Peter Parente on his
    `"Mindtrove" blog
    <http://mindtrove.info/articles/python-weak-references/>`_.
    """
    def __init__(self):
        # signal -> {cid: proxy} and signal -> {proxy: cid} maps
        self.callbacks = dict()
        self._cid = 0
        self._func_cid_map = {}
    def __getstate__(self):
        # We cannot currently pickle the callables in the registry, so
        # return an empty dictionary.
        return {}
    def __setstate__(self, state):
        # re-initialise an empty callback registry
        self.__init__()
    def connect(self, s, func):
        """
        register *func* to be called when a signal *s* is generated
        func will be called
        """
        self._func_cid_map.setdefault(s, WeakKeyDictionary())
        # Note proxy not needed in python 3.
        # TODO rewrite this when support for python2.x gets dropped.
        proxy = _BoundMethodProxy(func)
        if proxy in self._func_cid_map[s]:
            # already connected: return the existing id
            return self._func_cid_map[s][proxy]
        proxy.add_destroy_callback(self._remove_proxy)
        self._cid += 1
        cid = self._cid
        self._func_cid_map[s][proxy] = cid
        self.callbacks.setdefault(s, dict())
        self.callbacks[s][cid] = proxy
        return cid
    def _remove_proxy(self, proxy):
        # drop *proxy* from every signal it is registered for; invoked when
        # the proxy's referent dies
        for signal, proxies in list(six.iteritems(self._func_cid_map)):
            try:
                del self.callbacks[signal][proxies[proxy]]
            except KeyError:
                pass
            if len(self.callbacks[signal]) == 0:
                del self.callbacks[signal]
                del self._func_cid_map[signal]
    def disconnect(self, cid):
        """
        disconnect the callback registered with callback id *cid*
        """
        for eventname, callbackd in list(six.iteritems(self.callbacks)):
            try:
                del callbackd[cid]
            except KeyError:
                continue
            else:
                # also purge the reverse map entry for this cid
                for signal, functions in list(
                        six.iteritems(self._func_cid_map)):
                    for function, value in list(six.iteritems(functions)):
                        if value == cid:
                            del functions[function]
                return
    def process(self, s, *args, **kwargs):
        """
        process signal *s*.  All of the functions registered to receive
        callbacks on *s* will be called with *\*args* and *\*\*kwargs*
        """
        if s in self.callbacks:
            for cid, proxy in list(six.iteritems(self.callbacks[s])):
                try:
                    proxy(*args, **kwargs)
                except ReferenceError:
                    # referent died between registration and dispatch
                    self._remove_proxy(proxy)
class silent_list(list):
    """
    A list of homogeneous matplotlib artists whose ``repr`` collapses to
    a short summary instead of dumping every element, keeping interactive
    output readable.
    """
    def __init__(self, type, seq=None):
        self.type = type
        if seq is not None:
            self.extend(seq)

    def __repr__(self):
        return '<a list of {0} {1} objects>'.format(len(self), self.type)

    def __str__(self):
        return repr(self)

    def __getstate__(self):
        # pickle support: the element type plus a plain-list copy
        return dict(type=self.type, seq=self[:])

    def __setstate__(self, state):
        self.type = state['type']
        self.extend(state['seq'])
class IgnoredKeywordWarning(UserWarning):
    """
    A class for issuing warnings about keyword arguments that will be ignored
    by matplotlib
    """
    # emitted by local_over_kwdict when a lower-priority kwarg loses
    pass
def local_over_kwdict(local_var, kwargs, *keys):
    """
    Return the highest-priority non-None value among *local_var* and the
    named entries of *kwargs*, warning about discarded conflicts.

    Priority order is
    ``local_var > kwargs[keys[0]] > ... > kwargs[keys[-1]]``; the first
    non-None candidate wins, and None is returned when all are None.
    Every key in *keys* is popped from *kwargs* in place.  Each non-None
    value that loses to an earlier one triggers an
    :class:`IgnoredKeywordWarning`.
    """
    winner = local_var
    for key in keys:
        candidate = kwargs.pop(key, None)
        if candidate is None:
            continue
        if winner is None:
            winner = candidate
        else:
            warnings.warn('"%s" keyword argument will be ignored' % key,
                          IgnoredKeywordWarning)
    return winner
def strip_math(s):
    """Strip the ``$`` delimiters and latex formatting tokens from a
    mathtext string, leaving the bare text."""
    s = s[1:-1]  # drop the surrounding '$' pair
    for token in (r'\mathdefault', r'\rm', r'\cal', r'\tt', r'\it',
                  '\\', '{', '}'):
        s = s.replace(token, '')
    return s
class Bunch(object):
    """
    Attribute-style record: collect a bunch of named values without
    writing a dedicated class::

        >>> point = Bunch(datum=2, squared=4, coord=12)
        >>> point.datum
        2

    By: Alex Martelli
    From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52308
    """
    def __init__(self, **kwds):
        self.__dict__.update(kwds)

    def __repr__(self):
        pairs = ('%s=%s' % (k, v) for k, v in self.__dict__.items())
        return 'Bunch(%s)' % ', '.join(pairs)
def unique(x):
    'Return a list of unique elements of *x*'
    # dict keys preserve first-occurrence order (like the original
    # dict-of-pairs construction) while removing duplicates.
    return list(dict.fromkeys(x))
def iterable(obj):
    'return true if *obj* is iterable'
    # iter() raises TypeError for non-iterables; that is the only
    # signal we rely on.
    try:
        iter(obj)
        return True
    except TypeError:
        return False
def is_string_like(obj):
'Return True if *obj* looks like a string'
if isinstance(obj, six.string_types):
return True
# numpy strings are subclass of str, ma strings are not
if ma.isMaskedArray(obj):
if obj.ndim == 0 and obj.dtype.kind in 'SU':
return True
else:
return False
try:
obj + ''
except:
return False
return True
def is_sequence_of_strings(obj):
    """
    Returns true if *obj* is iterable and contains strings
    """
    if not iterable(obj):
        return False
    if is_string_like(obj) and not isinstance(obj, np.ndarray):
        # A plain string is iterable but is not a *sequence* of strings.
        # Some pandas objects report as string-like; presumably their
        # ``.values`` holds the real elements -- TODO confirm against
        # the pandas callers.
        try:
            obj = obj.values
        except AttributeError:
            # not pandas
            return False
    for o in obj:
        if not is_string_like(o):
            return False
    return True
def is_writable_file_like(obj):
    'return true if *obj* looks like a file object with a *write* method'
    # Missing attribute -> None -> callable() is False, matching the
    # original hasattr + callable check.
    return callable(getattr(obj, 'write', None))
def file_requires_unicode(x):
    """
    Returns `True` if the given writable file-like object requires Unicode
    to be written to it.
    """
    # Writing an empty byte string is harmless; a text-mode stream
    # rejects it with TypeError, which is the signal we probe for.
    try:
        x.write(b'')
    except TypeError:
        return True
    return False
def is_scalar(obj):
    'return true if *obj* is not string like and is not iterable'
    # De Morgan form of: not string-like and not iterable.
    return not (is_string_like(obj) or iterable(obj))
def is_numlike(obj):
    """
    Return True if *obj* looks like a number, i.e. supports addition
    with the int ``1``.
    """
    try:
        obj + 1
    except Exception:
        # Fix: catch Exception rather than a bare except so that
        # SystemExit/KeyboardInterrupt propagate instead of being
        # misreported as "not number-like".
        return False
    else:
        return True
def to_filehandle(fname, flag='rU', return_opened=False):
    """
    *fname* can be a filename or a file handle.  Support for gzipped
    files is automatic, if the filename ends in .gz.  *flag* is a
    read/write flag for :func:`file`
    """
    if is_string_like(fname):
        if fname.endswith('.gz'):
            # get rid of 'U' in flag for gzipped files.
            flag = flag.replace('U', '')
            fh = gzip.open(fname, flag)
        elif fname.endswith('.bz2'):
            # get rid of 'U' in flag for bz2 files
            flag = flag.replace('U', '')
            import bz2
            fh = bz2.BZ2File(fname, flag)
        else:
            fh = open(fname, flag)
        opened = True
    elif hasattr(fname, 'seek'):
        # Already an open, seekable file-like object; pass it through
        # unchanged -- the caller keeps responsibility for closing it.
        fh = fname
        opened = False
    else:
        raise ValueError('fname must be a string or file handle')
    if return_opened:
        # *opened* tells the caller whether this function opened the
        # handle (and thus whether the caller should close it).
        return fh, opened
    return fh
def is_scalar_or_string(val):
    """Return whether the given object is a scalar or string like."""
    if is_string_like(val):
        return True
    return not iterable(val)
def _string_to_bool(s):
    """Map the strings 'on'/'off' to True/False; pass non-strings through."""
    if not is_string_like(s):
        return s
    lookup = {'on': True, 'off': False}
    try:
        return lookup[s]
    except KeyError:
        raise ValueError("string argument must be either 'on' or 'off'")
def get_sample_data(fname, asfileobj=True):
    """
    Return a sample data file.  *fname* is a path relative to the
    `mpl-data/sample_data` directory.  If *asfileobj* is `True`
    return a file object, otherwise just a file path.

    Set the rc parameter examples.directory to the directory where we should
    look, if sample_data files are stored in a location different than
    default (which is 'mpl-data/sample_data` at the same level of 'matplotlib`
    Python module files).

    If the filename ends in .gz, the file is implicitly ungzipped.
    """
    # Imported here (not at module level) to avoid a circular import.
    import matplotlib

    if matplotlib.rcParams['examples.directory']:
        root = matplotlib.rcParams['examples.directory']
    else:
        root = os.path.join(os.path.dirname(__file__),
                            "mpl-data", "sample_data")
    path = os.path.join(root, fname)

    if asfileobj:
        # Known text formats are opened in text mode; everything else
        # (images, pickles, ...) is opened binary.
        if (os.path.splitext(fname)[-1].lower() in
                ('.csv', '.xrc', '.txt')):
            mode = 'r'
        else:
            mode = 'rb'

        base, ext = os.path.splitext(fname)
        if ext == '.gz':
            return gzip.open(path, mode)
        else:
            return open(path, mode)
    else:
        return path
def flatten(seq, scalarp=is_scalar_or_string):
    """
    Returns a generator of flattened nested containers

    For example:

        >>> from matplotlib.cbook import flatten
        >>> l = (('John', ['Hunter']), (1, 23), [[([42, (5, 23)], )]])
        >>> print(list(flatten(l)))
        ['John', 'Hunter', 1, 23, 42, 5, 23]

    By: Composite of Holger Krekel and Luther Blissett
    From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/121294
    and Recipe 1.12 in cookbook
    """
    for element in seq:
        # *scalarp* decides what counts as an atom; everything else is
        # recursed into depth-first.
        if scalarp(element):
            yield element
        else:
            for nested in flatten(element, scalarp):
                yield nested
class Sorter(object):
    """
    Sort by attribute or item

    Example usage::

      sort = Sorter()
      list = [(1, 2), (4, 8), (0, 3)]
      dict = [{'a': 3, 'b': 4}, {'a': 5, 'b': 2}, {'a': 0, 'b': 0},
              {'a': 9, 'b': 9}]
      sort(list)       # default sort
      sort(list, 1)    # sort by index 1
      sort(dict, 'a')  # sort a list of dicts by key 'a'
    """

    def _helper(self, data, aux, inplace):
        # *aux* holds (key, original_index) pairs; sorting it is stable
        # for equal keys because the index breaks ties.
        aux.sort()
        ordered = [data[index] for _key, index in aux]
        if inplace:
            data[:] = ordered
        return ordered

    def byItem(self, data, itemindex=None, inplace=1):
        if itemindex is None:
            if inplace:
                data.sort()
                return data
            ordered = data[:]
            ordered.sort()
            return ordered
        aux = [(item[itemindex], pos) for pos, item in enumerate(data)]
        return self._helper(data, aux, inplace)

    def byAttribute(self, data, attributename, inplace=1):
        aux = [(getattr(item, attributename), pos)
               for pos, item in enumerate(data)]
        return self._helper(data, aux, inplace)

    # a couple of handy synonyms
    sort = byItem
    __call__ = byItem
class Xlator(dict):
    """
    All-in-one multiple-string-substitution class

    Example usage::

      text = "Larry Wall is the creator of Perl"
      adict = {
          "Larry Wall" : "Guido van Rossum",
          "creator" : "Benevolent Dictator for Life",
          "Perl" : "Python",
          }
      print multiple_replace(adict, text)
      xlat = Xlator(adict)
      print xlat.xlat(text)
    """

    def _make_regex(self):
        """ Build re object based on the keys of the current dictionary """
        escaped = (re.escape(key) for key in self)
        return re.compile("|".join(escaped))

    def __call__(self, match):
        """ Handler invoked for each regex *match* """
        # Look up the replacement for the matched source string.
        return self[match.group(0)]

    def xlat(self, text):
        """ Translate *text*, returns the modified text. """
        # The instance itself is the substitution callback.
        return self._make_regex().sub(self, text)
def soundex(name, len=4):
    """ soundex module conforming to Odell-Russell algorithm """
    # Soundex digit for each letter A..Z.
    soundex_digits = '01230120022455012623010202'
    first_letter = ''
    code = ''

    # Translate letters in name to soundex digits, skipping duplicate
    # consecutive digits.
    for ch in name.upper():
        if not ch.isalpha():
            continue
        if not first_letter:
            first_letter = ch  # Remember first letter
        digit = soundex_digits[ord(ch) - ord('A')]
        if not code or digit != code[-1]:
            code += digit

    # Replace first digit with first letter, drop all 0s, then
    # truncate or 0-pad to *len* characters.
    code = first_letter + code[1:]
    code = code.replace('0', '')
    return (code + '0' * len)[:len]
class Null(object):
    """ Null objects always and reliably "do nothing." """
    # Bug fixed: under Python 3, ``__bool__`` must return a bool; the old
    # code returned the int ``0``, which makes ``bool(Null())`` raise
    # ``TypeError: __bool__ should return bool, returned int``.  Returning
    # ``False`` is correct on both Python 2 (via __nonzero__) and 3, so
    # the six.PY3 branching is no longer needed.

    def __init__(self, *args, **kwargs):
        pass

    def __call__(self, *args, **kwargs):
        return self

    def __str__(self):
        return "Null()"

    def __repr__(self):
        return "Null()"

    def __bool__(self):
        return False
    __nonzero__ = __bool__  # Python 2 spelling of the same protocol

    def __getattr__(self, name):
        return self

    def __setattr__(self, name, value):
        return self

    def __delattr__(self, name):
        return self
def mkdirs(newdir, mode=0o777):
    """
    make directory *newdir* recursively, and set *mode*.  Equivalent to ::

        > mkdir -p NEWDIR
        > chmod MODE NEWDIR
    """
    # this functionality is now in core python as of 3.2
    # LPY DROP
    if six.PY3:
        os.makedirs(newdir, mode=mode, exist_ok=True)
    else:
        try:
            os.makedirs(newdir, mode=mode)
        except OSError as exception:
            # Pre-3.2 makedirs has no exist_ok: tolerate an existing
            # directory, but re-raise any other failure (permissions,
            # bad path, ...).
            if exception.errno != errno.EEXIST:
                raise
class GetRealpathAndStat(object):
    """Callable that memoizes ``(realpath, stat_key)`` lookups per path."""

    def __init__(self):
        # path -> (realpath, stat_key) cache; never invalidated.
        self._cache = {}

    def __call__(self, path):
        result = self._cache.get(path)
        if result is None:
            realpath = os.path.realpath(path)
            if sys.platform == 'win32':
                # On Windows the resolved path itself is used as the
                # identity key (st_ino is not a reliable file identity
                # there -- NOTE(review): confirm for the targeted
                # Python/OS versions).
                stat_key = realpath
            else:
                stat = os.stat(realpath)
                # (inode, device) uniquely identifies the file even
                # through symlinks/hardlinks.
                stat_key = (stat.st_ino, stat.st_dev)
            result = realpath, stat_key
            self._cache[path] = result
        return result
# Module-level singleton so the realpath/stat cache is shared process-wide.
get_realpath_and_stat = GetRealpathAndStat()
def dict_delall(d, keys):
    'delete all of the *keys* from the :class:`dict` *d*'
    # pop with a default is the EAFP-free equivalent of
    # try/del/except KeyError: missing keys are simply ignored.
    for key in keys:
        d.pop(key, None)
class RingBuffer(object):
    """
    A fixed-size ring buffer: once *size_max* elements have been
    appended, each further append overwrites the oldest element.
    """
    # Bug fixed: the original implementation swapped ``self.__class__``
    # to a nested ``__Full`` class when the buffer filled up.  Because of
    # Python's private-name mangling, the bare reference ``__Full``
    # inside ``append`` compiled to the *global* name
    # ``_RingBuffer__Full``, which does not exist -- so the append that
    # filled the buffer raised NameError.  The overwrite behaviour is
    # now implemented with an explicit cursor and a length check.

    def __init__(self, size_max):
        self.max = size_max          # capacity of the buffer
        self.data = []               # stored elements (<= self.max)
        self.cur = 0                 # index of the oldest element once full

    def append(self, x):
        """Append an element, overwriting the oldest once the buffer is full."""
        if len(self.data) < self.max:
            self.data.append(x)
        else:
            self.data[self.cur] = x
            self.cur = (self.cur + 1) % self.max

    def get(self):
        """ Return a list of elements from the oldest to the newest. """
        if len(self.data) < self.max:
            return self.data
        # Rotate so the element at ``cur`` (the oldest) comes first.
        return self.data[self.cur:] + self.data[:self.cur]

    def __get_item__(self, i):
        # Kept for backward compatibility: ``__get_item__`` was never a
        # real dunder method (apparently a typo for ``__getitem__``).
        return self.data[i % len(self.data)]

    # Provide the real indexing protocol as well.
    __getitem__ = __get_item__
def get_split_ind(seq, N):
    """
    *seq* is a list of words.  Return the index into seq such that::

        len(' '.join(seq[:ind])<=N

    .
    """
    running = 0
    for ind, word in enumerate(seq):
        running += len(word) + 1  # +1 accounts for the joining space
        if running >= N:
            return ind
    # Everything fits on one line.
    return len(seq)
def wrap(prefix, text, cols):
    'wrap *text* with *prefix* at length *cols*'
    # Continuation lines are padded with spaces the width of *prefix*.
    pad = ' ' * len(prefix.expandtabs())
    available = cols - len(pad)

    seq = text.split(' ')
    Nseq = len(seq)
    ind = 0
    lines = []
    # NOTE(review): if the first remaining word alone is wider than
    # *available*, get_split_ind returns 0 and this loop would not
    # advance -- confirm callers always supply a sufficiently large
    # *cols*.
    while ind < Nseq:
        lastInd = ind
        ind += get_split_ind(seq[ind:], available)
        lines.append(seq[lastInd:ind])

    # add the prefix to the first line, pad with spaces otherwise
    ret = prefix + ' '.join(lines[0]) + '\n'
    for line in lines[1:]:
        ret += pad + ' '.join(line) + '\n'
    return ret
# A regular expression used to determine the amount of space to
# remove. It looks for the first sequence of spaces immediately
# following the first newline, or at the beginning of the string.
_find_dedent_regex = re.compile("(?:(?:\n\r?)|^)( *)\S")
# A cache to hold the regexs that actually remove the indent.
_dedent_regex = {}
def dedent(s):
    """
    Remove excess indentation from docstring *s*.

    Discards any leading blank lines, then removes up to n whitespace
    characters from each line, where n is the number of leading
    whitespace characters in the first line. It differs from
    textwrap.dedent in its deletion of leading blank lines and its use
    of the first non-blank line to determine the indentation.

    It is also faster in most cases.
    """
    # This implementation has a somewhat obtuse use of regular
    # expressions.  However, this function accounted for almost 30% of
    # matplotlib startup time, so it is worthy of optimization at all
    # costs.

    if not s:      # includes case of s is None
        return ''

    match = _find_dedent_regex.match(s)
    if match is None:
        return s

    # This is the number of spaces to remove from the left-hand side.
    nshift = match.end(1) - match.start(1)
    if nshift == 0:
        return s

    # Get a regex that will remove *up to* nshift spaces from the
    # beginning of each line.  If it isn't in the cache, generate it.
    # (The per-nshift cache avoids recompiling for each docstring.)
    unindent = _dedent_regex.get(nshift, None)
    if unindent is None:
        unindent = re.compile("\n\r? {0,%d}" % nshift)
        _dedent_regex[nshift] = unindent

    # Collapse each newline + indent to a bare newline, then strip the
    # leading blank line(s) and trailing whitespace.
    result = unindent.sub("\n", s).strip()
    return result
def listFiles(root, patterns='*', recurse=1, return_folders=0):
    """
    Recursively list files

    from Parmar and Martelli in the Python Cookbook
    """
    import os.path
    import fnmatch

    # Expand patterns from semicolon-separated string to list
    pattern_list = patterns.split(';')
    results = []

    for dirname, dirs, files in os.walk(root):
        # Append to results all relevant files (and perhaps folders)
        for name in files:
            fullname = os.path.normpath(os.path.join(dirname, name))
            if return_folders or os.path.isfile(fullname):
                # First matching pattern wins; no duplicates per file.
                for pattern in pattern_list:
                    if fnmatch.fnmatch(name, pattern):
                        results.append(fullname)
                        break

        # Block recursion if recursion was disallowed
        # (breaking out of os.walk after the top directory).
        if not recurse:
            break

    return results
def get_recursive_filelist(args):
    """
    Recurse all the files and dirs in *args* ignoring symbolic links
    and return the files as a list of strings
    """
    collected = []
    for arg in args:
        if os.path.isfile(arg):
            collected.append(arg)
        elif os.path.isdir(arg):
            # Directories are expanded recursively (folders included,
            # exactly as before).
            collected.extend(listFiles(arg, recurse=1, return_folders=1))
    # Symbolic links are filtered out of the final result.
    return [name for name in collected if not os.path.islink(name)]
def pieces(seq, num=2):
    "Break up the *seq* into *num* tuples"
    # Yield consecutive slices of length *num*; the final slice may be
    # shorter.  An empty *seq* yields nothing.
    for start in range(0, len(seq), num):
        yield seq[start:start + num]
def exception_to_str(s=None):
if six.PY3:
sh = io.StringIO()
else:
sh = io.BytesIO()
if s is not None:
print(s, file=sh)
traceback.print_exc(file=sh)
return sh.getvalue()
def allequal(seq):
    """
    Return *True* if all elements of *seq* compare equal.  If *seq* is
    0 or 1 length, return *True*
    """
    if len(seq) < 2:
        return True
    first = seq[0]
    # Preserve the original's use of the != operator for comparison.
    return not any(item != first for item in seq[1:])
def alltrue(seq):
    """
    Return *True* if all elements of *seq* evaluate to *True*.  If
    *seq* is empty, return *False*.
    """
    # Unlike the builtin all(), an empty sequence yields False here.
    if not len(seq):
        return False
    return all(seq)
def onetrue(seq):
    """
    Return *True* if one element of *seq* is *True*.  It *seq* is
    empty, return *False*.
    """
    if not len(seq):
        return False
    return any(seq)
def allpairs(x):
    """
    return all possible pairs in sequence *x*

    Condensed by Alex Martelli from this thread_ on c.l.python

    .. _thread: http://groups.google.com/groups?q=all+pairs+group:*python*&hl=en&lr=&ie=UTF-8&selm=mailman.4028.1096403649.5135.python-list%40python.org&rnum=1
    """
    # Each pair is (later element, earlier element), matching the
    # original comprehension's ordering.
    pairs = []
    for i, earlier in enumerate(x):
        for later in x[i + 1:]:
            pairs.append((later, earlier))
    return pairs
class maxdict(dict):
    """
    A dictionary with a maximum size; this doesn't override all the
    relevant methods to contrain size, just setitem, so use with
    caution
    """

    def __init__(self, maxsize):
        dict.__init__(self)
        self.maxsize = maxsize
        # Insertion-order record of keys, used for FIFO eviction.
        self._killkeys = []

    def __setitem__(self, k, v):
        if k not in self:
            # Evict the oldest key before admitting a brand-new one;
            # updates to existing keys never evict.
            if len(self) >= self.maxsize:
                oldest = self._killkeys.pop(0)
                del self[oldest]
            self._killkeys.append(k)
        dict.__setitem__(self, k, v)
class Stack(object):
    """
    Implement a stack where elements can be pushed on and you can move
    back and forth.  But no pop.  Should mimic home / back / forward
    in a browser
    """

    def __init__(self, default=None):
        # *default* is returned by __call__ when the stack is empty.
        self.clear()
        self._default = default

    def __call__(self):
        'return the current element, or None'
        if not len(self._elements):
            return self._default
        else:
            return self._elements[self._pos]

    def __len__(self):
        return self._elements.__len__()

    def __getitem__(self, ind):
        return self._elements.__getitem__(ind)

    def forward(self):
        'move the position forward and return the current element'
        N = len(self._elements)
        if self._pos < N - 1:
            self._pos += 1
        return self()

    def back(self):
        'move the position back and return the current element'
        if self._pos > 0:
            self._pos -= 1
        return self()

    def push(self, o):
        """
        push object onto stack at current position - all elements
        occurring later than the current position are discarded
        """
        self._elements = self._elements[:self._pos + 1]
        self._elements.append(o)
        self._pos = len(self._elements) - 1
        return self()

    def home(self):
        'push the first element onto the top of the stack'
        if not len(self._elements):
            return
        self.push(self._elements[0])
        return self()

    def empty(self):
        # True when no element has been pushed (or after clear()).
        return len(self._elements) == 0

    def clear(self):
        'empty the stack'
        self._pos = -1
        self._elements = []

    def bubble(self, o):
        """
        raise *o* to the top of the stack and return *o*.  *o* must be
        in the stack
        """
        if o not in self._elements:
            raise ValueError('Unknown element o')
        old = self._elements[:]
        self.clear()
        bubbles = []
        # Re-push every non-matching element in order, collecting each
        # occurrence of *o*; then push *o* once per collected occurrence
        # so duplicates are preserved at the top of the stack.
        for thiso in old:
            if thiso == o:
                bubbles.append(thiso)
            else:
                self.push(thiso)
        for thiso in bubbles:
            self.push(o)
        return o

    def remove(self, o):
        'remove element *o* from the stack'
        if o not in self._elements:
            raise ValueError('Unknown element o')
        old = self._elements[:]
        self.clear()
        # Rebuild the stack without any occurrence of *o*.
        for thiso in old:
            if thiso == o:
                continue
            else:
                self.push(thiso)
def popall(seq):
    'empty a list in place'
    # ``del seq[:]`` clears the sequence in one O(n) operation instead
    # of repeated pop() calls, and drops the py2-only ``xrange`` name.
    del seq[:]
def finddir(o, match, case=False):
    """
    return all attributes of *o* which match string in match.  if case
    is True require an exact case match.
    """
    if case:
        names = [(attr, attr) for attr in dir(o) if is_string_like(attr)]
    else:
        # Compare case-insensitively: lower-case both the attribute
        # names and the needle.
        names = [(attr.lower(), attr) for attr in dir(o)
                 if is_string_like(attr)]
        match = match.lower()
    return [orig for key, orig in names if key.find(match) >= 0]
def reverse_dict(d):
    'reverse the dictionary -- may lose data if values are not unique!'
    # Later duplicates of a value overwrite earlier ones, as before.
    return {v: k for k, v in d.items()}
def restrict_dict(d, keys):
    """
    Return a dictionary that contains those keys that appear in both
    d and keys, with values from d.
    """
    return {k: v for k, v in d.items() if k in keys}
def report_memory(i=0):  # argument may go away
    'return the memory consumed by process'
    # Each supported platform shells out to its own process-status tool
    # and parses a memory column from the output; there is no portable
    # stdlib way to get RSS for the py2/3 versions this file targets.
    from matplotlib.compat.subprocess import Popen, PIPE
    pid = os.getpid()
    if sys.platform == 'sunos5':
        try:
            a2 = Popen('ps -p %d -o osz' % pid, shell=True,
                       stdout=PIPE).stdout.readlines()
        except OSError:
            raise NotImplementedError(
                "report_memory works on Sun OS only if "
                "the 'ps' program is found")
        # Last line holds the osz value (memory size in pages).
        mem = int(a2[-1].strip())
    elif sys.platform.startswith('linux'):
        try:
            a2 = Popen('ps -p %d -o rss,sz' % pid, shell=True,
                       stdout=PIPE).stdout.readlines()
        except OSError:
            raise NotImplementedError(
                "report_memory works on Linux only if "
                "the 'ps' program is found")
        # Second column (sz) of the single data row.
        mem = int(a2[1].split()[1])
    elif sys.platform.startswith('darwin'):
        try:
            a2 = Popen('ps -p %d -o rss,vsz' % pid, shell=True,
                       stdout=PIPE).stdout.readlines()
        except OSError:
            raise NotImplementedError(
                "report_memory works on Mac OS only if "
                "the 'ps' program is found")
        # First column (rss) of the single data row.
        mem = int(a2[1].split()[0])
    elif sys.platform.startswith('win'):
        try:
            a2 = Popen(["tasklist", "/nh", "/fi", "pid eq %d" % pid],
                       stdout=PIPE).stdout.read()
        except OSError:
            raise NotImplementedError(
                "report_memory works on Windows only if "
                "the 'tasklist' program is found")
        # tasklist prints e.g. "1,234 K"; strip the thousands commas.
        mem = int(a2.strip().split()[-2].replace(',', ''))
    else:
        raise NotImplementedError(
            "We don't have a memory monitor for %s" % sys.platform)
    return mem
_safezip_msg = 'In safezip, len(args[0])=%d but len(args[%d])=%d'


def safezip(*args):
    'make sure *args* are equal len before zipping'
    expected = len(args[0])
    # Validate each subsequent argument against the first one's length
    # before producing the zipped list.
    for pos, arg in enumerate(args[1:], start=1):
        if len(arg) != expected:
            raise ValueError(_safezip_msg % (expected, pos, len(arg)))
    return list(zip(*args))
def issubclass_safe(x, klass):
    'return issubclass(x, klass) and return False on a TypeError'
    # issubclass raises TypeError when *x* is not a class; treat that
    # as "not a subclass" instead of propagating.
    try:
        result = issubclass(x, klass)
    except TypeError:
        result = False
    return result
def safe_masked_invalid(x):
    """Mask nan/inf entries of *x*; return the plain array unchanged when
    its dtype does not support invalid-value masking (e.g. strings)."""
    arr = np.asanyarray(x)
    try:
        masked = np.ma.masked_invalid(arr, copy=False)
        masked.shrink_mask()
    except TypeError:
        return arr
    return masked
class MemoryMonitor(object):
    """Record successive report_memory() samples and summarize growth."""

    def __init__(self, nmax=20000):
        # Fixed-size sample buffer; calls beyond nmax set _overflow.
        self._nmax = nmax
        self._mem = np.zeros((self._nmax,), np.int32)
        self.clear()

    def clear(self):
        self._n = 0
        self._overflow = False

    def __call__(self):
        # Take one sample and return it; silently stops recording
        # (but keeps returning samples) once the buffer is full.
        mem = report_memory()
        if self._n < self._nmax:
            self._mem[self._n] = mem
            self._n += 1
        else:
            self._overflow = True
        return mem

    def report(self, segments=4):
        # Print memory and per-call deltas at *segments* checkpoints.
        n = self._n
        segments = min(n, segments)
        dn = int(n / segments)
        ii = list(xrange(0, n, dn))
        ii[-1] = n - 1
        print()
        print('memory report: i, mem, dmem, dmem/nloops')
        print(0, self._mem[0])
        for i in range(1, len(ii)):
            di = ii[i] - ii[i - 1]
            if di == 0:
                continue
            dm = self._mem[ii[i]] - self._mem[ii[i - 1]]
            print('%5d %5d %3d %8.3f' % (ii[i], self._mem[ii[i]],
                                         dm, dm / float(di)))
        if self._overflow:
            print("Warning: array size was too small for the number of calls.")

    def xy(self, i0=0, isub=1):
        # Return (sample index, memory) arrays for plotting.
        x = np.arange(i0, self._n, isub)
        return x, self._mem[i0:self._n:isub]

    def plot(self, i0=0, isub=1, fig=None):
        # Quick-look plot of the recorded samples.
        if fig is None:
            from .pylab import figure
            fig = figure()

        ax = fig.add_subplot(111)
        ax.plot(*self.xy(i0, isub))
        fig.canvas.draw()
def print_cycles(objects, outstream=sys.stdout, show_progress=False):
    """
    *objects*
        A list of objects to find cycles in.  It is often useful to
        pass in gc.garbage to find the cycles that are preventing some
        objects from being garbage collected.

    *outstream*
        The stream for output.

    *show_progress*
        If True, print the number of objects reached as they are found.
    """
    import gc
    from types import FrameType

    def print_path(path):
        # Render one reference cycle, annotating how each step refers
        # to the next (dict key/value, list index, tuple member, ...).
        for i, step in enumerate(path):
            # next "wraps around"
            next = path[(i + 1) % len(path)]

            outstream.write("   %s -- " % str(type(step)))
            if isinstance(step, dict):
                for key, val in six.iteritems(step):
                    if val is next:
                        outstream.write("[%s]" % repr(key))
                        break
                    if key is next:
                        outstream.write("[key] = %s" % repr(val))
                        break
            elif isinstance(step, list):
                outstream.write("[%d]" % step.index(next))
            elif isinstance(step, tuple):
                outstream.write("( tuple )")
            else:
                outstream.write(repr(step))
            outstream.write(" ->\n")
        outstream.write("\n")

    def recurse(obj, start, all, current_path):
        # Depth-first walk over gc referents; *all* marks visited ids.
        if show_progress:
            outstream.write("%d\r" % len(all))

        all[id(obj)] = None

        referents = gc.get_referents(obj)
        for referent in referents:
            # If we've found our way back to the start, this is
            # a cycle, so print it out
            if referent is start:
                print_path(current_path)

            # Don't go back through the original list of objects, or
            # through temporary references to the object, since those
            # are just an artifact of the cycle detector itself.
            elif referent is objects or isinstance(referent, FrameType):
                continue

            # We haven't seen this object before, so recurse
            elif id(referent) not in all:
                recurse(referent, start, all, current_path + [obj])

    for obj in objects:
        outstream.write("Examining: %r\n" % (obj,))
        recurse(obj, obj, {}, [])
class Grouper(object):
    """
    This class provides a lightweight way to group arbitrary objects
    together into disjoint sets when a full-blown graph data structure
    would be overkill.

    Objects can be joined using :meth:`join`, tested for connectedness
    using :meth:`joined`, and all disjoint sets can be retreived by
    using the object as an iterator.

    The objects being joined must be hashable and weak-referenceable.

    For example:

        >>> from matplotlib.cbook import Grouper
        >>> class Foo(object):
        ...     def __init__(self, s):
        ...         self.s = s
        ...     def __repr__(self):
        ...         return self.s
        ...
        >>> a, b, c, d, e, f = [Foo(x) for x in 'abcdef']
        >>> grp = Grouper()
        >>> grp.join(a, b)
        >>> grp.join(b, c)
        >>> grp.join(d, e)
        >>> sorted(map(tuple, grp))
        [(a, b, c), (d, e)]
        >>> grp.joined(a, b)
        True
        >>> grp.joined(a, c)
        True
        >>> grp.joined(a, d)
        False
    """
    def __init__(self, init=()):
        # _mapping: weakref(obj) -> group list; all members of one group
        # share the *same* list object, so identity comparison of the
        # lists answers "joined?".
        mapping = self._mapping = {}
        for x in init:
            mapping[ref(x)] = [ref(x)]

    def __contains__(self, item):
        return ref(item) in self._mapping

    def clean(self):
        """
        Clean dead weak references from the dictionary
        """
        mapping = self._mapping
        to_drop = [key for key in mapping if key() is None]
        for key in to_drop:
            # Remove the dead ref both from the dict and from the group
            # list that still contains it.
            val = mapping.pop(key)
            val.remove(key)

    def join(self, a, *args):
        """
        Join given arguments into the same set.  Accepts one or more
        arguments.
        """
        mapping = self._mapping
        set_a = mapping.setdefault(ref(a), [ref(a)])

        for arg in args:
            set_b = mapping.get(ref(arg))
            if set_b is None:
                set_a.append(ref(arg))
                mapping[ref(arg)] = set_a
            elif set_b is not set_a:
                # Union by size: merge the smaller group into the
                # larger one and repoint all its members.
                if len(set_b) > len(set_a):
                    set_a, set_b = set_b, set_a
                set_a.extend(set_b)
                for elem in set_b:
                    mapping[elem] = set_a

        self.clean()

    def joined(self, a, b):
        """
        Returns True if *a* and *b* are members of the same set.
        """
        self.clean()

        mapping = self._mapping
        try:
            # Identity check: members of one set share the same list.
            return mapping[ref(a)] is mapping[ref(b)]
        except KeyError:
            return False

    def remove(self, a):
        # Remove *a* from its group (no-op if *a* is unknown).
        self.clean()

        mapping = self._mapping
        seta = mapping.pop(ref(a), None)
        if seta is not None:
            seta.remove(ref(a))

    def __iter__(self):
        """
        Iterate over each of the disjoint sets as a list.

        The iterator is invalid if interleaved with calls to join().
        """
        self.clean()

        class Token:
            pass
        token = Token()

        # Mark each group as we come across if by appending a token,
        # and don't yield it twice
        for group in six.itervalues(self._mapping):
            if not group[-1] is token:
                yield [x() for x in group]
                group.append(token)

        # Cleanup the tokens
        for group in six.itervalues(self._mapping):
            if group[-1] is token:
                del group[-1]

    def get_siblings(self, a):
        """
        Returns all of the items joined with *a*, including itself.
        """
        self.clean()

        siblings = self._mapping.get(ref(a), [ref(a)])
        return [x() for x in siblings]
def simple_linear_interpolation(a, steps):
    """Resample *a* along axis 0, inserting ``steps - 1`` linearly
    interpolated points between each pair of consecutive samples.
    ``steps == 1`` returns *a* unchanged."""
    if steps == 1:
        return a

    steps = int(np.floor(steps))
    out_shape = list(a.shape)
    out_shape[0] = (len(a) - 1) * steps + 1
    out = np.zeros(out_shape, a.dtype)
    out[0] = a[0]

    lo = a[0:-1]
    hi = a[1:]
    per_step = ((hi - lo) / steps)
    # Fill each interleaved stride with the k-th interpolated values,
    # then drop the exact endpoints into place.
    for k in range(1, steps):
        out[k::steps] = per_step * k + lo
    out[steps::steps] = hi

    return out
def recursive_remove(path):
    # Delete *path*: a plain file is removed directly; a directory has
    # its contents (including dot-files) removed recursively.
    if os.path.isdir(path):
        for fname in (glob.glob(os.path.join(path, '*')) +
                      glob.glob(os.path.join(path, '.*'))):
            if os.path.isdir(fname):
                recursive_remove(fname)
                # NOTE(review): os.removedirs also removes empty parent
                # directories, which could delete *path* itself while
                # still iterating -- confirm shutil.rmtree-like behavior
                # is really intended here.
                os.removedirs(fname)
            else:
                os.remove(fname)
        #os.removedirs(path)
    else:
        os.remove(path)
def delete_masked_points(*args):
    """
    Find all masked and/or non-finite points in a set of arguments,
    and return the arguments with only the unmasked points remaining.

    Arguments can be in any of 5 categories:

    1) 1-D masked arrays
    2) 1-D ndarrays
    3) ndarrays with more than one dimension
    4) other non-string iterables
    5) anything else

    The first argument must be in one of the first four categories;
    any argument with a length differing from that of the first
    argument (and hence anything in category 5) then will be
    passed through unchanged.

    Masks are obtained from all arguments of the correct length
    in categories 1, 2, and 4; a point is bad if masked in a masked
    array or if it is a nan or inf.  No attempt is made to
    extract a mask from categories 2, 3, and 4 if :meth:`np.isfinite`
    does not yield a Boolean array.

    All input arguments that are not passed unchanged are returned
    as ndarrays after removing the points or rows corresponding to
    masks in any of the arguments.

    A vastly simpler version of this function was originally
    written as a helper for Axes.scatter().
    """
    if not len(args):
        return ()
    if (is_string_like(args[0]) or not iterable(args[0])):
        raise ValueError("First argument must be a sequence")
    nrecs = len(args[0])
    margs = []
    # seqlist[i] is True for arguments that participate in masking
    # (right length, not string-like, iterable).
    seqlist = [False] * len(args)
    for i, x in enumerate(args):
        if (not is_string_like(x)) and iterable(x) and len(x) == nrecs:
            seqlist[i] = True
            if ma.isMA(x):
                if x.ndim > 1:
                    raise ValueError("Masked arrays must be 1-D")
            else:
                x = np.asarray(x)
        margs.append(x)
    masks = []    # list of masks that are True where good
    for i, x in enumerate(margs):
        if seqlist[i]:
            if x.ndim > 1:
                continue  # Don't try to get nan locations unless 1-D.
            if ma.isMA(x):
                masks.append(~ma.getmaskarray(x))  # invert the mask
                xd = x.data
            else:
                xd = x
            try:
                # Only usable if isfinite yields a boolean array (it
                # raises / returns NotImplemented for e.g. strings).
                mask = np.isfinite(xd)
                if isinstance(mask, np.ndarray):
                    masks.append(mask)
            except:  # Fixme: put in tuple of possible exceptions?
                pass
    if len(masks):
        # A record is kept only if *every* participating argument
        # considers it good.
        mask = reduce(np.logical_and, masks)
        igood = mask.nonzero()[0]
        if len(igood) < nrecs:
            for i, x in enumerate(margs):
                if seqlist[i]:
                    margs[i] = x.take(igood, axis=0)
    for i, x in enumerate(margs):
        if seqlist[i] and ma.isMA(x):
            margs[i] = x.filled()
    return margs
def boxplot_stats(X, whis=1.5, bootstrap=None, labels=None):
    '''
    Returns list of dictionaries of staticists to be use to draw a series of
    box and whisker plots. See the `Returns` section below to the required
    keys of the dictionary. Users can skip this function and pass a user-
    defined set of dictionaries to the new `axes.bxp` method instead of
    relying on MPL to do the calcs.

    Parameters
    ----------
    X : array-like
        Data that will be represented in the boxplots. Should have 2 or fewer
        dimensions.

    whis : float, string, or sequence (default = 1.5)
        As a float, determines the reach of the whiskers past the first and
        third quartiles (e.g., Q3 + whis*IQR, QR = interquartile range, Q3-Q1).
        Beyond the whiskers, data are considered outliers and are plotted as
        individual points. Set this to an unreasonably high value to force the
        whiskers to show the min and max data. Alternatively, set this to an
        ascending sequence of percentile (e.g., [5, 95]) to set the whiskers
        at specific percentiles of the data. Finally, can  `whis` be the
        string 'range' to force the whiskers to the min and max of the data.
        In the edge case that the 25th and 75th percentiles are equivalent,
        `whis` will be automatically set to 'range'

    bootstrap : int or None (default)
        Number of times the confidence intervals around the median should
        be bootstrapped (percentile method).

    labels : sequence
        Labels for each dataset. Length must be compatible with dimensions
        of `X`

    Returns
    -------
    bxpstats : list of dict
        A list of dictionaries containing the results for each column
        of data. Keys of each dictionary are the following:

        ========   ===================================
        Key        Value Description
        ========   ===================================
        label      tick label for the boxplot
        mean       arithemetic mean value
        med        50th percentile
        q1         first quartile (25th percentile)
        q3         third quartile (75th percentile)
        cilo       lower notch around the median
        cihi       upper notch around the median
        whislo     end of the lower whisker
        whishi     end of the upper whisker
        fliers     outliers
        ========   ===================================

    Notes
    -----
    Non-bootstrapping approach to confidence interval uses Gaussian-based
    asymptotic approximation:

    .. math::

        \mathrm{med} \pm 1.57 \\times \\frac{\mathrm{iqr}}{\sqrt{N}}

    General approach from:
    McGill, R., Tukey, J.W., and Larsen, W.A. (1978) "Variations of
    Boxplots", The American Statistician, 32:12-16.
    '''

    def _bootstrap_median(data, N=5000):
        # determine 95% confidence intervals of the median
        M = len(data)
        percentiles = [2.5, 97.5]

        ii = np.random.randint(M, size=(N, M))
        # Bug fix: resample *data* (the parameter) rather than the
        # enclosing loop variable ``x``; the old ``bsData = x[ii]`` only
        # worked by accident because the sole caller passes ``x``.
        bsData = data[ii]
        estimate = np.median(bsData, axis=1, overwrite_input=True)

        CI = np.percentile(estimate, percentiles)
        return CI

    def _compute_conf_interval(data, med, iqr, bootstrap):
        if bootstrap is not None:
            # Do a bootstrap estimate of notch locations.
            # get conf. intervals around median
            CI = _bootstrap_median(data, N=bootstrap)
            notch_min = CI[0]
            notch_max = CI[1]
        else:
            # Gaussian asymptotic approximation (McGill et al. 1978).
            N = len(data)
            notch_min = med - 1.57 * iqr / np.sqrt(N)
            notch_max = med + 1.57 * iqr / np.sqrt(N)

        return notch_min, notch_max

    # output is a list of dicts
    bxpstats = []

    # convert X to a list of lists
    X = _reshape_2D(X)

    ncols = len(X)
    if labels is None:
        labels = repeat(None)
    elif len(labels) != ncols:
        raise ValueError("Dimensions of labels and X must be compatible")

    input_whis = whis
    for ii, (x, label) in enumerate(zip(X, labels), start=0):

        # empty dict
        stats = {}
        if label is not None:
            stats['label'] = label

        # restore whis to the input values in case it got changed in the loop
        whis = input_whis

        # note tricksyness, append up here and then mutate below
        bxpstats.append(stats)

        # if empty, bail (the duplicate 'med' assignment that was here
        # has been removed)
        if len(x) == 0:
            stats['fliers'] = np.array([])
            stats['mean'] = np.nan
            stats['med'] = np.nan
            stats['q1'] = np.nan
            stats['q3'] = np.nan
            stats['cilo'] = np.nan
            stats['cihi'] = np.nan
            stats['whislo'] = np.nan
            stats['whishi'] = np.nan
            continue

        # up-convert to an array, just to be safe
        x = np.asarray(x)

        # arithmetic mean
        stats['mean'] = np.mean(x)

        # medians and quartiles
        q1, med, q3 = np.percentile(x, [25, 50, 75])

        # interquartile range
        stats['iqr'] = q3 - q1
        if stats['iqr'] == 0:
            # Degenerate IQR: fall back to min/max whiskers.
            whis = 'range'

        # conf. interval around median
        stats['cilo'], stats['cihi'] = _compute_conf_interval(
            x, med, stats['iqr'], bootstrap
        )

        # lowest/highest non-outliers
        if np.isscalar(whis):
            if np.isreal(whis):
                loval = q1 - whis * stats['iqr']
                hival = q3 + whis * stats['iqr']
            elif whis in ['range', 'limit', 'limits', 'min/max']:
                loval = np.min(x)
                hival = np.max(x)
            else:
                whismsg = ('whis must be a float, valid string, or '
                           'list of percentiles')
                raise ValueError(whismsg)
        else:
            loval = np.percentile(x, whis[0])
            hival = np.percentile(x, whis[1])

        # get high extreme
        wiskhi = np.compress(x <= hival, x)
        if len(wiskhi) == 0 or np.max(wiskhi) < q3:
            stats['whishi'] = q3
        else:
            stats['whishi'] = np.max(wiskhi)

        # get low extreme
        wisklo = np.compress(x >= loval, x)
        if len(wisklo) == 0 or np.min(wisklo) > q1:
            stats['whislo'] = q1
        else:
            stats['whislo'] = np.min(wisklo)

        # compute a single array of outliers
        stats['fliers'] = np.hstack([
            np.compress(x < stats['whislo'], x),
            np.compress(x > stats['whishi'], x)
        ])

        # add in the remaining stats
        stats['q1'], stats['med'], stats['q3'] = q1, med, q3

    return bxpstats
# FIXME I don't think this is used anywhere
def unmasked_index_ranges(mask, compressed=True):
    """
    Find index ranges where *mask* is *False*.

    *mask* will be flattened if it is not already 1-D.

    Returns an Nx2 :class:`numpy.ndarray` whose rows are the start and
    stop indices for slices of the compressed :class:`numpy.ndarray`
    corresponding to each of *N* uninterrupted runs of unmasked values.
    If optional argument *compressed* is *False*, the start and stop
    indices refer to the original (uncompressed) array instead.
    Returns *None* if there are no unmasked values.

    Example::

      y = ma.array(np.arange(5), mask = [0,0,1,0,0])
      ii = unmasked_index_ranges(ma.getmaskarray(y))
      # returns array [[0,2,] [2,4,]]

      y.compressed()[ii[1,0]:ii[1,1]]
      # returns array [3,4,]

      ii = unmasked_index_ranges(ma.getmaskarray(y), compressed=False)
      # returns array [[0, 2], [3, 5]]

      y.filled()[ii[1,0]:ii[1,1]]
      # returns array [3,4,]

    Prior to the transforms refactoring, this was used to support
    masked arrays in Line2D.
    """
    flat = mask.reshape(mask.size)
    # Pad with "masked" sentinels so every unmasked run is guaranteed to
    # have both a falling and a rising edge.
    padded = np.concatenate(((1,), flat, (1,)))
    transitions = padded[1:] - padded[:-1]
    positions = np.arange(len(flat) + 1)
    starts = positions[transitions == -1]
    stops = positions[transitions == 1]
    assert len(starts) == len(stops)
    if len(stops) == 0:
        return None  # Maybe this should be np.zeros((0,2), dtype=int)
    if not compressed:
        return np.column_stack((starts, stops))
    # Re-express the runs as consecutive slices of the compressed array.
    run_lengths = stops - starts
    ends = np.cumsum(run_lengths)
    begins = np.concatenate(((0,), ends[:-1]))
    return np.column_stack((begins, ends))
# Cross-maps between the short linestyle codes and the full names used by
# the backends.
_linestyles = [('-', 'solid'),
               ('--', 'dashed'),
               ('-.', 'dashdot'),
               (':', 'dotted')]

# Short code -> full backend name.
ls_mapper = dict(_linestyles)
# Full backend name -> short code.
ls_mapper_r = dict((full, short) for short, full in _linestyles)
def align_iterators(func, *iterables):
    """
    This generator takes a bunch of iterables that are ordered by func
    It sends out ordered tuples::

       (func(row), [rows from all iterators matching func(row)])

    It is used by :func:`matplotlib.mlab.recs_join` to join record arrays

    Exhausted member iterators are tracked with a key of *None*; keys
    produced by *func* may therefore be any orderable value except None,
    including falsy ones such as 0 or "".
    """
    class myiter:
        def __init__(self, it):
            self.it = it
            self.key = self.value = None
            self.iternext()

        def iternext(self):
            # Advance one element; an exhausted iterator is marked by
            # key = value = None.
            try:
                self.value = next(self.it)
                self.key = func(self.value)
            except StopIteration:
                self.value = self.key = None

        def __call__(self, key):
            # Return (and consume) our current row if it matches *key*,
            # else None.
            retval = None
            if key == self.key:
                retval = self.value
                self.iternext()
            elif self.key is not None and key > self.key:
                # BUGFIX: this used to test truthiness of self.key, which
                # misbehaved for falsy keys such as 0.
                raise ValueError("Iterator has been left behind")
            return retval

    # This can be made more efficient by not computing the minimum key for
    # each iteration
    iters = [myiter(it) for it in iterables]
    while True:
        # BUGFIX: exhausted iterators were previously dropped by
        # truthiness, which also (incorrectly) discarded falsy keys,
        # silently losing their rows.
        minvals = [it.key for it in iters if it.key is not None]
        if not minvals:
            break
        minkey = min(minvals)
        yield (minkey, [it(minkey) for it in iters])
def is_math_text(s):
    """
    Return True when *s* should be rendered as mathtext, i.e. when it
    contains an even, nonzero number of non-escaped dollar signs.
    """
    try:
        s = six.text_type(s)
    except UnicodeDecodeError:
        raise ValueError(
            "matplotlib display text must have all code points < 128 or use "
            "Unicode strings")

    n_dollars = s.count(r'$') - s.count(r'\$')
    return n_dollars > 0 and n_dollars % 2 == 0
def _check_1d(x):
'''
Converts a sequence of less than 1 dimension, to an array of 1
dimension; leaves everything else untouched.
'''
if not hasattr(x, 'shape') or len(x.shape) < 1:
return np.atleast_1d(x)
else:
try:
x[:, None]
return x
except (IndexError, TypeError):
return np.atleast_1d(x)
def _reshape_2D(X):
"""
Converts a non-empty list or an ndarray of two or fewer dimensions
into a list of iterable objects so that in
for v in _reshape_2D(X):
v is iterable and can be used to instantiate a 1D array.
"""
if hasattr(X, 'shape'):
# one item
if len(X.shape) == 1:
if hasattr(X[0], 'shape'):
X = list(X)
else:
X = [X, ]
# several items
elif len(X.shape) == 2:
nrows, ncols = X.shape
if nrows == 1:
X = [X]
elif ncols == 1:
X = [X.ravel()]
else:
X = [X[:, i] for i in xrange(ncols)]
else:
raise ValueError("input `X` must have 2 or fewer dimensions")
if not hasattr(X[0], '__len__'):
X = [X]
else:
X = [np.ravel(x) for x in X]
return X
def violin_stats(X, method, points=100):
    '''
    Return a list of dictionaries of data which can be used to draw a
    series of violin plots. See the `Returns` section below for the keys
    of each dictionary. Users can skip this function and pass a
    user-defined set of dictionaries to the `axes.vplot` method instead
    of using MPL to do the calculations.

    Parameters
    ----------
    X : array-like
        Sample data that will be used to produce the gaussian kernel density
        estimates. Must have 2 or fewer dimensions.

    method : callable
        The method used to calculate the kernel density estimate for each
        column of data. When called via `method(v, coords)`, it should
        return a vector of the values of the KDE evaluated at the values
        specified in coords.

    points : scalar, default = 100
        Defines the number of points to evaluate each of the gaussian kernel
        density estimates at.

    Returns
    -------
    A list of dictionaries containing the results for each column of data.
    The dictionaries contain at least the following:

        - coords: A list of scalars containing the coordinates this particular
          kernel density estimate was evaluated at.
        - vals: A list of scalars containing the values of the kernel density
          estimate at each of the coordinates given in `coords`.
        - mean: The mean value for this column of data.
        - median: The median value for this column of data.
        - min: The minimum value for this column of data.
        - max: The maximum value for this column of data.
    '''
    # Want X to be a list of 1-D data sequences.
    X = _reshape_2D(X)

    vpstats = []
    for sample in X:
        low, high = np.min(sample), np.max(sample)
        # Evaluate the kernel density estimate on an evenly spaced grid
        # spanning the sample.
        grid = np.linspace(low, high, points)
        vpstats.append({
            'coords': grid,
            'vals': method(sample, grid),
            'mean': np.mean(sample),
            'median': np.median(sample),
            'min': low,
            'max': high,
        })
    return vpstats
class _NestedClassGetter(object):
# recipe from http://stackoverflow.com/a/11493777/741316
"""
When called with the containing class as the first argument,
and the name of the nested class as the second argument,
returns an instance of the nested class.
"""
def __call__(self, containing_class, class_name):
nested_class = getattr(containing_class, class_name)
# make an instance of a simple object (this one will do), for which we
# can change the __class__ later on.
nested_instance = _NestedClassGetter()
# set the class of the instance, the __init__ will never be called on
# the class but the original state will be set later on by pickle.
nested_instance.__class__ = nested_class
return nested_instance
class _InstanceMethodPickler(object):
    """
    Pickle cannot handle instancemethod saving. _InstanceMethodPickler
    provides a solution to this by storing the bound object and the
    method name rather than the method object itself.
    """
    def __init__(self, instancemethod):
        """Takes an instancemethod as its only argument."""
        # Py2 spells the bound-method attributes im_self/im_func; Py3
        # uses __self__/__func__.
        if six.PY3:
            func, obj = instancemethod.__func__, instancemethod.__self__
        else:
            func, obj = instancemethod.im_func, instancemethod.im_self
        self.parent_obj = obj
        self.instancemethod_name = func.__name__

    def get_instancemethod(self):
        """Re-bind and return the stored method from the stored object."""
        return getattr(self.parent_obj, self.instancemethod_name)
def _step_validation(x, *args):
"""
Helper function of `pts_to_*step` functions
This function does all of the normalization required to the
input and generate the template for output
"""
args = tuple(np.asanyarray(y) for y in args)
x = np.asanyarray(x)
if x.ndim != 1:
raise ValueError("x must be 1 dimenional")
if len(args) == 0:
raise ValueError("At least one Y value must be passed")
return np.vstack((x, ) + args)
def pts_to_prestep(x, *args):
"""
Covert continuous line to pre-steps
Given a set of N points convert to 2 N -1 points
which when connected linearly give a step function
which changes values at the begining the intervals.
Parameters
----------
x : array
The x location of the steps
y1, y2, ... : array
Any number of y arrays to be turned into steps.
All must be the same length as ``x``
Returns
-------
x, y1, y2, .. : array
The x and y values converted to steps in the same order
as the input. If the input is length ``N``, each of these arrays
will be length ``2N + 1``
Examples
--------
>> x_s, y1_s, y2_s = pts_to_prestep(x, y1, y2)
"""
# do normalization
vertices = _step_validation(x, *args)
# create the output array
steps = np.zeros((vertices.shape[0], 2 * len(x) - 1), np.float)
# do the to step conversion logic
steps[0, 0::2], steps[0, 1::2] = vertices[0, :], vertices[0, :-1]
steps[1:, 0::2], steps[1:, 1:-1:2] = vertices[1:, :], vertices[1:, 1:]
# convert 2D array back to tuple
return tuple(steps)
def pts_to_poststep(x, *args):
"""
Covert continuous line to pre-steps
Given a set of N points convert to 2 N -1 points
which when connected linearly give a step function
which changes values at the begining the intervals.
Parameters
----------
x : array
The x location of the steps
y1, y2, ... : array
Any number of y arrays to be turned into steps.
All must be the same length as ``x``
Returns
-------
x, y1, y2, .. : array
The x and y values converted to steps in the same order
as the input. If the input is length ``N``, each of these arrays
will be length ``2N + 1``
Examples
--------
>> x_s, y1_s, y2_s = pts_to_prestep(x, y1, y2)
"""
# do normalization
vertices = _step_validation(x, *args)
# create the output array
steps = ma.zeros((vertices.shape[0], 2 * len(x) - 1), np.float)
# do the to step conversion logic
steps[0, ::2], steps[0, 1:-1:2] = vertices[0, :], vertices[0, 1:]
steps[1:, 0::2], steps[1:, 1::2] = vertices[1:, :], vertices[1:, :-1]
# convert 2D array back to tuple
return tuple(steps)
def pts_to_midstep(x, *args):
"""
Covert continuous line to pre-steps
Given a set of N points convert to 2 N -1 points
which when connected linearly give a step function
which changes values at the begining the intervals.
Parameters
----------
x : array
The x location of the steps
y1, y2, ... : array
Any number of y arrays to be turned into steps.
All must be the same length as ``x``
Returns
-------
x, y1, y2, .. : array
The x and y values converted to steps in the same order
as the input. If the input is length ``N``, each of these arrays
will be length ``2N + 1``
Examples
--------
>> x_s, y1_s, y2_s = pts_to_prestep(x, y1, y2)
"""
# do normalization
vertices = _step_validation(x, *args)
# create the output array
steps = ma.zeros((vertices.shape[0], 2 * len(x)), np.float)
steps[0, 1:-1:2] = 0.5 * (vertices[0, :-1] + vertices[0, 1:])
steps[0, 2::2] = 0.5 * (vertices[0, :-1] + vertices[0, 1:])
steps[0, 0] = vertices[0, 0]
steps[0, -1] = vertices[0, -1]
steps[1:, 0::2], steps[1:, 1::2] = vertices[1:, :], vertices[1:, :]
# convert 2D array back to tuple
return tuple(steps)
STEP_LOOKUP_MAP = {'pre': pts_to_prestep,
'post': pts_to_poststep,
'mid': pts_to_midstep,
'step-pre': pts_to_prestep,
'step-post': pts_to_poststep,
'step-mid': pts_to_midstep}
def index_of(y):
    """
    A helper function to get the index of an input to plot
    against if x values are not explicitly given.

    Tries ``y.index`` first (works if this is a pd.Series); otherwise
    falls back to ``np.arange(y.shape[0])``.

    This will be extended in the future to deal with more types of
    labeled data.

    Parameters
    ----------
    y : scalar or array-like
        The proposed y-value

    Returns
    -------
    x, y : ndarray
       The x and y values to plot.
    """
    try:
        idx = y.index.values
        vals = y.values
    except AttributeError:
        vals = np.atleast_1d(y)
        idx = np.arange(vals.shape[0], dtype=float)
    return idx, vals
def safe_first_element(obj):
    """
    Return the first element of *obj* without consuming it.

    Raises
    ------
    RuntimeError
        If *obj* is a one-shot iterator (e.g. a generator), since
        peeking at its first element would consume it.
    """
    # collections.Iterator moved to collections.abc in Python 3.3 and the
    # old alias was removed entirely in 3.10; resolve whichever spelling
    # exists so the check works on every supported version.
    iterator_abc = getattr(collections, 'abc', collections).Iterator
    if isinstance(obj, iterator_abc):
        raise RuntimeError("matplotlib does not support generators "
                           "as input")
    return next(iter(obj))
def get_label(y, default_name):
    """Return ``y.name`` when *y* has one (e.g. a pandas Series), else *default_name*."""
    return getattr(y, 'name', default_name)
# Numpy > 1.6.x deprecates putmask in favor of the new copyto.
# So long as we support versions 1.6.x and less, we need the
# following local version of putmask. We choose to make a
# local version of putmask rather than of copyto because the
# latter includes more functionality than the former. Therefore
# it is easy to make a local version that gives full putmask
# behavior, but duplicating the full copyto behavior would be
# more difficult.
try:
np.copyto
except AttributeError:
_putmask = np.putmask
else:
def _putmask(a, mask, values):
return np.copyto(a, values, where=mask)
| rbalda/neural_ocr | env/lib/python2.7/site-packages/matplotlib/cbook.py | Python | mit | 77,442 | [
"Gaussian"
] | 0bef506ff75b9c9d694a1c5024b6db7c9dbd686e47f8112a050ae4661cfb1e6d |
import os
from ase.structure import molecule
from ase.io import read, write
from ase.parallel import rank
from gpaw import GPAW, restart
import warnings
# cmr calls all available methods in ase.atoms detected by the module inspect.
# Therefore also deprecated methods are called - and we choose to silence those warnings.
warnings.filterwarnings('ignore', 'ase.atoms.*deprecated',)
import cmr
#from cmr.tools.log import Log
#cmr.logger.set_message_selection(Log.MSG_TYPE_ALL)
# Workflow stage switches -- each stage below can be toggled independently.
calculate = True
recalculate = True
analyse_from_dir = True  # analyse local cmr files
upload_to_db = False  # upload cmr files to the database
analyse_from_db = False  # analyse database
create_group = True  # group calculations belonging to a given reaction
clean = False

# Sanity checks: grouping needs analysis results, and database analysis
# only makes sense after the files have been uploaded.
if create_group: assert analyse_from_dir or analyse_from_db
if analyse_from_db: assert upload_to_db
symbol = 'Li'

# define the project in order to find it in the database!
project_id = 'my first project: atomize'

vacuum = 3.5  # vacuum padding passed to Atoms.center(); presumably Angstrom

# calculator parameters
xc = 'LDA'
mode = 'lcao'
h = 0.20  # GPAW grid parameter (presumably real-space grid spacing)

# Metadata template written into every cmr file.  'db_keywords' enables
# keyword search; keys prefixed 'U_' are user-defined tags.
cmr_params_template = {
    'db_keywords': [project_id],
    # add project_id also as a field to support search across projects
    'project_id': project_id,
    # user's tags
    'U_vacuum': vacuum,
    'U_xc': xc,
    'U_mode': mode,
    'U_h': h,
}
if calculate:
    # The molecule (Li2) and the isolated atom (Li) go through an
    # identical ground-state workflow, so factor it into one helper
    # instead of maintaining two copy-pasted variants.
    def _run_ground_state(formula):
        """Run an LDA/lcao ground state for *formula*; write .gpw and .cmr files."""
        # set formula name to be written into the cmr file
        cmr_params = cmr_params_template.copy()
        cmr_params['U_formula'] = formula
        cmrfile = formula + '.cmr'

        system = molecule(formula)
        system.center(vacuum=vacuum)
        # Note: molecules (and Li) do not need broken cell symmetry!
        # Many other atoms do.
        if 0:
            system.cell[1, 1] += 0.01
            system.cell[2, 2] += 0.02

        # Hund rule (for atoms)
        hund = (len(system) == 1)
        cmr_params['U_hund'] = hund

        # first calculation: LDA lcao
        calc = GPAW(mode=mode, xc=xc, h=h, hund=hund, txt=formula + '.txt')
        system.set_calculator(calc)
        e = system.get_potential_energy()
        # write gpw file
        calc.write(formula)

        # add total energy to users tags
        cmr_params['U_potential_energy'] = e

        # write the information 'as in' corresponding trajectory file
        # plus cmr_params into cmr file
        write(cmrfile, system, cmr_params=cmr_params)

    # molecule first, then the isolated atom
    _run_ground_state(symbol + '2')
    _run_ground_state(symbol)
if recalculate:
    # now calculate PBE energies on the previously converged LDA orbitals
    def _add_pbe_energy(formula):
        """Evaluate PBE non-self-consistently for *formula*; record in its cmr file."""
        system, calc = restart(formula, txt=None)
        ediff = calc.get_xc_difference('PBE')

        cmrfile = formula + '.cmr'
        # add new results to the cmrfile
        data = cmr.read(cmrfile)
        data.set_user_variable('U_potential_energy_PBE', data['U_potential_energy'] + ediff)
        data.write(cmrfile)

    # molecule first, then the isolated atom (same order as 'calculate')
    _add_pbe_energy(symbol + '2')
    _add_pbe_energy(symbol)
if analyse_from_dir:
    # analyze the results from cmr files in the local directory
    from cmr.ui import DirectoryReader

    # read all compounds in the project with lcao and LDA orbitals
    reader = DirectoryReader(directory='.', ext='.cmr')
    # NOTE(review): 'all' shadows the builtin of the same name.
    all = reader.find(name_value_list=[('U_mode', 'lcao'), ('U_xc', 'LDA')],
                      keyword_list=[project_id])
    if rank == 0:
        print 'results from cmr files in the local directory'
        # print requested results
        # column_length=0 aligns data in the table (-1 : data unaligned is default)
        all.print_table(column_length=0,
                        columns=['U_formula', 'U_vacuum',
                                 'U_xc', 'U_h', 'U_hund',
                                 'U_potential_energy', 'U_potential_energy_PBE',
                                 'ase_temperature'])

    # access the results directly and calculate atomization energies
    f2 = symbol + '2'
    f1 = symbol
    if rank == 0:
        # results are accessible only on master rank
        r2 = all.get('U_formula', f2)
        r1 = all.get('U_formula', f1)
        # calculate atomization energies (ea): 2 E(atom) - E(molecule)
        ea_LDA = 2 * r1['U_potential_energy'] - r2['U_potential_energy']
        ea_PBE = 2 * r1['U_potential_energy_PBE'] - r2['U_potential_energy_PBE']
        print 'atomization energy [eV] ' + xc + ' = ' + str(ea_LDA)
        print 'atomization energy [eV] PBE = ' + str(ea_PBE)
        if create_group:
            # ea_LDA and ea_PBE define a group linking both calculations
            group = cmr.create_group();
            group.add(r1['db_hash']);
            group.add(r2['db_hash']);
            group.set_user_variable('U_ea_LDA', ea_LDA)
            group.set_user_variable('U_ea_PBE', ea_PBE)
            group.set_user_variable('U_description', 'atomization energy [eV]')
            group.set_user_variable('U_reaction', '2 * ' + symbol + ' - ' + symbol + '2')
            group.set_user_variable('db_keywords', [project_id])
            group.set_user_variable('project_id', project_id)
            group.write(symbol + '2_atomize_from_dir.cmr');

    # dump everything belonging to the project, including the group file
    if True:
        all = reader.find(keyword_list=[project_id])
        if rank == 0:
            print 'contents of the cmr files present in the local directory'
            # print requested results
            # column_length=0 aligns data in the table (-1 : data unaligned is default)
            all.print_table(column_length=0,
                            columns=['U_formula', 'U_vacuum',
                                     'U_xc', 'U_h', 'U_hund',
                                     'U_potential_energy', 'U_potential_energy_PBE',
                                     'ase_temperature', 'U_reaction', 'U_ea_LDA', 'U_ea_PBE', 'U_description'])
if upload_to_db:
    # upload cmr files to the database via the cmr command-line tool;
    # only the master rank performs the upload
    if rank == 0:
        os.system('cmr --commit ' + symbol + '*.cmr')
if analyse_from_db:
    # analyze the results from the database
    # analysis can only be performed on rank 0!!
    from cmr.ui import DBReader
    reader = DBReader()
    # NOTE(review): 'all' shadows the builtin of the same name.
    all = reader.find(name_value_list=[('U_mode', 'lcao'),
                                       ('U_xc', 'LDA'),
                                       #('db_user', '')
                                       ],
                      keyword_list=[project_id])
    if rank == 0:
        print 'results from the database'
        # print requested results
        # column_length=0 aligns data in the table (-1 : data unaligned is default)
        all.print_table(column_length=0,
                        columns=['U_formula', 'U_vacuum',
                                 'U_xc', 'U_h', 'U_hund',
                                 'U_potential_energy', 'U_potential_energy_PBE',
                                 'ase_temperature'])

    # access the results directly and calculate atomization energies
    f2 = symbol + '2'
    f1 = symbol
    # results are accessible only on master rank
    r1 = all.get('U_formula', f1)
    r2 = all.get('U_formula', f2)
    # check if results were successfully retrieved, otherwise we have to wait
    # (the upload may not have been processed yet)
    if r1 is None or r2 is None:
        print "Results are not yet in the database. Wait, and try again."
    else:
        # calculate atomization energies (ea): 2 E(atom) - E(molecule)
        ea_LDA = 2 * r1['U_potential_energy'] - r2['U_potential_energy']
        ea_PBE = 2 * r1['U_potential_energy_PBE'] - r2['U_potential_energy_PBE']
        if rank == 0:
            print 'atomization energy [eV] ' + xc + ' = ' + str(ea_LDA)
            print 'atomization energy [eV] PBE = ' + str(ea_PBE)
        if create_group:
            # ea_LDA and ea_PBE define a group linking both calculations
            group = cmr.create_group();
            group.add(r1['db_hash']);
            group.add(r2['db_hash']);
            group.set_user_variable('U_ea_LDA', ea_LDA)
            group.set_user_variable('U_ea_PBE', ea_PBE)
            group.set_user_variable('U_description', 'atomization energy [eV] (from database)')
            group.set_user_variable('U_reaction', '2 * ' + symbol + ' - ' + symbol + '2')
            group.set_user_variable('db_keywords', [project_id])
            group.set_user_variable('project_id', project_id)
            group.write(symbol + '2_atomize_from_db.cmr');
            group.write(".cmr");

    # dump everything belonging to the project, including the group data
    if True:
        all = reader.find(keyword_list=[project_id])
        if rank == 0:
            print 'contents of the database'
            # print requested results
            # column_length=0 aligns data in the table (-1 : data unaligned is default)
            all.print_table(column_length=0,
                            columns=['U_formula', 'U_vacuum',
                                     'U_xc', 'U_h', 'U_hund',
                                     'U_potential_energy', 'U_potential_energy_PBE',
                                     'ase_temperature', 'U_reaction', 'U_ea_LDA', 'U_ea_PBE', 'U_description'])
if clean:
    # Remove every artifact the workflow may have produced; only the
    # master rank touches the filesystem.
    if rank == 0:
        artifacts = [symbol + ext for ext in ('.cmr', '.gpw', '.txt')]
        artifacts += [symbol + '2' + ext for ext in ('.cmr', '.gpw', '.txt')]
        artifacts += [symbol + '2_atomize_from_dir.cmr',
                      symbol + '2_atomize_from_db.cmr']
        for path in artifacts:
            if os.path.exists(path):
                os.unlink(path)
| robwarm/gpaw-symm | gpaw/test/cmrtest/Li2_atomize.py | Python | gpl-3.0 | 10,201 | [
"ASE",
"GPAW"
] | 37fb293c5b155b3a048b875739544bf6d5b2045644fc036e94d6c2e0f23510e8 |
# ===============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of eos.
#
# eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with eos. If not, see <http://www.gnu.org/licenses/>.
# ===============================================================================
import copy
import time
from copy import deepcopy
from itertools import chain
from math import sqrt, log, asinh
from sqlalchemy.orm import validates, reconstructor
import eos.db
from eos import capSim
from eos.effectHandlerHelpers import *
from eos.effectHandlerHelpers import HandledModuleList, HandledDroneCargoList, HandledImplantBoosterList, HandledProjectedDroneList, HandledProjectedModList
from eos.enum import Enum
from eos.saveddata.module import State, Hardpoint
from eos.types import Ship, Character, Slot, Module, Citadel
from utils.timer import Timer
import logging
logger = logging.getLogger(__name__)
try:
from collections import OrderedDict
except ImportError:
from utils.compat import OrderedDict
class ImplantLocation(Enum):
    """Where a fit draws its implants from (see Fit.appliedImplants).

    Note: this Enum base comes from eos.enum, not the stdlib enum module.
    """
    def __init__(self):
        pass
    # implants attached directly to the fit
    FIT = 0
    # implants taken from the associated character
    CHARACTER = 1
class Fit(object):
    """Represents a fitting, with modules, ship, implants, etc."""

    # NOTE(review): fraction of maximum capacitor -- presumably the charge
    # level at which recharge rate peaks; confirm against the cap simulation.
    PEAK_RECHARGE = 0.25
    def __init__(self, ship=None, name=""):
        """Initialize a fit from the program (as opposed to a database load)."""
        # use @mode.setter's to set __attr and IDs. This will set mode as well
        # (assigning through the 'ship' property also records shipID).
        self.ship = ship

        if self.ship:
            self.ship.parent = self

        # Handled* containers come from eos.effectHandlerHelpers and manage
        # the fit's items (modules, drones, etc.).
        self.__modules = HandledModuleList()
        self.__drones = HandledDroneCargoList()
        self.__fighters = HandledDroneCargoList()
        self.__cargo = HandledDroneCargoList()
        self.__implants = HandledImplantBoosterList()
        self.__boosters = HandledImplantBoosterList()
        # self.__projectedFits = {}
        self.__projectedModules = HandledProjectedModList()
        self.__projectedDrones = HandledProjectedDroneList()
        self.__projectedFighters = HandledProjectedDroneList()
        self.__character = None
        self.__owner = None

        self.projected = False
        self.name = name
        self.timestamp = time.time()
        self.modeID = None

        # initialize the cached/derived state shared with DB loads
        self.build()
    @reconstructor
    def init(self):
        """Initialize a fit loaded from the database and validate its ship.

        Runs via SQLAlchemy's @reconstructor instead of __init__.  On
        failure the fit is left with __ship = None, which marks it as
        invalid (see isInvalid).
        """
        self.__ship = None
        self.__mode = None

        if self.shipID:
            item = eos.db.getItem(self.shipID)
            if item is None:
                logger.error("Item (id: %d) does not exist", self.shipID)
                return

            try:
                try:
                    self.__ship = Ship(item, self)
                except ValueError:
                    # not a regular hull; try to interpret it as a structure
                    self.__ship = Citadel(item, self)
                # @todo extra attributes is now useless, however it set to be
                # the same as ship attributes for ease (so we don't have to
                # change all instances in source). Remove this at some point
                self.extraAttributes = self.__ship.itemModifiedAttributes
            except ValueError:
                logger.error("Item (id: %d) is not a Ship", self.shipID)
                return

        # NOTE(review): if shipID is unset, self.ship is None here and
        # validateModeItem would raise -- presumably fits always have a
        # ship at this point; confirm.
        if self.modeID and self.__ship:
            item = eos.db.getItem(self.modeID)
            # Don't need to verify if it's a proper item, as validateModeItem assures this
            self.__mode = self.ship.validateModeItem(item)
        else:
            self.__mode = self.ship.validateModeItem(None)

        self.build()
    def build(self):
        """Reset all cached/derived state; shared by __init__ and init."""
        self.__extraDrains = []
        # Cached stats: None means "not yet calculated"; the corresponding
        # properties recompute them lazily on access.
        self.__ehp = None
        self.__weaponDPS = None
        self.__minerYield = None
        self.__weaponVolley = None
        self.__droneDPS = None
        self.__droneVolley = None
        self.__droneYield = None
        self.__sustainableTank = None
        self.__effectiveSustainableTank = None
        self.__effectiveTank = None
        self.__calculated = False
        self.__capStable = None
        self.__capState = None
        self.__capUsed = None
        self.__capRecharge = None
        self.__calculatedTargets = []
        self.factorReload = False
        self.boostsFits = set()
        self.gangBoosts = None
        # 1 means "no ECM strength projected onto us" (see jamChance)
        self.ecmProjectedStr = 1
        self.commandBonuses = {}
    @property
    def targetResists(self):
        """Resist profile of the hypothetical target used to scale damage stats."""
        return self.__targetResists

    @targetResists.setter
    def targetResists(self, targetResists):
        self.__targetResists = targetResists
        # invalidate cached damage figures so their properties recompute
        self.__weaponDPS = None
        self.__weaponVolley = None
        self.__droneDPS = None
        self.__droneVolley = None

    @property
    def damagePattern(self):
        """Incoming damage pattern used for effective-HP/tank figures."""
        return self.__damagePattern

    @damagePattern.setter
    def damagePattern(self, damagePattern):
        self.__damagePattern = damagePattern
        # invalidate cached tank figures
        self.__ehp = None
        self.__effectiveTank = None
    @property
    def isInvalid(self):
        # True when the ship item could not be resolved on load (see init)
        return self.__ship is None

    @property
    def mode(self):
        return self.__mode

    @mode.setter
    def mode(self, mode):
        self.__mode = mode
        # keep the persisted ID in sync with the object
        self.modeID = mode.item.ID if mode is not None else None

    @property
    def character(self):
        # fall back to the all-skills-at-zero character when none is assigned
        return self.__character if self.__character is not None else Character.getAll0()

    @character.setter
    def character(self, char):
        self.__character = char

    @property
    def ship(self):
        return self.__ship

    @ship.setter
    def ship(self, ship):
        self.__ship = ship
        # keep the persisted ID in sync with the object
        self.shipID = ship.item.ID if ship is not None else None
        if ship is not None:
            # set mode of new ship
            self.mode = self.ship.validateModeItem(None) if ship is not None else None
            # set fit attributes the same as ship
            self.extraAttributes = self.ship.itemModifiedAttributes

    @property
    def isStructure(self):
        # structures are represented by the Citadel class
        return isinstance(self.ship, Citadel)
    # --- Read-only accessors for the fit's item containers ---

    @property
    def drones(self):
        return self.__drones

    @property
    def fighters(self):
        return self.__fighters

    @property
    def cargo(self):
        return self.__cargo

    @property
    def modules(self):
        return self.__modules

    @property
    def implants(self):
        return self.__implants

    @property
    def boosters(self):
        return self.__boosters

    @property
    def projectedModules(self):
        return self.__projectedModules

    @property
    def projectedFits(self):
        # only in extreme edge cases will the fit be invalid, but to be sure do
        # not return them.
        # NOTE(review): __projectedFits is not assigned in __init__ (the line
        # is commented out) -- presumably mapped by the ORM; confirm.
        return [fit for fit in self.__projectedFits.values() if not fit.isInvalid]

    @property
    def commandFits(self):
        # fits providing command bursts to this fit, invalid ones excluded
        return [fit for fit in self.__commandFits.values() if not fit.isInvalid]

    def getProjectionInfo(self, fitID):
        """Return projection info for *fitID*, or None if not projected onto it."""
        return self.projectedOnto.get(fitID, None)

    def getCommandInfo(self, fitID):
        """Return command-boost info for *fitID*, or None if not boosting it."""
        return self.boostedOnto.get(fitID, None)

    @property
    def projectedDrones(self):
        return self.__projectedDrones

    @property
    def projectedFighters(self):
        return self.__projectedFighters
    # --- Offensive / mining statistics, computed lazily and cached ---
    # A cached value of None triggers recomputation via
    # calculateWeaponStats() / calculateMiningStats(); the caches are
    # invalidated by clear() and by the targetResists setter.

    @property
    def weaponDPS(self):
        if self.__weaponDPS is None:
            self.calculateWeaponStats()

        return self.__weaponDPS

    @property
    def weaponVolley(self):
        if self.__weaponVolley is None:
            self.calculateWeaponStats()

        return self.__weaponVolley

    @property
    def droneDPS(self):
        if self.__droneDPS is None:
            self.calculateWeaponStats()

        return self.__droneDPS

    @property
    def droneVolley(self):
        if self.__droneVolley is None:
            self.calculateWeaponStats()

        return self.__droneVolley

    @property
    def totalDPS(self):
        return self.droneDPS + self.weaponDPS

    @property
    def totalVolley(self):
        return self.droneVolley + self.weaponVolley

    @property
    def minerYield(self):
        if self.__minerYield is None:
            self.calculateMiningStats()

        return self.__minerYield

    @property
    def droneYield(self):
        if self.__droneYield is None:
            self.calculateMiningStats()

        return self.__droneYield

    @property
    def totalYield(self):
        return self.droneYield + self.minerYield
    @property
    def maxTargets(self):
        # locked-target cap is the lower of the skill cap and the hull cap
        return min(self.extraAttributes["maxTargetsLockedFromSkills"],
                   self.ship.getModifiedItemAttr("maxLockedTargets"))

    @property
    def maxTargetRange(self):
        return self.ship.getModifiedItemAttr("maxTargetRange")

    @property
    def scanStrength(self):
        # strongest of the four sensor types
        return max([self.ship.getModifiedItemAttr("scan%sStrength" % scanType)
                    for scanType in ("Magnetometric", "Ladar", "Radar", "Gravimetric")])
@property
def scanType(self):
maxStr = -1
type = None
for scanType in ("Magnetometric", "Ladar", "Radar", "Gravimetric"):
currStr = self.ship.getModifiedItemAttr("scan%sStrength" % scanType)
if currStr > maxStr:
maxStr = currStr
type = scanType
elif currStr == maxStr:
type = "Multispectral"
return type
@property
def jamChance(self):
return (1 - self.ecmProjectedStr) * 100
@property
def maxSpeed(self):
speedLimit = self.ship.getModifiedItemAttr("speedLimit")
if speedLimit and self.ship.getModifiedItemAttr("maxVelocity") > speedLimit:
return speedLimit
return self.ship.getModifiedItemAttr("maxVelocity")
@property
def alignTime(self):
agility = self.ship.getModifiedItemAttr("agility") or 0
mass = self.ship.getModifiedItemAttr("mass")
return -log(0.25) * agility * mass / 1000000
@property
def implantSource(self):
return self.implantLocation
@implantSource.setter
def implantSource(self, source):
self.implantLocation = source
@property
def appliedImplants(self):
if self.implantLocation == ImplantLocation.CHARACTER:
return self.character.implants
else:
return self.implants
@validates("ID", "ownerID", "shipID")
def validator(self, key, val):
map = {"ID": lambda val: isinstance(val, int),
"ownerID": lambda val: isinstance(val, int) or val is None,
"shipID": lambda val: isinstance(val, int) or val is None}
if not map[key](val):
raise ValueError(str(val) + " is not a valid value for " + key)
else:
return val
    def clear(self, projected=False):
        """Drop every cached stat and clear child items ahead of recalculation.

        When clearing the active fit (projected=False), all fits projected
        onto it are cleared as well; the flag stops that from recursing.
        """
        self.__effectiveTank = None
        self.__weaponDPS = None
        self.__minerYield = None
        self.__weaponVolley = None
        self.__effectiveSustainableTank = None
        self.__sustainableTank = None
        self.__droneDPS = None
        self.__droneVolley = None
        self.__droneYield = None
        self.__ehp = None
        self.__calculated = False
        self.__capStable = None
        self.__capState = None
        self.__capUsed = None
        self.__capRecharge = None
        self.ecmProjectedStr = 1
        self.commandBonuses = {}
        # empty in place so any external references stay valid
        del self.__calculatedTargets[:]
        del self.__extraDrains[:]

        if self.ship:
            self.ship.clear()

        # every item container plus the character and extra attributes
        c = chain(
            self.modules,
            self.drones,
            self.fighters,
            self.boosters,
            self.implants,
            self.projectedDrones,
            self.projectedModules,
            self.projectedFighters,
            (self.character, self.extraAttributes),
        )

        for stuff in c:
            if stuff is not None and stuff != self:
                stuff.clear()

        # If this is the active fit that we are clearing, not a projected fit,
        # then this will run and clear the projected ships and flag the next
        # iteration to skip this part to prevent recursion.
        if not projected:
            for stuff in self.projectedFits:
                if stuff is not None and stuff != self:
                    stuff.clear(projected=True)
# Methods to register and get the thing currently affecting the fit,
# so we can correctly map "Affected By"
def register(self, currModifier, origin=None):
self.__modifier = currModifier
self.__origin = origin
if hasattr(currModifier, "itemModifiedAttributes"):
currModifier.itemModifiedAttributes.fit = origin or self
if hasattr(currModifier, "chargeModifiedAttributes"):
currModifier.chargeModifiedAttributes.fit = origin or self
    def getModifier(self):
        """Return the item most recently passed to register()."""
        return self.__modifier
    def getOrigin(self):
        """Return the fit the current modifier was projected from (or None)."""
        return self.__origin
def addCommandBonus(self, warfareBuffID, value, module, effect, runTime="normal"):
# oh fuck this is so janky
# @todo should we pass in min/max to this function, or is abs okay?
# (abs is old method, ccp now provides the aggregate function in their data)
if warfareBuffID not in self.commandBonuses or abs(self.commandBonuses[warfareBuffID][1]) < abs(value):
self.commandBonuses[warfareBuffID] = (runTime, value, module, effect)
def __runCommandBoosts(self, runTime="normal"):
logger.debug("Applying gang boosts for %r", self)
for warfareBuffID in self.commandBonuses.keys():
# Unpack all data required to run effect properly
effect_runTime, value, thing, effect = self.commandBonuses[warfareBuffID]
if runTime != effect_runTime:
continue
# This should always be a gang effect, otherwise it wouldn't be added to commandBonuses
# @todo: Check this
if effect.isType("gang"):
self.register(thing)
if warfareBuffID == 10: # Shield Burst: Shield Harmonizing: Shield Resistance
for damageType in ("Em", "Explosive", "Thermal", "Kinetic"):
self.ship.boostItemAttr("shield%sDamageResonance" % damageType, value)
if warfareBuffID == 11: # Shield Burst: Active Shielding: Repair Duration/Capacitor
self.modules.filteredItemBoost(
lambda mod: mod.item.requiresSkill("Shield Operation") or mod.item.requiresSkill(
"Shield Emission Systems"), "capacitorNeed", value)
self.modules.filteredItemBoost(
lambda mod: mod.item.requiresSkill("Shield Operation") or mod.item.requiresSkill(
"Shield Emission Systems"), "duration", value)
if warfareBuffID == 12: # Shield Burst: Shield Extension: Shield HP
self.ship.boostItemAttr("shieldCapacity", value, stackingPenalties=True)
if warfareBuffID == 13: # Armor Burst: Armor Energizing: Armor Resistance
for damageType in ("Em", "Thermal", "Explosive", "Kinetic"):
self.ship.boostItemAttr("armor%sDamageResonance" % damageType, value)
if warfareBuffID == 14: # Armor Burst: Rapid Repair: Repair Duration/Capacitor
self.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("Remote Armor Repair Systems") or mod.item.requiresSkill("Repair Systems"),
"capacitorNeed", value)
self.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("Remote Armor Repair Systems") or mod.item.requiresSkill("Repair Systems"), "duration",
value)
if warfareBuffID == 15: # Armor Burst: Armor Reinforcement: Armor HP
self.ship.boostItemAttr("armorHP", value, stackingPenalties=True)
if warfareBuffID == 16: # Information Burst: Sensor Optimization: Scan Resolution
self.ship.boostItemAttr("scanResolution", value, stackingPenalties=True)
if warfareBuffID == 17: # Information Burst: Electronic Superiority: EWAR Range and Strength
groups = ("ECM", "Sensor Dampener", "Weapon Disruptor", "Target Painter")
self.modules.filteredItemBoost(lambda mod: mod.item.group.name in groups, "maxRange", value,
stackingPenalties=True)
self.modules.filteredItemBoost(lambda mod: mod.item.group.name in groups,
"falloffEffectiveness", value, stackingPenalties=True)
for scanType in ("Magnetometric", "Radar", "Ladar", "Gravimetric"):
self.modules.filteredItemBoost(lambda mod: mod.item.group.name == "ECM",
"scan%sStrengthBonus" % scanType, value,
stackingPenalties=True)
for attr in ("missileVelocityBonus", "explosionDelayBonus", "aoeVelocityBonus", "falloffBonus",
"maxRangeBonus", "aoeCloudSizeBonus", "trackingSpeedBonus"):
self.modules.filteredItemBoost(lambda mod: mod.item.group.name == "Weapon Disruptor",
attr, value)
for attr in ("maxTargetRangeBonus", "scanResolutionBonus"):
self.modules.filteredItemBoost(lambda mod: mod.item.group.name == "Sensor Dampener",
attr, value)
self.modules.filteredItemBoost(lambda mod: mod.item.gorup.name == "Target Painter",
"signatureRadiusBonus", value, stackingPenalties=True)
if warfareBuffID == 18: # Information Burst: Electronic Hardening: Scan Strength
for scanType in ("Gravimetric", "Radar", "Ladar", "Magnetometric"):
self.ship.boostItemAttr("scan%sStrength" % scanType, value, stackingPenalties=True)
if warfareBuffID == 19: # Information Burst: Electronic Hardening: RSD/RWD Resistance
self.ship.boostItemAttr("sensorDampenerResistance", value)
self.ship.boostItemAttr("weaponDisruptionResistance", value)
if warfareBuffID == 26: # Information Burst: Sensor Optimization: Targeting Range
self.ship.boostItemAttr("maxTargetRange", value)
if warfareBuffID == 20: # Skirmish Burst: Evasive Maneuvers: Signature Radius
self.ship.boostItemAttr("signatureRadius", value, stackingPenalties=True)
if warfareBuffID == 21: # Skirmish Burst: Interdiction Maneuvers: Tackle Range
groups = ("Stasis Web", "Warp Scrambler")
self.modules.filteredItemBoost(lambda mod: mod.item.group.name in groups, "maxRange", value,
stackingPenalties=True)
if warfareBuffID == 22: # Skirmish Burst: Rapid Deployment: AB/MWD Speed Increase
self.modules.filteredItemBoost(
lambda mod: mod.item.requiresSkill("Afterburner") or mod.item.requiresSkill(
"High Speed Maneuvering"), "speedFactor", value, stackingPenalties=True)
if warfareBuffID == 23: # Mining Burst: Mining Laser Field Enhancement: Mining/Survey Range
self.modules.filteredItemBoost(
lambda mod: mod.item.requiresSkill("Mining") or mod.item.requiresSkill(
"Ice Harvesting") or mod.item.requiresSkill("Gas Cloud Harvesting"), "maxRange",
value, stackingPenalties=True)
self.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("CPU Management"),
"surveyScanRange", value, stackingPenalties=True)
if warfareBuffID == 24: # Mining Burst: Mining Laser Optimization: Mining Capacitor/Duration
self.modules.filteredItemBoost(
lambda mod: mod.item.requiresSkill("Mining") or mod.item.requiresSkill(
"Ice Harvesting") or mod.item.requiresSkill("Gas Cloud Harvesting"),
"capacitorNeed", value, stackingPenalties=True)
self.modules.filteredItemBoost(
lambda mod: mod.item.requiresSkill("Mining") or mod.item.requiresSkill(
"Ice Harvesting") or mod.item.requiresSkill("Gas Cloud Harvesting"), "duration",
value, stackingPenalties=True)
if warfareBuffID == 25: # Mining Burst: Mining Equipment Preservation: Crystal Volatility
self.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("Mining"),
"crystalVolatilityChance", value, stackingPenalties=True)
if warfareBuffID == 60: # Skirmish Burst: Evasive Maneuvers: Agility
self.ship.boostItemAttr("agility", value, stackingPenalties=True)
# Titan effects
if warfareBuffID == 39: # Avatar Effect Generator : Capacitor Recharge bonus
self.ship.boostItemAttr("rechargeRate", value, stackingPenalties=True)
if warfareBuffID == 40: # Avatar Effect Generator : Kinetic resistance bonus
for attr in ("armorKineticDamageResonance", "shieldKineticDamageResonance", "kineticDamageResonance"):
self.ship.boostItemAttr(attr, value, stackingPenalties=True)
if warfareBuffID == 41: # Avatar Effect Generator : EM resistance penalty
for attr in ("armorEmDamageResonance", "shieldEmDamageResonance", "emDamageResonance"):
self.ship.boostItemAttr(attr, value, stackingPenalties=True)
if warfareBuffID == 42: # Erebus Effect Generator : Armor HP bonus
self.ship.boostItemAttr("armorHP", value, stackingPenalties=True)
if warfareBuffID == 43: # Erebus Effect Generator : Explosive resistance bonus
for attr in ("armorExplosiveDamageResonance", "shieldExplosiveDamageResonance", "explosiveDamageResonance"):
self.ship.boostItemAttr(attr, value, stackingPenalties=True)
if warfareBuffID == 44: # Erebus Effect Generator : Thermal resistance penalty
for attr in ("armorThermalDamageResonance", "shieldThermalDamageResonance", "thermalDamageResonance"):
self.ship.boostItemAttr(attr, value, stackingPenalties=True)
if warfareBuffID == 45: # Ragnarok Effect Generator : Signature Radius bonus
self.ship.boostItemAttr("signatureRadius", value, stackingPenalties=True)
if warfareBuffID == 46: # Ragnarok Effect Generator : Thermal resistance bonus
for attr in ("armorThermalDamageResonance", "shieldThermalDamageResonance", "thermalDamageResonance"):
self.ship.boostItemAttr(attr, value, stackingPenalties=True)
if warfareBuffID == 47: # Ragnarok Effect Generator : Explosive resistance penaly
for attr in ("armorExplosiveDamageResonance", "shieldExplosiveDamageResonance", "explosiveDamageResonance"):
self.ship.boostItemAttr(attr, value, stackingPenalties=True)
if warfareBuffID == 48: # Leviathan Effect Generator : Shield HP bonus
self.ship.boostItemAttr("shieldCapacity", value, stackingPenalties=True)
if warfareBuffID == 49: # Leviathan Effect Generator : EM resistance bonus
for attr in ("armorEmDamageResonance", "shieldEmDamageResonance", "emDamageResonance"):
self.ship.boostItemAttr(attr, value, stackingPenalties=True)
if warfareBuffID == 50: # Leviathan Effect Generator : Kinetic resistance penalty
for attr in ("armorKineticDamageResonance", "shieldKineticDamageResonance", "kineticDamageResonance"):
self.ship.boostItemAttr(attr, value, stackingPenalties=True)
if warfareBuffID == 51: # Avatar Effect Generator : Velocity penalty
self.ship.boostItemAttr("maxVelocity", value, stackingPenalties=True)
if warfareBuffID == 52: # Erebus Effect Generator : Shield RR penalty
self.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("Shield Emission Systems"), "shieldBonus", value, stackingPenalties=True)
if warfareBuffID == 53: # Leviathan Effect Generator : Armor RR penalty
self.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("Remote Armor Repair Systems"), "armorDamageAmount", value, stackingPenalties=True)
if warfareBuffID == 54: # Ragnarok Effect Generator : Laser and Hybrid Optimal penalty
groups = ("Energy Weapon", "Hybrid Weapon")
self.modules.filteredItemBoost(lambda mod: mod.item.group.name in groups, "maxRange", value, stackingPenalties=True)
del self.commandBonuses[warfareBuffID]
def calculateModifiedAttributes(self, targetFit=None, withBoosters=False, dirtyStorage=None):
timer = Timer(u'Fit: {}, {}'.format(self.ID, self.name), logger)
logger.debug("Starting fit calculation on: %r, withBoosters: %s", self, withBoosters)
shadow = False
if targetFit and not withBoosters:
logger.debug("Applying projections to target: %r", targetFit)
projectionInfo = self.getProjectionInfo(targetFit.ID)
logger.debug("ProjectionInfo: %s", projectionInfo)
if self == targetFit:
copied = self # original fit
shadow = True
# Don't inspect this, we genuinely want to reassign self
# noinspection PyMethodFirstArgAssignment
self = copy.deepcopy(self)
logger.debug("Handling self projection - making shadow copy of fit. %r => %r", copied, self)
# we delete the fit because when we copy a fit, flush() is
# called to properly handle projection updates. However, we do
# not want to save this fit to the database, so simply remove it
eos.db.saveddata_session.delete(self)
if self.commandFits and not withBoosters:
for fit in self.commandFits:
if self == fit:
continue
fit.calculateModifiedAttributes(self, True)
# If we're not explicitly asked to project fit onto something,
# set self as target fit
if targetFit is None:
targetFit = self
projected = False
else:
projected = not withBoosters
# If fit is calculated and we have nothing to do here, get out
# A note on why projected fits don't get to return here. If we return
# here, the projection afflictions will not be run as they are
# intertwined into the regular fit calculations. So, even if the fit has
# been calculated, we need to recalculate it again just to apply the
# projections. This is in contract to gang boosts, which are only
# calculated once, and their items are then looped and accessed with
# self.gangBoosts.iteritems()
# We might be able to exit early in the fit calculations if we separate
# projections from the normal fit calculations. But we must ensure that
# projection have modifying stuff applied, such as gang boosts and other
# local modules that may help
if self.__calculated and not projected and not withBoosters:
logger.debug("Fit has already been calculated and is not projected, returning: %r", self)
return
for runTime in ("early", "normal", "late"):
# Items that are unrestricted. These items are run on the local fit
# first and then projected onto the target fit it one is designated
u = [
(self.character, self.ship),
self.drones,
self.fighters,
self.boosters,
self.appliedImplants,
self.modules
] if not self.isStructure else [
# Ensure a restricted set for citadels
(self.character, self.ship),
self.fighters,
self.modules
]
# Items that are restricted. These items are only run on the local
# fit. They are NOT projected onto the target fit. # See issue 354
r = [(self.mode,), self.projectedDrones, self.projectedFighters, self.projectedModules]
# chain unrestricted and restricted into one iterable
c = chain.from_iterable(u + r)
# We calculate gang bonuses first so that projected fits get them
# if self.gangBoosts is not None:
# self.__calculateGangBoosts(runTime)
for item in c:
# Registering the item about to affect the fit allows us to
# track "Affected By" relations correctly
if item is not None:
if not self.__calculated:
# apply effects locally if this is first time running them on fit
self.register(item)
item.calculateModifiedAttributes(self, runTime, False)
if projected is True and item not in chain.from_iterable(r):
# apply effects onto target fit
for _ in xrange(projectionInfo.amount):
targetFit.register(item, origin=self)
item.calculateModifiedAttributes(targetFit, runTime, True)
if targetFit and withBoosters and item in self.modules:
# Apply the gang boosts to target fit
# targetFit.register(item, origin=self)
item.calculateModifiedAttributes(targetFit, runTime, False, True)
print "Command: "
print self.commandBonuses
if not withBoosters and self.commandBonuses:
self.__runCommandBoosts(runTime)
timer.checkpoint('Done with runtime: %s' % runTime)
# Mark fit as calculated
self.__calculated = True
# Only apply projected fits if fit it not projected itself.
if not projected and not withBoosters:
for fit in self.projectedFits:
if fit.getProjectionInfo(self.ID).active:
fit.calculateModifiedAttributes(self, withBoosters=withBoosters, dirtyStorage=dirtyStorage)
timer.checkpoint('Done with fit calculation')
if shadow:
logger.debug("Delete shadow fit object")
del self
def fill(self):
"""
Fill this fit's module slots with enough dummy slots so that all slots are used.
This is mostly for making the life of gui's easier.
GUI's can call fill() and then stop caring about empty slots completely.
"""
if self.ship is None:
return
for slotType in (Slot.LOW, Slot.MED, Slot.HIGH, Slot.RIG, Slot.SUBSYSTEM, Slot.SERVICE):
amount = self.getSlotsFree(slotType, True)
if amount > 0:
for _ in xrange(int(amount)):
self.modules.append(Module.buildEmpty(slotType))
if amount < 0:
# Look for any dummies of that type to remove
toRemove = []
for mod in self.modules:
if mod.isEmpty and mod.slot == slotType:
toRemove.append(mod)
amount += 1
if amount == 0:
break
for mod in toRemove:
self.modules.remove(mod)
def unfill(self):
for i in xrange(len(self.modules) - 1, -1, -1):
mod = self.modules[i]
if mod.isEmpty:
del self.modules[i]
@property
def modCount(self):
x = 0
for i in xrange(len(self.modules) - 1, -1, -1):
mod = self.modules[i]
if not mod.isEmpty:
x += 1
return x
def getItemAttrSum(self, dict, attr):
amount = 0
for mod in dict:
add = mod.getModifiedItemAttr(attr)
if add is not None:
amount += add
return amount
def getItemAttrOnlineSum(self, dict, attr):
amount = 0
for mod in dict:
add = mod.getModifiedItemAttr(attr) if mod.state >= State.ONLINE else None
if add is not None:
amount += add
return amount
def getHardpointsUsed(self, type):
amount = 0
for mod in self.modules:
if mod.hardpoint is type and not mod.isEmpty:
amount += 1
return amount
def getSlotsUsed(self, type, countDummies=False):
amount = 0
for mod in chain(self.modules, self.fighters):
if mod.slot is type and (not getattr(mod, "isEmpty", False) or countDummies):
if type in (Slot.F_HEAVY, Slot.F_SUPPORT, Slot.F_LIGHT) and not mod.active:
continue
amount += 1
return amount
slots = {Slot.LOW: "lowSlots",
Slot.MED: "medSlots",
Slot.HIGH: "hiSlots",
Slot.RIG: "rigSlots",
Slot.SUBSYSTEM: "maxSubSystems",
Slot.SERVICE: "serviceSlots",
Slot.F_LIGHT: "fighterLightSlots",
Slot.F_SUPPORT: "fighterSupportSlots",
Slot.F_HEAVY: "fighterHeavySlots"}
def getSlotsFree(self, type, countDummies=False):
if type in (Slot.MODE, Slot.SYSTEM):
# These slots don't really exist, return default 0
return 0
slotsUsed = self.getSlotsUsed(type, countDummies)
totalSlots = self.ship.getModifiedItemAttr(self.slots[type]) or 0
return int(totalSlots - slotsUsed)
    def getNumSlots(self, type):
        """Total slots of the given type on the hull (0 when attribute absent)."""
        return self.ship.getModifiedItemAttr(self.slots[type]) or 0
    @property
    def calibrationUsed(self):
        """Calibration consumed by online rigs (sum of upgradeCost)."""
        return self.getItemAttrOnlineSum(self.modules, 'upgradeCost')
    @property
    def pgUsed(self):
        """Powergrid consumed by online modules."""
        return self.getItemAttrOnlineSum(self.modules, "power")
    @property
    def cpuUsed(self):
        """CPU consumed by online modules."""
        return self.getItemAttrOnlineSum(self.modules, "cpu")
@property
def droneBandwidthUsed(self):
amount = 0
for d in self.drones:
amount += d.getModifiedItemAttr("droneBandwidthUsed") * d.amountActive
return amount
@property
def droneBayUsed(self):
amount = 0
for d in self.drones:
amount += d.item.volume * d.amount
return amount
@property
def fighterBayUsed(self):
amount = 0
for f in self.fighters:
amount += f.item.volume * f.amountActive
return amount
@property
def fighterTubesUsed(self):
amount = 0
for f in self.fighters:
if f.active:
amount += 1
return amount
@property
def cargoBayUsed(self):
amount = 0
for c in self.cargo:
amount += c.getModifiedItemAttr("volume") * c.amount
return amount
@property
def activeDrones(self):
amount = 0
for d in self.drones:
amount += d.amountActive
return amount
# Expresses how difficult a target is to probe down with scan probes
# If this is <1.08, the ship is unproabeable
@property
def probeSize(self):
sigRad = self.ship.getModifiedItemAttr("signatureRadius")
sensorStr = float(self.scanStrength)
probeSize = sigRad / sensorStr if sensorStr != 0 else None
# http://www.eveonline.com/ingameboard.asp?a=topic&threadID=1532170&page=2#42
if probeSize is not None:
# http://forum.eve-ru.com/index.php?showtopic=74195&view=findpost&p=1333691
# http://forum.eve-ru.com/index.php?showtopic=74195&view=findpost&p=1333763
# Tests by tester128 and several conclusions by me, prove that cap is in range
# from 1.1 to 1.12, we're picking average value
probeSize = max(probeSize, 1.11)
return probeSize
@property
def warpSpeed(self):
base = self.ship.getModifiedItemAttr("baseWarpSpeed") or 1
multiplier = self.ship.getModifiedItemAttr("warpSpeedMultiplier") or 1
return base * multiplier
@property
def maxWarpDistance(self):
capacity = self.ship.getModifiedItemAttr("capacitorCapacity")
mass = self.ship.getModifiedItemAttr("mass")
warpCapNeed = self.ship.getModifiedItemAttr("warpCapacitorNeed")
if not warpCapNeed:
return 0
return capacity / (mass * warpCapNeed)
    @property
    def capStable(self):
        """True when the capacitor never runs dry under the simulated load.
        Lazily runs the cap simulation on first access."""
        if self.__capStable is None:
            self.simulateCap()

        return self.__capStable
    @property
    def capState(self):
        """
        If the cap is stable, the capacitor state is the % at which it is stable.
        If the cap is unstable, this is the amount of time before it runs out
        """
        if self.__capState is None:
            self.simulateCap()

        return self.__capState
    @property
    def capUsed(self):
        """Total capacitor drain rate from modules and incoming effects
        (lazily computed by the cap simulation)."""
        if self.__capUsed is None:
            self.simulateCap()

        return self.__capUsed
    @property
    def capRecharge(self):
        """Total capacitor replenishment rate, including peak passive
        recharge (lazily computed by the cap simulation)."""
        if self.__capRecharge is None:
            self.simulateCap()

        return self.__capRecharge
    @property
    def sustainableTank(self):
        """Cap-sustainable repair rates per layer (lazily computed)."""
        if self.__sustainableTank is None:
            self.calculateSustainableTank()

        return self.__sustainableTank
    def calculateSustainableTank(self, effective=True):
        """Compute repair rates per layer that the capacitor can sustain
        indefinitely, caching the result in self.__sustainableTank.

        When cap is stable, the full local repair rates are sustainable.
        Otherwise cap-using repairers are first subtracted out, then added
        back in (most cap-efficient first) up to the peak recharge budget.

        NOTE(review): the *effective* parameter is never used in this body
        - presumably kept for interface compatibility; confirm callers
        before removing.
        """
        if self.__sustainableTank is None:
            if self.capStable:
                sustainable = {"armorRepair": self.extraAttributes["armorRepair"],
                               "shieldRepair": self.extraAttributes["shieldRepair"],
                               "hullRepair": self.extraAttributes["hullRepair"]}
            else:
                sustainable = {}
                repairers = []
                # Map a repairer type to the attribute it uses
                groupAttrMap = {"Armor Repair Unit": "armorDamageAmount",
                                "Ancillary Armor Repairer": "armorDamageAmount",
                                "Hull Repair Unit": "structureDamageAmount",
                                "Shield Booster": "shieldBonus",
                                "Ancillary Shield Booster": "shieldBonus",
                                "Remote Armor Repairer": "armorDamageAmount",
                                "Remote Shield Booster": "shieldBonus"}
                # Map repairer type to attribute
                groupStoreMap = {"Armor Repair Unit": "armorRepair",
                                 "Hull Repair Unit": "hullRepair",
                                 "Shield Booster": "shieldRepair",
                                 "Ancillary Shield Booster": "shieldRepair",
                                 "Remote Armor Repairer": "armorRepair",
                                 "Remote Shield Booster": "shieldRepair",
                                 "Ancillary Armor Repairer": "armorRepair", }

                capUsed = self.capUsed
                # NOTE(review): local name 'dict' shadows the builtin below.
                for attr in ("shieldRepair", "armorRepair", "hullRepair"):
                    sustainable[attr] = self.extraAttributes[attr]
                    dict = self.extraAttributes.getAfflictions(attr)
                    if self in dict:
                        for mod, _, amount, used in dict[self]:
                            if not used:
                                continue
                            if mod.projected is False:
                                usesCap = True
                                try:
                                    if mod.capUse:
                                        capUsed -= mod.capUse
                                    else:
                                        usesCap = False
                                except AttributeError:
                                    usesCap = False
                                # Modules which do not use cap are not penalized based on cap use
                                if usesCap:
                                    # Back out this repairer's contribution; it is
                                    # re-added below at its sustainable fraction.
                                    cycleTime = mod.getModifiedItemAttr("duration")
                                    amount = mod.getModifiedItemAttr(groupAttrMap[mod.item.group.name])
                                    sustainable[attr] -= amount / (cycleTime / 1000.0)
                                    repairers.append(mod)

                # Sort repairers by efficiency. We want to use the most efficient repairers first
                repairers.sort(key=lambda mod: mod.getModifiedItemAttr(
                    groupAttrMap[mod.item.group.name]) / mod.getModifiedItemAttr("capacitorNeed"), reverse=True)

                # Loop through every module until we're above peak recharge
                # Most efficient first, as we sorted earlier.
                # calculate how much the repper can rep stability & add to total
                totalPeakRecharge = self.capRecharge
                for mod in repairers:
                    if capUsed > totalPeakRecharge:
                        break

                    cycleTime = mod.cycleTime
                    capPerSec = mod.capUse
                    if capPerSec is not None and cycleTime is not None:
                        # Check how much this repper can work
                        sustainability = min(1, (totalPeakRecharge - capUsed) / capPerSec)

                        # Add the sustainable amount
                        amount = mod.getModifiedItemAttr(groupAttrMap[mod.item.group.name])
                        sustainable[groupStoreMap[mod.item.group.name]] += sustainability * (amount / (cycleTime / 1000.0))

                        capUsed += capPerSec

            sustainable["passiveShield"] = self.calculateShieldRecharge()
            self.__sustainableTank = sustainable

        return self.__sustainableTank
    def calculateCapRecharge(self, percent=PEAK_RECHARGE):
        """Passive capacitor recharge rate (GJ/s) at the given fraction of
        maximum cap; defaults to the peak point of the recharge curve."""
        capacity = self.ship.getModifiedItemAttr("capacitorCapacity")
        # rechargeRate attribute is in milliseconds
        rechargeRate = self.ship.getModifiedItemAttr("rechargeRate") / 1000.0
        return 10 / rechargeRate * sqrt(percent) * (1 - sqrt(percent)) * capacity
    def calculateShieldRecharge(self, percent=PEAK_RECHARGE):
        """Passive shield recharge rate (HP/s) at the given fraction of
        maximum shield; defaults to the peak point of the recharge curve."""
        capacity = self.ship.getModifiedItemAttr("shieldCapacity")
        # shieldRechargeRate attribute is in milliseconds
        rechargeRate = self.ship.getModifiedItemAttr("shieldRechargeRate") / 1000.0
        return 10 / rechargeRate * sqrt(percent) * (1 - sqrt(percent)) * capacity
def addDrain(self, src, cycleTime, capNeed, clipSize=0):
""" Used for both cap drains and cap fills (fills have negative capNeed) """
energyNeutralizerSignatureResolution = src.getModifiedItemAttr("energyNeutralizerSignatureResolution")
signatureRadius = self.ship.getModifiedItemAttr("signatureRadius")
# Signature reduction, uses the bomb formula as per CCP Larrikin
if energyNeutralizerSignatureResolution:
capNeed = capNeed * min(1, signatureRadius / energyNeutralizerSignatureResolution)
resistance = self.ship.getModifiedItemAttr("energyWarfareResistance") or 1 if capNeed > 0 else 1
self.__extraDrains.append((cycleTime, capNeed * resistance, clipSize))
    def removeDrain(self, i):
        """Remove the manually-added drain at index *i* (see iterDrains)."""
        del self.__extraDrains[i]
    def iterDrains(self):
        """Iterate over manually-added (cycleTime, capNeed, clipSize) drains."""
        return self.__extraDrains.__iter__()
    def __generateDrain(self):
        """Collect all capacitor flows on the fit for the cap simulator.

        Returns (drains, capUsed, capAdded): *drains* is a list of
        (fullCycleTimeMs, capNeed, clipSize, disableStagger) tuples;
        *capUsed* / *capAdded* are the aggregate outgoing/incoming rates.

        NOTE(review): for modules, capUsed accumulates mod.capUse while the
        drain tuple carries the raw "capacitorNeed" attribute - presumably
        per-second vs per-cycle quantities; confirm against capSim usage.
        """
        drains = []
        capUsed = 0
        capAdded = 0
        for mod in self.modules:
            if mod.state >= State.ACTIVE:
                if (mod.getModifiedItemAttr("capacitorNeed") or 0) != 0:
                    cycleTime = mod.rawCycleTime or 0
                    reactivationTime = mod.getModifiedItemAttr("moduleReactivationDelay") or 0
                    fullCycleTime = cycleTime + reactivationTime
                    if fullCycleTime > 0:
                        capNeed = mod.capUse
                        if capNeed > 0:
                            capUsed += capNeed
                        else:
                            capAdded -= capNeed

                        # If this is a turret, don't stagger activations
                        disableStagger = mod.hardpoint == Hardpoint.TURRET

                        drains.append((int(fullCycleTime), mod.getModifiedItemAttr("capacitorNeed") or 0,
                                       mod.numShots or 0, disableStagger))

        for fullCycleTime, capNeed, clipSize in self.iterDrains():
            # Stagger incoming effects for cap simulation
            drains.append((int(fullCycleTime), capNeed, clipSize, False))
            if capNeed > 0:
                capUsed += capNeed / (fullCycleTime / 1000.0)
            else:
                capAdded += -capNeed / (fullCycleTime / 1000.0)

        return drains, capUsed, capAdded
    def simulateCap(self):
        """Run the capacitor simulation, populating __capUsed, __capRecharge,
        __capStable and __capState.

        With no drains present the cap is trivially stable at 100%.
        Otherwise capSim is run for up to 6 simulated hours; __capState is
        either the stable percentage or the time-to-empty in seconds.
        """
        drains, self.__capUsed, self.__capRecharge = self.__generateDrain()
        self.__capRecharge += self.calculateCapRecharge()
        if len(drains) > 0:
            sim = capSim.CapSimulator()
            sim.init(drains)
            sim.capacitorCapacity = self.ship.getModifiedItemAttr("capacitorCapacity")
            sim.capacitorRecharge = self.ship.getModifiedItemAttr("rechargeRate")
            sim.stagger = True
            sim.scale = False
            sim.t_max = 6 * 60 * 60 * 1000  # 6 hours, in milliseconds
            sim.reload = self.factorReload
            sim.run()

            # Midpoint of the simulator's stable band, as a fraction of max cap
            capState = (sim.cap_stable_low + sim.cap_stable_high) / (2 * sim.capacitorCapacity)
            self.__capStable = capState > 0
            self.__capState = min(100, capState * 100) if self.__capStable else sim.t / 1000.0
        else:
            self.__capStable = True
            self.__capState = 100
@property
def hp(self):
hp = {}
for (type, attr) in (('shield', 'shieldCapacity'), ('armor', 'armorHP'), ('hull', 'hp')):
hp[type] = self.ship.getModifiedItemAttr(attr)
return hp
    @property
    def ehp(self):
        """Effective HP per layer against the fit's damage pattern (lazy).
        Falls back to raw hp when no damage pattern is set."""
        if self.__ehp is None:
            if self.damagePattern is None:
                ehp = self.hp
            else:
                ehp = self.damagePattern.calculateEhp(self)
            self.__ehp = ehp
        return self.__ehp
@property
def tank(self):
hps = {"passiveShield": self.calculateShieldRecharge()}
for type in ("shield", "armor", "hull"):
hps["%sRepair" % type] = self.extraAttributes["%sRepair" % type]
return hps
    @property
    def effectiveTank(self):
        """Effective repair rates against the fit's damage pattern (lazy).
        Falls back to raw tank when no damage pattern is set."""
        if self.__effectiveTank is None:
            if self.damagePattern is None:
                ehps = self.tank
            else:
                ehps = self.damagePattern.calculateEffectiveTank(self, self.extraAttributes)

            self.__effectiveTank = ehps

        return self.__effectiveTank
    @property
    def effectiveSustainableTank(self):
        """Effective cap-sustainable repair rates against the fit's damage
        pattern (lazy); falls back to raw sustainable tank without one."""
        if self.__effectiveSustainableTank is None:
            if self.damagePattern is None:
                eshps = self.sustainableTank
            else:
                eshps = self.damagePattern.calculateEffectiveTank(self, self.sustainableTank)

            self.__effectiveSustainableTank = eshps

        return self.__effectiveSustainableTank
def calculateLockTime(self, radius):
scanRes = self.ship.getModifiedItemAttr("scanResolution")
if scanRes is not None and scanRes > 0:
# Yes, this function returns time in seconds, not miliseconds.
# 40,000 is indeed the correct constant here.
return min(40000 / scanRes / asinh(radius) ** 2, 30 * 60)
else:
return self.ship.getModifiedItemAttr("scanSpeed") / 1000.0
def calculateMiningStats(self):
minerYield = 0
droneYield = 0
for mod in self.modules:
minerYield += mod.miningStats
for drone in self.drones:
droneYield += drone.miningStats
self.__minerYield = minerYield
self.__droneYield = droneYield
def calculateWeaponStats(self):
weaponDPS = 0
droneDPS = 0
weaponVolley = 0
droneVolley = 0
for mod in self.modules:
dps, volley = mod.damageStats(self.targetResists)
weaponDPS += dps
weaponVolley += volley
for drone in self.drones:
dps, volley = drone.damageStats(self.targetResists)
droneDPS += dps
droneVolley += volley
for fighter in self.fighters:
dps, volley = fighter.damageStats(self.targetResists)
droneDPS += dps
droneVolley += volley
self.__weaponDPS = weaponDPS
self.__weaponVolley = weaponVolley
self.__droneDPS = droneDPS
self.__droneVolley = droneVolley
@property
def fits(self):
for mod in self.modules:
if not mod.fits(self):
return False
return True
    def __deepcopy__(self, memo):
        """Deep-copy this fit into a fresh Fit named "<name> copy".

        Ship, contained item lists and damage/target patterns are copied;
        character and owner are shared by reference. Projected-fit links
        are carried over by reference (not deep-copied).
        """
        copy = Fit()
        # Character and owner are not copied
        copy.character = self.__character
        copy.owner = self.owner
        copy.ship = deepcopy(self.ship, memo)
        copy.name = "%s copy" % self.name
        copy.damagePattern = self.damagePattern
        copy.targetResists = self.targetResists
        copy.notes = self.notes

        toCopy = (
            "modules",
            "drones",
            "fighters",
            "cargo",
            "implants",
            "boosters",
            "projectedModules",
            "projectedDrones",
            "projectedFighters")
        for name in toCopy:
            orig = getattr(self, name)
            c = getattr(copy, name)
            for i in orig:
                c.append(deepcopy(i, memo))

        for fit in self.projectedFits:
            copy.__projectedFits[fit.ID] = fit
            # this bit is required -- see GH issue # 83
            # flush/refresh keeps the session's view of the projected fit
            # consistent after the copy touched relationships
            eos.db.saveddata_session.flush()
            eos.db.saveddata_session.refresh(fit)

        return copy
    def __repr__(self):
        # Python 2 idiom: format as unicode, then encode to a utf-8 byte
        # string since __repr__ must return str on py2.
        return u"Fit(ID={}, ship={}, name={}) at {}".format(
            self.ID, self.ship.item.name, self.name, hex(id(self))
        ).encode('utf8')
    def __str__(self):
        # "<fit name> (<hull name>)", utf-8 encoded for py2 str semantics.
        return u"{} ({})".format(
            self.name, self.ship.item.name
        ).encode('utf8')
| Ebag333/Pyfa | eos/saveddata/fit.py | Python | gpl-3.0 | 51,862 | [
"CRYSTAL"
] | 12afaae004c13f262983f43f58a503644b162f2411485d65e76efd5a62370041 |
"""
Module to set up run time parameters for Clawpack.
The values set in the function setrun are then written out to data files
that will be read in by the Fortran code.
"""
from __future__ import absolute_import
import os
import numpy as np
#------------------------------
def setrun(claw_pkg='amrclaw'):
    """Build and return the ClawRunData object holding all run-time parameters.

    Parameters
    ----------
    claw_pkg : str
        Expected to be 'amrclaw' for this setup.

    Returns
    -------
    rundata : clawpack.clawutil.data.ClawRunData
        Object whose .write() emits the *.data files read by the Fortran code.

    Raises
    ------
    ValueError
        If the hard-coded ``example`` selector is outside the supported 0-4
        range (previously this surfaced later as a confusing NameError).
    """

    from clawpack.clawutil import data

    assert claw_pkg.lower() == 'amrclaw', "Expected claw_pkg = 'amrclaw'"

    num_dim = 2
    rundata = data.ClawRunData(claw_pkg, num_dim)

    # ---------------------------
    # Physical problem parameters
    # ---------------------------

    # 0 = rigid body rotation;
    # 1 = horizontal flow
    # 2 = sine patch
    # 3 = horizontal flow with variable speed
    # 4 = time dependent in both x and y
    # 5 = spatially dependent
    example = 2

    # Fail fast: only examples 0-4 configure the time stepping and initial
    # location below.  Example 5 (listed above) would otherwise crash later
    # with a NameError on dt_initial / initial_location.
    if example not in (0, 1, 2, 3, 4):
        raise ValueError("example must be in 0..4; got %r" % (example,))

    refine_pattern = 0   # 0 = constant theta; 1 = constant_r

    rps = -1                          # units of theta/second (example=0)
    cart_speed = 1.092505803290319    # Horizontal speed (example=1)
    freq = 1                          # Frequency for sine path

    # Region occupied by annulus
    beta = 0.4
    theta = [0.125,0.375]

    # Example 1 (constant horizontal speed)
    vcart = [cart_speed,0]

    # Example 2
    amplitude = 0.05

    if example in [0,1,2,3]:
        ravg = (1 + beta)/2
        # NOTE(review): (1/8) is 0 under Python 2 integer division but 0.125
        # under Python 3 -- confirm which interpreter/start angle is intended.
        t0 = np.pi/2*(1 + (1/8))
        initial_location = ravg*np.array([np.cos(t0), np.sin(t0)])
    elif example in [4]:
        # Vertical motion
        r0 = beta + 0.25*(1-beta)
        initial_location = [0,r0]

    # ---------------
    # Grid parameters
    # ---------------
    grid_mx = 32    # Size of ForestClaw grids
    mi = 4          # Number of ForestClaw blocks
    mj = 2
    mx = mi*grid_mx
    my = mj*grid_mx

    # -------------
    # Time stepping
    # -------------
    if example in [0,1,2,3]:
        dt_initial = 2.5e-3
        nout = 100      # 400 steps => T=2
        nsteps = 10
    elif example == 4:
        dt_initial = 1.25e-3    # Stable for level 1
        nout = 200
        nsteps = 20

    # ------------------
    # AMRClaw parameters
    # ------------------
    regrid_interval = 10000   # Don't regrid

    maxlevel = 2
    ratioxy = 2
    ratiok = 1

    limiter = 'minmod'   # 'none', 'minmod', 'superbee', 'vanleer', 'mc'

    # 0 = no qad
    # 1 = original qad
    # 2 = modified (fixed to include call to rpn2qad)
    # 3 = new qad (equivalent to 2 but uses f90)
    qad_mode = 1

    maux = 15
    use_fwaves = True

    #------------------------------------------------------------------
    # Problem-specific parameters to be written to setprob.data:
    #------------------------------------------------------------------
    probdata = rundata.new_UserData(name='probdata',fname='setprob.data')

    # example 0 : Rigid body rotation (possibly using a streamfunction)
    # Make vertical speed small so we leave grid
    probdata.add_param('example', example, 'example')
    probdata.add_param('revolutions per second', rps, 'rps')
    probdata.add_param('cart_speed', cart_speed, 'cart_speed')
    probdata.add_param('vcart[0]', vcart[0], 'vcart[0]')
    probdata.add_param('vcart[1]', vcart[1], 'vcart[1]')
    probdata.add_param('amplitude', amplitude, 'amplitude')
    probdata.add_param('freq', freq, 'freq')

    probdata.add_param('initial radius', 0.05, 'init_radius')
    probdata.add_param('initial_location[0]', initial_location[0], 'initial_location[0]')
    probdata.add_param('initial_location[1]', initial_location[1], 'initial_location[1]')

    probdata.add_param('beta', beta, 'beta')
    probdata.add_param('theta1', theta[0], 'theta(1)')
    probdata.add_param('theta2', theta[1], 'theta(2)')

    probdata.add_param('grid_mx', grid_mx, 'grid_mx')
    probdata.add_param('mi', mi, 'mi')
    probdata.add_param('mj', mj, 'mj')

    probdata.add_param('maxlevel', maxlevel, 'maxlevel')
    probdata.add_param('reffactor', ratioxy, 'reffactor')
    probdata.add_param('refine_pattern', refine_pattern, 'refine_pattern')
    probdata.add_param('qad_mode', qad_mode, 'qad_mode')

    #------------------------------------------------------------------
    # Standard Clawpack parameters to be written to claw.data:
    #   (or to amrclaw.data for AMR)
    #------------------------------------------------------------------
    clawdata = rundata.clawdata  # initialized when rundata instantiated

    clawdata.num_dim = num_dim

    clawdata.lower[0] = 0          # xlower
    clawdata.upper[0] = 1          # xupper
    clawdata.lower[1] = 0          # ylower
    clawdata.upper[1] = 1          # yupper

    clawdata.num_cells[0] = mx     # mx
    clawdata.num_cells[1] = my     # my

    clawdata.num_eqn = 1
    clawdata.num_aux = maux
    clawdata.capa_index = 1

    # ----------------------------------------------------------
    # Time stepping
    # ----------------------------------------------------------
    clawdata.output_style = 3

    clawdata.dt_variable = False
    clawdata.dt_initial = dt_initial

    if clawdata.output_style==1:
        clawdata.num_output_times = 16
        clawdata.tfinal = 4.0

    elif clawdata.output_style == 2:
        clawdata.output_times = [0., 0.5, 1.0]

    elif clawdata.output_style == 3:
        clawdata.total_steps = nout
        clawdata.output_step_interval = nsteps

    clawdata.output_format = 'ascii'       # 'ascii', 'binary', 'netcdf'

    # ---------------------------
    # Misc time stepping and I/O
    # ---------------------------
    clawdata.cfl_desired = 0.900000
    clawdata.cfl_max = 1.000000

    clawdata.output_t0 = True  # output at initial (or restart) time?
    clawdata.t0 = 0.000000

    clawdata.restart = False               # True to restart from prior results
    clawdata.restart_file = 'fort.chk00006'  # File to use for restart data

    clawdata.output_q_components = 'all'    # only 'all'
    clawdata.output_aux_components = 'none'  # 'all' or 'none'
    clawdata.output_aux_onlyonce = False    # output aux arrays only at t0?

    clawdata.dt_max = 1.000000e+99
    clawdata.steps_max = 1000

    # The current t, dt, and cfl will be printed every time step
    # at AMR levels <= verbosity.  Set verbosity = 0 for no printing.
    # (E.g. verbosity == 2 means print only on levels 1 and 2.)
    clawdata.verbosity = maxlevel

    # ----------------------------------------------------
    # Clawpack parameters
    # -----------------------------------------------------
    clawdata.order = 2
    clawdata.dimensional_split = 'unsplit'
    clawdata.transverse_waves = 2

    clawdata.num_waves = 1

    # 0 or 'none'     ==> no limiter (Lax-Wendroff)
    # 1 or 'minmod'   ==> minmod
    # 2 or 'superbee' ==> superbee
    # 3 or 'vanleer'  ==> van Leer
    # 4 or 'mc'       ==> MC limiter
    clawdata.limiter = [limiter]

    clawdata.use_fwaves = use_fwaves  # True ==> use f-wave version of algorithms

    clawdata.source_split = 0

    # --------------------
    # Boundary conditions:
    # --------------------
    clawdata.num_ghost = 2

    clawdata.bc_lower[0] = 'extrap'   # at xlower
    clawdata.bc_upper[0] = 'extrap'   # at xupper

    clawdata.bc_lower[1] = 'extrap'   # at ylower
    clawdata.bc_upper[1] = 'extrap'   # at yupper

    # ---------------
    # AMR parameters:
    # ---------------
    amrdata = rundata.amrdata

    amrdata.amr_levels_max = maxlevel

    amrdata.refinement_ratios_x = [ratioxy]*maxlevel
    amrdata.refinement_ratios_y = [ratioxy]*maxlevel
    amrdata.refinement_ratios_t = [ratiok]*maxlevel

    # If we are taking a global time step (stable for maxlevel grids), we
    # probably want to increase number of steps taken to hit same
    # time 'tfinal'.
    if ratiok == 1:
        refine_factor = 1
        for i in range(1,maxlevel):
            refine_factor *= amrdata.refinement_ratios_x[i]

        # Decrease time step
        clawdata.dt_initial = dt_initial/refine_factor

        # Increase number of steps taken.
        clawdata.total_steps = nout*refine_factor
        clawdata.output_step_interval = nsteps*refine_factor

    # Refinement threshold
    amrdata.flag2refine_tol = -1  # tolerance used in this routine

    # ------------------------------------------------------
    # Misc AMR parameters
    # ------------------------------------------------------
    amrdata.flag_richardson = False    # use Richardson?
    amrdata.flag_richardson_tol = 1.000000e+00  # Richardson tolerance

    amrdata.flag2refine = True      # use this?

    amrdata.regrid_interval = regrid_interval
    amrdata.regrid_buffer_width = 0
    amrdata.clustering_cutoff = 0.800000
    amrdata.verbosity_regrid = 0

    # ----------------------------------------------------------------
    # Conservative form (cell-centered velocities)
    # 1      capacity
    # 2-3    Cell-centered velocities projected
    # 4-5    normal at x face (left)
    # 6-7    normal at x face (right)
    # 8-9    normal at y face (bottom)
    # 10-11  normal at y face (top)
    # 12-15  edgelengths at all four x/y faces in cell.
    # ----------------------------------------------------------------
    if qad_mode in [0,1]:
        # We don't expect to use the values at right/top faces.
        amrdata.aux_type = ['capacity'] + ['center']*2 + ['xleft']*4 + \
            ['yleft']*4 + ['xleft']*2 + ['yleft']*2
    else:
        # Each cell has data for all four faces
        amrdata.aux_type = ['capacity'] + ['center']*14

    #  ----- For developers -----
    # Toggle debugging print statements:
    amrdata.dprint = False      # print domain flags
    amrdata.eprint = False      # print err est flags
    amrdata.edebug = False      # even more err est flags
    amrdata.gprint = False      # grid bisection/clustering
    amrdata.nprint = False      # proper nesting output
    amrdata.pprint = False      # proj. of tagged points
    amrdata.rprint = False      # print regridding summary
    amrdata.sprint = False      # space/memory output
    amrdata.tprint = False      # time step reporting each level
    amrdata.uprint = False      # update/upbnd reporting

    return rundata
# end of function setrun
# ----------------------
if __name__ == '__main__':
    # Set up run-time parameters and write all data files.
    # An optional command-line argument overrides claw_pkg (default 'amrclaw').
    import sys
    rundata = setrun(*sys.argv[1:])
    rundata.write()
| ForestClaw/forestclaw | applications/paper/transport_2d_annulus/setrun.py | Python | bsd-2-clause | 10,831 | [
"NetCDF"
] | bb2ae3397b6f813c61b9744001d33abd95ac7549178dde96731fbde4e4a9dc37 |
import cPickle as pickle

from ase import *
from ase import io
from ase.dft.bee import BEEF_Ensemble
from ase.optimize import QuasiNewton
from ase.structure import molecule
from ase.vibrations import Vibrations
from ase.thermochemistry import IdealGasThermo
from espresso import espresso
from espresso.vibespresso import vibespresso

name = 'N2'

# load N2 molecule and add 20.0 AA vacuum
atoms = molecule('N2')
atoms.center(10.0)

calc = espresso(pw=500,             # plane-wave cutoff
                dw=5000,            # density cutoff
                xc='BEEF-vdW',      # exchange-correlation functional
                kpts='gamma',       # k-point sampling
                nbands=-10,         # 10 extra bands besides the bands needed to hold
                                    # the valence electrons
                sigma=0.1,
                psppath='/home/vossj/suncat/psp/gbrv1.5pbe',  # pseudopotential
                convergence={'energy': 1e-5,
                             'mixing': 0.1,
                             'nmix': 10,
                             'mix': 4,
                             'maxsteps': 500,
                             'diag': 'david'
                             },     # convergence parameters
                beefensemble=True,
                printensemble=True,
                outdir='calcdir')   # output directory for Quantum Espresso files

atoms.set_calculator(calc)

# Relax the molecule, then take the ground-state energy.
vibrateatoms = [atom.index for atom in atoms]
dyn = QuasiNewton(atoms, logfile=name + '.log', trajectory=name + '.traj')
dyn.run(fmax=0.05)
energy = atoms.get_potential_energy()
calc.stop()

# Calculate vibrations
calcvib = vibespresso(pw=500,        # plane-wave cutoff
                      dw=5000,       # density cutoff
                      xc='BEEF-vdW',  # exchange-correlation functional
                      kpts='gamma',  # k-point sampling
                      nbands=-10,    # 10 extra bands besides the bands needed to hold
                                     # the valence electrons
                      sigma=0.1,
                      psppath='/home/vossj/suncat/psp/gbrv1.5pbe',  # pseudopotential
                      convergence={'energy': 1e-5,
                                   'mixing': 0.1,
                                   'nmix': 10,
                                   'mix': 4,
                                   'maxsteps': 500,
                                   'diag': 'david'
                                   },  # convergence parameters
                      outdirprefix='calcdirv')  # output directory for Quantum Espresso files

atoms.set_calculator(calcvib)

vib = Vibrations(atoms, indices=vibrateatoms, delta=0.03)
vib.run()
vib.summary(method='standard')

# Make trajectory files to visualize the modes.
for mode in range(len(vibrateatoms) * 3):
    vib.write_mode(mode)

# Calculate free energy
vib_energies = vib.get_energies()

thermo = IdealGasThermo(vib_energies=vib_energies,
                        electronicenergy=energy,
                        atoms=atoms,
                        geometry='linear',
                        symmetrynumber=2, spin=0)

# At 300K and 101325 Pa
# change for your operating conditions
freeenergy = thermo.get_gibbs_energy(temperature=300, pressure=101325)

# BUG FIX: the original called "f.close" without parentheses, so the file
# was never explicitly closed; a with-block guarantees it.
with open(name + '.energy', 'w') as f:
    f.write('Potential energy: ' + str(energy) + '\n' +
            'Free energy: ' + str(freeenergy) + '\n')

ens = BEEF_Ensemble(calc)
ens_e = ens.get_ensemble_energies()
ens.write('ensemble.bee')
# Close the pickle file handle deterministically (was a leaked open()).
with open('ensemble.pkl', 'w') as pklfile:
    pickle.dump(ens_e, pklfile)
| chemeng444/chemeng444.github.io | ASE/Adsorption/run_N2.py | Python | gpl-2.0 | 3,523 | [
"ASE",
"ESPResSo",
"Quantum ESPRESSO"
] | 3336ce1d456a97f23379e832e4c4e3db67593c4d6a0a381c1c6e5a358d3682ae |
from __future__ import print_function
from fabric.api import run, env
from time import sleep
from boto.exception import EC2ResponseError
from .util import eval_template
def attach_volumes(vm_launcher, options, format=False):
    """Attach and mount the EBS volumes listed in ``options["volumes"]``.

    Each volume dict supplies ``id``, ``device`` and ``path`` plus an
    optional ``format`` flag ("true", "false" or "__auto__").  Volumes that
    are already attached/attaching or already mounted are left alone.
    NOTE: the ``format`` parameter shadows the builtin and is overwritten
    per-volume below; it is kept only for interface compatibility.
    """
    volumes = options.get("volumes", [])
    if not volumes:
        return
    boto_connection = vm_launcher.boto_connection()
    # Ask the EC2 metadata service which instance this code runs on.
    instance_id = run("curl --silent http://169.254.169.254/latest/meta-data/instance-id")
    for volume in volumes:
        volume_id = volume['id']
        device_id = volume['device']
        if not _get_attached(boto_connection, instance_id, device_id, valid_states=["attached", "attaching"]):
            boto_connection.attach_volume(volume_id, instance_id, device_id)
    for volume in volumes:
        volume_id = volume['id']
        device_id = volume['device']
        path = volume.get("path")
        while True:
            if _get_attached(boto_connection, instance_id, device_id):
                break
            sleep(5)
            print("Waiting for volume corresponding to device %s to attach" % device_id)
            # NOTE(review): this unconditional break means we wait at most one
            # 5-second cycle before proceeding -- confirm whether a full
            # polling loop was intended here.
            break
        # Don't mount if already mounted
        if _find_mounted_device_id(path):
            continue
        format = str(volume.get('format', "False")).lower()
        if format == "true":
            _format_device(device_id)
        env.safe_sudo("mkdir -p '%s'" % path)
        try:
            _mount(device_id, path)
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
            # still propagate.  With format="__auto__" we format once, retry.
            if format == "__auto__":
                print("Failed to mount device. format is set to __auto__ so will now format device and retry mount")
                _format_device(device_id)
                _mount(device_id, path)
            else:
                raise
def _mount(device_id, path):
    """Mount block device ``device_id`` at ``path`` (requires sudo)."""
    env.safe_sudo("mount '%s' '%s'" % (device_id, path))
def _format_device(device_id):
    """Create an ext3 filesystem on ``device_id`` -- DESTROYS existing data."""
    env.safe_sudo("mkfs -t ext3 %s" % device_id)
def detach_volumes(vm_launcher, options):
    """Unmount and detach every EBS volume described in ``options["volumes"]``."""
    volumes = options.get("volumes", [])
    if not volumes:
        return
    boto_connection = vm_launcher.boto_connection()
    # Ask the EC2 metadata service which instance this code runs on.
    instance_id = run("curl --silent http://169.254.169.254/latest/meta-data/instance-id")
    for volume in volumes:
        volume_id = volume['id']
        path = volume.get("path")
        env.safe_sudo("umount '%s'" % path)
        _detach(boto_connection, instance_id, volume_id)
def make_snapshots(vm_launcher, options):
    """Snapshot each configured volume path unless its entry opts out."""
    volumes = options.get("volumes", [])
    for volume in volumes:
        path = volume.get("path")
        desc = volume.get("description", "Snapshot of path %s" % path)
        # Descriptions may contain templates expanded against the fabric env.
        desc = eval_template(env, desc)
        # Allow volume to specify it should not be snapshotted, e.g. if
        # piggy backing on core teams snapshots for galaxyIndicies for instance.
        snapshot = volume.get("snapshot", True)
        if snapshot:
            _make_snapshot(vm_launcher, path, desc)
def _get_attached(conn, instance_id, device_id, valid_states=['attached']):
vol_list = conn.get_all_volumes()
fs_vol = None
for vol in vol_list:
if vol.attach_data.instance_id == instance_id and vol.attach_data.device == device_id:
if vol.attach_data.status in valid_states:
fs_vol = vol
break
return fs_vol
def _make_snapshot(vm_launcher, fs_path, desc):
    """ Create a snapshot of an existing volume that is currently attached to an
        instance, taking care of the unmounting and detaching. If you specify the
        optional argument (:galaxy), the script will pull the latest Galaxy code
        from bitbucket and perform an update before snapshotting. Else, the script
        will prompt for the file system path to be snapshoted.

        In order for this to work, an instance on EC2 needs to be running with a
        volume that wants to be snapshoted attached and mounted. The script will
        unmount the volume, create a snaphost and offer to reattach and mount the
        volume or create a new one from the freshly created snapshot.

        Except for potentially Galaxy, MAKE SURE there are no running processes
        using the volume and that no one is logged into the instance and sitting
        in the given directory.
    """
    # Identify this instance and its region via the EC2 metadata service.
    instance_id = run("curl --silent http://169.254.169.254/latest/meta-data/instance-id")
    availability_zone = run("curl --silent http://169.254.169.254/latest/meta-data/placement/availability-zone")
    instance_region = availability_zone[:-1]  # Truncate zone letter to get region name
    # Find the device where the file system is mounted to
    # Find the EBS volume where the file system resides
    device_id = _find_mounted_device_id(fs_path)
    ec2_conn = vm_launcher.boto_connection()
    fs_vol = _get_attached(ec2_conn, instance_id, device_id)
    if fs_vol:
        # Must unmount and detach before a consistent snapshot can be taken.
        env.safe_sudo("umount %s" % fs_path)
        _detach(ec2_conn, instance_id, fs_vol.id)
        snap_id = _create_snapshot(ec2_conn, fs_vol.id, desc)
        # TODO: Auto Update snaps?
        make_public = True
        if make_public:  # Make option
            ec2_conn.modify_snapshot_attribute(snap_id, attribute='createVolumePermission', operation='add', groups=['all'])
        reattach = True
        if reattach:
            # Restore the volume to its previous device and mount point.
            _attach(ec2_conn, instance_id, fs_vol.id, device_id)
            env.safe_sudo("mount %s %s" % (device_id, fs_path))
        delete_old_volume = False
        if delete_old_volume:
            _delete_volume(ec2_conn, fs_vol.id)
        print("----- Done snapshoting volume '%s' for file system '%s' -----" % (fs_vol.id, fs_path))
    else:
        print("ERROR: Failed to find require file system, is boto installed? Is it not actually mounted?")
def _find_mounted_device_id(path):
    """Return the device mounted at ``path`` (empty output when none is)."""
    # Adding dollar sign to grep to distinguish between /mnt/galaxy and /mnt/galaxyIndices
    device_id = env.safe_sudo("df | grep '%s$' | awk '{print $1}'" % path)
    return device_id
def _attach(ec2_conn, instance_id, volume_id, device):
    """
    Attach EBS volume to the given device (using boto).
    Try it for some time.

    Returns True on success, False if the attach call raised or the volume
    never reached the 'attached' state within ~30 polls (3s apart).
    """
    try:
        print("Attaching volume '%s' to instance '%s' as device '%s'" % (volume_id, instance_id, device))
        # The initial status comes from the attach call itself; it is
        # re-fetched from get_all_volumes() on each loop iteration below.
        volumestatus = ec2_conn.attach_volume(volume_id, instance_id, device)
    except EC2ResponseError as e:
        print("Attaching volume '%s' to instance '%s' as device '%s' failed. Exception: %s" % (volume_id, instance_id, device, e))
        return False

    for counter in range(30):
        print("Attach attempt %s, volume status: %s" % (counter, volumestatus))
        if volumestatus == 'attached':
            print("Volume '%s' attached to instance '%s' as device '%s'" % (volume_id, instance_id, device))
            break
        if counter == 29:
            print("Volume '%s' FAILED to attach to instance '%s' as device '%s'. Aborting." % (volume_id, instance_id, device))
            return False

        volumes = ec2_conn.get_all_volumes([volume_id])
        volumestatus = volumes[0].attachment_state()
        sleep(3)
    return True
def _detach(ec2_conn, instance_id, volume_id):
    """
    Detach EBS volume from the given instance (using boto).
    Try it for some time.

    Returns False only when the detach call itself raises; otherwise returns
    None after the polling loop.  NOTE(review): the counter==29 branch prints
    FAILED but does not return/abort -- confirm whether that was intended.
    """
    try:
        volumestatus = ec2_conn.detach_volume( volume_id, instance_id, force=True )
    except EC2ResponseError as e:
        print("Detaching volume '%s' from instance '%s' failed. Exception: %s" % ( volume_id, instance_id, str(e) ))
        return False

    for counter in range( 30 ):
        print("Volume '%s' status '%s'" % ( volume_id, volumestatus ))
        if volumestatus == 'available':
            print("Volume '%s' successfully detached from instance '%s'." % ( volume_id, instance_id ))
            break
        if counter == 29:
            print("Volume '%s' FAILED to detach to instance '%s'." % ( volume_id, instance_id ))
        # Re-poll the volume state every 3 seconds.
        sleep(3)
        volumes = ec2_conn.get_all_volumes( [volume_id] )
        volumestatus = volumes[0].status
def _delete_volume(ec2_conn, vol_id):
    """Delete the EBS volume, logging instead of raising on API failure."""
    try:
        ec2_conn.delete_volume(vol_id)
        print("Deleted volume '%s'" % vol_id)
    except EC2ResponseError as e:
        print("ERROR deleting volume '%s': %s" % (vol_id, e))
def _create_snapshot(ec2_conn, volume_id, description=None):
    """
    Create a snapshot of the EBS volume with the provided volume_id.
    Wait until the snapshot process is complete (note that this may take quite a while)

    Returns the snapshot id on success, False when boto returned no snapshot.
    """
    snapshot = ec2_conn.create_snapshot(volume_id, description=description)
    if snapshot:
        # Block (polling every 6s) until AWS reports the snapshot finished.
        while snapshot.status != 'completed':
            sleep(6)
            snapshot.update()
        print("Creation of snapshot for volume '%s' completed: '%s'" % (volume_id, snapshot))
        return snapshot.id
    else:
        print("Could not create snapshot from volume with ID '%s'" % volume_id)
        return False
| chapmanb/cloudbiolinux | cloudbio/deploy/volume.py | Python | mit | 8,821 | [
"Galaxy"
] | e7503dcebb30849f5861e8ab7648bdbc5f15b9d529a04a223cbf3f73a3e856e0 |
#=== PHYSICS =========================================================================================
# 2D physics functions.
# Authors: Tom De Smedt, Giorgio Olivero
# License: BSD (see LICENSE.txt for details).
# Copyright (c) 2008-2012 City In A Bottle (cityinabottle.org)
# http://cityinabottle.org/nodebox
# This module can benefit greatly from loading psyco.
from math import sqrt, pow
from math import sin, cos, atan2, degrees, radians, pi
from random import random
from heapq import heappush, heappop
from warnings import warn
# float("inf") doesn't work on windows.
# (Large sentinel used in place of a true infinity throughout this module.)
INFINITE = 1e20
# This module is standalone, line(), ellipse() and Text.draw()
# must be either implemented or patched:
def line(x1, y1, x2, y2, stroke=(0,0,0,1), strokewidth=1):
    """Stub: draw a line from (x1,y1) to (x2,y2); patched by the host app."""
    pass
def ellipse(x, y, width, height, fill=(0,0,0,1), stroke=None, strokewidth=1):
    """Stub: draw an ellipse at (x,y); patched by the host application."""
    pass
class Text:
    """Minimal drawable-text stand-in: a string plus arbitrary styling
    attributes supplied as keyword arguments."""

    def __init__(self, string, **kwargs):
        self.string = string
        for key, value in kwargs.items():
            setattr(self, key, value)

    def copy(self):
        """Return a new Text with the same string and styling attributes."""
        attrs = dict(self.__dict__)
        del attrs["string"]
        return Text(self.string, **attrs)

    def draw(self):
        """Stub; patched by the host application."""
        pass
#=====================================================================================================
#--- VECTOR ------------------------------------------------------------------------------------------
# A Euclidean vector (sometimes called a geometric or spatial vector, or - as here - simply a vector)
# is a geometric object that has both a magnitude (or length) and direction.
# A vector is frequently represented by a line segment with an arrow.
class Vector(object):

    def __init__(self, x=0, y=0, z=0, length=None, angle=None):
        """ A vector represents a direction and a magnitude (or length).
            Vectors can be added, subtracted, multiplied, divided, flipped, and 2D rotated.
            Vectors are used in physics to represent velocity and acceleration.
        """
        self.x = float(x)
        self.y = float(y)
        self.z = float(z)
        if length is not None:
            self.length = length
        if angle is not None:
            self.angle = angle

    def copy(self):
        """Return an independent copy of this vector."""
        return Vector(self.x, self.y, self.z)

    def __getitem__(self, i):
        # Index access: v[0] == v.x, v[1] == v.y, v[2] == v.z.
        return (self.x, self.y, self.z)[i]

    def __setitem__(self, i, v):
        setattr(self, ("x", "y", "z")[i], float(v))

    def _get_xyz(self):
        return (self.x, self.y, self.z)

    def _set_xyz(self, xyz):
        # Accepts any (x, y, z) iterable.  (The original used Python-2-only
        # tuple parameter unpacking, a syntax error on Python 3.)
        x, y, z = xyz
        self.x = float(x)
        self.y = float(y)
        self.z = float(z)

    xyz = property(_get_xyz, _set_xyz)

    def _get_xy(self):
        return (self.x, self.y)

    def _set_xy(self, xy):
        # Accepts any (x, y) iterable; z is left unchanged.
        x, y = xy
        self.x = float(x)
        self.y = float(y)

    xy = property(_get_xy, _set_xy)

    def _get_length(self):
        return sqrt(self.x**2 + self.y**2 + self.z**2)

    def _set_length(self, n):
        # Scale all components so the magnitude becomes n (direction kept).
        d = self.length or 1
        self.x *= n/d
        self.y *= n/d
        self.z *= n/d

    length = magnitude = property(_get_length, _set_length)

    def distance(self, v):
        """ Returns the distance between two vectors,
            e.g. if two vectors would be two sides of a triangle, returns the third side.
        """
        dx = v.x - self.x
        dy = v.y - self.y
        dz = v.z - self.z
        return sqrt(dx**2 + dy**2 + dz**2)

    def distance2(self, v):
        # Squared distance, avoiding the costly root calculation.
        return (v.x-self.x)**2 + (v.y-self.y)**2 + (v.z-self.z)**2

    def normalize(self):
        """ Normalizes the vector to a unit vector with length=1.
        """
        d = self.length or 1
        self.x /= d
        self.y /= d
        self.z /= d

    def _normalized(self):
        """ Yields a new vector that is the normalized vector of this vector.
        """
        d = self.length
        if d == 0:
            return self.copy()
        return Vector(self.x/d, self.y/d, self.z/d)

    normalized = unit = property(_normalized)

    def reverse(self):
        """ Reverses the direction of the vector so it points in the opposite direction.
        """
        self.x = -self.x
        self.y = -self.y
        self.z = -self.z

    flip = reverse

    def _reversed(self):
        """ Yields a new vector pointing in the opposite direction of this vector.
        """
        return Vector(-self.x, -self.y, -self.z)

    reversed = flipped = inverse = property(_reversed)

    # v.normal, v.angle, v.rotate(), v.rotated() and v.angle_to() are defined in 2D.
    # v.in2D.rotate() is here for decorational purposes.
    @property
    def in2D(self):
        return self

    def _orthogonal(self):
        """ Yields a new vector whose 2D angle is 90 degrees (perpendicular) to this vector.
            In 3D, there would be many perpendicular vectors.
        """
        return Vector(self.y, -self.x, self.z)

    orthogonal = perpendicular = normal = property(_orthogonal)

    def _get_angle(self):
        """ Yields the 2D direction of the vector.
        """
        return degrees(atan2(self.y, self.x))

    def _set_angle(self, degrees):
        # Note: the parameter shadows math.degrees inside this setter.
        d = self.length
        self.x = cos(radians(degrees)) * d
        self.y = sin(radians(degrees)) * d

    angle = direction = property(_get_angle, _set_angle)

    def rotate(self, degrees):
        """ Rotates the direction of the vector in 2D.
        """
        self.angle += degrees

    def rotated(self, degrees):
        """ Returns a copy of the vector with direction rotated in 2D.
        """
        v = self.copy()
        v.rotate(degrees)
        return v

    def angle_to(self, v):
        """ Returns the 2D angle between two vectors.
        """
        return degrees(atan2(v.y, v.x) - atan2(self.y, self.x))

    angle_between = angle_to

    # Arithmetic operators.
    # + - * / returns new vector objects.
    def __add__(self, v):
        if isinstance(v, (int, float)):
            return Vector(self.x+v, self.y+v, self.z+v)
        return Vector(self.x+v.x, self.y+v.y, self.z+v.z)

    def __sub__(self, v):
        if isinstance(v, (int, float)):
            return Vector(self.x-v, self.y-v, self.z-v)
        return Vector(self.x-v.x, self.y-v.y, self.z-v.z)

    def __mul__(self, v):
        if isinstance(v, (int, float)):
            return Vector(self.x*v, self.y*v, self.z*v)
        return Vector(self.x*v.x, self.y*v.y, self.z*v.z)

    def __div__(self, v):
        if isinstance(v, (int, float)):
            return Vector(self.x/v, self.y/v, self.z/v)
        return Vector(self.x/v.x, self.y/v.y, self.z/v.z)

    # Python 3 uses __truediv__ for "/"; alias so division works there too.
    __truediv__ = __div__

    # += -= *= /= modify the vector coordinates in-place.
    def __iadd__(self, v):
        if isinstance(v, (int, float)):
            self.x+=v; self.y+=v; self.z+=v; return self
        self.x+=v.x; self.y+=v.y; self.z+=v.z; return self

    def __isub__(self, v):
        if isinstance(v, (int, float)):
            self.x-=v; self.y-=v; self.z-=v; return self
        self.x-=v.x; self.y-=v.y; self.z-=v.z; return self

    def __imul__(self, v):
        if isinstance(v, (int, float)):
            self.x*=v; self.y*=v; self.z*=v; return self
        self.x*=v.x; self.y*=v.y; self.z*=v.z; return self

    def __idiv__(self, v):
        if isinstance(v, (int, float)):
            self.x/=v; self.y/=v; self.z/=v; return self
        self.x/=v.x; self.y/=v.y; self.z/=v.z; return self

    __itruediv__ = __idiv__

    def dot(self, v):
        """ Returns a scalar that is the dot product between the two vectors.
        """
        return self.x*v.x + self.y*v.y + self.z*v.z

    def cross(self, v):
        """ Returns a new vector that is the cross product between the two vectors.
        """
        return Vector(self.y*v.z - self.z*v.y,
                      self.z*v.x - self.x*v.z,
                      self.x*v.y - self.y*v.x)

    def __neg__(self):
        return Vector(-self.x, -self.y, -self.z)

    def __eq__(self, v):
        return isinstance(v, Vector) and self.x == v.x and self.y == v.y and self.z == v.z

    def __ne__(self, v):
        return not self.__eq__(v)

    def __repr__(self):
        return "%s(%.2f, %.2f, %.2f)" % (self.__class__.__name__, self.x, self.y, self.z)

    def draw(self, x, y):
        """ Draws the vector in 2D (z-axis is ignored).
            Set stroke() and strokewidth() first.
        """
        ellipse(x, y, 4, 4)
        line(x, y, x+self.x, y+self.y)
#=====================================================================================================
#--- FLOCKING ----------------------------------------------------------------------------------------
# Boids is an artificial life program, developed by Craig Reynolds in 1986,
# which simulates the flocking behavior of birds.
# Boids is an example of emergent behavior, the complexity of Boids arises
# from the interaction of individual agents adhering to a set of simple rules:
# - separation: steer to avoid crowding local flockmates,
# - alignment: steer towards the average heading of local flockmates,
# - cohesion: steer to move toward the average position of local flockmates.
# Unexpected behavior, such as splitting flocks and reuniting after avoiding obstacles,
# can be considered emergent. The boids framework is often used in computer graphics,
# providing realistic-looking representations of flocks of birds and other creatures,
# such as schools of fish or herds of animals.
_UID = 0
def _uid():
global _UID; _UID+=1; return _UID
class Boid:

    def __init__(self, flock, x=0, y=0, z=0, sight=70, space=30):
        """ An agent in a flock with an (x,y,z)-position subject to different forces.
            - sight : radius of local flockmates when calculating cohesion and alignment.
            - space : radius of personal space when calculating separation.
        """
        self._id = _uid()
        self.flock = flock
        self.x = x
        self.y = y
        self.z = z
        # Start with a small random velocity in each axis (-1..+1).
        self.velocity = Vector(random()*2-1, random()*2-1, random()*2-1)
        self.target = None     # A target Vector towards which the boid will steer.
        self.sight = sight     # The radius of cohesion and alignment, and visible obstacles.
        self.space = space     # The radius of separation.
        self.dodge = False     # Avoiding an obstacle?
        self.crowd = 0         # Percentage (0.0-1.0) of flockmates within sight.

    def __eq__(self, other):
        # Comparing boids by id makes it significantly faster.
        return isinstance(other, Boid) and self._id == other._id

    def __ne__(self, other):
        return not self.__eq__(other)

    def copy(self):
        # Note: the copy shares the same flock and target references.
        b = Boid(self.flock, self.x, self.y, self.z, self.sight, self.space)
        b.velocity = self.velocity.copy()
        b.target = self.target
        return b

    @property
    def heading(self):
        """ The boid's heading as an angle in degrees.
        """
        return self.velocity.angle

    @property
    def depth(self):
        """ The boid's relative depth (0.0-1.0) in the flock's container box.
        """
        return not self.flock.depth and 1.0 or max(0.0, min(1.0, self.z / self.flock.depth))

    def near(self, boid, distance=50):
        """ Returns True if the given boid is within distance.
        """
        # Distance is measured in a box instead of a sphere for performance.
        return abs(self.x - boid.x) < distance and \
               abs(self.y - boid.y) < distance and \
               abs(self.z - boid.z) < distance

    def separation(self, distance=25):
        """ Returns steering velocity (vx,vy,vz) to avoid crowding local flockmates.
        """
        vx = vy = vz = 0.0
        for b in self.flock:
            if b != self:
                # Each nearby axis contributes a push away from the neighbor.
                if abs(self.x-b.x) < distance: vx += self.x - b.x
                if abs(self.y-b.y) < distance: vy += self.y - b.y
                if abs(self.z-b.z) < distance: vz += self.z - b.z
        return vx, vy, vz

    def alignment(self, distance=50):
        """ Returns steering velocity (vx,vy,vz) towards the average heading of local flockmates.
        """
        vx = vy = vz = n = 0.0
        for b in self.flock:
            if b != self and b.near(self, distance):
                vx += b.velocity.x
                vy += b.velocity.y
                vz += b.velocity.z; n += 1
        if n:
            # Steer towards the mean neighbor velocity, relative to our own.
            return (vx/n-self.velocity.x), (vy/n-self.velocity.y), (vz/n-self.velocity.z)
        return vx, vy, vz

    def cohesion(self, distance=40):
        """ Returns steering velocity (vx,vy,vz) towards the average position of local flockmates.
        """
        vx = vy = vz = n = 0.0
        for b in self.flock:
            if b != self and b.near(self, distance):
                vx += b.x
                vy += b.y
                vz += b.z; n += 1
        # Calculate percentage of flockmates within sight.
        self.crowd = float(n) / (len(self.flock) or 1)
        if n:
            return (vx/n-self.x), (vy/n-self.y), (vz/n-self.z)
        return vx, vy, vz

    def avoidance(self):
        """ Returns steering velocity (vx,vy,0) to avoid 2D obstacles.
            The boid is not guaranteed to avoid collision.
        """
        vx = vy = 0.0
        self.dodge = False
        for o in self.flock.obstacles:
            dx = o.x - self.x
            dy = o.y - self.y
            d = sqrt(dx**2 + dy**2)       # Distance to obstacle.
            s = (self.sight + o.radius)   # Visibility range.
            if d < s:
                self.dodge = True
                # Force grows exponentially from 0.0 to 1.0,
                # where 1.0 means the boid touches the obstacle circumference.
                f = (d-o.radius) / (s-o.radius)
                f = (1-f)**2
                if d < o.radius:
                    # Inside the obstacle: quadruple the push outward.
                    f *= 4
                    #self.velocity.reverse()
                vx -= dx * f
                vy -= dy * f
        return (vx, vy, 0)

    def limit(self, speed=10.0):
        """ Limits the boid's velocity (the boid can momentarily go very fast).
        """
        # Scales all components by the largest one so direction is preserved.
        v = self.velocity
        m = max(abs(v.x), abs(v.y), abs(v.z)) or 1
        if abs(v.x) > speed: v.x = v.x / m * speed
        if abs(v.y) > speed: v.y = v.y / m * speed
        if abs(v.z) > speed: v.z = v.z / m * speed

    def update(self, separation=0.2, cohesion=0.2, alignment=0.6, avoidance=0.6, target=0.2, limit=15.0):
        """ Updates the boid's velocity based on the cohesion, separation and alignment forces.
            - separation: force that keeps boids apart.
            - cohesion  : force that keeps boids closer together.
            - alignment : force that makes boids move in the same direction.
            - avoidance : force that steers the boid away from obstacles.
            - target    : force that steers the boid towards a target vector.
            - limit     : maximum velocity.
        """
        # All force weights are scaled down by a common factor of 0.1.
        f = 0.1
        m1, m2, m3, m4, m5 = separation*f, cohesion*f, alignment*f, avoidance*f, target*f
        vx1, vy1, vz1 = self.separation(self.space)
        vx2, vy2, vz2 = self.cohesion(self.sight)
        vx3, vy3, vz3 = self.alignment(self.sight)
        vx4, vy4, vz4 = self.avoidance()
        vx5, vy5, vz5 = self.target and (
            (self.target.x-self.x),
            (self.target.y-self.y),
            (self.target.z-self.z)) or (0,0,0)
        self.velocity.x += m1*vx1 + m2*vx2 + m3*vx3 + m4*vx4 + m5*vx5
        self.velocity.y += m1*vy1 + m2*vy2 + m3*vy3 + m4*vy4 + m5*vy5
        self.velocity.z += m1*vz1 + m2*vz2 + m3*vz3 + m4*vz4 + m5*vz5
        self.velocity.z = self.flock.depth and self.velocity.z or 0  # No z-axis for Flock.depth=0
        self.limit(speed=limit)
        self.x += self.velocity.x
        self.y += self.velocity.y
        self.z += self.velocity.z

    def seek(self, vector):
        """ Sets the given Vector as the boid's target.
        """
        self.target = vector

    def __repr__(self):
        return "Boid(x=%.1f, y=%.1f, z=%.1f)" % (self.x, self.y, self.z)
class Obstacle:
    def __init__(self, x=0, y=0, z=0, radius=10):
        """ A spherical obstacle with an (x, y, z) position and a radius.
        Boids will steer around obstacles that the flock is aware of, and that they can see.
        """
        self.x, self.y, self.z = x, y, z
        self.radius = radius

    def copy(self):
        """ Returns a new obstacle at the same position, with the same radius. """
        return Obstacle(self.x, self.y, self.z, self.radius)

    def __repr__(self):
        return "Obstacle(x=%.1f, y=%.1f, z=%.1f, radius=%.1f)" % (self.x, self.y, self.z, self.radius)
class Flock(list):
    def __init__(self, amount, x, y, width, height, depth=100.0, obstacles=[]):
        """ A flock of the given amount of boids, confined to a box.
        Obstacles can be passed here or added to Flock.obstacles later
        (boids will steer away from them).
        """
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        self.depth = depth
        self.scattered = False
        self.gather = 0.05
        # Copy the given obstacles into a new list. The previous implementation
        # silently ignored the parameter; copying also avoids sharing the
        # mutable default list between Flock instances.
        self.obstacles = list(obstacles)
        for i in range(amount):
            # Boids will originate from the center of the flocking area.
            b = Boid(self,
                self.x + 0.5 * (width or 0),
                self.y + 0.5 * (height or 0),
                         0.5 * (depth or 0))
            self.append(b)
    @property
    def boids(self):
        return self
    def copy(self):
        """ Returns a deep copy of the flock, its boids and its obstacles. """
        f = Flock(0, self.x, self.y, self.width, self.height, self.depth)
        f.scattered = self.scattered
        f.gather = self.gather
        f.obstacles = [o.copy() for o in self.obstacles]
        for b in self:
            f.append(b.copy())
        return f
    def seek(self, target):
        """ Sets the target vector of all boids in the flock (None for no target).
        """
        for b in self:
            b.seek(target)
    def sight(self, distance):
        # Sets the visibility range of all boids.
        for b in self:
            b.sight = distance
    def space(self, distance):
        # Sets the personal space of all boids.
        for b in self:
            b.space = distance
    def constrain(self, force=1.0, teleport=False):
        """ Keep the flock inside the rectangular flocking area.
        The given force determines how fast the boids will swivel when near an edge.
        Alternatively, with teleport=True boids that cross a 2D edge teleport to the opposite side.
        """
        f = 5
        def _teleport(b):
            if b.x < self.x:
                b.x = self.x + self.width
            if b.x > self.x + self.width:
                b.x = self.x
            if b.y < self.y:
                b.y = self.y + self.height
            if b.y > self.y + self.height:
                b.y = self.y
        def _constrain(b):
            if b.x < self.x:
                b.velocity.x += force * f * random()
            if b.x > self.x + self.width:
                b.velocity.x -= force * f * random()
            if b.y < self.y:
                b.velocity.y += force * f * random()
            if b.y > self.y + self.height:
                b.velocity.y -= force * f * random()
        for b in self:
            # The z-axis is always constrained (teleporting is 2D only).
            if b.z < 0:
                b.velocity.z += force * f * random()
            if b.z > self.depth:
                b.velocity.z -= force * f * random()
            # Explicit branch instead of "teleport and _teleport(b) or _constrain(b)":
            # since _teleport() returns None, the old and/or chain always fell
            # through to _constrain() as well (a no-op only by accident).
            if teleport:
                _teleport(b)
            else:
                _constrain(b)
    def scatter(self, gather=0.05):
        """ Scatters the flock, until Flock.scattered=False.
        Flock.gather is the chance (0.0-1.0, or True/False) that the flock will reunite by itself.
        """
        self.scattered = True
        self.gather = gather
    def update(self, separation=0.2, cohesion=0.2, alignment=0.6, avoidance=0.6, target=0.2, limit=15.0, constrain=1.0, teleport=False):
        """ Updates the boid velocities based on the given forces.
        Different forces elicit different flocking behavior; fine-tuning them can be delicate.
        """
        if self.scattered:
            # When scattered, make the boid cohesion negative and diminish alignment.
            self.scattered = (random() > self.gather)
            cohesion = -0.01
            alignment *= 0.25
        for b in self:
            b.update(separation, cohesion, alignment, avoidance, target, limit)
        self.constrain(force=constrain, teleport=teleport)
    def by_depth(self):
        """ Returns the boids in the flock sorted by depth (z-axis).
        """
        return sorted(self, key=lambda boid: boid.z)
    def __repr__(self):
        return "Flock(%s)" % repr(list(self))

flock = Flock
#=== SYSTEM ==========================================================================================
# A computer graphics technique to simulate certain fuzzy phenomena,
# which are otherwise very hard to reproduce with conventional rendering techniques:
# fire, explosions, smoke, moving water, sparks, falling leaves, clouds, fog, snow, dust,
# meteor tails, hair, fur, grass, or abstract visual effects like glowing trails, magic spells.
#--- FORCE -------------------------------------------------------------------------------------------
class Force:
    def __init__(self, particle1, particle2, strength=1.0, threshold=100.0):
        """ An attractive or repulsive force between two particles,
        causing objects with a mass to accelerate.
        A negative strength indicates an attractive force.
        """
        self.particle1 = particle1
        self.particle2 = particle2
        self.strength = strength
        self.threshold = threshold

    def apply(self):
        """ Accumulates the force into Particle.force of both particles,
        based on their distance and mass.
        """
        p1, p2 = self.particle1, self.particle2
        # The distance is clamped to a minimum threshold so the force can not
        # grow arbitrarily large when particles are very close together
        # (e.g. distance 100 divides the force by 10000, distance 5 only by 25).
        dx = p2.x - p1.x
        dy = p2.y - p1.y
        d = max(sqrt(dx * dx + dy * dy), self.threshold)
        # Heavier particles exert a stronger force;
        # the force fades with the square of the distance.
        push = 10.0 * -self.strength * p1.mass * p2.mass / (d * d)
        fx = push * dx / d
        fy = push * dy / d
        p1.force.x += fx
        p1.force.y += fy
        p2.force.x -= fx
        p2.force.y -= fy

    def __repr__(self):
        return "Force(strength=%.2f)" % self.strength

force = Force
#--- SPRING ------------------------------------------------------------------------------------------
class Spring:
    def __init__(self, particle1, particle2, length, strength=1.0):
        """ A force that exerts attractive resistance when its rest length changes.
        A spring acts as a flexible (but secure) connection between two particles.
        """
        self.particle1 = particle1
        self.particle2 = particle2
        self.strength = strength
        self.length = length
        self.snapped = False

    def snap(self):
        """ Breaks the connection between the two particles. """
        self.snapped = True

    def apply(self):
        """ Accumulates the spring force into Particle.force of both particles. """
        p1, p2 = self.particle1, self.particle2
        dx = p2.x - p1.x
        dy = p2.y - p1.y
        d = sqrt(dx * dx + dy * dy)
        if d == 0:
            # The particles coincide; the direction of the pull is undefined.
            return
        # The attractive strength decreases for heavy particles
        # and increases when the spring is stretched beyond its rest length.
        pull = 10.0 * self.strength / (p1.mass * p2.mass) * (d - self.length)
        fx = pull * dx / d
        fy = pull * dy / d
        p1.force.x += fx
        p1.force.y += fy
        p2.force.x -= fx
        p2.force.y -= fy

    def draw(self, **kwargs):
        """ Draws the spring as a line between the two particles. """
        line(self.particle1.x, self.particle1.y,
             self.particle2.x, self.particle2.y, **kwargs)

    def __repr__(self):
        return "Spring(strength='%.2f', length='%.2f')" % (self.strength, self.length)

spring = Spring
#--- PARTICLE ----------------------------------------------------------------------------------------
# Sentinel for the radius parameter: draw the particle as large as its mass.
MASS = "mass"

class Particle:
    def __init__(self, x, y, velocity=(0.0,0.0), mass=10.0, radius=10.0, life=None, fixed=False):
        """ An object with a mass, subjected to attractive and repulsive forces.
        The object's velocity is an inherent force (e.g. a rocket propeller to escape gravity).
        - life : lifespan in frames (None = live forever).
        - fixed: a fixed particle is unaffected by the applied forces.
        """
        self._id = _uid()
        # A small random offset keeps coinciding particles from deadlocking.
        self.x = x + random()
        self.y = y + random()
        self.mass = mass
        self.radius = mass if radius == MASS and mass else radius
        self.velocity = isinstance(velocity, tuple) and Vector(*velocity) or velocity
        self.force = Vector(0.0, 0.0) # Force accumulator.
        self.life = life
        self._age = 0.0
        self.dead = False
        self.fixed = fixed

    @property
    def age(self):
        """ Yields the particle's age as a number between 0.0 and 1.0. """
        if not self.life:
            return 0.0
        return min(1.0, float(self._age) / self.life)

    def draw(self, **kwargs):
        """ Draws the particle as a circle that shrinks as the particle ages. """
        r = self.radius * (1 - self.age)
        ellipse(self.x, self.y, r * 2, r * 2, **kwargs)

    def __eq__(self, other):
        # Particles are equal when they share the same unique id.
        return isinstance(other, Particle) and self._id == other._id

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "Particle(x=%.1f, y=%.1f, radius=%.1f, mass=%.1f)" % (
            self.x, self.y, self.radius, self.mass)

particle = Particle
#--- SYSTEM ------------------------------------------------------------------------------------------
class flist(list):
    """ List of forces or springs that keeps System._dynamics in synch:
    each contained force is registered under both of its particles' ids.
    """
    def __init__(self, system):
        self.system = system
    def insert(self, i, force):
        # Register the force with both of its particles.
        list.insert(self, i, force)
        self.system._dynamics.setdefault(force.particle1._id, []).append(force)
        self.system._dynamics.setdefault(force.particle2._id, []).append(force)
    def append(self, force):
        self.insert(len(self), force)
    def extend(self, forces):
        for f in forces: self.append(f)
    def pop(self, i):
        f = list.pop(self, i)
        # Unregister only the popped force from each particle's list.
        # (The old code referenced an undefined name "force" — which resolved to
        # the module-level Force alias and raised — and would have discarded the
        # particles' entire force lists instead of just this force.)
        for p in (f.particle1, f.particle2):
            F = self.system._dynamics.get(p._id, [])
            if f in F:
                F.remove(f)
        return f
    def remove(self, force):
        i = self.index(force); self.pop(i)
class System(object):
    def __init__(self, gravity=(0,0), drag=0.0):
        """ A collection of particles and the forces working on them.
        - gravity: an (x, y)-tuple or Vector applied to every particle each update().
        - drag   : friction (0.0-1.0) that slows particles down each update().
        """
        self.particles = []
        self.emitters = []
        # Forces and springs live in flist objects so that System._dynamics
        # stays in synch when they are added or removed.
        self.forces = flist(self)
        self.springs = flist(self)
        self.gravity = isinstance(gravity, tuple) and Vector(*gravity) or gravity
        self.drag = drag
        self._dynamics = {} # Particle id linked to list of applied forces.
    def __len__(self):
        return len(self.particles)
    def __iter__(self):
        return iter(self.particles)
    def __getitem__(self, i):
        return self.particles[i]
    def extend(self, x):
        # Appends each item (Particle, Force, Spring or Emitter) in turn.
        for x in x: self.append(x)
    def append(self, x):
        # Appends a Particle, Force, Spring or Emitter to the system.
        # Anything else is silently ignored.
        if isinstance(x, Particle) and not x in self.particles:
            self.particles.append(x)
        elif isinstance(x, Force):
            self.forces.append(x)
        elif isinstance(x, Spring):
            self.springs.append(x)
        elif isinstance(x, Emitter):
            # An emitter contributes its particles and is updated from System.update().
            self.emitters.append(x)
            self.extend(x.particles)
            x.system = self
    def _cross(self, f=lambda particle1, particle2: None, source=None, particles=[]):
        # Applies function f to any two given particles in the list,
        # or between source and any other particle if source is given.
        # (The list comprehension below is used for its side effects only.)
        P = particles or self.particles
        for i, p1 in enumerate(P):
            if source is None:
                [f(p1, p2) for p2 in P[i+1:]]
            else:
                f(source, p1)
    def force(self, strength=1.0, threshold=100, source=None, particles=[]):
        """ The given force is applied between each two particles.
        The effect this yields (with a repulsive force) is an explosion.
        - source: one vs. all, apply the force to this particle with all others.
        - particles: a list of particles to apply the force to (some vs. some or some vs. source).
        Be aware that 50 particles will yield 1250 forces: O(n**2/2); or O(n) with source.
        The force is applied to particles present in the system,
        those added later on are not subjected to the force.
        """
        f = lambda p1, p2: self.forces.append(Force(p1, p2, strength, threshold))
        self._cross(f, source, particles)
    def dynamics(self, particle, type=None):
        """ Returns a list of forces working on the particle, optionally filtered by type (e.g. Spring).
        """
        # Accepts a Particle object or a raw particle id.
        F = self._dynamics.get(isinstance(particle, Particle) and particle._id or particle, [])
        F = [f for f in F if type is None or isinstance(f, type)]
        return F
    def limit(self, particle, m=None):
        """ Limits the movement of the particle to m.
        When repulsive particles are close to each other, their force can be very high.
        This results in large movement steps, and gaps in the animation.
        This can be remedied by limiting the total force.
        """
        # The right way to do it requires 4x sqrt():
        # if m and particle.force.length > m:
        #     particle.force.length = m
        # if m and particle.velocity.length > m:
        #     particle.velocity.length = m
        # Instead, each vector is rescaled per-axis (cheaper, approximate).
        if m is not None:
            for f in (particle.force, particle.velocity):
                if abs(f.x) > m:
                    f.y *= m / abs(f.x)
                    f.x *= m / abs(f.x)
                if abs(f.y) > m:
                    f.x *= m / abs(f.y)
                    f.y *= m / abs(f.y)
    def update(self, limit=30):
        """ Updates the location of the particles by applying all the forces.
        - limit: maximum accumulated force/velocity per axis (see System.limit()).
        """
        for e in self.emitters:
            # Fire particles from emitters.
            e.update()
        for p in self.particles:
            # Apply gravity. Heavier objects have a stronger attraction.
            # NOTE(review): the y-component is negated — presumably because the
            # y-axis points down in screen coordinates; confirm against the renderer.
            p.force.x = 0
            p.force.y = 0
            p.force.x += 0.1 * self.gravity.x * p.mass
            p.force.y += 0.1 * -self.gravity.y * p.mass
        for f in self.forces:
            # Apply attractive and repulsive forces between particles.
            if not f.particle1.dead and \
               not f.particle2.dead:
                f.apply()
        for s in self.springs:
            # Apply spring forces between particles.
            if not s.particle1.dead and \
               not s.particle2.dead and \
               not s.snapped:
                s.apply()
        for p in self.particles:
            if not p.fixed:
                # Apply drag.
                p.velocity.x *= 1.0 - min(1.0, self.drag)
                p.velocity.y *= 1.0 - min(1.0, self.drag)
                # Apply velocity.
                p.force.x += p.velocity.x
                p.force.y += p.velocity.y
                # Limit the accumulated force and update the particle's position.
                self.limit(p, limit)
                p.x += p.force.x
                p.y += p.force.y
            if p.life:
                # Apply lifespan. Fixed particles age as well.
                p._age += 1
                p.dead = p._age > p.life
    @property
    def dead(self):
        # Yields True when all particles are dead (and we don't need to update anymore).
        for p in self.particles:
            if not p.dead: return False
        return True
    def draw(self, **kwargs):
        """ Draws the system at the current iteration.
        Springs are drawn first so that particles appear on top of them.
        """
        for s in self.springs:
            if not s.particle1.dead and \
               not s.particle2.dead and \
               not s.snapped:
                s.draw(**kwargs)
        for p in self.particles:
            if not p.dead:
                p.draw(**kwargs)
    def __repr__(self):
        return "System(particles=%i, forces=%i, springs=%i)" % \
            (len(self.particles), len(self.forces), len(self.springs))

system = System
# Notes:
# While this system is interesting for many effects, it is unstable.
# If for example very strong springs are applied, particles will start "shaking".
# This is because the forces are simply added to the particle's position instead of integrated.
# See also:
# http://local.wasp.uwa.edu.au/~pbourke/miscellaneous/particle/
# http://local.wasp.uwa.edu.au/~pbourke/miscellaneous/particle/particlelib.c
#def euler_derive(particle, dt=0.1):
# particle.x += particle.velocity.x * dt
# particle.y += particle.velocity.y * dt
# particle.velocity.x += particle.force.x / particle.mass * dt
# particle.velocity.y += particle.force.y / particle.mass * dt
# If this is applied, springs will need a velocity dampener:
#fx = f + 0.01 + (self.particle2.velocity.x - self.particle1.velocity.x) * dx / d
#fy = f + 0.01 + (self.particle2.velocity.y - self.particle1.velocity.y) * dy / d
# In pure Python this is slow, since only 1/10 of the force is applied each System.update().
#--- EMITTER -----------------------------------------------------------------------------------------
class Emitter(object):
    def __init__(self, x, y, angle=0, strength=1.0, spread=10):
        """ A source that shoots particles in a given direction with a given strength.
        - angle   : direction of fire, in degrees.
        - strength: magnitude of the velocity given to fired particles.
        - spread  : angle-of-view around the direction of fire.
        """
        self.system = None # Set when appended to System.
        self.particles = []
        self.x = x
        self.y = y
        self.velocity = Vector(1, 1, length=strength, angle=angle)
        self.spread = spread # Angle-of-view.
        self._i = 0 # Current iteration.
    def __len__(self):
        return len(self.particles)
    def __iter__(self):
        return iter(self.particles)
    def __getitem__(self, i):
        return self.particles[i]
    def extend(self, x, life=100):
        for x in x: self.append(x, life)
    def append(self, particle, life=100):
        """ Adds a particle to the pool the emitter fires from.
        New particles start out dead and fully aged, so update() can respawn
        them from the emitter's position.
        """
        particle.life = particle.life or life
        particle._age = particle.life
        particle.dead = True
        self.particles.append(particle)
        if self.system is not None:
            # Also append the particle to the system the emitter is part of.
            self.system.append(particle)
    def _get_angle(self):
        return self.velocity.angle
    def _set_angle(self, v):
        self.velocity.angle = v
    angle = property(_get_angle, _set_angle)
    def _get_strength(self):
        return self.velocity.length
    def _set_strength(self, v):
        # A zero-length velocity would lose the direction of fire.
        self.velocity.length = max(v, 0.01)
    strength = length = magnitude = property(_get_strength, _set_strength)
    def update(self):
        """ Update the system and respawn dead particles.
        When a particle dies, it can be reused as a new particle fired from the emitter.
        This is more efficient than creating a new Particle object.
        """
        if not self.particles:
            # Nothing to fire (yet); also avoids a modulo-by-zero crash below.
            return
        self._i += 1 # Respawn occurs gradually.
        p = self.particles[self._i % len(self.particles)]
        if p.dead:
            p.x = self.x
            p.y = self.y
            # Fire within the angle-of-view around the direction of fire.
            p.velocity = self.velocity.rotated(self.spread * 0.5 * (random()*2-1))
            p._age = 0
            p.dead = False

emitter = Emitter
#=== GRAPH ===========================================================================================
# Graph visualization is a way of representing information as diagrams of abstract graphs and networks.
# Automatic graph drawing has many important applications in software engineering,
# database and web design, networking, and in visual interfaces for many other domains.
#--- NODE --------------------------------------------------------------------------------------------
def deepcopy(o):
    """ Returns a copy of the given object, recursing into lists, tuples,
    sets and dicts. Objects that define a copy() method are copied with it:
    a color can be represented as a tuple or as a nodebox.graphics.Color object,
    in which case it needs to be copied by invoking Color.copy().
    Raises an Exception for objects that can not be copied.
    """
    if o is None:
        return o
    # NOTE(review): dict and set also define .copy(), so they are shallow-copied
    # here and never reach the recursive branches below — confirm intended.
    if hasattr(o, "copy"):
        return o.copy()
    if isinstance(o, (basestring, bool, int, float, long, complex)):
        return o
    if isinstance(o, (list, tuple, set)):
        return o.__class__(deepcopy(v) for v in o)
    if isinstance(o, dict):
        return dict((deepcopy(k), deepcopy(v)) for k,v in o.iteritems())
    # Call-style raise works under both Python 2 and 3 (unlike "raise E, msg").
    raise Exception("don't know how to copy %s" % o.__class__.__name__)
class Node(object):
    def __init__(self, id="", radius=5, **kwargs):
        """ A node with a unique id in the graph.
        Node.id is drawn as a text label, unless optional parameter text=False.
        Optional parameters include: fill, stroke, strokewidth, text, font, fontsize, fontweight.
        """
        self.graph = None
        self.links = Links()
        self.id = id
        self._x = 0.0 # Calculated by Graph.layout.update().
        self._y = 0.0 # Calculated by Graph.layout.update().
        self.force = Vector(0.0, 0.0)
        self.radius = radius
        self.fixed = kwargs.pop("fixed", False)
        self.fill = kwargs.pop("fill", None)
        self.stroke = kwargs.pop("stroke", (0,0,0,1))
        self.strokewidth = kwargs.pop("strokewidth", 1)
        # "text" doubles as a flag (text=False disables the label)
        # and as the fill color of the label.
        self.text = kwargs.get("text", True) and \
            Text(isinstance(id, unicode) and id or str(id).decode("utf-8", "ignore"),
                width = 85,
                fill = kwargs.pop("text", (0,0,0,1)),
                fontsize = kwargs.pop("fontsize", 11), **kwargs) or None
        self._weight = None # Calculated by Graph.eigenvector_centrality().
        self._centrality = None # Calculated by Graph.betweenness_centrality().
    @property
    def _distance(self):
        # Graph.distance controls the (x,y) spacing between nodes.
        return self.graph and float(self.graph.distance) or 1.0
    def _get_x(self):
        return self._x * self._distance
    def _get_y(self):
        return self._y * self._distance
    def _set_x(self, v):
        self._x = v / self._distance
    def _set_y(self, v):
        self._y = v / self._distance
    # The layout works in unscaled (_x, _y) coordinates;
    # x and y scale them with the graph's node spacing.
    x = property(_get_x, _set_x)
    y = property(_get_y, _set_y)
    @property
    def edges(self):
        """ Yields a list of edges from/to the node.
        """
        return self.graph is not None \
           and [e for e in self.graph.edges if self.id in (e.node1.id, e.node2.id)] \
            or []
    @property
    def weight(self):
        """ Yields eigenvector centrality as a number between 0.0-1.0.
        """
        # Computed lazily (and cached) for the whole graph on first access.
        if self.graph and self._weight is None:
            self.graph.eigenvector_centrality()
        return self._weight
    @property
    def centrality(self):
        """ Yields betweenness centrality as a number between 0.0-1.0.
        """
        # Computed lazily (and cached) for the whole graph on first access.
        if self.graph and self._centrality is None:
            self.graph.betweenness_centrality()
        return self._centrality
    def flatten(self, depth=1, traversable=lambda node, edge: True, _visited=None):
        """ Recursively lists the node and nodes linked to it.
        Depth 0 returns a list with the node.
        Depth 1 returns a list with the node and all the directly linked nodes.
        Depth 2 includes the linked nodes' links, and so on.
        """
        # _visited maps node id => (node, depth at which it was reached);
        # it is shared across the recursion.
        _visited = _visited or {}
        _visited[self.id] = (self, depth)
        if depth >= 1:
            for n in self.links:
                if n.id not in _visited or _visited[n.id][1] < depth-1:
                    if traversable(self, self.links.edges[n.id]):
                        n.flatten(depth-1, traversable, _visited)
        return [n for n,d in _visited.values()] # Fast, but not order-preserving.
    def draw(self, weighted=False):
        """ Draws the node as a circle with the given radius, fill, stroke and strokewidth.
        Draws the node centrality as a shadow effect when weighted=True.
        Draws the node text label.
        Override this method in a subclass for custom drawing.
        """
        # Draw the node weight as a shadow (based on node betweenness centrality).
        # A numeric "weighted" acts as a minimum centrality threshold.
        if weighted is not False and self.centrality > (weighted==True and -1 or weighted):
            w = self.centrality * 35
            ellipse(
                self.x,
                self.y,
                self.radius*2 + w,
                self.radius*2 + w, fill=(0,0,0,0.2), stroke=None)
        # Draw the node.
        ellipse(
            self.x,
            self.y,
            self.radius*2,
            self.radius*2, fill=self.fill, stroke=self.stroke, strokewidth=self.strokewidth)
        # Draw the node text label.
        if self.text:
            self.text.draw(
                self.x + self.radius,
                self.y + self.radius)
    def contains(self, x, y):
        """ Returns True if the given coordinates (x, y) are inside the node radius.
        """
        # Note: tests a square region of +/- 2*radius around the center,
        # not the true circular distance.
        return abs(self.x - x) < self.radius*2 and \
               abs(self.y - y) < self.radius*2
    def __repr__(self):
        return "%s(id=%s)" % (self.__class__.__name__, repr(self.id))
    def __eq__(self, node):
        # Nodes are equal when they share the same id.
        return isinstance(node, Node) and self.id == node.id
    def __ne__(self, node):
        return not self.__eq__(node)
class Links(list):
    """ A list of nodes in which each node has an associated edge,
    retrievable with the edge() method by node or node id.
    """
    def __init__(self):
        self.edges = dict()

    def append(self, node, edge=None):
        # A node that is already linked is ignored (it keeps its original edge).
        if node.id not in self.edges:
            list.append(self, node)
            self.edges[node.id] = edge

    def remove(self, node):
        list.remove(self, node)
        self.edges.pop(node.id, None)

    def edge(self, node):
        """ Returns the edge for the given node (or node id). """
        return self.edges.get(isinstance(node, Node) and node.id or node)
#--- EDGE --------------------------------------------------------------------------------------------
def coordinates(x, y, d, a):
    """ Returns the point at distance d and angle a (in degrees) from (x, y). """
    return (x + d * cos(radians(a)), y + d * sin(radians(a)))
class Edge(object):
    def __init__(self, node1, node2, weight=0.0, length=1.0, type=None, stroke=(0,0,0,1), strokewidth=1):
        """ A connection between two nodes (directed node1 => node2).
        Its weight indicates the importance (not the cost) of the connection.
        Its type is useful in a semantic network (e.g. "is-a", "is-part-of", ...)
        """
        self.node1 = node1
        self.node2 = node2
        self._weight = weight
        self.length = length
        self.type = type
        self.stroke = stroke
        self.strokewidth = strokewidth
    def _get_weight(self):
        return self._weight
    def _set_weight(self, v):
        self._weight = v
        # Clear cached adjacency map in the graph, since edge weights have changed.
        if self.node1.graph is not None:
            self.node1.graph._adjacency = None
        if self.node2.graph is not None:
            self.node2.graph._adjacency = None
    weight = property(_get_weight, _set_weight)
    def draw(self, weighted=False, directed=False):
        """ Draws the edge as a line with the given stroke and strokewidth (increased with Edge.weight).
        Draws an arrowhead at node2 when directed=True.
        Override this method in a subclass for custom drawing.
        """
        w = weighted and self.weight or 0
        line(
            self.node1.x,
            self.node1.y,
            self.node2.x,
            self.node2.y, stroke=self.stroke, strokewidth=self.strokewidth+w)
        if directed:
            self.draw_arrow(stroke=self.stroke, strokewidth=self.strokewidth+w)
    def draw_arrow(self, **kwargs):
        """ Draws the direction of the edge as an arrow on the rim of the receiving node.
        """
        x0, y0 = self.node1.x, self.node1.y
        x1, y1 = self.node2.x, self.node2.y
        # Find the edge's angle based on node1 and node2 position.
        a = degrees(atan2(y1-y0, x1-x0))
        # The arrow points to node2's rim instead of its center.
        r = self.node2.radius
        d = sqrt(pow(x1-x0, 2) + pow(y1-y0, 2))
        x01, y01 = coordinates(x0, y0, d-r-1, a)
        # Find the two other arrow corners under the given angle.
        r = max(kwargs.get("strokewidth", 1) * 3, 6)
        dx1, dy1 = coordinates(x01, y01, -r, a-20)
        dx2, dy2 = coordinates(x01, y01, -r, a+20)
        # Draw the arrowhead as a small triangle.
        line(x01, y01, dx1, dy1, **kwargs)
        line(x01, y01, dx2, dy2, **kwargs)
        line(dx1, dy1, dx2, dy2, **kwargs)
    def __repr__(self):
        return "%s(id1=%s, id2=%s)" % (self.__class__.__name__, repr(self.node1.id), repr(self.node2.id))
#--- GRAPH -------------------------------------------------------------------------------------------
# Return value of Graph.shortest_paths().
# Dictionary values can be accessed by Node as well as by node id.
class nodedict(dict):
    """ Dictionary whose values can be looked up by Node object as well as
    by node id. Returned by Graph.shortest_paths().
    """
    def __init__(self, graph, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        self.graph = graph

    def __contains__(self, node):
        # Map a node id to its Node (ids not in the graph pass through unchanged).
        return dict.__contains__(self, self.graph.get(node, node))

    def __getitem__(self, node):
        if not isinstance(node, Node):
            node = self.graph[node]
        return dict.__getitem__(self, node)

    def get(self, node, default=None):
        return dict.get(self, self.graph.get(node, node), default)
def unique(list):
    """ Returns a copy of the list with duplicate items removed (order preserved). """
    seen = set()
    result = []
    for item in list:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
# Graph layouts:
SPRING = "spring" # Force-based layout, see GraphSpringLayout (the default in Graph.__init__).
# Graph node sort order:
WEIGHT, CENTRALITY = "weight", "centrality" # For Graph.sorted(order=...).
ALL = "all" # Default node selection for Graph.copy().
class Graph(dict):
def __init__(self, layout=SPRING, distance=10.0):
""" A network of nodes connected by edges that can be drawn with a given layout.
"""
self.nodes = [] # List of Node objects.
self.edges = [] # List of Edge objects.
self.root = None
self._adjacency = None # Cached adjacency() dict.
self.layout = layout==SPRING and GraphSpringLayout(self) or GraphLayout(self)
self.distance = distance
def __getitem__(self, id):
try:
return dict.__getitem__(self, id)
except KeyError:
raise KeyError, "no node with id '%s' in graph" % id
def append(self, base, *args, **kwargs):
""" Appends a Node or Edge to the graph: Graph.append(Node, id="rabbit").
"""
kwargs["base"] = base
if issubclass(base, Node):
return self.add_node(*args, **kwargs)
if issubclass(base, Edge):
return self.add_edge(*args, **kwargs)
def add_node(self, id, *args, **kwargs):
""" Appends a new Node to the graph.
An optional base parameter can be used to pass a subclass of Node.
"""
n = kwargs.pop("base", Node)
n = isinstance(id, Node) and id or self.get(id) or n(id, *args, **kwargs)
if n.id not in self:
self.nodes.append(n)
self[n.id] = n; n.graph = self
self.root = kwargs.get("root", False) and n or self.root
# Clear adjacency cache.
self._adjacency = None
return n
def add_edge(self, id1, id2, *args, **kwargs):
""" Appends a new Edge to the graph.
An optional base parameter can be used to pass a subclass of Edge:
Graph.add_edge("cold", "winter", base=IsPropertyOf)
"""
# Create nodes that are not yet part of the graph.
n1 = self.add_node(id1)
n2 = self.add_node(id2)
# Creates an Edge instance.
# If an edge (in the same direction) already exists, yields that edge instead.
e1 = n1.links.edge(n2)
if e1 and e1.node1 == n1 and e1.node2 == n2:
return e1
e2 = kwargs.pop("base", Edge)
e2 = e2(n1, n2, *args, **kwargs)
self.edges.append(e2)
# Synchronizes Node.links:
# A.links.edge(B) yields edge A->B
# B.links.edge(A) yields edge B->A
n1.links.append(n2, edge=e2)
n2.links.append(n1, edge=e1 or e2)
# Clear adjacency cache.
self._adjacency = None
return e2
def remove(self, x):
""" Removes the given Node (and all its edges) or Edge from the graph.
Note: removing Edge a->b does not remove Edge b->a.
"""
if isinstance(x, Node) and x.id in self:
self.pop(x.id)
self.nodes.remove(x); x.graph = None
# Remove all edges involving the given node.
for e in list(self.edges):
if x in (e.node1, e.node2):
if x in e.node1.links: e.node1.links.remove(x)
if x in e.node2.links: e.node2.links.remove(x)
self.edges.remove(e)
if isinstance(x, Edge):
self.edges.remove(x)
# Clear adjacency cache.
self._adjacency = None
def node(self, id):
""" Returns the node in the graph with the given id.
"""
return self.get(id)
def edge(self, id1, id2):
""" Returns the edge between the nodes with given id1 and id2.
"""
return id1 in self and id2 in self and self[id1].links.edge(id2) or None
def paths(self, node1, node2, length=4, path=[]):
""" Returns a list of paths (shorter than or equal to given length) connecting the two nodes.
"""
if not isinstance(node1, Node):
node1 = self[node1]
if not isinstance(node2, Node):
node2 = self[node2]
return [[self[id] for id in p] for p in paths(self, node1.id, node2.id, length, path)]
def shortest_path(self, node1, node2, heuristic=None, directed=False):
""" Returns a list of nodes connecting the two nodes.
"""
if not isinstance(node1, Node):
node1 = self[node1]
if not isinstance(node2, Node):
node2 = self[node2]
try:
p = dijkstra_shortest_path(self, node1.id, node2.id, heuristic, directed)
p = [self[id] for id in p]
return p
except IndexError:
return None
def shortest_paths(self, node, heuristic=None, directed=False):
""" Returns a dictionary of nodes, each linked to a list of nodes (shortest path).
"""
if not isinstance(node, Node):
node = self[node]
p = nodedict(self)
for id, path in dijkstra_shortest_paths(self, node.id, heuristic, directed).iteritems():
p[self[id]] = path and [self[id] for id in path] or None
return p
def eigenvector_centrality(self, normalized=True, reversed=True, rating={}, iterations=100, tolerance=0.0001):
""" Calculates eigenvector centrality and returns a node => weight dictionary.
Node.weight is updated in the process.
Node.weight is higher for nodes with a lot of (indirect) incoming traffic.
"""
ec = eigenvector_centrality(self, normalized, reversed, rating, iterations, tolerance)
ec = nodedict(self, ((self[id], w) for id, w in ec.iteritems()))
for n, w in ec.iteritems():
n._weight = w
return ec
def betweenness_centrality(self, normalized=True, directed=False):
""" Calculates betweenness centrality and returns a node => weight dictionary.
Node.centrality is updated in the process.
Node.centrality is higher for nodes with a lot of passing traffic.
"""
bc = brandes_betweenness_centrality(self, normalized, directed)
bc = nodedict(self, ((self[id], w) for id, w in bc.iteritems()))
for n, w in bc.iteritems():
n._centrality = w
return bc
def sorted(self, order=WEIGHT, threshold=0.0):
""" Returns a list of nodes sorted by WEIGHT or CENTRALITY.
Nodes with a lot of traffic will be at the start of the list.
"""
o = lambda node: getattr(node, order)
nodes = ((o(n), n) for n in self.nodes if o(n) >= threshold)
nodes = reversed(sorted(nodes))
return [n for w, n in nodes]
def prune(self, depth=0):
""" Removes all nodes with less or equal links than depth.
"""
for n in (n for n in self.nodes if len(n.links) <= depth):
self.remove(n)
def fringe(self, depth=0):
""" For depth=0, returns the list of leaf nodes (nodes with only one connection).
For depth=1, returns the list of leaf nodes and their connected nodes, and so on.
"""
u = []; [u.extend(n.flatten(depth)) for n in self.nodes if len(n.links) == 1]
return unique(u)
@property
def density(self):
""" Yields the number of edges vs. the maximum number of possible edges.
For example, <0.35 => sparse, >0.65 => dense, 1.0 => complete.
"""
return 2.0*len(self.edges) / (len(self.nodes) * (len(self.nodes)-1))
@property
def is_complete(self):
return self.density == 1.0
@property
def is_dense(self):
return self.density > 0.65
@property
def is_sparse(self):
return self.density < 0.35
def split(self):
""" Returns the list of unconnected subgraphs.
"""
return partition(self)
def update(self, iterations=10, **kwargs):
""" Graph.layout.update() is called the given number of iterations.
"""
for i in range(iterations):
self.layout.update(**kwargs)
def draw(self, weighted=False, directed=False):
""" Draws all nodes and edges.
"""
for e in self.edges:
e.draw(weighted, directed)
for n in reversed(self.nodes): # New nodes (with Node._weight=None) first.
n.draw(weighted)
def node_at(self, x, y):
""" Returns the node at (x,y) or None.
"""
for n in self.nodes:
if n.contains(x, y): return n
def _add_node_copy(self, n, **kwargs):
# Magical fairy dust to copy subclasses of Node.
# We assume that the subclass constructor takes an optional "text" parameter
# (Text objects in NodeBox for OpenGL's implementation are expensive).
try:
new = self.add_node(n.id, root=kwargs.get("root",False), text=False)
except TypeError:
new = self.add_node(n.id, root=kwargs.get("root",False))
new.__class__ = n.__class__
new.__dict__.update((k, deepcopy(v)) for k,v in n.__dict__.iteritems()
if k not in ("graph", "links", "_x", "_y", "force", "_weight", "_centrality"))
def _add_edge_copy(self, e, **kwargs):
if kwargs.get("node1", e.node1).id not in self \
or kwargs.get("node2", e.node2).id not in self:
return
new = self.add_edge(
kwargs.get("node1", self[e.node1.id]),
kwargs.get("node2", self[e.node2.id]))
new.__class__ = e.__class__
new.__dict__.update((k, deepcopy(v)) for k,v in e.__dict__.iteritems()
if k not in ("node1", "node2"))
def copy(self, nodes=ALL):
    """ Returns a copy of the graph with the given list of nodes (and connecting edges).
        The layout will be reset.
    """
    g = Graph(layout=None, distance=self.distance)
    g.layout = self.layout.copy(graph=g)
    # The given list may mix Node objects and node id's.
    for n in (nodes==ALL and self.nodes or (isinstance(n, Node) and n or self[n] for n in nodes)):
        g._add_node_copy(n, root=self.root==n)
    # _add_edge_copy() skips edges whose endpoints were filtered out above.
    for e in self.edges:
        g._add_edge_copy(e)
    return g
#--- GRAPH LAYOUT ------------------------------------------------------------------------------------
# Graph drawing or graph layout, as a branch of graph theory,
# applies topology and geometry to derive two-dimensional representations of graphs.
class GraphLayout:

    def __init__(self, graph):
        """ Calculates node positions iteratively when GraphLayout.update() is called.
        """
        self.graph = graph
        self.iterations = 0

    def update(self):
        # Advance the layout by one iteration step; subclasses extend this.
        self.iterations += 1

    def reset(self):
        # Restart the layout: clear the step counter and all node positions/forces.
        self.iterations = 0
        for n in self.graph.nodes:
            n._x = 0
            n._y = 0
            n.force = Vector(0,0)

    @property
    def bounds(self):
        """ Returns a (x, y, width, height)-tuple of the approximate layout dimensions.
        """
        x0, y0 = +INFINITE, +INFINITE
        x1, y1 = -INFINITE, -INFINITE
        for n in self.graph.nodes:
            if (n.x < x0): x0 = n.x
            if (n.y < y0): y0 = n.y
            if (n.x > x1): x1 = n.x
            if (n.y > y1): y1 = n.y
        return (x0, y0, x1-x0, y1-y0)

    def copy(self, graph):
        """ Returns a fresh layout of the same type bound to the given graph.
        """
        # Bug fix: the original returned GraphLayout(self, graph), passing two
        # arguments to an __init__ that accepts only (graph) -- a TypeError.
        return GraphLayout(graph)
class GraphSpringLayout(GraphLayout):

    def __init__(self, graph):
        """ A force-based layout in which edges are regarded as springs.
            The forces are applied to the nodes, pulling them closer or pushing them apart.
        """
        # Based on: http://snipplr.com/view/1950/graph-javascript-framework-version-001/
        GraphLayout.__init__(self, graph)
        self.k         = 4.0  # Force constant.
        self.force     = 0.01 # Force multiplier.
        self.repulsion = 15   # Maximum repulsive force radius.

    def _distance(self, node1, node2):
        # Yields a tuple with distances (dx, dy, d, d**2).
        # Ensures that the distance is never zero (which deadlocks the animation).
        dx = node2._x - node1._x
        dy = node2._y - node1._y
        d2 = dx*dx + dy*dy
        if d2 < 0.01:
            # Nudge coincident nodes apart with a small random offset.
            dx = random() * 0.1 + 0.1
            dy = random() * 0.1 + 0.1
            d2 = dx*dx + dy*dy
        return dx, dy, sqrt(d2), d2

    def _repulse(self, node1, node2):
        # Updates Node.force with the repulsive force.
        dx, dy, d, d2 = self._distance(node1, node2)
        if d < self.repulsion:
            # Inverse-square falloff, applied symmetrically to both nodes.
            f = self.k**2 / d2
            node2.force.x += f * dx
            node2.force.y += f * dy
            node1.force.x -= f * dx
            node1.force.y -= f * dy

    def _attract(self, node1, node2, weight=0, length=1.0):
        # Updates Node.force with the attractive edge force.
        dx, dy, d, d2 = self._distance(node1, node2)
        d = min(d, self.repulsion)
        # Spring force relative to the rest length k; heavier edges pull harder.
        f = (d2 - self.k**2) / self.k * length
        f *= weight * 0.5 + 1
        f /= d
        node2.force.x -= f * dx
        node2.force.y -= f * dy
        node1.force.x += f * dx
        node1.force.y += f * dy

    def update(self, weight=10.0, limit=0.5):
        """ Updates the position of nodes in the graph.
            The weight parameter determines the impact of edge weight.
            The limit parameter determines the maximum movement each update().
        """
        GraphLayout.update(self)
        # Forces on all nodes due to node-node repulsions.
        for i, n1 in enumerate(self.graph.nodes):
            for j, n2 in enumerate(self.graph.nodes[i+1:]):
                self._repulse(n1, n2)
        # Forces on nodes due to edge attractions.
        for e in self.graph.edges:
            self._attract(e.node1, e.node2, weight*e.weight, 1.0/(e.length or 0.01))
        # Move nodes by given force, clamped to +/- limit per axis.
        for n in self.graph.nodes:
            if not n.fixed:
                n._x += max(-limit, min(self.force * n.force.x, limit))
                n._y += max(-limit, min(self.force * n.force.y, limit))
            n.force.x = 0
            n.force.y = 0

    def copy(self, graph):
        # Copy the layout type and its tuning constants, not its state.
        g = GraphSpringLayout(graph)
        g.k, g.force, g.repulsion = self.k, self.force, self.repulsion
        return g
#--- GRAPH TRAVERSAL ---------------------------------------------------------------------------------
def depth_first_search(node, visit=lambda node: False, traversable=lambda node, edge: True, _visited=None):
    """ Visits all the nodes connected to the given root node, depth-first.
        The visit function is called on each node; recursion stops (and True is
        returned) as soon as it returns True.
        The traversable function takes the current node and edge and returns
        True if this connection may be followed; for directed edges use:
        lambda node, edge: node == edge.node1
    """
    halted = visit(node)
    seen = _visited or {}
    seen[node.id] = True
    for neighbour in node.links:
        if halted:
            return True
        if traversable(node, node.links.edge(neighbour)) is False:
            continue
        if neighbour.id not in seen:
            halted = depth_first_search(neighbour, visit, traversable, seen)
    return halted

dfs = depth_first_search
def breadth_first_search(node, visit=lambda node: False, traversable=lambda node, edge: True):
    """ Visits all the nodes connected to the given root node, breadth-first.
        Returns True as soon as visit() returns True, False otherwise.
    """
    seen = {}
    queue = [node]
    while queue:
        current = queue.pop(0)
        if current.id in seen:
            continue
        if visit(current):
            return True
        queue.extend(n for n in current.links
                     if traversable(current, current.links.edge(n)) is not False)
        seen[current.id] = True
    return False

bfs = breadth_first_search
def paths(graph, id1, id2, length=4, path=None, _root=True):
    """ Returns a list of paths (lists of node id's) from node id1 to node id2.
        Only paths shorter than or equal to the given length are included.
        Uses a brute-force DFS approach (performance drops exponentially for longer paths).
    """
    # Bug fix (idiom): the default was a mutable `path=[]`, which is shared
    # between calls; use None and create a fresh list per top-level call.
    if path is None:
        path = []
    if len(path) >= length:
        return []
    if id1 not in graph:
        return []
    if id1 == id2:
        return [path + [id1]]
    path = path + [id1]
    p = []
    s = set(path) # 5% speedup.
    for node in graph[id1].links:
        if node.id not in s:
            p.extend(paths(graph, node.id, id2, length, path, False))
    # Only the root call sorts, shortest paths first.
    return _root and sorted(p, key=len) or p
def edges(path):
    """ Returns an iterator of Edge objects for the given list of nodes.
        It yields None where two successive nodes are not connected.
    """
    # For example, the distance (i.e., edge weight sum) of a path:
    # sum(e.weight for e in edges(path))
    if len(path) < 2:
        return iter(())
    return (first.links.edge(second) for first, second in zip(path, path[1:]))
#--- GRAPH THEORY ------------------------------------------------------------------------------------
def adjacency(graph, directed=False, reversed=False, stochastic=False, heuristic=None):
    """ Returns a dictionary indexed by node id1's,
        in which each value is a dictionary of connected node id2's linking to the edge weight.
        If directed=True, edges go from id1 to id2, but not the other way.
        If stochastic=True, all the weights for the neighbors of a given node sum to 1.
        A heuristic function can be given that takes two node id's and returns
        an additional cost for movement between the two nodes.
    """
    # Caching a heuristic from a method won't work.
    # Bound method objects are transient,
    # i.e., id(object.method) returns a new value each time.
    if graph._adjacency is not None and \
       graph._adjacency[1:] == (directed, reversed, stochastic, heuristic and id(heuristic)):
        return graph._adjacency[0]
    adj = {} # Renamed from `map` to avoid shadowing the builtin.
    for n in graph.nodes:
        adj[n.id] = {}
    for e in graph.edges:
        id1, id2 = not reversed and (e.node1.id, e.node2.id) or (e.node2.id, e.node1.id)
        # Edge weight in [0,1] is mapped to a traversal cost in [0.5, 1.0].
        adj[id1][id2] = 1.0 - 0.5 * e.weight
        if heuristic:
            adj[id1][id2] += heuristic(id1, id2)
        if not directed:
            adj[id2][id1] = adj[id1][id2]
    if stochastic:
        for id1 in adj:
            n = sum(adj[id1].values())
            for id2 in adj[id1]:
                adj[id1][id2] /= n
    # Cache the adjacency map: this makes dijkstra_shortest_path() 2x faster in repeated use.
    graph._adjacency = (adj, directed, reversed, stochastic, heuristic and id(heuristic))
    return adj

def dijkstra_shortest_path(graph, id1, id2, heuristic=None, directed=False):
    """ Dijkstra algorithm for finding the shortest path between two nodes.
        Returns a list of node id's, starting with id1 and ending with id2.
        Raises an IndexError between nodes on unconnected graphs.
    """
    # Based on: Connelly Barnes, http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/119466
    def flatten(list):
        # Flattens a linked list of the form [0,[1,[2,[]]]]
        while len(list) > 0:
            yield list[0]; list=list[1]
    G = adjacency(graph, directed=directed, heuristic=heuristic)
    q = [(0, id1, ())] # Heap of (cost, path_head, path_rest).
    visited = set()    # Visited nodes.
    while True:
        (cost1, n1, path) = heappop(q)
        if n1 not in visited:
            visited.add(n1)
            if n1 == id2:
                return list(flatten(path))[::-1] + [n1]
            path = (n1, path)
            # Bug fix: dict.iteritems() is Python 2-only; .items() works on both.
            for (n2, cost2) in G[n1].items():
                if n2 not in visited:
                    heappush(q, (cost1 + cost2, n2, path))
def dijkstra_shortest_paths(graph, id, heuristic=None, directed=False):
    """ Dijkstra algorithm for finding the shortest paths from the given node to all other nodes.
        Returns a dictionary of node id's, each linking to a list of node id's (i.e., the path).
        Unreachable nodes map to None.
    """
    # Based on: Dijkstra's algorithm for shortest paths modified from Eppstein.
    # Based on: NetworkX 1.4.1: Aric Hagberg, Dan Schult and Pieter Swart.
    # This is 5x faster than:
    # for n in g: dijkstra_shortest_path(g, id, n.id)
    W = adjacency(graph, directed=directed, heuristic=heuristic)
    Q = [] # Use Q as a heap with (distance, node id)-tuples.
    D = {} # Dictionary of final distances.
    P = {} # Dictionary of paths.
    P[id] = [id]
    seen = {id: 0}
    heappush(Q, (0, id))
    while Q:
        (dist, v) = heappop(Q)
        if v in D: continue
        D[v] = dist
        # Bug fix: dict.iterkeys() is Python 2-only; iterate the dict directly.
        for w in W[v]:
            vw_dist = D[v] + W[v][w]
            if w not in D and (w not in seen or vw_dist < seen[w]):
                seen[w] = vw_dist
                heappush(Q, (vw_dist, w))
                P[w] = P[v] + [w]
    for n in graph:
        if n not in P: P[n]=None
    return P
def floyd_warshall_all_pairs_distance(graph, heuristic=None, directed=False):
    """ Floyd-Warshall's algorithm for finding the path length for all pairs for nodes.
        Returns a dictionary of node id's,
        each linking to a dictionary of node id's linking to path length.
        The returned dict also carries a .predecessors attribute for
        reconstructing paths (see predecessor_path()).
    """
    from collections import defaultdict # Requires Python 2.5+.
    g = graph.keys()
    d = defaultdict(lambda: defaultdict(lambda: 1e30)) # float('inf')
    p = defaultdict(dict) # Predecessors.
    for e in graph.edges:
        u = e.node1.id
        v = e.node2.id
        w = 1.0 - 0.5 * e.weight
        w = heuristic and heuristic(u, v) + w or w
        d[u][v] = min(w, d[u][v])
        d[u][u] = 0
        p[u][v] = u
        if not directed:
            d[v][u] = min(w, d[v][u])
            p[v][u] = v
    for w in g:
        dw = d[w]
        for u in g:
            du, duw = d[u], d[u][w]
            for v in g:
                # Performance optimization, assumes d[w][v] > 0.
                #if du[v] > duw + dw[v]:
                if du[v] > duw and du[v] > duw + dw[v]:
                    d[u][v] = duw + dw[v]
                    p[u][v] = p[w][v]
    class pdict(dict):
        def __init__(self, predecessors, *args, **kwargs):
            dict.__init__(self, *args, **kwargs)
            self.predecessors = predecessors
    # Bug fix: dict.iteritems() is Python 2-only; .items() works on both.
    return pdict(p, ((u, dict((v, w) for v, w in d[u].items() if w < 1e30)) for u in d))
def predecessor_path(tree, u, v):
    """ Returns the path between node u and node v as a list of node id's.
        The given tree is the return value of floyd_warshall_all_pairs_distance().predecessors.
    """
    def walk(a, b):
        # Expand the predecessor chain between a and b recursively.
        mid = tree[a][b]
        if mid == a:
            return []
        return walk(a, mid) + [mid] + walk(mid, b)
    return [u] + walk(u, v) + [v]
def brandes_betweenness_centrality(graph, normalized=True, directed=False):
    """ Betweenness centrality for nodes in the graph.
        Betweenness centrality is a measure of the number of shortests paths that pass through a node.
        Nodes in high-density areas will get a good score.
    """
    # Ulrik Brandes, A Faster Algorithm for Betweenness Centrality,
    # Journal of Mathematical Sociology 25(2):163-177, 2001,
    # http://www.inf.uni-konstanz.de/algo/publications/b-fabc-01.pdf
    # Based on: Dijkstra's algorithm for shortest paths modified from Eppstein.
    # Based on: NetworkX 1.0.1: Aric Hagberg, Dan Schult and Pieter Swart.
    # http://python-networkx.sourcearchive.com/documentation/1.0.1/centrality_8py-source.html
    W = adjacency(graph, directed=directed)
    b = dict.fromkeys(graph, 0.0)
    for id in graph:
        Q = [] # Use Q as a heap with (distance, node id)-tuples.
        D = {} # Dictionary of final distances.
        P = {} # Dictionary of paths.
        for n in graph: P[n]=[]
        seen = {id: 0}
        heappush(Q, (0, id, id))
        S = []
        E = dict.fromkeys(graph, 0) # sigma (number of shortest paths).
        E[id] = 1.0
        while Q:
            (dist, pred, v) = heappop(Q)
            if v in D:
                continue
            D[v] = dist
            S.append(v)
            E[v] += E[pred]
            for w in W[v]:
                vw_dist = D[v] + W[v][w]
                if w not in D and (w not in seen or vw_dist < seen[w]):
                    seen[w] = vw_dist
                    heappush(Q, (vw_dist, v, w))
                    P[w] = [v]
                    E[w] = 0.0
                elif vw_dist == seen[w]: # Handle equal paths.
                    P[w].append(v)
                    E[w] += E[v]
        # Accumulate dependencies back along the shortest-path DAG.
        d = dict.fromkeys(graph, 0.0)
        for w in reversed(S):
            for v in P[w]:
                d[v] += (1.0 + d[w]) * E[v] / E[w]
            if w != id:
                b[w] += d[w]
    # Normalize between 0.0 and 1.0.
    m = normalized and max(b.values()) or 1
    # Bug fix: dict.iteritems() is Python 2-only; .items() works on both.
    b = dict((id, w/m) for id, w in b.items())
    return b
def eigenvector_centrality(graph, normalized=True, reversed=True, rating=None, iterations=100, tolerance=0.0001):
    """ Eigenvector centrality for nodes in the graph (cfr. Google's PageRank).
        Eigenvector centrality is a measure of the importance of a node in a directed network.
        It rewards nodes with a high potential of (indirectly) connecting to high-scoring nodes.
        Nodes with no incoming connections have a score of zero.
        If you want to measure outgoing connections, reversed should be False.
    """
    # Based on: NetworkX, Aric Hagberg (hagberg@lanl.gov)
    # http://python-networkx.sourcearchive.com/documentation/1.0.1/centrality_8py-source.html
    # Note: much faster than betweenness centrality (which grows exponentially).
    # Idiom fix: a mutable default ({}) is shared between calls; default to None.
    if rating is None:
        rating = {}
    def normalize(vector):
        w = 1.0 / (sum(vector.values()) or 1)
        for node in vector:
            vector[node] *= w
        return vector
    G = adjacency(graph, directed=True, reversed=reversed)
    v = normalize(dict([(n, random()) for n in graph])) # Node ID => weight vector.
    # Eigenvector calculation using the power iteration method: y = Ax.
    # It has no guarantee of convergence.
    for i in range(iterations):
        v0 = v
        # Bug fix: dict.iterkeys() is Python 2-only; iterating v0 is equivalent.
        v = dict.fromkeys(v0, 0)
        for n1 in v:
            for n2 in G[n1]:
                v[n1] += 0.01 + v0[n2] * G[n1][n2] * rating.get(n1, 1)
        normalize(v)
        e = sum([abs(v[n]-v0[n]) for n in v]) # Check for convergence.
        if e < len(G) * tolerance:
            # Normalize between 0.0 and 1.0.
            m = normalized and max(v.values()) or 1
            # Bug fix: dict.iteritems() is Python 2-only; .items() works on both.
            v = dict((id, w/m) for id, w in v.items())
            return v
    warn("node weight is 0 because eigenvector_centrality() did not converge.", Warning)
    return dict((n, 0) for n in G)
# a | b => all elements from a and all the elements from b.
# a & b => elements that appear in a as well as in b.
# a - b => elements that appear in a but not in b.
def union(a, b):
    """All distinct elements of a and b, as a list (unordered)."""
    return list(set(a).union(b))
def intersection(a, b):
    """Distinct elements present in both a and b, as a list (unordered)."""
    return list(set(a).intersection(b))
def difference(a, b):
    """Distinct elements of a that are not in b, as a list (unordered)."""
    return list(set(a).difference(b))
def partition(graph):
    """ Returns a list of unconnected subgraphs, largest first.
    """
    # Creates clusters of nodes and directly connected nodes.
    # Iteratively merges two clusters if they overlap.
    g = []
    for n in graph.nodes:
        # Renamed the inner loop variable (was `n`, shadowing the outer `n`).
        g.append(dict.fromkeys((m.id for m in n.flatten()), True))
    for i in reversed(range(len(g))):
        for j in reversed(range(i+1, len(g))):
            if g[i] and g[j] and len(intersection(g[i], g[j])) > 0:
                g[i] = union(g[i], g[j])
                g[j] = []
    g = [graph.copy(nodes=[graph[id] for id in n]) for n in g if n]
    # Bug fix: list.sort() no longer accepts a cmp function on Python 3;
    # the original cmp sorted descending by size, equivalent to:
    g.sort(key=len, reverse=True)
    return g
#--- GRAPH THEORY | CLIQUE ---------------------------------------------------------------------------
def is_clique(graph):
    """ A clique is a set of nodes in which each node is connected to all other nodes.
        Equivalent to testing every node pair for an edge, but O(1) via the
        precomputed edge density.
    """
    return graph.density == 1.0
def clique(graph, id):
    """ Returns the largest possible clique for the node with given id.
    """
    if isinstance(id, Node):
        id = id.id
    a = [id]
    for n in graph.nodes:
        try:
            # Raises StopIteration if all nodes in the clique are connected to n:
            # Bug fix: generator.next() was removed in Python 3; the built-in
            # next() works on Python 2.6+ and 3.
            next(id for id in a if n.id==id or graph.edge(n.id, id) is None)
        except StopIteration:
            a.append(n.id)
    return a
def cliques(graph, threshold=3):
    """ Returns all cliques in the graph with at least the given number of nodes.
    """
    found = []
    for n in graph.nodes:
        candidate = clique(graph, n.id)
        if len(candidate) >= threshold:
            candidate.sort()
            if candidate not in found:
                found.append(candidate)
    return found
#--- GRAPH MAINTENANCE -------------------------------------------------------------------------------
# Utility commands for safe linking and unlinking of nodes,
# with respect for the surrounding nodes.
def unlink(graph, node1, node2=None):
    """ Removes the edges between node1 and node2.
        If only node1 is given, removes all edges to and from it.
        This does not remove node1 from the graph.
    """
    if not isinstance(node1, Node):
        node1 = graph[node1]
    if not isinstance(node2, Node) and node2 is not None:
        node2 = graph[node2]
    # Iterate over a copy: edges are removed while looping.
    for e in list(graph.edges):
        # When node2 is None, `node2 in (..., None)` is always True,
        # so every edge touching node1 is removed.
        if node1 in (e.node1, e.node2) and node2 in (e.node1, e.node2, None):
            graph.edges.remove(e)
    try:
        node1.links.remove(node2)
        node2.links.remove(node1)
    except (AttributeError, ValueError):
        # Bug fix: narrowed the bare `except:` (which also hid KeyboardInterrupt
        # and real bugs). AttributeError: node2 is None and has no .links;
        # ValueError: the nodes were not in each other's link lists.
        pass
def redirect(graph, node1, node2):
    """ Connects all of node1's edges to node2 and unlinks node1.
    """
    if not isinstance(node1, Node):
        node1 = graph[node1]
    if not isinstance(node2, Node):
        node2 = graph[node2]
    # Copy each edge touching node1 with node2 substituted for node1
    # (edges that would become node2-node2 self-loops are skipped).
    for e in graph.edges:
        if node1 in (e.node1, e.node2):
            if e.node1 == node1 and e.node2 != node2:
                graph._add_edge_copy(e, node1=node2, node2=e.node2)
            if e.node2 == node1 and e.node1 != node2:
                graph._add_edge_copy(e, node1=e.node1, node2=node2)
    # Remove node1's original edges last.
    unlink(graph, node1)
def cut(graph, node):
    """ Unlinks the given node, but keeps edges intact by connecting the surrounding nodes.
        If A, B, C, D are nodes and A->B, B->C, B->D, if we then cut B: A->C, A->D.
    """
    if not isinstance(node, Node):
        node = graph[node]
    # Bridge every (edge, neighbour) combination around the cut node.
    for e in graph.edges:
        if node in (e.node1, e.node2):
            for n in node.links:
                if e.node1 == node and e.node2 != n:
                    graph._add_edge_copy(e, node1=n, node2=e.node2)
                if e.node2 == node and e.node1 != n:
                    graph._add_edge_copy(e, node1=e.node1, node2=n)
    # Remove the node's own edges afterwards.
    unlink(graph, node)
def insert(graph, node, a, b):
    """ Inserts the given node between node a and node b.
        If A, B, C are nodes and A->B, if we then insert C: A->C, C->B.
    """
    if not isinstance(node, Node):
        node = graph[node]
    if not isinstance(a, Node):
        a = graph[a]
    if not isinstance(b, Node):
        b = graph[b]
    # Replace each a<->b edge with a pair of edges through the new node,
    # preserving the original edge direction.
    for e in graph.edges:
        if e.node1 == a and e.node2 == b:
            graph._add_edge_copy(e, node1=a, node2=node)
            graph._add_edge_copy(e, node1=node, node2=b)
        if e.node1 == b and e.node2 == a:
            graph._add_edge_copy(e, node1=b, node2=node)
            graph._add_edge_copy(e, node1=node, node2=a)
    # Finally remove the direct a<->b connection.
    unlink(graph, a, b)
| nodebox/nodebox-opengl | nodebox/graphics/physics.py | Python | bsd-3-clause | 78,058 | [
"VisIt"
] | 50116e3aaa83d095e75b9ae33f22b71ccbb011f491b26fedf111f84424ab44e1 |
'''
Program to run a FEFF 8.4.175 calculation in a loop.
Only the data folder containing the FEFF input files needs to be specified.
The program produces a chi .dat file for each input file and calculates the
mean, median, std, max and min values of chi over all chi .dat files.
author: Zhenia Syriany (Yevgen Syryanyy)
e-mail: yuginboy@gmail.com
License: this code is under the GPL license
Last modified: 2016-07-27
'''
"Main file for run feff calculation package"
# import pandas as pd
import sys
import os
import time
from io import StringIO
import numpy as np
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.gridspec as gridspec
from matplotlib import pylab
import matplotlib.pyplot as plt
import scipy as sp
from scipy.interpolate import interp1d
import re
from shutil import copyfile
# for run executable file:
import subprocess
# import plotting procedure:
# sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), 'libs'))
from feff.libs.plot_data import plotData
from feff.libs.dir_and_file_operations import create_out_data_folder, listOfFiles, listOfFilesFN, \
deleteAllFilesInFolder, listOfFilesNameWithoutExt, listOfFilesFN_with_selected_ext, touch
# from libs.create_images import create_graphs_and_save_images_from_chi_dat
# for parallel calculation:
from joblib import Parallel, delayed
import multiprocessing
from settings import path_to_exe
# def runInParallel(*fns):
# proc = []
# for fn in fns:
# p = Process(target=fn)
# p.start()
# proc.append(p)
# for p in proc:
# p.join()
def feffCalcFun(dataPath = '/home/yugin/VirtualboxShare/FEFF/load/60/', tmpPath = '/home/yugin/VirtualboxShare/FEFF/tmp',
                outDirPath = '/home/yugin/VirtualboxShare/FEFF/out', plotTheData = True):
    """Run FEFF over every .inp file in dataPath and aggregate the chi(k) output.

    For each input file the external FEFF executable is run in tmpPath; the
    produced chi.dat is copied to outDirPath and accumulated into a chi matrix.
    Running statistics (mean, std, median, max, min over snapshots) are written
    to a result folder every 500 snapshots and once at the end, both over all
    snapshots and over the second half only (after structural relaxation).
    Inputs for which FEFF produced no chi.dat are recorded as .error markers.
    """
    # Please, change only the load data (input) directory name!
    # directory with the input feff files = dataPath
    folder_path, folder_name = os.path.split( os.path.dirname(dataPath) )
    files = listOfFiles(dataPath) # only files name
    # only the names without extension:
    names = listOfFilesNameWithoutExt(dataPath)
    filesFullPathName = listOfFilesFN_with_selected_ext(dataPath, ext = 'inp')
    # tmpPath = os.path.join(tmpDirPath, folder_name ) # tmp folder for the temporary calculation files
    # if not (os.path.isdir(tmpPath)):
    #     os.makedirs(tmpPath, exist_ok=True)
    # standart feff exe file:
    # feff_exe = 'wine /home/yugin/PycharmProjects/feff/exe/feff84_nclusx_175.exe' # path to exe file of the feff program
    # for big cases. In src files was changed
    # c max number of atoms in problem for the pathfinder
    # parameter (natx =10000) before it was: natx =1000
    # c max number of unique potentials (potph) (nphx must be ODD to
    # c avoid compilation warnings about alignment in COMMON blocks)
    # parameter (nphx = 21) before it was: nphx = 7
    feff_exe = path_to_exe # path to exe file of the feff program
    # outDirPath = os.path.join( '/home/yugin/VirtualboxShare/FEFF/out', folder_name )
    # if not (os.path.isdir(outDirPath)):
    #     os.makedirs(outDirPath, exist_ok=True)
    result_dir = create_out_data_folder( outDirPath )
    # go to the tmp directory:
    os.chdir(tmpPath)
    i = 0
    numOfColumns = len(filesFullPathName)
    # started to average from that snapshot number:
    shift = round(numOfColumns / 2)
    # k-grid common to all chi(k) outputs: 0 .. 20 in steps of 0.05.
    k = np.r_[0:20.05:0.05]
    numOfRows = len(k)
    chi = np.zeros((numOfRows, numOfColumns))
    chi_std = np.zeros((numOfRows))
    chi_mean = np.zeros((numOfRows))
    chi_median = np.zeros((numOfRows))
    chi_max = np.zeros((numOfRows))
    chi_min = np.zeros((numOfRows))
    # copy file input to the tmp directory and start calculations:
    for f in filesFullPathName:
        # copy input file to the feff.inp file in tmp directory:
        copyfile(f, os.path.join(tmpPath, 'feff.inp'))
        print('copy the ', f, ' to the -> ', os.path.join(tmpPath, 'feff.inp'))
        print('run the feff calculation')
        currentFileNameBase = os.path.basename(f)
        currentFileName = os.path.splitext(currentFileNameBase)[0]
        # run the feff calculation:
        subprocess.call(feff_exe, shell=True)
        # Check if chi.dat is created:
        if os.path.isfile( os.path.join(tmpPath, 'chi.dat') ):
            print('copy the chi.dat to the ->', )
            # create a new name to the chi.dat output file:
            # chiOutName = "chi_%05d.dat" %(i)
            chiOutName = 'chi_' + currentFileName + "_%05d.dat" %(i+1)
            copyfile(os.path.join(tmpPath, 'chi.dat'), os.path.join(outDirPath, chiOutName))
            print('feff calculation is finished')
            deleteAllFilesInFolder(tmpPath)
            # load txt output files:
            data = np.loadtxt(os.path.join(outDirPath, chiOutName), float)
            # select chi values only for k > 0 because FEFF output files contain the different length of k-vector:
            if len(data[:, 0]) < numOfRows:
                # FEFF omitted the k=0 row; shift the column down by one.
                chi[1:,i] = data[:,1]
            elif len(data[:, 0]) == numOfRows:
                chi[:,i] = data[:,1]
            else:
                print('you have unexpected numbers of rows in your output files')
                print('input file name is: ', f)
                print('number of elements is: ', len(data[:, 0]), ' the first k-element is: ', data[0, 0])
            # Periodic checkpoint: aggregate over all snapshots so far.
            if ((i % 500) == 0 ) and ( i > 2 ):
                chi_std = np.std(chi[:, 0:i], axis=1)
                chi_mean = np.mean(chi[:, 0:i], axis=1)
                chi_median = np.median(chi[:, 0:i], axis=1)
                chi_max = np.amax(chi[:, 0:i], axis=1)
                chi_min = np.amin(chi[:, 0:i], axis=1)
                out_array = np.zeros((numOfRows, 3+3))
                out_array[:, 0] = k
                out_array[:, 1] = chi_mean
                out_array[:, 2] = chi_std
                out_array[:, 3] = chi_median
                out_array[:, 4] = chi_max
                out_array[:, 5] = chi_min
                headerTxt = 'k\t<chi>\tstd\tchi_median\tchi_max\tchi_min'
                out_file_name = "result_%s" %(folder_name) + "_%05d.txt" %(i)
                np.savetxt(os.path.join(result_dir, out_file_name), out_array, fmt='%1.6e', delimiter='\t',header=headerTxt)
                print('==> write iter number {0} to the {1} file'.format(i, out_file_name))
                if plotTheData:
                    plotData(x = k, y = chi_mean, error = chi_std, numOfIter = i, out_dir = result_dir, case = folder_name,
                             y_median= chi_median, y_max=chi_max, y_min=chi_min)
            # Periodic checkpoint: aggregate from the middle snapshot onward
            # (the structure is assumed relaxed by then).
            if (i > shift) and (((i-shift) % 500)==0) and ( (i-shift) > 2 ):
                # calc average with a shift (started not from the first snapshot)
                chi_std = np.std(chi[:, shift:i], axis=1)
                chi_mean = np.mean(chi[:, shift:i], axis=1)
                chi_median = np.median(chi[:, shift:i], axis=1)
                chi_max = np.amax(chi[:, shift:i], axis=1)
                chi_min = np.amin(chi[:, shift:i], axis=1)
                out_array = np.zeros((numOfRows, 3+3))
                out_array[:, 0] = k
                out_array[:, 1] = chi_mean
                out_array[:, 2] = chi_std
                out_array[:, 3] = chi_median
                out_array[:, 4] = chi_max
                out_array[:, 5] = chi_min
                headerTxt = 'k\t<chi>\tstd\tchi_median\tchi_max\tchi_min'
                out_file_name = f'aver_from_{shift}_to_{i}_' + "result_%s" %(folder_name) + "_%05d.txt" %(i)
                np.savetxt(os.path.join(result_dir, out_file_name), out_array, fmt='%1.6e', delimiter='\t',header=headerTxt)
                print('==> write iter number {0} to the {1} file'.format(i, out_file_name))
                if plotTheData:
                    plotData(x = k, y = chi_mean, error = chi_std, numOfIter = i, out_dir = result_dir, case = folder_name + f'_from_{shift}_to_{i}]',
                             y_median= chi_median, y_max=chi_max, y_min=chi_min)
        else:
            # if chi.dat is absent: record an empty .error marker and keep the
            # failing input file for later inspection.
            chiOutName = 'chi_' + currentFileName + ".error"
            print('create the {} file ->'.format(chiOutName))
            inp_errors_dir = os.path.join(result_dir, 'inp_errors')
            if not (os.path.isdir(inp_errors_dir)):
                os.makedirs(inp_errors_dir, exist_ok=True)
            copyfile(f, os.path.join(inp_errors_dir, currentFileName + '.inp'))
            print('copy the ', f, ' to the -> ', os.path.join(inp_errors_dir, currentFileName + '.inp'))
            # create a new name to the chi.dat output file:
            # chiOutName = "chi_%05d.dat" %(i)
            touch(os.path.join(outDirPath, chiOutName))
            print('feff calculation was crushed')
        i += 1
    # Final aggregation over all snapshots.
    chi_std = np.std(chi[:, :], axis=1)
    chi_mean = np.mean(chi[:, :], axis=1)
    chi_median = np.median(chi[:, :], axis=1)
    chi_max = np.amax(chi[:, :], axis=1)
    chi_min = np.amin(chi[:, :], axis=1)
    out_array = np.zeros((numOfRows, 3+3))
    out_array[:, 0] = k
    out_array[:, 1] = chi_mean
    out_array[:, 2] = chi_std
    out_array[:, 3] = chi_median
    out_array[:, 4] = chi_max
    out_array[:, 5] = chi_min
    headerTxt = 'k\t<chi>\tstd\tchi_median\tchi_max\tchi_min'
    np.savetxt(os.path.join(result_dir, 'result.txt'), out_array, fmt='%1.6e', delimiter='\t',header=headerTxt)
    # copy result.txt file to the outDirPath folder:
    copyfile(os.path.join(result_dir, 'result.txt'), os.path.join(outDirPath, 'result_' + folder_name +'.txt'))
    if plotTheData:
        plotData(x = k, y = chi_mean, error = chi_std, numOfIter = i, out_dir = result_dir, case = folder_name,
                 y_median= chi_median, y_max=chi_max, y_min=chi_min)
    # ==========================================================================
    # Calculate average data started from the middle snapshot number.
    # We suppose that structure will be already relaxed to the moment of middle snapshot.
    chi_std = np.std(chi[:, shift:-1],axis=1)
    chi_mean = np.mean(chi[:, shift:-1],axis=1)
    chi_median = np.median(chi[:, shift:-1],axis=1)
    chi_max = np.amax(chi[:, shift:-1],axis=1)
    chi_min = np.amin(chi[:, shift:-1],axis=1)
    out_array = np.zeros((numOfRows, 3+3))
    out_array[:, 0] = k
    out_array[:, 1] = chi_mean
    out_array[:, 2] = chi_std
    out_array[:, 3] = chi_median
    out_array[:, 4] = chi_max
    out_array[:, 5] = chi_min
    headerTxt = 'k\t<chi>\tstd\tchi_median\tchi_max\tchi_min'
    np.savetxt(os.path.join(result_dir, f'aver_from_{shift}_to_{numOfColumns}_result.txt'), out_array, fmt='%1.6e', delimiter='\t',header=headerTxt)
    # copy result.txt file to the outDirPath folder:
    if plotTheData:
        plotData(x = k, y = chi_mean, error = chi_std, numOfIter = i, out_dir = result_dir, case = folder_name + f'_shift={shift}',
                 y_median= chi_median, y_max=chi_max, y_min=chi_min)
    print('program is finished')
    # print('-> create a video file:')
    # create_graphs_and_save_images_from_chi_dat(dataPath = outDirPath, each_elem_to_draw = 10)
if __name__ == "__main__":
print ('-> you run ', __file__, ' file in a main mode' )
# runInParallel(main(dataPath = '/home/yugin/PycharmProjects/feff/load/53/'), main(dataPath = '/home/yugin/PycharmProjects/feff/load/66/'),
# main(dataPath = '/home/yugin/PycharmProjects/feff/load/67/'))
# run in terminal the next command: python3 mainFEFF.py 60
# where '60' - the name of case-folder, which you want to calculate
debugMode = True
userHomeDirPath = os.path.expanduser('~')
feffLoadDirLocalPath = 'VirtualboxShare/GaMnO/debug/'
feffLoadDirAbsPath = os.path.join(userHomeDirPath, feffLoadDirLocalPath)
if debugMode:
# for test and debug:
# feffCalcFun(dataPath=feffLoadDirAbsPath + 'test/', plotTheData=False)
feffCalcFun(dataPath=feffLoadDirAbsPath + 'feff_debug/', tmpPath=feffLoadDirAbsPath + 'feff_debug/tmp/',
outDirPath = feffLoadDirAbsPath + 'feff_debug/feff_out/', plotTheData=True)
else:
dataPath = []
if len(sys.argv) > 1:
for i in range(len(sys.argv[1:])):
tmpPath = feffLoadDirAbsPath + "%s/" % sys.argv[i+1]
if (os.path.isdir(tmpPath)):
print (tmpPath)
# time.sleep(2)
dataPath.append(tmpPath)
# main(dataPath = dataPath)
else:
print('-> Selected case: ', sys.argv[i+1], ' dose not correct \n-> Program can not find data folder: ', tmpPath)
print(len(dataPath))
num_cores = multiprocessing.cpu_count()
if (len(dataPath) <= num_cores) and (len(dataPath) > 0):
print ('Program will be calculating on {} numbers of CPUs'.format(len(dataPath)) )
time.sleep(1)
print('Programm will calculate the next cases:\n{:}\n'.format(dataPath))
Parallel(n_jobs=len(dataPath))(delayed(feffCalcFun)(i) for i in dataPath)
else:
print('PC doesn''t have these numbers of needed CPUs for parallel calculation' )
else:
print('- > No selected case was found. Please, try to use \'run in terminal the next command: python3 mainFEFF.py 60\' \n '
'where \'60\' - the name of case-folder, which you want to calculate')
# main(dataPath = '/home/yugin/PycharmProjects/feff/load/53/')
# main(dataPath = '/home/yugin/PycharmProjects/feff/load/66/')
# main(dataPath = '/home/yugin/PycharmProjects/feff/load/67/')
# print (sys.argv[:])
print('-> finished')
| yuginboy/from_GULP_to_FEFF | feff/mainFEFF.py | Python | gpl-3.0 | 13,969 | [
"FEFF"
] | d4adcd5e4e84a0ea3eef2f3c3283ffc8cfb3385bf6550bb6323ab7a936e4c944 |
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2003-2006 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Andrew I Baznikin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#
# Written by Egyeki Gergely <egeri@elte.hu>, 2004
"""
Specific classes for relationships.
"""
from __future__ import unicode_literals
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from gramps.gen.lib import Person
import gramps.gen.relationship
#-------------------------------------------------------------------------
#
# Shared constants
#
#-------------------------------------------------------------------------
# Hungarian ordinal prefixes for relationship degrees, indexed by level
# (indices 0 and 1 are intentionally empty strings).
_level = \
    ["", "", "másod", "harmad", "negyed", "ötöd", "hatod",
    "heted", "nyolcad", "kilenced", "tized", "tizenegyed", "tizenketted",
    "tizenharmad", "tizennegyed", "tizenötöd", "tizenhatod",
    "tizenheted", "tizennyolcad", "tizenkilenced", "huszad","huszonegyed"]
#-------------------------------------------------------------------------
#
# Specific relationship functions
#
#-------------------------------------------------------------------------
class RelationshipCalculator(gramps.gen.relationship.RelationshipCalculator):
    """
    Hungarian-specific relationship calculator.

    The ``level`` argument used by the helpers below counts the number of
    generations (or the cousin degree) separating the two people; a level
    of 0 always yields the empty string.
    """
    def __init__(self):
        gramps.gen.relationship.RelationshipCalculator.__init__(self)
    def get_parents(self, level):
        """Term for the ancestors *level* generations up (gender neutral)."""
        if level == 0: return ""
        elif level == 1: return "szülei"
        elif level == 2: return "nagyszülei"
        elif level == 3: return "dédszülei"
        elif level == 4: return "ükszülei"
        else: return "%d. szülei" % level
    def get_father(self, level):
        """Term for the male ancestor *level* generations up."""
        if level == 0: return ""
        elif level == 1: return "apja"
        elif level == 2: return "nagyapja"
        elif level == 3: return "dédapja"
        elif level == 4: return "ükapja"
        else: return "%d. ükapja" % level
    def get_mother(self, level):
        """Term for the female ancestor *level* generations up."""
        if level == 0: return ""
        elif level == 1: return "anyja"
        elif level == 2: return "nagyanyja"
        elif level == 3: return "dédanyja"
        elif level == 4: return "ükanyja"
        else: return "%d. ükanyja" % level
    def get_son(self, level):
        """Term for the descendant *level* generations down (neutral past 1)."""
        if level == 0: return ""
        elif level == 1: return "fia"
        elif level == 2: return "unokája"
        elif level == 3: return "dédunokája"
        elif level == 4: return "ükunokája"
        else: return "%d. unokája" % level
    def get_daughter(self, level):
        """Term for the female descendant *level* generations down.

        Only level 1 has a gender-specific word ("lánya"); deeper levels
        use the gender-neutral terms from get_son().
        """
        if level == 0: return ""
        elif level == 1: return "lánya"
        # BUGFIX: the original condition "level <= len([level])" was always
        # "level <= 1", so every level >= 2 fell through and returned None.
        else: return self.get_son(level)
    def get_uncle(self, level):
        """Term for a male relative in the parent generation at *level*."""
        if level == 0: return ""
        elif level == 1: return "testvére"
        elif level == 2: return "nagybátyja"
        else: return "%d. nagybátyja" % level
    def get_aunt(self, level):
        """Term for a female relative in the parent generation at *level*."""
        if level == 0: return ""
        elif level == 1: return "testvére"
        elif level == 2: return "nagynénje"
        else: return "%d. nagynénje" % level
    def get_nephew(self, level):
        """Term for a descendant of a sibling at *level* (gender neutral)."""
        if level == 0: return ""
        elif level == 1: return "unokája"
        else: return "%d. unokája" % level
    def get_niece(self, level):
        # Hungarian uses the same gender-neutral word as get_nephew().
        return self.get_nephew(level)
    def get_male_cousin(self, level):
        """Term for a cousin of degree *level* (gender neutral)."""
        if level == 0: return ""
        elif level == 1: return "unokatestvére"
        else: return "%d. unokatestvére" % level
    def get_female_cousin(self, level):
        # Same gender-neutral word as for male cousins.
        return self.get_male_cousin(level)
    #----------------------------------------------
    #
    # brother and sister age differences
    #
    #----------------------------------------------
    def get_age_comp(self, orig_person, other_person):
        # In the 3.X API the persons' ages are not available here, so we
        # cannot distinguish older/younger siblings; always report "same".
        return 0
    def get_age_brother(self, level):
        """Brother term by relative age: 0=unknown, 1=younger, else older."""
        if level == 0: return "testvére"
        elif level == 1: return "öccse"
        else: return "bátyja"
    def get_age_sister(self, level):
        """Sister term by relative age: 0=unknown, 1=younger, else older."""
        if level == 0: return "testvére"
        elif level == 1: return "húga"
        else: return "nővére"
    #---------------------------------------------
    #
    # en: father-in-law, mother-in-law, son-in-law, daughter-in-law
    # hu: após, anyós, vő, meny
    #
    #---------------------------------------------
    def is_fathermother_in_law(self, orig, other):
        """Return 1 if *other* is a parent of one of *orig*'s spouses."""
        for f in other.get_family_handle_list():
            family = self.db.get_family_from_handle(f)
            sp_id = None
            if family:
                # Pick the spouse handle opposite to *other* in this family.
                if other == family.get_father_handle():
                    sp_id = family.get_mother_handle()
                elif other == family.get_mother_handle():
                    sp_id = family.get_father_handle()
                for g in orig.get_family_handle_list():
                    family = self.db.get_family_from_handle(g)
                    if family:
                        if sp_id in family.get_child_handle_list():
                            return 1
        return 0
    #------------------------------------------------------------------------
    #
    # hu: sógor, sógornő
    # en: brother-in-law, sister-in-law
    #
    #------------------------------------------------------------------------
    def is_brothersister_in_law(self, orig, other):
        """Return 1 if *other* is a sibling of one of *orig*'s spouses."""
        for f in orig.get_family_handle_list():
            family = self.db.get_family_from_handle(f)
            sp_id = None
            if family:
                if orig == family.get_father_handle():
                    sp_id = family.get_mother_handle()
                # BUGFIX: the original compared "other" here (mismatching the
                # father branch above) and called the non-existent method
                # get_father_handler(); mirror the father branch instead.
                elif orig == family.get_mother_handle():
                    sp_id = family.get_father_handle()
                p = other.get_main_parents_family_handle()
                family = self.db.get_family_from_handle(p)
                if family:
                    c = family.get_child_handle_list()
                    if (other.get_handle() in c) and (sp_id in c):
                        return 1
        return 0
    #-------------------------------------------------------------------------
    #
    # get_relationship
    #
    #-------------------------------------------------------------------------
    def get_relationship(self, secondRel, firstRel, orig_person, other_person, in_law_a, in_law_b):
        """
        Returns a string representing the relationship between the two people,
        along with a list of common ancestors (typically father, mother).

        Note: despite the parameter names, orig_person/other_person carry the
        *genders* of the two people (Person.MALE / Person.FEMALE).
        """
        common = ""
        # BUGFIX: the original tested "in_law_a or in_law_a", so in-law
        # relationships on the b-side were never detected.
        if in_law_a or in_law_b:
            if firstRel == 0 and secondRel == 0:
                if other_person == Person.MALE:
                    return ("apósa","")
                elif other_person == Person.FEMALE:
                    return ("anyósa","")
                else:
                    return ("apósa vagy anyósa","")
            elif secondRel == 0:
                if orig_person == Person.MALE:
                    return ("veje","")
                elif orig_person == Person.FEMALE:
                    return ("menye","")
                else:
                    return ("veje vagy menye","")
            elif firstRel == 1:
                if other_person == Person.MALE:
                    return ("sógora","")
                elif other_person == Person.FEMALE:
                    return ("sógornője","")
                else:
                    return ("sógora vagy sógornője","")
        if firstRel == 0:
            # Direct ancestor (or self when both levels are 0).
            if secondRel == 0:
                return ('', common)
            elif other_person == Person.MALE:
                return (self.get_father(secondRel), common)
            else:
                return (self.get_mother(secondRel), common)
        elif secondRel == 0:
            # Direct descendant.
            if other_person == Person.MALE:
                return (self.get_son(firstRel), common)
            else:
                return (self.get_daughter(firstRel), common)
        elif firstRel == 1:
            # Sibling of an ancestor (or a plain sibling when secondRel == 1).
            if other_person == Person.MALE:
                if secondRel == 1:
                    return (self.get_age_brother(self.get_age_comp(orig_person, other_person)), common)
                else: return (self.get_uncle(secondRel), common)
            else:
                if secondRel == 1:
                    return (self.get_age_sister(self.get_age_comp(orig_person, other_person)), common)
                else: return (self.get_aunt(secondRel), common)
        elif secondRel == 1:
            # Descendant of a sibling.
            if other_person == Person.MALE:
                return (self.get_nephew(firstRel-1), common)
            else:
                return (self.get_niece(firstRel-1), common)
        else:
            # Cousins of some degree.
            if other_person == Person.MALE:
                return (self.get_male_cousin(firstRel-1), common)
            else:
                return (self.get_female_cousin(firstRel-1), common)
    def get_single_relationship_string(self, Ga, Gb, gender_a, gender_b,
                                       reltocommon_a, reltocommon_b,
                                       only_birth=True,
                                       in_law_a=False, in_law_b=False):
        """Gramps entry point; delegates to get_relationship()."""
        return self.get_relationship(Ga, Gb, gender_a, gender_b, in_law_a, in_law_b)[0]
    def get_sibling_relationship_string(self, sib_type, gender_a, gender_b,
                                        in_law_a=False, in_law_b=False):
        """Gramps entry point for siblings; both levels are 1 by definition."""
        return self.get_relationship(1, 1, gender_a, gender_b, in_law_a, in_law_b)[0]
if __name__ == "__main__":
    # Self-test harness.  Run from the command line (with the gramps
    # source directory on PYTHONPATH) as:
    #   python src/plugins/rel/rel_hu.py
    """TRANSLATORS, copy this if statement at the bottom of your
    rel_xx.py module, and test your work with:
    python src/plugins/rel/rel_xx.py
    """
    from gramps.gen.relationship import test
    calculator = RelationshipCalculator()
    test(calculator, True)
| Forage/Gramps | gramps/plugins/rel/rel_hu.py | Python | gpl-2.0 | 10,875 | [
"Brian"
] | 52c239118ceb9343290c232fcbb769e04669e0277d95fb04ed74a9dc6a9812ba |
import numpy as np
from ase import Atoms
from ase.units import fs
from ase.calculators.test import TestPotential
from ase.calculators.emt import EMT
from ase.md import VelocityVerlet
from ase.io import PickleTrajectory, read
from ase.optimize import QuasiNewton
# Turn every numpy floating-point warning into an exception so any
# numerical problem during the integration aborts the test immediately.
np.seterr(all='raise')
# Four particles with distinct masses interacting through the analytic
# TestPotential; the last position is deliberately asymmetric so the
# system has non-trivial forces from the start.
a = Atoms('4X',
          masses=[1, 2, 3, 4],
          positions=[(0, 0, 0),
                     (1, 0, 0),
                     (0, 1, 0),
                     (0.1, 0.2, 0.7)],
          calculator=TestPotential())
print a.get_forces()
# Velocity-Verlet MD with a 0.5 fs timestep; log every 500 steps and
# record a snapshot to the trajectory every 100 steps.
md = VelocityVerlet(a, dt=0.5 * fs, logfile='-', loginterval=500)
traj = PickleTrajectory('4N.traj', 'w', a)
md.attach(traj.write, 100)
e0 = a.get_total_energy()
md.run(steps=10000)
del traj
# Energy conservation check: the total energy read back from the last
# trajectory frame must match the initial total energy closely.
assert abs(read('4N.traj').get_total_energy() - e0) < 0.0001
# Relax the final configuration and check the known minimum energy of
# the TestPotential for this system.
qn = QuasiNewton(a)
qn.run(0.001)
assert abs(a.get_potential_energy() - 1.0) < 0.000002
| grhawk/ASE | tools/ase/test/verlet.py | Python | gpl-2.0 | 868 | [
"ASE"
] | e2a86f892eaf9d8dc851752d93fef158e0eaddec71df9c3038c8bb12517015d7 |
#!/usr/bin/env python3
"""
Copyright 2017 Paul Willworth <ioscode@gmail.com>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import re
from http import cookies
import dbSession
import dbShared
import cgi
import pymysql
from xml.dom import minidom
import ghNames
#
# Get current url
# Determine the URL this CGI script was invoked as (best effort; empty
# string outside a CGI environment).
try:
	url = os.environ['SCRIPT_NAME']
except KeyError:
	url = ''
form = cgi.FieldStorage()
# Get Cookies; fall back to form-supplied session id when cookies are
# unavailable.
useCookies = 1
C = cookies.SimpleCookie()
try:
	C.load(os.environ['HTTP_COOKIE'])
except KeyError:
	useCookies = 0
if useCookies:
	try:
		currentUser = C['userID'].value
	except KeyError:
		currentUser = ''
	try:
		loginResult = C['loginAttempt'].value
	except KeyError:
		loginResult = 'success'
	try:
		sid = C['gh_sid'].value
	except KeyError:
		sid = form.getfirst('gh_sid', '')
else:
	currentUser = ''
	loginResult = 'success'
	sid = form.getfirst('gh_sid', '')
# Get form info.  The flt*/alertTypes/*mins fields arrive as
# comma-separated parallel lists (one entry per filter row).
galaxy = form.getfirst("galaxy", "")
fltCount = form.getfirst("fltCount", "")
fltOrders = form.getfirst("fltOrders", "")
fltTypes = form.getfirst("fltTypes", "")
fltValues = form.getfirst("fltValues", "")
CRmins = form.getfirst("CRmins", "")
CDmins = form.getfirst("CDmins", "")
DRmins = form.getfirst("DRmins", "")
FLmins = form.getfirst("FLmins", "")
HRmins = form.getfirst("HRmins", "")
MAmins = form.getfirst("MAmins", "")
PEmins = form.getfirst("PEmins", "")
OQmins = form.getfirst("OQmins", "")
SRmins = form.getfirst("SRmins", "")
UTmins = form.getfirst("UTmins", "")
ERmins = form.getfirst("ERmins", "")
qualityMins = form.getfirst("qualityMins", "")
alertTypes = form.getfirst("alertTypes", "")
fltGroups = form.getfirst("fltGroups", "")
# escape input to prevent sql injection
sid = dbShared.dbInsertSafe(sid)
galaxy = dbShared.dbInsertSafe(galaxy)
fltCount = dbShared.dbInsertSafe(fltCount)
fltOrders = dbShared.dbInsertSafe(fltOrders)
fltTypes = dbShared.dbInsertSafe(fltTypes)
fltValues = dbShared.dbInsertSafe(fltValues)
CRmins = dbShared.dbInsertSafe(CRmins)
CDmins = dbShared.dbInsertSafe(CDmins)
DRmins = dbShared.dbInsertSafe(DRmins)
FLmins = dbShared.dbInsertSafe(FLmins)
HRmins = dbShared.dbInsertSafe(HRmins)
MAmins = dbShared.dbInsertSafe(MAmins)
PEmins = dbShared.dbInsertSafe(PEmins)
OQmins = dbShared.dbInsertSafe(OQmins)
SRmins = dbShared.dbInsertSafe(SRmins)
UTmins = dbShared.dbInsertSafe(UTmins)
ERmins = dbShared.dbInsertSafe(ERmins)
qualityMins = dbShared.dbInsertSafe(qualityMins)
alertTypes = dbShared.dbInsertSafe(alertTypes)
fltGroups = dbShared.dbInsertSafe(fltGroups)
# Split the comma-separated fields into parallel lists.
fltOrders = fltOrders.split(",")
# fltUpdated starts as a throwaway copy (same length as fltTypes); its
# entries are overwritten with 0/1 "seen" flags during validation below.
fltUpdated = fltTypes.split(",")
fltTypes = fltTypes.split(",")
fltValues = fltValues.split(",")
CRmins = CRmins.split(",")
CDmins = CDmins.split(",")
DRmins = DRmins.split(",")
FLmins = FLmins.split(",")
HRmins = HRmins.split(",")
MAmins = MAmins.split(",")
PEmins = PEmins.split(",")
OQmins = OQmins.split(",")
SRmins = SRmins.split(",")
UTmins = UTmins.split(",")
ERmins = ERmins.split(",")
qualityMins = qualityMins.split(",")
alertTypes = alertTypes.split(",")
fltGroups = fltGroups.split(",")
result = ""
# Get a session; a non-empty session string means the user is logged in
# and identifies the current user.
logged_state = 0
sess = dbSession.getSession(sid)
if (sess != ''):
	logged_state = 1
	currentUser = sess
def n2n(inVal):
	"""Render *inVal* as an SQL literal fragment: the placeholder values
	'', None, 'undefined' and 'None' all become the string 'NULL';
	anything else is returned as str(inVal)."""
	return 'NULL' if inVal in ('', None, 'undefined', 'None') else str(inVal)
# Check for errors: all parallel lists must be the same length, type and
# alert fields must be numeric; non-numeric stat minimums are coerced to 0.
errstr = ""
fc = len(fltValues)
if (galaxy == ""):
	errstr = errstr + "Error: no galaxy selected. \r\n"
if fc == len(fltOrders) and fc == len(fltTypes) and fc == len(alertTypes) and fc == len(CRmins) and fc == len(CDmins) and fc == len(DRmins) and fc == len(FLmins) and fc == len(HRmins) and fc == len(MAmins) and fc == len(PEmins) and fc == len(OQmins) and fc == len(SRmins) and fc == len(UTmins) and fc == len(ERmins) and fc == len(fltGroups) and fc == len(qualityMins):
	for x in range(len(fltValues)):
		if fltValues[x] != "":
			if (fltTypes[x].isdigit() != True):
				errstr = errstr + "Error: Type for " + fltValues[x] + " was not valid. \r\n"
			if (alertTypes[x].isdigit() != True):
				errstr = errstr + "Error: Alert options for " + fltValues[x] + " was not valid. \r\n"
			if (CRmins[x].isdigit() != True):
				CRmins[x] = 0
			if (CDmins[x].isdigit() != True):
				CDmins[x] = 0
			if (DRmins[x].isdigit() != True):
				DRmins[x] = 0
			if (FLmins[x].isdigit() != True):
				FLmins[x] = 0
			if (HRmins[x].isdigit() != True):
				HRmins[x] = 0
			if (MAmins[x].isdigit() != True):
				MAmins[x] = 0
			if (PEmins[x].isdigit() != True):
				PEmins[x] = 0
			if (OQmins[x].isdigit() != True):
				OQmins[x] = 0
			if (SRmins[x].isdigit() != True):
				SRmins[x] = 0
			if (UTmins[x].isdigit() != True):
				UTmins[x] = 0
			if (ERmins[x].isdigit() != True):
				ERmins[x] = 0
			if (qualityMins[x].isdigit() != True):
				qualityMins[x] = 0
			# Mark this filter as "not yet matched against the DB".
			fltUpdated[x] = 0
else:
	errstr = errstr + "Error: One of the filters sent is missing a type, alert, group, or stat.  Orders: " + str(len(fltOrders)) + " Types: " + str(len(fltTypes)) + " Values: " + str(fc) + " AlertTypes: " + str(len(alertTypes)) + " Groups: " + str(len(fltGroups)) + " CRs: " + str(len(CRmins)) + " CDs: " + str(len(CDmins)) + " DRs: " + str(len(DRmins)) + " FLs: " + str(len(FLmins)) + " HRs: " + str(len(HRmins)) + " MAs: " + str(len(MAmins)) + " PEs: " + str(len(PEmins)) + " OQs: " + str(len(OQmins)) + " SRs: " + str(len(SRmins)) + " UTs: " + str(len(UTmins)) + " ERs: " + str(len(ERmins)) + "\r\n"
# Only process if no errors.  Reconciliation strategy: walk the user's
# existing filters in the DB, UPDATE those that are also in the posted
# lists, DELETE those that are not, then INSERT any posted filter that
# was never matched (fltUpdated[x] still 0).
# NOTE(review): SQL is built by string concatenation and relies entirely
# on dbShared.dbInsertSafe() escaping above; parameterized queries would
# be the safer long-term approach.
if (errstr == ""):
	result = ""
	if (logged_state > 0):
		# Delete alerts to be removed and update those to be updated
		udCount = 0
		delCount = 0
		addCount = 0
		conn = dbShared.ghConn()
		# open list of users existing filters
		cursor = conn.cursor()
		cursor.execute("SELECT rowOrder, fltType, fltValue FROM tFilters WHERE galaxy=" + str(galaxy) + " AND userID='" + currentUser + "';")
		row = cursor.fetchone()
		while row != None:
			rowOrder = row[0]
			fltType = row[1]
			fltValue = row[2]
			fltFound = False
			for x in range(fc):
				if str(rowOrder) == str(fltOrders[x]) and str(fltType) == str(fltTypes[x]) and fltValue == fltValues[x]:
					fltFound = True
					# update details of filter
					cursor2 = conn.cursor()
					tempSQL = "UPDATE tFilters SET alertTypes=" + str(alertTypes[x]) + ", CRmin=" + str(CRmins[x]) + ", CDmin=" + str(CDmins[x]) + ", DRmin=" + str(DRmins[x]) + ", FLmin=" + str(FLmins[x]) + ", HRmin=" + str(HRmins[x]) + ", MAmin=" + str(MAmins[x]) + ", PEmin=" + str(PEmins[x]) + ", OQmin=" + str(OQmins[x]) + ", SRmin=" + str(SRmins[x]) + ", UTmin=" + str(UTmins[x]) + ", ERmin=" + str(ERmins[x]) + ", minQuality=" + str(qualityMins[x]) + ", fltGroup='" + fltGroups[x] + "' WHERE userID='" + currentUser + "' AND galaxy=" + str(galaxy) + " AND rowOrder=" + str(rowOrder) + " AND fltType=" + str(fltType) + " AND fltValue='" + fltValue + "';"
					cursor2.execute(tempSQL)
					fltUpdated[x] = 1
					udCount += cursor2.rowcount
					cursor2.close()
			if fltFound == False:
				# delete the filter if its not in the list passed
				cursor2 = conn.cursor()
				tempSQL = "DELETE FROM tFilters WHERE galaxy=" + str(galaxy) + " AND userID='" + currentUser + "' AND rowOrder=" + str(rowOrder) + " AND fltType=" + str(fltType) + " AND fltValue='" + fltValue + "';"
				cursor2.execute(tempSQL)
				delCount += cursor2.rowcount
				cursor2.close()
			row = cursor.fetchone()
		cursor.close()
		# Add new filters
		for x in range(len(fltValues)):
			if fltValues[x] != "" and fltUpdated[x] == 0:
				# if the filter was not marked as updated previously, it does not exist and we need to add it
				cursor2 = conn.cursor()
				tempSQL = "INSERT INTO tFilters (userID, galaxy, rowOrder, fltType, fltValue, alertTypes, CRmin, CDmin, DRmin, FLmin, HRmin, MAmin, PEmin, OQmin, SRmin, UTmin, ERmin, fltGroup, minQuality) VALUES ('" + currentUser + "', " + str(galaxy) + ", " + str(fltOrders[x]) + ", " + str(fltTypes[x]) + ", '" + fltValues[x] + "', " + str(alertTypes[x]) + ", " + str(CRmins[x]) + ", " + str(CDmins[x]) + ", " + str(DRmins[x]) + ", " + str(FLmins[x]) + ", " + str(HRmins[x]) + ", " + str(MAmins[x]) + ", " + str(PEmins[x]) + ", " + str(OQmins[x]) + ", " + str(SRmins[x]) + ", " + str(UTmins[x]) + ", " + str(ERmins[x]) + ", '" + fltGroups[x] + "', " + str(qualityMins[x]) + ");"
				cursor2.execute(tempSQL)
				addCount += cursor2.rowcount
				cursor2.close()
		conn.close()
		result = "Filter update complete: " + str(addCount) + " added, " + str(udCount) + " updated, " + str(delCount) + " deleted."
	else:
		result = "Error: must be logged in to update alerts"
else:
	result = errstr
# Emit the XML response and exit with 500 if the result text reports an
# error, 200 otherwise.
print('Content-type: text/xml\n')
doc = minidom.Document()
eRoot = doc.createElement("result")
doc.appendChild(eRoot)
for tagName, text in (("fltCount", str(fltCount)), ("resultText", result)):
    child = doc.createElement(tagName)
    child.appendChild(doc.createTextNode(text))
    eRoot.appendChild(child)
print(doc.toxml())
sys.exit(500 if result.find("Error:") > -1 else 200)
| pwillworth/galaxyharvester | html/updateFilters.py | Python | gpl-3.0 | 9,693 | [
"Galaxy"
] | cfb4b258b67e89eca9bcb827bb43079f82a84b179e2317d5af5c0520a731460d |
# Copyright 2012-2014 Brian May
#
# This file is part of python-tldap.
#
# python-tldap is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-tldap is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-tldap If not, see <http://www.gnu.org/licenses/>.
""" This module provides the LDAP base functions
with a subset of the functions from the real ldap module. """
import ssl
from typing import Optional, TypeVar, Callable, Tuple, Generator
import ldap3
import ldap3.core.exceptions as exceptions
import logging
from urllib.parse import urlparse
# Module-level logger; connection tracing is funnelled through _debug().
logger = logging.getLogger(__name__)
def _debug(*argv):
    """Log all positional arguments as one space-joined debug message."""
    logger.debug(" ".join(str(item) for item in argv))
# Generic return type used by LdapBase._do_with_retry below.
Entity = TypeVar('Entity')
class LdapBase(object):
    """ The base LDAP connection class.

    Wraps an ldap3 connection configured from a settings dict (URI, USER,
    PASSWORD, plus optional START_TLS / TLS_CA / REQUIRE_TLS) and lazily
    (re)connects on demand.  Transaction-related methods are declared here
    and implemented by subclasses.
    """
    def __init__(self, settings_dict: dict) -> None:
        # Connection parameters; see _connect() for the keys consulted.
        self.settings_dict = settings_dict
        # The live ldap3.Connection, or None while disconnected.
        self._obj = None
        self._connection_class = ldap3.Connection
    def close(self) -> None:
        """ Unbind and discard the cached connection, if any. """
        if self._obj is not None:
            self._obj.unbind()
            self._obj = None
    #########################
    # Connection Management #
    #########################
    def set_connection_class(self, connection_class):
        """ Override the ldap3 connection class (useful for testing). """
        self._connection_class = connection_class
    def check_password(self, dn: str, password: str) -> bool:
        """ Return True if binding as *dn* with *password* succeeds. """
        try:
            conn = self._connect(user=dn, password=password)
            conn.unbind()
            return True
        except exceptions.LDAPInvalidCredentialsResult:
            return False
        except exceptions.LDAPUnwillingToPerformResult:
            # e.g. server refuses anonymous/empty-password binds.
            return False
    def _connect(self, user: str, password: str) -> ldap3.Connection:
        """ Open, (optionally) TLS-secure, and bind a new connection. """
        settings = self.settings_dict
        _debug("connecting")
        url = urlparse(settings['URI'])
        # Scheme selects SSL-from-the-start (ldaps) vs plain (ldap).
        if url.scheme == "ldaps":
            use_ssl = True
        elif url.scheme == "ldap":
            use_ssl = False
        else:
            raise RuntimeError("Unknown scheme '%s'" % url.scheme)
        # Split host:port, defaulting to the standard port for the scheme.
        if ":" in url.netloc:
            host, port = url.netloc.split(":")
            port = int(port)
        else:
            host = url.netloc
            if use_ssl:
                port = 636
            else:
                port = 389
        start_tls = False
        if 'START_TLS' in settings and settings['START_TLS']:
            start_tls = True
        tls = None
        if use_ssl or start_tls:
            tls = ldap3.Tls()
            if 'TLS_CA' in settings and settings['TLS_CA']:
                tls.ca_certs_file = settings['TLS_CA']
            if 'REQUIRE_TLS' in settings and settings['REQUIRE_TLS']:
                tls.validate = ssl.CERT_REQUIRED
        s = ldap3.Server(host, port=port, use_ssl=use_ssl, tls=tls)
        c = self._connection_class(
            s,  # client_strategy=ldap3.STRATEGY_SYNC_RESTARTABLE,
            user=user, password=password, authentication=ldap3.SIMPLE)
        # Retry is handled by _do_with_retry; disable ldap3's own retries.
        c.strategy.restartable_sleep_time = 0
        c.strategy.restartable_tries = 1
        c.raise_exceptions = True
        c.open()
        if start_tls:
            c.start_tls()
        try:
            c.bind()
        except:  # noqa: E722
            # Deliberately broad: always release the socket, then re-raise.
            c.unbind()
            raise
        return c
    def _reconnect(self) -> None:
        """ Replace the cached connection, binding with the configured
        service account. """
        settings = self.settings_dict
        try:
            self._obj = self._connect(
                user=settings['USER'], password=settings['PASSWORD'])
        except Exception:
            self._obj = None
            raise
        assert self._obj is not None
    def _do_with_retry(self, fn: Callable[[ldap3.Connection], Entity]) -> Entity:
        """ Run *fn* against the connection, reconnecting once if the
        server has dropped the session. """
        if self._obj is None:
            self._reconnect()
            assert self._obj is not None
        try:
            return fn(self._obj)
        except ldap3.core.exceptions.LDAPSessionTerminatedByServerError:
            # if it fails, reconnect then retry
            _debug("SERVER_DOWN, reconnecting")
            self._reconnect()
            return fn(self._obj)
    ###################
    # read only stuff #
    ###################
    def search(self, base, scope, filterstr='(objectClass=*)',
               attrlist=None, limit=None) -> Generator[Tuple[str, dict], None, None]:
        """
        Search for entries in LDAP database.

        Yields (dn, raw_attributes) tuples; *limit*, when given, is passed
        through as the ldap3 paged_size.
        """
        _debug("search", base, scope, filterstr, attrlist, limit)
        # first results
        if attrlist is None:
            attrlist = ldap3.ALL_ATTRIBUTES
        elif isinstance(attrlist, set):
            attrlist = list(attrlist)
        def first_results(obj):
            _debug("---> searching ldap", limit)
            obj.search(
                base, filterstr, scope, attributes=attrlist, paged_size=limit)
            return obj.response
        # get the 1st result
        result_list = self._do_with_retry(first_results)
        # Loop over list of search results
        for result_item in result_list:
            # skip searchResRef for now
            if result_item['type'] != "searchResEntry":
                continue
            dn = result_item['dn']
            attributes = result_item['raw_attributes']
            # did we already retrieve this from cache?
            _debug("---> got ldap result", dn)
            _debug("---> yielding", result_item)
            yield (dn, attributes)
        # we are finished - return results, eat cake
        _debug("---> done")
        return
    ####################
    # Cache Management #
    ####################
    def reset(self, force_flush_cache: bool = False) -> None:
        """
        Reset transaction back to original state, discarding all
        uncompleted transactions.
        """
        # No transaction state at this level; subclasses override.
        pass
    ##########################
    # Transaction Management #
    ##########################
    # Fake it
    def is_dirty(self) -> bool:
        """ Are there uncommitted changes? """
        raise NotImplementedError()
    def is_managed(self) -> bool:
        """ Are we inside transaction management? """
        raise NotImplementedError()
    def enter_transaction_management(self) -> None:
        """ Start a transaction. """
        raise NotImplementedError()
    def leave_transaction_management(self) -> None:
        """
        End a transaction. Must not be dirty when doing so. ie. commit() or
        rollback() must be called if changes made. If dirty, changes will be
        discarded.
        """
        raise NotImplementedError()
    def commit(self) -> None:
        """
        Attempt to commit all changes to LDAP database. i.e. forget all
        rollbacks.  However stay inside transaction management.
        """
        raise NotImplementedError()
    def rollback(self) -> None:
        """
        Roll back to previous database state. However stay inside transaction
        management.
        """
        raise NotImplementedError()
    ##################################
    # Functions needing Transactions #
    ##################################
    def add(self, dn: str, mod_list: dict) -> None:
        """
        Add a DN to the LDAP database; See ldap module. Doesn't return a result
        if transactions enabled.
        """
        raise NotImplementedError()
    def modify(self, dn: str, mod_list: dict) -> None:
        """
        Modify a DN in the LDAP database; See ldap module. Doesn't return a
        result if transactions enabled.
        """
        raise NotImplementedError()
    def modify_no_rollback(self, dn: str, mod_list: dict) -> None:
        """
        Modify a DN in the LDAP database; See ldap module. Doesn't return a
        result if transactions enabled.
        """
        raise NotImplementedError()
    def delete(self, dn: str) -> None:
        """
        delete a dn in the ldap database; see ldap module. doesn't return a
        result if transactions enabled.
        """
        raise NotImplementedError()
    def rename(self, dn: str, new_rdn: str, new_base_dn: Optional[str] = None) -> None:
        """
        rename a dn in the ldap database; see ldap module. doesn't return a
        result if transactions enabled.
        """
        raise NotImplementedError()
| brianmay/python-tldap | tldap/backend/base.py | Python | gpl-3.0 | 8,635 | [
"Brian"
] | 8085a72386e3735577996492ca5dbbc534e25f9827fc04dcc9f9d3ba7673ef2d |
"""Unit tests for /tenants/<id>/users/ endpoints."""
# Copyright 2015 Solinea, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from django.contrib.auth import get_user_model
from mock import patch
from rest_framework.status import HTTP_200_OK, HTTP_401_UNAUTHORIZED, \
HTTP_400_BAD_REQUEST, HTTP_201_CREATED, HTTP_403_FORBIDDEN, \
HTTP_204_NO_CONTENT
from goldstone.test_utils import Setup, create_and_login, login, \
AUTHORIZATION_PAYLOAD, CONTENT_BAD_TOKEN, CONTENT_NO_CREDENTIALS, \
check_response_without_uuid, TEST_USER_1, CONTENT_PERMISSION_DENIED, \
BAD_TOKEN, BAD_UUID, CONTENT_NOT_BLANK_USERNAME
from .models import Tenant
from .tests_tenants import TENANTS_ID_URL
# URLs used by this module.
# Collection endpoint; takes one interpolation: the tenant uuid.
TENANTS_ID_USERS_URL = TENANTS_ID_URL + "users/"
# Item endpoint; takes two interpolations: tenant uuid, then user uuid.
TENANTS_ID_USERS_ID_URL = TENANTS_ID_USERS_URL + "%s/"
class TenantsIdUsers(Setup):
    """Listing users of a tenant, and creating user of a tenant.

    Covers GET and POST on /tenants/<id>/users/ across the auth matrix:
    anonymous, bad token, authorized-but-not-admin, missing tenant,
    tenant_admin list, and tenant_admin create (with notification e-mail).
    """
    def test_not_logged_in(self):
        """Getting the tenant users, or creating a tenant user, without being
        logged in."""
        # Make a tenant.
        tenant = Tenant.objects.create(name='tenant 1',
                                       owner='John',
                                       owner_contact='206.867.5309')
        # Try the GET and POST without an authorization token.
        responses = [self.client.get(TENANTS_ID_USERS_URL % tenant.uuid),
                     self.client.post(TENANTS_ID_USERS_URL % tenant.uuid,
                                      json.dumps({"username": "fool",
                                                  "password": "fooll",
                                                  "email": "a@b.com"}),
                                      content_type="application/json")]
        for response in responses:
            self.assertContains(response,
                                CONTENT_NO_CREDENTIALS,
                                status_code=HTTP_401_UNAUTHORIZED)
        # Try the GET and POST with a bad authorization token.
        responses = [
            self.client.get(
                TENANTS_ID_USERS_URL % tenant.uuid,
                HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % BAD_TOKEN),
            self.client.post(
                TENANTS_ID_USERS_URL % tenant.uuid,
                json.dumps({"username": "fool",
                            "password": "fooll",
                            "email": "a@b.com"}),
                content_type="application/json",
                HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % BAD_TOKEN)]
        for response in responses:
            self.assertContains(response,
                                CONTENT_BAD_TOKEN,
                                status_code=HTTP_401_UNAUTHORIZED)
    def test_no_access(self):
        """Getting the tenant users, or creating a tenant user, without being
        an authorized user."""
        # Make a tenant.
        tenant = Tenant.objects.create(name='tenant 1',
                                       owner='John',
                                       owner_contact='206.867.5309')
        # Create a normal user who's a member of the tenant, but *not* a
        # tenant_admin
        token = create_and_login()
        user = get_user_model().objects.get(username=TEST_USER_1[0])
        user.tenant = tenant
        user.save()
        # Try the GET and POST.  Both must be rejected with 403 because
        # plain members may not administer the tenant's user list.
        responses = [
            self.client.get(
                TENANTS_ID_USERS_URL % tenant.uuid,
                HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token),
            self.client.post(
                TENANTS_ID_USERS_URL % tenant.uuid,
                json.dumps({"username": "fool",
                            "password": "fooll",
                            "email": "a@b.com"}),
                content_type="application/json",
                HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)]
        for response in responses:
            self.assertContains(response,
                                CONTENT_PERMISSION_DENIED,
                                status_code=HTTP_403_FORBIDDEN)
    def test_no_tenant(self):
        """Getting a tenant, or creating a user of a tenant, when the tenant
        doesn't exist."""
        # Create a Django admin user.
        token = create_and_login(is_superuser=True)
        # Make a tenant, then delete it so we have a uuid that no longer
        # resolves to a row.
        tenant = Tenant.objects.create(name='tenant',
                                       owner='John',
                                       owner_contact='206.867.5309')
        tenant.delete()
        # Try the GET and POST to a tenant that doesn't exist.
        # NOTE(review): the API reports 403 (permission denied) rather than
        # 404 for a missing tenant — presumably to avoid leaking which
        # tenant uuids exist.
        responses = [
            self.client.get(
                TENANTS_ID_USERS_URL % tenant.uuid,
                HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token),
            self.client.post(
                TENANTS_ID_USERS_URL % tenant.uuid,
                json.dumps({"username": "fool",
                            "password": "fooll",
                            "email": "a@b.com"}),
                content_type="application/json",
                HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)]
        for response in responses:
            self.assertContains(response,
                                CONTENT_PERMISSION_DENIED,
                                status_code=HTTP_403_FORBIDDEN)
    def test_get(self):
        """List a tenant's users."""
        # The accounts in this test.  TENANT_USERS belong to the tenant
        # (first one is its tenant_admin); USERS do not and must not show
        # up in the listing.
        TENANT_USERS = [{"username": "a",
                         "email": "a@b.com",
                         "password": "a",
                         "tenant_admin": True},
                        {"username": "b", "email": "b@b.com", "password": "b"},
                        {"username": "c", "email": "c@b.com", "password": "c"},
                        ]
        USERS = [{"username": "d", "email": "d@b.com", "password": "d"},
                 {"username": "e", "email": "e@b.com", "password": "e"},
                 ]
        EXPECTED_RESULT = [{"username": "a",
                            "first_name": '',
                            "last_name": '',
                            "email": "a@b.com",
                            "default_tenant_admin": False,
                            "tenant_name": "tennet",
                            "tenant_admin": True,
                            "is_superuser": False},
                           {"username": "b",
                            "first_name": '',
                            "last_name": '',
                            "email": "b@b.com",
                            "default_tenant_admin": False,
                            "tenant_admin": False,
                            "is_superuser": False},
                           {"username": "c",
                            "first_name": '',
                            "last_name": '',
                            "email": "c@b.com",
                            "default_tenant_admin": False,
                            "tenant_admin": False,
                            "is_superuser": False},
                           ]
        # Make a tenant
        tenant = Tenant.objects.create(name='tennet',
                                       owner='John',
                                       owner_contact='206.867.5309')
        # Create users belonging to this tenant. One will be the tenant_admin.
        for user in TENANT_USERS:
            user["tenant"] = tenant
            get_user_model().objects.create_user(**user)
        # Create users who don't belong to the tenant.
        for user in USERS:
            get_user_model().objects.create(**user)
        # Log in as the tenant_admin.
        tenant_admin = [x for x in TENANT_USERS if "tenant_admin" in x][0]
        token = login(tenant_admin["username"], tenant_admin["password"])
        # Get the tenant's user list and check the response. We do a partial
        # check of the uuid, date_joined, and last_login keys. They must exist,
        # and their values must be strings, and the UUID ought to be >= 32
        # characters.
        response = self.client.get(
            TENANTS_ID_USERS_URL % tenant.uuid,
            HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
        # pylint: disable=E1101
        self.assertEqual(response.status_code, HTTP_200_OK)
        response_content = json.loads(response.content)
        for entry in response_content["results"]:
            self.assertIsInstance(entry["uuid"], basestring)
            self.assertGreaterEqual(len(entry["uuid"]), 32)
            self.assertIsInstance(entry["date_joined"], basestring)
            # The tenant_admin has logged in, but the other two users have
            # never logged in.
            if entry["tenant_admin"]:
                self.assertIsInstance(entry["last_login"], basestring)
            else:
                self.assertIsNone(entry["last_login"])
            # Strip the volatile keys before the exact comparison below.
            del entry["uuid"]
            del entry["date_joined"]
            del entry["last_login"]
        self.assertItemsEqual(response_content["results"], EXPECTED_RESULT)
    @patch("djoser.utils.send_email")
    def test_post(self, send_email):
        """Create a user in a tenant."""
        # The accounts in this test.
        TENANT_USERS = [{"username": "a", "email": "a@b.com", "password": "a"},
                        {"username": "b", "email": "b@b.com", "password": "b"}]
        def create(user_number):
            """Create one user in the tenant.

            Posts TENANT_USERS[user_number], checks the 201 response against
            EXPECTED_RESULT (defined after this closure, bound at call time),
            and verifies the mocked notification e-mail.

            :param user_number: The TENANT_USERS index to use
            :type user_number: int

            """
            from django.conf import settings
            response = self.client.post(
                TENANTS_ID_USERS_URL % tenant.uuid,
                json.dumps(TENANT_USERS[user_number]),
                content_type="application/json",
                HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
            check_response_without_uuid(response,
                                        HTTP_201_CREATED,
                                        EXPECTED_RESULT[user_number],
                                        extra_keys=["last_login",
                                                    "date_joined"])
            # Was email send to the new user?
            self.assertEqual(send_email.call_count, 1)
            # Did the e-mail seem to have the correct content?
            self.assertEqual(send_email.call_args[0][0],
                             TENANT_USERS[user_number]["email"])
            self.assertEqual(send_email.call_args[0][1],
                             "webmaster@localhost")   # from
            self.assertEqual(send_email.call_args[0][2]["site_name"],
                             settings.DJOSER["SITE_NAME"])  # The site name
            self.assertIn("tenant", send_email.call_args[0][2]["tenant_name"])
            self.assertEqual(send_email.call_args[1],
                             {'plain_body_template_name':
                              'new_tenant_body.txt',
                              'subject_template_name': 'new_tenant.txt'})
        EXPECTED_RESULT = [{"username": "a",
                            "first_name": '',
                            "last_name": '',
                            "email": "a@b.com",
                            "tenant_admin": False,
                            "is_superuser": False,
                            "default_tenant_admin": False},
                           {"username": "b",
                            "first_name": '',
                            "last_name": '',
                            "email": "b@b.com",
                            "tenant_admin": False,
                            "is_superuser": False,
                            "default_tenant_admin": False}]
        # Make a tenant
        tenant = Tenant.objects.create(name='tenant',
                                       owner='John',
                                       owner_contact='206.867.5309')
        # Create a user who's the tenant_admin of this tenant, and log him in.
        token = create_and_login(tenant=tenant)
        # Create one user in this empty tenant and check the result.
        create(0)
        # Now try it again; reset the mock so the call-count check in
        # create() starts from zero.
        send_email.reset_mock()
        create(1)
class TenantsIdUsersId(Setup):
    """Retrieving one particular user record from a tenant, updating one user
    record in a tenant, and deleting a user from a tenant.

    Exercises GET, PUT, and DELETE on the /tenants/<id>/users/<id> endpoint,
    including authorization failures and the deletion rules (can't delete the
    default tenant admin, yourself, members of other tenants, or Django
    admins).
    """
    def test_not_logged_in(self):
        """The client is not logged in."""
        # Make a tenant, and put one member, a tenant_admin, in it.
        tenant = Tenant.objects.create(name='tenant 1',
                                       owner='John',
                                       owner_contact='206.867.5309')
        user = get_user_model().objects.create_user(username=TEST_USER_1[0],
                                                    password=TEST_USER_1[2])
        user.tenant = tenant
        user.tenant_admin = True
        user.save()
        # Try GET, PUT, and DELETE without an authorization token.
        responses = [self.client.get(TENANTS_ID_USERS_ID_URL %
                                     (tenant.uuid, user.uuid)),
                     self.client.put(TENANTS_ID_USERS_ID_URL %
                                     (tenant.uuid, user.uuid),
                                     json.dumps({"username": "fool",
                                                 "password": "fooll",
                                                 "email": "a@b.com"}),
                                     content_type="application/json"),
                     self.client.delete(TENANTS_ID_USERS_ID_URL %
                                        (tenant.uuid, user.uuid)),
                     ]
        for response in responses:
            self.assertContains(response,
                                CONTENT_NO_CREDENTIALS,
                                status_code=HTTP_401_UNAUTHORIZED)
        # Try again with a bad authorization token.
        responses = [
            self.client.get(
                TENANTS_ID_USERS_ID_URL % (tenant.uuid, user.uuid),
                HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % BAD_TOKEN),
            self.client.put(
                TENANTS_ID_USERS_ID_URL % (tenant.uuid, user.uuid),
                json.dumps({"username": "fool",
                            "password": "fooll",
                            "email": "a@b.com"}),
                content_type="application/json",
                HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % BAD_TOKEN),
            self.client.delete(
                TENANTS_ID_USERS_ID_URL % (tenant.uuid, user.uuid),
                HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % BAD_TOKEN),
        ]
        for response in responses:
            self.assertContains(response,
                                CONTENT_BAD_TOKEN,
                                status_code=HTTP_401_UNAUTHORIZED)
    def test_no_access(self):
        """The client isn't an authorized user."""
        # Make a tenant.
        tenant = Tenant.objects.create(name='tenant 1',
                                       owner='John',
                                       owner_contact='206.867.5309')
        # Create a normal user who's a member of the tenant, but *not* a
        # tenant_admin
        token = create_and_login()
        user = get_user_model().objects.get(username=TEST_USER_1[0])
        user.tenant = tenant
        user.save()
        # Try GET, PUT, and DELETE.
        responses = [
            self.client.get(
                TENANTS_ID_USERS_ID_URL % (tenant.uuid, user.uuid),
                HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token),
            self.client.put(
                TENANTS_ID_USERS_ID_URL % (tenant.uuid, user.uuid),
                json.dumps({"username": "fool",
                            "password": "fooll",
                            "email": "a@b.com"}),
                content_type="application/json",
                HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token),
            self.client.delete(
                TENANTS_ID_USERS_ID_URL % (tenant.uuid, user.uuid),
                HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token),
        ]
        for response in responses:
            self.assertContains(response,
                                CONTENT_PERMISSION_DENIED,
                                status_code=HTTP_403_FORBIDDEN)
        # Ensure the user wasn't deleted.
        self.assertEqual(get_user_model().objects.count(), 1)
    def test_no_tenant(self):
        """Getting a tenant, or creating a user of a tenant, or deleting a
        user, when the tenant doesn't exist."""
        # Make a tenant.
        tenant = Tenant.objects.create(name='tenant',
                                       owner='John',
                                       owner_contact='206.867.5309')
        # Create a tenant_admin of the tenant.
        token = create_and_login(tenant=tenant)
        user = get_user_model().objects.get(username=TEST_USER_1[0])
        # Try GET, PUT, and DELETE to a nonexistent tenant.
        # NOTE(review): nonexistent ids yield 403 rather than 404 --
        # presumably to avoid leaking resource existence; confirm intended.
        responses = [
            self.client.get(
                TENANTS_ID_USERS_ID_URL % (BAD_UUID, user.uuid),
                HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token),
            self.client.put(
                TENANTS_ID_USERS_ID_URL % (BAD_UUID, user.uuid),
                json.dumps({"username": "fool",
                            "password": "fooll",
                            "email": "a@b.com"}),
                content_type="application/json",
                HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token),
            self.client.delete(
                TENANTS_ID_USERS_ID_URL % (BAD_UUID, user.uuid),
                HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token),
        ]
        for response in responses:
            self.assertContains(response,
                                CONTENT_PERMISSION_DENIED,
                                status_code=HTTP_403_FORBIDDEN)
    def test_get_no_user(self):
        """Get a user that does not exist from a tenant."""
        # Make a tenant.
        tenant = Tenant.objects.create(name='tenant',
                                       owner='John',
                                       owner_contact='206.867.5309')
        # Create a tenant_admin of the tenant.
        token = create_and_login(tenant=tenant)
        # Try GETing a nonexistent user from this tenant.
        response = self.client.get(
            TENANTS_ID_USERS_ID_URL %
            (tenant.uuid, BAD_UUID),
            HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
        self.assertContains(response,
                            CONTENT_PERMISSION_DENIED,
                            status_code=HTTP_403_FORBIDDEN)
    def test_get(self):
        """Get a user."""
        # Expected results, sans uuid keys.
        EXPECTED_RESULTS = [{"username": "fred",
                             "first_name": "",
                             "last_name": "",
                             "email": "fred@fred.com",
                             "tenant_admin": True,
                             "is_superuser": False,
                             "tenant_name": "tennent",
                             "default_tenant_admin": False},
                            {"username": "Traci",
                             "first_name": "",
                             "last_name": "",
                             "email": '',
                             "tenant_admin": False,
                             "is_superuser": False,
                             "default_tenant_admin": False},
                            ]
        # Make a tenant.
        tenant = Tenant.objects.create(name='tennent',
                                       owner='John',
                                       owner_contact='206.867.5309')
        # Create a tenant_admin of the tenant.
        token = create_and_login(tenant=tenant)
        user = get_user_model().objects.get(username=TEST_USER_1[0])
        # Try GETing the tenant admin.
        response = self.client.get(
            TENANTS_ID_USERS_ID_URL %
            (tenant.uuid, user.uuid),
            HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
        check_response_without_uuid(response,
                                    HTTP_200_OK,
                                    EXPECTED_RESULTS[0],
                                    extra_keys=["last_login", "date_joined"])
        # Add another user to the tenant, and get her.
        user = get_user_model().objects.create_user(username="Traci",
                                                    password='a')
        user.tenant = tenant
        user.save()
        # Try GETing the second user.
        response = self.client.get(
            TENANTS_ID_USERS_ID_URL %
            (tenant.uuid, user.uuid),
            HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
        check_response_without_uuid(response,
                                    HTTP_200_OK,
                                    EXPECTED_RESULTS[1],
                                    extra_keys=["last_login", "date_joined"])
    def test_put_no_user(self):
        """Update a non-existent user of a tenant."""
        # Make a tenant.
        tenant = Tenant.objects.create(name='tenant',
                                       owner='John',
                                       owner_contact='206.867.5309')
        # Create a tenant_admin of the tenant.
        token = create_and_login(tenant=tenant)
        # Try PUTing to a nonexistent user in this tenant.
        response = self.client.put(
            TENANTS_ID_USERS_ID_URL % (tenant.uuid, BAD_UUID),
            json.dumps({"username": "fool", "email": "a@b.com"}),
            content_type="application/json",
            HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
        self.assertContains(response,
                            CONTENT_PERMISSION_DENIED,
                            status_code=HTTP_403_FORBIDDEN)
    def test_put_bad_fields(self):
        """Update a user with missing required fields, or unrecognized
        fields, or a field that's not allowed to be changed by the
        tenant_admin."""
        # Expected responses, sans uuid keys.
        EXPECTED_RESPONSES = [
            # PUTting no changes.
            {"username": "Beth",
             "first_name": "",
             "last_name": "",
             "email": "",
             "tenant_admin": False,
             "is_superuser": False,
             "default_tenant_admin": False},
            # PUTting to an unrecognized field.
            {"username": "Beth",
             "first_name": "Michelle",
             "last_name": "",
             "email": "",
             "tenant_admin": False,
             "is_superuser": False,
             "default_tenant_admin": False},
        ]
        # Make a tenant.
        tenant = Tenant.objects.create(name='tenant',
                                       owner='John',
                                       owner_contact='206.867.5309')
        # Create a tenant_admin of the tenant, and a normal user of the tenant.
        token = create_and_login(tenant=tenant)
        user = get_user_model().objects.create_user(username="Beth",
                                                    password='x')
        user.tenant = tenant
        user.save()
        # Try PUTing to the user with no username.
        response = self.client.put(
            TENANTS_ID_USERS_ID_URL % (tenant.uuid, user.uuid),
            HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
        self.assertContains(response,
                            CONTENT_NOT_BLANK_USERNAME,
                            status_code=HTTP_400_BAD_REQUEST)
        # Try PUTing to the user with no changes, and with a change to an
        # unrecognized field.  Unknown fields are silently ignored.
        for i, entry in enumerate([{"username": "Beth"},
                                   {"username": "Beth",
                                    "billybopfoo": "blaRGH",
                                    "first_name": "Michelle"},
                                   ]):
            response = self.client.put(
                TENANTS_ID_USERS_ID_URL % (tenant.uuid, user.uuid),
                json.dumps(entry),
                content_type="application/json",
                HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
            check_response_without_uuid(response,
                                        HTTP_200_OK,
                                        EXPECTED_RESPONSES[i],
                                        extra_keys=["last_login",
                                                    "date_joined"])
        # Try PUTing to the user on a field that's not allowed to be changed.
        # The response should be the same as the "unrecognized field" case.
        response = self.client.put(
            TENANTS_ID_USERS_ID_URL % (tenant.uuid, user.uuid),
            json.dumps({"username": "Beth",
                        "billybopfoo": "blaRGH",
                        "tenant_admin": True,
                        "default_tenant_admin": True,
                        "first_name": "Michelle"}),
            content_type="application/json",
            HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
        check_response_without_uuid(response,
                                    HTTP_200_OK,
                                    EXPECTED_RESPONSES[1],
                                    extra_keys=["last_login", "date_joined"])
    def test_put(self):
        """Update a user in a tenant."""
        # Expected response, sans uuid.
        EXPECTED_RESPONSE = {"username": "Beth",
                             "first_name": "1",
                             "last_name": "2",
                             "email": "x@y.com",
                             "tenant_admin": False,
                             "is_superuser": False,
                             "default_tenant_admin": False}
        # Make a tenant.
        tenant = Tenant.objects.create(name='tenant',
                                       owner='John',
                                       owner_contact='206.867.5309')
        # Create a tenant_admin of the tenant, and a normal user of the tenant.
        token = create_and_login(tenant=tenant)
        user = get_user_model().objects.create_user(username="Beth",
                                                    password='x')
        user.tenant = tenant
        user.save()
        # Try PUTing to the user.
        response = self.client.put(
            TENANTS_ID_USERS_ID_URL % (tenant.uuid, user.uuid),
            json.dumps({"username": "Beth",
                        "first_name": '1',
                        "last_name": '2',
                        "email": "x@y.com"}),
            content_type="application/json",
            HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
        check_response_without_uuid(response,
                                    HTTP_200_OK,
                                    EXPECTED_RESPONSE,
                                    extra_keys=["last_login", "date_joined"])
    def test_delete_default_tnnt_admin(self):
        """Try deleting the system's default tenant admin."""
        # Make a tenant.
        tenant = Tenant.objects.create(name='tenant',
                                       owner='John',
                                       owner_contact='206.867.5309')
        # Create a tenant_admin, default_tenant_admin, and a normal user.
        token = create_and_login(tenant=tenant)
        default_tenant_admin = \
            get_user_model().objects.create_user(username="Amber",
                                                 password="xxx")
        default_tenant_admin.tenant = tenant
        default_tenant_admin.default_tenant_admin = True
        default_tenant_admin.save()
        get_user_model().objects.create_user(username="Beth",
                                             password='x',
                                             tenant=tenant)
        # Try to DELETE the default_admin_user.
        response = self.client.delete(
            TENANTS_ID_USERS_ID_URL %
            (tenant.uuid, default_tenant_admin.uuid),
            HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
        self.assertContains(response,
                            CONTENT_PERMISSION_DENIED,
                            status_code=HTTP_403_FORBIDDEN)
        # Ensure we have the right number of user accounts
        self.assertEqual(get_user_model().objects.count(), 3)
    def test_delete_self(self):
        """Try deleting oneself."""
        # Make a tenant.
        tenant = Tenant.objects.create(name='tenant',
                                       owner='John',
                                       owner_contact='206.867.5309')
        # Create a tenant_admin.
        token = create_and_login(tenant=tenant)
        admin_user = get_user_model().objects.get(username=TEST_USER_1[0])
        # Try DELETE on oneself.
        response = self.client.delete(
            TENANTS_ID_USERS_ID_URL % (tenant.uuid, admin_user.uuid),
            HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
        self.assertContains(response,
                            CONTENT_PERMISSION_DENIED,
                            status_code=HTTP_403_FORBIDDEN)
        # Ensure we have the right number of user accounts
        self.assertEqual(get_user_model().objects.count(), 1)
    def test_delete_not_member(self):
        """Try deleting a user of another tenant."""
        # Make two tenants.
        tenant = Tenant.objects.create(name='tenant',
                                       owner='John',
                                       owner_contact='206.867.5309')
        tenant_2 = Tenant.objects.create(name='tenant_2',
                                         owner='John',
                                         owner_contact='206.867.5309')
        # Create a tenant_admin, default_tenant_admin, and a normal user of
        # another tenant.
        token = create_and_login(tenant=tenant)
        default_tenant_admin = \
            get_user_model().objects.create_user(username="Amber",
                                                 password="xxx")
        default_tenant_admin.tenant = tenant
        default_tenant_admin.default_tenant_admin = True
        default_tenant_admin.save()
        user = get_user_model().objects.create_user(username="Beth",
                                                    password='x',
                                                    tenant=tenant_2)
        # Try DELETE on the normal user.
        response = self.client.delete(
            TENANTS_ID_USERS_ID_URL % (tenant.uuid, user.uuid),
            HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
        self.assertContains(response,
                            CONTENT_PERMISSION_DENIED,
                            status_code=HTTP_403_FORBIDDEN)
        # Ensure we have the right number of user accounts
        self.assertEqual(get_user_model().objects.count(), 3)
    def test_delete_django_admin(self):
        """Try deleting a Django admin, a.k.a. Goldstone system admin."""
        # Make a tenant.
        tenant = Tenant.objects.create(name='tenant',
                                       owner='John',
                                       owner_contact='206.867.5309')
        # Log in as the tenant admin.
        token = create_and_login(tenant=tenant)
        # Create a Django admin who's a member of the tenant.
        django_admin = \
            get_user_model().objects.create_superuser("Amber",
                                                      "a@b.com",
                                                      "xxx",
                                                      tenant=tenant)
        # Try DELETE on the Django admin.
        response = self.client.delete(
            TENANTS_ID_USERS_ID_URL % (tenant.uuid, django_admin.uuid),
            HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
        self.assertContains(response,
                            CONTENT_PERMISSION_DENIED,
                            status_code=HTTP_403_FORBIDDEN)
        # Ensure we have the right number of user accounts
        self.assertEqual(get_user_model().objects.count(), 2)
    def test_delete(self):
        """Delete a user in a tenant."""
        # Make a tenant.
        tenant = Tenant.objects.create(name='tenant',
                                       owner='John',
                                       owner_contact='206.867.5309')
        # Create a tenant_admin, default_tenant_admin, and a normal user.
        token = create_and_login(tenant=tenant)
        default_tenant_admin = \
            get_user_model().objects.create_user(username="Amber",
                                                 password="xxx")
        default_tenant_admin.tenant = tenant
        default_tenant_admin.default_tenant_admin = True
        default_tenant_admin.save()
        user = get_user_model().objects.create_user(username="Beth",
                                                    password='x',
                                                    tenant=tenant)
        # Try to DELETE the normal user.
        response = self.client.delete(
            TENANTS_ID_USERS_ID_URL % (tenant.uuid, user.uuid),
            HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
        self.assertContains(response, '', status_code=HTTP_204_NO_CONTENT)
        # Ensure we have the right number of user accounts
        self.assertEqual(get_user_model().objects.count(), 2)
| slashk/goldstone-server | goldstone/tenants/tests_user.py | Python | apache-2.0 | 34,229 | [
"Amber"
] | ffe1332e49ba325af061bd5a0e91c3a266125e30c60082de005b73dac5d59e34 |
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 11 16:55:10 2017
@author: Kirby Urner
Grayham Forscutt constructs phrases that file
by letter sum. Examples below. Sometimes add
or subtract 'the' for plus/minus 33.
intelligent infinity + the = 266
focus on visualising the angelic fleet habitation - the = 466
"""
from string import ascii_lowercase
# First phrase list: each line should sum to 433 (see sumphrase below).
# expecting 433
phrases = \
"""\
the light entities of the higher evolution
hyperspace metropolis design blueprint
the collective vehicle generated from thought
focus on visualising the angelic fleet habitation
mentally construct the triacontahedron
collectively visualize a triacontahedron
create one hundred and forty four lightbodies
spun all hundred and forty four triaconta
become fully realized integrated ascended master
stationed inside the rhombic triacontahedron
the intergeometrical approach of the chakra vajra
liquid crystal thought structures
principle resonant phase conjugate cavity
volumes inside the rhombic triacontahedron\
""".split("\n")
# this overwrites the above -- just paste in phrases
# expecting 266
phrases = """\
quantized fractal volume
pre cosmic unified fractal field
disdyakis triacontahedron
the rhombic triacontahedron
modelling consciousness
universal language of light
this is template reality
the original divine matrix
intelligent infinity
fractal dynamics of phi ratio
prepare for first contact
oneness consciousness
full access to the higher self
compound quasicrystal
supersymmetric plasma
basic building block of reality
the underlying unified field
superluminal scalar waves
golden ratio symmetries
the collective oversoul
packing rhombic triacontahedra
triacontahedral clusters
plasma cavitation membranes
protective plasma membrane
programming the lightbody
phase conjugate mirroring
survived compression
collapsing quantum wave
implosive compression\
""".split("\n")
def sumphrase(s):
    """Return the letter sum of *s*, with a=1, b=2, ... z=26.

    Characters outside the lowercase ASCII alphabet (spaces, punctuation,
    uppercase letters) contribute nothing to the total.
    """
    total = 0
    for ch in s:
        if ch in ascii_lowercase:
            total += ord(ch) - 96  # ord('a') == 97, so 'a' maps to 1
    return total
# Print each phrase's letter sum next to the phrase itself.
for phrase in phrases:
    print(sumphrase(phrase), phrase)
| 4dsolutions/Python5 | numerology.py | Python | mit | 2,065 | [
"CRYSTAL"
] | d9517a89187d3a278b500b3aad71c3dc0148bcf9f97ef70b9b5b3b32ebaa0393 |
from django.conf import settings
from django.conf.urls import url
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path
from django.views import defaults as default_views
from django.views.i18n import JavaScriptCatalog
from graphene_django.views import GraphQLView
urlpatterns = [
    # GraphQL API endpoint, with the interactive GraphiQL explorer enabled.
    url(r'^graphql$', GraphQLView.as_view(graphiql=True)),
    # Main application routes (mounted at the site root).
    url(r"", include("thefederation.urls")),
    # JavaScript translations
    path("jsi18n/", JavaScriptCatalog.as_view(packages=['thefederation']), name="javascript-catalog"),
    # Admin pages
    url(settings.ADMIN_URL, admin.site.urls),
    url(r"^django-rq/", include("django_rq.urls")),
    url(r'^_silk/', include('silk.urls', namespace='silk')),
    # static() only emits patterns when DEBUG is on; in production the media
    # files are expected to be served by the web server instead.
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    urlpatterns += [
        url(r"^400/$", default_views.bad_request, kwargs={"exception": Exception("Bad Request!")}),
        url(r"^403/$", default_views.permission_denied, kwargs={"exception": Exception("Permission Denied")}),
        url(r"^404/$", default_views.page_not_found, kwargs={"exception": Exception("Page not Found")}),
        url(r"^500/$", default_views.server_error),
    ]
if settings.DEBUG_TOOLBAR_ENABLED:
    # django-debug-toolbar is only imported (and routed) when explicitly
    # enabled, so production deployments don't need the package installed.
    import debug_toolbar
    urlpatterns += [
        url(r"^__debug__/", include(debug_toolbar.urls)),
    ]
| jaywink/diaspora-hub | config/urls.py | Python | agpl-3.0 | 1,554 | [
"VisIt"
] | 7214f6fcc07029238b24242fc9615c0c993a6ba2a9a7e31858134e95caac9b06 |
# -insar.py- coding: utf-8 -*-
"""
Created on Fri Sep 3 10:46:50 2010
@author: bosmanoglu
InSAR module. Includes functions for analyzing SAR interferometry with python.
"""
from numpy import *
from pylab import *
from basic import *
import scipy
from scipy import ndimage #scipy.pkgload('ndimage')
from scipy import signal #scipy.pkgload('signal') #ndimage
from scipy import interpolate #scipy.pkgload('interpolate'); #interp1d,RectBivariateSpline
from scipy import constants #scipy.pkgload('scipy.constants')
from scipy import optimize #scipy.pkgload('optimize')
from scipy import stats
import time_series
import pdb
try:
import stack
from cutting_edge import *
except:
pass
def coh2snr(coh):
    """Convert interferometric coherence to signal-to-noise ratio.

    SNR = coh / (1 - coh); inverse of snr2coh. Works elementwise on arrays.
    """
    noise_fraction = 1. - coh
    return coh / noise_fraction
def snr2coh(snr):
    """Convert signal-to-noise ratio to coherence.

    coh = snr / (snr + 1); inverse of coh2snr. Works elementwise on arrays.
    """
    denom = snr + 1.
    return snr / denom
def coh2pdf(coh, n=100):
    """Probability density of the single-look interferometric phase.

    coh: coherence (scalar); n: number of evaluation points on [-pi, pi].
    Returns an array of length n with the closed-form phase pdf.
    """
    phi = linspace(-pi, pi, n)
    cc = coh * cos(phi)  # coherence times cosine of the phase
    pdf = ((1 - coh ** 2) / (2 * pi)) / (1 - cc ** 2) * (
        1 + cc * arccos(-cc) / sqrt(1 - cc ** 2))
    return pdf
def coh2pdfML(coh,L,n=100,domain=None):
    """coh2pdfML(coh,L,n=100,domain=None)
    Probability density of the multilooked interferometric phase.
    coh: scalar or vector.  Must be a numpy array/scalar (``.size`` is used).
    L= scalar, multilook factor
    n=100, number of samples in domain [-pi,pi]
    domain=vector or [#coh, n] . user specified domains. First axis has to be the same as size(coh).
    """
    import scipy
    from scipy import special #scipy.pkgload('special')
    # Shorthands for the special functions in the closed-form pdf.
    G=scipy.special.gamma #math.gamma #returns the gamma function value at X, same as scipy.special.gamma
    F=scipy.special.hyp2f1 #returns gauss hypergeometric function
    if domain is None:
        domain=linspace(-pi,pi,n);
    if domain.shape[0] == coh.size:
        #user specified domain. Should be the same number of elements with coh:
        # each coherence value gets its own row of evaluation points.
        #ccd=dot(atleast_2d(coh), atleast_2d(cos(domain)))
        coh=tile(coh, (domain.shape[1],1)).T
        ccd=coh*cos(domain);
    else:
        # Shared domain: build an (ncoh, n) grid via an outer product.
        ccd=dot(atleast_2d(coh).T, atleast_2d(cos(domain))) #Coherence Cos Domain
        coh=tile(coh, (domain.shape[0],1)).T
    # Closed-form multilook phase pdf (hypergeometric + gamma terms).
    pdf=(1-coh**2)**L/(2*pi) \
        * F(L, 1, 0.5,ccd**2) \
        + (G(L+0.5)*(1-coh**2)**L * ccd) \
        / (2*sqrt(pi) * G(L) * (1-ccd**2)**(L+0.5))
    return pdf
def coh2stdpha(coh, n=100, lut=None):
    '''coh2stdpha(coh,n=100,lut=None)
    Phase standard deviation [rad] from single-look coherence.
    coh: coherence value(s); list, scalar or ndarray.
    n: number of samples between -pi and +pi for the numeric integration.
    lut: if given, size of a look-up-table; much faster, but only accurate
        to about (coh.max()-coh.min())/lut.
    ex:
        stdpha=coh2stdpha(coh)
        stdpha=coh2stdpha(coh,lut=100); #This is much faster but only accurate to 1/100th of coh.max()=1 and coh.min()=0.
    '''
    # Normalize the input to an ndarray; also accept numpy scalar types,
    # which the original float-only check rejected.
    if isinstance(coh, list):
        coh = array(coh)
    elif isinstance(coh, (float, number)):
        coh = array([coh])
    domain = linspace(-pi, pi, n)
    dims = coh.shape
    stdpha = zeros(dims)
    if lut is None:
        # Integrate the phase pdf directly for every element.
        for k in range(size(coh)):  # range (not xrange): Python 3 compatible
            # numpy.trapz(Y,X) = matlab.trapz(X,Y)
            idx = unravel_index(k, dims)
            stdpha[idx] = sqrt(trapz(domain**2 * coh2pdf(coh[idx], n), domain))
    else:
        # Tabulate the integral on a coarse grid and interpolate.
        lutx = linspace(coh.min(), coh.max(), lut)  # lutx=look up table x
        luty = zeros(lutx.shape)                    # luty=look up table y
        for k in range(len(lutx)):
            luty[k] = sqrt(trapz(domain**2 * coh2pdf(lutx[k], n), domain))
        lutf = scipy.interpolate.interp1d(lutx, luty, 'linear')
        stdpha = lutf(coh)
    return stdpha
def coh2stdphaML(coh, L, n=100, lut=None):
    '''coh2stdpha(coh,L,n=100,lut=None)
    Phase standard deviation [rad] from multilooked coherence.
    coh: coherence value(s); list, scalar or ndarray.
    L: multilook factor (azimuth looks * range looks).
    n: number of samples between -pi and +pi for the numeric integration.
    lut: if given, size of a look-up-table; much faster, but only accurate
        to about (coh.max()-coh.min())/lut.
    ex:
        L=iobj.coherence.Multilookfactor_azimuth_direction * iobj.coherence.Multilookfactor_range_direction
        stdpha=coh2stdpha(coh,L)
        stdpha=coh2stdpha(coh,L,lut=100); #This is much faster but only accurate to 1/100th of coh.max()=1 and coh.min()=0.
    '''
    # Normalize the input to an ndarray.  The original only matched numpy
    # ``number`` scalars, so a plain python float crashed at coh.shape.
    if isinstance(coh, list):
        coh = array(coh)
    elif isinstance(coh, (float, number)):
        coh = array([coh])
    domain = linspace(-pi, pi, n)
    dims = coh.shape
    stdpha = zeros(dims)
    if lut is None:
        # Integrate the multilook phase pdf directly for every element.
        for k in range(size(coh)):  # range (not xrange): Python 3 compatible
            # numpy.trapz(Y,X) = matlab.trapz(X,Y)
            idx = unravel_index(k, dims)
            stdpha[idx] = sqrt(trapz(domain**2 * coh2pdfML(coh[idx], L, n), domain))
    else:
        # Tabulate the integral on a coarse grid and interpolate.
        lutx = linspace(coh.min(), coh.max(), lut)  # lutx=look up table x
        luty = zeros(lutx.shape)                    # luty=look up table y
        for k in range(len(lutx)):
            luty[k] = sqrt(trapz(domain**2 * coh2pdfML(lutx[k], L, n), domain))
        lutf = scipy.interpolate.interp1d(lutx, luty, 'linear')
        stdpha = lutf(coh)
    return stdpha
def stdpha2coh(stdpha, L=1, n=100, lut=100):
    '''stdpha2cohML(stdpha, L=1, n=100, lut=100):
    Invert coh2stdphaML: estimate coherence from phase standard deviation.
    Creates a coherence->stdpha lookup table and interpolates it backwards.
    Out-of-range inputs are clipped to coherence 0.01 / 0.99.
    '''
    # Normalize the input to an ndarray; also accept plain python floats,
    # which the original ``number``-only check rejected.
    if isinstance(stdpha, list):
        stdpha = array(stdpha)
    elif isinstance(stdpha, (float, number)):
        stdpha = array([stdpha])
    domain = linspace(-pi, pi, n)
    lutx = linspace(0.01, 0.99, lut)  # lutx=look up table x
    luty = zeros(lutx.shape)          # luty=look up table y
    for k in range(len(lutx)):  # range (not xrange): Python 3 compatible
        luty[k] = sqrt(trapz(domain**2 * coh2pdfML(lutx[k], L, n), domain))
    # stdpha decreases as coherence increases, so flip both axes to give
    # interp1d a monotonically increasing abscissa.
    lutf = scipy.interpolate.interp1d(flipud(luty), flipud(lutx), 'linear',
                                      bounds_error=False)
    coh = lutf(stdpha)
    coh[stdpha > luty.max()] = 0.01
    coh[stdpha < luty.min()] = 0.99
    return coh
def gradient_coherence(m,s=None, w=(5,5), low_pass=True):
    """Coherence of the (complex) gradients of two images.

    m, s: master and slave arrays; complex inputs use cpxgradient (from the
    project's ``basic`` module), real inputs numpy's gradient.  If s is None
    a unit (zero-gradient) slave is substituted.  When low_pass is True the
    gradient components are box-averaged over window w before the coherence
    estimate.  Returns the coherence of the gradient fields as computed by
    coherence().
    """
    if any(iscomplexobj(m)):
        mg0,mg1=cpxgradient(m)
    else:
        mg0,mg1=gradient(m)
    if s is None:
        # Unit-amplitude, zero-phase slave: its gradient is zero everywhere.
        s=empty(m.shape, dtype=complex);
        s[:]=1.+0.j
    if any(iscomplexobj(s)):
        sg0,sg1=cpxgradient(s)
    else:
        sg0,sg1=gradient(s)
    if low_pass is True:
        # Smooth each gradient component with a local mean filter.
        mg0=scipy.ndimage.generic_filter(mg0, mean, size=w)
        mg1=scipy.ndimage.generic_filter(mg1, mean, size=w)
        sg0=scipy.ndimage.generic_filter(sg0, mean, size=w)
        sg1=scipy.ndimage.generic_filter(sg1, mean, size=w)
    #pdb.set_trace()
    # Pack the two gradient components into one complex image each.
    return coherence(mg0+1j*mg1, sg0+1j*sg1, w=w)
def coherence(m, s=None, w=(5, 5)):
    '''coherence(master, slave=None, w=(5,5)):
    Estimate interferometric coherence with a moving-average window.
    m: complex master image.
    s: complex slave image; if None a unit-phase slave is substituted so
       only the master's local statistics enter the estimate.
    w: averaging window size (rows, cols).
    Returns an array with the same shape as m.
    '''
    corrFilter = ones(w)
    corrFilter = corrFilter / corrFilter.size  # box filter: local mean
    if s is None:
        s = empty(m.shape, dtype=complex)
        s[:] = exp(1.j * 0)
    # scipy.signal.signaltools was a private module that has been removed
    # from recent SciPy releases; the public scipy.signal.correlate is the
    # same function (the filter is real, so conjugation is a no-op).
    Em = scipy.signal.correlate(m * conj(m), corrFilter, mode='same')
    Es = scipy.signal.correlate(s * conj(s), corrFilter, mode='same')
    Ems = scipy.signal.correlate(m * conj(s), corrFilter, mode='same')
    # NOTE(review): the textbook estimator normalises by sqrt(Em*Es); this
    # code uses the quadratic mean sqrt((Em**2+Es**2)/2) instead -- kept
    # as-is to preserve the original behavior.
    coh = abs(Ems / (sqrt(Em**2 + Es**2) / sqrt(2)))  # /sqrt(2): root mean square
    return coh
def crosscorrelate(m, s):
    """crosscorrelate(m,s):
    Normalised cross-correlation magnitude of two (complex) arrays.

    Returns the scalar abs(E[m s*]) / sqrt(E[m m*] * E[s s*]); equal to 1
    when the inputs are identical up to a (complex) scale factor.
    """
    power_m = (m * m.conj()).mean()
    power_s = (s * s.conj()).mean()
    cross = (m * s.conj()).mean()
    return abs(cross / sqrt(power_m * power_s))
def correlate(m, s, w):
    """Windowed normalised cross-correlation of two images.

    m, s: 2D arrays of identical shape.
    w: window size (rows, cols); a half-sized, border-clipped window is
       used near the image edges.
    Returns an array of m.shape holding the local crosscorrelate() value.
    """
    coh = zeros(m.shape)
    w0 = int(w[0] / 2.)
    w1 = int(w[1] / 2.)
    for k in range(m.shape[0]):  # range (not xrange): Python 3 compatible
        for l in range(m.shape[1]):
            # Clip the window indices at the image borders.
            if k < w0:
                kk = r_[0:k + w0]
            elif k > m.shape[0] - w0:
                kk = r_[k - w0:m.shape[0]]
            else:
                kk = r_[k - w0:k + w0]
            if l < w1:
                ll = r_[0:l + w1]
            elif l > m.shape[1] - w1:
                ll = r_[l - w1:m.shape[1]]
            else:
                ll = r_[l - w1:l + w1]
            K, L = meshgrid(kk, ll)
            coh[k, l] = crosscorrelate(m[K, L], s[K, L])
    return coh
def readComplexData(fname, width, length=0, dtype=float):
    """Read a flat binary file of interleaved real/imaginary samples.

    fname: path to the binary file.
    width: number of complex pixels per line.
    length: number of lines; if 0 it is derived from the file size.
    dtype: numeric type of the individual samples on disk.
    Returns the raw samples reshaped to (2*width, length).
    """
    if length == 0:
        filesize = os.path.getsize(fname)
        itemsize = zeros(0, dtype).itemsize  # bytes per sample on disk
        # The file size is in bytes, so divide by the sample size as well as
        # by the factor two from the interleaved real/imaginary parts (the
        # original forgot itemsize and then passed a float to fromfile).
        length = float(filesize) / width / 2 / itemsize
        if length != int(length):  # original check was inverted (flagged the good case)
            print("Error with file width, will continue but results might be bad.")
        length = int(length)
    data = fromfile(fname, dtype, width * 2 * length).reshape(2 * width, length)
    return data  # the original computed the array but never returned it
def ipd(x):
    """Integrate phase differences of a 1D complex signal.

    Sums the wrapped sample-to-sample phase increments, giving an unwrapped
    phase profile with the first sample as the zero-phase reference.
    """
    increments = x[1:] * x[0:-1].conj()  # phasor of each phase step
    steps = angle(hstack([0, increments]))
    return cumsum(steps)
def ipg(cintP, cintNei, unwNei, weiNei=None):
    """Weighted unwrapped-phase estimate at a point from its neighbours.

    cintP: complex interferogram value at the point.
    cintNei: complex interferogram values of the neighbours.
    unwNei: unwrapped phases of the neighbours.
    weiNei: neighbour weights; defaults to uniform weights.
    Returns the weighted mean of (neighbour unwrapped phase minus the
    wrapped neighbour-to-point phase difference).
    """
    if weiNei is None:
        weiNei = ones(size(cintNei))
    wrapped_diff = angle(cintNei * conj(cintP))
    estimates = unwNei - wrapped_diff
    return sum(weiNei * estimates) / sum(weiNei)
def radarcode_dem(dem, alpha=0.1745, theta=0.3316, R1=830000., dx=80. ):
    """radarcoded_DEM=radarcode_dem(dem, alpha=0.1745, theta=0.3316, R1=830000., dx=80. )
    Resample a ground-geometry DEM into radar (slant-range) geometry.
    dem: 2D height grid [m]; theta: look angle to the first pixel [rad];
    R1: range to the first point [m]; dx: DEM ground resolution [m].
    NOTE(review): alpha (baseline orientation) is accepted for interface
    symmetry with siminterf but is not used in this function.
    """
    #based on SIMINTERF.m which was
    # Created by Bert Kampes 05-Oct-2000
    # Tested by Erik Steenbergen
    #initialize output
    numlines=dem.shape[0]
    numpixels=dem.shape[1]
    rdem=zeros([numlines,numpixels]);
    # Some variables for ERS1/2 and Envisat
    #alpha=deg2rad(10.); #[rad] baseline orientation
    #wavelen = 0.05666; #[m] wavelength
    #theta = deg2rad(19.) #[rad] looking angle to first pixel
    #R1 = 830000. #[m] range to first point
    #pi4divlam = (-4.*pi)/wavelen #lam(lambda)=wavelen, can't use lambda in python it is a registered command.
    #dx = 80 #[m] dem resolution
    #Radarcode DEM by orbit information
    print ('Radarcoding DEM')
    numpixelsdem=dem.shape[1]
    x0=sin(theta) * R1  #x coord. of first DEM point
    sat1_x=0.
    # Satellite height above the first DEM sample.
    # NOTE(review): uses dem[1,1] rather than dem[0,0] as the reference
    # height -- presumably equivalent for smooth terrain; confirm intended.
    sat1_y=cos(theta) * R1 + dem[1,1]
    maxrange = sqrt((x0+(numpixelsdem-1)*dx)**2+sat1_y**2)-dem.max();
    R1extra = R1+dem.max();
    totalrange  = maxrange-R1extra;
    rangebinsize = totalrange/numpixels;
    # Slant-range sampling grid for the output.
    rangegrid  = arange(R1extra,maxrange,rangebinsize)-rangebinsize;
    x = arange(x0,x0+dx*(numpixelsdem),dx);# x coord. w.r.t. sat1
    xsqr = x**2;
    #compute range for all lines of the dem
    for az in range(0,dem.shape[0]):
        y  = sat1_y-dem[az,:]
        range2master  = sqrt(y**2+xsqr)
        ## Interpolate p to grid rangebins
        ## range is not always increasing due to foreshortning
        sortindex  = argsort(range2master);
        range2master  = range2master[sortindex]
        rdem[az,:]=interp(rangegrid,range2master,dem[az,:]);
    return rdem
def siminterf(dem,Bperp=100,doNoise=1,waterHeight=None,alpha=0.1745, \
wavelen=0.05666, theta=0.3316, R1=830000., dx=80., Bpar=None, defoRate=None, Btemp=None, coh='Geometric',
temporal_decorrelation_factor=3e-4*365.):
'''[interf,coh,h2ph,refpha]=siminterf(dem,Bperp=100,doNoise=1,waterHeight=None,alpha=0.1745, \
wavelen=0.05666, theta=0.3316, R1=830000, dx=80):
DEPRECATED:doNoise can be 1 or 0. If zero gaussian noise is not added.(USE COH=None for 0 instead).
if Bpar is given, alpha is calculated based on Bpar and Bperp. See Radar Interferometry pg.117, by R. Hanssen.
coh=[None|'Geometric'|'Geometric+Temporal'|float|array]
If None, no additional noise is added.
Geometric: Based on critical perpendicular baseline (simnoise)
Temporal: Based on temporal baseline (see temporal_decorrelation_factor)
float: Single coherence value for all interferogram
array: Apply given coherence.
temporal_decorrelation_factor=3e-4*365 for btemp in years: exp(-TDF * Btemp) e.g. TDF=3e-4 for btemp in days (See Simulation of timeseries surface deformation by C.W. Lee et al., 2012)
'''
#based on SIMINTERF.m which was
# Created by Bert Kampes 05-Oct-2000
# Tested by Erik Steenbergen
#initialize output
numlines=dem.shape[0]
numpixels=dem.shape[1]
interf=zeros([numlines,numpixels]);
slope =zeros([numlines,numpixels]);
h2ph =ones([numlines,numpixels]);
refpha=ones([numlines,numpixels]);
# Some variables for ERS1/2 and Envisat
#alpha=deg2rad(10.); #[rad] baseline orientation
#wavelen = 0.05666; #[m] wavelength
#theta = deg2rad(19.) #[rad] looking angle to first pixel
#R1 = 830000. #[m] range to first point
pi4divlam = (-4.*pi)/wavelen #lam(lambda)=wavelen, can't use lambda in python it is a registered command.
#dx = 80 #[m] dem resolution
#Radarcode DEM by orbit information
print ('Radarcoding DEM')
numpixelsdem=dem.shape[1]
x0=sin(theta) * R1 #x coord. of first DEM point
sat1_x=0.
sat1_y=cos(theta) * R1 + dem[1,1]
maxrange = sqrt((x0+(numpixelsdem-1)*dx)**2+sat1_y**2)-dem.max();
R1extra = R1+dem.max();
totalrange = maxrange-R1extra;
rangebinsize = totalrange/numpixels;
rangegrid = arange(R1extra,maxrange,rangebinsize)-rangebinsize;
#compute range diff to slave satellite
#B = Bperp / cos(theta-alpha);
#batu - bpar
if (Bpar!=None):
alpha = theta - arctan2(Bpar, Bperp);
B = sqrt(Bpar**2.+Bperp**2.); #Bpar / sin(theta-alpha);
print 'alpha: ', alpha
else:
B = Bperp / cos(theta-alpha);
Bpar = B * sin (theta - alpha);
print 'Bpar: ', Bpar
#end bpar
sat2_x = B * cos(alpha);
sat2_y = B * sin(alpha) + sat1_y;
x = arange(x0,x0+dx*(numpixelsdem),dx);# x coord. w.r.t. sat1
x2sqr = (x - sat2_x)**2;
xsqr = x**2;
#compute range for all lines of the dem
for az in range(0,dem.shape[0]):
y = sat1_y-dem[az,:]
range2master = sqrt(y**2+xsqr)
y2 = sat2_y-dem[az,:]
range2slave = sqrt(y2**2+x2sqr)
phase = pi4divlam * (range2slave-range2master);
# remove reference phase
tantheta = x/y2
deltax = dem[az,:] / tantheta # far field approx
x2_0 = x - deltax
refpharangemaster = sqrt(sat1_y**2 + x2_0**2)
refpharangeslave = sqrt(sat2_y**2 + (x2_0-sat2_x)**2)
refphase = pi4divlam * (refpharangeslave-refpharangemaster);
refpha[az,:]=refphase;
phase = phase - refphase;
## Interpolate p to grid rangebins
## range is not always increasing due to foreshortning
sortindex = argsort(range2master);
range2master = range2master[sortindex]
phase = phase[sortindex];
interf[az,:]=interp(rangegrid,range2master,phase);
## calculate slope and simulate noise
slopedem= arctan2(diff(dem[az,:]),dx)
slopedem= hstack((slopedem, [0]))
slopedem= slopedem[sortindex]
slope[az,:]=interp(rangegrid,range2master,slopedem);
h2ph[az,:] = -pi4divlam*Bperp/(range2master*sin(theta));
noise=zeros(interf.shape)
if doNoise==1 and coh is None:
print("DEPRECATED. Use coh instead.")
coh="Geometric"
if coh is not None:
if "Geometric" in coh:
noiseCoherence=simnoise(slope, Bperp)
noise = noiseCoherence[0];
#coh = noiseCoherence[1];
if "Temporal" in coh and temporal_decorrelation_factor is not None:
temporal_coh=exp(-temporal_decorrelation_factor*Btemp)
noise=noise+random.randn(*interf.shape)*coh2stdpha(temporal_coh, 20)
if defoRate is not None: # Deformation is always included Coherence if specified.
noise=noise+ (pi4divlam*defoRate*Btemp)
if isfloat(coh) and coh.size==1: #isfloat=basic.isfloat
stdphase=coh2stdpha(coh, 20); # This calculation is based on simnoise.
noise=random.randn(*interf.shape) * stdphase
if isarray(coh) and coh.shape==interf.shape:
stdphase=coh2stdpha(coh, 20); # This calculation is based on simnoise.
noise=random.randn(*coh.shape) * stdphase
#noiseCoherence=simnoise(slope, Bperp, Bw=15550000.,wavelen=wavelen, theta=theta, R1=R1)
#noise = noiseCoherence[0];
#coh = noiseCoherence[1];
#if doNoise==1:
#coh=coherence(exp(-1j*interf), exp(-1j*(interf+noise)), [3,3]) # This overwrites coherence based on the actual noise applied. Should be close to input coherence???
interf= interf + noise # This also adds the deformation signal.
coh = stdpha2coh(moving_window(interf, func=std))
#if defoRate is not None:
# interf= interf+ (-pi4divlam*defoRate*Btemp)
if waterHeight!=None:
waterMask=(dem<waterHeight);
putmask(interf,waterMask,2*pi*randn(sum(waterMask)));
putmask(coh,waterMask,0.05*abs(randn(sum(waterMask))))
return [interf,coh,h2ph,refpha]
def simnoise(slope, Bperp, Bw=15550000., wavelen=0.05666, theta=0.3316, R1=830000.):
    """Simulate geometric-decorrelation phase noise for an interferogram.

    slope: local terrain slope [rad] (array).
    Bperp: perpendicular baseline [m].
    Bw: range bandwidth [Hz].
    wavelen: radar wavelength [m].
    theta: look angle [rad].
    R1: range to the first pixel [m].

    Returns [noise, gammageom]: a random phase-noise realisation and the
    geometric coherence derived from the critical perpendicular baseline.
    Defaults correspond to an ERS1/2-like configuration (range bandwidth,
    wavelength, look angle, satellite altitude).
    """
    lightspeed = scipy.constants.c  # [m/s]
    # Critical baseline: the Bperp at which geometric coherence vanishes.
    Bcrit = wavelen * (Bw / lightspeed) * R1 * tan(theta - slope)
    # abs() on Bperp keeps negative baselines from producing NaNs.
    gamma = abs((Bcrit - abs(Bperp)) / Bcrit)
    gamma[isnan(gamma)] = 0
    # Convert coherence to a phase standard deviation, then draw noise.
    sigma = coh2stdpha(gamma, 20)
    return [random.randn(*gamma.shape) * sigma, gamma]
def phaseDerivativeVariance(p):
"""phaseDerivativeVariance(phase)
This function calculates the derivative variance for the given complex phase
data. This function is based on Bruce Spottiswoode 2008 PhaseDerivativeVariance.m
file. This function is re-written based on Ghiglia and Pritt,
'Two dimensional phase unwrapping', 1998, p.76
"""
#calculate dr (range)
dims=p.shape
dr=zeros(dims)
#first row
dr[:,0]=angle(p[:,0]*conj(p[:,1]))
dr[:,-1]=angle(p[:,-2]*conj(p[:,-1]))
for r in r_[1:dims[1]-1]:
dr[:,r]=angle(p[:,r-1]*conj(p[:,r]))
nfilt=9.0
corrFilter= array([[1,1,1],[1,1,1],[1,1,1]])/nfilt #http://docs.scipy.org/doc/scipy-0.7.x/reference/tutorial/ndimage.html
mean_dr=scipy.ndimage.filters.correlate(dr,corrFilter,mode='nearest')
var_dr=scipy.ndimage.filters.correlate((dr-mean_dr)**2,corrFilter,mode='nearest')
#calculate da (azimuth), dy in spottiswoode
da=zeros(dims)
da[0,:]=angle(p[0,:]*conj(p[1,:]))
da[-1,:]=angle(p[-2,:]*conj(p[-1,:]))
for a in r_[1:dims[0]-1]:
da[a,:]=angle(p[a-1,:]*conj(p[a,:]))
mean_da=scipy.ndimage.filters.correlate(da,corrFilter,mode='nearest')
var_da=scipy.ndimage.filters.correlate((da-mean_da)**2,corrFilter,mode='nearest')
var=sqrt(var_da)+sqrt(var_dr)
return var
def phaseDerivativeVarianceReal(p):
    """phaseDerivativeVarianceReal(2dArray)
    Calculate the derivative variance for real-valued (already unwrapped)
    phase data. Based on Bruce Spottiswoode's 2008
    PhaseDerivativeVariance.m, re-written following Ghiglia and Pritt,
    'Two dimensional phase unwrapping', 1998, p.76.

    p: 2D real array.
    Returns sqrt(var_azimuth + var_range).
    """
    dims = p.shape
    # Difference along range (axis 1); first column uses the forward
    # difference, all other columns the backward difference.
    # (Vectorized slices replace the original per-column Python loop.)
    dr = np.zeros(dims)
    dr[:, 0] = p[:, 0] - p[:, 1]
    dr[:, 1:] = p[:, :-1] - p[:, 1:]
    nfilt = 9.0
    # 3x3 mean filter; see
    # http://docs.scipy.org/doc/scipy-0.7.x/reference/tutorial/ndimage.html
    corrFilter = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]]) / nfilt
    # scipy.ndimage.filters was deprecated and later removed; the same
    # functions live directly in scipy.ndimage.
    mean_dr = scipy.ndimage.correlate(dr, corrFilter, mode='nearest')
    var_dr = scipy.ndimage.correlate((dr - mean_dr) ** 2, corrFilter, mode='nearest')
    # Same treatment along azimuth (axis 0; "dy" in Spottiswoode's code).
    da = np.zeros(dims)
    da[0, :] = p[0, :] - p[1, :]
    da[1:, :] = p[:-1, :] - p[1:, :]
    mean_da = scipy.ndimage.correlate(da, corrFilter, mode='nearest')
    var_da = scipy.ndimage.correlate((da - mean_da) ** 2, corrFilter, mode='nearest')
    return np.sqrt(var_da + var_dr)
def cpxgradient(cpx):
    """Central-difference phase gradient of complex data along every axis.

    cpx: complex ndarray (unit-amplitude phasors of the wrapped phase).
    Returns a list with one real array per axis (same shape as cpx)
    holding the phase gradient along that axis, computed from wrapped
    phase differences of neighbouring samples.

    Fixes over the original implementation:
    * the input is no longer mutated across loop iterations (the old code
      reassigned the rolled array to ``cpx``, so for ndim >= 3 every axis
      after the first was differentiated on an already-permuted array);
    * the leading axis is rolled back with ``rollaxis(d, 0, k + 1)``, the
      true inverse of ``rollaxis(cpx, k, 0)`` (the old ``rollaxis(d, k, 0)``
      is only correct for 2D input);
    * Python-2 ``xrange`` replaced by ``range``.
    """
    out = []
    for k in range(cpx.ndim):
        # Work on a view with axis k rolled to the front; the original
        # array is left untouched so later axes see unmodified data.
        a = rollaxis(cpx, k, 0)
        d = zeros(a.shape)
        # One-sided wrapped phase differences along the leading axis.
        d[0:-1, :] = angle(a[1:, :] * conj(a[0:-1, :]))
        # Average neighbouring one-sided differences -> central differences.
        d[1:, :] = 0.5 * (d[1:, :] + d[0:-1, :])
        # Roll the leading axis back to position k (inverse of the roll above).
        out.append(rollaxis(d, 0, k + 1))
    return out
def multilook(x, ratio):
    """multilook(data,ratio)
    data: a 2D numpy array.
    ratio: list of looks per dimension, with number of elements equal to
      the number of data dimensions.
    CURRENTLY only 2D data is SUPPORTED.

    Returns the data averaged over non-overlapping ratio[0] x ratio[1]
    windows; trailing rows/columns that do not fill a complete window are
    dropped.
    See http://lists.ipython.scipy.org/pipermail/numpy-discussion/2010-July/051760.html
    """
    L = x.shape[0]
    P = x.shape[1]
    # np.int was removed in NumPy >= 1.24; the builtin int with floor
    # division is equivalent here (and what the old floor() call computed).
    outL = int(L // ratio[0])
    outP = int(P // ratio[1])
    # Crop to a whole number of windows, then average within each window.
    x = x[0:ratio[0] * outL, 0:ratio[1] * outP]
    out = x.reshape(outL, ratio[0], outP, ratio[1])
    return out.mean(axis=3).mean(axis=1)
def oversample(data, ratio, method='quick', shape=None):
    """oversample(data,ratio, method='quick', shape=None)
    data: is a numpy array.
    ratio: is a list of ratios with number of elements equal to number of data dimensions.
    method={'quick','linear', 'nearest', 'cubic'}
    shape: optional explicit output shape; overrides ratio when given.
    CURRENTLY only 2D data is SUPPORTED.

    'quick' fits a rectangular bivariate spline on the full grid; the
    other methods triangulate the valid (non-NaN) samples with griddata.
    NaNs in the input are masked out and re-inserted (dilated) in the
    output of the 'quick' method.
    """
    # Always build the NaN mask: the griddata branch below indexes with it
    # even when the input contains no NaNs (the original code raised a
    # NameError for NaN-free input with method != 'quick').
    m = np.isnan(data)
    includesNan = bool(m.any())
    if includesNan:
        z = data.copy()
        z[m] = 0  # the spline cannot handle NaN; re-masked after interpolation
    else:
        z = data
    x = np.r_[0:z.shape[0]]
    y = np.r_[0:z.shape[1]]
    if shape is None:
        X = np.linspace(0., z.shape[0] - 1, z.shape[0] * ratio[0])
        Y = np.linspace(0., z.shape[1] - 1, z.shape[1] * ratio[1])
    else:
        X = np.linspace(0., z.shape[0] - 1, shape[0])
        Y = np.linspace(0., z.shape[1] - 1, shape[1])
    if method == "quick":
        spl = scipy.interpolate.RectBivariateSpline(x, y, z)
        zo = spl(X, Y)
    else:
        y, x = np.meshgrid(y, x)
        Y, X = np.meshgrid(Y, X)
        zo = scipy.interpolate.griddata((x[~m], y[~m]), z[~m], (X, Y), method=method)
    if includesNan and (method == "quick"):
        # Interpolate the mask as well, threshold it back to boolean, and
        # dilate it so spline ringing around NaN holes is also masked.
        splm = scipy.interpolate.RectBivariateSpline(x, y, m)
        mo = splm(X, Y) >= 0.5
        mo = scipy.ndimage.binary_dilation(
            mo,
            iterations=int(np.ceil(np.sqrt(zo.shape[0] / z.shape[0] * zo.shape[1] / z.shape[1])) + 3))
        # np.bool was removed in NumPy >= 1.24; mo is already boolean here.
        zo[mo] = np.nan
    return zo
def rad2dist(radians, wavelength=0.056):
    """rad2dist(radians, wavelength=0.056)
    Convert interferometric phase [rad] to distance, expressed in the same
    unit as the wavelength (two-way travel: 4*pi radians per wavelength).
    """
    scale = wavelength / (4 * pi)
    return radians * scale
def dist2rad(distance, wavelength=0.056):
    """dist2rad(distance, wavelength=0.056)
    Convert a distance to interferometric phase [rad]; distance and
    wavelength must share the same unit (4*pi radians per wavelength).
    """
    return (4 * pi / wavelength) * distance
def h2ph(Bperp, wavelength=0.0566, R=830e3, theta=deg2rad(23.0), bistatic=False):
    """Height-to-phase conversion factor.

    Bperp: Perpendicular baseline [m]
    wavelength: Radar wavelength [m]
    R: range to master [m]
    theta: Look-angle [rad]
    bistatic: if True use the one-way (bistatic) phase constant instead of
      the usual two-way constant.

    Returns phase per meter of topographic height [rad/m].
    """
    # One-way phase constant for bistatic acquisitions, two-way otherwise.
    phase_const = (-2. * pi if bistatic else -4. * pi) / wavelength
    return -phase_const * Bperp / (R * sin(theta))
def xyz2los(inVector, projectionVector=zeros([1, 3]), incidenceAngle=0, headingAngle=0):
    """Project displacement vectors onto the radar line of sight.

    inVector: (..., 3) array of North/East/Up components.
    projectionVector: explicit LOS direction; when all-zero (the default)
      it is derived from the incidence and heading angles following
      Hanssen, Radar Interferometry, p. 162, Eq. 5.1.1.
    incidenceAngle, headingAngle: imaging geometry [rad], used only when
      projectionVector is all-zero.
    Returns the scalar LOS component(s).
    """
    if all(projectionVector == 0):
        # Build the LOS direction from the imaging geometry (North, East, Up).
        projectionVector = [
            -sin(incidenceAngle) * cos(headingAngle - 1.5 * pi),
            -sin(incidenceAngle) * sin(headingAngle - 1.5 * pi),
            cos(incidenceAngle),
        ]
    proj = atleast_2d(projectionVector)
    # Normalize so a non-unit projection vector still yields a projection.
    norm = sqrt(nansum(proj ** 2))
    return dot(inVector, proj.T) / norm
def los2up(los, incidenceAngle=0):
    """los2up(los, incidenceAngle)
    Convert line-of-sight displacement to vertical displacement under the
    assumption that the horizontal motion is zero.

    los: Line of sight deformation.
    incidenceAngle: radar incidence angle in radians.
    """
    # Pure-vertical assumption: u_up = LOS / cos(theta_inc).
    vertical = los / cos(incidenceAngle)
    return vertical
| bosmanoglu/adore-doris | lib/python/insar/__init__.py | Python | gpl-2.0 | 25,698 | [
"Gaussian"
] | eceef6ec85beabb7ed4a7fd217f1316e045bf6e7c389ba6662dba8f99aebc9df |
"""
This file defines IAM resource static configuration parameters for CDK Constructs
"""
IAM_Role_List = {
# Pattern
# "cdk-id": "role-name"
# Example:
# "cdk-infra-editor": "cdk-poc-infra-editor",
# "cdk-infra-viewer": "cdk-poc-infra-viewer",
}
| kubeflow/testing | aws/IaC/CDK/test-infra/config/static_config/IAM_Resources.py | Python | apache-2.0 | 269 | [
"CDK"
] | 9375513a98296d2eb354b234e0c7cf285b3c578ca45b0046e26c27621d66fe4f |
""" This module exposes the BaseClient class,
which serves as base for InnerRPCClient and TransferClient.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import _thread
import DIRAC
from DIRAC.Core.DISET.private.Protocols import gProtocolDict
from DIRAC.FrameworkSystem.Client.Logger import gLogger
from DIRAC.Core.Utilities import List, Network
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR
from DIRAC.ConfigurationSystem.Client.Config import gConfig
from DIRAC.ConfigurationSystem.Client.PathFinder import getServiceURL, getServiceFailoverURL
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
from DIRAC.ConfigurationSystem.Client.Helpers.CSGlobals import skipCACheck
from DIRAC.Core.DISET.private.TransportPool import getGlobalTransportPool
from DIRAC.Core.DISET.ThreadConfig import ThreadConfig
class BaseClient(object):
    """Glues together stubs with threading, credentials, and URLs discovery (by DIRAC vo and setup).
    Basically what needs to be done to enable RPC calls, and transfer, to find a URL.
    """

    # Value sent as extraCredentials when authenticating with the host certificate
    VAL_EXTRA_CREDENTIALS_HOST = "hosts"

    # Keyword-argument names accepted by the constructor (see __init__ docstring)
    KW_USE_CERTIFICATES = "useCertificates"
    KW_EXTRA_CREDENTIALS = "extraCredentials"
    KW_TIMEOUT = "timeout"
    KW_SETUP = "setup"
    KW_VO = "VO"
    KW_DELEGATED_DN = "delegatedDN"
    KW_DELEGATED_GROUP = "delegatedGroup"
    KW_IGNORE_GATEWAYS = "ignoreGateways"
    KW_PROXY_LOCATION = "proxyLocation"
    KW_PROXY_STRING = "proxyString"
    KW_PROXY_CHAIN = "proxyChain"
    KW_SKIP_CA_CHECK = "skipCACheck"
    KW_KEEP_ALIVE_LAPSE = "keepAliveLapse"

    # Per-thread configuration, shared by all BaseClient instances
    __threadConfig = ThreadConfig()

    def __init__(self, serviceName, **kwargs):
        """Constructor

        :param serviceName: URL of the service (proper uri or just System/Component)
        :param useCertificates: If set to True, use the server certificate
        :param extraCredentials:
        :param timeout: Timeout of the call (default 600 s)
        :param setup: Specify the Setup
        :param VO: Specify the VO
        :param delegatedDN: Not clear what it can be used for.
        :param delegatedGroup: Not clear what it can be used for.
        :param ignoreGateways: Ignore the DIRAC Gatways settings
        :param proxyLocation: Specify the location of the proxy
        :param proxyString: Specify the proxy string
        :param proxyChain: Specify the proxy chain
        :param skipCACheck: Do not check the CA
        :param keepAliveLapse: Duration for keepAliveLapse (heartbeat like)
        """
        if not isinstance(serviceName, str):
            raise TypeError(
                "Service name expected to be a string. Received %s type %s" % (str(serviceName), type(serviceName))
            )
        # Explicitly convert to a str to avoid Python 2 M2Crypto issues with unicode objects
        self._destinationSrv = str(serviceName)
        self._serviceName = str(serviceName)
        self.kwargs = kwargs
        self.__useCertificates = None
        # The CS useServerCertificate option can be overridden by explicit argument
        self.__forceUseCertificates = self.kwargs.get(self.KW_USE_CERTIFICATES)
        self.__initStatus = S_OK()
        self.__idDict = {}
        self.__extraCredentials = ""
        self.__enableThreadCheck = False
        self.__retry = 0
        self.__retryDelay = 0
        # by default we always have 1 url for example:
        # RPCClient('dips://volhcb38.cern.ch:9162/Framework/SystemAdministrator')
        self.__nbOfUrls = 1
        self.__nbOfRetry = 3  # by default we try three times
        self.__retryCounter = 1
        self.__bannedUrls = []
        # Run every discovery step; remember only the FIRST failure in __initStatus
        for initFunc in (
            self.__discoverSetup,
            self.__discoverVO,
            self.__discoverTimeout,
            self.__discoverURL,
            self.__discoverCredentialsToUse,
            self.__checkTransportSanity,
            self.__setKeepAliveLapse,
        ):
            result = initFunc()
            if not result["OK"] and self.__initStatus["OK"]:
                self.__initStatus = result
        self.numberOfURLs = 0
        self._initialize()
        # HACK for thread-safety:
        self.__allowedThreadID = False

    def _initialize(self):
        # Hook for subclasses; intentionally a no-op here.
        pass

    def getDestinationService(self):
        """Return service destination

        :return: str
        """
        return self._destinationSrv

    def getServiceName(self):
        """Return service name

        :return: str
        """
        return self._serviceName

    def __discoverSetup(self):
        """Discover which setup to use and stores it in self.setup
        The setup is looked for:
           * kwargs of the constructor (see KW_SETUP)
           * the ThreadConfig
           * in the CS /DIRAC/Setup
           * default to 'Test'

        :return: S_OK()/S_ERROR()
        """
        if self.KW_SETUP in self.kwargs and self.kwargs[self.KW_SETUP]:
            self.setup = str(self.kwargs[self.KW_SETUP])
        else:
            self.setup = self.__threadConfig.getSetup()
            if not self.setup:
                self.setup = gConfig.getValue("/DIRAC/Setup", "Test")
        return S_OK()

    def __discoverVO(self):
        """Discover which VO to use and stores it in self.vo
        The VO is looked for:
           * kwargs of the constructor (see KW_VO)
           * in the CS /DIRAC/VirtualOrganization
           * default to 'unknown'

        :return: S_OK()/S_ERROR()
        """
        if self.KW_VO in self.kwargs and self.kwargs[self.KW_VO]:
            self.vo = str(self.kwargs[self.KW_VO])
        else:
            self.vo = gConfig.getValue("/DIRAC/VirtualOrganization", "unknown")
        return S_OK()

    def __discoverURL(self):
        """Calculate the final URL. It is called at initialization and in connect in case of issue

        It sets:
          * self.serviceURL: the url (dips) selected as target using __findServiceURL
          * self.__URLTuple: a split of serviceURL obtained by Network.splitURL
          * self._serviceName: the last part of URLTuple (typically System/Component)

        :return: S_OK()/S_ERROR()
        """
        # Calculate final URL
        try:
            result = self.__findServiceURL()
        except Exception as e:
            return S_ERROR(repr(e))
        if not result["OK"]:
            return result
        self.serviceURL = result["Value"]
        retVal = Network.splitURL(self.serviceURL)
        if not retVal["OK"]:
            return retVal
        self.__URLTuple = retVal["Value"]
        self._serviceName = self.__URLTuple[-1]
        # Merge per-host connection options from the CS into kwargs
        # (explicit kwargs take precedence).
        res = gConfig.getOptionsDict("/DIRAC/ConnConf/%s:%s" % self.__URLTuple[1:3])
        if res["OK"]:
            opts = res["Value"]
            for k in opts:
                if k not in self.kwargs:
                    self.kwargs[k] = opts[k]
        return S_OK()

    def __discoverTimeout(self):
        """Discover which timeout to use and stores it in self.timeout
        The timeout can be specified kwargs of the constructor (see KW_TIMEOUT),
        with a minimum of 120 seconds.
        If unspecified, the timeout will be 600 seconds.
        The value is set in self.timeout, as well as in self.kwargs[KW_TIMEOUT]

        :return: S_OK()/S_ERROR()
        """
        if self.KW_TIMEOUT in self.kwargs:
            self.timeout = self.kwargs[self.KW_TIMEOUT]
        else:
            self.timeout = False
        if self.timeout:
            self.timeout = max(120, self.timeout)
        else:
            self.timeout = 600
        self.kwargs[self.KW_TIMEOUT] = self.timeout
        return S_OK()

    def __discoverCredentialsToUse(self):
        """Discovers which credentials to use for connection.

        * Server certificate:
          -> If KW_USE_CERTIFICATES in kwargs, sets it in self.__useCertificates
          -> If not, check gConfig.useServerCertificate(),
             and sets it in self.__useCertificates and kwargs[KW_USE_CERTIFICATES]
        * Certification Authorities check:
           -> if KW_SKIP_CA_CHECK is not in kwargs and we are using the certificates,
              set KW_SKIP_CA_CHECK to false in kwargs
           -> if KW_SKIP_CA_CHECK is not in kwargs and we are not using the certificate, check the CS.skipCACheck
        * Proxy Chain
           -> if KW_PROXY_CHAIN in kwargs, we remove it and dump its string form into kwargs[KW_PROXY_STRING]

        :return: S_OK()/S_ERROR()
        """
        # Use certificates?
        if self.KW_USE_CERTIFICATES in self.kwargs:
            self.__useCertificates = self.kwargs[self.KW_USE_CERTIFICATES]
        else:
            self.__useCertificates = gConfig.useServerCertificate()
            self.kwargs[self.KW_USE_CERTIFICATES] = self.__useCertificates
        if self.KW_SKIP_CA_CHECK not in self.kwargs:
            if self.__useCertificates:
                self.kwargs[self.KW_SKIP_CA_CHECK] = False
            else:
                self.kwargs[self.KW_SKIP_CA_CHECK] = skipCACheck()
        if self.KW_PROXY_CHAIN in self.kwargs:
            try:
                self.kwargs[self.KW_PROXY_STRING] = self.kwargs[self.KW_PROXY_CHAIN].dumpAllToString()["Value"]
                del self.kwargs[self.KW_PROXY_CHAIN]
            except Exception:
                return S_ERROR("Invalid proxy chain specified on instantiation")
        return S_OK()

    def __discoverExtraCredentials(self):
        """Add extra credentials informations.

        * self.__extraCredentials
          -> if KW_EXTRA_CREDENTIALS in kwargs, we set it
          -> Otherwise, if we use the server certificate, we set it to VAL_EXTRA_CREDENTIALS_HOST
          -> If we have a delegation (see bellow), we set it to (delegatedDN, delegatedGroup)
          -> otherwise it is an empty string
        * delegation:
          -> if KW_DELEGATED_DN in kwargs, or delegatedDN in threadConfig, put in in self.kwargs
          -> if KW_DELEGATED_GROUP in kwargs or delegatedGroup in threadConfig, put it in self.kwargs
          -> If we have a delegated DN but not group, we find the corresponding group in the CS

        :return: S_OK()/S_ERROR()
        """
        # which extra credentials to use?
        self.__extraCredentials = self.VAL_EXTRA_CREDENTIALS_HOST if self.__useCertificates else ""
        if self.KW_EXTRA_CREDENTIALS in self.kwargs:
            self.__extraCredentials = self.kwargs[self.KW_EXTRA_CREDENTIALS]
        # Are we delegating something?
        delegatedDN = self.kwargs.get(self.KW_DELEGATED_DN) or self.__threadConfig.getDN()
        delegatedGroup = self.kwargs.get(self.KW_DELEGATED_GROUP) or self.__threadConfig.getGroup()
        if delegatedDN:
            self.kwargs[self.KW_DELEGATED_DN] = delegatedDN
            if not delegatedGroup:
                result = Registry.findDefaultGroupForDN(delegatedDN)
                if not result["OK"]:
                    return result
                delegatedGroup = result["Value"]
            self.kwargs[self.KW_DELEGATED_GROUP] = delegatedGroup
            self.__extraCredentials = (delegatedDN, delegatedGroup)
        return S_OK()

    def __findServiceURL(self):
        """Discovers the URL of a service, taking into account gateways, multiple URLs, banned URLs


        If the site on which we run is configured to use gateways (/DIRAC/Gateways/<siteName>),
        these URLs will be used. To ignore the gateway, it is possible to set KW_IGNORE_GATEWAYS
        to False in kwargs.

        If self._destinationSrv (given as constructor attribute) is a properly formed URL,
        we just return this one. If we have to use a gateway, we just replace the server name in the url.

        The list of URLs defined in the CS (<System>/URLs/<Component>) is randomized

        This method also sets some attributes:
          * self.__nbOfUrls = number of URLs
          * self.__nbOfRetry = 2 if we have more than 2 urls, otherwise 3
          * self.__bannedUrls is reinitialized if all the URLs are banned

        :return: S_OK(str)/S_ERROR() -- the selected URL
        """
        if not self.__initStatus["OK"]:
            return self.__initStatus

        # Load the Gateways URLs for the current site Name
        gatewayURL = False
        if not self.kwargs.get(self.KW_IGNORE_GATEWAYS):
            dRetVal = gConfig.getOption("/DIRAC/Gateways/%s" % DIRAC.siteName())
            if dRetVal["OK"]:
                rawGatewayURL = List.randomize(List.fromChar(dRetVal["Value"], ","))[0]
                gatewayURL = "/".join(rawGatewayURL.split("/")[:3])

        # If what was given as constructor attribute is a properly formed URL,
        # we just return this one.
        # If we have to use a gateway, we just replace the server name in it
        for protocol in gProtocolDict:
            if self._destinationSrv.find("%s://" % protocol) == 0:
                gLogger.debug("Already given a valid url", self._destinationSrv)
                if not gatewayURL:
                    return S_OK(self._destinationSrv)
                gLogger.debug("Reconstructing given URL to pass through gateway")
                path = "/".join(self._destinationSrv.split("/")[3:])
                finalURL = "%s/%s" % (gatewayURL, path)
                gLogger.debug("Gateway URL conversion:\n %s -> %s" % (self._destinationSrv, finalURL))
                return S_OK(finalURL)

        if gatewayURL:
            gLogger.debug("Using gateway", gatewayURL)
            return S_OK("%s/%s" % (gatewayURL, self._destinationSrv))

        # We extract the list of URLs from the CS (System/URLs/Component)
        try:
            urls = getServiceURL(self._destinationSrv, setup=self.setup)
        except Exception as e:
            return S_ERROR("Cannot get URL for %s in setup %s: %s" % (self._destinationSrv, self.setup, repr(e)))
        if not urls:
            return S_ERROR("URL for service %s not found" % self._destinationSrv)

        failoverUrls = []
        # Try if there are some failover URLs to use as last resort
        try:
            failoverUrlsStr = getServiceFailoverURL(self._destinationSrv, setup=self.setup)
            if failoverUrlsStr:
                failoverUrls = failoverUrlsStr.split(",")
        except Exception:
            # NOTE(review): failover URLs are best-effort, so any lookup error is ignored
            pass

        # We randomize the list, and add at the end the failover URLs (System/FailoverURLs/Component)
        urlsList = List.randomize(List.fromChar(urls, ",")) + failoverUrls
        self.__nbOfUrls = len(urlsList)
        self.__nbOfRetry = (
            2 if self.__nbOfUrls > 2 else 3
        )  # we retry 2 times all services, if we run more than 2 services
        if self.__nbOfUrls == len(self.__bannedUrls):
            self.__bannedUrls = []  # retry all urls
            gLogger.debug("Retrying again all URLs")

        if len(self.__bannedUrls) > 0 and len(urlsList) > 1:
            # we have host which is not accessible. We remove that host from the list.
            # We only remove if we have more than one instance
            for i in self.__bannedUrls:
                gLogger.debug("Removing banned URL", "%s" % i)
                urlsList.remove(i)

        # Take the first URL from the list
        # randUrls = List.randomize( urlsList ) + failoverUrls
        sURL = urlsList[0]

        # If we have banned URLs, and several URLs at disposals, we make sure that the selected sURL
        # is not on a host which is banned. If it is, we take the next one in the list using __selectUrl
        if len(self.__bannedUrls) > 0 and self.__nbOfUrls > 2:  # when we have multiple services then we can
            # have a situation when two services are running on the same machine with different ports...
            retVal = Network.splitURL(sURL)
            nexturl = None
            if retVal["OK"]:
                nexturl = retVal["Value"]

                found = False
                for i in self.__bannedUrls:
                    retVal = Network.splitURL(i)
                    if retVal["OK"]:
                        bannedurl = retVal["Value"]
                    else:
                        break
                    # We found a banned URL on the same host as the one we are running on
                    if nexturl[1] == bannedurl[1]:
                        found = True
                        break
                if found:
                    nexturl = self.__selectUrl(nexturl, urlsList[1:])
                    if nexturl:  # an url found which is in different host
                        sURL = nexturl
        gLogger.debug("Discovering URL for service", "%s -> %s" % (self._destinationSrv, sURL))
        return S_OK(sURL)

    def __selectUrl(self, notselect, urls):
        """In case when multiple services are running in the same host, a new url has to be in a different host
        Note: If we do not have different host we will use the selected url...

        :param notselect: URL that should NOT be selected
        :param list urls: list of potential URLs

        :return: str -- selected URL
        """
        url = None
        for i in urls:
            retVal = Network.splitURL(i)
            if retVal["OK"]:
                if retVal["Value"][1] != notselect[1]:  # the hosts are different
                    url = i
                    break
                else:
                    gLogger.error(retVal["Message"])
        return url

    def __checkThreadID(self):
        """
        ..warning:: just guessing....
          This seems to check that we are not creating a client and then using it
          in a multithreaded environment.
          However, it is triggered only if self.__enableThreadCheck is to True, but it is
          hardcoded to False, and does not seem to be modified anywhere in the code.
        """
        if not self.__initStatus["OK"]:
            return self.__initStatus
        cThID = _thread.get_ident()
        if not self.__allowedThreadID:
            self.__allowedThreadID = cThID
        elif cThID != self.__allowedThreadID:
            msgTxt = """
=======DISET client thread safety error========================
Client %s
can only run on thread %s
and this is thread %s
===============================================================""" % (
                str(self),
                self.__allowedThreadID,
                cThID,
            )
            gLogger.error("DISET client thread safety error", msgTxt)
            # raise Exception( msgTxt )

    def _connect(self):
        """Establish the connection.
        It uses the URL discovered in __discoverURL.
        In case the connection cannot be established, __discoverURL
        is called again, and _connect calls itself.
        We stop after trying self.__nbOfRetry * self.__nbOfUrls

        :return: S_OK()/S_ERROR()
        """
        # Check if the useServerCertificate configuration changed
        # Note: I am not really sure that all this block makes
        # any sense at all since all these variables are
        # evaluated in __discoverCredentialsToUse
        if gConfig.useServerCertificate() != self.__useCertificates:
            if self.__forceUseCertificates is None:
                self.__useCertificates = gConfig.useServerCertificate()
                self.kwargs[self.KW_USE_CERTIFICATES] = self.__useCertificates
                # The server certificate use context changed, rechecking the transport sanity
                result = self.__checkTransportSanity()
                if not result["OK"]:
                    return result

        # Take all the extra credentials
        self.__discoverExtraCredentials()
        if not self.__initStatus["OK"]:
            return self.__initStatus
        if self.__enableThreadCheck:
            self.__checkThreadID()

        gLogger.debug("Trying to connect to: %s" % self.serviceURL)
        try:
            # Calls the transport method of the apropriate protocol.
            # self.__URLTuple[1:3] = [server name, port, System/Component]
            transport = gProtocolDict[self.__URLTuple[0]]["transport"](self.__URLTuple[1:3], **self.kwargs)
            # the socket timeout is the default value which is 1.
            # later we increase to 5
            retVal = transport.initAsClient()
            # We try at most __nbOfRetry each URLs
            if not retVal["OK"]:
                gLogger.warn("Issue getting socket:", "%s : %s : %s" % (transport, self.__URLTuple, retVal["Message"]))
                # We try at most __nbOfRetry each URLs
                if self.__retry < self.__nbOfRetry * self.__nbOfUrls - 1:
                    # Recompose the URL (why not using self.serviceURL ? )
                    url = "%s://%s:%d/%s" % (
                        self.__URLTuple[0],
                        self.__URLTuple[1],
                        int(self.__URLTuple[2]),
                        self.__URLTuple[3],
                    )
                    # Add the url to the list of banned URLs if it is not already there. (Can it happen ? I don't think so)
                    if url not in self.__bannedUrls:
                        gLogger.warn("Non-responding URL temporarily banned", "%s" % url)
                        self.__bannedUrls += [url]
                    # Increment the retry counter
                    self.__retry += 1
                    # 16.07.20 CHRIS: I guess this setSocketTimeout does not behave as expected.
                    # If the initasClient did not work, we anyway re-enter the whole method,
                    # so a new transport object is created.
                    # However, it might be that this timeout value was propagated down to the
                    # SocketInfoFactory singleton, and thus used, but that means that the timeout
                    # specified in parameter was then void.
                    # If it is our last attempt for each URL, we increase the timeout
                    if self.__retryCounter == self.__nbOfRetry - 1:
                        transport.setSocketTimeout(5)  # we increase the socket timeout in case the network is not good
                    gLogger.info("Retry connection", ": %d to %s" % (self.__retry, self.serviceURL))
                    # If we tried all the URL, we increase the global counter (__retryCounter), and sleep
                    if len(self.__bannedUrls) == self.__nbOfUrls:
                        self.__retryCounter += 1
                        # we run only one service! In that case we increase the retry delay.
                        self.__retryDelay = 3.0 / self.__nbOfUrls if self.__nbOfUrls > 1 else 2
                        gLogger.info("Waiting %f seconds before retry all service(s)" % self.__retryDelay)
                        time.sleep(self.__retryDelay)
                    # rediscover the URL
                    self.__discoverURL()
                    # try to reconnect
                    return self._connect()
                else:
                    return retVal
        except Exception as e:
            gLogger.exception(lException=True, lExcInfo=True)
            return S_ERROR("Can't connect to %s: %s" % (self.serviceURL, repr(e)))
        # We add the connection to the transport pool
        gLogger.debug("Connected to: %s" % self.serviceURL)
        trid = getGlobalTransportPool().add(transport)

        return S_OK((trid, transport))

    def _disconnect(self, trid):
        """Disconnect the connection.

        :param str trid: Transport ID in the transportPool
        """
        getGlobalTransportPool().close(trid)

    @staticmethod
    def _serializeStConnectionInfo(stConnectionInfo):
        """We want to send tuple but we need to convert
        into a list
        """
        serializedTuple = [list(x) if isinstance(x, tuple) else x for x in stConnectionInfo]
        return serializedTuple

    def _proposeAction(self, transport, action):
        """Proposes an action by sending a tuple containing

          * System/Component
          * Setup
          * VO
          * action
          * extraCredentials

        It is kind of a handshake.

        The server might ask for a delegation, in which case it is done here.
        The result of the delegation is then returned.

        :param transport: the Transport object returned by _connect
        :param action: tuple (<action type>, <action name>). It depends on the
                       subclasses of BaseClient. <action type> can be for example
                       'RPC' or 'FileTransfer'

        :return: whatever the server sent back
        """
        if not self.__initStatus["OK"]:
            return self.__initStatus
        stConnectionInfo = ((self.__URLTuple[3], self.setup, self.vo), action, self.__extraCredentials, DIRAC.version)

        # Send the connection info and get the answer back
        retVal = transport.sendData(S_OK(BaseClient._serializeStConnectionInfo(stConnectionInfo)))
        if not retVal["OK"]:
            return retVal
        serverReturn = transport.receiveData()

        # TODO: Check if delegation is required. This seems to be used only for the GatewayService
        if serverReturn["OK"] and "Value" in serverReturn and isinstance(serverReturn["Value"], dict):
            gLogger.debug("There is a server requirement")
            serverRequirements = serverReturn["Value"]
            if "delegate" in serverRequirements:
                gLogger.debug("A delegation is requested")
                serverReturn = self.__delegateCredentials(transport, serverRequirements["delegate"])
        return serverReturn

    def __delegateCredentials(self, transport, delegationRequest):
        """Perform a credential delegation. This seems to be used only for the GatewayService.
        It calls the delegation mechanism of the Transport class. Note that it is not used when
        delegating credentials to the ProxyDB

        :param transport: the Transport object returned by _connect
        :param delegationRequest: delegation request

        :return: S_OK()/S_ERROR()
        """
        retVal = gProtocolDict[self.__URLTuple[0]]["delegation"](delegationRequest, self.kwargs)
        if not retVal["OK"]:
            return retVal
        retVal = transport.sendData(retVal["Value"])
        if not retVal["OK"]:
            return retVal
        return transport.receiveData()

    def __checkTransportSanity(self):
        """Calls the sanity check of the underlying Transport object
        and stores the result in self.__idDict.
        It is checked at the creation of the BaseClient, and when connecting
        if the use of the certificate has changed.

        :return: S_OK()/S_ERROR()
        """
        if not self.__initStatus["OK"]:
            return self.__initStatus
        retVal = gProtocolDict[self.__URLTuple[0]]["sanity"](self.__URLTuple[1:3], self.kwargs)
        if not retVal["OK"]:
            return retVal
        idDict = retVal["Value"]
        for key in idDict:
            self.__idDict[key] = idDict[key]
        return S_OK()

    def __setKeepAliveLapse(self):
        """Select the maximum Keep alive lapse between
        150 seconds and what is specifind in kwargs[KW_KEEP_ALIVE_LAPSE],
        and sets it in kwargs[KW_KEEP_ALIVE_LAPSE]

        :return: S_OK()/S_ERROR()
        """
        kaa = 1
        if self.KW_KEEP_ALIVE_LAPSE in self.kwargs:
            try:
                kaa = max(0, int(self.kwargs[self.KW_KEEP_ALIVE_LAPSE]))
            except Exception:
                # Non-numeric values fall back to the default lapse below
                pass
        if kaa:
            kaa = max(150, kaa)
        self.kwargs[self.KW_KEEP_ALIVE_LAPSE] = kaa
        return S_OK()

    def _getBaseStub(self):
        """Returns a list with [self._destinationSrv, newKwargs]
        self._destinationSrv is what was given as first parameter of the init serviceName

        newKwargs is an updated copy of kwargs:
          * if set, we remove the useCertificates (KW_USE_CERTIFICATES) in newKwargs

        This method is just used to return information in case of error in the InnerRPCClient

        :return: tuple
        """
        newKwargs = dict(self.kwargs)
        # Remove useCertificates as the forwarder of the call will have to
        # independently decide whether to use their cert or not anyway.
        if "useCertificates" in newKwargs:
            del newKwargs["useCertificates"]
        return [self._destinationSrv, newKwargs]

    def __bool__(self):
        return True

    # For Python 2 compatibility
    __nonzero__ = __bool__

    def __str__(self):
        return "<DISET Client %s %s>" % (self.serviceURL, self.__extraCredentials)
| ic-hep/DIRAC | src/DIRAC/Core/DISET/private/BaseClient.py | Python | gpl-3.0 | 28,847 | [
"DIRAC"
] | 5f5eae69d79f6f6f79611f749c2eadff5d7882a3dfd3e6d8112e8f8280108838 |
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1406885498.521956
__CHEETAH_genTimestamp__ = 'Fri Aug 1 18:31:38 2014'
__CHEETAH_src__ = '/home/wslee2/models/5-wo/force1plus/openpli3.0/build-force1plus/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-1+git5+3c0c4fbdb28d7153bf2140459b553b3d5cdd4149-r0/git/plugin/controllers/views/web/recordnow.tmpl'
__CHEETAH_srcLastModified__ = 'Fri Aug 1 18:30:05 2014'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class recordnow(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(recordnow, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
_orig_filter_27555620 = _filter
filterName = u'WebSafe'
if self._CHEETAH__filters.has_key("WebSafe"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2simplexmlresult>
\t<e2state>''')
_v = VFFSL(SL,"result",True) # u'$result' on line 4, col 11
if _v is not None: write(_filter(_v, rawExpr=u'$result')) # from line 4, col 11.
write(u'''</e2state>
\t<e2statetext>''')
_v = VFFSL(SL,"message",True) # u'$message' on line 5, col 15
if _v is not None: write(_filter(_v, rawExpr=u'$message')) # from line 5, col 15.
write(u'''</e2statetext>\t
</e2simplexmlresult>
''')
_filter = self._CHEETAH__currentFilter = _orig_filter_27555620
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_recordnow= 'respond'
## END CLASS DEFINITION
if not hasattr(recordnow, '_initCheetahAttributes'):
templateAPIClass = getattr(recordnow, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(recordnow)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=recordnow()).run()
| MOA-2011/enigma2-plugin-extensions-openwebif | plugin/controllers/views/web/recordnow.py | Python | gpl-2.0 | 5,222 | [
"VisIt"
] | 369134208c35d71266d00a305e29b233d5f74adc7ec92391bc0d4ab7877fecb8 |
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 24 16:18:10 2016
http://mmcif.wwpdb.org/pdbx-mmcif-home-page.html
@author: noel
"""
#from Bio.PDB import *
import sys
import Bio.PDB.MMCIF2Dict as mmcifD
from Bio.PDB.MMCIFParser import *
from Bio.PDB.Superimposer import Superimposer
from Bio.PDB.Atom import Atom
import pandas as pd
import numpy as np
#from CHARMM_Parser import *
# TODO: Make this class an extended class of Bio.PDB that solves all those
# problems modifying values.
class index_super_structure(object):
def __init__(self, ss):
self.Sup_Struc = ss
self.sorted_by_aaid = False
self.Fixed_Prolines = False
def generate_indexes_from_Full_Structure(self):
self.aa_indx = []
self.aaid_indx = []
self.aaid_indx2 = []
self.ent_id_indx = []
self.ent_id_indx2 = []
self.chn_indx = []
self.chn_indx2 = []
self.comp_indx = []
self.comp = []
current_aaid = 0
current_ent_id = 0
current_chn = ''
current_comp = ''
self.aaid_end = []
self.ent_id_end = []
self.chn_end = []
self.comp_end = []
aaid_cnt = 0
ent_id_cnt = 0
chn_cnt = 0
comp_cnt = 0
for i in range(self.Sup_Struc.shape[0]):
if i == 0:
self.aa_indx.append(self.Sup_Struc.loc[i,'aa'])
self.aaid_indx.append(i)
self.ent_id_indx.append(i)
self.ent_id_indx2.append(self.Sup_Struc.loc[i,'ent_id'])
self.chn_indx.append(i)
self.chn_indx2.append(self.Sup_Struc.loc[i,'chain'])
self.comp_indx.append(i)
self.comp.append(self.Sup_Struc.loc[i,'component'][0:4])
self.aaid_indx2.append(self.Sup_Struc.loc[i,'aaid'])
current_aaid = self.Sup_Struc.loc[i,'aaid']
current_ent_id = self.Sup_Struc.loc[i,'ent_id']
current_chn = self.Sup_Struc.loc[i,'chain']
current_comp = self.Sup_Struc.loc[i,'component'][0:4]
aaid_cnt = 0
ent_id_cnt = 0
chn_cnt = 0
comp_cnt = 0
else:
if self.Sup_Struc.loc[i,'aaid'] != current_aaid:
self.aaid_indx.append(i)
current_aaid = self.Sup_Struc.loc[i,'aaid']
self.aaid_end.append(aaid_cnt)
aaid_cnt += 1
else:
aaid_cnt += 1
if self.Sup_Struc.loc[i,'ent_id'] != current_ent_id:
self.ent_id_indx.append(i)
current_ent_id = self.Sup_Struc.loc[i,'ent_id']
self.ent_id_end.append(ent_id_cnt)
ent_id_cnt += 1
else:
ent_id_cnt += 1
if self.Sup_Struc.loc[i,'chain'] != current_chn:
self.chn_indx.append(i)
current_chn = self.Sup_Struc.loc[i,'chain']
self.chn_end.append(chn_cnt)
chn_cnt += 1
else:
chn_cnt += 1
if self.Sup_Struc.loc[i,'component'][0:4] != current_comp:
self.comp_indx.append(i)
self.comp.append(self.Sup_Struc.loc[i,'component'][0:4])
self.aaid_indx2.append(self.Sup_Struc.loc[i,'aaid'])
current_comp = self.Sup_Struc.loc[i,'component'][0:4]
self.aa_indx.append(self.Sup_Struc.loc[i,'aa'])
self.chn_indx2.append(self.Sup_Struc.loc[i,'chain'])
self.ent_id_indx2.append(self.Sup_Struc.loc[i,'ent_id'])
self.comp_end.append(comp_cnt)
comp_cnt += 1
else:
comp_cnt += 1
self.aaid_end.append(aaid_cnt)
self.ent_id_end.append(ent_id_cnt)
self.chn_end.append(chn_cnt)
self.comp_end.append(comp_cnt)
for i in range(len(self.aaid_indx)):
self.aaid_indx[i] = (self.aaid_indx[i],self.aaid_end[i])
for i in range(len(self.ent_id_indx)):
self.ent_id_indx[i] = (self.ent_id_indx[i],self.ent_id_end[i])
for i in range(len(self.chn_indx)):
self.chn_indx[i] = (self.chn_indx[i],self.chn_end[i])
for i in range(len(self.comp_indx)):
#self.comp_indx[i] = (self.comp_indx[i],self.comp_end[i])
#self.comp_indx[i] = (self.aa_indx[i],self.aaid_indx[i],self.comp[i],self.comp_indx[i],self.comp_end[i])
self.comp_indx[i] = (self.chn_indx2[i],self.ent_id_indx2[i],self.aaid_indx2[i],self.aa_indx[i],self.comp[i],self.comp_indx[i],self.comp_end[i])
# TODO Re arranging atom positions in the Super Structure Full_Structure is something that is done for component
# analysis only becuase in CHARMM the order of atoms might be an issue, but I need to check.
def fix_proline_components(self):
if not self.Fixed_Prolines:
idx_ss_length = len(self.comp_indx)
count_idx_ss = 0
while count_idx_ss < idx_ss_length:
if self.comp_indx[count_idx_ss][3] == 'PRO':
pass
def sort_comp_index_by_aaid_within_chains(self):
''' This to make the output match the output by the Graduate School Perl program
to verify that the energies calculated are equal
'''
if not self.sorted_by_aaid:
idx_ss_length = len(self.comp_indx)
for i in range(0,idx_ss_length-1):
for j in range(i+1,idx_ss_length):
if (self.comp_indx[i][2] != self.comp_indx[j][2]) and \
(self.comp_indx[i][0] == self.comp_indx[j][0]):
if (str(self.comp_indx[i][2]) > str(self.comp_indx[j][2])):
temp = self.comp_indx[i]
self.comp_indx[i] = self.comp_indx[j]
self.comp_indx[j] = temp
# Bubble sort above just order the tuples by aaid, but it also alters the AMIN-SIDE-CARB order of components
# which is important for consistency. The following while loop adjusts the order of components.
count_idx_ss = 0
while count_idx_ss < idx_ss_length:
if self.comp_indx[count_idx_ss][3] == 'GLY':
if self.comp_indx[count_idx_ss][4] == 'AMIN':
pass
else:
temp = self.comp_indx[count_idx_ss]
self.comp_indx[count_idx_ss] = self.comp_indx[count_idx_ss+1]
self.comp_indx[count_idx_ss+1] = temp
count_idx_ss += 2
else:
if self.comp_indx[count_idx_ss][4] == 'AMIN':
if self.comp_indx[count_idx_ss+1][4] == 'SIDE':
pass
elif self.comp_indx[count_idx_ss+1][4] == 'CARB':
temp = self.comp_indx[count_idx_ss+1]
self.comp_indx[count_idx_ss+1] = self.comp_indx[count_idx_ss+2]
self.comp_indx[count_idx_ss+2] = temp
elif self.comp_indx[count_idx_ss][4] == 'CARB':
if self.comp_indx[count_idx_ss+1][4] == 'SIDE':
temp = self.comp_indx[count_idx_ss]
self.comp_indx[count_idx_ss] = self.comp_indx[count_idx_ss+2]
self.comp_indx[count_idx_ss+2] = temp
elif self.comp_indx[count_idx_ss+1][4] == 'AMIN':
temp = self.comp_indx[count_idx_ss]
self.comp_indx[count_idx_ss] = self.comp_indx[count_idx_ss+1]
self.comp_indx[count_idx_ss+1] = temp
temp = self.comp_indx[count_idx_ss+1]
self.comp_indx[count_idx_ss+1] = self.comp_indx[count_idx_ss+2]
self.comp_indx[count_idx_ss+2] = temp
elif self.comp_indx[count_idx_ss][4] == 'SIDE':
if self.comp_indx[count_idx_ss+1][4] == 'AMIN':
temp = self.comp_indx[count_idx_ss]
self.comp_indx[count_idx_ss] = self.comp_indx[count_idx_ss+1]
self.comp_indx[count_idx_ss+1] = temp
elif self.comp_indx[count_idx_ss+1][4] == 'CARB':
temp = self.comp_indx[count_idx_ss]
self.comp_indx[count_idx_ss] = self.comp_indx[count_idx_ss+1]
self.comp_indx[count_idx_ss+1] = temp
temp = self.comp_indx[count_idx_ss]
self.comp_indx[count_idx_ss] = self.comp_indx[count_idx_ss+2]
self.comp_indx[count_idx_ss+2] = temp
count_idx_ss += 3
self.sorted_by_aaid = True
class Super_Structure(object):
correct = {'ALA':{'H':'HN'},\
'ARG':{'H':'HN','HB3':'HB1','HG3':'HG1','HD3':'HD1'},\
'ASN':{'H':'HN','HB3':'HB1'},\
'ASP':{'H':'HN'},\
'CYS':{'H':'HN','HB3':'HB1'},\
'GLU':{'H':'HN','HB3':'HB1','HG3':'HG1'},\
'GLN':{'H':'HN','HB3':'HB1','HG3':'HG1'},\
'GLY':{'H':'HN','HA3':'HA1'},\
'HSD':{'H':'HN','HB3':'HB1'},\
'HSE':{'H':'HN'},\
'HSP':{'H':'HN'},\
'ILE':{'CD1':'CD','H':'HN','HG13':'HG11','HD11':'HD1','HD12':'HD2',\
'HD13':'HD3','HB3':'HB1'},\
'LEU':{'H':'HN','HB3':'HB1'},\
'LYS':{'H':'HN','HB3':'HB1','HG3':'HG1','HD3':'HD1','HE3':'HE1'},\
'MET':{'H':'HN'},\
'PHE':{'H':'HN','HB3':'HB1'},\
'PRO':{'H':'HN','HB3':'HB1','HG3':'HG1','HD3':'HD1'},\
'SER':{'H':'HN','HB3':'HB1','HG':'HG1'},\
'THR':{'H':'HN'},\
'TRP':{'H':'HN'},\
'TYR':{'H':'HN','HB3':'HB1'},\
'VAL':{'H':'HN'}}
inv_correct = {'ALA':{'HN':'H'},\
'ARG':{'HN':'H','HB1':'HB3','HG1':'HG3','HD1':'HD3','OT1':'O'},\
'ASN':{'HN':'H','HB1':'HB3','OT1':'O'},\
'ASP':{'HN':'H','OT1':'O'},\
'CYS':{'HN':'H','HB1':'HB3','OT1':'O'},\
'GLU':{'HN':'H','HB1':'HB3','HG1':'HG3','OT1':'O'},\
'GLN':{'HN':'H','HB1':'HB3','HG1':'HG3','OT1':'O'},\
'GLY':{'HN':'H','HA1':'HA3','OT1':'O'},\
'HSD':{'HN':'H','HB1':'HB3','OT1':'O'},\
'HSE':{'HN':'H','OT1':'O'},\
'HSP':{'HN':'H','OT1':'O'},\
'ILE':{'CD':'CD1','HN':'H','HG11':'HG13','HD1':'HD11','HD2':'HD12',\
'HD3':'HD13','HB1':'HB3','OT1':'O'},\
'LEU':{'HN':'H','HB1':'HB3','OT1':'O'},\
'LYS':{'HN':'H','HB1':'HB3','HG1':'HG3','HD1':'HD3','HE1':'HE3','OT1':'O'},\
'MET':{'HN':'H','OT1':'O'},\
'PHE':{'HN':'H','HB1':'HB3','OT1':'O'},\
'PRO':{'HN':'H','HB1':'HB3','HG1':'HG3','HD1':'HD3','OT1':'O'},\
'SER':{'HN':'H','HB1':'HB3','HG1':'HG','OT1':'O'},\
'THR':{'HN':'H','OT1':'O'},\
'TRP':{'HN':'H','OT1':'O'},\
'TYR':{'HN':'H','HB1':'HB3','OT1':'O'},\
'VAL':{'HN':'H','OT1':'O'}}
his = {0:{0:'HSD',1:'HSE',2:'HSE'},1:{0:'HSD',1:'HSD',2:'HSE'},2:{0:'HSD',1:'HSD',2:'HSP'}}
def __init__(self,parameter_object,structure_path,mode):
if mode == 'setup':
# TODO: for insulin 2hiu, the Histadines are not corrected to CHARMM's
# three options.
self.params = parameter_object
self.parser = MMCIFParser()
self.strctr = self.parser.get_structure('Center',structure_path)
self.header = mmcifD.MMCIF2Dict(structure_path)
self.Full_Structure = pd.DataFrame()
# For adding peptides and their anchers
self.pep = {}
# NTERM is when building missing residues in the direction of the N-Term
self.NTERM_anchor = {}
# CTERM is when building missing residues in the direction of the C-Term
self.CTERM_anchor = {}
self.contiguous_LL = []
self.hist = {}
elif mode == 'add_linker':
self.params = parameter_object
self.Full_Structure = pd.read_csv(structure_path)
# When csv comes from del_residue use first if.
# when it comes from another add_residue use second if. better way?
if "Unnamed: 0" in self.Full_Structure.columns:
self.Full_Structure.set_index(['Unnamed: 0'],inplace=True)
self.Full_Structure.index.names = ['indx']
elif "indx" in self.Full_Structure.columns:
self.Full_Structure.set_index(['indx'],inplace=True)
# For adding peptides and their anchers
self.pep = {}
# NTERM is when building missing residues in the direction of the N-Term
self.NTERM_anchor = {}
# CTERM is when building missing residues in the direction of the C-Term
self.CTERM_anchor = {}
self.contiguous_LL = []
self.hist = {}
elif mode == 'charmm_input':
self.params = parameter_object
# directory is just for holding pth for output of CSV Super Structure.
self.directory = structure_path
self.Full_Structure = pd.DataFrame()
else:
print('ERROR: Super Structure can only be ran witht the following options:')
print(' setup to convert a cif to a csv or pdb file.')
print(' add_linker to add a linker and search its conformational space.')
print('Try again. Exiting Now!')
sys.exit(1)
def check_models(self):
'''CIF files have multiple models sometimes. It is better to check that they have the same number of atoms than
assume that they do.'''
for k in range(len(self.models)):
for l in range(1,len(self.models)-1):
a = list(self.atom_site_df.label_atom_id[self.atom_site_df.pdbx_PDB_model_num == k])
b = list(self.atom_site_df.label_atom_id[self.atom_site_df.pdbx_PDB_model_num == l])
if len(a) != len(b):
print('ERROR: Models '+k+' and '+l+' have different number of atoms.')
print(' Modify the code to account for this. This structure')
print(' is not processed. Exit Now!')
sys.exit(1)
else:
for m in range(len(a)):
if a[m] != b[m]:
print('ERROR: Models '+k+' and '+l+' have differen atoms that are different.')
print(' '+a[m]+' '+b[m]+' do not match.')
print(' Modify the code to account for this. This structure')
print(' is not processed. Exit Now!')
sys.exit(1)
print('All Models have identical atom types, and in the same order.')
def corrections(self,res,atm):
if res in self.correct:
if atm in self.correct[res]:
return self.correct[res][atm]
else:
return atm
else:
return atm
def inv_corrections(self,res,atm):
if res in self.inv_correct:
if atm in self.inv_correct[res]:
return self.inv_correct[res][atm]
else:
return atm
else:
return atm
def create_super_structure_df_from_CRD_PSF(self, crd_file, psf_file):
# CRD and PSF are list of strings corresponding to every line in the crd and psf files
aa = []
aaid = []
entity_id = []
chain_id = []
atmtyp1 = []
atmtyp2 = []
charg = []
component = []
x = []
y = []
z = []
occupancy = []
B_iso = []
ent = 1
nuc = ['GUA','ADE','CYT','THY','URA']
pro = ['ALA','ARG','ASN','ASP','CYS','GLU','GLN','GLY','HSE','HSD','HSP','ILE','LEU','LYS','MET','PHE','PRO','SER',\
'THR','TRP','TYR','VAL']
if len(crd_file.crd_lines) == len(psf_file.psf_lines):
for i in range(len(crd_file.crd_lines)):
crd_lne = crd_file.crd_lines[i].split()
psf_lne = psf_file.psf_lines[i]
if i == 0:
ent = 1
chn = psf_lne[1]
else:
if chn != psf_lne[1]:
ent += 1
chn = psf_lne[1]
aa.append(crd_lne[2])
#aaid.append(crd_lne[1])
# TODO check that this change won't affect MMGBSA calculations, or of it fixes the bug.
aaid.append(crd_lne[8])
entity_id.append(ent)
chain_id.append(psf_lne[1])
atmtyp1.append(psf_lne[4])
atmtyp2.append(psf_lne[5])
charg.append(psf_lne[6])
#component.append('NA')
x.append(crd_lne[4])
y.append(crd_lne[5])
z.append(crd_lne[6])
occupancy.append(psf_lne[8])
B_iso.append(psf_lne[9])
self.Full_Structure['aa'] = pd.Series(aa)
self.Full_Structure['aaid'] = pd.Series(aaid)
self.Full_Structure['ent_id'] = pd.Series(entity_id)
self.Full_Structure['chain'] = pd.Series(chain_id)
self.Full_Structure['atmtyp1'] = pd.Series(atmtyp1)
self.Full_Structure['atmtyp2'] = pd.Series(atmtyp2)
c_cter = {}
for j in range(len(self.Full_Structure['atmtyp1'])):
resi = self.Full_Structure.loc[j,'aa']
atmo = self.Full_Structure.loc[j,'atmtyp1']
chan = self.Full_Structure.loc[j,'chain']
if resi in nuc:
count = 1
found_atm = False
for k in self.params.AA[resi].atoms:
if atmo in k:
found_atm = True
if count == 1:
component.append('NUC1')
elif count == len(self.params.AA[resi].atoms):
component.append('NUC5')
else:
component.append(('NUC'+str(count)))
count += 1
if not found_atm:
if atmo == 'H5T':
component.append('NUC1')
elif atmo == 'H3T':
component.append('NUC5')
elif resi in pro:
count = 1
found_atm = False
for k in self.params.AA[resi].atoms:
if atmo in k:
found_atm = True
if count == 1:
component.append('AMINO')
elif count == len(self.params.AA[resi].atoms):
component.append('CARBO')
else:
component.append(('SIDE'+str(count)))
count += 1
if not found_atm:
count = 1
for k in self.params.AA['ACE'].atoms:
if atmo in k:
found_atm = True
component.append(('ACE'+str(count)))
count += 1
if not found_atm:
count = 1
for k in self.params.AA['NTER'].atoms:
if atmo in k:
found_atm = True
component.append(('NTER'+str(count)))
count += 1
if not found_atm:
count = 1
for k in self.params.AA['CTER'].atoms:
if atmo in k:
found_atm = True
component.append(('CTER'+str(count)))
if atmo == 'OT1':
c_cter[chan] = j
count += 1
else:
print('ERROR: Amino Acid not found in parameters. Exit now.')
sys.exit(1)
self.Full_Structure['component'] = pd.Series(component)
# Make C part of CTER1
for j in c_cter:
self.Full_Structure.loc[c_cter[j]-1,'component'] = 'CTER1'
self.Full_Structure['charg'] = pd.Series(charg)
mass = []
atmNum = []
atmtyp3 = []
epsilon = []
rmin_half = []
atminfo = []
for i in self.Full_Structure['atmtyp2']:
atmNum.append(self.params.am.MASS[i][0])
mass.append(self.params.am.MASS[i][1])
atmtyp3.append(self.params.am.MASS[i][2])
epsilon.append(self.params.NONBONDED[i][1])
rmin_half.append(self.params.NONBONDED[i][2])
atminfo.append(False)
self.Full_Structure['epsilon'] = pd.Series(epsilon)
self.Full_Structure['rmin_half'] = pd.Series(rmin_half)
self.Full_Structure['atmtyp3'] = pd.Series(atmtyp3)
self.Full_Structure['mass'] = pd.Series(mass)
self.Full_Structure['atmNum'] = pd.Series(atmNum)
self.Full_Structure['aaid'] = self.Full_Structure['aaid'].apply(int)
self.Full_Structure['ent_id'] = self.Full_Structure['ent_id'].apply(int)
self.Full_Structure['mass'] = self.Full_Structure['mass'].apply(float)
#atom_site_df['id'] = atom_site_df['id'].apply(int)
self.Full_Structure['epsilon'] = self.Full_Structure['epsilon'].apply(float)
self.Full_Structure['rmin_half'] = self.Full_Structure['rmin_half'].apply(float)
self.Full_Structure['atmNum'] = self.Full_Structure['atmNum'].apply(int)
self.Full_Structure['aainfo1'] = pd.Series(atminfo)
self.Full_Structure['atminfo1'] = pd.Series(atminfo)
self.Full_Structure['x1'] = pd.Series(x)
self.Full_Structure['y1'] = pd.Series(y)
self.Full_Structure['z1'] = pd.Series(z)
self.Full_Structure['occupancy'] = pd.Series(occupancy)
self.Full_Structure['B_factor'] = pd.Series(B_iso)
# Makes sure that some columns are the right type
self.Full_Structure['x1'] = self.Full_Structure['x1'].apply(float)
self.Full_Structure['y1'] = self.Full_Structure['y1'].apply(float)
self.Full_Structure['z1'] = self.Full_Structure['z1'].apply(float)
self.Full_Structure['charg'] = self.Full_Structure['charg'].apply(float)
def create_column_with_ztranslated(self, new_column_name, from_column, chains_translated, distance_translated):
z_d = []
displaced_chains = chains_translated.split(',')
for i in self.Full_Structure.index:
if self.Full_Structure.loc[i,'chain'] in displaced_chains:
z_d.append(self.Full_Structure.loc[i,from_column]+distance_translated)
else:
z_d.append(self.Full_Structure.loc[i,from_column])
self.Full_Structure[new_column_name] = pd.Series(z_d)
self.Full_Structure[new_column_name] = self.Full_Structure[new_column_name].apply(float)
def add_column_to_super_structure_df_from_CRD_PSF(self, column_name, from_file):
if column_name in ['sa_z0','sa_z500']:
temp = np.zeros(self.Full_Structure.shape[0])
for i in open(from_file, 'r').read().split('\n'):
ii = i.strip().split()
if len(ii) > 0:
temp[int(ii[0])-1] = float(ii[1])
self.Full_Structure[column_name] = pd.Series(temp)
else:
temp = []
for i in open(from_file, 'r').read().split('\n'):
ii = i.strip().split()
if len(ii) > 0:
temp.append(ii[1])
self.Full_Structure[column_name] = pd.Series(temp)
self.Full_Structure[column_name] = self.Full_Structure[column_name].apply(float)
def get_histidine_info(self,ent):
#for i in entity_poly_df.entity_id:
self.hist = {}
hist_entity = {}
for i in ent.id:
hist_chain = {}
for chain in str(self.entity_poly_df.pdbx_strand_id[self.entity_poly_df.entity_id == i][int(i)-1]).split(","):
resid = 1
for j in self.entity_poly_seq_df.index[self.entity_poly_seq_df.entity_id == i]:
if self.entity_poly_seq_df.loc[j,'mon_id'] == 'HIS':
temp = list(self.atom_site_df.label_atom_id[(self.atom_site_df.pdbx_PDB_model_num == '1') & \
(self.atom_site_df.label_entity_id == i) & \
(self.atom_site_df.label_seq_id == self.entity_poly_seq_df.loc[j,'num'])])
HDcount = 0
HEcount = 0
for k in temp:
if k[0] == 'H':
if len(k) > 1:
if k[1] == 'E':
HEcount += 1
elif k[1] == 'D':
HDcount += 1
hist_chain[resid] = (resid,HDcount,HEcount)
resid += 1
hist_entity[chain] = hist_chain
self.hist[i] = hist_entity
def create_super_structure_df(self):
# Find HIS and analyze for the next step to have the HIS ready to identify
ent = self.entity_df[['id','pdbx_number_of_molecules']][self.entity_df.type == 'polymer']
self.get_histidine_info(ent)
###########################################################################
# create a Data Frame from sequence information. Atom types and
# charges are added to the sequence information.
aa = []
aaid = []
entity_id = []
chain_id = []
atmtyp1 = []
atmtyp2 = []
charg = []
component = []
add_ACE = {} # TODO this is boolean not Dictionary
add_CTR = {} # TODO this is boolean not Dictionary
beg_seq = {}
end_seq = {}
# It is possible to have more than one chain per entity.
for entid in ent.id:
for chain in str(self.entity_poly_df.pdbx_strand_id[self.entity_poly_df.entity_id == entid][int(entid)-1]).split(","):
beg_seq[chain] = int(self.struct_ref_seq_df.seq_align_beg[self.struct_ref_seq_df.pdbx_strand_id == chain])
end_seq[chain] = int(self.struct_ref_seq_df.seq_align_end[self.struct_ref_seq_df.pdbx_strand_id == chain])
add_ACE = True
add_CTR = False
# ASSUME ACE AND CTER are always added, unless ACE or CTER are explicitly present.
# Assign parameters to all atoms with coordinates
# include atoms without coordinate that could be missing in cystal structure
# According to residue type and CHARMM top27 force filed.
for j in self.entity_poly_seq_df.index[self.entity_poly_seq_df.entity_id == entid]:
# HIS is a special case that have three variants in CHARMM
if self.entity_poly_seq_df.loc[j,'mon_id'] == 'HIS':
for k in self.hist[entid]:
for l in self.hist[entid][k]:
HD = self.hist[entid][k][l][1]
HE = self.hist[entid][k][l][2]
if (HD <= 2) and (HE <= 2):
# the dictionary simplifies the algorithm
chrm = self.his[HD][HE]
else:
print('Error: Problem with histadine assignment')
print('#HD =',HD,' #HE=',HE)
print(' Program exit.')
sys.exit(1)
pdbx = 'HIS'
else:
chrm = self.entity_poly_seq_df.ix[j]['mon_id']
pdbx = self.entity_poly_seq_df.ix[j]['mon_id']
# Set a flag that tells if a CYS is forming a disulfide bond
is_disulfide = False
if self.entity_poly_seq_df.loc[j,'mon_id'] == 'CYS':
for k in self.struct_conn_df.index:
if self.struct_conn_df.loc[k,'conn_type_id'] == 'disulf':
if self.struct_conn_df.loc[k,'ptnr1_label_asym_id'] == chain:
if self.struct_conn_df.loc[k,'ptnr1_label_comp_id'] == 'CYS':
if self.struct_conn_df.loc[k,'ptnr1_label_seq_id'] == self.entity_poly_seq_df.loc[j,'num']:
is_disulfide = True
if self.struct_conn_df.loc[k,'ptnr2_label_asym_id'] == chain:
if self.struct_conn_df.loc[k,'ptnr2_label_comp_id'] == 'CYS':
if self.struct_conn_df.loc[k,'ptnr2_label_seq_id'] == self.entity_poly_seq_df.loc[j,'num']:
is_disulfide = True
# TODO, the program is designed to detect disulfide bonds from information in the CIF file.
# The line following this comment will ignore the presence of disulfide bonds because they
# are dependent of reducing or oxidicing environment. Whether a HG1 atom is present
# or not should be based on design desitions for protein. Insulin forms in the cytoplasm
# and it should be reduce for accurate calculations.
is_disulfide = False
snum = self.entity_poly_seq_df.loc[j,'num']
ent_id = self.entity_poly_seq_df.loc[j,'entity_id']
# print(entid,chain,j,chrm,pdbx,snum,ent_id)
# A peptide is a zwitterion at near neutral pH, protonated at low pH
# (NH3+, CO2H), or deprotonated at high pH (NH2, CO2-). For a peptide
# with uncharged ends, common patches to apply (via generate) are ACE
# (NYacetyl) and CT3 (C-terminal methylamine);
# We will modify all crytal structure initial and last resude to have
# N-terminal ACE and C-terminal CTER.
if int(snum) == end_seq[chain]:
add_CTR = True
if chrm in self.params.AA:
if chrm == 'ACE':
# THis assuemes that ACE is always first in the sequence
for k in self.params.AA['ACE'].atoms:
for l in k:
aa.append(list(self.entity_poly_seq_df.mon_id[(self.entity_poly_seq_df.entity_id == entid) & \
(self.entity_poly_seq_df.num == str(beg_seq[chain]))])[0])
aaid.append(snum)
entity_id.append(ent_id)
chain_id.append(chain)
atmtyp1.append(l)
atmtyp2.append(self.params.AA['ACE'].atom_type[l])
charg.append(self.params.AA['ACE'].atom_chrg[l])
component.append('ACETY')
add_ACE = False
else:
if add_ACE:
for k in self.params.AA['ACE'].atoms:
for l in k:
aa.append(pdbx)
aaid.append(snum)
entity_id.append(ent_id)
chain_id.append(chain)
atmtyp1.append(l)
atmtyp2.append(self.params.AA['ACE'].atom_type[l])
charg.append(self.params.AA['ACE'].atom_chrg[l])
component.append('ACETY')
add_ACE = False
comp = 1
for k in self.params.AA[chrm].atoms:
for l in k:
if l == 'HG1' and chrm == 'CYS' and is_disulfide:
pass
else:
aa.append(pdbx)
aaid.append(snum)
entity_id.append(ent_id)
chain_id.append(chain)
atmtyp1.append(self.corrections(chrm,l))
atmtyp2.append(self.params.AA[chrm].atom_type[self.corrections(chrm,l)])
charg.append(self.params.AA[chrm].atom_chrg[self.corrections(chrm,l)])
if comp == 1:
component.append('AMINO')
else:
if l in ['C','O']:
component.append('CARBO')
else:
component.append(('SIDE'+str(comp)))
comp += 1
else:
if add_CTR:
comp = 1
for k in self.params.AA[chrm].atoms:
for l in k:
if l == 'HG1' and chrm == 'CYS' and is_disulfide:
pass
else:
aa.append(pdbx)
aaid.append(snum)
entity_id.append(ent_id)
chain_id.append(chain)
if l == 'O':
atmtyp1.append('OT1')
atmtyp2.append(self.params.AA['CTER'].atom_type['OT1'])
charg.append(self.params.AA['CTER'].atom_chrg['OT1'])
component.append('CTERM')
elif l == 'C':
atmtyp1.append('C')
atmtyp2.append(self.params.AA['CTER'].atom_type['C'])
charg.append(self.params.AA['CTER'].atom_chrg['C'])
component.append('CTERM')
else:
atmtyp1.append(self.corrections(chrm,l))
atmtyp2.append(self.params.AA[chrm].atom_type[self.corrections(chrm,l)])
charg.append(self.params.AA[chrm].atom_chrg[self.corrections(chrm,l)])
if comp == 1:
component.append('AMINO')
else:
if l in ['C','O']:
component.append('CARBO')
else:
component.append(('SIDE'+str(comp)))
comp += 1
aa.append(pdbx)
aaid.append(snum)
entity_id.append(ent_id)
chain_id.append(chain)
atmtyp1.append('OT2')
atmtyp2.append(self.params.AA['CTER'].atom_type['OT2'])
charg.append(self.params.AA['CTER'].atom_chrg['OT2'])
component.append('CTERM')
else:
comp = 1
for k in self.params.AA[chrm].atoms:
for l in k:
if l == 'HG1' and chrm == 'CYS' and is_disulfide:
pass
else:
aa.append(pdbx)
aaid.append(snum)
entity_id.append(ent_id)
chain_id.append(chain)
atmtyp1.append(self.corrections(chrm,l))
atmtyp2.append(self.params.AA[chrm].atom_type[self.corrections(chrm,l)])
charg.append(self.params.AA[chrm].atom_chrg[self.corrections(chrm,l)])
if comp == 1:
component.append('AMINO')
else:
if l in ['C','O']:
component.append('CARBO')
else:
component.append(('SIDE'+str(comp)))
comp += 1
self.Full_Structure['aa'] = pd.Series(aa)
self.Full_Structure['aaid'] = pd.Series(aaid)
self.Full_Structure['ent_id'] = pd.Series(entity_id)
self.Full_Structure['chain'] = pd.Series(chain_id)
self.Full_Structure['atmtyp1'] = pd.Series(atmtyp1)
self.Full_Structure['atmtyp2'] = pd.Series(atmtyp2)
self.Full_Structure['component'] = pd.Series(component)
self.Full_Structure['charg'] = pd.Series(charg)
###########################################################################
# Add atomtyp, masses and atmNumber to each atom type
mass = []
atmNum = []
atmtyp3 = []
epsilon = []
rmin_half = []
atminfo = []
for i in self.Full_Structure['atmtyp2']:
atmNum.append(self.params.am.MASS[i][0])
mass.append(self.params.am.MASS[i][1])
atmtyp3.append(self.params.am.MASS[i][2])
epsilon.append(self.params.NONBONDED[i][1])
rmin_half.append(self.params.NONBONDED[i][2])
atminfo.append(False)
self.Full_Structure['epsilon'] = pd.Series(epsilon)
self.Full_Structure['rmin_half'] = pd.Series(rmin_half)
self.Full_Structure['atmtyp3'] = pd.Series(atmtyp3)
self.Full_Structure['mass'] = pd.Series(mass)
self.Full_Structure['atmNum'] = pd.Series(atmNum)
###########################################################################
# DF Type correction.
self.Full_Structure['aaid'] = self.Full_Structure['aaid'].apply(int)
self.Full_Structure['ent_id'] = self.Full_Structure['ent_id'].apply(int)
self.Full_Structure['mass'] = self.Full_Structure['mass'].apply(float)
self.atom_site_df['id'] = self.atom_site_df['id'].apply(int)
self.Full_Structure['epsilon'] = self.Full_Structure['epsilon'].apply(float)
self.Full_Structure['rmin_half'] = self.Full_Structure['rmin_half'].apply(float)
self.Full_Structure['atmNum'] = self.Full_Structure['atmNum'].apply(int)
###########################################################################
# Distinguish between imputed and available structural information T or F
entities = list(set(self.Full_Structure.ent_id))
for g in self.models:
x = []
y = []
z = []
occupancy = []
B_iso = []
for h in entities:
for i in self.Full_Structure.index[self.Full_Structure.ent_id == h]:
aa1 = self.Full_Structure.ix[i]['aa']
type1 = self.inv_corrections(aa1,self.Full_Structure.ix[i]['atmtyp1'])
aaid1 = str(self.Full_Structure.ix[i]['aaid'])
chn = self.Full_Structure.ix[i]['chain']
cx = self.atom_site_df.Cartn_x[(self.atom_site_df.label_atom_id == type1) & \
(self.atom_site_df.label_comp_id == aa1) & \
(self.atom_site_df.label_seq_id == aaid1) & \
(self.atom_site_df.pdbx_PDB_model_num == g) & \
(self.atom_site_df.label_entity_id == str(h)) & \
(self.atom_site_df.label_asym_id == chn)]
if len(cx) == 1:
x.append(float(cx))
else:
x.append(float('nan'))
cy = self.atom_site_df.Cartn_y[(self.atom_site_df.label_atom_id == type1) & \
(self.atom_site_df.label_comp_id == aa1) & \
(self.atom_site_df.label_seq_id == aaid1) & \
(self.atom_site_df.pdbx_PDB_model_num == g) & \
(self.atom_site_df.label_entity_id == str(h)) & \
(self.atom_site_df.label_asym_id == chn)]
if len(cy) == 1:
y.append(float(cy))
else:
y.append(float('nan'))
cz = self.atom_site_df.Cartn_z[(self.atom_site_df.label_atom_id == type1) & \
(self.atom_site_df.label_comp_id == aa1) & \
(self.atom_site_df.label_seq_id == aaid1) & \
(self.atom_site_df.pdbx_PDB_model_num == g) & \
(self.atom_site_df.label_entity_id == str(h)) & \
(self.atom_site_df.label_asym_id == chn)]
if len(cz) == 1:
z.append(float(cz))
else:
z.append(float('nan'))
ocu = self.atom_site_df.occupancy[(self.atom_site_df.label_atom_id == type1) & \
(self.atom_site_df.label_comp_id == aa1) & \
(self.atom_site_df.label_seq_id == aaid1) & \
(self.atom_site_df.pdbx_PDB_model_num == g) & \
(self.atom_site_df.label_entity_id == str(h)) & \
(self.atom_site_df.label_asym_id == chn)]
if len(ocu) == 1:
occupancy.append(float(ocu))
else:
occupancy.append('nan')
bfc = self.atom_site_df.B_iso_or_equiv[(self.atom_site_df.label_atom_id == type1) & \
(self.atom_site_df.label_comp_id == aa1) & \
(self.atom_site_df.label_seq_id == aaid1) & \
(self.atom_site_df.pdbx_PDB_model_num == g) & \
(self.atom_site_df.label_entity_id == str(h)) & \
(self.atom_site_df.label_asym_id == chn)]
if len(bfc) == 1:
B_iso.append(float(bfc))
else:
B_iso.append(float('nan'))
self.Full_Structure['aainfo'+g] = pd.Series(atminfo)
self.Full_Structure['atminfo'+g] = pd.Series(atminfo)
self.Full_Structure['x'+g] = pd.Series(x)
self.Full_Structure['y'+g] = pd.Series(y)
self.Full_Structure['z'+g] = pd.Series(z)
self.Full_Structure['occupancy'] = pd.Series(occupancy)
self.Full_Structure['B_factor'] = pd.Series(B_iso)
###########################################################################
#self.Full_Structure['B_factor'] = self.Full_Structure['B_factor'].apply(float)
#self.Full_Structure['occupancy'] = self.Full_Structure['occupancy'].apply(float)
###########################################################################
# For each models, set to True for atom coordinates with structural info
# from the CIF file. pd.notnull gives True is not null, false if null
for h in self.models:
x = pd.notnull(self.Full_Structure['x'+h])
y = pd.notnull(self.Full_Structure['y'+h])
z = pd.notnull(self.Full_Structure['z'+h])
# This loop checks that an atom location is obtained from x-struct
# or added from rebuilding missing peptides
for i in range(self.Full_Structure.shape[0]):
if x[i] & y[i] & z[i]:
self.Full_Structure.loc[i,'atminfo'+h] = True
# This loop, aa with all atms added from rebuilding missing peptides
# are considered Reabuilt (false). From x-structure (True)
for i in self.entity_poly_df.entity_id:
for j in self.entity_poly_seq_df.num[self.entity_poly_seq_df.entity_id == i]:
set_true = False
for k in self.Full_Structure['atminfo'+h][(self.Full_Structure.ent_id == int(i)) & \
(self.Full_Structure.aaid == int(j))]:
if k:
set_true = True
if set_true:
for l in self.Full_Structure.index[(self.Full_Structure.ent_id == int(i)) & \
(self.Full_Structure.aaid == int(j))]:
self.Full_Structure.loc[l,'aainfo'+h] = True
    def get_missing_aa_listlist(self,mdl,ent,chn):
        """Classify the residues of one chain as present or missing and group
        the missing ones into runs of consecutive residue numbers.

        A residue counts as present when ANY of its atoms carries structural
        information in the 'atminfo<mdl>' column of Full_Structure.

        Side effects (no return value):
            self.missing       -- residue ids with no structural info in model `mdl`.
            self.present       -- residue ids that do have structural info.
            self.contiguous_LL -- list of lists; each inner list is one run of
                                  consecutive missing residue ids.  When nothing
                                  is missing this holds a single empty list
                                  (callers test for that with len checks).

        Args:
            mdl: model number as a string (suffix of the 'atminfo' column).
            ent: entity id (convertible to int).
            chn: chain identifier.
        """
        self.missing = []
        self.present = []
        # Walk the full declared sequence of the entity; for each residue,
        # check whether at least one of its atoms has structural info.
        for j in self.entity_poly_seq_df.num[(self.entity_poly_seq_df.entity_id == ent)]:
            set_true = False
            for k in self.Full_Structure['atminfo'+mdl][(self.Full_Structure.ent_id == int(ent)) & \
                                                        (self.Full_Structure.aaid == int(j)) & \
                                                        (self.Full_Structure.chain == chn)]:
                if k:
                    set_true = True
            if not set_true:
                self.missing.append(list(self.Full_Structure.aaid[(self.Full_Structure.ent_id == int(ent)) & \
                                                                  (self.Full_Structure.aaid == int(j)) &\
                                                                  (self.Full_Structure.chain == chn)])[0])
            else:
                self.present.append(list(self.Full_Structure.aaid[(self.Full_Structure.ent_id == int(ent)) & \
                                                                  (self.Full_Structure.aaid == int(j)) &\
                                                                  (self.Full_Structure.chain == chn)])[0])
        # Group self.missing (already in ascending sequence order) into runs
        # of consecutive ids; `temp` tracks the previously seen id.
        temp = 0
        contiguous_temp = []
        self.contiguous_LL = []
        for i in range(len(self.missing)):
            if i == 0:
                temp = self.missing[i]
                contiguous_temp.append(temp)
            else:
                if self.missing[i] == (temp+1):
                    contiguous_temp.append(self.missing[i])
                    temp = self.missing[i]
                else:
                    # Gap found: close the current run and start a new one.
                    self.contiguous_LL.append(contiguous_temp)
                    temp = self.missing[i]
                    contiguous_temp = []
                    contiguous_temp.append(self.missing[i])
        # Flush the final (possibly empty) run.
        self.contiguous_LL.append(contiguous_temp)
def build_missing_aa(self):
for h in self.models:
model = str(h)
NTER_ent = {}
CTER_ent = {}
for hh in self.entity_poly_df.entity_id:
entity_id = int(hh)
NTER_ch = {}
CTER_ch = {}
NTER_ent[hh] = NTER_ch
CTER_ent[hh] = CTER_ch
# TODO would entity_id-1 be right all the time?
for hhh in str(self.entity_poly_df.pdbx_strand_id[self.entity_poly_df.entity_id == hh][entity_id-1]).split(","):
self.get_missing_aa_listlist(model,hh,hhh)
CTER_ent[hh][hhh] = False
NTER_ent[hh][hhh] = False
# This two ifs check that the list of lists is not empty
if((len(self.contiguous_LL) != 1) and (len(self.contiguous_LL[0]) != 0)):
for i in range(len(self.contiguous_LL)):
if i == 0 and self.contiguous_LL[i][0] == 1:
for j in self.contiguous_LL[i][::-1]:
place = self.entity_poly_seq_df['mon_id'][j-1]
if place == 'HIS':
place = self.his[self.hist[hh]['A'][j][1]][self.hist[hh]['A'][j][2]]
if place == 'ACE':
NTER_ent[hh][hhh] = True
if place == 'CTER':
print('ERROR: Structure attempts to place a CTER with a CTER_ancher.')
print(' CTER has no CTERM_ancher by definition because CTER caps')
print(' ,or ends, the chain. With nothing to ancher beyond that.')
sys.exit(1)
self.fit_coordinates('Ndir',j,entity_id,hhh,model,place)
else:
for j in self.contiguous_LL[i][:]:
place = self.entity_poly_seq_df['mon_id'][j-1]
if place == 'HIS':
place = self.his[self.hist[hh]['A'][j][1]][self.hist[hh]['A'][j][2]]
if place == 'CTER':
CTER_ent[hh][hhh] = True
if place == 'ACE':
print('ERROR: Structure attempts to place a ACE with a NTER_ancher.')
print(' ACE has no NTERM_ancher by definition because ACE caps')
print(' ,or begins, the chain. With nothing to ancher before that.')
sys.exit(1)
self.fit_coordinates('Cdir',j,entity_id,hhh,model,place)
# TODO This could be added to the above function somehow. I did it separately
# because the mess of arrays made it hard to make together.
for h in self.models:
model = str(h)
min_aaid = {}
max_aaid = {}
for hh in self.entity_poly_df.entity_id:
entity_id = int(hh)
min_ch = {}
max_ch = {}
min_aaid[hh] = min_ch
max_aaid[hh] = max_ch
for hhh in str(self.entity_poly_df.pdbx_strand_id[self.entity_poly_df.entity_id == hh][entity_id-1]).split(","):
for j in self.entity_poly_seq_df.num[(self.entity_poly_seq_df.entity_id == hh)]:
if int(j) == 1:
min_aaid[hh][hhh] = int(j)
max_aaid[hh][hhh] = int(j)
if j > max_aaid:
max_aaid[hh][hhh] = int(j)
for hh in self.entity_poly_df.entity_id:
entity_id = int(hh)
for hhh in str(self.entity_poly_df.pdbx_strand_id[self.entity_poly_df.entity_id == hh][entity_id-1]).split(","):
if not NTER_ent[hh][hhh]:
self.fit_coordinates('NTER',min_aaid[hh][hhh],entity_id,hhh,model,'ACE')
if not CTER_ent[hh][hhh]:
self.fit_coordinates('CTER',max_aaid[hh][hhh],entity_id,hhh,model,'CTER')
    def fit_coordinates(self,mod,aaid,entity_id,chain,model,place):
        """Superimpose the template residue `place` onto the structure and
        write its transformed coordinates into Full_Structure at residue
        `aaid` (occupancy and B-factor are zeroed to mark rebuilt atoms).

        The template's anchor backbone atoms (N, CA, C) are fitted onto the
        backbone of an adjacent, already-resolved residue; the resulting
        rotation/translation is then applied to all template atoms.

        Args:
            mod: how the anchor residue number relates to `aaid` --
                 'Ndir' anchors on aaid+1 (building toward the N terminus),
                 'Cdir' anchors on aaid-1 (building toward the C terminus),
                 'NTER'/'CTER' anchor on aaid itself (terminal caps).
            aaid: residue number whose coordinates are being (re)built.
            entity_id: entity the residue belongs to.
            chain: chain identifier.
            model: model number as a string (suffix of the x/y/z columns).
            place: residue/cap name used to look up template atoms in
                   self.pep and the anchor dictionaries.
        """
        if mod == 'Ndir':
            aaid2 = aaid + 1
        elif mod == 'Cdir':
            aaid2 = aaid - 1
        elif mod == 'NTER':
            aaid2 = aaid
        elif mod == 'CTER':
            aaid2 = aaid
        else:
            print('ERROR: mod variable for fit_coordinates does not exists.')
            print('       Only Ndir, Cdir, NTER and CTER are valid.')
            print('       Exiting program before finished.')
            sys.exit(1)
        # fixed1: backbone atoms of the anchor residue read from Full_Structure.
        # moving1: matching template anchor atoms (fitted onto fixed1).
        # moving2: all atoms of the template residue being placed.
        fixed1 = []
        moving1 = []
        moving2 = []
        for k in self.Full_Structure.index[(self.Full_Structure.aaid == aaid2) &\
                                           (self.Full_Structure.ent_id == entity_id) &\
                                           (self.Full_Structure.chain == chain)]:
            temp = self.Full_Structure.loc[k,'atmtyp1']
            if temp == 'N' or temp == 'CA' or temp =='C':
                cord = np.array([self.Full_Structure.loc[k,'x'+model],\
                                 self.Full_Structure.loc[k,'y'+model],\
                                 self.Full_Structure.loc[k,'z'+model]],\
                                 dtype=float)
                fixed1.append(Atom(temp,cord,0.0,0.0,1,temp+'k',k))
        # Pick the template anchor set: when building toward the N terminus
        # the anchor sits C-ward of the template (CTERM_anchor) and vice versa.
        # NOTE(review): if an anchor set lacks N, CA or C, the corresponding
        # tempN/tempCA/tempC stays unbound and the appends below raise
        # NameError -- confirm anchors always contain all three atoms.
        if (mod == 'Ndir') | (mod == 'NTER'):
            for l in self.CTERM_anchor[place]:
                if l == 'N':
                    tempN = Atom(self.CTERM_anchor[place][l].get_id(),\
                                 self.CTERM_anchor[place][l].get_coord(),\
                                 0.0,0.0,1,\
                                 self.CTERM_anchor[place][l].get_id(),\
                                 self.CTERM_anchor[place][l].get_id())
                elif l == 'CA':
                    tempCA = Atom(self.CTERM_anchor[place][l].get_id(),\
                                  self.CTERM_anchor[place][l].get_coord(),\
                                  0.0,0.0,1,\
                                  self.CTERM_anchor[place][l].get_id(),\
                                  self.CTERM_anchor[place][l].get_id())
                elif l =='C':
                    tempC = Atom(self.CTERM_anchor[place][l].get_id(),\
                                 self.CTERM_anchor[place][l].get_coord(),\
                                 0.0,0.0,1,\
                                 self.CTERM_anchor[place][l].get_id(),\
                                 self.CTERM_anchor[place][l].get_id())
        elif (mod == 'Cdir') | (mod == 'CTER'):
            for l in self.NTERM_anchor[place]:
                if l == 'N':
                    tempN = Atom(self.NTERM_anchor[place][l].get_id(),\
                                 self.NTERM_anchor[place][l].get_coord(),\
                                 0.0,0.0,1,\
                                 self.NTERM_anchor[place][l].get_id(),\
                                 self.NTERM_anchor[place][l].get_id())
                elif l == 'CA':
                    tempCA = Atom(self.NTERM_anchor[place][l].get_id(),\
                                  self.NTERM_anchor[place][l].get_coord(),\
                                  0.0,0.0,1,\
                                  self.NTERM_anchor[place][l].get_id(),\
                                  self.NTERM_anchor[place][l].get_id())
                elif l =='C':
                    tempC = Atom(self.NTERM_anchor[place][l].get_id(),\
                                 self.NTERM_anchor[place][l].get_coord(),\
                                 0.0,0.0,1,\
                                 self.NTERM_anchor[place][l].get_id(),\
                                 self.NTERM_anchor[place][l].get_id())
        else:
            # Unreachable: mod was already validated above.
            print('Error: mod not found. Program will exit.')
            sys.exit(1)
        moving1.append(tempN)
        moving1.append(tempCA)
        moving1.append(tempC)
        # Copies of every template atom; these receive the final coordinates.
        for m in self.pep[place]:
            moving2.append(Atom(self.pep[place][m].get_id(),\
                                self.pep[place][m].get_coord(),\
                                0.0,0.0,1,\
                                self.pep[place][m].get_id(),\
                                self.pep[place][m].get_id()))
        # Least-squares fit of the anchor atoms, then apply the same
        # transform to the whole template residue.
        sup = Superimposer()
        sup.set_atoms(fixed1,moving1)
        sup.apply(moving1)
        sup.apply(moving2)
        # TODO: It is here where we need to get some angles and place the added amino acid in the right conformation
        #       relative to the planar shape of the amino and carbonyl gropus.
        # Map (name-corrected) atom names of residue aaid to their row index.
        moving3 = {}
        for k in self.Full_Structure.index[(self.Full_Structure.aaid == aaid) &\
                                           (self.Full_Structure.ent_id == entity_id) &\
                                           (self.Full_Structure.chain == chain)]:
            moving3[self.inv_corrections(place,self.Full_Structure.loc[k,'atmtyp1'])] = k
        # Write the fitted coordinates back; zero occupancy/B-factor flags
        # these atoms as rebuilt rather than experimentally observed.
        for i in moving2:
            self.Full_Structure.loc[moving3[self.inv_corrections(place,i.get_id())],'x'+model] = i.get_coord()[0]
            self.Full_Structure.loc[moving3[self.inv_corrections(place,i.get_id())],'y'+model] = i.get_coord()[1]
            self.Full_Structure.loc[moving3[self.inv_corrections(place,i.get_id())],'z'+model] = i.get_coord()[2]
            self.Full_Structure.loc[moving3[self.inv_corrections(place,i.get_id())],'occupancy'] = 0.0
            self.Full_Structure.loc[moving3[self.inv_corrections(place,i.get_id())],'B_factor'] = 0.0
    def delete_aa(self,aaid,ent_id,chain,aa):
        """Remove the atoms of residue (or terminal group) `aa` at position
        `aaid` on `chain` from Full_Structure, across all models.

        For 'CTER' only the OT2 atom is dropped; the surviving C/OT1 pair is
        re-typed below as a plain backbone carbonyl (CARBO group) with the
        corresponding CHARMM charges.  Exits the program when `aa` has no
        entry in the loaded parameter set.

        Args:
            aaid: residue number to delete.
            ent_id: entity id (currently unused in the filters -- see NOTE).
            chain: chain identifier.
            aa: residue or terminal name as found in the parameter file.
        """
        del_atoms = []
        if aa in self.params.AA:
            # Collect the atom names belonging to this residue/terminal.
            for i in range(len(self.params.AA[aa].atoms)):
                if aa == 'CTER':
                    if 'OT2' in self.params.AA[aa].atoms[i]:
                        del_atoms = del_atoms + ['OT2']
                else:
                    del_atoms = del_atoms + self.params.AA[aa].atoms[i]
        else:
            print('ERROR: Amino Acid or Terminal '+aa+' is not found in parameters for deletion.')
            print('       '+aa+' must be in the parameter filed read.')
            print('       Exiting Now!')
            sys.exit(1)
        # Keep every row that does NOT match (aaid AND chain AND atom name).
        for i in del_atoms:
            self.Full_Structure = self.Full_Structure[(self.Full_Structure.aaid != aaid) |\
                                                      (self.Full_Structure.chain != chain) |\
                                                      (self.Full_Structure.atmtyp1 != i)]
        # When the CTER is deleted, the group parameters have to be changed from CTERM to CARBONYL.
        if aa == 'CTER':
            indx = self.Full_Structure.index[(self.Full_Structure.aaid == aaid) &\
                                             (self.Full_Structure.atmtyp1 == 'C') &\
                                             (self.Full_Structure.chain == chain)][0]
            self.Full_Structure.loc[indx,['atmtyp2']] = 'C'
            self.Full_Structure.loc[indx,['component']] = 'CARBO'
            self.Full_Structure.loc[indx,['charg']] = 0.51
            self.Full_Structure.loc[indx,['epsilon']] = -0.11
            self.Full_Structure.loc[indx,['rmin_half']] = 2.0
            indx = self.Full_Structure.index[(self.Full_Structure.aaid == aaid) &\
                                             (self.Full_Structure.atmtyp1 == 'OT1') &\
                                             (self.Full_Structure.chain == chain)][0]
            self.Full_Structure.loc[indx,['atmtyp1']] = 'O'
            self.Full_Structure.loc[indx,['atmtyp2']] = 'O'
            self.Full_Structure.loc[indx,['component']] = 'CARBO'
            self.Full_Structure.loc[indx,['charg']] = -0.51
        # NOTE: "(self.Full_Structure.ent_id != ent_id)" was deliberately removed from
        # the filters above: deletions must apply to all models, or the models
        # would become inconsistent with each other.
        self.Full_Structure = self.Full_Structure.reset_index(drop=True)
    def build_pep_and_anchers(self,p1):
        """Build template-atom and anchor dictionaries from a reference
        peptide structure `p1`.

        An anchor is a set of backbone atoms (N, CA, C) used only for
        alignment -- they are not the atoms being attached; they come from
        the previous or next residue in the peptide file.  Populates:
            self.pep          -- residue name -> {atom name: Atom} templates,
                                 plus the 'ACE' and 'CTER' cap groups.
            self.NTERM_anchor -- backbone atoms N-ward of each residue.
            self.CTERM_anchor -- backbone atoms C-ward of each residue.

        Args:
            p1: parsed peptide structure whose residues are numbered 1..n,
                with the caps at positions 1 and n.
        """
        pep_order = []
        first = {}
        last = {}
        ACE = {}
        CTER = {}
        # Number of residues in the peptide (ids are taken from the file).
        h = [g.get_id()[1] for g in p1.get_residues()]
        pep_naa = len(h)
        for j in p1.get_residues():
            # Interior residues become full atom templates, in file order.
            if j.get_id()[1] > 1 and j.get_id()[1] < pep_naa:
                atms = {}
                for k in j.get_atom():
                    atms[k.get_name()] = k
                self.pep[j.get_resname()] = atms
                pep_order.append(j.get_resname())
            # First residue: its backbone seeds `first`; ACE cap atoms kept.
            if j.get_id()[1] == 1:
                for k in j.get_atom():
                    if k.get_name() in ['N','CA','C']:
                        first[k.get_name()] = k
                    if k.get_name() in ['CAY','HY1','HY2','HY3','CY','OY']:
                        ACE[k.get_name()] = k
            # Last residue: its backbone seeds `last`; CTER cap atoms kept.
            if j.get_id()[1] == pep_naa:
                for k in j.get_atom():
                    if k.get_name() in ['N','CA','C']:
                        last[k.get_name()] = k
                    if k.get_name() in ['C','OT1','OT2']:
                        CTER[k.get_name()] = k
        self.pep['ACE'] = ACE
        self.pep['CTER'] = CTER
        # Anchors: for each interior residue, the N-ward anchor comes from
        # its predecessor's backbone and the C-ward anchor from its
        # successor's backbone; the chain ends reuse `first`/`last`.
        for i in range(len(pep_order)):
            if i == 0:
                self.NTERM_anchor[pep_order[i]] = first
                atms = {}
                for k in self.pep[pep_order[i+1]]:
                    if k in ['N','CA','C']:
                        atms[k] = self.pep[pep_order[i+1]][k]
                self.CTERM_anchor[pep_order[i]] = atms
                self.CTERM_anchor['ACE'] = first
            elif i == (len(pep_order)-1):
                self.CTERM_anchor[pep_order[i]] = last
                atms = {}
                for k in self.pep[pep_order[i-1]]:
                    if k in ['N','CA','C']:
                        atms[k] = self.pep[pep_order[i-1]][k]
                self.NTERM_anchor[pep_order[i]] = atms
                self.NTERM_anchor['CTER'] = last
            else:
                atms = {}
                for k in self.pep[pep_order[i+1]]:
                    if k in ['N','CA','C']:
                        atms[k] = self.pep[pep_order[i+1]][k]
                self.CTERM_anchor[pep_order[i]] = atms
                atms = {}
                for k in self.pep[pep_order[i-1]]:
                    if k in ['N','CA','C']:
                        atms[k] = self.pep[pep_order[i-1]][k]
                self.NTERM_anchor[pep_order[i]] = atms
def check_residue_parameters(self):
"""Checks that aminoacids from structures in the rcsb.org database are
present int CHARMM parameter file. Use as a filter."""
parameters_complete = False
for i in self.strctr.get_models():
for j in i.get_chains():
for k in j.get_residues():
if k.get_resname() not in self.params.AA:
# HIS are excluded to be exchanged by HSD, HSP or HSE
if k.get_resname() != 'HIS':
print('Missing parameters for residue '+k.get_resname()+\
' in model '+str(i.get_id())+' chain '+str(j.get_id()))
else:
parameters_complete = False
else:
parameters_complete = True
return parameters_complete
def check_atom_parameters(self):
"""Checks that atoms from structures in the rcsb.org database are
present int CHARMM parameter file. Use as a filter."""
for i in self.strctr.get_models():
for j in i.get_chains():
for k in j.get_residues():
for l in k.get_atom():
if l.get_id() not in params.AA[k.get_resname()].atom_type:
print(' Missing parameters for atom '+l.get_id()+\
' in model '+str(i.get_id())+' chain '+str(j.get_id())+\
' residue '+k.get_resname()+' '+\
str(l.get_parent().get_full_id()[3][1]))
def write_csv(self,basedir,filename):
if basedir == '':
basedir = '.'
if basedir[-1] != "/":
basedir += "/"
self.Full_Structure.to_csv(basedir+filename+'.csv')
def write_crd(self,file_path, model=0):
print("Printing CRD")
'''
This fixes when there are multiple chains and the residue number gets
reset at the beginning of each chain. With this fix, residue numbers
will be renumbered
1 - 5 Integer Atom no. sequential
6 - 10 Integer ires Residue position from file beginning
11 - 11 Space
12 - 15 Achar resname Residue name
16 - 16 Space
17 - 20 Achar type Atom type, IUPAC name of atom left justified
21 - 30 Real(10.5) x Orthogonal coordinates for X, Angstroms
31 - 40 Real(10.5) y Orthogonal coordinates for Y, Angstroms
41 - 50 Real(10.5) z Orthogonal coordinates for Z, Angstroms
51 - 51 Space
52 - 55 Achar segid Segment identifier
56 - 56 Space
57 - 60 Achar resid Residue number within the segment
61 - 70 Real(10.5) Weighting array value
'''
line = '{:>5}{:>5}{:>4} {:4}{:>10.5f}{:>10.5f}{:>10.5f} {:4}{:3}{:>12.5f}'
atom_count = 0
resi_count = 0
lines = []
for a in sp.get_models():
if a.id == model:
for b in a.get_chains():
for c in b.get_residues():
resi_count += 1
for d in c.get_atom():
atom_count += 1
lines.append(line.format(atom_count,resi_count, \
c.get_resname(),d.get_name(), d.get_coord()[0], \
d.get_coord()[1],d.get_coord()[2], b.id, \
c.get_full_id()[3][1],0))
outFile = open(file_path, 'w')
outFile.write('* CRD generated with fixed HS \n')
outFile.write('* Name: NN\n')
outFile.write('* Title: New Molecule Generated by pdb_preparation.py\n')
outFile.write('*\n')
outFile.write('{:>5}'.format(len(lines))+'\n')
for i in lines:
outFile.write(i+'\n')
outFile.close()
    def report_sequence_structure_comparison(self):
        """Placeholder for reporting differences between the declared
        sequence and the resolved structure; not yet implemented.
        """
        pass
def read_dict_into_dataframes(self):
"""Puts structural information inside a DataFrame and checks for missing
structural information by checking with sequence information available.
The data below is stracted for programming clarity, but they could be
obtain directly from the dictionaries."""
# TODO: Some CIF file might have more that one chain, can be of different
# types, so this code may need to accomodate for that. When the coding id finished
# I do not need to assign all of the variables below, I could get that directly
# from the CIF parser header. I do this to remember what I got from the CIF
# file during coding.
'''Data items in the CELL category record details about the
crystallographic cell parameters.'''
col1 = '_cell.'
col2 = ['entry_id','length_a','length_b','length_c','angle_alpha',\
'angle_beta','angle_gamma','Z_PDB','pdbx_unique_axis']
self.cell_df = pd.DataFrame(columns=col2)
for i in col2:
self.cell_df[i] = pd.Series(self.header[col1+i])
'''Data items in the SYMMETRY category record details about the
space-group symmetry.'''
col1 = '_symmetry.'
col2 = ['entry_id','space_group_name_H-M','pdbx_full_space_group_name_H-M',\
'cell_setting','Int_Tables_number']
self.symmetry_df = pd.DataFrame(columns=col2)
for i in col2:
self.symmetry_df[i] = pd.Series(self.header[col1+i])
'''Data items in the ENTITY category record details (such as
chemical composition, name and source) about the molecular
entities that are present in the crystallographic structure.'''
col1 = '_entity.'
col2 = ['id','type','src_method','pdbx_description','formula_weight',\
'pdbx_number_of_molecules','details']
self.entity_df = pd.DataFrame(columns=col2)
for i in col2:
self.entity_df[i] = pd.Series(self.header[col1+i])
'''Data items in the ENTITY_POLY category record details about the
polymer, such as the type of the polymer, the number of
monomers and whether it has nonstandard features.'''
col1 = '_entity_poly.'
col2 = ['entity_id','type','nstd_linkage','nstd_monomer',\
'pdbx_seq_one_letter_code','pdbx_seq_one_letter_code_can',\
'pdbx_strand_id']
self.entity_poly_df = pd.DataFrame(columns=col2)
for i in col2:
self.entity_poly_df[i] = pd.Series(self.header[col1+i])
'''Data items in the ENTITY_POLY_SEQ category specify the sequence
of monomers in a polymer. Allowance is made for the possibility
of microheterogeneity in a sample by allowing a given sequence
number to be correlated with more than one monomer ID. The
corresponding ATOM_SITE entries should reflect this
heterogeneity.'''
col1 = '_entity_poly_seq.'
col2 = ['entity_id','num','mon_id','hetero']
self.entity_poly_seq_df = pd.DataFrame(columns=col2)
for i in col2:
self.entity_poly_seq_df[i] = pd.Series(self.header[col1+i])
'''Data items in the STRUCT_REF category allow the author of a
data block to relate the entities or biological units
described in the data block to information archived in external
databases.'''
col1 = '_struct_ref.'
# col2 = ['id','db_name','db_code','entity_id','pdbx_db_accession',\
# 'pdbx_align_begin','pdbx_seq_one_letter_code','biol_id']
# col2 = ['id','db_name','db_code','entity_id','pdbx_db_accession',\
# 'pdbx_align_begin','biol_id']
col2 = ['id','db_name','db_code','entity_id','pdbx_db_accession',\
'pdbx_align_begin']
self.struct_ref_df = pd.DataFrame(columns=col2)
for i in col2:
self.struct_ref_df[i] = pd.Series(self.header[col1+i])
'''Data items in the STRUCT_REF_SEQ category provide a mechanism
for indicating and annotating a region (or regions) of alignment
between the sequence of an entity or biological unit described
in the data block and the sequence in the referenced database
entry.'''
col1 = '_struct_ref_seq.'
# col2 = ['align_id','ref_id','pdbx_PDB_id_code','pdbx_strand_id',\
# 'seq_align_beg','pdbx_seq_align_beg_ins_code','seq_align_end',\
# 'pdbx_seq_align_end_ins_code','pdbx_db_accession','db_align_beg',\
# 'pdbx_db_align_beg_ins_code','db_align_end',\
# 'pdbx_db_align_end_ins_code','pdbx_auth_seq_align_beg',\
# 'pdbx_auth_seq_align_end']
col2 = ['align_id','ref_id','pdbx_PDB_id_code','pdbx_strand_id',\
'seq_align_beg','pdbx_seq_align_beg_ins_code','seq_align_end',\
'pdbx_seq_align_end_ins_code','pdbx_db_accession','db_align_beg',\
'db_align_end','pdbx_auth_seq_align_beg','pdbx_auth_seq_align_end']
self.struct_ref_seq_df = pd.DataFrame(columns=col2)
for i in col2:
self.struct_ref_seq_df[i] = pd.Series(self.header[col1+i])
'''Data items in the STRUCT_ASYM category record details about the
structural elements in the asymmetric unit.'''
col1 = '_struct_asym.'
col2 = ['id','pdbx_blank_PDB_chainid_flag','pdbx_modified','entity_id',\
'details']
self.struct_asym_df = pd.DataFrame(columns=col2)
for i in col2:
self.struct_asym_df[i] = pd.Series(self.header[col1+i])
'''Data items in the ATOM_SITE category record details about
the atom sites in a macromolecular crystal structure, such as
the positional coordinates, atomic displacement parameters,
magnetic moments and directions.
The data items for describing anisotropic atomic
displacement factors are only used if the corresponding items
are not given in the ATOM_SITE_ANISOTROP category.'''
col1 = "_atom_site."
#col2 = ['group_PDB','id','type_symbol','label_atom_id','label_alt_id', \
# 'label_comp_id','label_asym_id','label_entity_id','label_seq_id', \
# 'pdbx_PDB_ins_code','Cartn_x','Cartn_y','Cartn_z','occupancy', \
# 'B_iso_or_equiv','Cartn_x_esd','Cartn_y_esd','Cartn_z_esd', \
# 'occupancy_esd','B_iso_or_equiv_esd','pdbx_formal_charge', \
# 'auth_seq_id','auth_comp_id','auth_asym_id','auth_atom_id', \
# 'pdbx_PDB_model_num']
# Changed du to inconsitencies in formatting
col2 = ['group_PDB','id','type_symbol','label_atom_id','label_alt_id', \
'label_comp_id','label_asym_id','label_entity_id','label_seq_id', \
'pdbx_PDB_ins_code','Cartn_x','Cartn_y','Cartn_z', 'occupancy',\
'B_iso_or_equiv', \
'pdbx_formal_charge', \
'auth_seq_id','auth_comp_id','auth_asym_id','auth_atom_id', \
'pdbx_PDB_model_num']
self.atom_site_df = pd.DataFrame()
for i in col2:
self.atom_site_df[i] = self.header[col1+i]
##### So far the aboove fields will throw an exception if not found.
##### The following header fields will be checked for their presence
##### and if not found program is still allowed to run
col1 = "_struct_conn."
col2 = ['id','conn_type_id','ptnr1_label_asym_id','ptnr1_label_comp_id',\
'ptnr1_label_seq_id','ptnr1_label_atom_id','ptnr1_symmetry',\
'ptnr2_label_asym_id','ptnr2_label_comp_id','ptnr2_label_seq_id',\
'ptnr2_label_atom_id','pdbx_dist_value']
self.struct_conn_df = pd.DataFrame()
for i in col2:
if (col1+i) in self.header:
self.struct_conn_df[i] = self.header[col1+i]
else:
print('WARNING: Field '+(col1+i)+' is not found in CIF header')
# These model are obtained because it is a function that is used a lot
# so it is obtained once to save repetitive iterations. It is also more
# reliable to obtain this information from structural information directly.
# Because the CIF format for other fields can vary
# TODO: Find a better place for this little one line function call.
self.models = self.get_models()
def get_models(self):
mdls = list(set(self.atom_site_df.pdbx_PDB_model_num))
mdls = [int(i) for i in mdls]
mdls = sorted(mdls)
mdls = [str(i) for i in mdls]
return mdls
| noelcjr/EntropyMaxima | em/tools/Super_Structures.py | Python | gpl-3.0 | 78,633 | [
"CHARMM",
"CRYSTAL"
] | c01bb9ec7900d3c050f1b3cf7138b941a469242e8f65527a619a0cc5f9ad8ec5 |
#!/usr/bin/env python
#
# Wrapper script for starting the biopet-seattleseqkit JAR package
#
# This script is written for use with the Conda package manager and is copied
# from the peptide-shaker wrapper. Only the parameters are changed.
# (https://github.com/bioconda/bioconda-recipes/blob/master/recipes/peptide-shaker/peptide-shaker.py)
#
# This file was automatically generated by the sbt-bioconda plugin.
import os
import subprocess
import sys
import shutil
from os import access
from os import getenv
from os import X_OK
# Name of the JAR this wrapper launches; resolved relative to the
# (symlink-resolved) directory of this script, or to --exec_dir.
jar_file = 'SeattleSeqKit-assembly-0.1.jar'
# Default JVM memory options, applied only when the caller passes no -Xm*
# flag AND the _JAVA_OPTIONS environment variable is unset.
default_jvm_mem_opts = []
# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
    """Return the symlink-resolved, canonicalized directory-portion of path."""
    resolved = os.path.realpath(path)
    return os.path.dirname(resolved)
def java_executable():
    """Return the Java interpreter to invoke.

    Prefers $JAVA_HOME/bin/java when it exists and is executable; otherwise
    falls back to plain 'java' resolved via PATH.
    """
    java_home = getenv('JAVA_HOME')
    if java_home:
        candidate = os.path.join(java_home, os.path.join('bin', 'java'))
        if access(candidate, X_OK):
            return candidate
    return 'java'
def jvm_opts(argv):
    """Split our argument list into JVM and application arguments.

    The argument list passed in argv must not include the script name.

    Returns:
        A 4-tuple of (memory_options, prop_options, passthrough_options,
        exec_dir).  exec_dir is None unless --exec_dir=PATH was given; a
        non-existing exec_dir is populated with a copy of this script's
        distribution directory as a side effect.
    """
    mem_opts, prop_opts, pass_args = [], [], []
    exec_dir = None
    for arg in argv:
        if arg.startswith('-D') or arg.startswith('-XX'):
            prop_opts.append(arg)
        elif arg.startswith('-Xm'):
            mem_opts.append(arg)
        elif arg.startswith('--exec_dir='):
            exec_dir = arg.split('=')[1].strip('"').strip("'")
            if not os.path.exists(exec_dir):
                shutil.copytree(real_dirname(sys.argv[0]), exec_dir,
                                symlinks=False, ignore=None)
        else:
            pass_args.append(arg)
    # Mirror the original shell wrapper: the defaults apply only when no
    # memory flag was given AND _JAVA_OPTIONS is unset -- a set-but-empty
    # _JAVA_OPTIONS still suppresses the defaults, hence the explicit
    # comparison with None.
    if not mem_opts and getenv('_JAVA_OPTIONS') is None:
        mem_opts = default_jvm_mem_opts
    return (mem_opts, prop_opts, pass_args, exec_dir)
def main():
    """Resolve the JAR location, assemble the java command line and run it.

    PeptideShaker-style tools update files relative to the path of the jar
    file.  In a multiuser setting the option --exec_dir="exec_dir" names a
    private location for the distribution; a missing exec_dir is populated
    with a copy of the jar file, lib and resources (handled in jvm_opts).
    """
    java = java_executable()
    mem_opts, prop_opts, pass_args, exec_dir = jvm_opts(sys.argv[1:])
    jar_dir = exec_dir if exec_dir else real_dirname(sys.argv[0])
    # A first argument naming a fully qualified class (eu....) is launched
    # via -cp; otherwise the jar's default main class runs via -jar.
    if pass_args and pass_args[0].startswith('eu'):
        jar_arg = '-cp'
    else:
        jar_arg = '-jar'
    jar_path = os.path.join(jar_dir, jar_file)
    java_args = [java] + mem_opts + prop_opts + [jar_arg, jar_path] + pass_args
    sys.exit(subprocess.call(java_args))
# Standard entry point: only launch the wrapper when executed as a script.
if __name__ == '__main__':
    main()
| matthdsm/bioconda-recipes | recipes/biopet-seattleseqkit/biopet-seattleseqkit.py | Python | mit | 3,377 | [
"Bioconda"
] | f76f5049c9b6a9be9db1c1fb5b16cd2a312dd3042e5f1af3fe13aa53dd8361bc |
# coding=utf-8
# Copyright 2022 The Tensor2Robot Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3
"""Common configurable image manipulation methods for use in preprocessors."""
from typing import Callable, List, Optional, Sequence
import gin
from six.moves import zip
import tensorflow.compat.v1 as tf
def RandomCropImages(images, input_shape,
                     target_shape):
  """Crop a part of given shape from a random location in a list of images.

  The same random offset is drawn once and applied to every image, so the
  crops stay spatially aligned across the list.

  Args:
    images: List of tensors of shape [batch_size, h, w, c].
    input_shape: Shape [h, w, c] of the input images.
    target_shape: Shape [h, w] of the cropped output.

  Raises:
    ValueError: In case the either the input_shape or the target_shape have a
      wrong length.

  Returns:
    crops: List of cropped tensors of shape [batch_size] + target_shape.
  """
  if len(input_shape) != 3:
    raise ValueError(
        'The input shape has to be of the form (height, width, channels) '
        'but has len {}'.format(len(input_shape)))
  if len(target_shape) != 2:
    raise ValueError('The target shape has to be of the form (height, width) '
                     'but has len {}'.format(len(target_shape)))
  target_height = int(target_shape[0])
  target_width = int(target_shape[1])
  max_offset_y = int(input_shape[0]) - target_height
  max_offset_x = int(input_shape[1]) - target_width
  # The graph-time asserts guarantee the crop fits inside the input.
  with tf.control_dependencies(
      [tf.assert_greater_equal(max_offset_x, 0),
       tf.assert_greater_equal(max_offset_y, 0)]):
    offset_y = tf.random_uniform((), maxval=max_offset_y + 1, dtype=tf.int32)
    offset_x = tf.random_uniform((), maxval=max_offset_x + 1, dtype=tf.int32)
  crops = []
  for img in images:
    crops.append(
        tf.image.crop_to_bounding_box(img, offset_y, offset_x, target_height,
                                      target_width))
  return crops
def CenterCropImages(images, input_shape,
                     target_shape):
  """Take a central crop of given size from a list of images.

  Args:
    images: List of tensors of shape [batch_size, h, w, c].
    input_shape: Shape [h, w, c] of the input images.
    target_shape: Shape [h, w] of the cropped output.

  Raises:
    ValueError: If input_shape or target_shape have the wrong length.

  Returns:
    crops: List of cropped tensors of shape [batch_size] + target_shape.
  """
  if len(input_shape) != 3:
    raise ValueError(
        'The input shape has to be of the form (height, width, channels) '
        'but has len {}'.format(len(input_shape)))
  if len(target_shape) != 2:
    raise ValueError('The target shape has to be of the form (height, width) '
                     'but has len {}'.format(len(target_shape)))
  if input_shape[0] == target_shape[0] and input_shape[1] == target_shape[1]:
    # Nothing to crop; return a shallow copy of the list.
    return [image for image in images]

  # Runtime check that every image matches the static input_shape.
  assert_ops = []
  for image in images:
    assert_ops.append(
        tf.assert_equal(
            input_shape[:2],
            tf.shape(image)[1:3],
            # BUG FIX: the two literals previously concatenated without a
            # space ('...heightfor CenterCropImages.').
            message=('All images must have same width and height '
                     'for CenterCropImages.')))
  offset_y = int(input_shape[0] - target_shape[0]) // 2
  offset_x = int(input_shape[1] - target_shape[1]) // 2
  with tf.control_dependencies(assert_ops):
    crops = [
        tf.image.crop_to_bounding_box(image, offset_y, offset_x,
                                      target_shape[0], target_shape[1])
        for image in images
    ]
  return crops
def CustomCropImages(images, input_shape,
                     target_shape,
                     target_locations):
  """Crop a list of images with a custom crop location and size.

  Args:
    images: List of tensors of shape [batch_size, h, w, c].
    input_shape: Shape [h, w, c] of the input images.
    target_shape: Shape [h, w] of the cropped output.
    target_locations: List of crop center coordinates tensors of shape [b, 2].

  Raises:
    ValueError: If a shape argument has the wrong length, if the number of
      target locations differs from the number of images, or if the target
      shape is larger than the input shape.

  Returns:
    crops: List of cropped tensors of shape [batch_size] + target_shape + [3].
  """
  if len(input_shape) != 3:
    raise ValueError(
        'The input shape has to be of the form (height, width, channels) '
        'but has len {}'.format(len(input_shape)))
  if len(target_shape) != 2:
    raise ValueError('The target shape has to be of the form (height, width) '
                     'but has len {}'.format(len(target_shape)))
  if len(images) != len(target_locations):
    raise ValueError('There should be one target location per image. Found {} '
                     'images for {} locations'.format(len(images),
                                                      len(target_locations)))
  if input_shape[0] == target_shape[0] and input_shape[1] == target_shape[1]:
    # Nothing to crop; return a shallow copy of the list.
    return [image for image in images]
  if input_shape[0] < target_shape[0] or input_shape[1] < target_shape[1]:
    raise ValueError('The target shape {} is larger than the input image size '
                     '{}'.format(target_shape, input_shape[:2]))
  # Runtime check that every image matches the static input_shape.
  # (The original loop also zipped in target_locations without using them.)
  assert_ops = []
  for image in images:
    assert_ops.append(
        tf.assert_equal(
            input_shape[:2],
            tf.shape(image)[1:3],
            # BUG FIX: the message previously read '...heightfor
            # CenterCropImages.' -- missing space and wrong function name.
            message=('All images must have same width and height '
                     'for CustomCropImages.')))
  with tf.control_dependencies(assert_ops):
    crops = []
    for image, target_location in zip(images, target_locations):
      # Clamp the requested crop centers so the crop window stays fully
      # inside the image bounds.
      x_coordinates = tf.slice(
          target_location,
          [0, 1], [tf.shape(target_location)[0], 1])
      y_coordinates = tf.slice(
          target_location,
          [0, 0], [tf.shape(target_location)[0], 1])
      x_coordinates = tf.math.maximum(
          tf.cast(x_coordinates, tf.float32),
          tf.cast(target_shape[1] // 2, tf.float32))
      y_coordinates = tf.math.maximum(
          tf.cast(y_coordinates, tf.float32),
          tf.cast(target_shape[0] // 2, tf.float32))
      x_coordinates = tf.math.minimum(
          tf.cast(x_coordinates, tf.float32),
          tf.cast(tf.shape(image)[2] - target_shape[1] // 2, tf.float32))
      y_coordinates = tf.math.minimum(
          tf.cast(y_coordinates, tf.float32),
          tf.cast(tf.shape(image)[1] - target_shape[0] // 2, tf.float32)
      )
      # NOTE(review): column 0 is read as y and column 1 as x, then the
      # offsets are re-assembled as (x, y) for extract_glimpse -- confirm
      # this matches the offsets ordering tf.image.extract_glimpse expects.
      target_location = tf.concat([x_coordinates, y_coordinates], 1)
      crops.append(
          tf.image.extract_glimpse(image, target_shape, tf.cast(
              target_location, tf.float32), centered=False, normalized=False))
  return crops
@gin.configurable
def ApplyPhotometricImageDistortions(
    images,
    random_brightness = False,
    max_delta_brightness = 0.125,
    random_saturation = False,
    lower_saturation = 0.5,
    upper_saturation = 1.5,
    random_hue = False,
    max_delta_hue = 0.2,
    random_contrast = False,
    lower_contrast = 0.5,
    upper_contrast = 1.5,
    random_noise_level = 0.0,
    random_noise_apply_probability = 0.5):
  """Apply photometric distortions to the input images.

  The input list is mutated in place (each entry is replaced by its
  distorted tensor) and the same list object is returned.  Each distortion
  factor (brightness delta, saturation factor, hue delta, contrast factor)
  is drawn once per call and applied to every image in the list, so these
  distortions are correlated across the batch; only the additive noise is
  drawn independently per image.

  Args:
    images: Tensor of shape [batch_size, h, w, 3] containing a batch of images
      to apply the random photometric distortions to.
    random_brightness: If True; randomly adjust the brightness.
    max_delta_brightness: Float; maximum delta for the random value by which to
      adjust the brightness.
    random_saturation: If True; randomly adjust the saturation.
    lower_saturation: Float; lower bound of the range from which to chose a
      random value for the saturation.
    upper_saturation: Float; upper bound of the range from which to chose a
      random value for the saturation.
    random_hue: If True; randomly adjust the hue.
    max_delta_hue: Float; maximum delta for the random value by which to adjust
      the hue.
    random_contrast: If True; randomly adjust the contrast.
    lower_contrast: Float; lower bound of the range from which to chose a random
      value for the contrast.
    upper_contrast: Float; upper bound of the range from which to chose a random
      value for the contrast.
    random_noise_level: Standard deviation of the gaussian from which to sample
      random noise to be added to the images. If 0.0, no noise is added.
    random_noise_apply_probability: Probability of applying additive random
      noise to the images.

  Returns:
    images: Tensor of shape [batch_size, h, w, 3] containing a batch of images
      resulting from applying random photometric distortions to the inputs.
  """
  with tf.variable_scope('photometric_distortions'):
    # Adjust brightness to a random level.
    if random_brightness:
      # A single delta shared by every image in the batch.
      delta = tf.random_uniform([], -max_delta_brightness, max_delta_brightness)
      for i, image in enumerate(images):
        images[i] = tf.image.adjust_brightness(image, delta)
    # Adjust saturation to a random level.
    if random_saturation:
      lower = lower_saturation
      upper = upper_saturation
      saturation_factor = tf.random_uniform([], lower, upper)
      for i, image in enumerate(images):
        images[i] = tf.image.adjust_saturation(image, saturation_factor)
    # Randomly shift the hue.
    if random_hue:
      delta = tf.random_uniform([], -max_delta_hue, max_delta_hue)
      for i, image in enumerate(images):
        images[i] = tf.image.adjust_hue(image, delta)
    # Adjust contrast to a random level.
    if random_contrast:
      lower = lower_contrast
      upper = upper_contrast
      contrast_factor = tf.random_uniform([], lower, upper)
      for i, image in enumerate(images):
        images[i] = tf.image.adjust_contrast(image, contrast_factor)
    # Add random Gaussian noise.
    if random_noise_level:
      for i, image in enumerate(images):
        rnd_noise = tf.random_normal(tf.shape(image), stddev=random_noise_level)
        img_shape = tf.shape(image)
        def ImageClosure(value):
          # Bind `value` now; a bare lambda over the loop variable would
          # late-bind and every tf.cond branch would see the last image.
          return lambda: value
        # Noise is added with probability `random_noise_apply_probability`;
        # the first (clean) branch is taken when the draw exceeds it.
        image = tf.cond(
            tf.greater(tf.random.uniform(()), random_noise_apply_probability),
            ImageClosure(image), ImageClosure(image + rnd_noise))
        # Reshape back to the captured (dynamic) shape after the cond.
        images[i] = tf.reshape(image, img_shape)

    # Clip to valid range.
    for i, image in enumerate(images):
      images[i] = tf.clip_by_value(image, 0.0, 1.0)
  return images
@gin.configurable
def ApplyPhotometricImageDistortionsParallel(
    images,
    random_brightness = False,
    max_delta_brightness = 0.125,
    random_saturation = False,
    lower_saturation = 0.5,
    upper_saturation = 1.5,
    random_hue = False,
    max_delta_hue = 0.2,
    random_contrast = False,
    lower_contrast = 0.5,
    upper_contrast = 1.5,
    random_noise_level = 0.0,
    random_noise_apply_probability = 0.5,
    custom_distortion_fn = None):
  """Apply photometric distortions to the input images in parallel.

  The per-image distortion is applied through tf.map_fn, so every image
  draws its own random distortion parameters (unlike
  ApplyPhotometricImageDistortions, where one draw is shared across the
  whole batch).

  Args:
    images: Tensor of shape [batch_size, h, w, 3] containing a batch of images
      to apply the random photometric distortions to.
    random_brightness: If True; randomly adjust the brightness.
    max_delta_brightness: Float; maximum delta for the random value by which to
      adjust the brightness.
    random_saturation: If True; randomly adjust the saturation.
    lower_saturation: Float; lower bound of the range from which to chose a
      random value for the saturation.
    upper_saturation: Float; upper bound of the range from which to chose a
      random value for the saturation.
    random_hue: If True; randomly adjust the hue.
    max_delta_hue: Float; maximum delta for the random value by which to adjust
      the hue.
    random_contrast: If True; randomly adjust the contrast.
    lower_contrast: Float; lower bound of the range from which to chose a random
      value for the contrast.
    upper_contrast: Float; upper bound of the range from which to chose a random
      value for the contrast.
    random_noise_level: Standard deviation of the gaussian from which to sample
      random noise to be added to the images. If 0.0, no noise is added.
    random_noise_apply_probability: Probability of applying additive random
      noise to the images.
    custom_distortion_fn: A custom distortion fn that takes a tensor of shape
      [h, w, 3] and returns a tensor of the same size.

  Returns:
    images: Tensor of shape [batch_size, h, w, 3] containing a batch of images
      resulting from applying random photometric distortions to the inputs.
  """
  with tf.variable_scope('photometric_distortions'):

    def SingleImageDistortion(image):
      # Distort one [h, w, 3] image; mapped over the batch below.
      # Adjust brightness to a random level.
      if random_brightness:
        delta = tf.random_uniform([], -max_delta_brightness,
                                  max_delta_brightness)
        image = tf.image.adjust_brightness(image, delta)
      # Adjust saturation to a random level.
      if random_saturation:
        lower = lower_saturation
        upper = upper_saturation
        saturation_factor = tf.random_uniform([], lower, upper)
        image = tf.image.adjust_saturation(image, saturation_factor)
      # Randomly shift the hue.
      if random_hue:
        delta = tf.random_uniform([], -max_delta_hue, max_delta_hue)
        image = tf.image.adjust_hue(image, delta)
      # Adjust contrast to a random level.
      if random_contrast:
        lower = lower_contrast
        upper = upper_contrast
        contrast_factor = tf.random_uniform([], lower, upper)
        image = tf.image.adjust_contrast(image, contrast_factor)

      # Add random Gaussian noise.
      if random_noise_level:
        rnd_noise = tf.random_normal(tf.shape(image), stddev=random_noise_level)
        img_shape = tf.shape(image)
        def ImageClosure(value):
          # Bind `value` eagerly for use as a tf.cond branch.
          return lambda: value
        # Noise is added with probability `random_noise_apply_probability`.
        image = tf.cond(
            tf.greater(tf.random.uniform(()), random_noise_apply_probability),
            ImageClosure(image), ImageClosure(image + rnd_noise))
        image = tf.reshape(image, img_shape)

      # Optional user-supplied per-image distortion, applied last before
      # clipping.
      if custom_distortion_fn:
        image = custom_distortion_fn(image)

      # Clip to valid range.
      image = tf.clip_by_value(image, 0.0, 1.0)
      return image

    images = tf.map_fn(SingleImageDistortion, images)
    return images
@gin.configurable
def ApplyPhotometricImageDistortionsCheap(
    images):
  """Cheaply distort a batch of images via per-channel gamma correction.

  Each color channel is raised to an independent random power drawn
  uniformly from [0.5, 1.5].  A gamma below 1 brightens the channel and
  lowers contrast; a gamma above 1 darkens it and raises contrast.

  Args:
    images: Tensor of shape [batch_size, h, w, 3] containing a batch of images
      to distort, normalized to the range (0, 1) with float32 encoding.

  Returns:
    Tensor of shape [batch_size, h, w, 3] with the distortions applied.
  """
  with tf.name_scope('photometric_distortion'):
    distorted_channels = []
    for channel in tf.unstack(images, axis=-1):
      gamma = tf.random_uniform([], 0.5, 1.5)
      distorted_channels.append(tf.pow(channel, gamma))
    images = tf.stack(distorted_channels, axis=-1)
  return images
def ApplyRandomFlips(images):
  """Flip a batch of images horizontally and/or vertically at random.

  One coin is tossed per axis for the whole batch, so the same flip is
  applied to every image in the batch.  This guarantees consistency within
  an episode at the cost of correlating flips across the batch.
  """
  with tf.name_scope('random_flips'):
    do_horizontal = tf.greater(tf.random_uniform([]), 0.5)
    do_vertical = tf.greater(tf.random_uniform([]), 0.5)
    images = tf.cond(
        do_horizontal,
        lambda: tf.image.flip_left_right(images),
        lambda: images)
    images = tf.cond(
        do_vertical,
        lambda: tf.image.flip_up_down(images),
        lambda: images)
  return images
@gin.configurable
def ApplyDepthImageDistortions(depth_images,
                               random_noise_level = 0.05,
                               random_noise_apply_probability = 0.5,
                               scaling_noise = True,
                               gamma_shape = 1000.0,
                               gamma_scale_inverse = 1000.0,
                               min_depth_allowed = 0.25,
                               max_depth_allowed = 2.5):
  """Apply photometric distortions to the input depth images.

  The input list is mutated in place and the same list object is returned.

  Args:
    depth_images: Tensor of shape [batch_size, h, w, 1] containing a batch of
      depth images to apply the random photometric distortions to.
    random_noise_level: The standard deviation of the Gaussian distribution for
      the noise that is applied to the depth image. When 0.0, then no noise is
      applied.
    random_noise_apply_probability: Probability of applying additive random
      noise to the images.
    scaling_noise: If True; sample a random variable from a Gamma distribution
      to scale the depth image.
    gamma_shape: Float; shape parameter of a Gamma distribution.
    gamma_scale_inverse: Float; inverse of scale parameter of a Gamma
      distribution.
    min_depth_allowed: Float; minimum clip value for depth.
    max_depth_allowed: Float; max clip value for depth.

  Returns:
    depth_images: Tensor of shape [batch_size, h, w, 1] containing a
      batch of images resulting from applying random photometric distortions to
      the inputs.
  """
  # Depth images must be single-channel.
  assert depth_images[0].get_shape().as_list()[-1] == 1
  with tf.variable_scope('distortions_depth_images'):
    # Add random Gaussian noise.
    if random_noise_level:
      for i, image in enumerate(depth_images):
        img_shape = tf.shape(image)
        rnd_noise = tf.random_normal(img_shape, stddev=random_noise_level)

        def ReturnImageTensor(value):
          # Bind `value` eagerly for use as a tf.cond branch.
          return lambda: value

        if scaling_noise:
          # Random scale factor alpha ~ Gamma(gamma_shape,
          # 1/gamma_scale_inverse); with the default parameters this is
          # tightly concentrated around 1.0.
          alpha = tf.random_gamma([], gamma_shape, gamma_scale_inverse)
        # NOTE(review): the noisy branch below references `alpha`, which is
        # only assigned when scaling_noise is True -- confirm the intended
        # behavior for scaling_noise=False (as written, additive-only noise
        # is not supported).
        # The noisy branch (alpha * image + rnd_noise) is taken with
        # probability `random_noise_apply_probability`.
        image = tf.cond(
            tf.reduce_all(
                tf.greater(
                    tf.random.uniform([1]), random_noise_apply_probability)),
            ReturnImageTensor(image),
            ReturnImageTensor(alpha * image + rnd_noise))
        depth_images[i] = tf.reshape(image, img_shape)

    # Clip to valid range.
    for i, image in enumerate(depth_images):
      depth_images[i] = tf.clip_by_value(image, min_depth_allowed,
                                         max_depth_allowed)
  return depth_images
| google-research/tensor2robot | preprocessors/image_transformations.py | Python | apache-2.0 | 18,591 | [
"Gaussian"
] | b1521d4b442738264c714e0ec6148536a9021be278aead53ae673b70e5360880 |
#!/usr/bin/env python
#
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""
Standard setup script.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
from buildbot_worker import version
try:
# If setuptools is installed, then we'll add setuptools-specific arguments
# to the setup args.
import setuptools
from setuptools import setup
from setuptools.command.sdist import sdist
from distutils.command.install_data import install_data
except ImportError:
setuptools = None
from distutils.command.sdist import sdist
from distutils.core import setup
BUILDING_WHEEL = bool("bdist_wheel" in sys.argv)
class our_install_data(install_data):
    """install_data variant that records the installed package version.

    After the normal data installation it writes a
    ``buildbot_worker/VERSION`` file into the installed package so the
    version string can be read back at runtime.
    """

    def finalize_options(self):
        # Default the data install dir to the library install dir so that
        # VERSION lands inside the buildbot_worker package itself.
        self.set_undefined_options('install',
                                   ('install_lib', 'install_dir'),
                                   )
        install_data.finalize_options(self)

    def run(self):
        install_data.run(self)
        # ensure there's a buildbot_worker/VERSION file
        fn = os.path.join(self.install_dir, 'buildbot_worker', 'VERSION')
        with open(fn, 'w') as f:
            f.write(version)
        # Register the file so it participates in --record and uninstall.
        self.outfiles.append(fn)
class our_sdist(sdist):
    """sdist variant that embeds the version and latest release notes.

    The release tree gets a ``buildbot_worker/VERSION`` file and a ``NEWS``
    file copied from the master tree's release notes with ``|version|``
    substituted.
    """

    def make_release_tree(self, base_dir, files):
        sdist.make_release_tree(self, base_dir, files)

        # ensure there's a buildbot_worker/VERSION file
        fn = os.path.join(base_dir, 'buildbot_worker', 'VERSION')
        # Use a context manager so the handle is closed and flushed
        # deterministically (the original open(...).write(...) leaked it,
        # unlike the rest of this file which uses `with`).
        with open(fn, 'w') as f:
            f.write(version)

        # ensure that NEWS has a copy of the latest release notes, copied from
        # the master tree, with the proper version substituted
        src_fn = os.path.join('..', 'master', 'docs', 'relnotes/index.rst')
        with open(src_fn) as f:
            src = f.read()
        src = src.replace('|version|', version)
        dst_fn = os.path.join(base_dir, 'NEWS')
        with open(dst_fn, 'w') as f:
            f.write(src)
# Arguments ultimately passed to setup(); extended below depending on the
# available packaging toolchain (setuptools vs. plain distutils) and on the
# target platform.
setup_args = {
    'name': "buildbot-worker",
    'version': version,
    'description': "Buildbot Worker Daemon",
    'long_description': "See the 'buildbot' package for details",
    'author': "Brian Warner",
    'author_email': "warner-buildbot@lothar.com",
    'maintainer': "Dustin J. Mitchell",
    'maintainer_email': "dustin@v.igoro.us",
    'url': "http://buildbot.net/",
    'classifiers': [
        'Development Status :: 5 - Production/Stable',
        'Environment :: No Input/Output (Daemon)',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
        'Topic :: Software Development :: Build Tools',
        'Topic :: Software Development :: Testing',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],

    'packages': [
        "buildbot_worker",
        "buildbot_worker.util",
        "buildbot_worker.backports",
        "buildbot_worker.commands",
        "buildbot_worker.scripts",
        "buildbot_worker.monkeypatches",
    ] + ([] if BUILDING_WHEEL else [  # skip tests for wheels (save 40% of the archive)
        "buildbot_worker.test",
        "buildbot_worker.test.fake",
        "buildbot_worker.test.unit",
        "buildbot_worker.test.util",
    ]),
    # mention data_files, even if empty, so install_data is called and
    # VERSION gets copied
    'data_files': [("buildbot_worker", [])],
    'package_data': {
        '': [
            'VERSION',
        ]
    },
    # Custom commands defined above: write VERSION on install, embed
    # VERSION/NEWS in source distributions.
    'cmdclass': {
        'install_data': our_install_data,
        'sdist': our_sdist
    },
    'entry_points': {
        'console_scripts': [
            'buildbot-worker=buildbot_worker.scripts.runner:run',
            # this will also be shipped on non windows :-(
            'buildbot_worker_windows_service=buildbot_worker.scripts.windows_service:HandleCommandLine',  # noqa pylint: disable=line-too-long
        ]}
}

# set zip_safe to false to force Windows installs to always unpack eggs
# into directories, which seems to work better --
# see http://buildbot.net/trac/ticket/907
if sys.platform == "win32":
    setup_args['zip_safe'] = False

# Minimum supported versions of the runtime dependencies.
twisted_ver = ">= 17.9.0"
autobahn_ver = ">= 0.16.0"

# Dependency declarations are only supported under setuptools; with plain
# distutils (see the import fallback at the top of the file) they are
# silently skipped.
if setuptools is not None:
    setup_args['install_requires'] = [
        'twisted ' + twisted_ver,
        'future',
    ]
    if sys.version_info.major >= 3:
        # Message pack is only supported on Python 3
        setup_args['install_requires'] += [
            'autobahn ' + autobahn_ver,
            'msgpack >= 0.6.0',
        ]

    # buildbot_worker_windows_service needs pywin32
    if sys.platform == "win32":
        setup_args['install_requires'].append('pywin32')

    # Unit test hard dependencies.
    test_deps = [
        'mock',
        'psutil',
    ]

    setup_args['tests_require'] = test_deps
    setup_args['extras_require'] = {
        'test': [
            'pep8',
            # spellcheck introduced in version 1.4.0
            'pylint>=1.4.0',
            'pyenchant',
            'flake8~=3.9.0',
        ] + test_deps,
    }

    if '--help-commands' in sys.argv or 'trial' in sys.argv or 'test' in sys.argv:
        setup_args['setup_requires'] = [
            'setuptools_trial',
        ]

# Escape hatch for environments (e.g. distro packaging) where dependencies
# are provided externally.
if os.getenv('NO_INSTALL_REQS'):
    setup_args['install_requires'] = None
    setup_args['extras_require'] = None

setup(**setup_args)
| pmisik/buildbot | worker/setup.py | Python | gpl-2.0 | 6,318 | [
"Brian"
] | 564a8a1197ee1e2b75ec79301938bc65b75bf143412342110fdb19fd9bc6b501 |
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
cqsdir = os.path.abspath(os.path.dirname(currentdir) + "/CQS")
sys.path.insert(0,cqsdir)
import logging
import argparse
import string
import subprocess
import pysam
from LocusItem import LocusItem, readBedFile, getChromosomeMap
from FileListUtils import readHashMap
def main():
    """Count reads per chromosome in CNV-free target regions.

    Pipeline: read a target bed file, drop intervals overlapping a CNV
    exclusion bed, merge runs of surviving intervals per chromosome, count
    reads from every sample bam over the remaining intervals, write a
    Chromosome/Sample/Count table, then generate and run an R script that
    computes size factors from that table.
    """
    DEBUG = False
    NOT_DEBUG = not DEBUG

    parser = argparse.ArgumentParser(description="Draw bam plot based on peak list.",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-i', '--input', action='store', nargs='?', required=NOT_DEBUG, help="Input bed file")
    parser.add_argument('-b', '--bamListFile', action='store', nargs='?', required=NOT_DEBUG, help="Sample bam file list")
    parser.add_argument('-c', '--cnvFile', action='store', nargs='?', required=NOT_DEBUG, help="Exclude CNV range file")
    parser.add_argument('-o', '--output', action='store', nargs='?', required=NOT_DEBUG, help="Output file")

    args = parser.parse_args()

    # Hard-coded developer paths, only used when DEBUG is flipped to True.
    if(DEBUG):
        args.input = "/scratch/cqs/references/exomeseq/IDT/xgen-exome-research-panel-targetsae255a1532796e2eaa53ff00001c1b3c.slop50.nochr.bed"
        args.bamListFile = "/scratch/cqs/shengq2/macrae_linton/20190517_linton_exomeseq_3321_human/GATK4_CNV_Germline_9_CNVGenesPlot/result/linton_exomeseq_3321__fileList3.list"
        args.cnvFile = "/scratch/cqs/shengq2/macrae_linton/20190517_linton_exomeseq_3321_human/GATK4_CNV_Germline_7_CombineGCNV/result/linton_exomeseq_3321.txt"
        args.output = "/scratch/cqs/shengq2/macrae_linton/20190517_linton_exomeseq_3321_human/background/linton_exomeseq_3321.excluded.bed"

    logger = logging.getLogger('getBackgroundCount')
    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)-8s - %(message)s')

    print(args)

    #if not os.path.isfile(args.output):
    bamMap = readHashMap(args.bamListFile)
    samples = sorted(bamMap.keys())

    bedItems = readBedFile(args.input)
    cnvItems = readBedFile(args.cnvFile)

    # NOTE: the per-chromosome maps are assumed to hold references to the
    # same LocusItem objects as bedItems, so Overlapped flags set through
    # bedMap are visible when filtering bedItems below -- confirm against
    # getChromosomeMap.
    bedMap = getChromosomeMap(bedItems)
    cnvMap = getChromosomeMap(cnvItems)

    logger.info("Before excluding, there are %d intervals" % len(bedItems))

    # Mark every target interval that overlaps any CNV interval.
    for chrom in bedMap.keys():
        logger.info(chrom)
        if not chrom in cnvMap:
            continue
        curBedItems = bedMap[chrom]
        curExcludeItems = cnvMap[chrom]
        for bi in curBedItems:
            for ci in curExcludeItems:
                if bi.overlapPosition(ci):
                    bi.Overlapped = True
                    break

    bedItems = [bi for bi in bedItems if not bi.Overlapped]
    logger.info("After excluding, there are %d intervals" % len(bedItems))

    # Merge each run of consecutive non-excluded intervals into one,
    # regardless of the gap between them, by extending the earlier interval
    # and marking the later one Overlapped (i.e. consumed).
    for chrom in bedMap.keys():
        curBedItems = bedMap[chrom]
        # NOTE(review): range(len-1, 1, -1) stops at index 2, so the pair
        # (0, 1) is never considered for merging -- confirm whether
        # range(len-1, 0, -1) was intended.
        for idx in range(len(curBedItems)-1, 1, -1):
            curItem = curBedItems[idx]
            prevItem = curBedItems[idx - 1]
            if (not curItem.Overlapped) and (not prevItem.Overlapped):
                prevItem.End = curItem.End
                curItem.Overlapped = True

    validBedItems = [bi for bi in bedItems if not bi.Overlapped]
    logger.info("After merge, there are %d intervals" % len(validBedItems))

    chromosomes = sorted(set(bi.Chromosome for bi in validBedItems))
    print(chromosomes)

    # Count reads per sample per chromosome over the merged intervals and
    # write the tab-separated table.
    with open(args.output, "w") as fout:
        fout.write("Chromosome\tSample\tCount\n")
        for sample in samples:
            bamFile = bamMap[sample][0]
            with pysam.Samfile(bamFile) as samfile:
                logger.info("start counting %s ..." % sample)
                for chromosome in chromosomes:
                    chromBeds = [bi for bi in validBedItems if bi.Chromosome == chromosome]
                    chromCount = 0
                    for chromBed in chromBeds:
                        chromCount = chromCount + samfile.count(chromBed.Chromosome, chromBed.Start, chromBed.End)
                    logger.info("%s ~ %s : %d" % (sample, chromosome, chromCount))
                    fout.write("%s\t%s\t%d\n" % (chromosome, sample, chromCount))

    # Build a driver R script: prepend input/output assignments, then append
    # the template getBackgroundCount.r shipped next to this script.
    realpath = os.path.dirname(os.path.realpath(__file__))
    rPath = realpath + "/getBackgroundCount.r"

    targetR = args.output + ".r"
    with open(targetR, "wt") as fout:
        fout.write("inputFile<-\"%s\"\n" % args.output)
        fout.write("outputFile<-\"%s\"\n\n" % (args.output + ".sizefactor"))
        with open(rPath, "r") as fin:
            for line in fin:
                line = line.rstrip()
                fout.write(line + "\n")

    # NOTE(review): os.system with a path-built command string; paths with
    # spaces or shell metacharacters would break or be interpreted by the
    # shell -- subprocess.run with an argument list would be safer.
    cmd = "R --vanilla -f " + targetR
    logger.info(cmd)
    os.system(cmd)

    logger.info("done.")

if __name__ == "__main__":
    main()
| shengqh/ngsperl | lib/GATK4/getBackgroundCount.py | Python | apache-2.0 | 4,462 | [
"pysam"
] | bd1e282a1bca719156fc0a8ba31382b82ed33f93fe536d794dc1f6a2b4d3ff4a |
#!/usr/bin/env python
# encoding: utf-8
import pytest
from tbone.db.models import create_collection
from tbone.resources import verbs, Resource
from tbone.testing.clients import *
from tbone.testing.fixtures import *
from .resources import *
@pytest.mark.asyncio
@pytest.fixture(scope='function')
async def load_account_collection(json_fixture, db):
    ''' Helper fixture for loading the accounts.json fixture into the database.

    Returns an App wired to the test database with the account collection
    created and seeded.
    '''
    app = App(db=db)
    # load data
    data = json_fixture('accounts.json')
    # create collection in db and optional indices
    coll = await create_collection(db, AccountResource._meta.object_class)
    # insert raw data into collection
    if coll:
        await coll.insert_many(data)
    return app
@pytest.mark.asyncio
@pytest.fixture(scope='function')
async def load_book_collection(json_fixture, db):
    ''' Helper fixture for loading the books.json fixture into the database.

    Returns an App wired to the test database with the book collection
    created and seeded.
    '''
    app = App(db=db)
    # load data
    data = json_fixture('books.json')
    # create collection in db and optional indices
    coll = await create_collection(db, BookResource._meta.object_class)
    # insert raw data into collection
    if coll:
        await coll.insert_many(data)
    return app
@pytest.mark.asyncio
async def test_mongo_resource_create(db):
    ''' POSTing a new document returns CREATED and echoes the submitted fields '''
    app = App(db=db)
    await create_collection(db, BookResource._meta.object_class)

    url = '/api/{}/'.format(BookResource.__name__)
    client = ResourceTestClient(app, BookResource)
    # create a new book
    new_book = {
        'isbn': '9780140815054',
        'title': 'A Tale of Two Cities',
        'author': ['Charles Dickens'],
        'publication_date': '1859-01-01T00:00:00.000+0000'
    }
    response = await client.post(url, body=new_book)
    assert response.status == verbs.CREATED
    data = client.parse_response_data(response)
    # every submitted field must appear in the response payload
    for key in new_book.keys():
        assert key in data
@pytest.mark.asyncio
async def test_mongo_resource_crud(json_fixture, db):
    ''' Basic tests covering CRUD operations '''
    app = App(db=db)
    data = json_fixture('books.json')
    coll = await create_collection(db, BookResource._meta.object_class)
    # insert raw data into collection
    if coll:
        await coll.insert_many(data)
    # create client
    url = '/api/{}/'.format(BookResource.__name__)
    client = ResourceTestClient(app, BookResource)
    # get all books
    response = await client.get(url)
    assert response.status == OK
    data = client.parse_response_data(response)
    assert 'meta' in data
    assert 'objects' in data
    # create a new book
    new_book = {
        'isbn': '9788408020011',
        'title': 'The Old Man and the Sea',
        'author': ['Ernest Hemingway'],
        'publication_date': '1953-01-01T00:00:00.000+0000'
    }
    response = await client.post(url, body=new_book)
    assert response.status == CREATED
    data = client.parse_response_data(response)
    for key in new_book.keys():
        assert key in data
    # create new review by performing PUT (full-document update)
    reviews = data.get('reviews', None) or []
    reviews.append({
        'user': 'Brian Fantana',
        'ratings': {
            'smooth_read': 2,
            'language': 4,
            'pace': 1,
            'originality': 2
        },
        'text': 'Could not finish it'
    })
    data['reviews'] = reviews
    response = await client.put(url + data['isbn'] + '/', body=data)
    assert response.status == ACCEPTED
    update_obj = client.parse_response_data(response)
    assert update_obj['_links'] == data['_links']
    assert len(update_obj['reviews']) == 1
    # create new review by performing PATCH (partial update: only reviews)
    reviews = update_obj.get('reviews', None)
    reviews.append({
        'user': 'Ron Burgundy',
        'ratings': {
            'smooth_read': 4,
            'language': 5,
            'pace': 3,
            'originality': 2
        },
        'text': 'Good read, really enjoyed it, even though it took me so long to finish'
    })
    response = await client.patch(url + data['isbn'] + '/', body={'reviews': reviews})
    assert response.status == ACCEPTED
    update_obj = client.parse_response_data(response)
    assert update_obj['_links'] == data['_links']
    assert len(update_obj['reviews']) == 2
    # get detail
    response = await client.get(url + data['isbn'] + '/')
    assert response.status == OK
    update_obj = client.parse_response_data(response)
    assert update_obj['_links'] == data['_links']
    assert len(update_obj['reviews']) == 2
    # verify internal document fields were not serialized
    assert 'impressions' not in update_obj
    assert 'views' not in update_obj
    # delete the book
    response = await client.delete(url + data['isbn'] + '/')
    assert response.status == NO_CONTENT
    # fail to delete the book a 2nd time
    response = await client.delete(url + data['isbn'] + '/')
    assert response.status == NOT_FOUND
@pytest.mark.asyncio
async def test_mongo_resource_crud_ws(json_fixture, db):
    ''' Basic tests covering CRUD operations.

    Same scenario as test_mongo_resource_crud, but driven through the
    websocket protocol client rather than HTTP.
    '''
    app = App(db=db)
    data = json_fixture('books.json')
    coll = await create_collection(db, BookResource._meta.object_class)
    # insert raw data into collection
    if coll:
        await coll.insert_many(data)
    # create client (websocket protocol is the only difference from the
    # HTTP variant of this test)
    url = '/api/{}/'.format(BookResource.__name__)
    client = ResourceTestClient(app, BookResource, Resource.Protocol.websocket)
    # get all books
    response = await client.get(url)
    assert response.status == OK
    data = client.parse_response_data(response)
    assert 'meta' in data
    assert 'objects' in data
    # create a new book
    new_book = {
        'isbn': '9788408020011',
        'title': 'The Old Man and the Sea',
        'author': ['Ernest Hemingway'],
        'publication_date': '1953-01-01T00:00:00.000+0000'
    }
    response = await client.post(url, body=new_book)
    assert response.status == CREATED
    data = client.parse_response_data(response)
    for key in new_book.keys():
        assert key in data
    # create new review by performing PUT
    reviews = data.get('reviews', None) or []
    reviews.append({
        'user': 'Brian Fantana',
        'ratings': {
            'smooth_read': 2,
            'language': 4,
            'pace': 1,
            'originality': 2
        },
        'text': 'Could not finish it'
    })
    data['reviews'] = reviews
    response = await client.put(url + data['isbn'] + '/', body=data)
    assert response.status == ACCEPTED
    update_obj = client.parse_response_data(response)
    assert update_obj['_links'] == data['_links']
    assert len(update_obj['reviews']) == 1
    # create new review by performing PATCH
    reviews = update_obj.get('reviews', None)
    reviews.append({
        'user': 'Ron Burgundy',
        'ratings': {
            'smooth_read': 4,
            'language': 5,
            'pace': 3,
            'originality': 2
        },
        'text': 'Good read, really enjoyed it, even though it took me so long to finish'
    })
    response = await client.patch(url + data['isbn'] + '/', body={'reviews': reviews})
    assert response.status == ACCEPTED
    update_obj = client.parse_response_data(response)
    assert update_obj['_links'] == data['_links']
    assert len(update_obj['reviews']) == 2
    # get detail
    response = await client.get(url + data['isbn'] + '/')
    assert response.status == OK
    update_obj = client.parse_response_data(response)
    assert update_obj['_links'] == data['_links']
    assert len(update_obj['reviews']) == 2
    # verify internal document fields were not serialized
    assert 'impressions' not in update_obj
    assert 'views' not in update_obj
    # delete the book
    response = await client.delete(url + data['isbn'] + '/')
    assert response.status == NO_CONTENT
    # fail to delete the book a 2nd time
    response = await client.delete(url + data['isbn'] + '/')
    assert response.status == NOT_FOUND
@pytest.mark.asyncio
async def test_mongo_collection_pagination_and_sorting(load_account_collection):
    ''' Offset-based pagination and order_by sorting on a collection view '''
    app = load_account_collection
    # create client
    url = '/api/{}/'.format(AccountResource.__name__)
    client = ResourceTestClient(app, AccountResource)
    # get accounts - 0 offset
    response = await client.get(url)
    # make sure we got a response object
    assert isinstance(response, Response)
    # parse response and retrieve data
    page1 = client.parse_response_data(response)
    assert 'meta' in page1
    assert 'objects' in page1
    assert len(page1['objects']) == LIMIT
    assert 'total_count' in page1['meta']
    assert page1['meta']['offset'] == 0
    # get accounts - 10 offset
    response = await client.get(url, args={'offset': 10})
    # make sure we got a response object
    assert isinstance(response, Response)
    # parse response and retrieve data
    page2 = client.parse_response_data(response)
    assert 'meta' in page2
    assert 'objects' in page2
    assert len(page2['objects']) == LIMIT
    assert 'total_count' in page2['meta']
    assert page2['meta']['offset'] == 10
    # verify the offset shifted the window: item 10 of page 1 is item 0 of page 2
    assert page1['objects'][10] == page2['objects'][0]
    with pytest.raises(AssertionError):
        assert page1['objects'][0] == page2['objects'][0]
    # test sorting
    response = await client.get(url, args={'order_by': 'password'})  # arbitrary field sorting
    # make sure we got a response object
    assert isinstance(response, Response)
    # parse response and retrieve data
    page3 = client.parse_response_data(response)
    assert page3['meta']['offset'] == 0
    # make sure the first object in both collections are not identical
    with pytest.raises(AssertionError):
        assert page1['objects'][0] == page3['objects'][0]
@pytest.mark.asyncio
async def test_mongo_collection_with_resource_defined_query(load_account_collection):
    ''' A resource-level Meta.query constrains every collection listing '''

    class PremiumAccountResource(AccountResource):
        ''' Derived account resource limited only to premium accounts '''
        class Meta(AccountResource.Meta):
            query = {'premium': True}

    app = load_account_collection
    # create client
    url = '/api/{}/'.format(PremiumAccountResource.__name__)
    client = ResourceTestClient(app, PremiumAccountResource)
    # get all premium accounts (limit=0 disables pagination)
    response = await client.get(url, args={'limit': '0'})
    # make sure we got a response object
    assert isinstance(response, Response)
    # parse response and retrieve data
    data = client.parse_response_data(response)
    for account in data['objects']:
        assert account['premium'] is True
@pytest.mark.asyncio
async def test_mongo_collection_filtering_simple(load_account_collection):
    ''' Simple equality filtering via query-string arguments '''
    app = load_account_collection
    # create client
    url = '/api/{}/'.format(AccountResource.__name__)
    client = ResourceTestClient(app, AccountResource)
    # get all accounts
    response = await client.get(url)
    # make sure we got a response object
    assert isinstance(response, Response)
    # parse response and retrieve data
    data = client.parse_response_data(response)
    total_count = data['meta']['total_count']
    # get only accounts which are designated Male gender
    response = await client.get(url, args={'gender': 'Male'})
    # parse response and retrieve data
    data = client.parse_response_data(response)
    male_count = data['meta']['total_count']
    # get only accounts which are designated Female gender
    response = await client.get(url, args={'gender': 'Female'})
    # parse response and retrieve data
    data = client.parse_response_data(response)
    female_count = data['meta']['total_count']
    # the two filtered subsets cannot exceed the unfiltered total
    assert female_count + male_count <= total_count
@pytest.mark.asyncio
async def test_mongo_collection_filtering_operator(load_account_collection):
    ''' Filtering with query operators.

    NOTE(review): the body is currently identical to
    test_mongo_collection_filtering_simple and only exercises plain
    equality filters -- no filter operator is actually tested; confirm the
    intended coverage.
    '''
    app = load_account_collection
    # create client
    url = '/api/{}/'.format(AccountResource.__name__)
    client = ResourceTestClient(app, AccountResource)
    # get all accounts
    response = await client.get(url)
    # make sure we got a response object
    assert isinstance(response, Response)
    # parse response and retrieve data
    data = client.parse_response_data(response)
    total_count = data['meta']['total_count']
    # get only accounts which are designated Male gender
    response = await client.get(url, args={'gender': 'Male'})
    # parse response and retrieve data
    data = client.parse_response_data(response)
    male_count = data['meta']['total_count']
    # get only accounts which are designated Female gender
    response = await client.get(url, args={'gender': 'Female'})
    # parse response and retrieve data
    data = client.parse_response_data(response)
    female_count = data['meta']['total_count']
    assert female_count + male_count <= total_count
@pytest.mark.asyncio
async def test_mongo_collection_custom_indices(load_book_collection):
    ''' A custom primary key (isbn) drives resource URIs and uniqueness '''
    app = load_book_collection
    assert BookResource._meta.object_class.primary_key == 'isbn'
    assert BookResource._meta.object_class.primary_key_type == str
    # create client
    url = '/api/{}/'.format(BookResource.__name__)
    client = ResourceTestClient(app, BookResource)
    # get books
    response = await client.get(url)
    # make sure we got a response object
    assert isinstance(response, Response)
    assert response.status == verbs.OK
    # parse response and retrieve data
    data = client.parse_response_data(response)
    for obj in data['objects']:
        # verify that the unique isbn is part of the resource uri
        assert obj['isbn'] in obj['_links']['self']['href']
    # fail to insert a new book with existing isbn (unique index violation)
    new_book = {
        'isbn': data['objects'][0]['isbn'],
        'title': 'fake title'
    }
    response = await client.post(url, body=new_book)
    data = client.parse_response_data(response)
    assert response.status == 400
    assert 'error' in data
    assert 'duplicate' in data['error']
@pytest.mark.asyncio
async def test_nested_resources(load_book_collection):
    """Posting to a nested route (/<book>/reviews/add/) must create the
    sub-resource and answer with HTTP 201 CREATED.
    """
    app = load_book_collection
    # create client
    url = '/api/{}/'.format(BookResource.__name__)
    comment_url_template = '/api/{}/{}/reviews/add/'
    client = ResourceTestClient(app, BookResource)
    # review payload matching the embedded review document schema
    review = {
        'user': 'Ron Burgundy',
        'ratings': {
            'smooth_read': 4,
            'language': 5,
            'pace': 3,
            'originality': 2
        },
        'text': 'Good read, really enjoyed it, even though it took me so long to finish'
    }
    # get a book
    response = await client.get(url)
    assert response.status == OK
    data = client.parse_response_data(response)
    book_data = data['objects'][0]
    pk = book_data['isbn']
    # create new comment
    comment_url = comment_url_template.format(BookResource.__name__, pk)
    response = await client.post(comment_url, body=review)
    assert response.status == CREATED
| 475Cumulus/TBone | tests/resources/test_mongo_resources.py | Python | mit | 14,933 | [
"Brian"
] | ab23920a9f7036e860130e0cd6917ed430133e18249540a0228330e267bb8c2d |
#!/usr/bin/env python
# $Id: simpleExample.py 545 2012-01-18 06:10:03Z cvermilion $
#----------------------------------------------------------------------
# Copyright (c) 2010-12, Pierre-Antoine Delsart, Kurtis Geerlings, Joey Huston,
# Brian Martin, and Christopher Vermilion
#
#----------------------------------------------------------------------
# This file is part of SpartyJet.
#
# SpartyJet is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# SpartyJet is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SpartyJet; if not, write to the Free Software
# Foundation, Inc.:
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#----------------------------------------------------------------------
from spartyjet import *
#================================================
# SpartyJet example: read clusters from a text file, run anti-kt (R=0.4)
# jet finding, and write both text and ROOT ntuple output.
# Create a jet builder(MessageLevel = INFO)------
builder = SJ.JetBuilder(SJ.INFO)
# Create input object and add to builder --------
input = getInputMaker('../data/J1_Clusters.dat')
builder.configure_input(input)
# Create jet finder and add to builder ----------
name = 'AntiKt4'
alg = fj.antikt_algorithm
R = 0.4
antikt4Finder = SJ.FastJet.FastJetFinder(name, alg, R)
analysis = SJ.JetAnalysis(antikt4Finder)
builder.add_analysis(analysis)
# Shortcut:
#builder.add_default_analysis(SJ.FastJet.FastJetFinder('AntiKt4', fastjet.antikt_algorithm, 0.4))
# Add a jet measurement
builder.add_jetTool(SJ.EtaPhiMomentTool())
# Configure text output (optional) --------------
builder.add_text_output("../data/output/simple.dat")
# Configure ntuple output------------------------
outfile = "../data/output/simple.root"
builder.configure_output("SpartyJet_Tree", outfile)
builder.set_output_type(SJ.kVector, SJ.kDouble)
# Run SpartyJet: process the first 10 events ----
builder.process_events(10)
# Save this script in the ROOT file (needs to go after process_events or it
# gets over-written!)
writeCurrentFile(outfile)
| mickypaganini/SSI2016-jet-clustering | spartyjet-4.0.2_mac/examples_py/simpleExample.py | Python | mit | 2,381 | [
"Brian"
] | 5e180819fa72ab3960f777fde439bcf8d1f384ffe36e638baf229047c40fe644 |
from __future__ import print_function, unicode_literals, division, absolute_import
import numba
import numpy as np
from scipy.special import logsumexp
from scipy.ndimage.filters import gaussian_filter
from sklearn.neighbors import KernelDensity
from sklearn.model_selection import BaseCrossValidator
from sklearn.base import BaseEstimator
from tqdm import tqdm
from .precomputed_models import get_image_hash
from .roc import general_roc
from .numba_utils import fill_fixation_map
from .utils import inter_and_extrapolate
from . import Model, UniformModel
@numba.jit(nopython=True)
def _normalize_fixations(orig_xs, orig_ys, orig_ns, sizes, new_xs, new_ys, real_widths, real_heights):
    # Numba-compiled kernel: writes into the preallocated output arrays
    # new_xs, new_ys, real_widths, real_heights (no return value).
    # sizes[k] is the (height, width) of stimulus k, orig_ns[i] is the
    # stimulus index of fixation i.
    for i in range(len(orig_xs)):
        height, width = sizes[orig_ns[i]]
        # normalize pixel coordinates to [0, 1] relative to the image size
        new_xs[i] = orig_xs[i] / width
        new_ys[i] = orig_ys[i] / height
        real_widths[i] = width
        real_heights[i] = height
def normalize_fixations(stimuli, fixations, keep_aspect=False, add_shape=False, verbose=True):
    """Normalize fixation positions to [0, 1] coordinates relative to image size.

    With ``keep_aspect`` both coordinates are divided by the larger of width
    and height, preserving the aspect ratio of the images. With ``add_shape``
    the (relative) per-fixation image width and height are returned as well.

    NOTE(review): ``verbose`` is accepted but currently unused.
    """
    sizes = np.array(stimuli.sizes)
    xs = np.empty(len(fixations.x))
    ys = np.empty(len(fixations.x))
    widths = np.empty(len(fixations.x))
    heights = np.empty(len(fixations.x))
    # fills xs/ys/widths/heights per fixation (xs and ys are recomputed
    # below, so effectively only widths and heights are used from here)
    _normalize_fixations(fixations.x, fixations.y, fixations.n, sizes,
                         xs, ys, widths, heights)
    real_widths = widths.copy()
    real_heights = heights.copy()
    if keep_aspect:
        # normalize both axes by the larger image dimension
        max_size = np.max([widths, heights], axis=0)
        widths = max_size
        heights = max_size
    xs = fixations.x / widths
    ys = fixations.y / heights
    # relative image extent per fixation (1.0 for the larger dimension
    # when keep_aspect is used)
    real_widths /= widths
    real_heights /= heights
    if add_shape:
        return xs, ys, real_widths, real_heights
    return xs, ys
def fixations_to_scikit_learn(fixations, normalize=None, keep_aspect=False, add_shape=False,
                              add_stimulus_number=False,
                              add_fixation_number=False,
                              verbose=True):
    """Convert a fixations object into a scikit-learn style sample matrix.

    Returns one row per fixation. The first two columns are the (x, y)
    positions; if ``normalize`` is a stimuli object, positions are normalized
    to [0, 1] image coordinates and ``add_shape`` appends the relative image
    width/height columns. ``add_stimulus_number`` and ``add_fixation_number``
    append the stimulus index and the running fixation index respectively.
    """
    if normalize is None:
        xs = fixations.x
        ys = fixations.y
        data = [xs, ys]
        if add_shape:
            # shape columns are only defined for normalized coordinates
            raise NotImplementedError()
    else:
        data = normalize_fixations(normalize, fixations, keep_aspect=keep_aspect, add_shape=add_shape,
                                   verbose=verbose)
    if add_stimulus_number:
        data = list(data) + [fixations.n]
    if add_fixation_number:
        data = list(data) + [np.arange(len(fixations.n))]
    return np.vstack(data).T.copy()
class ScikitLearnImageCrossValidationGenerator(object):
    """Leave-one-image-out splitter: for every image that has at least one
    fixation, the fixations on that image form the test set and all other
    fixations form the training set.
    """

    def __init__(self, stimuli, fixations):
        self.stimuli = stimuli
        self.fixations = fixations

    def __iter__(self):
        for image_index in range(len(self.stimuli)):
            test_mask = self.fixations.n == image_index
            if not test_mask.sum():
                continue
            yield ~test_mask, test_mask

    def __len__(self):
        return len(self.stimuli)
class ScikitLearnImageSubjectCrossValidationGenerator(object):
    """Splitter holding out, within each image, one subject's fixations as
    test set and training on the remaining subjects' fixations on the same
    image. Splits where either side would be empty are skipped.
    """

    def __init__(self, stimuli, fixations):
        self.stimuli = stimuli
        self.fixations = fixations

    def __iter__(self):
        for image_index in range(len(self.stimuli)):
            image_mask = self.fixations.n == image_index
            for subject in range(self.fixations.subject_count):
                subject_mask = self.fixations.subjects == subject
                test_mask = image_mask & subject_mask
                train_mask = image_mask & ~subject_mask
                if not test_mask.any() or not train_mask.any():
                    continue
                yield train_mask, test_mask

    def __len__(self):
        # one split per (image, subject) pair that actually occurs
        return len(set(zip(self.fixations.n, self.fixations.subjects)))
class ScikitLearnWithinImageCrossValidationGenerator(object):
    """Random within-image splitter: the fixations of each image are shuffled
    (deterministically, via ``random_seed``) and divided into
    ``chunks_per_image`` folds; each non-empty fold becomes a test set with
    the image's remaining fixations as training set.
    """

    def __init__(self, stimuli, fixations, chunks_per_image=10, random_seed=42):
        self.stimuli = stimuli
        self.fixations = fixations
        self.chunks_per_image = chunks_per_image
        self.rng = np.random.RandomState(seed=random_seed)

    def __iter__(self):
        for image_index in range(len(self.stimuli)):
            image_mask = self.fixations.n == image_index
            candidate_indices = np.nonzero(image_mask)[0]
            self.rng.shuffle(candidate_indices)
            for fold in np.array_split(candidate_indices, self.chunks_per_image):
                if not len(fold):
                    continue
                test_mask = np.zeros_like(self.fixations.n)
                test_mask[fold] = 1
                test_mask = test_mask > 0.5
                yield image_mask & ~test_mask, test_mask

    def __len__(self):
        # upper bound: empty folds are skipped by __iter__
        return len(self.stimuli) * self.chunks_per_image
class RegularizedKernelDensityEstimator(BaseEstimator):
    """Gaussian KDE mixed with a uniform density.

    The model density is (1 - regularization) * KDE + regularization * uniform,
    keeping log-likelihoods finite far away from all training points.
    Samples are rows [x, y, height, width]: columns 0:2 hold the fixation
    position, columns 2:4 the image shape (used for the uniform component).
    """

    def __init__(self, bandwidth=1.0, regularization = 1.0e-5):
        self.bandwidth = bandwidth
        self.regularization = regularization

    def setup(self):
        # build the sklearn KDE and precompute log-space mixture constants
        self.kde = KernelDensity(kernel='gaussian', bandwidth=self.bandwidth)
        height, width = self.shape
        self.uniform_density = -np.log(width*height)
        self.kde_constant = np.log(1-self.regularization)
        self.uniform_constant = np.log(self.regularization)

    def fit(self, X):
        # NOTE(review): the image shape is taken from the first row only --
        # assumes all samples come from images of the same size.
        self.shape = X[0, 2:4]
        self.setup()
        self.kde.fit(X[:, 0:2])
        return self

    def score_samples(self, X):
        """Return per-sample log-likelihoods of the regularized mixture."""
        kde_logliks = self.kde.score_samples(X[:, :2])
        logliks = np.logaddexp(
            self.kde_constant + kde_logliks,
            self.uniform_constant + self.uniform_density
        )
        return logliks

    def score(self, X):
        """Summed log-likelihood (objective used by sklearn model selection)."""
        return np.sum(self.score_samples(X))
class MixtureKernelDensityEstimator(BaseEstimator):
    """Gaussian KDE mixed per-fixation with a precomputed regularizing model.

    Samples are rows [x, y, n] where n indexes into
    ``regularizing_log_likelihoods``, i.e. the log-likelihood of the
    regularizing model for that fixation. The model density is
    (1 - regularization) * KDE + regularization * regularizer.
    """

    def __init__(self, bandwidth=1.0, regularization = 1.0e-5, regularizing_log_likelihoods=None):
        self.bandwidth = bandwidth
        self.regularization = regularization
        #self.regularizer_model = regularizer_model
        ##self.stimuli = stimuli
        self.regularizing_log_likelihoods = regularizing_log_likelihoods

    def setup(self):
        # build the sklearn KDE and precompute log-space mixture weights
        self.kde = KernelDensity(kernel='gaussian', bandwidth=self.bandwidth)
        self.kde_constant = np.log(1-self.regularization)
        self.uniform_constant = np.log(self.regularization)

    def fit(self, X):
        assert X.shape[1] == 3
        self.setup()
        # only the (x, y) columns feed the KDE itself
        self.kde.fit(X[:, 0:2])
        return self

    def score_samples(self, X):
        assert X.shape[1] == 3
        kde_logliks = self.kde.score_samples(X[:, :2])
        # third column: fixation index into the precomputed log-likelihoods
        fix_ns = X[:, 2].astype(int)
        fix_lls = self.regularizing_log_likelihoods[fix_ns]
        logliks = np.logaddexp(
            self.kde_constant + kde_logliks,
            self.uniform_constant + fix_lls
        )
        return logliks

    def score(self, X):
        """Summed log-likelihood over all samples."""
        return np.sum(self.score_samples(X))
class AUCKernelDensityEstimator(BaseEstimator):
    """Gaussian KDE scored by AUC against a fixed set of nonfixations.

    ``score_samples`` returns, per (positive) sample, the ROC-AUC of its KDE
    log-likelihood against the log-likelihoods of the nonfixation positions,
    so ``score`` optimizes discrimination rather than likelihood.
    """

    def __init__(self, nonfixations, bandwidth=1.0):
        self.bandwidth = bandwidth
        self.nonfixations = nonfixations

    def setup(self):
        self.kde = KernelDensity(kernel='gaussian', bandwidth=self.bandwidth)

    def fit(self, X):
        self.setup()
        self.kde.fit(X)
        # precompute negative-class scores once per fit
        self.nonfixation_values = self.kde.score_samples(self.nonfixations)
        return self

    def score_samples(self, X):
        pos_logliks = self.kde.score_samples(X)
        neg_logliks = self.nonfixation_values
        # one-positive-vs-all-negatives AUC per sample
        aucs = [general_roc(np.array([p]), neg_logliks)[0] for p in pos_logliks]
        return aucs

    def score(self, X):
        return np.sum(self.score_samples(X))
class GoldModel(Model):
    """Image-specific 'gold standard' saliency model: a Gaussian blur of the
    fixation map of each image, mixed with a uniform density of weight ``eps``.
    ``bandwidth`` is relative to the image size (normalized coordinates).
    """

    def __init__(self, stimuli, fixations, bandwidth, eps = 1e-20, keep_aspect=False, verbose=False, **kwargs):
        super(GoldModel, self).__init__(**kwargs)
        self.stimuli = stimuli
        self.fixations = fixations
        self.bandwidth = bandwidth
        self.eps = eps
        self.keep_aspect = keep_aspect
        self.xs, self.ys = normalize_fixations(stimuli, fixations, keep_aspect=self.keep_aspect, verbose=verbose)
        # NOTE(review): never read or written in this class
        self.shape_cache = {}

    def _log_density(self, stimulus):
        shape = stimulus.shape[0], stimulus.shape[1]
        # identify the stimulus by its content hash
        stimulus_id = get_image_hash(stimulus)
        stimulus_index = self.stimuli.stimulus_ids.index(stimulus_id)
        #fixations = self.fixations[self.fixations.n == stimulus_index]
        inds = self.fixations.n == stimulus_index
        if not inds.sum():
            # no fixations recorded for this image: fall back to uniform
            return UniformModel().log_density(stimulus)
        ZZ = np.zeros(shape)
        if self.keep_aspect:
            height, width = shape
            max_size = max(width, height)
            x_factor = max_size
            y_factor = max_size
        else:
            x_factor = shape[1]
            y_factor = shape[0]
        # accumulate a fixation count map in pixel coordinates
        _fixations = np.array([self.ys[inds]*y_factor, self.xs[inds]*x_factor]).T
        fill_fixation_map(ZZ, _fixations)
        # blur with a Gaussian whose width is relative to image size
        ZZ = gaussian_filter(ZZ, [self.bandwidth*y_factor, self.bandwidth*x_factor])
        # mix in a uniform floor of weight eps and renormalize in log space
        ZZ *= (1-self.eps)
        ZZ += self.eps * 1.0/(shape[0]*shape[1])
        ZZ = np.log(ZZ)
        ZZ -= logsumexp(ZZ)
        #ZZ -= np.log(np.exp(ZZ).sum())
        return ZZ
class KDEGoldModel(Model):
    """Image-specific gold standard based on a true kernel density estimate
    (sklearn ``KernelDensity``) of each image's fixations, optionally
    evaluated on a coarse grid (``grid_spacing``) and interpolated to full
    resolution, then mixed with a uniform density of weight ``eps``.
    """

    def __init__(self, stimuli, fixations, bandwidth, eps=1e-20, keep_aspect=False, verbose=False, grid_spacing=1, **kwargs):
        super(KDEGoldModel, self).__init__(**kwargs)
        self.stimuli = stimuli
        self.fixations = fixations
        self.bandwidth = bandwidth
        self.eps = eps
        self.keep_aspect = keep_aspect
        self.grid_spacing = grid_spacing
        self.xs, self.ys = normalize_fixations(stimuli, fixations, keep_aspect=self.keep_aspect, verbose=verbose)
        # NOTE(review): never read or written in this class
        self.shape_cache = {}

    def _log_density(self, stimulus):
        shape = stimulus.shape[0], stimulus.shape[1]
        stimulus_id = get_image_hash(stimulus)
        stimulus_index = self.stimuli.stimulus_ids.index(stimulus_id)
        inds = self.fixations.n == stimulus_index
        if not inds.sum():
            # no fixations recorded for this image: fall back to uniform
            return UniformModel().log_density(stimulus)
        # fit the KDE on this image's fixations in normalized coordinates
        X = fixations_to_scikit_learn(
            self.fixations[inds], normalize=self.stimuli,
            keep_aspect=self.keep_aspect, add_shape=False, verbose=False)
        kde = KernelDensity(bandwidth=self.bandwidth).fit(X)
        height, width = shape
        if self.keep_aspect:
            max_size = max(height, width)
            rel_height = height / max_size
            rel_width = width / max_size
        else:
            rel_height = 1.0
            rel_width = 1.0
        # calculate the KDE score at the middle of each pixel:
        # for a width of 10 pixels, we are going to calculate at
        # 0.5, 1.5, ..., 9.5, since e.g. fixations with x coordinate between 0.0 and 1.0
        # will be evaluated at pixel index 0.
        xs = np.linspace(0, rel_width, num=width, endpoint=False) + 0.5 * rel_width / width
        ys = np.linspace(0, rel_height, num=height, endpoint=False) + 0.5 * rel_height / height
        if self.grid_spacing > 1:
            # subsample the evaluation grid; missing pixels are filled in below
            xs = xs[::self.grid_spacing]
            ys = ys[::self.grid_spacing]
        XX, YY = np.meshgrid(xs, ys)
        XX_flat = XX.flatten()
        YY_flat = YY.flatten()
        scores = kde.score_samples(np.column_stack((XX_flat, YY_flat)))
        if self.grid_spacing == 1:
            scores = scores.reshape((height, width))
        else:
            # scatter the coarse-grid scores into a NaN-filled full-resolution
            # grid and inter/extrapolate the gaps
            x_coordinates = np.arange(0, width)[::self.grid_spacing]
            y_coordinates = np.arange(0, height)[::self.grid_spacing]
            XX_coordinates, YY_coordinates = np.meshgrid(x_coordinates, y_coordinates)
            score_grid = np.empty((height, width)) * np.nan
            score_grid[YY_coordinates.flatten(), XX_coordinates.flatten()] = scores
            score_grid = inter_and_extrapolate(score_grid)
            scores = score_grid
        # renormalize, then mix in a uniform floor of weight eps (log space)
        scores -= logsumexp(scores)
        ZZ = scores
        if self.eps:
            ZZ = np.logaddexp(
                np.log(1 - self.eps) + scores,
                np.log(self.eps) - np.log(height * width)
            )
            ZZ -= logsumexp(ZZ)
        return ZZ
class CrossvalidatedBaselineModel(Model):
    """Leave-one-image-out baseline ('center bias') model: the density for an
    image is the blurred fixation map of all *other* images, mixed with a
    uniform density of weight ``eps``.
    """

    def __init__(self, stimuli, fixations, bandwidth, eps = 1e-20, **kwargs):
        super(CrossvalidatedBaselineModel, self).__init__(**kwargs)
        self.stimuli = stimuli
        self.fixations = fixations
        self.bandwidth = bandwidth
        self.eps = eps
        self.xs, self.ys = normalize_fixations(stimuli, fixations)
        #self.kde = KernelDensity(kernel='gaussian', bandwidth=bandwidth).fit(np.vstack([self.xs, self.ys]).T)
        # NOTE(review): never read or written in this class
        self.shape_cache = {}

    def _log_density(self, stimulus):
        shape = stimulus.shape[0], stimulus.shape[1]
        stimulus_id = get_image_hash(stimulus)
        stimulus_index = self.stimuli.stimulus_ids.index(stimulus_id)
        #fixations = self.fixations[self.fixations.n == stimulus_index]
        # use only fixations of all OTHER images (cross-validation)
        inds = self.fixations.n != stimulus_index
        ZZ = np.zeros(shape)
        _fixations = np.array([self.ys[inds]*shape[0], self.xs[inds]*shape[1]]).T
        fill_fixation_map(ZZ, _fixations)
        # blur with a Gaussian whose width is relative to image size
        ZZ = gaussian_filter(ZZ, [self.bandwidth*shape[0], self.bandwidth*shape[1]])
        # mix in a uniform floor of weight eps and renormalize in log space
        ZZ *= (1-self.eps)
        ZZ += self.eps * 1.0/(shape[0]*shape[1])
        ZZ = np.log(ZZ)
        ZZ -= logsumexp(ZZ)
        #ZZ -= np.log(np.exp(ZZ).sum())
        return ZZ
class BaselineModel(Model):
    """Baseline ('center bias') model: one density for all images, the
    blurred map of ALL fixations, mixed with a uniform density of weight
    ``eps``. Because the density only depends on the image shape, results
    are cached per shape.
    """

    def __init__(self, stimuli, fixations, bandwidth, eps = 1e-20, keep_aspect=False, **kwargs):
        super(BaselineModel, self).__init__(**kwargs)
        self.stimuli = stimuli
        self.fixations = fixations
        self.bandwidth = bandwidth
        self.eps = eps
        self.keep_aspect = keep_aspect
        self.xs, self.ys = normalize_fixations(stimuli, fixations, keep_aspect=keep_aspect)
        # maps image shape -> precomputed log-density
        self.shape_cache = {}

    def _log_density(self, stimulus):
        shape = stimulus.shape[0], stimulus.shape[1]
        if shape not in self.shape_cache:
            ZZ = np.zeros(shape)
            height, width = shape
            if self.keep_aspect:
                max_size = max(height, width)
                y_factor = max_size
                x_factor = max_size
            else:
                y_factor = height
                x_factor = width
            # accumulate all fixations into a count map in pixel coordinates
            _fixations = np.array([self.ys*y_factor, self.xs*x_factor]).T
            fill_fixation_map(ZZ, _fixations)
            # blur with a Gaussian whose width is relative to image size
            ZZ = gaussian_filter(ZZ, [self.bandwidth*y_factor, self.bandwidth*x_factor])
            # mix in a uniform floor of weight eps and renormalize in log space
            ZZ *= (1-self.eps)
            ZZ += self.eps * 1.0/(shape[0]*shape[1])
            ZZ = np.log(ZZ)
            ZZ -= logsumexp(ZZ)
            self.shape_cache[shape] = ZZ
        return self.shape_cache[shape]
| matthias-k/pysaliency | pysaliency/baseline_utils.py | Python | mit | 14,811 | [
"Gaussian"
] | c9647b7b55cdd702f45533849cd044920d15a4b45c990ece2b8909896f28587d |
""" Module holding function(s) creating the pilot wrapper.
This is a DIRAC-free module, so it could possibly be used also outside of DIRAC installations.
The main client of this module is the SiteDirector, that invokes the functions here more or less like this::
pilotFiles = getPilotFiles()
pilotFilesCompressedEncodedDict = getPilotFilesCompressedEncodedDict(pilotFiles)
localPilot = pilotWrapperScript(pilotFilesCompressedEncodedDict,
pilotOptions,
pilotExecDir)
_writePilotWrapperFile(localPilot=localPilot)
"""
import os
import tempfile
import shutil
import tarfile
import json
import base64
import bz2
from cStringIO import StringIO
import requests
def pilotWrapperScript(pilotFilesCompressedEncodedDict=None,
                       pilotOptions='',
                       pilotExecDir='',
                       envVariables=None):
  """ Returns the content of the pilot wrapper script.

      The pilot wrapper script is a bash script that invokes the system python. Linux only.

      :param pilotFilesCompressedEncodedDict: this is a possible dict of name:compressed+encoded content files.
                                              the proxy can be part of this, and of course the pilot files
      :type pilotFilesCompressedEncodedDict: dict
      :param pilotOptions: options with which to start the pilot
      :type pilotOptions: basestring
      :param pilotExecDir: pilot execution directory
      :type pilotExecDir: basestring
      :param envVariables: dict of environment variables (name -> value) to export in the wrapper
      :type envVariables: dict

      :returns: content of the pilot wrapper
      :rtype: basestring
  """

  if pilotFilesCompressedEncodedDict is None:
    pilotFilesCompressedEncodedDict = {}
  if envVariables is None:
    envVariables = {}

  # mString accumulates python code (for the generated script) that unpacks
  # each bz2+base64 encoded file to disk and makes it user-executable
  mString = ""
  for pfName, encodedPf in pilotFilesCompressedEncodedDict.iteritems():  # are there some pilot files to unpack?
    # then we create the unpacking string
    mString += """
try:
  with open('%(pfName)s', 'w') as fd:
    fd.write(bz2.decompress(base64.b64decode(\"\"\"%(encodedPf)s\"\"\")))
  os.chmod('%(pfName)s', stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
except BaseException as x:
  print >> sys.stderr, x
  shutil.rmtree(pilotWorkingDirectory)
  sys.exit(-1)
""" % {'encodedPf': encodedPf,
       'pfName': pfName}

  envVariablesString = ""
  for name, value in envVariables.iteritems():  # are there some environment variables to add?
    envVariablesString += """
os.environ[\"%(name)s\"]=\"%(value)s\"
""" % {'name': name,
       'value': value}
  mString = mString + envVariablesString

  # add X509_USER_PROXY to etablish pilot env in Cluster WNs
  if 'proxy' in pilotFilesCompressedEncodedDict:
    mString += """
os.environ['X509_USER_PROXY'] = os.path.join(pilotWorkingDirectory, 'proxy')
"""

  # the generated wrapper: bash here-doc feeding python2 code to the system
  # python (note %% escapes, since this template is itself %-formatted)
  localPilot = """#!/bin/bash
/usr/bin/env python << EOF
# imports
import os
import stat
import tempfile
import sys
import shutil
import base64
import bz2
import logging
import time
# setting up the logging
formatter = logging.Formatter(fmt='%%(asctime)s UTC %%(levelname)-8s %%(message)s', datefmt='%%Y-%%m-%%d %%H:%%M:%%S')
logging.Formatter.converter = time.gmtime
try:
  screen_handler = logging.StreamHandler(stream=sys.stdout)
except TypeError: # python2.6
  screen_handler = logging.StreamHandler(strm=sys.stdout)
screen_handler.setFormatter(formatter)
logger = logging.getLogger('pilotLogger')
logger.setLevel(logging.DEBUG)
logger.addHandler(screen_handler)
# just logging the environment as first thing
print '==========================================================='
logger.debug('Environment of execution host\\n')
for key, val in os.environ.iteritems():
  logger.debug(key + '=' + val)
print '===========================================================\\n'
# putting ourselves in the right directory
pilotExecDir = '%(pilotExecDir)s'
if not pilotExecDir:
  pilotExecDir = os.getcwd()
pilotWorkingDirectory = tempfile.mkdtemp(suffix='pilot', prefix='DIRAC_', dir=pilotExecDir)
pilotWorkingDirectory = os.path.realpath(pilotWorkingDirectory)
os.chdir(pilotWorkingDirectory)
logger.info("Launching dirac-pilot script from %%s" %%os.getcwd())
# unpacking lines
logger.info("But first unpacking pilot files")
%(mString)s
# now finally launching the pilot script (which should be called dirac-pilot.py)
cmd = "python dirac-pilot.py %(pilotOptions)s"
logger.info('Executing: %%s' %% cmd)
sys.stdout.flush()
os.system(cmd)
# and cleaning up
shutil.rmtree(pilotWorkingDirectory)
EOF
""" % {'mString': mString,
       'pilotOptions': pilotOptions,
       'pilotExecDir': pilotExecDir}

  return localPilot
def getPilotFilesCompressedEncodedDict(pilotFiles, proxy=None):
  """ this function will return the dictionary of pilot files names : encodedCompressedContent
      that we are going to send

      :param pilotFiles: list of pilot files
      :type pilotFiles: list
      :param proxy: proxy
      :type proxy: basestring
  """
  pilotFilesCompressedEncodedDict = {}

  for pf in pilotFiles:
    with open(pf, "r") as fd:
      pfContent = fd.read()
    # bz2 at maximum compression level (9), then base64 so the content can
    # be embedded in the text of the wrapper script
    # NOTE(review): python-2 semantics -- bz2.compress/b64encode on str
    pfContentEncoded = base64.b64encode(bz2.compress(pfContent, 9))
    pilotFilesCompressedEncodedDict[os.path.basename(pf)] = pfContentEncoded

  if proxy is not None:
    # the proxy travels under the reserved key 'proxy'
    compressedAndEncodedProxy = base64.b64encode(bz2.compress(proxy.dumpAllToString()['Value']))
    pilotFilesCompressedEncodedDict['proxy'] = compressedAndEncodedProxy

  return pilotFilesCompressedEncodedDict
def _writePilotWrapperFile(workingDirectory=None, localPilot=''):
""" write the localPilot string to a file, rurn the file name
:param workingDirectory: the directory where to store the pilot wrapper file
:type workingDirectory: basestring
:param localPilot: content of the pilot wrapper
:type localPilot: basestring
:returns: file name of the pilot wrapper
:rtype: basestring
"""
fd, name = tempfile.mkstemp(suffix='_pilotwrapper.py', prefix='DIRAC_', dir=workingDirectory)
with os.fdopen(fd, 'w') as pilotWrapper:
pilotWrapper.write(localPilot)
return name
def getPilotFiles(pilotFilesDir=None, pilotFilesLocation=None):
  """ get the pilot files to be sent in a local directory (this is for pilot3 files)

      :param pilotFilesDir: the directory where to store the pilot files
      :type pilotFilesDir: basestring
      :param pilotFilesLocation: URL (or local tarball name) from where to get the pilot files
      :type pilotFilesLocation: basestring

      :returns: list of pilot files (full path)
      :rtype: list
  """
  if pilotFilesDir is None:
    pilotFilesDir = os.getcwd()
  # make sure it's empty (FIX: rmtree used to fail when the directory
  # did not exist yet)
  if os.path.exists(pilotFilesDir):
    shutil.rmtree(pilotFilesDir)
  os.mkdir(pilotFilesDir)

  # getting the pilot files
  jsonCFG = None
  if pilotFilesLocation.startswith('http'):
    res = requests.get(pilotFilesLocation)
    if res.status_code != 200:
      raise IOError(res.text)
    fileObj = StringIO(res.content)
    tar = tarfile.open(fileobj=fileObj)
    # pilot.json lives next to the tarball and is fetched separately
    res = requests.get(os.path.join(os.path.dirname(pilotFilesLocation), 'pilot.json'))
    if res.status_code != 200:
      raise IOError(res.text)
    jsonCFG = res.json()
  else:  # maybe it's just a local file
    tar = tarfile.open(os.path.basename(pilotFilesLocation))

  tar.extractall(pilotFilesDir)
  # FIX: only write pilot.json when it was actually downloaded; the local
  # branch used to hit a NameError on the undefined jsonCFG here
  if jsonCFG is not None:
    with open(os.path.join(pilotFilesDir, 'pilot.json'), 'w') as fd:
      json.dump(jsonCFG, fd)

  # excluding some files that might got in
  pilotFiles = [pf for pf in os.listdir(pilotFilesDir) if pf not in ['__init__.py', 'dirac-install.py']]
  pilotFiles = [pf for pf in pilotFiles if pf.endswith('.py') or pf.endswith('.json')]
  pilotFiles = [os.path.join(pilotFilesDir, pf) for pf in pilotFiles]

  return pilotFiles
| petricm/DIRAC | WorkloadManagementSystem/Utilities/PilotWrapper.py | Python | gpl-3.0 | 7,685 | [
"DIRAC"
] | e70b468080ed7ec8c49954dfeb555364020d63efaa09cdca4859e6888d685557 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import logging
import numpy as np
import time
from pymatgen.core.structure import Structure
from pymatgen.core.sites import PeriodicSite
from monty.json import MSONable
from scipy.spatial import Voronoi
from pymatgen.analysis.chemenv.utils.coordination_geometry_utils import my_solid_angle
from pymatgen.analysis.chemenv.utils.coordination_geometry_utils import get_lower_and_upper_f
from pymatgen.analysis.chemenv.utils.coordination_geometry_utils import rectangle_surface_intersection
from pymatgen.analysis.chemenv.utils.defs_utils import AdditionalConditions
from pymatgen.analysis.chemenv.utils.math_utils import normal_cdf_step
"""
This module contains the object used to describe the possible bonded atoms based on a Voronoi analysis
"""
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__credits__ = "Geoffroy Hautier"
__version__ = "2.0"
__maintainer__ = "David Waroquiers"
__email__ = "david.waroquiers@gmail.com"
__date__ = "Feb 20, 2016"
def from_bson_voronoi_list(bson_nb_voro_list, structure):
    """
    Returns the voronoi_list needed for the VoronoiContainer object from a bson-encoded voronoi_list.

    :param bson_nb_voro_list: List, per site, of (periodic site description, dict) pairs involved in the Voronoi
    :param structure: Structure used to rebuild the PeriodicSites
    :return: The voronoi_list needed for the VoronoiContainer (with PeriodicSites as keys of the dictionary - not
        allowed in the BSON format)
    """
    voronoi_list = [None] * len(bson_nb_voro_list)
    for isite, voro in enumerate(bson_nb_voro_list):
        if voro is None or voro == 'None':
            continue
        voronoi_list[isite] = []
        for psd, dd in voro:
            struct_site = structure[dd['index']]
            # rebuild the neighbor as the structure site translated by the
            # stored periodic image vector psd[1]
            # NOTE(review): relies on PeriodicSite private attributes
            # (_species/_lattice) -- may break with newer pymatgen versions
            periodic_site = PeriodicSite(struct_site._species, struct_site.frac_coords + psd[1],
                                         struct_site._lattice, properties=struct_site.properties)
            voronoi_list[isite].append((periodic_site, dd))
    return voronoi_list
def from_bson_voronoi_list2(bson_nb_voro_list2, structure):
    """
    Returns the voronoi_list needed for the VoronoiContainer object from a bson-encoded voronoi_list.

    Unlike :func:`from_bson_voronoi_list`, the rebuilt PeriodicSite is stored
    inside each neighbor dict under the 'site' key instead of as a tuple.

    :param bson_nb_voro_list2: List, per site, of (periodic site description, dict) pairs involved in the Voronoi
    :param structure: Structure used to rebuild the PeriodicSites
    :return: The voronoi_list needed for the VoronoiContainer (with PeriodicSites as keys of the dictionary - not
        allowed in the BSON format)
    """
    voronoi_list = [None] * len(bson_nb_voro_list2)
    for isite, voro in enumerate(bson_nb_voro_list2):
        if voro is None or voro == 'None':
            continue
        voronoi_list[isite] = []
        for psd, dd in voro:
            struct_site = structure[dd['index']]
            # rebuild the neighbor as the structure site translated by the
            # stored periodic image vector psd[1]
            periodic_site = PeriodicSite(struct_site._species, struct_site.frac_coords + psd[1],
                                         struct_site._lattice, properties=struct_site.properties)
            dd['site'] = periodic_site
            voronoi_list[isite].append(dd)
    return voronoi_list
class DetailedVoronoiContainer(MSONable):
    """
    Class used to store the full Voronoi of a given structure.
    """
    AC = AdditionalConditions()
    # default cutoff radius for the neighbor search feeding the Voronoi construction
    default_voronoi_cutoff = 10.0
    # tolerances used to group neighbors whose normalized distance /
    # normalized solid angle are (nearly) equal
    default_normalized_distance_tolerance = 1e-5
    default_normalized_angle_tolerance = 1e-3
def __init__(self, structure=None, voronoi_list=None, voronoi_list2=None,
             voronoi_cutoff=default_voronoi_cutoff, isites=None,
             normalized_distance_tolerance=default_normalized_distance_tolerance,
             normalized_angle_tolerance=default_normalized_angle_tolerance,
             additional_conditions=None, valences=None,
             maximum_distance_factor=None, minimum_angle_factor=None):
    """
    Constructor for the VoronoiContainer object. Either a structure is given, in which case the Voronoi is
    computed, or the different components of the VoronoiContainer are given (used in the from_dict method)

    :param structure: Structure for which the Voronoi is computed
    :param voronoi_list: List of voronoi polyhedrons for each site
    :param voronoi_list2: Precomputed list of neighbor dicts per site; when given, the Voronoi analysis is skipped
    :param voronoi_cutoff: cutoff used for the voronoi
    :param isites: indices of sites for which the Voronoi has to be computed
    :param normalized_distance_tolerance: tolerance grouping neighbors with (nearly) equal normalized distance
    :param normalized_angle_tolerance: tolerance grouping neighbors with (nearly) equal normalized angle
    :param additional_conditions: additional conditions used (defaults to NONE and ONLY_ACB)
    :param valences: valences of the sites of the structure
    :param maximum_distance_factor: neighbors beyond this normalized distance are discarded
    :param minimum_angle_factor: restriction on the normalized solid angle of the neighbors
    :raise: RuntimeError if the Voronoi cannot be constructed
    """
    self.normalized_distance_tolerance = normalized_distance_tolerance
    self.normalized_angle_tolerance = normalized_angle_tolerance
    if additional_conditions is None:
        self.additional_conditions = [self.AC.NONE, self.AC.ONLY_ACB]
    else:
        self.additional_conditions = additional_conditions
    self.valences = valences
    self.maximum_distance_factor = maximum_distance_factor
    self.minimum_angle_factor = minimum_angle_factor
    if isites is None:
        indices = list(range(len(structure)))
    else:
        indices = isites
    self.structure = structure
    logging.info('Setting Voronoi list')
    if voronoi_list2 is not None:
        self.voronoi_list2 = voronoi_list2
    else:
        self.setup_voronoi_list(indices=indices, voronoi_cutoff=voronoi_cutoff)
    logging.info('Setting neighbors distances and angles')
    # FIX: time.clock() was removed in Python 3.8; fall back to it only when
    # perf_counter is unavailable (Python 2)
    timer = getattr(time, 'perf_counter', None) or time.clock
    t1 = timer()
    self.setup_neighbors_distances_and_angles(indices=indices)
    t2 = timer()
    logging.info('Neighbors distances and angles set up in {:.2f} seconds'.format(t2-t1))
def setup_voronoi_list(self, indices, voronoi_cutoff):
    """
    Set up of the voronoi list of neighbours by calling qhull

    :param indices: indices of the sites for which the Voronoi is needed
    :param voronoi_cutoff: Voronoi cutoff for the search of neighbours
    :raise RuntimeError: If an infinite vertex is found in the voronoi construction
    """
    self.voronoi_list2 = [None] * len(self.structure)
    self.voronoi_list_coords = [None] * len(self.structure)
    logging.info('Getting all neighbors in structure')
    struct_neighbors = self.structure.get_all_neighbors(voronoi_cutoff, include_index=True)
    # FIX: time.clock() was removed in Python 3.8; fall back to it only when
    # perf_counter is unavailable (Python 2)
    timer = getattr(time, 'perf_counter', None) or time.clock
    t1 = timer()
    logging.info('Setting up Voronoi list :')

    for jj, isite in enumerate(indices):
        logging.info(' - Voronoi analysis for site #{:d} ({:d}/{:d})'.format(isite, jj+1, len(indices)))
        site = self.structure[isite]
        # the central site is prepended so it is point #0 of the Voronoi input
        neighbors1 = [(site, 0.0, isite)]
        neighbors1.extend(struct_neighbors[isite])
        distances = [i[1] for i in sorted(neighbors1, key=lambda s: s[1])]
        neighbors = [i[0] for i in sorted(neighbors1, key=lambda s: s[1])]
        qvoronoi_input = [s.coords for s in neighbors]
        voro = Voronoi(points=qvoronoi_input, qhull_options="o Fv")
        all_vertices = voro.vertices

        results2 = []
        maxangle = 0.0
        mindist = 10000.0
        for iridge, ridge_points in enumerate(voro.ridge_points):
            # keep only ridges (facets) belonging to the central site's cell
            if 0 in ridge_points:
                ridge_vertices_indices = voro.ridge_vertices[iridge]
                if -1 in ridge_vertices_indices:
                    raise RuntimeError("This structure is pathological,"
                                       " infinite vertex in the voronoi "
                                       "construction")

                ridge_point2 = max(ridge_points)
                facets = [all_vertices[i] for i in ridge_vertices_indices]
                # solid angle under which the facet is seen from the site
                sa = my_solid_angle(site.coords, facets)
                maxangle = max([sa, maxangle])
                mindist = min([mindist, distances[ridge_point2]])
                # map the neighbor back to its index in the structure
                # NOTE(review): if no periodic image matches, myindex keeps
                # its value from the previous iteration
                for iii, sss in enumerate(self.structure):
                    if neighbors[ridge_point2].is_periodic_image(sss):
                        myindex = iii
                        break
                results2.append({'site': neighbors[ridge_point2],
                                 'angle': sa,
                                 'distance': distances[ridge_point2],
                                 'index': myindex})
        # normalize by the largest solid angle and the shortest distance
        for dd in results2:
            dd['normalized_angle'] = dd['angle'] / maxangle
            dd['normalized_distance'] = dd['distance'] / mindist
        self.voronoi_list2[isite] = results2
        self.voronoi_list_coords[isite] = np.array([dd['site'].coords for dd in results2])
    t2 = timer()
    logging.info('Voronoi list set up in {:.2f} seconds'.format(t2-t1))
    def setup_neighbors_distances_and_angles(self, indices):
        """
        Initializes the angle and distance separations.

        For each requested site, the Voronoi neighbors are grouped into
        "plateaus" of normalized distances and of normalized solid angles:
        neighbors whose normalized distance (resp. angle) lie within
        ``normalized_distance_tolerance`` (resp. ``normalized_angle_tolerance``)
        of the current group belong to the same plateau. Each plateau is stored
        as a dict with keys 'min'/'max' (plateau bounds), 'nb_indices' (indices
        of all neighbors up to and including this plateau), 'dnb_indices'
        (indices of the neighbors in this plateau only) and 'next' (value at
        which the following plateau starts).

        :param indices: indices of the sites for which the Voronoi is needed
        """
        self.neighbors_distances = [None] * len(self.structure)
        self.neighbors_normalized_distances = [None] * len(self.structure)
        self.neighbors_angles = [None] * len(self.structure)
        self.neighbors_normalized_angles = [None] * len(self.structure)
        for isite in indices:
            results = self.voronoi_list2[isite]
            if results is None:
                # No Voronoi data for this site (it was not requested).
                continue
            #Initializes neighbors distances and normalized distances groups
            self.neighbors_distances[isite] = []
            self.neighbors_normalized_distances[isite] = []
            normalized_distances = [nb_dict['normalized_distance'] for nb_dict in results]
            isorted_distances = np.argsort(normalized_distances)
            # Seed the first distance plateau with the closest neighbor.
            self.neighbors_normalized_distances[isite].append({'min': normalized_distances[isorted_distances[0]],
                                                               'max': normalized_distances[isorted_distances[0]]})
            self.neighbors_distances[isite].append({'min': results[isorted_distances[0]]['distance'],
                                                    'max': results[isorted_distances[0]]['distance']})
            icurrent = 0
            nb_indices = {int(isorted_distances[0])}
            dnb_indices = {int(isorted_distances[0])}
            # Walk neighbors in increasing normalized distance; the first
            # element re-tests the seed, which is absorbed by the isclose
            # comparison below.
            for idist in iter(isorted_distances):
                wd = normalized_distances[idist]
                if self.maximum_distance_factor is not None:
                    if wd > self.maximum_distance_factor:
                        # Beyond the distance cutoff: close the current
                        # plateau and stop grouping distances for this site.
                        self.neighbors_normalized_distances[isite][icurrent]['nb_indices'] = list(nb_indices)
                        self.neighbors_distances[isite][icurrent]['nb_indices'] = list(nb_indices)
                        self.neighbors_normalized_distances[isite][icurrent]['dnb_indices'] = list(dnb_indices)
                        self.neighbors_distances[isite][icurrent]['dnb_indices'] = list(dnb_indices)
                        break
                if np.isclose(wd, self.neighbors_normalized_distances[isite][icurrent]['max'],
                              rtol=0.0, atol=self.normalized_distance_tolerance):
                    # Same plateau: extend its upper bound.
                    self.neighbors_normalized_distances[isite][icurrent]['max'] = wd
                    self.neighbors_distances[isite][icurrent]['max'] = results[idist]['distance']
                    dnb_indices.add(int(idist))
                else:
                    # New plateau: finalize the indices of the previous one
                    # and open a fresh group starting at this distance.
                    self.neighbors_normalized_distances[isite][icurrent]['nb_indices'] = list(nb_indices)
                    self.neighbors_distances[isite][icurrent]['nb_indices'] = list(nb_indices)
                    self.neighbors_normalized_distances[isite][icurrent]['dnb_indices'] = list(dnb_indices)
                    self.neighbors_distances[isite][icurrent]['dnb_indices'] = list(dnb_indices)
                    dnb_indices = {int(idist)}
                    self.neighbors_normalized_distances[isite].append({'min': wd,
                                                                       'max': wd})
                    self.neighbors_distances[isite].append({'min': results[idist]['distance'],
                                                            'max': results[idist]['distance']})
                    icurrent += 1
                nb_indices.add(int(idist))
            else:
                # Loop exhausted without hitting the cutoff: close the last
                # plateau (for/else — only runs when no break occurred).
                self.neighbors_normalized_distances[isite][icurrent]['nb_indices'] = list(nb_indices)
                self.neighbors_distances[isite][icurrent]['nb_indices'] = list(nb_indices)
                self.neighbors_normalized_distances[isite][icurrent]['dnb_indices'] = list(dnb_indices)
                self.neighbors_distances[isite][icurrent]['dnb_indices'] = list(dnb_indices)
            # Link each distance plateau to the start of the next one.
            for idist in range(len(self.neighbors_distances[isite]) - 1):
                dist_dict = self.neighbors_distances[isite][idist]
                dist_dict_next = self.neighbors_distances[isite][idist+1]
                dist_dict['next'] = dist_dict_next['min']
                ndist_dict = self.neighbors_normalized_distances[isite][idist]
                ndist_dict_next = self.neighbors_normalized_distances[isite][idist + 1]
                ndist_dict['next'] = ndist_dict_next['min']
            # The last plateau's 'next' is set from the distance cutoff (or a
            # default Voronoi cutoff when no explicit cutoff was given).
            if self.maximum_distance_factor is not None:
                dfact = self.maximum_distance_factor
            else:
                dfact = self.default_voronoi_cutoff / self.neighbors_distances[isite][0]['min']
            self.neighbors_normalized_distances[isite][-1]['next'] = dfact
            self.neighbors_distances[isite][-1]['next'] = dfact * self.neighbors_distances[isite][0]['min']
            #Initializes neighbors angles and normalized angles groups
            self.neighbors_angles[isite] = []
            self.neighbors_normalized_angles[isite] = []
            normalized_angles = [nb_dict['normalized_angle'] for nb_dict in results]
            # Angles are walked in DECREASING order (largest solid angle
            # first), hence the reversed argsort.
            isorted_angles = np.argsort(normalized_angles)[::-1]
            self.neighbors_normalized_angles[isite].append({'max': normalized_angles[isorted_angles[0]],
                                                            'min': normalized_angles[isorted_angles[0]]})
            self.neighbors_angles[isite].append({'max': results[isorted_angles[0]]['angle'],
                                                 'min': results[isorted_angles[0]]['angle']})
            icurrent = 0
            nb_indices = {int(isorted_angles[0])}
            dnb_indices = {int(isorted_angles[0])}
            for iang in iter(isorted_angles):
                wa = normalized_angles[iang]
                if self.minimum_angle_factor is not None:
                    if wa < self.minimum_angle_factor:
                        # Below the angle cutoff: close the current plateau
                        # and stop grouping angles for this site.
                        self.neighbors_normalized_angles[isite][icurrent]['nb_indices'] = list(nb_indices)
                        self.neighbors_angles[isite][icurrent]['nb_indices'] = list(nb_indices)
                        self.neighbors_normalized_angles[isite][icurrent]['dnb_indices'] = list(dnb_indices)
                        self.neighbors_angles[isite][icurrent]['dnb_indices'] = list(dnb_indices)
                        break
                if np.isclose(wa, self.neighbors_normalized_angles[isite][icurrent]['min'],
                              rtol=0.0, atol=self.normalized_angle_tolerance):
                    # Same plateau: extend its lower bound.
                    self.neighbors_normalized_angles[isite][icurrent]['min'] = wa
                    self.neighbors_angles[isite][icurrent]['min'] = results[iang]['angle']
                    dnb_indices.add(int(iang))
                else:
                    # New plateau: finalize the previous group and open a
                    # fresh one starting at this angle.
                    self.neighbors_normalized_angles[isite][icurrent]['nb_indices'] = list(nb_indices)
                    self.neighbors_angles[isite][icurrent]['nb_indices'] = list(nb_indices)
                    self.neighbors_normalized_angles[isite][icurrent]['dnb_indices'] = list(dnb_indices)
                    self.neighbors_angles[isite][icurrent]['dnb_indices'] = list(dnb_indices)
                    dnb_indices = {int(iang)}
                    self.neighbors_normalized_angles[isite].append({'max': wa,
                                                                    'min': wa})
                    self.neighbors_angles[isite].append({'max': results[iang]['angle'],
                                                         'min': results[iang]['angle']})
                    icurrent += 1
                nb_indices.add(int(iang))
            else:
                # No break occurred: close the last angle plateau.
                self.neighbors_normalized_angles[isite][icurrent]['nb_indices'] = list(nb_indices)
                self.neighbors_angles[isite][icurrent]['nb_indices'] = list(nb_indices)
                self.neighbors_normalized_angles[isite][icurrent]['dnb_indices'] = list(dnb_indices)
                self.neighbors_angles[isite][icurrent]['dnb_indices'] = list(dnb_indices)
            # Link each angle plateau to the start (max) of the next one.
            for iang in range(len(self.neighbors_angles[isite]) - 1):
                ang_dict = self.neighbors_angles[isite][iang]
                ang_dict_next = self.neighbors_angles[isite][iang + 1]
                ang_dict['next'] = ang_dict_next['max']
                nang_dict = self.neighbors_normalized_angles[isite][iang]
                nang_dict_next = self.neighbors_normalized_angles[isite][iang + 1]
                nang_dict['next'] = nang_dict_next['max']
            # The last plateau's 'next' comes from the angle cutoff (0.0 when
            # no cutoff was given).
            if self.minimum_angle_factor is not None:
                afact = self.minimum_angle_factor
            else:
                afact = 0.0
            self.neighbors_normalized_angles[isite][-1]['next'] = afact
            self.neighbors_angles[isite][-1]['next'] = afact * self.neighbors_angles[isite][0]['max']
def _precompute_additional_conditions(self, ivoronoi, voronoi, valences):
additional_conditions = {ac: [] for ac in self.additional_conditions}
for ips, (ps, vals) in enumerate(voronoi):
for ac in self.additional_conditions:
additional_conditions[ac].append(self.AC.check_condition(condition=ac, structure=self.structure,
parameters={'valences': valences,
'neighbor_index': vals['index'],
'site_index': ivoronoi}))
return additional_conditions
def _precompute_distance_conditions(self, ivoronoi, voronoi):
distance_conditions = []
for idp, dp_dict in enumerate(self.neighbors_normalized_distances[ivoronoi]):
distance_conditions.append([])
dp = dp_dict['max']
for ips, (ps, vals) in enumerate(voronoi):
distance_conditions[idp].append(vals['normalized_distance'] <= dp or
np.isclose(vals['normalized_distance'], dp,
rtol=0.0, atol=self.normalized_distance_tolerance/2.0))
return distance_conditions
def _precompute_angle_conditions(self, ivoronoi, voronoi):
angle_conditions = []
for iap, ap_dict in enumerate(self.neighbors_normalized_angles[ivoronoi]):
angle_conditions.append([])
ap = ap_dict['max']
for ips, (ps, vals) in enumerate(voronoi):
angle_conditions[iap].append(vals['normalized_angle'] >= ap or
np.isclose(vals['normalized_angle'], ap,
rtol=0.0, atol=self.normalized_angle_tolerance/2.0))
return angle_conditions
def neighbors_map(self, isite, distfactor, angfactor, additional_condition):
if self.neighbors_normalized_distances[isite] is None:
return None
dist_where = np.argwhere(np.array([wd['min'] for wd in self.neighbors_normalized_distances[isite]]) <= distfactor)
if len(dist_where) == 0:
return None
idist = dist_where[-1][0]
ang_where = np.argwhere(np.array([wa['max'] for wa in self.neighbors_normalized_angles[isite]]) >= angfactor)
if len(ang_where) == 0:
return None
iang = ang_where[0][0]
if self.additional_conditions.count(additional_condition) != 1:
return None
i_additional_condition = self.additional_conditions.index(additional_condition)
return {'i_distfactor': idist, 'i_angfactor': iang, 'i_additional_condition': i_additional_condition}
def neighbors_surfaces(self, isite, surface_calculation_type=None, max_dist=2.0):
if self.voronoi_list2[isite] is None:
return None
bounds_and_limits = self.voronoi_parameters_bounds_and_limits(isite, surface_calculation_type, max_dist)
distance_bounds = bounds_and_limits['distance_bounds']
angle_bounds = bounds_and_limits['angle_bounds']
surfaces = np.zeros((len(distance_bounds), len(angle_bounds)), np.float)
for idp in range(len(distance_bounds) - 1):
this_dist_plateau = distance_bounds[idp + 1] - distance_bounds[idp]
for iap in range(len(angle_bounds) - 1):
this_ang_plateau = angle_bounds[iap + 1] - angle_bounds[iap]
surfaces[idp][iap] = np.absolute(this_dist_plateau*this_ang_plateau)
return surfaces
def neighbors_surfaces_bounded(self, isite, surface_calculation_options=None):
if self.voronoi_list2[isite] is None:
return None
if surface_calculation_options is None:
surface_calculation_options = {'type': 'standard_elliptic',
'distance_bounds': {'lower': 1.2, 'upper': 1.8},
'angle_bounds': {'lower': 0.1, 'upper': 0.8}}
if surface_calculation_options['type'] in ['standard_elliptic', 'standard_diamond', 'standard_spline']:
plot_type = {'distance_parameter': ('initial_normalized', None),
'angle_parameter': ('initial_normalized', None)}
else:
raise ValueError('Type "{}" for the surface calculation in DetailedVoronoiContainer '
'is invalid'.format(surface_calculation_options['type']))
max_dist = surface_calculation_options['distance_bounds']['upper'] + 0.1
bounds_and_limits = self.voronoi_parameters_bounds_and_limits(isite=isite,
plot_type=plot_type,
max_dist=max_dist)
distance_bounds = bounds_and_limits['distance_bounds']
angle_bounds = bounds_and_limits['angle_bounds']
lower_and_upper_functions = get_lower_and_upper_f(surface_calculation_options=surface_calculation_options)
mindist = surface_calculation_options['distance_bounds']['lower']
maxdist = surface_calculation_options['distance_bounds']['upper']
minang = surface_calculation_options['angle_bounds']['lower']
maxang = surface_calculation_options['angle_bounds']['upper']
f_lower = lower_and_upper_functions['lower']
f_upper = lower_and_upper_functions['upper']
surfaces = np.zeros((len(distance_bounds), len(angle_bounds)), np.float)
for idp in range(len(distance_bounds) - 1):
dp1 = distance_bounds[idp]
dp2 = distance_bounds[idp+1]
if dp2 < mindist or dp1 > maxdist:
continue
if dp1 < mindist:
d1 = mindist
else:
d1 = dp1
if dp2 > maxdist:
d2 = maxdist
else:
d2 = dp2
for iap in range(len(angle_bounds) - 1):
ap1 = angle_bounds[iap]
ap2 = angle_bounds[iap+1]
if ap1 > ap2:
ap1 = angle_bounds[iap + 1]
ap2 = angle_bounds[iap]
if ap2 < minang or ap1 > maxang:
continue
intersection, interror = rectangle_surface_intersection(rectangle=((d1, d2),
(ap1, ap2)),
f_lower=f_lower,
f_upper=f_upper,
bounds_lower=[mindist, maxdist],
bounds_upper=[mindist, maxdist],
check=False)
surfaces[idp][iap] = intersection
return surfaces
@staticmethod
def _get_vertices_dist_ang_indices(parameter_indices_list):
pp0 = [pp[0] for pp in parameter_indices_list]
pp1 = [pp[1] for pp in parameter_indices_list]
min_idist = min(pp0)
min_iang = min(pp1)
max_idist = max(pp0)
max_iang = max(pp1)
i_min_angs = np.argwhere(np.array(pp1) == min_iang)
i_max_dists = np.argwhere(np.array(pp0) == max_idist)
pp0_at_min_iang = [pp0[ii[0]] for ii in i_min_angs]
pp1_at_max_idist = [pp1[ii[0]] for ii in i_max_dists]
max_idist_at_min_iang = max(pp0_at_min_iang)
min_iang_at_max_idist = min(pp1_at_max_idist)
p1 = (min_idist, min_iang)
p2 = (max_idist_at_min_iang, min_iang)
p3 = (max_idist_at_min_iang, min_iang_at_max_idist)
p4 = (max_idist, min_iang_at_max_idist)
p5 = (max_idist, max_iang)
p6 = (min_idist, max_iang)
return [p1, p2, p3, p4, p5, p6]
def maps_and_surfaces(self, isite, surface_calculation_type=None, max_dist=2.0, additional_conditions=None):
if self.voronoi_list2[isite] is None:
return None
if additional_conditions is None:
additional_conditions = [self.AC.ONLY_ACB]
surfaces = self.neighbors_surfaces(isite=isite, surface_calculation_type=surface_calculation_type,
max_dist=max_dist)
maps_and_surfaces = []
for cn, value in self._unique_coordinated_neighbors_parameters_indices[isite].items():
for imap, list_parameters_indices in enumerate(value):
thissurf = 0.0
for (idp, iap, iacb) in list_parameters_indices:
if iacb in additional_conditions:
thissurf += surfaces[idp, iap]
maps_and_surfaces.append({'map': (cn, imap), 'surface': thissurf,
'parameters_indices': list_parameters_indices})
return maps_and_surfaces
def maps_and_surfaces_bounded(self, isite, surface_calculation_options=None, additional_conditions=None):
if self.voronoi_list2[isite] is None:
return None
if additional_conditions is None:
additional_conditions = [self.AC.ONLY_ACB]
surfaces = self.neighbors_surfaces_bounded(isite=isite, surface_calculation_options=surface_calculation_options)
maps_and_surfaces = []
for cn, value in self._unique_coordinated_neighbors_parameters_indices[isite].items():
for imap, list_parameters_indices in enumerate(value):
thissurf = 0.0
for (idp, iap, iacb) in list_parameters_indices:
if iacb in additional_conditions:
thissurf += surfaces[idp, iap]
maps_and_surfaces.append({'map': (cn, imap), 'surface': thissurf,
'parameters_indices': list_parameters_indices})
return maps_and_surfaces
def neighbors(self, isite, distfactor, angfactor, additional_condition=None):
idist = None
dfact = None
for iwd, wd in enumerate(self.neighbors_normalized_distances[isite]):
if distfactor >= wd['min']:
idist = iwd
dfact = wd['max']
else:
break
iang = None
afact = None
for iwa, wa in enumerate(self.neighbors_normalized_angles[isite]):
if angfactor <= wa['max']:
iang = iwa
afact = wa['min']
else:
break
if idist is None or iang is None:
raise ValueError('Distance or angle parameter not found ...')
return [nb for nb in self.voronoi_list2[isite] if
nb['normalized_distance'] <= dfact and nb['normalized_angle'] >= afact]
def voronoi_parameters_bounds_and_limits(self, isite, plot_type, max_dist):
#Initializes the distance and angle parameters
if self.voronoi_list2[isite] is None:
return None
if plot_type is None:
plot_type = {'distance_parameter': ('initial_inverse_opposite', None),
'angle_parameter': ('initial_opposite', None)}
dd = [dist['min'] for dist in self.neighbors_normalized_distances[isite]]
dd[0] = 1.0
if plot_type['distance_parameter'][0] == 'initial_normalized':
dd.append(max_dist)
distance_bounds = np.array(dd)
dist_limits = [1.0, max_dist]
elif plot_type['distance_parameter'][0] == 'initial_inverse_opposite':
ddinv = [1.0 / dist for dist in dd]
ddinv.append(0.0)
distance_bounds = np.array([1.0 - invdist for invdist in ddinv])
dist_limits = [0.0, 1.0]
elif plot_type['distance_parameter'][0] == 'initial_inverse3_opposite':
ddinv = [1.0 / dist**3.0 for dist in dd]
ddinv.append(0.0)
distance_bounds = np.array([1.0 - invdist for invdist in ddinv])
dist_limits = [0.0, 1.0]
else:
raise NotImplementedError('Plotting type "{}" '
'for the distance is not implemented'.format(plot_type['distance_parameter']))
if plot_type['angle_parameter'][0] == 'initial_normalized':
aa = [0.0]
aa.extend([ang['max'] for ang in self.neighbors_normalized_angles[isite]])
angle_bounds = np.array(aa)
elif plot_type['angle_parameter'][0] == 'initial_opposite':
aa = [0.0]
aa.extend([ang['max'] for ang in self.neighbors_normalized_angles[isite]])
aa = [1.0 - ang for ang in aa]
angle_bounds = np.array(aa)
else:
raise NotImplementedError('Plotting type "{}" '
'for the angle is not implemented'.format(plot_type['angle_parameter']))
ang_limits = [0.0, 1.0]
return {'distance_bounds': distance_bounds, 'distance_limits': dist_limits,
'angle_bounds': angle_bounds, 'angle_limits': ang_limits}
def is_close_to(self, other, rtol=0.0, atol=1e-8):
isclose = (np.isclose(self.normalized_angle_tolerance, other.normalized_angle_tolerance,
rtol=rtol, atol=atol) and
np.isclose(self.normalized_distance_tolerance, other.normalized_distance_tolerance,
rtol=rtol, atol=atol) and
self.additional_conditions == other.additional_conditions and
self.valences == other.valences)
if not isclose:
return isclose
for isite, site_voronoi in enumerate(self.voronoi_list2):
self_to_other_nbs = {}
for inb, nb in enumerate(site_voronoi):
if nb is None:
if other.voronoi_list2[isite] is None:
continue
else:
return False
else:
if other.voronoi_list2[isite] is None:
return False
nb_other = None
for inb2, nb2 in enumerate(other.voronoi_list2[isite]):
if nb['site'] == nb2['site']:
self_to_other_nbs[inb] = inb2
nb_other = nb2
break
if nb_other is None:
return False
if not np.isclose(nb['distance'], nb_other['distance'],
rtol=rtol, atol=atol):
return False
if not np.isclose(nb['angle'], nb_other['angle'],
rtol=rtol, atol=atol):
return False
if not np.isclose(nb['normalized_distance'], nb_other['normalized_distance'],
rtol=rtol, atol=atol):
return False
if not np.isclose(nb['normalized_angle'], nb_other['normalized_angle'],
rtol=rtol, atol=atol):
return False
if nb['index'] != nb_other['index']:
return False
if nb['site'] != nb_other['site']:
return False
return True
def get_rdf_figure(self, isite, normalized=True, figsize=None,
step_function=None):
def dp_func(dp):
return 1.0 - 1.0 / np.power(dp, 3.0)
import matplotlib.pyplot as plt
if step_function is None:
step_function = {'type': 'normal_cdf', 'scale': 0.0001}
# Initializes the figure
if figsize is None:
fig = plt.figure()
else:
fig = plt.figure(figsize=figsize)
subplot = fig.add_subplot(111)
if normalized:
dists = self.neighbors_normalized_distances[isite]
else:
dists = self.neighbors_distances[isite]
if step_function['type'] == 'step_function':
isorted = np.argsort([dd['min'] for dd in dists])
sorted_dists = [dists[ii]['min'] for ii in isorted]
dnb_dists = [len(dists[ii]['dnb_indices']) for ii in isorted]
xx = [0.0]
yy = [0.0]
for idist, dist in enumerate(sorted_dists):
xx.append(dist)
xx.append(dist)
yy.append(yy[-1])
yy.append(yy[-1]+dnb_dists[idist])
xx.append(1.1*xx[-1])
yy.append(yy[-1])
elif step_function['type'] == 'normal_cdf':
scale = step_function['scale']
mydists = [dp_func(dd['min']) for dd in dists]
mydcns = [len(dd['dnb_indices']) for dd in dists]
xx = np.linspace(0.0, 1.1*max(mydists), num=500)
yy = np.zeros_like(xx)
for idist, dist in enumerate(mydists):
yy += mydcns[idist] * normal_cdf_step(xx, mean=dist, scale=scale)
else:
raise ValueError('Step function of type "{}" is not allowed'.format(step_function['type']))
subplot.plot(xx, yy)
return fig
def get_sadf_figure(self, isite, normalized=True, figsize=None,
step_function=None):
def ap_func(ap):
return np.power(ap, -0.1)
import matplotlib.pyplot as plt
if step_function is None:
step_function = {'type': 'step_function', 'scale': 0.0001}
# Initializes the figure
if figsize is None:
fig = plt.figure()
else:
fig = plt.figure(figsize=figsize)
subplot = fig.add_subplot(111)
if normalized:
angs = self.neighbors_normalized_angles[isite]
else:
angs = self.neighbors_angles[isite]
if step_function['type'] == 'step_function':
isorted = np.argsort([ap_func(aa['min']) for aa in angs])
sorted_angs = [ap_func(angs[ii]['min']) for ii in isorted]
dnb_angs = [len(angs[ii]['dnb_indices']) for ii in isorted]
xx = [0.0]
yy = [0.0]
for iang, ang in enumerate(sorted_angs):
xx.append(ang)
xx.append(ang)
yy.append(yy[-1])
yy.append(yy[-1]+dnb_angs[iang])
xx.append(1.1*xx[-1])
yy.append(yy[-1])
elif step_function['type'] == 'normal_cdf':
scale = step_function['scale']
myangs = [ap_func(aa['min']) for aa in angs]
mydcns = [len(dd['dnb_indices']) for dd in angs]
xx = np.linspace(0.0, 1.1*max(myangs), num=500)
yy = np.zeros_like(xx)
for iang, ang in enumerate(myangs):
yy += mydcns[iang] * normal_cdf_step(xx, mean=ang, scale=scale)
else:
raise ValueError('Step function of type "{}" is not allowed'.format(step_function['type']))
subplot.plot(xx, yy)
return fig
def __eq__(self, other):
return (self.normalized_angle_tolerance == other.normalized_angle_tolerance and
self.normalized_distance_tolerance == other.normalized_distance_tolerance and
self.additional_conditions == other.additional_conditions and
self.valences == other.valences and
self.voronoi_list2 == other.voronoi_list2 and
self.structure == other.structure)
    def __ne__(self, other):
        # Delegate to __eq__ (via ==) so both comparisons stay consistent.
        return not self == other
def to_bson_voronoi_list2(self):
"""
Transforms the voronoi_list into a vlist + bson_nb_voro_list, that are BSON-encodable.
:return: [vlist, bson_nb_voro_list], to be used in the as_dict method
"""
bson_nb_voro_list2 = [None] * len(self.voronoi_list2)
for ivoro, voro in enumerate(self.voronoi_list2):
if voro is None or voro == 'None':
continue
site_voro = []
# {'site': neighbors[nn[1]],
# 'angle': sa,
# 'distance': distances[nn[1]],
# 'index': myindex}
for nb_dict in voro:
site = nb_dict['site']
site_dict = {key: val for key, val in nb_dict.items() if key not in ['site']}
#site_voro.append([ps.as_dict(), dd]) [float(c) for c in self.frac_coords]
diff = site.frac_coords - self.structure[nb_dict['index']].frac_coords
site_voro.append([[nb_dict['index'], [float(c) for c in diff]],
site_dict])
bson_nb_voro_list2[ivoro] = site_voro
return bson_nb_voro_list2
def as_dict(self):
"""
Bson-serializable dict representation of the VoronoiContainer.
:return: dictionary that is BSON-encodable
"""
bson_nb_voro_list2 = self.to_bson_voronoi_list2()
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"bson_nb_voro_list2": bson_nb_voro_list2,
# "neighbors_lists": self.neighbors_lists,
"structure": self.structure.as_dict(),
"normalized_angle_tolerance": self.normalized_angle_tolerance,
"normalized_distance_tolerance": self.normalized_distance_tolerance,
"additional_conditions": self.additional_conditions,
"valences": self.valences,
"maximum_distance_factor": self.maximum_distance_factor,
"minimum_angle_factor": self.minimum_angle_factor}
@classmethod
def from_dict(cls, d):
"""
Reconstructs the VoronoiContainer object from a dict representation of the VoronoiContainer created using
the as_dict method.
:param d: dict representation of the VoronoiContainer object
:return: VoronoiContainer object
"""
structure = Structure.from_dict(d['structure'])
voronoi_list2 = from_bson_voronoi_list2(d['bson_nb_voro_list2'], structure)
maximum_distance_factor = d['maximum_distance_factor'] if 'maximum_distance_factor' in d else None
minimum_angle_factor = d['minimum_angle_factor'] if 'minimum_angle_factor' in d else None
return cls(structure=structure, voronoi_list2=voronoi_list2,
# neighbors_lists=neighbors_lists,
normalized_angle_tolerance=d['normalized_angle_tolerance'],
normalized_distance_tolerance=d['normalized_distance_tolerance'],
additional_conditions=d['additional_conditions'],
valences=d['valences'],
maximum_distance_factor=maximum_distance_factor,
minimum_angle_factor=minimum_angle_factor)
| dongsenfo/pymatgen | pymatgen/analysis/chemenv/coordination_environments/voronoi.py | Python | mit | 40,607 | [
"pymatgen"
] | 48d1f92fbecbd6019e1d939b3373914b307d6e093e67b01fa17e1da18b4274bc |
# -*- coding: utf-8 -*-
# Copyright (C) 2012, Almar Klein
#
# Visvis is distributed under the terms of the (new) BSD License.
# The full license can be found in 'license.txt'.
import numpy as np
import visvis as vv
import zlib, base64
# todo: parameterize; allow custom Gaussian blobs at specified positions.
if False:
    # One-time helper used to regenerate zData below from data exported from
    # Matlab. Kept for reference only; the guard ensures it never executes.
    # Get data (exported from Matlab)
    from visvis import ssdf
    db = ssdf.load('d:/almar/projects/peaks.ssdf')
    data = db.z.astype(np.float32)
    # Dump: compress the raw float32 bytes and base64-encode them.
    # ndarray.tostring() was removed from numpy -> tobytes().
    data = data.tobytes()
    data = zlib.compress(data)
    # base64.encodestring() was removed in Python 3.9 -> encodebytes().
    text = base64.encodebytes(data)
    # print was a Py2 statement; use the Py3 print function so the module
    # still parses under Python 3.
    print(text)
def peaks():
    """ peaks()

    Returs a 2D map of z-values that represent an example landscape with
    Gaussian blobs.
    """
    # Recover the raw float32 bytes from the embedded base64 + zlib blob.
    raw = zlib.decompress(base64.decodestring(zData))
    # Interpret the bytes as a 49x49 float32 height map.
    heightmap = np.frombuffer(raw, dtype=np.float32)
    heightmap.shape = 49, 49
    # Done!
    return heightmap
zData = """
eJwNl4c/Vf8Dxu29MiKrgShkpLr3nvMZDamMopS+UUpIi9KQSoisUFnRUBpkVpRx7zmfU4oQoqVI
pNIiWQ3F7/cnPK/383o/r2f2pwSe8lUpftPIHr6UyjP+8DJbQX5osiDCu0OwqW4a9cvbi6qxTaLM
xXIpg6sFlJtNLKW0LU6g1HlVGFV1QHS+dFg0I9OJyVXKYrbUvmDOX1RivTbPYT3fOrOVdttY6nko
2/M7gn3pG8G+EB5kg+dsZU+sWcF+f2jBVuupsD623cySdwXMnoojjNJHWwY0DIl2NmaLOryXi/qK
h4Ra3onC5r/6wqMbr1ddyTCtyjuWXdk6rlx50nNbxYbwvPLxBw/u/lf6iqdg4cKvYYR8iVnGAlFa
nEAY0SE4ucGMmlIQTBk/vEb56rZQDur91Lv6D5RSSzrl7ZtflfFomSgyc1hUbObDjH8rZ8I/yLKL
zSj2y9+9rL0wjS0fKmHXcw9ZV+nn7G+HDnbO19esln0LOz2eY/em5LPJSafZWa6BrHsCYM0vq7EH
zz1joo+kM4GTnBlqpwzjHXBDtEjOWdQ51CfcNjNK+EJbSThqmVgVsUumqig8sPLq9AcVHjtVKo41
zCyvuGnEP/cri393XEkwZHtA8OZNoyBguzH1pO0QhWJKqfyCH5RaqxZtsNGEHu3WpKU6z1DTvxOh
099aERjbwJg5P2A8c/TYaao+7FXZc+w0zUfspMgRdlWtKrlgNZ3krLAke7/aktMZNmQiYxZR/qRH
GmUUSMfOr6xW0D22ZkYmO/NsAAsumrO9zwcZvOkaY1LkwQwFyTFOf6+KLixGou8zngkr93kLb2V2
Vx0XW1vVnshUgkdTKlN2b6hQ7tpf3nRlF/+NSTffvsdJ4NRyW6DnpUWt0dtN/VhdRs34LEY/07Wg
bQLc6AGVLfQhO0iX2e6nFgWtFXFP5zCzVO8yb3IN2UL3EPZrWSnrpzPMXuvTJ/MUMRk54k1ilh8m
st9OkE6LBDIqH0+smOMkWnMv2TlvLUnLmE9WDmkRheV9rMGbMjaiNooNvUuxrT4TzLLJuYyephsz
Oeyf6FjcGRE7PF1UkJQvLHebLXTSvFR1vVmxKjDNq5IdTK/IyEwtx/Ou86lWA4FWZ7KgddeYQNXU
jaoIz6UcD/+j5jfZ0WkeQbTsndN0yLNM2mTfAXpdN5960VwpMj8Yx/gEKbKnbIJYvfoqdkqeDHlw
YD5pBtuJj2Uy6Ye5ZIGhkEx784isMWom53wbScnvanJVooxspLLJ5MIYYlK+kWRqW5PBKTLkTeED
NrU1nv0ZQ7N42igjcMhi4kL4TLL/cxHPe7voP9GwUCNmj7DT4G3VSB6oUl+eXDnWVFJx2Mu3fLnD
W770tzWCz8/uC8JbZlPhtsnU1/b31Kd4S/rK+EH6hvg1ekjtEb26spUul8ulhS4ulF28DDOtpJo5
eMuebZ+Uz0oBKfK+AhK35RFERvM6aTNrIO4H+sjtL1Kc5j0Vbs9SDe6fpjpH/BU5KWUx7mH4O/Lr
C0fG4UVSJRFKqk0hGZmnSJSmPGB310WxH4QWbGPSS0a5KZzZoarB/HqVLXI7bSxyMc0WFp1WEdYh
vyp56wuVo3anKi7Wyy2Q5qsKxk/HCd4H/hMMpW6ishNEVGyiNu0h8KeVV1yny3920ntPyIHnhurA
sWSA/p3xg2LsZjB12X+YTLUItmldFzv7vBXxjw4jItsi8mLOW/KuQ45zdpvG2TTN5eS3LeQO1Thw
u9ct5Up9AbdXdg5X8mUK56wkyek+e0Eaw26QEzfDyZdfFBH9kiRn/95i3Zf7sVklKuyR0Twm9hRk
rt1pENm0OIn8rYmwIG+6kNPZXqVsvqcy9vXI/MrmeN7jWkvBzp48QQoyoF4anaRW3umnkhsX0o07
0ujoY6/osVENkP+eB7afdwIyU+eDGeKX6eRpWswjS2X25ZpMNuyOHBHbvJ68EGQTx72vieUlZW7T
mTncIndnbpKaP4f4h7jDE+GccHk4N81pP1d6cjNXEOXAOTTP4tSVFDkLhXbyrOcaaT+4h2xnzMhN
i3es7YzT7KJ+Gzb+VjPDL/FlyMN+0cyFfqI7zY+Eu3y1hE9y9ar2XZ/EE5u/in/eZynf6/NiwQLD
+4LYTwuo5uTrlNckNVozZRetMKuKfjQsDw68QIC8PABuKyaDA9NPApdhHthtFSiqy5Jk1Y5dZoVp
2uSgRQg571pJBnvEuPb3Zlz4Z1duX0IIp5Z5kmNdznI75l/k2u9d4IZ1M7hzOglclvkBbtF2V+4Q
MOe+nZThotQaSKR8ConRdCLWEbLEiRSwYnIr2Q2a3xiJ0COM3uHfoqeWXqLQhitCSSaz6pz0KX7v
TFPB5gYdQcKSdv6Fh/YCj6F6wTKDpVQEqqBkXIxohbY4utrmDR1lYAb+FgUBw4JLQP3QA2Dx7ilY
2XMHuLuqghkyOUzPxUT253xN8m9mOFk4+JBIflTitBopLu7wTu5wcCI35b/LnMxACXextJwb7ynn
KrNvcuWvc7jkOac4daP93BuFZZxEtj63d8pXEtGfRzwn/MmJVm2y/aKIzVHxYmW//GDA7b3MjMYX
oof62iJtKVJl9EAkWO8jQ3mU9gqMj10TrEteLQgzmiFwmVYmUOgWUPD8XcrvqRmt/TeFrlEepedw
C8Fp+VOg89cjEGcqAe87ToHVXfrw5r0hcPYvQ+/dZ8J+ivjG2gz7kls8hrQDJW77Msythwe5V0vT
uespRdzn6YTT1q7jevbWc8v41ZzGzjLu9ItsTrsyhqtq8eRWD5pzMT8nyCXRHbLPbz/5/Fif+NiK
2Aw/V9axuJU5JTaPWbx6g2jvDZ4gc6KSCrFspPR98qhSl0Bq4K4utda0SnBZP4ofNeYq8HKSoqwr
zlBBxZp0mvJJWvO/n/TuFGdgNXgJrPn1DQzxjeDclaugZege2OIfDKMLKbjAezbIKvRmg/dNI7t+
niXfc0bIw7k2nK/mTi7UPoXbFVrEmX6+xx1e38QZhLVw2pYNXKy3iHsglsvlnkjkPuj6cnteW3M9
78S5uy/KSAzYRdqsJpGBthxWEk1n83rDmPKNQLRyCY9WlUmn2y+eotvk/Oj2ACP6pM4Lav+O49S8
dB2qovNbpUE7qfRdAfm3X38Q9O4opkK3OtICzyb61S8AhtMvA11FMVgRC+Dxhki42ToXdohXw6qu
R3AgJReK3mrDI2QBO1tqDhm/UkQGfytyI1ccOJ+ASK6XXORaH9zhznY94JJ31HOWvx5yRpsquN8J
VzijKfHcrAIv7vpls/9nHiRvbuaQkLHlZGZsN8u7uIZtU4thvJExvfGeFRj47ALMdQXA79MkMDP/
CY3/xNMzgubQktXN1MCHHVTVRGvVpcQpwpoqB+HGZWuFNf2ywoLValT3XQe6a484aFwYDUqiBkDk
JHs48jAVLlF9AmeflUG7GqaiiOLZKGebPiq58xSa3p4KroXJko6uK2TGJwVuxrgDV3gsgltzJYv7
FVzI3e0p45ZalHI9X3K5/KFULvT5Ia48zJ6j1qhz58zqSGn8YZIvK08GU3zZ6IRBUaFiFvCa+QP8
bfkN8h6/AsGV+eDnwYMALDIDR4+20TrhUfRgly59372IenHwP+GTP8XCex8lRexZnohpCBDZTDsq
colZJHLJSxYUTR6hF73NA6sqZ8MWLgX+y/kAnzkZofSf7ih1LBzZlSWiPUHxqOz7ZiSzfxg+Fldh
w3wDSb7hN/LHw4Z7XOjHRZfGcNVLU7iSt6mce/1J7sZ/h7hiexduo7sh57Cjk4iL4knifyqkPHsO
q706Bzyz94QhdrHQqCUaLsj1h/+lLYCrKmThqTkMsIehoIQYgOsmLP3fpQ205bEhylhSKPw9rCfq
6z0okrtZJUqXk2IqXlsw7Q8dmYWyjkySgjzTfdaAjt3HAI/BbXDAuQ2yupao1+YQGpl0FT3Jr0GN
pzvQQ9EbdK6kGi3bkIC2LnoN1T43snld14l/mAIXnLKAM25cw7V2beRqbrhzx8VobvrAZK5f/RVx
WRlNBs98Zz8PXRfJd52GqlGf4ZdDUqjCVgyZDr2FJsp3oP6qOMiMLYHya2WgeFsx2C1cD5x/iYPb
Y6n0WgtjWiFJVVTRs0+UrvdIlKirz5zY5s9otl1mYlyeMoq3/jJZFtLs50NvmUZZKyZH7g7YNJQJ
uxV1Udqbw6j37B3UkdiHFh1TwXx5Azx30AD/2qyCyYwOVKN3DC02VIdfXPkELGSI7S8x7sCMSZwD
X4UboUdI/fa7ZEqyB6kQJrCHd2+CX29oIrk4JyRrsgWlnvFEnvVLUGSsEVrzZALeOMNA/7Jo+EHD
Dn6e+gmc2ZsMVMFsQC24T7vZutPOsk6inYk3RTer1Zn4pwHM0e+3mMHHYuwZF2u2RHsLaySMZntK
EtmkvFA200GFPVccA8QaayBPZQ3KTC1GQ8O/0NuaqThnN4Ujny3H/U+W4upea3xSXAEHkmI02imB
+nvjWcl2T/LxYAZxKMwiplohJGy3Ajl4vkwUJfUHFgoC0BL3TKRck49e5OejsSkXUJRhDCrV2ICS
vc1Q+eZRmK1fCL/q+cH0AxpwU9AdoJW0FqirD9PJ16Pph+tjRcC9X9Sxexlj2nOZeXz6DzMH89kC
10h25ZEiNri4jfXx/8Oe2idBAni9bFtBMhtrNUHvXfIP1uicRFF271Eba4jrouyxyv1N+KPONvzv
ywa8wYXGa2arYLriNvLh6yMP7QvCnq67bPyNp+zKh87sy8xVUPGkLZo4kY4uTK1FoXnv0f5tfSg2
oweZ/mxALQsLkefrODTwyRXZHNNBPp+fwiXqiRDz7OAP8zdgZ3gk6JqnCVx1c+jMb4Wi8Pk6zDO1
CMbgeCvDNBmze80OspF/b7K5U3+wsr56ROEHRT4Hu5HFPWvIbppH9pz7wJbuWEldK52GbOtuoi1X
FLHBJwEuyt2IZ4QE4SDRTvzmyWpc5jwbu20bQJs9k9BML3H0s3kG3GjXCy6FFEN2yWKUEpaDbv95
izw9ZfFCOU2sDLRwqZcSzlz3C5m0NKOaudeQj0soEps1D3VKjsF1Vdchyl4Dhcsl4PG0TGC1dA64
FsrSsYtbRB9n85mGoQuM311x1q3GiQ1RP8eWNXWxEsf1iEm0E0lbHEYczVPJ4O1schGeI473wslA
vA7Jr0ylRWJOaKvTMxQrmIp7JFdga+SDXY744QWHV2OtU3Pwl+IJ9PVeDvJcMBcNHX8EPY7WQh4z
Gxk8PI3GZrShoVwl7HHJGNeesMa6t63wwmYjbCClhj2HviMFAxatzE9Gcckr0fp0VZQaw8LJ1Xth
Ta4mzDQuAVp5S0G56iva7vSgqDpkHZN0oYI5uVmHvTq+h32UVMEe65MhEQU0ORB1mBhYZJOlZiyh
Y58RJYl2gg7Xk+MvL5Adr03JF9PvYHNkOPqc8QPN9TfDPy0ccIzsKpwwfxHe2Twdh//oQx3yZ1G9
yBzFZb+ED67+hIFOa1Cpdgna8ekvelc3HbMCCnvyHfAmnj1um70Az0qdhu8dlcZDNk/RLv9s5DPZ
D+mpTkUWg63Qzv0EPH7QGNpX3QOodAMQkx2g++RkmRdng5iFLU1Mkoc5+/paLBtyrIXdqatLpuqt
J5s1z5AzJkIyX9BDPr8X57YeUOBkPWS5+f++kI67uWS4U404Nl+BpbYXUJi1OE7sM8Jai2xwh7QZ
/qishE921SOn9aHI45QM6rxZApcfHYKn525FbVEENTorYAlPazzm64hNW9bhW+buePiKPT4nb4kr
S1WxskUPsvcsQOepg2jx69kIxryD4YJTUDzZGl4baga7WnaAxCdiYGa5GnNobgSTcKOTIWM8NnY8
hY1lelj5JWZktftu8mLXZeKT9IRMkRDj4ElNbio3nUv3MOI2ftXivGQHSI9eOpnWdIG1fz4FmY+W
IZOrE8jpkhqW3amIx0/0oudaV9AzRCMN11JY0bobzvNphwsM/NGKtFrEyU3CbQp87LzAHZ/8uRm3
DW3EhtEu+HiRHY4f0cKJw9/Q6qLbaHpHOFqrZovGlL/BBQ7pMHNoAdxzvQ0EFO0HgrkyIDZGi5Eq
j2Z62j8y7VaYLTHMYm1a+thyB2sikRdKstLySc/tNyTYUJ6LY6dxzyJtuL8T87mTpyy5fcEanEr4
U/LTeTX5Zh0JJJy80X0fBnl/70Gsdxf6sKASnTU6jOSbJFDtyCIY8FUari8n8IG5D2pyrUPBczRw
wSiFrx71wMuW+GJvhS24B7piq5/z8bwGbexa8x2V3itDJ90jkZGGHRq61Q8LBjNg7F8+3KXQDu5s
CQGyxbLgiqkW0x8fzfC6PjLzj2G260oWC/S+s1I3bAhv3VGSKlFE+lq6Sf1uJW5xswkn6uFxbosW
ctY2FOfAGXOvDv0j1SuyyMOMu+xg1xuYnRKFVHuLUE5tCcpTPoUKGvhocOgqbL0bAi54LAIVvVch
aVuPBqY+RH8a1PBBFT7eo+6O645vxpvnb8QWmc44v3Qu9pHXwnc2fEXU1luosuYYelRojRaZf4Gv
VNKg3Z75MLfjBdC6FgxmaUuDqmpV5tXzcMb/ZyejaihgN91NY1+86GUtTlsQtcwD5MHzPKKn9oaU
Silw2g1G3Jn3C7gHKou4nZch9yvLnLuWIM8NqVeQjCwTEnS5EVy/MQdJe0egCRCDbs7xRMMvpJFc
Lx8+1v0o4l3/LazRToZY1Rkl9AvRqIcCzqy3xtxORwzM1mFe92o8+8di3GljjhUGlLGsThdq6MxD
afuCUYyJGVJ91QnHXyZCqXOWUBD8GHQ89weLHf7RYr+lmbXLgpjNGk+Y5c8t2ZujCWzInzY2/850
YrPLn3youkiOLXtC5LdKcKuW6nEJhlZc7w8+t2MFj4u1MuNqxRW4H8/ukYkDK0i0fAPzePQD/H7d
HgXar0PepdYIrmuFF+TFoXTqLCb5grywsDUR1tgtQT7Ot5BDkjhObjXGm/QBDrjsgE+5LcGnvOfh
4f8McTVPErtoNyNXifOI1+GNdi/VQ1n3G6H0cCT0WzwNNlSKQPW4O2ip/UwbBP8QDZz3YOTzhUxv
uz57TXiEFUg/YKfZqJOLW1xJKptMIlpFhCv/RoxnK3PpkVO5sdpZnG+TGack1OWuZ0yQ7LA7pC9m
ETHb9o85kVoLG27ZoaAORxQUY43uZ/XBptAAWP6tHFSISUGBDQc/v3RGuaqFyJ0aQvcNdPDHlxa4
wXMe9s+1wbklRvhRjgquUf2G9mlWobFJJ1Fz9XLUtkgRpYRUwJnMdjiWpQrbNG6AjhIEnLe00LWu
raLAUppBYVcYlxtyrLWNJxtYk8+eKx1nTbzmE3mXUPJ+5VWypaWR+JqOEG+eAtd9SZ17/1ON60iU
5Fw1n5OWlFPkz1spUp1/EUR/n4Qs+32QwsoIxFsTjFZVLkWSazQRtvwE8cPv8O/wTMSPPIJ2vy5D
27X7kLa3Cn4wZoC/rjXCdvsNcYj8JJyUMYYSjjSjVOMrqNxoPyp1tEbGWkOw/dUlaB3rBO11/wCT
WSng/sOZoEPvLq3w/aZofrEhYx4Qx0w//YHxq+OxYbOS2Xn856zhVh3Sk7+K+MXHkv8c8kn4qTri
SXeT+BtfSeabHnK1vYZ0v04lzu+NSH0exQQnK6C106LR7xO30L1rNWipRA2Km3wXLYy+hO7tSkZj
xfHI5lsaUu4qQfY6T9GUez9Rd78CPpetgRM9NfGojTKWU5xAZrrt6M6uUrRj3UkUI+eG5qdORmEG
zTDhUSzMyLWEIvVnQNnlENA0VgafxbPob9HJosfJYoy7+0bmm08l8/uDJntM04+tZ/LZ9tFBds9G
E1JWsZYkBEeRV/5ZZGFRHilwzycqcudIlGsQWdMqSSLDJMEktZmo8N5lNJP5iD4pKWNvVg8Hxk/F
z+/qYmeFSThGKIOjssdR+LHfqBuMI8enUvggUMTDp1XxswuqWHqxAtacM4GKvLrR0UAWBclloVO3
dqPLq2xQZsNf6DOpCFZV+8D/TNXgnYslQFnBBZzt/UQP1x6lqeFNogzDR6LLsRZM8vpkJhR9YMqN
rFnfhghWX7qclZo5yn6Za0B2HQekJs6dnJ/pRT7kOBJfrykk+/xx9uLW7dDByR1dfsch5QZFTD+z
xaUprvhiiC9+92YXjlTfief3bMGNde74isQy/K+RwivUbDG3eTZWPmSE51brYanhSdjVTxo7B39D
Qr961DV4HW0/EYUCN65AxWc10bWPLbAzJwleTBPAv5JfwfiZJFBbOhMY9App7YVO9PogU5HGz1SR
W/iEKLzXjZnpkMt47BxnFkvwWVXzcPafUx47rbGZHZjaz061+cn+sOpgl+9PZM+151AbytvhjJAw
NPfwC0T76eHZWk5Y8DQE15Ek3Bt3Di+fnIOXns3BG2Qu4iNNadg3JAH7uYZjGLgdm+m740X5GF+5
aYGfMTpYJ1sKL/3bhVoLK5FZdiq6AP2Q2moLpDf+CzbGFUF/xwC4g6cNiTIDvIZ9QNZpKXDiQSpt
qDedzr7QL4wqcxNdViwV+emrM2q7tzLTQvIZtXs/mF52Oiv6upy9v9+PLQ7wYwObbNkpxZh5JQ/h
XCUldDjqBDrQ04bCdAzwkL8b7gdRuDPwAq6/dBtf0ryPC6vr8cvDj/GFZ7X46hiD07ffxPnnLuG6
5mSs+O4orjLywloSEIc2TMP7b0nj+42v0LhzERJbdwKd2L0CLf2pgd6ebIFPM5KhmyOE96uGgHlj
Oni10A58039CZ0zaRusO/6a237wkdNNVEkmt2y46tVEootJUGMNZzoyVZjxTmZfP+G+pYoZhDuOw
XpyZxUsHN3dFwwwHFTR5PBQpDT9E/spqWDNpCb5QdxQXjJ3DEWvK8duyJuw0rwt3xn/Bi+b24Smh
n7DM+jd4PWjErdWVeCD0Kj5RnoQbJ/biD1qOWOnsTOwsK4NrBU+R2IMcZN++D5kcnIuq+ydg3LJb
sH3lbti3WA+K/6wGpTsDQUa0Mnj17DJNj9rSvp6EmitaLiyTKRTev6co0v28VnSTzRK5dT4W7Vcf
EInJdosEOktFYkt66V36XYD7GA7B6DD8rbMK7fm/9+LefEf6PTPxZU0vXPI7AXt8v4FXLarBZ8s7
sdDtBw7I/otXvZ/AvXZjeI73dyxW/QYHKzzClcMlOEU/A3cUHsX/8CrsZm+KD+2RxFel65FheQY6
fGgjmq41FTVdegNf89Ph5p/2MOW/fyBx1QVguAcASe03tPHgIdq+WY7OGDtFWRp8rNLXpoWHRpKF
Y171wpYPf4R03IhwzV174Xi8Od3/2gTcGHkEgkwdoZJbBcy+qousLgej2No7aK+NOPbPsMLLnm7F
2xYl40tXC3Fz9CMcx3XhKRuHsNObcfx5rvjCyK//8G6NQbzCuAtHPKzDadU3ceLLdLzlVyh+eMMR
OzpOx01bfqPvtiJkfj4e/bZyQD8SFJFmNAt9lxyFiXVG8MtII4i+dBj0Ay0wWFdAF9Uh+n1KMxVW
tZ5abLa5qqHhetUSm7aq3WUvqnhBnyq9u3wpKsudXhEgA9b6JgOeiQKc9PkATE9uhm6hJij25GEU
2ncXfbGdQI81zPHXak88eUkcrrhxFX8qZ/HHfc9xgnwv9pcYxLePj+Bs3x9Yx+4jNtN4jnnJBN/1
ycVui5PwefcdeFsMwKo3NbH66h4UuuEG+vd9P/oiPQclDH6H44aXoV6BG7SQlYEuXdfAlxQHILv6
M929P4pWFFOnZ5mep75OTKcebTtfuflQZqWHJHv3uulxwcTRHZSO6xT6xHgBXbDFCjwOuQ62DmrC
1TOOQsOljfDf5qlILW4XMrbMR+/JF+SgpI+Hpy3DErUHsaRlKuatLMCFiQQvsm/GgSavcNSmDnzj
fhu2J02425XgrtcFuOJrGg4wDcNxAa5Y/bEpVnssgTV21aDgsDOIJLuhVys1UMb1R1BOIhqmZJlD
Hf+XwOvocTB9sj4YnVxGP323nOa1tFMKW7ZRc/f2C+L6IhasDDrAV1bZIGjS0KTq7xZTHjtpOqqS
of132IEBsyvg2KASHF4TAO8eqITZ9crofsMqNGybiub/qkf/rGTwGl9zPDV9Nf5+6gj+23Ia9wzm
4JplN3FcaQUeOyrEegcrsPzPEmzRnIM3zjqD7avC8P0T67DLNBt8JlAZ7z3RjuQuXkMOY3sRs9YS
6cYNQLw6B+7UcYeX5eVg56c8MOubI1i49jt9IjKW7lilTf8byKGKts2mlBSLBCHDs/iT0wf4M0ma
wHt8NrWi6C7FnubRg1G36FC+Eah7nwy2cUNA/sZSuGTsHBy3/gbHky2QT0gwcum/hp6efY3aeIpY
MsYcC0OcsMmnHRhxx3H08SQ8sDoNK5zPwMrWaXjy6iS8p+A45lx34mMP/v8R3lliw+sqmL+4B+k9
LUHHjaPQrr8YjQ/IoYddInjO7igc+WkMn/94AhIuhINn76eA18O36fYFK2gPyU7q/qOd1KZlI4Jr
zAFBWdU6/r4aCcGtwFSBTeBUarfuFao8fSqdV5NG++rKAu78dvDP+QGw3mkI6x8fgHa5ldD/uwwK
ygDoXl04MqJvoE7Tl2j1Bilcd8kAT7o3D98fWoHPRG7AclJ++FZSAC547YtfrdiAZ3aswGR8Hg7Y
bIhXVUrjY9Pakf7CYpTw/53pu7ocZdxXR0GHmmHVniSYMgHgoM0oMBjNAtejAFCa002vHY2gdYWT
aCnRRepTvykVnFAoMDG1EOylHfkP/D7xt2scFag8lqRm3zxObfn9l3rXH0DDiCf0Coe5oLYpFZzN
6gdlr3gwITAe9kU8gs+DldBcb4gkyg8h1+iLyPblfRTxrheVXJbGulZaWNJjOjZRn4Xn3TTH0mNm
uJc3HavN0MIxD6SxsfAT2uL9AOlzl9DJ9cdQRtMS5LRAA5nXPocDRekwnOcCw6Tk4erHN0FKjBeo
qpUBT5Zk02dq5tNbbjdQEvs8qcklvYL9u3YKGhZ+4RsVmfCt28v54YWLBcXmjwQVxvaUlamQUgww
p9edzqCvHBAHEyJ30HkmHzRelYRvkpbAE38SoVJYDQxzkkEK/VboiPEmpC0fh5ZIXEHGrULkc60F
fap4h36k9SHk8AN1HOlDdhnv0JaMFgR+CdHSkCtI+20cyji9GVWO26KRBnm06VUjlLJNg61vXWHF
YlWo8pIBly33gT+BOmD6gwq6IGA9vdHrFxVbGUeZrNCk7omfE5g6GQjKhrP4QVdGeEPvo/kgTEkQ
qZEkeH1OgepOjKT+pfRR27+uopuyb9GHldVBzvptwO7ZbeA9VxpKTsKwUPkE3Gt5B37V74PLF09G
N/bz0JZ1G9CJgiPo3MRJZB16FlnUZaPqpTloTJiNDm87i5x+nUSvnh5BP55uQFvX8VF1vDaCHgMw
9E0FXNcfB48VOkAl3v/9oUGAV94R4KwzAzx5UUfP/hxMAzlVeqQ5h9Lomkt1dXOCi2SZILO+nq/3
bCl/3OMGTz9yPt/iDsN3+g8K0LEKgX2rBaUadZ7K1ZanO9Zsoy3CWHrYTBv8nbcNJBcUgvTLv0Cr
uyVsU94O9ZUyoe3be3DgQx/c6K+MrnXMQJfIPCQd//9+jDqjL5auqO2TM/p9cAlaUDEPrfwxA6nm
KqOCx/2w3bIaNtzOgr6tu6CUkw28MFcMar4sBYcq94Gg2Okgq7uJro0Np7V+6tKqy+5QS2ucKbnc
d4JavFdwZ9EYP7znCH/x+CivvnYDT1r8K09QtJdvM2uQX3whQIBHXwmsqpZQ1JMCiuFPog9e3UUj
WkSreakBvZ1rweH5WUDpxQtgNKgGG/YBeOV9IBwzSoHbu4vh3q11cJ2wCz6THILTCsZh+mpx9PTC
OPz3YxAqlXXBkZA6GDutBC5KT4Fa1nvg5v+juNusDl3vvwZxqhfBmWgvIJc8GcRH1NCHO4/SzSEG
dGAkQx1GXlRD+h/BmqI4waQadYFncwb/1zodvtqaVN4+O1nez/vJPMl7GvzvbWf4DZbKgnNrjgti
Q74JetVdqHvtRRRPWoWeWuxN7zMopG9vnaD/xCwAzyOPgX+OhaBEvxvkHFKBfqI5UP/aShg3JRBy
vtFwCzoFD8zOgLdjM+Fk4wz4Yv4paL8lGi60DIQV3Suh3mwrWKajCoOK34G1k4uB25UI4M6nwB5d
SbBf9Ta9Pm47vatJi3Y7X0ktU9lEATEJauXfM4JLdtMF3eEF/Ofj1vzVtrd4a7UteP8DD6jBLg==
"""
# Demo: render the example landscape as a 3D surface when the module is run
# directly (requires a visvis GUI backend).
if __name__ == "__main__":
    vv.figure()
    m = vv.surf(peaks())
| chiluf/visvis.dev | functions/peaks.py | Python | bsd-3-clause | 13,421 | [
"Gaussian",
"MOE"
] | 2e3455d2d5489a449cacbb47b0e1160bf2fc47fe76e720099b894b29a19493ae |
## These are the mask bits in ANYMASK / ALLMASK.
#
# From: http://www.noao.edu/noao/staff/fvaldes/CPDocPrelim/PL201_3.html
# 1 -- detector bad pixel          InstCal / Resampled / no data (Stacked)
# 2 -- saturated                   InstCal/Resampled
# 4 -- interpolated                InstCal/Resampled
# 16 -- single exposure cosmic ray InstCal/Resampled
# 64 -- bleed trail                InstCal/Resampled
# 128 -- multi-exposure transient  InstCal/Resampled
DQ_BITS = {
    'badpix':  0x001,
    'satur':   0x002,
    'interp':  0x004,
    'cr':      0x010,
    'bleed':   0x040,
    'trans':   0x080,
    'edge':    0x100,
    'edge2':   0x200,
    # Added by our stage_outliers rejection
    'outlier': 0x800,
}

# Bit codes for why a CCD got cut (survey-ccds file, ccd_cuts column).
# Each name gets the next power of two, in the order listed (0x1, 0x2, ...).
CCD_CUTS = {name: 1 << bit for bit, name in enumerate([
    'err_legacyzpts',
    'not_grz',
    'not_third_pix',   # Mosaic3 one-third-pixel interpolation problem
    'exptime',
    'ccdnmatch',
    'zpt_diff_avg',
    'zpt_small',
    'zpt_large',
    'sky_is_bright',
    'badexp_file',
    'phrms',
    'radecrms',
    'seeing_bad',
    'early_decam',
    'depth_cut',
    'too_many_bad_ccds',
    'flagged_in_des',
    'phrms_s7',
])}
# Bit codes recording which fitting decisions were made for a source.
# Each name gets the next power of two, in the order listed (0x1, 0x2, ...).
FITBITS = {name: 1 << bit for bit, name in enumerate([
    'FORCED_POINTSOURCE',
    'FIT_BACKGROUND',
    'HIT_RADIUS_LIMIT',
    'HIT_SERSIC_LIMIT',
    'FROZEN',            # all source parameters were frozen at ref-cat values
    'BRIGHT',
    'MEDIUM',
    'GAIA',
    'TYCHO2',
    'LARGEGALAXY',
    'WALKER',
    'RUNNER',
    'GAIA_POINTSOURCE',
    'ITERATIVE',
])}

# Outlier mask bit values (positive / negative deviations).
OUTLIER_POS = 1
OUTLIER_NEG = 2

# Bits in the "maskbits" data product; consecutive powers of two in order.
MASKBITS = {name: 1 << bit for bit, name in enumerate([
    'NPRIMARY',    # not PRIMARY
    'BRIGHT',
    'SATUR_G',
    'SATUR_R',
    'SATUR_Z',
    'ALLMASK_G',
    'ALLMASK_R',
    'ALLMASK_Z',
    'WISEM1',      # WISE masked
    'WISEM2',
    'BAILOUT',     # bailed out of processing
    'MEDIUM',      # medium-bright star
    'GALAXY',      # SGA large galaxy
    'CLUSTER',     # Cluster catalog source
    'SATUR_I',
    'ALLMASK_I',
])}

# Bits in the "brightblob" bitmask
IN_BLOB = {
    'BRIGHT':  0x1,  # "bright" star
    'MEDIUM':  0x2,  # "medium-bright" star
    'CLUSTER': 0x4,  # Globular cluster
    'GALAXY':  0x8,  # large SGA galaxy
}
| legacysurvey/pipeline | py/legacypipe/bits.py | Python | gpl-2.0 | 2,859 | [
"Galaxy"
] | b6251b6ae854d85a1c735e1201ee4942bf7e959c996775e0394e0b66596ac16e |
#-*- coding: utf-8 -*-
#
####
# 2006/02 Will Holcomb <wholcomb@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# 2007/07/26 Slightly modified by Brian Schneider
#
# in order to support unicode files ( multipart_encode function )
# From http://peerit.blogspot.com/2007/07/multipartposthandler-doesnt-work-for.html
#
# 2013/07 Ken Olum <kdo@cosmos.phy.tufts.edu>
#
# Removed one of \r\n and send Content-Length
#
# 2014/05 Applied Fedora rpm patch
#
# https://bugzilla.redhat.com/show_bug.cgi?id=920778
# http://pkgs.fedoraproject.org/cgit/python-MultipartPostHandler2.git/diff/python-MultipartPostHandler2-cut-out-main.patch?id=c1638bb3e45596232b4d02f1e69901db0c28cfdb
#
# 2014/05/09 Sérgio Basto <sergio@serjux.com>
#
# Better deal with None values, don't throw an exception and just send an empty string.
# Simplified text example
#
"""
Usage:
Enables the use of multipart/form-data for posting forms
Inspirations:
Upload files in python:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
urllib2_file:
Fabien Seisen: <fabien@seisen.org>
Example:
import MultipartPostHandler, urllib2
opener = urllib2.build_opener(MultipartPostHandler.MultipartPostHandler)
params = { "username" : "bob", "password" : "riviera",
"file" : open("filename", "rb") }
opener.open("http://wwww.bobsite.com/upload/", params)
"""
import sys
import os, stat
import urllib
import urllib2
import mimetools, mimetypes
from cStringIO import StringIO
class Callable:
    """Python-2-era wrapper that lets a plain function be stored as a class
    attribute and still be called (an old-style emulation of staticmethod,
    used for multipart_encode below)."""
    def __init__(self, anycallable):
        # Storing the function as the instance's __call__ makes the instance
        # itself callable under old-style (Python 2) classes.
        self.__call__ = anycallable
# Controls how sequences are uncoded. If true, elements may be given multiple values by
# assigning a sequence.
doseq = 1
class MultipartPostHandler(urllib2.BaseHandler):
handler_order = urllib2.HTTPHandler.handler_order - 10 # needs to run first
def http_request(self, request):
data = request.get_data()
if data is not None and type(data) != str:
v_files = []
v_vars = []
try:
for(key, value) in data.items():
if type(value) == file:
v_files.append((key, value))
else:
v_vars.append((key, value))
except TypeError:
systype, value, traceback = sys.exc_info()
raise TypeError, "not a valid non-string sequence or mapping object", traceback
if len(v_files) == 0:
data = urllib.urlencode(v_vars, doseq)
else:
boundary, data = self.multipart_encode(v_vars, v_files)
contenttype = 'multipart/form-data; boundary=%s' % boundary
# ~ if(request.has_header('Content-Type')
# ~ and request.get_header('Content-Type').find('multipart/form-data') != 0):
# ~ print "Replacing %s with %s" % (request.get_header('content-type'), 'multipart/form-data')
request.add_unredirected_header('Content-Type', contenttype)
request.add_data(data)
return request
def multipart_encode(vars, files, boundary = None, buffer = None):
if boundary is None:
boundary = mimetools.choose_boundary()
if buffer is None:
buffer = StringIO()
for(key, value) in vars:
buffer.write('--%s\r\n' % boundary)
buffer.write('Content-Disposition: form-data; name="%s"' % key)
if value is None:
value = ""
# if type(value) is not str, we need str(value) to not error with cannot concatenate 'str'
# and 'dict' or 'tuple' or somethingelse objects
buffer.write('\r\n\r\n' + str(value) + '\r\n')
for(key, fd) in files:
file_size = os.fstat(fd.fileno())[stat.ST_SIZE]
filename = fd.name.split('/')[-1]
contenttype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
buffer.write('--%s\r\n' % boundary)
buffer.write('Content-Disposition: form-data; name="%s"; filename="%s"\r\n' % (key, filename))
buffer.write('Content-Type: %s\r\n' % contenttype)
buffer.write('Content-Length: %s\r\n' % file_size)
fd.seek(0)
buffer.write('\r\n' + fd.read() + '\r\n')
buffer.write('--' + boundary + '--\r\n')
buffer = buffer.getvalue()
return boundary, buffer
multipart_encode = Callable(multipart_encode)
https_request = http_request
| alfa-jor/addon | plugin.video.alfa/lib/MultipartPostHandler.py | Python | gpl-3.0 | 4,984 | [
"Brian"
] | 09b43e4a7ac96cd5fb51fc2c8bc31dc71e75f2e8cb5429f58bcbe620950dff3d |
"""Declare critical models for Hydroshare hs_core app."""
import copy
import json
import logging
import os.path
import re
import unicodedata
from uuid import uuid4
import arrow
from dateutil import parser
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.fields import GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.contrib.postgres.fields import HStoreField
from django.core.exceptions import ObjectDoesNotExist, ValidationError, \
SuspiciousFileOperation, PermissionDenied
from django.core.files import File
from django.core.urlresolvers import reverse
from django.core.validators import URLValidator
from django.db import models
from django.db import transaction
from django.db.models import Q, Sum
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.forms.models import model_to_dict
from django.utils.timezone import now
from dominate.tags import div, legend, table, tbody, tr, th, td, h4
from lxml import etree
from markdown import markdown
from mezzanine.conf import settings as s
from mezzanine.core.managers import PublishedManager
from mezzanine.core.models import Ownable
from mezzanine.generic.fields import CommentsField, RatingField
from mezzanine.pages.managers import PageManager
from mezzanine.pages.models import Page
from rdflib import Literal, BNode, URIRef
from rdflib.namespace import DC, DCTERMS, RDF
from django_irods.icommands import SessionException
from django_irods.storage import IrodsStorage
from hs_core.enums import RelationTypes
from hs_core.irods import ResourceIRODSMixin, ResourceFileIRODSMixin
from .hs_rdf import HSTERMS, RDF_Term_MixIn, RDF_MetaData_Mixin, rdf_terms, RDFS1
from .languages_iso import languages as iso_languages
def clean_for_xml(s):
    """
    Remove all control characters from a unicode string in preparation for XML inclusion

    * Convert \\n\\n+ to unicode paragraph mark
    * Convert a single \\n to the unicode RETURN symbol
    * Convert other control characters to a space unless the previous output
      character is already a space
    * Space-pad the paragraph and return symbols as necessary
    """
    RETURN_SYM = chr(0x23CE)  # carriage return unicode SYMBOL
    PARA_SYM = chr(0xB6)      # paragraph mark unicode SYMBOL

    pieces = []     # output fragments, joined at the end
    pending = None  # newline symbol (RETURN/PARA) awaiting a visible char
    prev = None     # last character emitted to the output

    for ch in s:
        category = unicodedata.category(ch)
        is_control = category.startswith('C')
        is_space = category.startswith('Z')
        is_newline = (ord(ch) == 10)

        if pending is not None:
            if is_newline:
                # Two or more consecutive newlines upgrade to a paragraph mark.
                pending = PARA_SYM
            elif is_space or is_control:
                # Whitespace/control inside a newline run is swallowed.
                pass
            else:
                # Flush the pending symbol, space-padded on both sides.
                if prev != ' ':
                    pieces.append(' ')
                pieces.append(pending + ' ' + ch)
                pending = None
                prev = ch
        else:
            if is_newline:
                pending = RETURN_SYM
            elif is_space:
                if prev != ' ':
                    pieces.append(ch)
                    prev = ch
            elif is_control:
                if prev != ' ':
                    pieces.append(' ')
                    prev = ' '
            else:
                pieces.append(ch)
                prev = ch

    # Note: a trailing newline run is dropped (never flushed), matching the
    # accumulator-based original.
    return ''.join(pieces)
class GroupOwnership(models.Model):
    """Define lookup table allowing django auth users to own django auth groups."""

    # One row per (group, owner) pair: the owned group and its owning user.
    group = models.ForeignKey(Group)
    owner = models.ForeignKey(User)
def get_user(request):
    """Authorize user based on API key if it was passed, otherwise just use the request's user.

    NOTE: The API key portion has been removed with TastyPie and will be restored when the
    new API is built.

    :param request:
    :return: django.contrib.auth.User
    """
    if not hasattr(request, 'user'):
        raise PermissionDenied
    if not request.user.is_authenticated():
        # Anonymous user: hand back the request's user object unchanged.
        return request.user
    # Authenticated: re-fetch from the database so a fresh User is returned.
    return User.objects.get(pk=request.user.pk)
def validate_user_url(value):
    """Validate that a URL is a valid URL for a hydroshare user.

    A valid user URL has the shape '/user/<id>/' where <id> is the primary
    key of an existing User. Empty/None values are accepted unchanged.

    :raises ValidationError: if the URL shape is wrong, the id is not an
        integer, or no user with that id exists
    """
    if not value:
        return
    err_message = '%s is not a valid url for hydroshare user' % value
    parts = value.split('/')
    # Splitting '/user/123/' yields exactly four pieces: ['', 'user', '123', '']
    if len(parts) != 4 or parts[1] != 'user':
        raise ValidationError(err_message)
    try:
        user_id = int(parts[2])
    except ValueError:
        raise ValidationError(err_message)
    # check the user exists for the provided user id
    if not User.objects.filter(pk=user_id).exists():
        raise ValidationError(err_message)
class ResourcePermissionsMixin(Ownable):
    """Mix in can_* permission helper functions between users and resources."""

    # The person who first uploaded the resource (distinct from later owners).
    creator = models.ForeignKey(User,
                                related_name='creator_of_%(app_label)s_%(class)s',
                                help_text='This is the person who first uploaded the resource',
                                )

    class Meta:
        """Define meta properties for ResourcePermissionsMixin, make abstract."""

        abstract = True

    @property
    def permissions_store(self):
        """Return the PERMISSIONS_DB setting (``s`` is the mezzanine.conf
        settings alias imported at module top)."""
        return s.PERMISSIONS_DB

    def can_add(self, request):
        """Pass through can_change to determine if user can make changes to a resource."""
        return self.can_change(request)

    def can_delete(self, request):
        """Use utils.authorize method to determine if user can delete a resource."""
        # have to do import locally to avoid circular import
        from hs_core.views.utils import authorize, ACTION_TO_AUTHORIZE
        # authorize() returns a tuple; element [1] is the boolean decision
        return authorize(request, self.short_id,
                         needed_permission=ACTION_TO_AUTHORIZE.DELETE_RESOURCE,
                         raises_exception=False)[1]

    def can_change(self, request):
        """Use utils.authorize method to determine if user can change a resource."""
        # have to do import locally to avoid circular import
        from hs_core.views.utils import authorize, ACTION_TO_AUTHORIZE
        return authorize(request, self.short_id,
                         needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE,
                         raises_exception=False)[1]

    def can_view(self, request):
        """Use utils.authorize method to determine if user can view a resource."""
        # have to do import locally to avoid circular import
        from hs_core.views.utils import authorize, ACTION_TO_AUTHORIZE
        return authorize(request, self.short_id,
                         needed_permission=ACTION_TO_AUTHORIZE.VIEW_METADATA,
                         raises_exception=False)[1]
# Build a JSON serializable object with user data
def get_access_object(user, user_type, user_access):
    """Return a JSON-serializable dict describing one grantee of a resource.

    :param user: a User (user_type == "user") or a Group (user_type ==
        "group"); assumed to carry a ``can_undo`` attribute set by the
        caller (see page_permissions_page_processor) -- TODO confirm
    :param user_type: "user" or "group"; any other value returns None
    :param user_access: access level string (e.g. "owner", "edit", "view")
    :return: dict for JSON serialization, or None for unknown user_type
    """
    from hs_core.templatetags.hydroshare_tags import best_name
    access_object = None
    picture = None
    if user_type == "user":
        # Profile picture URL, if one was uploaded.
        if user.userprofile.picture:
            picture = user.userprofile.picture.url
        access_object = {
            "user_type": user_type,
            "access": user_access,
            "id": user.id,
            "pictureUrl": picture,
            "best_name": best_name(user),
            "user_name": user.username,
            "can_undo": user.can_undo,
            # Data used to populate profile badge:
            "email": user.email,
            "organization": user.userprofile.organization,
            "title": user.userprofile.title,
            "contributions": len(user.uaccess.owned_resources),
            "subject_areas": user.userprofile.subject_areas,
            "identifiers": user.userprofile.identifiers,
            "state": user.userprofile.state,
            "country": user.userprofile.country,
            "joined": user.date_joined.strftime("%d %b, %Y"),
        }
    elif user_type == "group":
        # Groups keep their picture on gaccess rather than userprofile.
        if user.gaccess.picture:
            picture = user.gaccess.picture.url
        access_object = {
            "user_type": user_type,
            "access": user_access,
            "id": user.id,
            "pictureUrl": picture,
            "best_name": user.name,
            "user_name": None,
            "can_undo": user.can_undo
        }

    return access_object
def page_permissions_page_processor(request, page):
    """Return a dict describing permissions for current user.

    Collects the owners, editors and viewers (both users and groups) of the
    page's resource, annotates each grantee with whether the requesting user
    can undo that share, and builds the template context for the resource
    permissions panel (including JSON-serialized grantee data).
    """
    from hs_access_control.models.privilege import PrivilegeCodes

    cm = page.get_content_model()
    can_change_resource_flags = False
    self_access_level = None
    if request.user.is_authenticated():
        if request.user.uaccess.can_change_resource_flags(cm):
            can_change_resource_flags = True
        # this will get resource access privilege even for admin user
        user_privilege = cm.raccess.get_effective_user_privilege(request.user)
        # Map the privilege code to the string the template expects.
        if user_privilege == PrivilegeCodes.OWNER:
            self_access_level = 'owner'
        elif user_privilege == PrivilegeCodes.CHANGE:
            self_access_level = 'edit'
        elif user_privilege == PrivilegeCodes.VIEW:
            self_access_level = 'view'

    owners = cm.raccess.owners.all()
    # Users with direct (not group-granted) edit / view access.
    editors = cm.raccess.get_users_with_explicit_access(PrivilegeCodes.CHANGE,
                                                        include_group_granted_access=False)
    viewers = cm.raccess.get_users_with_explicit_access(PrivilegeCodes.VIEW,
                                                        include_group_granted_access=False)
    edit_groups = cm.raccess.edit_groups
    # View-only groups: exclude any group that already has edit access.
    view_groups = cm.raccess.view_groups.exclude(pk__in=edit_groups)

    if request.user.is_authenticated():
        # Annotate every grantee with whether the current user may revoke
        # that share; the flag is consumed by get_access_object.
        for owner in owners:
            owner.can_undo = request.user.uaccess.can_undo_share_resource_with_user(cm, owner)

        for viewer in viewers:
            viewer.can_undo = request.user.uaccess.can_undo_share_resource_with_user(cm, viewer)

        for editor in editors:
            editor.can_undo = request.user.uaccess.can_undo_share_resource_with_user(cm, editor)

        for view_grp in view_groups:
            view_grp.can_undo = request.user.uaccess.can_undo_share_resource_with_group(cm,
                                                                                        view_grp)

        for edit_grp in edit_groups:
            edit_grp.can_undo = request.user.uaccess.can_undo_share_resource_with_group(cm,
                                                                                        edit_grp)
    else:
        # Anonymous users can never undo a share.
        for owner in owners:
            owner.can_undo = False

        for viewer in viewers:
            viewer.can_undo = False

        for editor in editors:
            editor.can_undo = False

        for view_grp in view_groups:
            view_grp.can_undo = False

        for edit_grp in edit_groups:
            edit_grp.can_undo = False

    # Serialize all grantees into one JSON list for the front-end.
    users_json = []
    for usr in owners:
        users_json.append(get_access_object(usr, "user", "owner"))

    for usr in editors:
        users_json.append(get_access_object(usr, "user", "edit"))

    for usr in viewers:
        users_json.append(get_access_object(usr, "user", "view"))

    for usr in edit_groups:
        users_json.append(get_access_object(usr, "group", "edit"))

    for usr in view_groups:
        users_json.append(get_access_object(usr, "group", "view"))

    users_json = json.dumps(users_json)

    # URLs of related resource versions, if any.
    is_replaced_by = cm.get_relation_version_res_url(RelationTypes.isReplacedBy)

    is_version_of = cm.get_relation_version_res_url(RelationTypes.isVersionOf)

    permissions_allow_copy = False
    if request.user.is_authenticated:
        permissions_allow_copy = request.user.uaccess.can_view_resource(cm)

    show_manage_access = False
    is_owner = self_access_level == 'owner'
    is_edit = self_access_level == 'edit'
    is_view = self_access_level == 'view'
    # Owners always manage access; others only when the resource is shareable.
    if is_owner or (cm.raccess.shareable and (is_view or is_edit)):
        show_manage_access = True

    return {
        'resource_type': cm._meta.verbose_name,
        "users_json": users_json,
        "owners": owners,
        "self_access_level": self_access_level,
        "permissions_allow_copy": permissions_allow_copy,
        "can_change_resource_flags": can_change_resource_flags,
        "is_replaced_by": is_replaced_by,
        "is_version_of": is_version_of,
        "show_manage_access": show_manage_access
    }
class AbstractMetaDataElement(models.Model, RDF_Term_MixIn):
    """Define abstract class for all metadata elements."""

    # Generic foreign key back to the metadata container object.
    object_id = models.PositiveIntegerField()
    # see the following link the reason for having the related_name setting
    # for the content_type attribute
    # https://docs.djangoproject.com/en/1.6/topics/db/models/#abstract-related-name
    content_type = models.ForeignKey(ContentType, related_name="%(app_label)s_%(class)s_related")
    content_object = GenericForeignKey('content_type', 'object_id')

    def __str__(self):
        """Return unicode for python 3 compatibility in templates"""
        return self.__unicode__()

    @property
    def metadata(self):
        """Return content object that describes metadata."""
        return self.content_object

    @property
    def dict(self):
        """Return a {ClassName: model-fields-dict} representation of this element."""
        return {self.__class__.__name__: model_to_dict(self)}

    @classmethod
    def create(cls, **kwargs):
        """Pass through kwargs to object.create method."""
        return cls.objects.create(**kwargs)

    @classmethod
    def update(cls, element_id, **kwargs):
        """Pass through kwargs to update specific metadata object.

        Looks up the element by id, sets each kwarg as an attribute and saves.
        """
        element = cls.objects.get(id=element_id)
        for key, value in list(kwargs.items()):
            setattr(element, key, value)
        element.save()
        return element

    @classmethod
    def remove(cls, element_id):
        """Pass through element id to objects.get and then delete() method.

        Could not name this method as 'delete' since the parent 'Model' class has such a method
        """
        element = cls.objects.get(id=element_id)
        element.delete()

    class Meta:
        """Define meta properties for AbstractMetaDataElement class."""

        abstract = True
class HSAdaptorEditInline(object):
    """Define permissions-based helper to determine if user can edit adapter field.

    Adaptor class added for Django inplace editing to honor HydroShare user-resource permissions
    """

    @classmethod
    def can_edit(cls, adaptor_field):
        """Return whether the requesting user may change the field's resource."""
        content_model = adaptor_field.obj.get_content_model()
        return content_model.can_change(adaptor_field.request)
class Party(AbstractMetaDataElement):
    """Define party model to define a person (creator or contributor).

    Stores contact details for a person/organization plus an optional set
    of external identifiers (ORCID, Google Scholar, etc.) kept as
    {name: link} pairs in an HStore field.
    """

    description = models.CharField(null=True, blank=True, max_length=50,
                                   validators=[validate_user_url])
    name = models.CharField(max_length=100, null=True, blank=True)
    organization = models.CharField(max_length=200, null=True, blank=True)
    email = models.EmailField(null=True, blank=True)
    address = models.CharField(max_length=250, null=True, blank=True)
    phone = models.CharField(max_length=25, null=True, blank=True)
    homepage = models.URLField(null=True, blank=True)
    # to store one or more external identifier (Google Scholar, ResearchGate, ORCID etc)
    # each identifier is stored as a key/value pair {name:link}
    # NOTE: the default must be the dict callable, not a literal {}; a literal
    # would be one mutable dict shared by every model instance
    identifiers = HStoreField(default=dict)
    # list of identifier currently supported; values are the required URL prefixes
    supported_identifiers = {'ResearchGateID': 'https://www.researchgate.net/',
                             'ORCID': 'https://orcid.org/',
                             'GoogleScholarID': 'https://scholar.google.com/',
                             'ResearcherID': 'https://www.researcherid.com/'}

    def __unicode__(self):
        """Return name field for unicode representation."""
        return self.name

    class Meta:
        """Define meta properties for Party class."""

        abstract = True

    def rdf_triples(self, subject, graph):
        """Serialize this party as a blank node: one triple per field plus
        one triple per external identifier."""
        party_type = self.get_class_term()
        party = BNode()
        graph.add((subject, party_type, party))
        for field_term, field_value in self.get_field_terms_and_values(['identifiers']):
            graph.add((party, field_term, field_value))
        for k, v in self.identifiers.items():
            graph.add((party, getattr(HSTERMS, k), URIRef(v)))

    @classmethod
    def ingest_rdf(cls, graph, subject, content_object):
        """Default implementation that ingests by convention.

        Predicates that match a model field populate that field; any other
        predicate is treated as an external identifier keyed by the last
        path segment of the predicate URI.
        """
        party_type = cls.get_class_term()
        for party in graph.objects(subject=subject, predicate=party_type):
            value_dict = {}
            identifiers = {}
            fields_by_term = {cls.get_field_term(field.name): field for field in cls._meta.fields}
            for _, p, o in graph.triples((party, None, None)):
                if p not in fields_by_term:
                    identifiers[p.rsplit("/", 1)[1]] = str(o)
                else:
                    value_dict[fields_by_term[p].name] = str(o)
            if value_dict or identifiers:
                if identifiers:
                    cls.create(content_object=content_object, identifiers=identifiers, **value_dict)
                else:
                    cls.create(content_object=content_object, **value_dict)

    @classmethod
    def get_post_data_with_identifiers(cls, request, as_json=True):
        """Collect identifier name/link POST lists into one 'identifiers' entry.

        :param request: the HTTP request carrying POST data
        :param as_json: when True, serialize the identifiers dict to JSON
        :raises Exception: when names and links do not pair up one-to-one
        """
        identifier_names = request.POST.getlist('identifier_name')
        identifier_links = request.POST.getlist('identifier_link')
        identifiers = {}
        if identifier_links and identifier_names:
            if len(identifier_names) != len(identifier_links):
                raise Exception("Invalid data for identifiers")
            identifiers = dict(zip(identifier_names, identifier_links))
            # duplicate names collapse in the dict -> input was invalid
            if len(identifier_names) != len(identifiers):
                raise Exception("Invalid data for identifiers")
            if as_json:
                identifiers = json.dumps(identifiers)
        post_data_dict = request.POST.dict()
        post_data_dict['identifiers'] = identifiers
        return post_data_dict

    @classmethod
    def create(cls, **kwargs):
        """Define custom create method for Party model.

        For Creator elements, assigns the next display order and requires at
        least a non-blank name or organization.
        """
        element_name = cls.__name__
        identifiers = kwargs.get('identifiers', '')
        if identifiers:
            identifiers = cls.validate_identifiers(identifiers)
            kwargs['identifiers'] = identifiers
        metadata_obj = kwargs['content_object']
        metadata_type = ContentType.objects.get_for_model(metadata_obj)
        if element_name == 'Creator':
            # next order value = highest existing order + 1
            party = Creator.objects.filter(object_id=metadata_obj.id,
                                           content_type=metadata_type).last()
            creator_order = 1
            if party:
                creator_order = party.order + 1
            if ('name' not in kwargs or kwargs['name'] is None) and \
                    ('organization' not in kwargs or kwargs['organization'] is None):
                raise ValidationError(
                    "Either an organization or name is required for a creator element")
            if 'name' in kwargs and kwargs['name'] is not None:
                if len(kwargs['name'].strip()) == 0:
                    if 'organization' in kwargs and kwargs['organization'] is not None:
                        if len(kwargs['organization'].strip()) == 0:
                            raise ValidationError(
                                "Either the name or organization must not be blank for the creator "
                                "element")
            if 'order' not in kwargs or kwargs['order'] is None:
                kwargs['order'] = creator_order
            party = super(Party, cls).create(**kwargs)
        else:
            party = super(Party, cls).create(**kwargs)
        return party

    @classmethod
    def update(cls, element_id, **kwargs):
        """Define custom update method for Party model.

        Re-validates identifiers, forbids changing an established HydroShare
        user link, and for creators re-numbers sibling creators when the
        display order changes.

        :returns: the updated element (for parity with the base class update)
        """
        element_name = cls.__name__
        creator_order = None
        if 'description' in kwargs:
            party = cls.objects.get(id=element_id)
            if party.description is not None and kwargs['description'] is not None:
                if len(party.description.strip()) > 0 and len(kwargs['description'].strip()) > 0:
                    # once linked to a HydroShare user account, the link is fixed
                    if party.description != kwargs['description']:
                        raise ValidationError("HydroShare user identifier can't be changed.")
        if 'order' in kwargs and element_name == 'Creator':
            creator_order = kwargs['order']
            if creator_order <= 0:
                creator_order = 1
            del kwargs['order']
        identifiers = kwargs.get('identifiers', '')
        if identifiers:
            identifiers = cls.validate_identifiers(identifiers)
            kwargs['identifiers'] = identifiers
        party = super(Party, cls).update(element_id, **kwargs)
        if isinstance(party, Creator) and creator_order is not None:
            if party.order != creator_order:
                resource_creators = Creator.objects.filter(
                    object_id=party.object_id, content_type__pk=party.content_type.id).all()
                if creator_order > len(resource_creators):
                    creator_order = len(resource_creators)
                # shift the creators between the old and new positions
                for res_cr in resource_creators:
                    if party.order > creator_order:
                        if res_cr.order < party.order and not res_cr.order < creator_order:
                            res_cr.order += 1
                            res_cr.save()
                    else:
                        if res_cr.order > party.order:
                            res_cr.order -= 1
                            res_cr.save()
                party.order = creator_order
                party.save()
        # previously this method returned None; returning the element matches
        # AbstractMetaDataElement.update and is backward compatible
        return party

    @classmethod
    def remove(cls, element_id):
        """Define custom remove method for Party model."""
        party = cls.objects.get(id=element_id)
        # if we are deleting a creator, then we have to update the order attribute of remaining
        # creators associated with a resource
        # make sure we are not deleting all creators of a resource
        if isinstance(party, Creator):
            if Creator.objects.filter(object_id=party.object_id,
                                      content_type__pk=party.content_type.id).count() == 1:
                raise ValidationError("The only creator of the resource can't be deleted.")
            creators_to_update = Creator.objects.filter(
                object_id=party.object_id,
                content_type__pk=party.content_type.id).exclude(order=party.order).all()
            # close the gap left by the deleted creator
            for cr in creators_to_update:
                if cr.order > party.order:
                    cr.order -= 1
                    cr.save()
        party.delete()

    @classmethod
    def validate_identifiers(cls, identifiers):
        """Validates optional identifiers for user/creator/contributor

        :param identifiers: identifier data as a json string or as a dict
        :returns: the validated identifiers as a dict
        :raises ValidationError: for unsupported names, duplicate names or
            links, or links that are not valid URLs with the expected prefix
        """
        if not isinstance(identifiers, dict):
            if identifiers:
                # validation form can populate the dict(kwargs) with key 'identifiers" with
                # value of empty string if data passed to the validation form did not had this
                # key. In that case no need to convert the string to dict
                try:
                    identifiers = json.loads(identifiers)
                except ValueError:
                    raise ValidationError("Value for identifiers not in the correct format")
        if identifiers:
            # validate the identifiers are one of the supported ones
            for name in identifiers:
                if name not in cls.supported_identifiers:
                    raise ValidationError("Invalid data found for identifiers. "
                                          "{} not a supported identifier.".format(name))
            # validate identifier values - check for duplicate links
            links = [link.lower() for link in identifiers.values()]
            if len(links) != len(set(links)):
                raise ValidationError("Invalid data found for identifiers. "
                                      "Duplicate identifier links found.")
            for link in links:
                validator = URLValidator()
                try:
                    validator(link)
                except ValidationError:
                    raise ValidationError("Invalid data found for identifiers. "
                                          "Identifier link must be a URL.")
            # validate identifier keys - check for duplicate names
            names = [n.lower() for n in identifiers.keys()]
            if len(names) != len(set(names)):
                raise ValidationError("Invalid data found for identifiers. "
                                      "Duplicate identifier names found")
            # validate that the links for the known identifiers are valid
            for id_name in cls.supported_identifiers:
                id_link = identifiers.get(id_name, '')
                if id_link:
                    if not id_link.startswith(cls.supported_identifiers[id_name]) \
                            or len(id_link) <= len(cls.supported_identifiers[id_name]):
                        raise ValidationError("URL for {} is invalid".format(id_name))
        return identifiers
@rdf_terms(DC.contributor)
class Contributor(Party):
    """A Party playing the 'Contributor' role in resource metadata."""

    term = 'Contributor'
@rdf_terms(DC.creator, order=HSTERMS.creatorOrder)
class Creator(Party):
    """A Party playing the 'Creator' role, kept in an explicit display order."""

    term = "Creator"
    order = models.PositiveIntegerField()

    class Meta:
        """Sort creators by their order attribute."""

        ordering = ['order']
@rdf_terms(DC.description, abstract=DCTERMS.abstract)
class Description(AbstractMetaDataElement):
    """Resource abstract (dc:description) metadata element."""

    term = 'Description'
    abstract = models.TextField()

    def __unicode__(self):
        """Represent the element by its abstract text."""
        return self.abstract

    class Meta:
        """Allow only one description per metadata object."""

        unique_together = ("content_type", "object_id")

    @classmethod
    def update(cls, element_id, **kwargs):
        """Update the abstract, flagging time series metadata as dirty."""
        desc = Description.objects.get(id=element_id)
        metadata = desc.metadata
        if metadata.resource.resource_type == "TimeSeriesResource":
            metadata.is_dirty = True
            metadata.save()
        super(Description, cls).update(element_id, **kwargs)

    @classmethod
    def remove(cls, element_id):
        """Disallow deletion of a resource description."""
        raise ValidationError("Description element of a resource can't be deleted.")
@rdf_terms(DCTERMS.bibliographicCitation)
class Citation(AbstractMetaDataElement):
    """Bibliographic citation metadata element."""

    term = 'Citation'
    value = models.TextField()

    def __unicode__(self):
        """Represent the element by its citation text."""
        return self.value

    class Meta:
        """Allow only one citation per metadata object."""

        unique_together = ("content_type", "object_id")

    @classmethod
    def update(cls, element_id, **kwargs):
        """Delegate to the base class update."""
        super(Citation, cls).update(element_id, **kwargs)

    @classmethod
    def remove(cls, element_id):
        """Delete the citation element."""
        cls.objects.get(id=element_id).delete()

    def rdf_triples(self, subject, graph):
        """Add this citation as a literal triple on the subject."""
        graph.add((subject, self.get_class_term(), Literal(self.value)))

    @classmethod
    def ingest_rdf(cls, graph, subject, content_object):
        """Create a Citation from the subject's citation triple, if present."""
        cited = graph.value(subject=subject, predicate=cls.get_class_term())
        if cited:
            Citation.create(value=cited.value, content_object=content_object)
@rdf_terms(DC.title)
class Title(AbstractMetaDataElement):
    """Resource title (dc:title) metadata element."""

    term = 'Title'
    value = models.CharField(max_length=300)

    def __unicode__(self):
        """Represent the element by its title text."""
        return self.value

    class Meta:
        """Allow only one title per metadata object."""

        unique_together = ("content_type", "object_id")

    @classmethod
    def update(cls, element_id, **kwargs):
        """Update the title, flagging time series metadata as dirty."""
        title_element = Title.objects.get(id=element_id)
        metadata = title_element.metadata
        if metadata.resource.resource_type == "TimeSeriesResource":
            metadata.is_dirty = True
            metadata.save()
        super(Title, cls).update(element_id, **kwargs)

    @classmethod
    def remove(cls, element_id):
        """Disallow deletion of a resource title."""
        raise ValidationError("Title element of a resource can't be deleted.")

    def rdf_triples(self, subject, graph):
        """Add the title as a literal triple on the subject."""
        graph.add((subject, self.get_class_term(), Literal(self.value)))

    @classmethod
    def ingest_rdf(cls, graph, subject, content_object):
        """Create a Title from the subject's title triple, if present."""
        found = graph.value(subject=subject, predicate=cls.get_class_term())
        if found:
            Title.create(value=found.value, content_object=content_object)
@rdf_terms(DC.type)
class Type(AbstractMetaDataElement):
    """Resource type (dc:type) metadata element, stored as a URL."""

    term = 'Type'
    url = models.URLField()

    def __unicode__(self):
        """Represent the element by its URL."""
        return self.url

    class Meta:
        """Allow only one type per metadata object."""

        unique_together = ("content_type", "object_id")

    @classmethod
    def remove(cls, element_id):
        """Disallow deletion of a resource type."""
        raise ValidationError("Type element of a resource can't be deleted.")

    def rdf_triples(self, subject, graph):
        """Add the type URL as a URIRef triple on the subject."""
        graph.add((subject, self.get_class_term(), URIRef(self.url)))

    @classmethod
    def ingest_rdf(cls, graph, subject, content_object):
        """Create a Type from the subject's type triple, if present."""
        found = graph.value(subject=subject, predicate=cls.get_class_term())
        if found:
            Type.create(url=str(found), content_object=content_object)
@rdf_terms(DC.date)
class Date(AbstractMetaDataElement):
    """Define Date metadata model.

    Supports five date types; 'created'/'modified' are system managed,
    'published'/'available' depend on resource access state, and 'valid'
    is the only type that may carry an end date.
    """

    DATE_TYPE_CHOICES = (
        ('created', 'Created'),
        ('modified', 'Modified'),
        ('valid', 'Valid'),
        ('available', 'Available'),
        ('published', 'Published')
    )

    term = 'Date'
    type = models.CharField(max_length=20, choices=DATE_TYPE_CHOICES)
    start_date = models.DateTimeField()
    # only the 'valid' date type stores an end date
    end_date = models.DateTimeField(null=True, blank=True)

    def __unicode__(self):
        """Return either {type} {start} or {type} {start} {end} for unicode representation."""
        if self.end_date:
            return "{type} {start} {end}".format(type=self.type, start=self.start_date,
                                                 end=self.end_date)
        return "{type} {start}".format(type=self.type, start=self.start_date)

    class Meta:
        """Define meta properties for Date class."""

        unique_together = ("type", "content_type", "object_id")

    def rdf_triples(self, subject, graph):
        """Serialize this date as a blank node typed by the date kind with an ISO value."""
        date_node = BNode()
        graph.add((subject, self.get_class_term(), date_node))
        graph.add((date_node, RDF.type, getattr(DCTERMS, self.type)))
        graph.add((date_node, RDF.value, Literal(self.start_date.isoformat())))

    @classmethod
    def ingest_rdf(cls, graph, subject, content_object):
        """Create Date elements from the subject's dc:date triples."""
        for _, _, date_node in graph.triples((subject, cls.get_class_term(), None)):
            type = graph.value(subject=date_node, predicate=RDF.type)
            value = graph.value(subject=date_node, predicate=RDF.value)
            if type and value:
                # the RDF type URI ends in the date type name (e.g. .../created)
                type = type.split('/')[-1]
                start_date = parser.parse(str(value))
                Date.create(type=type, start_date=start_date, content_object=content_object)

    @classmethod
    def create(cls, **kwargs):
        """Define custom create method for Date model.

        Requires 'type'; validates it against DATE_TYPE_CHOICES and enforces
        per-type rules: 'published' needs a published resource, 'available'
        a public one, and 'valid' may carry an end date after the start date.

        :raises ValidationError: for a missing/invalid type or violated rule
        """
        if 'type' in kwargs:
            if not kwargs['type'] in list(dict(cls.DATE_TYPE_CHOICES).keys()):
                raise ValidationError('Invalid date type:%s' % kwargs['type'])

            # get matching resource
            metadata_obj = kwargs['content_object']
            resource = BaseResource.objects.filter(object_id=metadata_obj.id).first()

            if kwargs['type'] != 'valid':
                # end dates are only meaningful for the 'valid' type; drop otherwise
                if 'end_date' in kwargs:
                    del kwargs['end_date']

            if 'start_date' in kwargs:
                # accept ISO strings as well as datetime objects
                if isinstance(kwargs['start_date'], str):
                    kwargs['start_date'] = parser.parse(kwargs['start_date'])
            if kwargs['type'] == 'published':
                if not resource.raccess.published:
                    raise ValidationError("Resource is not published yet.")
            elif kwargs['type'] == 'available':
                if not resource.raccess.public:
                    raise ValidationError("Resource has not been made public yet.")
            elif kwargs['type'] == 'valid':
                if 'end_date' in kwargs:
                    if isinstance(kwargs['end_date'], str):
                        kwargs['end_date'] = parser.parse(kwargs['end_date'])
                    if kwargs['start_date'] > kwargs['end_date']:
                        raise ValidationError("For date type valid, end date must be a date "
                                              "after the start date.")
            return super(Date, cls).create(**kwargs)
        else:
            raise ValidationError("Type of date element is missing.")

    @classmethod
    def update(cls, element_id, **kwargs):
        """Define custom update model for Date model.

        'created' is immutable; 'modified' is always reset to now (any
        supplied start_date is ignored); 'valid' keeps start <= end.
        NOTE(review): 'published'/'available' with a start_date fall through
        the type chain unchanged — presumably intentional; confirm.
        """
        dt = Date.objects.get(id=element_id)
        if 'start_date' in kwargs:
            if isinstance(kwargs['start_date'], str):
                kwargs['start_date'] = parser.parse(kwargs['start_date'])
            if dt.type == 'created':
                # the creation date can never be altered
                raise ValidationError("Resource creation date can't be changed")
            elif dt.type == 'modified':
                # modified always tracks the current time
                dt.start_date = now().isoformat()
                dt.save()
            elif dt.type == 'valid':
                if 'end_date' in kwargs:
                    if isinstance(kwargs['end_date'], str):
                        kwargs['end_date'] = parser.parse(kwargs['end_date'])
                    if kwargs['start_date'] > kwargs['end_date']:
                        raise ValidationError("For date type valid, end date must be a date "
                                              "after the start date.")
                    dt.start_date = kwargs['start_date']
                    dt.end_date = kwargs['end_date']
                    dt.save()
                else:
                    if dt.end_date:
                        # keep the stored end date consistent with the new start
                        if kwargs['start_date'] > dt.end_date:
                            raise ValidationError("For date type valid, end date must be a date "
                                                  "after the start date.")
                        dt.start_date = kwargs['start_date']
                        dt.save()
                    else:
                        dt.start_date = kwargs['start_date']
                        dt.save()
        elif dt.type == 'modified':
            # no start_date supplied; still bump the modified timestamp
            dt.start_date = now().isoformat()
            dt.save()

    @classmethod
    def remove(cls, element_id):
        """Define custom remove method for Date model."""
        dt = Date.objects.get(id=element_id)
        # system-managed date types can never be removed
        if dt.type in ['created', 'modified']:
            raise ValidationError("Date element of type:%s can't be deleted." % dt.type)
        dt.delete()
@rdf_terms(DC.relation)
class Relation(AbstractMetaDataElement):
    """Define Relation custom metadata model.

    Captures a typed link between this resource and another resource,
    document, or software program.
    """

    SOURCE_TYPES = (
        (RelationTypes.isPartOf.value, 'The content of this resource is part of'),
        (RelationTypes.hasPart.value, 'This resource includes'),
        (RelationTypes.isExecutedBy.value, 'The content of this resource can be executed by'),
        (RelationTypes.isCreatedBy.value,
         'The content of this resource was created by a related App or software program'),
        (RelationTypes.isVersionOf.value, 'This resource updates and replaces a previous version'),
        (RelationTypes.isReplacedBy.value, 'This resource has been replaced by a newer version'),
        (RelationTypes.isDescribedBy.value, 'This resource is described by'),
        (RelationTypes.conformsTo.value, 'This resource conforms to established standard described by'),
        (RelationTypes.hasFormat.value, 'This resource has a related resource in another format'),
        (RelationTypes.isFormatOf.value, 'This resource is a different format of'),
        (RelationTypes.isRequiredBy.value, 'This resource is required by'),
        (RelationTypes.requires.value, 'This resource requires'),
        (RelationTypes.isReferencedBy.value, 'This resource is referenced by'),
        (RelationTypes.references.value, 'The content of this resource references'),
        (RelationTypes.replaces.value, 'This resource replaces'),
        (RelationTypes.source.value, 'The content of this resource is derived from')
    )

    # these are hydroshare custom terms that are not Dublin Core terms
    HS_RELATION_TERMS = (RelationTypes.isExecutedBy, RelationTypes.isCreatedBy, RelationTypes.isDescribedBy)
    # relation types managed by the system (versioning/aggregation); not user editable
    NOT_USER_EDITABLE = (RelationTypes.isVersionOf, RelationTypes.isReplacedBy,
                         RelationTypes.isPartOf, RelationTypes.hasPart, RelationTypes.replaces)
    term = 'Relation'
    type = models.CharField(max_length=100, choices=SOURCE_TYPES)
    value = models.TextField()

    def __str__(self):
        """Return {type} {value} for string representation."""
        return "{type} {value}".format(type=self.type, value=self.value)

    def __unicode__(self):
        """Return {type} {value} for unicode representation (deprecated)."""
        return "{type} {value}".format(type=self.type, value=self.value)

    @classmethod
    def get_supported_types(cls):
        """Return the supported relation type keys."""
        return dict(cls.SOURCE_TYPES).keys()

    def type_description(self):
        """Return the human readable description for this relation's type."""
        return dict(self.SOURCE_TYPES)[self.type]

    def rdf_triples(self, subject, graph):
        """Serialize this relation as a blank node using a DC or HS term."""
        relation_node = BNode()
        graph.add((subject, self.get_class_term(), relation_node))
        if self.type in self.HS_RELATION_TERMS:
            graph.add((relation_node, getattr(HSTERMS, self.type), Literal(self.value)))
        else:
            graph.add((relation_node, getattr(DCTERMS, self.type), Literal(self.value)))

    @classmethod
    def ingest_rdf(cls, graph, subject, content_object):
        """Create Relation elements from the subject's relation triples."""
        for _, _, relation_node in graph.triples((subject, cls.get_class_term(), None)):
            # initialize before the inner loop; a relation node with no triples
            # previously caused a NameError on type_term
            type_term = None
            value = None
            for _, p, o in graph.triples((relation_node, None, None)):
                type_term = p
                value = o
                break
            if type_term:
                type = type_term.split('/')[-1]
                value = str(value)
                Relation.create(type=type, value=value, content_object=content_object)

    @classmethod
    def create(cls, **kwargs):
        """Define custom create method for Relation class.

        :raises ValidationError: when type/value is missing or invalid, or
            when an identical relation already exists on the metadata object
        """
        # NOTE: these exceptions were previously constructed but never raised
        if 'type' not in kwargs:
            raise ValidationError("Type of relation element is missing.")
        if 'value' not in kwargs:
            raise ValidationError("Value of relation element is missing.")
        if not kwargs['type'] in list(dict(cls.SOURCE_TYPES).keys()):
            raise ValidationError('Invalid relation type:%s' % kwargs['type'])
        metadata_obj = kwargs['content_object']
        metadata_type = ContentType.objects.get_for_model(metadata_obj)
        # avoid creating duplicate element (same type and same value)
        if Relation.objects.filter(type=kwargs['type'],
                                   value=kwargs['value'],
                                   object_id=metadata_obj.id,
                                   content_type=metadata_type).exists():
            raise ValidationError('Relation element of the same type '
                                  'and value already exists.')
        return super(Relation, cls).create(**kwargs)

    @classmethod
    def update(cls, element_id, **kwargs):
        """Define custom update method for Relation class.

        :raises ValidationError: when type/value is missing or invalid, or
            when the update would duplicate an existing relation
        """
        # NOTE: these exceptions were previously constructed but never raised
        if 'type' not in kwargs:
            raise ValidationError("Type of relation element is missing.")
        if 'value' not in kwargs:
            raise ValidationError("Value of relation element is missing.")
        if not kwargs['type'] in list(dict(cls.SOURCE_TYPES).keys()):
            raise ValidationError('Invalid relation type:%s' % kwargs['type'])
        # avoid changing this relation to an existing relation of same type and same value
        rel = Relation.objects.get(id=element_id)
        metadata_obj = kwargs['content_object']
        metadata_type = ContentType.objects.get_for_model(metadata_obj)
        qs = Relation.objects.filter(type=kwargs['type'],
                                     value=kwargs['value'],
                                     object_id=metadata_obj.id,
                                     content_type=metadata_type)
        if qs.exists() and qs.first() != rel:
            # this update will create a duplicate relation element
            raise ValidationError('A relation element of the same type and value already exists.')
        super(Relation, cls).update(element_id, **kwargs)
@rdf_terms(DC.identifier)
class Identifier(AbstractMetaDataElement):
    """Create Identifier custom metadata element.

    Holds a named identifier (e.g. 'hydroShareIdentifier' or 'doi') and
    its globally unique URL.
    """

    term = 'Identifier'
    name = models.CharField(max_length=100)
    url = models.URLField(unique=True)

    def __unicode__(self):
        """Return {name} {url} for unicode representation."""
        return "{name} {url}".format(name=self.name, url=self.url)

    def rdf_triples(self, subject, graph):
        """Serialize as a blank node carrying either a DOI or a HydroShare URL."""
        identifier_node = BNode()
        graph.add((subject, self.get_class_term(), identifier_node))
        if self.name.lower() == 'doi':
            graph.add((identifier_node, HSTERMS.doi, URIRef(self.url)))
        else:
            graph.add((identifier_node, HSTERMS.hydroShareIdentifier, URIRef(self.url)))

    @classmethod
    def ingest_rdf(cls, graph, subject, content_object):
        """Create Identifier elements from the subject's dc:identifier triples.

        DOI identifiers keep their ingested URL; a hydroShareIdentifier is
        rewritten to this resource's own subject URL.
        """
        for _, _, identifier_node in graph.triples((subject, cls.get_class_term(), None)):
            url = graph.value(subject=identifier_node, predicate=HSTERMS.doi)
            name = 'doi'
            if not url:
                name = 'hydroShareIdentifier'
                url = graph.value(subject=identifier_node, predicate=HSTERMS.hydroShareIdentifier)
                if url:
                    # overwrite hydroShareIdentifier url with this resource's url
                    url = content_object.rdf_subject()
            if url:
                Identifier.create(url=str(url), name=name, content_object=content_object)

    @classmethod
    def create(cls, **kwargs):
        """Define custom create method for Identifier model.

        :raises ValidationError: for a missing/duplicate name, or a 'doi'
            identifier on a resource that has no DOI assigned
        """
        if 'name' in kwargs:
            metadata_obj = kwargs['content_object']
            # get matching resource
            resource = BaseResource.objects.filter(object_id=metadata_obj.id).first()
            metadata_type = ContentType.objects.get_for_model(metadata_obj)
            # check the identifier name doesn't already exist - identifier name
            # needs to be unique per resource
            idf = Identifier.objects.filter(name__iexact=kwargs['name'],
                                            object_id=metadata_obj.id,
                                            content_type=metadata_type).first()
            if idf:
                raise ValidationError('Identifier name:%s already exists' % kwargs['name'])
            if kwargs['name'].lower() == 'doi':
                if not resource.doi:
                    raise ValidationError("Identifier of 'DOI' type can't be created for a "
                                          "resource that has not been assigned a DOI yet.")
            return super(Identifier, cls).create(**kwargs)
        else:
            raise ValidationError("Name of identifier element is missing.")

    @classmethod
    def update(cls, element_id, **kwargs):
        """Define custom update method for Identifier model.

        The 'migration' kwarg relaxes some checks so data migrations can
        rename the hydroshare identifier.
        """
        idf = Identifier.objects.get(id=element_id)
        if 'name' in kwargs:
            if idf.name.lower() != kwargs['name'].lower():
                if idf.name.lower() == 'hydroshareidentifier':
                    # only data migrations may rename the hydroshare identifier
                    if 'migration' not in kwargs:
                        raise ValidationError("Identifier name 'hydroshareIdentifier' can't "
                                              "be changed.")
                if idf.name.lower() == 'doi':
                    raise ValidationError("Identifier name 'DOI' can't be changed.")
                # check this new identifier name not already exists
                if Identifier.objects.filter(name__iexact=kwargs['name'], object_id=idf.object_id,
                                             content_type__pk=idf.content_type.id).count() > 0:
                    if 'migration' not in kwargs:
                        raise ValidationError('Identifier name:%s already exists.'
                                              % kwargs['name'])
        if 'url' in kwargs:
            if idf.url.lower() != kwargs['url'].lower():
                if idf.name.lower() == 'hydroshareidentifier':
                    if 'migration' not in kwargs:
                        raise ValidationError("Hydroshare identifier url value can't be changed.")
                # check this new identifier url not already exists
                if Identifier.objects.filter(url__iexact=kwargs['url'], object_id=idf.object_id,
                                             content_type__pk=idf.content_type.id).count() > 0:
                    raise ValidationError('Identifier URL:%s already exists.' % kwargs['url'])
        super(Identifier, cls).update(element_id, **kwargs)

    @classmethod
    def remove(cls, element_id):
        """Define custom remove method for Identifier model."""
        idf = Identifier.objects.get(id=element_id)
        # get matching resource
        resource = BaseResource.objects.filter(object_id=idf.content_object.id).first()
        if idf.name.lower() == 'hydroshareidentifier':
            raise ValidationError("Hydroshare identifier:%s can't be deleted." % idf.name)
        if idf.name.lower() == 'doi':
            # the DOI identifier must persist while the resource keeps its DOI
            if resource.doi:
                raise ValidationError("Hydroshare identifier:%s can't be deleted for a resource "
                                      "that has been assigned a DOI." % idf.name)
        idf.delete()
@rdf_terms(DC.publisher, name=HSTERMS.publisherName, url=HSTERMS.publisherURL)
class Publisher(AbstractMetaDataElement):
    """Publisher metadata element; immutable once created."""

    term = 'Publisher'
    name = models.CharField(max_length=200)
    url = models.URLField()

    def __unicode__(self):
        """Represent the element by its name and URL."""
        return "{name} {url}".format(name=self.name, url=self.url)

    class Meta:
        """Allow only one publisher per metadata object."""

        unique_together = ("content_type", "object_id")

    @classmethod
    def create(cls, **kwargs):
        """Create a publisher element, enforcing the CUAHSI publisher rules.

        A published resource with content files must list CUAHSI as its
        publisher; a resource without content files must not.
        """
        metadata_obj = kwargs['content_object']
        # locate the resource this metadata belongs to
        resource = BaseResource.objects.filter(object_id=metadata_obj.id).first()
        if not resource.raccess.published:
            raise ValidationError("Publisher element can't be created for a resource that "
                                  "is not yet published.")
        publisher_CUAHSI = "Consortium of Universities for the Advancement of Hydrologic " \
                           "Science, Inc. (CUAHSI)"
        cuahsi_url = 'https://www.cuahsi.org'
        if resource.files.all():
            # resources with content files are always published by CUAHSI
            if 'name' in kwargs and kwargs['name'].lower() != publisher_CUAHSI.lower():
                raise ValidationError("Invalid publisher name")
            kwargs['name'] = publisher_CUAHSI
            if 'url' in kwargs and kwargs['url'].lower() != cuahsi_url:
                raise ValidationError("Invalid publisher URL")
            kwargs['url'] = cuahsi_url
        else:
            # CUAHSI may not be named as the publisher of a file-less resource
            if 'name' in kwargs and kwargs['name'].lower() == publisher_CUAHSI.lower():
                raise ValidationError("Invalid publisher name")
            if 'url' in kwargs and kwargs['url'].lower() == cuahsi_url:
                raise ValidationError("Invalid publisher URL")
        return super(Publisher, cls).create(**kwargs)

    @classmethod
    def update(cls, element_id, **kwargs):
        """Disallow updating a publisher element."""
        raise ValidationError("Publisher element can't be updated.")

    @classmethod
    def remove(cls, element_id):
        """Disallow deleting a publisher element."""
        raise ValidationError("Publisher element can't be deleted.")
@rdf_terms(DC.language)
class Language(AbstractMetaDataElement):
    """Resource language metadata element, stored as an ISO language code."""

    term = 'Language'
    code = models.CharField(max_length=3, choices=iso_languages)

    class Meta:
        """Allow only one language per metadata object."""

        unique_together = ("content_type", "object_id")

    def __unicode__(self):
        """Represent the element by its language code."""
        return self.code

    @classmethod
    def create(cls, **kwargs):
        """Create a language element after validating the ISO code."""
        if 'code' not in kwargs:
            raise ValidationError("Language code is missing.")
        if not any(entry[0] == kwargs['code'] for entry in iso_languages):
            raise ValidationError('Invalid language code:%s' % kwargs['code'])
        return super(Language, cls).create(**kwargs)

    @classmethod
    def update(cls, element_id, **kwargs):
        """Update the language element after validating the ISO code."""
        if 'code' not in kwargs:
            raise ValidationError('Language code is missing.')
        if not any(entry[0] == kwargs['code'] for entry in iso_languages):
            raise ValidationError('Invalid language code:%s' % kwargs['code'])
        super(Language, cls).update(element_id, **kwargs)

    def rdf_triples(self, subject, graph):
        """Add the language code as a literal triple on the subject."""
        graph.add((subject, self.get_class_term(), Literal(self.code)))

    @classmethod
    def ingest_rdf(cls, graph, subject, content_object):
        """Create a Language element from the subject's language triple."""
        found = graph.value(subject=subject, predicate=cls.get_class_term())
        if found:
            Language.create(code=str(found), content_object=content_object)
@rdf_terms(DC.coverage)
class Coverage(AbstractMetaDataElement):
"""Define Coverage custom metadata element model."""
COVERAGE_TYPES = (
('box', 'Box'),
('point', 'Point'),
('period', 'Period')
)
term = 'Coverage'
type = models.CharField(max_length=20, choices=COVERAGE_TYPES)
def __unicode__(self):
"""Return {type} {value} for unicode representation."""
return "{type} {value}".format(type=self.type, value=self._value)
class Meta:
"""Define meta properties for Coverage model."""
unique_together = ("type", "content_type", "object_id")
"""
_value field stores a json string. The content of the json
string depends on the type of coverage as shown below. All keys shown in
json string are required.
For coverage type: period
_value = "{'name':coverage name value here (optional), 'start':start date value,
'end':end date value, 'scheme':'W3C-DTF}"
For coverage type: point
_value = "{'east':east coordinate value,
'north':north coordinate value,
'units:units applying to (east. north),
'name':coverage name value here (optional),
'elevation': coordinate in the vertical direction (optional),
'zunits': units for elevation (optional),
'projection': name of the projection (optional),
}"
For coverage type: box
_value = "{'northlimit':northenmost coordinate value,
'eastlimit':easternmost coordinate value,
'southlimit':southernmost coordinate value,
'westlimit':westernmost coordinate value,
'units:units applying to 4 limits (north, east, south & east),
'name':coverage name value here (optional),
'uplimit':uppermost coordinate value (optional),
'downlimit':lowermost coordinate value (optional),
'zunits': units for uplimit/downlimit (optional),
'projection': name of the projection (optional)}"
"""
_value = models.CharField(max_length=1024)
    @property
    def value(self):
        """Return the coverage value as a dict parsed from the stored JSON string (_value)."""
        return json.loads(self._value)
@classmethod
def create(cls, **kwargs):
"""Define custom create method for Coverage model.
data for the coverage value attribute must be provided as a dictionary
Note that kwargs['_value'] is a JSON-serialized unicode string dictionary
generated from django.forms.models.model_to_dict() which converts model values
to dictionaries.
"""
if 'type' in kwargs:
# check the type doesn't already exists - we allow only one coverage type per resource
metadata_obj = kwargs['content_object']
metadata_type = ContentType.objects.get_for_model(metadata_obj)
if not kwargs['type'] in list(dict(cls.COVERAGE_TYPES).keys()):
raise ValidationError('Invalid coverage type:%s' % kwargs['type'])
if kwargs['type'] == 'box':
# check that there is not already a coverage of point type
coverage = Coverage.objects.filter(type='point', object_id=metadata_obj.id,
content_type=metadata_type).first()
if coverage:
raise ValidationError("Coverage type 'Box' can't be created when there "
"is a coverage of type 'Point'")
elif kwargs['type'] == 'point':
# check that there is not already a coverage of box type
coverage = Coverage.objects.filter(type='box', object_id=metadata_obj.id,
content_type=metadata_type).first()
if coverage:
raise ValidationError("Coverage type 'Point' can't be created when "
"there is a coverage of type 'Box'")
value_arg_dict = None
if 'value' in kwargs:
value_arg_dict = kwargs['value']
elif '_value' in kwargs:
value_arg_dict = json.loads(kwargs['_value'])
if value_arg_dict is not None:
cls.validate_coverage_type_value_attributes(kwargs['type'], value_arg_dict)
if kwargs['type'] == 'period':
value_dict = {k: v for k, v in list(value_arg_dict.items())
if k in ('name', 'start', 'end')}
elif kwargs['type'] == 'point':
value_dict = {k: v for k, v in list(value_arg_dict.items())
if k in ('name', 'east', 'north', 'units', 'elevation',
'zunits', 'projection')}
elif kwargs['type'] == 'box':
value_dict = {k: v for k, v in list(value_arg_dict.items())
if k in ('units', 'northlimit', 'eastlimit', 'southlimit',
'westlimit', 'name', 'uplimit', 'downlimit',
'zunits', 'projection')}
if kwargs['type'] == 'box' or kwargs['type'] == 'point':
if 'projection' not in value_dict:
value_dict['projection'] = 'WGS 84 EPSG:4326'
value_json = json.dumps(value_dict)
if 'value' in kwargs:
del kwargs['value']
kwargs['_value'] = value_json
return super(Coverage, cls).create(**kwargs)
else:
raise ValidationError('Coverage value is missing.')
else:
raise ValidationError("Type of coverage element is missing.")
@classmethod
def update(cls, element_id, **kwargs):
"""Define custom create method for Coverage model.
data for the coverage value attribute must be provided as a dictionary
"""
cov = Coverage.objects.get(id=element_id)
changing_coverage_type = False
if 'type' in kwargs:
changing_coverage_type = cov.type != kwargs['type']
if 'value' in kwargs:
cls.validate_coverage_type_value_attributes(kwargs['type'], kwargs['value'])
else:
raise ValidationError('Coverage value is missing.')
if 'value' in kwargs:
if changing_coverage_type:
value_dict = {}
cov.type = kwargs['type']
else:
value_dict = cov.value
if 'name' in kwargs['value']:
value_dict['name'] = kwargs['value']['name']
if cov.type == 'period':
for item_name in ('start', 'end'):
if item_name in kwargs['value']:
value_dict[item_name] = kwargs['value'][item_name]
elif cov.type == 'point':
for item_name in ('east', 'north', 'units', 'elevation', 'zunits', 'projection'):
if item_name in kwargs['value']:
value_dict[item_name] = kwargs['value'][item_name]
elif cov.type == 'box':
for item_name in ('units', 'northlimit', 'eastlimit', 'southlimit', 'westlimit',
'uplimit', 'downlimit', 'zunits', 'projection'):
if item_name in kwargs['value']:
value_dict[item_name] = kwargs['value'][item_name]
value_json = json.dumps(value_dict)
del kwargs['value']
kwargs['_value'] = value_json
super(Coverage, cls).update(element_id, **kwargs)
@classmethod
def remove(cls, element_id):
"""Define custom remove method for Coverage model."""
raise ValidationError("Coverage element can't be deleted.")
def add_to_xml_container(self, container):
"""Update etree SubElement container with coverage values."""
NAMESPACES = CoreMetaData.NAMESPACES
dc_coverage = etree.SubElement(container, '{%s}coverage' % NAMESPACES['dc'])
cov_dcterm = '{%s}' + self.type
dc_coverage_dcterms = etree.SubElement(dc_coverage,
cov_dcterm % NAMESPACES['dcterms'])
rdf_coverage_value = etree.SubElement(dc_coverage_dcterms,
'{%s}value' % NAMESPACES['rdf'])
if self.type == 'period':
start_date = parser.parse(self.value['start'])
end_date = parser.parse(self.value['end'])
cov_value = 'start=%s; end=%s; scheme=W3C-DTF' % (start_date.isoformat(),
end_date.isoformat())
if 'name' in self.value:
cov_value = 'name=%s; ' % self.value['name'] + cov_value
elif self.type == 'point':
cov_value = 'east=%s; north=%s; units=%s' % (self.value['east'],
self.value['north'],
self.value['units'])
if 'name' in self.value:
cov_value = 'name=%s; ' % self.value['name'] + cov_value
if 'elevation' in self.value:
cov_value += '; elevation=%s' % self.value['elevation']
if 'zunits' in self.value:
cov_value += '; zunits=%s' % self.value['zunits']
if 'projection' in self.value:
cov_value += '; projection=%s' % self.value['projection']
else:
# this is box type
cov_value = 'northlimit=%s; eastlimit=%s; southlimit=%s; westlimit=%s; units=%s' \
% (self.value['northlimit'], self.value['eastlimit'],
self.value['southlimit'], self.value['westlimit'],
self.value['units'])
if 'name' in self.value:
cov_value = 'name=%s; ' % self.value['name'] + cov_value
if 'uplimit' in self.value:
cov_value += '; uplimit=%s' % self.value['uplimit']
if 'downlimit' in self.value:
cov_value += '; downlimit=%s' % self.value['downlimit']
if 'uplimit' in self.value or 'downlimit' in self.value:
cov_value += '; zunits=%s' % self.value['zunits']
if 'projection' in self.value:
cov_value += '; projection=%s' % self.value['projection']
rdf_coverage_value.text = cov_value
@classmethod
def ingest_rdf(cls, graph, subject, content_object):
for _, _, cov in graph.triples((subject, cls.get_class_term(), None)):
type = graph.value(subject=cov, predicate=RDF.type)
value = graph.value(subject=cov, predicate=RDF.value)
type = type.split('/')[-1]
value_dict = {}
for key_value in value.split(";"):
key_value = key_value.strip()
k, v = key_value.split("=")
if k in ['start', 'end']:
v = parser.parse(v).strftime("%Y/%m/%d %H:%M:%S")
value_dict[k] = v
Coverage.create(type=type, value=value_dict, content_object=content_object)
def rdf_triples(self, subject, graph):
coverage = BNode()
graph.add((subject, self.get_class_term(), coverage))
DCTERMS_type = getattr(DCTERMS, self.type)
graph.add((coverage, RDF.type, DCTERMS_type))
value_dict = {}
for k, v in self.value.items():
if k in ['start', 'end']:
v = parser.parse(v).isoformat()
value_dict[k] = v
value_string = "; ".join(["=".join([key, str(val)]) for key, val in value_dict.items()])
graph.add((coverage, RDF.value, Literal(value_string)))
@classmethod
def validate_coverage_type_value_attributes(cls, coverage_type, value_dict):
"""Validate values based on coverage type."""
def compute_longitude(key_name):
if value_dict[key_name] <= -180 and value_dict[key_name] >= -360:
value_dict[key_name] = value_dict[key_name] + 360
elif value_dict[key_name] >= 180 and value_dict[key_name] <= 360:
value_dict[key_name] = value_dict[key_name] - 360
if value_dict[key_name] < -180 or value_dict[key_name] > 180:
err_msg = "Invalid value for {}:{}. Value for {} longitude should be in the range of -180 to 180"
err_msg = err_msg.format(key_name, value_dict[key_name], key_name)
raise ValidationError(err_msg)
if coverage_type == 'period':
# check that all the required sub-elements exist
if 'start' not in value_dict or 'end' not in value_dict:
raise ValidationError("For coverage of type 'period' values for both start date "
"and end date are needed.")
elif coverage_type == 'point':
# check that all the required sub-elements exist
if 'east' not in value_dict or 'north' not in value_dict or 'units' not in value_dict:
raise ValidationError("For coverage of type 'point' values for 'east', 'north' "
"and 'units' are needed.")
for value_item in ('east', 'north'):
try:
value_dict[value_item] = float(value_dict[value_item])
except TypeError:
raise ValidationError("Value for '{}' must be numeric".format(value_item))
compute_longitude(key_name='east')
if value_dict['north'] < -90 or value_dict['north'] > 90:
raise ValidationError("Value for North latitude should be "
"in the range of -90 to 90")
elif coverage_type == 'box':
# check that all the required sub-elements exist
for value_item in ['units', 'northlimit', 'eastlimit', 'southlimit', 'westlimit']:
if value_item not in value_dict:
raise ValidationError("For coverage of type 'box' values for one or more "
"bounding box limits or 'units' is missing.")
else:
if value_item != 'units':
try:
value_dict[value_item] = float(value_dict[value_item])
except TypeError:
raise ValidationError("Value for '{}' must be numeric"
.format(value_item))
if value_dict['northlimit'] < -90 or value_dict['northlimit'] > 90:
raise ValidationError("Value for North latitude should be "
"in the range of -90 to 90")
if value_dict['southlimit'] < -90 or value_dict['southlimit'] > 90:
raise ValidationError("Value for South latitude should be "
"in the range of -90 to 90")
if (value_dict['northlimit'] < 0 and value_dict['southlimit'] < 0) or (
value_dict['northlimit'] > 0 and value_dict['southlimit'] > 0):
if value_dict['northlimit'] < value_dict['southlimit']:
raise ValidationError("Value for North latitude must be greater than or "
"equal to that of South latitude.")
compute_longitude(key_name='eastlimit')
compute_longitude(key_name='westlimit')
def get_html(self, pretty=True):
"""Use the dominate module to generate element display HTML.
This function should be used for displaying one spatial coverage element
or one temporal coverage element
"""
root_div = div(cls='content-block')
def get_th(heading_name):
return th(heading_name, cls="text-muted")
with root_div:
if self.type == 'box' or self.type == 'point':
legend('Spatial Coverage')
div('Coordinate Reference System', cls='text-muted')
div(self.value['projection'])
div('Coordinate Reference System Unit', cls='text-muted space-top')
div(self.value['units'])
h4('Extent', cls='space-top')
with table(cls='custom-table'):
if self.type == 'box':
with tbody():
with tr():
get_th('North')
td(self.value['northlimit'])
with tr():
get_th('West')
td(self.value['westlimit'])
with tr():
get_th('South')
td(self.value['southlimit'])
with tr():
get_th('East')
td(self.value['eastlimit'])
else:
with tr():
get_th('North')
td(self.value['north'])
with tr():
get_th('East')
td(self.value['east'])
else:
legend('Temporal Coverage')
start_date = parser.parse(self.value['start'])
end_date = parser.parse(self.value['end'])
with table(cls='custom-table'):
with tbody():
with tr():
get_th('Start Date')
td(start_date.strftime('%m/%d/%Y'))
with tr():
get_th('End Date')
td(end_date.strftime('%m/%d/%Y'))
return root_div.render(pretty=pretty)
@classmethod
def get_temporal_html_form(cls, resource, element=None, file_type=False, allow_edit=True):
"""Return CoverageTemporalForm for Coverage model."""
from .forms import CoverageTemporalForm
coverage_data_dict = dict()
if element is not None:
start_date = parser.parse(element.value['start'])
end_date = parser.parse(element.value['end'])
# change the date format to match with datepicker date format
coverage_data_dict['start'] = start_date.strftime('%m/%d/%Y')
coverage_data_dict['end'] = end_date.strftime('%m/%d/%Y')
coverage_form = CoverageTemporalForm(initial=coverage_data_dict, allow_edit=allow_edit,
res_short_id=resource.short_id if resource else None,
element_id=element.id if element else None,
file_type=file_type)
return coverage_form
@classmethod
def get_spatial_html_form(cls, resource, element=None, allow_edit=True, file_type=False):
"""Return SpatialCoverageForm for Coverage model."""
from .forms import CoverageSpatialForm
coverage_data_dict = dict()
if element is not None:
coverage_data_dict['type'] = element.type
coverage_data_dict['name'] = element.value.get('name', "")
if element.type == 'box':
coverage_data_dict['northlimit'] = element.value['northlimit']
coverage_data_dict['eastlimit'] = element.value['eastlimit']
coverage_data_dict['southlimit'] = element.value['southlimit']
coverage_data_dict['westlimit'] = element.value['westlimit']
else:
coverage_data_dict['east'] = element.value['east']
coverage_data_dict['north'] = element.value['north']
coverage_data_dict['elevation'] = element.value.get('elevation', None)
coverage_form = CoverageSpatialForm(initial=coverage_data_dict, allow_edit=allow_edit,
res_short_id=resource.short_id if resource else None,
element_id=element.id if element else None,
file_type=file_type)
return coverage_form
class Format(AbstractMetaDataElement):
    """Define Format custom metadata element model.

    Each Format element holds a single content-format string for a resource;
    presumably a MIME type per the Dublin Core 'format' term — confirm with
    callers that populate it.
    """

    term = 'Format'
    # format string (e.g. a MIME type); one element per distinct value
    value = models.CharField(max_length=150)

    class Meta:
        """Define meta properties for Format model."""

        # a given format value may appear at most once per metadata container
        unique_together = ("value", "content_type", "object_id")

    def __unicode__(self):
        """Return value field for unicode representation."""
        return self.value
@rdf_terms(HSTERMS.awardInfo, agency_name=HSTERMS.fundingAgencyName, award_title=HSTERMS.awardTitle,
           award_number=HSTERMS.awardNumber, agency_url=HSTERMS.fundingAgencyURL)
class FundingAgency(AbstractMetaDataElement):
    """Define FundingAgency custom metadata element mode."""

    term = 'FundingAgency'
    # name of the funding agency; the only required field
    agency_name = models.TextField(null=False)
    award_title = models.TextField(null=True, blank=True)
    award_number = models.TextField(null=True, blank=True)
    agency_url = models.URLField(null=True, blank=True)

    def __unicode__(self):
        """Return agency_name field for unicode representation."""
        return self.agency_name

    @classmethod
    def create(cls, **kwargs):
        """Define custom create method for FundingAgency model.

        :raises ValidationError: when agency_name is absent or blank/whitespace.
        """
        agency_name = kwargs.get('agency_name', None)
        if agency_name is None or len(agency_name.strip()) == 0:
            raise ValidationError("Agency name is missing")
        return super(FundingAgency, cls).create(**kwargs)

    @classmethod
    def update(cls, element_id, **kwargs):
        """Define custom update method for Agency model.

        :raises ValidationError: when agency_name is supplied but blank/whitespace.
        """
        agency_name = kwargs.get('agency_name', None)
        # bug fix: the previous check `if agency_name and ...` let an empty
        # string ('' is falsy) bypass validation, unlike create(); reject any
        # supplied-but-blank name
        if agency_name is not None and len(agency_name.strip()) == 0:
            raise ValidationError("Agency name is missing")
        super(FundingAgency, cls).update(element_id, **kwargs)
@rdf_terms(DC.subject)
class Subject(AbstractMetaDataElement):
    """Define Subject custom metadata element model.

    A Subject is a single keyword (Dublin Core 'subject') on a resource's
    metadata container; remove() refuses to delete the last one.
    """

    term = 'Subject'
    # keyword text for the resource
    value = models.CharField(max_length=100)

    class Meta:
        """Define meta properties for Subject model."""

        # exact-case uniqueness at the DB level; create() additionally
        # rejects case-insensitive duplicates
        unique_together = ("value", "content_type", "object_id")

    def __unicode__(self):
        """Return value field for unicode representation."""
        return self.value

    @classmethod
    def create(cls, **kwargs):
        """Define custom create method for Subject model.

        :raises ValidationError: when the same keyword (case-insensitive)
            already exists on this metadata container.
        """
        metadata_obj = kwargs['content_object']
        value = kwargs.get('value', None)
        if value is not None:
            # case-insensitive duplicate check (iexact)
            if metadata_obj.subjects.filter(value__iexact=value).exists():
                raise ValidationError("Subject element already exists.")

        return super(Subject, cls).create(**kwargs)

    @classmethod
    def remove(cls, element_id):
        """Define custom remove method for Subject model.

        :raises ValidationError: when this is the only Subject left on the
            metadata container — a resource keeps at least one keyword.
        """
        sub = Subject.objects.get(id=element_id)

        if Subject.objects.filter(object_id=sub.object_id,
                                  content_type__pk=sub.content_type.id).count() == 1:
            raise ValidationError("The only subject element of the resource can't be deleted.")
        sub.delete()

    def rdf_triples(self, subject, graph):
        """Add this keyword to the graph as a dc:subject literal on subject."""
        graph.add((subject, self.get_class_term(), Literal(self.value)))

    @classmethod
    def ingest_rdf(cls, graph, subject, content_object):
        """Create one Subject per dc:subject triple found for subject in graph."""
        for _, _, o in graph.triples((subject, cls.get_class_term(), None)):
            Subject.create(value=str(o), content_object=content_object)
@rdf_terms(DC.rights, statement=HSTERMS.rightsStatement, url=HSTERMS.URL)
class Rights(AbstractMetaDataElement):
    """Define Rights custom metadata element model.

    Holds the resource rights statement and/or a URL pointing to it.
    At most one Rights element exists per metadata container, and it can
    never be deleted (see remove()).
    """

    term = 'Rights'
    # free-text rights/license statement (optional)
    statement = models.TextField(null=True, blank=True)
    # URL of the license/rights document (optional)
    url = models.URLField(null=True, blank=True)

    def __unicode__(self):
        """Return either statement or statement + url for unicode representation."""
        value = ''
        if self.statement:
            value += self.statement + ' '
        if self.url:
            value += self.url

        return value

    class Meta:
        """Define meta properties for Rights model."""

        # one Rights element per metadata container
        unique_together = ("content_type", "object_id")

    @classmethod
    def remove(cls, element_id):
        """Define custom remove method for Rights model.

        :raises ValidationError: always — the Rights element is mandatory.
        """
        raise ValidationError("Rights element of a resource can't be deleted.")
def short_id():
    """Generate a uuid4 hex to be used as a resource or element short_id."""
    identifier = uuid4()
    return identifier.hex
class ResourceManager(PageManager):
    """Mezzanine PageManager specialized to an optional single resource type."""

    def __init__(self, resource_type=None, *args, **kwargs):
        """Remember which resource_type this manager serves (None means all)."""
        self.resource_type = resource_type
        super(ResourceManager, self).__init__(*args, **kwargs)

    def create(self, *args, **kwargs):
        """Create a new mezzanine page.

        When this manager is not bound to a type, any caller-supplied
        resource_type keyword is discarded before delegating.
        """
        if self.resource_type is None:
            kwargs.pop('resource_type', None)
        return super(ResourceManager, self).create(*args, **kwargs)

    def get_queryset(self):
        """Return the page queryset, narrowed to this manager's resource_type."""
        queryset = super(ResourceManager, self).get_queryset()
        if self.resource_type:
            queryset = queryset.filter(resource_type=self.resource_type)
        return queryset
class AbstractResource(ResourcePermissionsMixin, ResourceIRODSMixin):
"""
Create Abstract Class for all Resources.
All hydroshare objects inherit from this mixin. It defines things that must
be present to be considered a hydroshare resource. Additionally, all
hydroshare resources should inherit from Page. This gives them what they
need to be represented in the Mezzanine CMS.
In some cases, it is possible that the order of inheritence matters. Best
practice dictates that you list pages.Page first and then other classes:
class MyResourceContentType(pages.Page, hs_core.AbstractResource):
...
"""
content = models.TextField() # the field added for use by Django inplace editing
last_changed_by = models.ForeignKey(User,
help_text='The person who last changed the resource',
related_name='last_changed_%(app_label)s_%(class)s',
null=False,
default=1
)
files = GenericRelation('hs_core.ResourceFile',
help_text='The files associated with this resource',
for_concrete_model=True)
file_unpack_status = models.CharField(max_length=7,
null=True, blank=True,
choices=(('Pending', 'Pending'), ('Running', 'Running'),
('Done', 'Done'), ('Error', 'Error'))
)
file_unpack_message = models.TextField(null=True, blank=True)
short_id = models.CharField(max_length=32, default=short_id, db_index=True)
doi = models.CharField(max_length=128, null=False, blank=True, db_index=True, default='',
help_text='Permanent identifier. Never changes once it\'s been set.')
comments = CommentsField()
rating = RatingField()
# this is to establish a relationship between a resource and
# any metadata container object (e.g., CoreMetaData object)
object_id = models.PositiveIntegerField(null=True, blank=True)
content_type = models.ForeignKey(ContentType, null=True, blank=True)
content_object = GenericForeignKey('content_type', 'object_id')
extra_metadata = HStoreField(default={})
# this field is for resources to store extra key:value pairs as needed, e.g., bag checksum is stored as
# "bag_checksum":value pair for published resources in order to meet the DataONE data distribution needs
# for internal use only
# this field WILL NOT get recorded in bag and SHOULD NEVER be used for storing metadata
extra_data = HStoreField(default={})
# for tracking number of times resource and its files have been downloaded
download_count = models.PositiveIntegerField(default=0)
# for tracking number of times resource has been viewed
view_count = models.PositiveIntegerField(default=0)
def update_view_count(self, request):
self.view_count += 1
self.save()
def update_download_count(self):
self.download_count += 1
self.save()
    # definition of resource logic
    @property
    def supports_folders(self):
        """Return whether folder operations are supported. Computed for polymorphic types."""
        # base class answers False; resource subclasses override this property
        return False
    @property
    def last_updated(self):
        """Return the last updated date stored in metadata.

        Reads the start_date of the metadata date element of type 'modified'.
        Raises IndexError if no such element exists.
        """
        return self.metadata.dates.all().filter(type='modified')[0].start_date
@property
def has_required_metadata(self):
"""Return True only if all required metadata is present."""
if self.metadata is None or not self.metadata.has_all_required_elements():
return False
for f in self.logical_files:
if not f.metadata.has_all_required_elements():
return False
return True
@property
def can_be_public_or_discoverable(self):
"""Return True if the resource can be set to public or discoverable.
This is True if
1. The resource has all metadata elements marked as required.
2. The resource has all files that are considered required.
and False otherwise
"""
has_files = self.has_required_content_files()
has_metadata = self.has_required_metadata
return has_files and has_metadata
    def set_discoverable(self, value, user=None):
        """Set the discoverable flag for a resource.

        :param value: True or False
        :param user: user requesting the change, or None for changes that are not user requests.
        :raises ValidationError: if the current configuration cannot be set to desired state

        This sets the discoverable flag (self.raccess.discoverable) for a resource based
        upon application logic. It is part of AbstractResource because its result depends
        upon resource state, and not just access control.

        * This flag can only be set to True if the resource passes basic validations
          `has_required_metata` and `has_required_content_files`
        * setting `discoverable` to `False` also sets `public` to `False`
        * setting `discoverable` to `True` does not change `public`

        Thus, the setting public=True, discoverable=False is disallowed.

        If `user` is None, access control is not checked. This happens when a resource has been
        invalidated outside of the control of a specific user. In this case, user can be None
        """
        # access control is separate from validation logic
        if user is not None and not user.uaccess.can_change_resource_flags(self):
            raise ValidationError("You don't have permission to change resource sharing status")

        # check that there is sufficient resource content
        has_metadata = self.has_required_metadata
        has_files = self.has_required_content_files()
        if value and not (has_metadata and has_files):

            if not has_metadata and not has_files:
                msg = "Resource does not have sufficient metadata and content files to be " + \
                    "discoverable"
                raise ValidationError(msg)

            elif not has_metadata:
                msg = "Resource does not have sufficient metadata to be discoverable"
                raise ValidationError(msg)

            elif not has_files:
                msg = "Resource does not have sufficient content files to be discoverable"
                raise ValidationError(msg)

        else:  # state change is allowed
            self.raccess.discoverable = value
            self.raccess.save()
            # NOTE(review): this call runs for BOTH value=True and value=False,
            # so enabling discoverability also forces public=False — which
            # contradicts the docstring bullet "setting `discoverable` to `True`
            # does not change `public`". Confirm whether `if not value:` was
            # intended before changing behavior.
            self.set_public(False)
    def set_public(self, value, user=None):
        """Set the public flag for a resource.

        :param value: True or False
        :param user: user requesting the change, or None for changes that are not user requests.
        :raises ValidationError: if the current configuration cannot be set to desired state

        This sets the public flag (self.raccess.public) for a resource based
        upon application logic. It is part of AbstractResource because its result depends
        upon resource state, and not just access control.

        * This flag can only be set to True if the resource passes basic validations
          `has_required_metata` and `has_required_content_files`
        * setting `public` to `True` also sets `discoverable` to `True`
        * setting `public` to `False` does not change `discoverable`
        * setting `public` to either also modifies the AVU isPublic for the resource.

        Thus, the setting public=True, discoverable=False is disallowed.

        If `user` is None, access control is not checked. This happens when a resource has been
        invalidated outside of the control of a specific user. In this case, user can be None
        """
        # avoid import loop
        from hs_core.views.utils import run_script_to_update_hyrax_input_files
        from hs_core.signals import post_raccess_change

        # access control is separate from validation logic
        if user is not None and not user.uaccess.can_change_resource_flags(self):
            raise ValidationError("You don't have permission to change resource sharing status")

        old_value = self.raccess.public  # is this a change?

        # check that there is sufficient resource content
        has_metadata = self.has_required_metadata
        has_files = self.has_required_content_files()
        if value and not (has_metadata and has_files):

            if not has_metadata and not has_files:
                msg = "Resource does not have sufficient metadata and content files to be public"
                raise ValidationError(msg)

            elif not has_metadata:
                msg = "Resource does not have sufficient metadata to be public"
                raise ValidationError(msg)

            elif not has_files:
                msg = "Resource does not have sufficient content files to be public"
                raise ValidationError(msg)

        else:  # make valid state change
            self.raccess.public = value
            if value:  # can't be public without being discoverable
                self.raccess.discoverable = value
            self.raccess.save()
            # let listeners (e.g. indexing) react to the access-control change
            post_raccess_change.send(sender=self, resource=self)

            # public changed state: set isPublic metadata AVU accordingly
            if value != old_value:
                self.setAVU("isPublic", self.raccess.public)

            # TODO: why does this only run when something becomes public?
            # TODO: Should it be run when a NetcdfResource becomes private?
            # Answer to TODO above: it is intentional not to run it when a target resource
            # becomes private for performance reasons. The nightly script run will clean up
            # to make sure all private resources are not available to hyrax server as well as
            # to make sure all resources files available to hyrax server are up to date with
            # the HydroShare iRODS data store.

            # run script to update hyrax input files when private netCDF resource becomes
            # public or private composite resource that includes netCDF files becomes public
            is_netcdf_to_public = False
            if self.resource_type == 'NetcdfResource':
                is_netcdf_to_public = True
            elif self.resource_type == 'CompositeResource' and \
                    self.get_logical_files('NetCDFLogicalFile'):
                is_netcdf_to_public = True
            if value and settings.RUN_HYRAX_UPDATE and is_netcdf_to_public:
                run_script_to_update_hyrax_input_files(self.short_id)
def set_require_download_agreement(self, user, value):
"""Set resource require_download_agreement flag to True or False.
If require_download_agreement is True then user will be prompted to agree to resource
rights statement before he/she can download resource files or bag.
:param user: user requesting the change
:param value: True or False
:raises PermissionDenied: if the user lacks permission to change resource flag
"""
if not user.uaccess.can_change_resource_flags(self):
raise PermissionDenied("You don't have permission to change resource download agreement"
" status")
self.raccess.require_download_agreement = value
self.raccess.save()
def set_private_sharing_link(self, user, value):
"""Set resource 'allow_private_sharing' flag to True or False.
If allow_private_sharing is True then any user including anonymous user will be able to use the resource url
to view the resource (view mode).
:param user: user requesting the change
:param value: True or False
:raises PermissionDenied: if the user lacks permission to change resource flag
"""
if not user.uaccess.can_change_resource_flags(self):
raise PermissionDenied("You don't have permission to change resource private link sharing "
" status")
self.raccess.allow_private_sharing = value
self.raccess.save()
    def update_public_and_discoverable(self):
        """Update the settings of the public and discoverable flags for changes in metadata.

        Demotes a discoverable resource that no longer meets the metadata/file
        requirements; set_discoverable(False) also clears the public flag.
        """
        if self.raccess.discoverable and not self.can_be_public_or_discoverable:
            self.set_discoverable(False)  # also sets Public
    def get_url_of_path(self, path):
        """Return the URL of an arbtrary path in this resource.

        :param path: path within the resource, including data/contents/
        :return: site-relative URL string (the caller prefixes the site URL)

        A GET of this URL simply returns the contents of the path.
        This URL is independent of federation.
        PUT, POST, and DELETE are not supported.

        This choice for a URL is dependent mainly upon conformance to DataOne URL standards
        that are also conformant to the format in resourcemap.xml. This url does not contain
        the site URL, which is prefixed when needed.

        This is based upon the resourcemap_urls.py entry:
            url(r'^resource/(?P<shortkey>[0-9a-f-]+)/data/contents/(?.+)/$',
                views.file_download_url_mapper,
                name='get_resource_file')
        """
        # must start with a / in order to concat with current_site_url.
        return '/' + os.path.join('resource', self.short_id, path)
def get_irods_path(self, path, prepend_short_id=True):
"""Return the irods path by which the given path is accessed.
The input path includes data/contents/ as needed.
"""
if prepend_short_id and not path.startswith(self.short_id):
full_path = os.path.join(self.short_id, path)
else:
full_path = path
if self.is_federated:
return os.path.join(self.resource_federation_path, full_path)
else:
return full_path
    def set_quota_holder(self, setter, new_holder):
        """Set quota holder of the resource to new_holder who must be an owner.

        setter is the requesting user to transfer quota holder and setter must also be an owner

        :param setter: requesting user; must own this resource
        :param new_holder: user to be charged for this resource's quota; must own it
        :raises PermissionDenied: if either user is not an owner
        :raises QuotaException: (from validate_user_quota) if new_holder lacks quota
        """
        from hs_core.hydroshare.utils import validate_user_quota
        if __debug__:
            assert(isinstance(setter, User))
            assert(isinstance(new_holder, User))
        if not setter.uaccess.owns_resource(self) or \
                not new_holder.uaccess.owns_resource(self):
            raise PermissionDenied("Only owners can set or be set as quota holder for the resource")
        # QuotaException will be raised if new_holder does not have enough quota to hold this
        # new resource, in which case, set_quota_holder to the new user fails
        validate_user_quota(new_holder, self.size)
        attname = "quotaUserName"
        if setter.username != new_holder.username:
            # this condition check is needed to make sure attname exists as AVU before getting it
            oldqu = self.getAVU(attname)
            if oldqu:
                # have to remove the old AVU first before setting to the new one in order to trigger
                # quota micro-service PEP msiRemoveQuotaHolder so quota for old quota
                # holder will be reduced as a result of setting quota holder to a different user
                self.removeAVU(attname, oldqu)
        self.setAVU(attname, new_holder.username)
def get_quota_holder(self):
"""Get quota holder of the resource.
return User instance of the quota holder for the resource or None if it does not exist
"""
try:
uname = self.getAVU("quotaUserName")
except SessionException:
# quotaUserName AVU does not exist, return None
return None
if uname:
return User.objects.filter(username=uname).first()
else:
# quotaUserName AVU does not exist, return None
return None
    def removeAVU(self, attribute, value):
        """Remove an AVU at the resource level.

        :param attribute: AVU attribute name to remove from the resource collection
        :param value: the exact stored value to remove

        This avoids mistakes in setting AVUs by assuring that the appropriate root path
        is always used.
        """
        istorage = self.get_irods_storage()
        root_path = self.root_path
        # imeta rm -C <collection> <attribute> <value> drops the pair in iRODS
        istorage.session.run("imeta", None, 'rm', '-C', root_path, attribute, value)
    def setAVU(self, attribute, value):
        """Set an AVU at the resource level.

        :param attribute: AVU attribute name to set on the resource collection
        :param value: value to store; booleans are normalized to 'true'/'false'

        This avoids mistakes in setting AVUs by assuring that the appropriate root path
        is always used.
        """
        if isinstance(value, bool):
            value = str(value).lower()  # normalize boolean values to strings
        istorage = self.get_irods_storage()
        root_path = self.root_path

        # has to create the resource collection directory if it does not exist already due to
        # the need for setting quota holder on the resource collection before adding files into
        # the resource collection in order for the real-time iRODS quota micro-services to work
        if not istorage.exists(root_path):
            istorage.session.run("imkdir", None, '-p', root_path)

        istorage.setAVU(root_path, attribute, value)
def getAVU(self, attribute):
"""Get an AVU for a resource.
This avoids mistakes in getting AVUs by assuring that the appropriate root path
is alway used.
"""
istorage = self.get_irods_storage()
root_path = self.root_path
value = istorage.getAVU(root_path, attribute)
# Convert selected boolean attribute values to bool; non-existence implies False
# "Private" is the appropriate response if "isPublic" is None
if attribute == 'isPublic':
if value is not None and value.lower() == 'true':
return True
else:
return False
# Convert selected boolean attribute values to bool; non-existence implies True
# If bag_modified or metadata_dirty does not exist, then we do not know the
# state of metadata files and/or bags. They may not exist. Thus we interpret
# None as "true", which will generate the appropriate files if they do not exist.
if attribute == 'bag_modified' or attribute == 'metadata_dirty':
if value is None or value.lower() == 'true':
return True
else:
return False
# return strings for all other attributes
else:
return value
@classmethod
def scimeta_url(cls, resource_id):
""" Get URL of the science metadata file resourcemetadata.xml """
res = BaseResource.objects.get(short_id=resource_id)
scimeta_path = res.scimeta_path
scimeta_url = reverse('rest_download', kwargs={'path': scimeta_path})
return scimeta_url
# TODO: there are too many ways to get to the resourcemap.
# 1. {id}/data/resourcemap.xml
# 2. {id}/resmap
# Choose one!
@classmethod
def resmap_url(cls, resource_id):
""" Get URL of the resource map resourcemap.xml."""
resmap_path = "{resource_id}/data/resourcemap.xml".format(resource_id=resource_id)
resmap_url = reverse('rest_download', kwargs={'path': resmap_path})
return resmap_url
# TODO: this is inaccurate; resourcemap.xml != systemmetadata.xml
@classmethod
def sysmeta_path(cls, resource_id):
"""Get URL of resource map xml."""
return "{resource_id}/data/resourcemap.xml".format(resource_id=resource_id)
    def delete(self, using=None, keep_parents=False):
        """Delete the resource along with all of its files, metadata and bag.

        Order matters: files (and the logical files they belong to) are
        removed first, then metadata elements and the metadata container,
        then the bag/iRODS artifacts, and finally the resource row itself.

        :param using: accepted for Django Model.delete signature
            compatibility (unused here).
        :param keep_parents: accepted for Django Model.delete signature
            compatibility (unused here).
        """
        # local import avoids a circular import at module load time
        from .hydroshare import hs_bagit
        for fl in self.files.all():
            # COUCH: delete of file objects now cascades.
            # delete_logical_file=True removes the aggregation (logical
            # file) metadata for each file as well.
            fl.delete(delete_logical_file=True)
        # TODO: Pabitra - delete_all_elements() may not be needed in Django 1.8 and later
        self.metadata.delete_all_elements()
        self.metadata.delete()
        # remove the bag and stored files from iRODS
        hs_bagit.delete_files_and_bag(self)
        super(AbstractResource, self).delete()
@property
def metadata(self):
"""Return the metadata object for this resource."""
return self.content_object
@classmethod
def get_metadata_class(cls):
return CoreMetaData
@property
def first_creator(self):
"""Get first creator of resource from metadata."""
first_creator = self.metadata.creators.filter(order=1).first()
return first_creator
def get_metadata_xml(self, pretty_print=True, include_format_elements=True):
"""Get metadata xml for Resource.
Resource types that support file types
must override this method. See Composite Resource
type as an example
"""
return self.metadata.get_xml(pretty_print=pretty_print,
include_format_elements=include_format_elements)
def is_aggregation_xml_file(self, file_path):
"""Checks if the file path *file_path* is one of the aggregation related xml file paths
:param file_path: full file path starting with resource short_id
:return True if file_path is one of the aggregation xml file paths else False
This function is overridden for Composite Resource.
"""
return False
def extra_capabilites(self):
"""Return None. No-op method.
This is not terribly well defined yet, but should return at least a JSON serializable
object of URL endpoints where extra self-describing services exist and can be queried by
the user in the form of { "name" : "endpoint" }
"""
return None
def parse_citation_name(self, name, first_author=False):
"""Return properly formatted citation name from metadata."""
CREATOR_NAME_ERROR = "Failed to generate citation - invalid creator name."
first_names = None
if "," in name:
name_parts = name.split(",")
if len(name_parts) == 0:
return CREATOR_NAME_ERROR
elif len(name_parts) == 1:
last_names = name_parts[0]
elif len(name_parts) == 2:
first_names = name_parts[1]
first_names = first_names.split()
last_names = name_parts[0]
else:
return CREATOR_NAME_ERROR
else:
name_parts = name.split()
if len(name_parts) == 0:
return CREATOR_NAME_ERROR
elif len(name_parts) > 1:
first_names = name_parts[:-1]
last_names = name_parts[-1]
else:
last_names = name_parts[0]
if first_names:
initials_list = [i[0] for i in first_names]
initials = ". ".join(initials_list) + "."
if first_author:
author_name = "{last_name}, {initials}"
else:
author_name = "{initials} {last_name}"
author_name = author_name.format(last_name=last_names,
initials=initials
)
else:
author_name = "{last_name}".format(last_name=last_names)
return author_name + ", "
def get_custom_citation(self):
"""Get custom citation."""
if self.metadata.citation.first() is None:
return ''
return str(self.metadata.citation.first())
    def get_citation(self):
        """Build the recommended citation string from resource metadata.

        Roughly: authors (year). title, HydroShare, identifier-url
        [, pending-DOI note]. Returns an error message string when
        required metadata (creators, dates, identifier) is missing or
        malformed.
        """
        citation_str_lst = []
        CITATION_ERROR = "Failed to generate citation."
        # first author: organization name is used only when no personal name is set
        first_author = self.metadata.creators.all().filter(order=1)[0]
        if first_author.organization and not first_author.name:
            citation_str_lst.append(first_author.organization + ", ")
        else:
            citation_str_lst.append(self.parse_citation_name(first_author.name, first_author=True))
        other_authors = self.metadata.creators.all().filter(order__gt=1)
        for author in other_authors:
            if author.organization and not author.name:
                citation_str_lst.append(author.organization + ", ")
            elif author.name and len(author.name.strip()) != 0:
                citation_str_lst.append(self.parse_citation_name(author.name))
        # remove the last added comma and the space
        if len(citation_str_lst[-1]) > 2:
            citation_str_lst[-1] = citation_str_lst[-1][:-2]
        else:
            return CITATION_ERROR
        # prefer the published date; fall back to last modified
        if self.metadata.dates.all().filter(type='published'):
            citation_date = self.metadata.dates.all().filter(type='published')[0]
        elif self.metadata.dates.all().filter(type='modified'):
            citation_date = self.metadata.dates.all().filter(type='modified')[0]
        else:
            return CITATION_ERROR
        citation_str_lst.append(" ({year}). ".format(year=citation_date.start_date.year))
        citation_str_lst.append(self.metadata.title.value)
        # prefer the DOI identifier; fall back to the HydroShare identifier
        isPendingActivation = False
        if self.metadata.identifiers.all().filter(name="doi"):
            hs_identifier = self.metadata.identifiers.all().filter(name="doi")[0]
            # self.doi carries 'pending'/'failure' markers while activation is in flight
            if self.doi.find('pending') >= 0 or self.doi.find('failure') >= 0:
                isPendingActivation = True
        elif self.metadata.identifiers.all().filter(name="hydroShareIdentifier"):
            hs_identifier = self.metadata.identifiers.all().filter(name="hydroShareIdentifier")[0]
        else:
            return CITATION_ERROR
        citation_str_lst.append(", HydroShare, {url}".format(url=hs_identifier.url))
        if isPendingActivation:
            citation_str_lst.append(", DOI for this published resource is pending activation.")
        return ''.join(citation_str_lst)
@classmethod
def get_supported_upload_file_types(cls):
"""Get a list of permissible upload types.
Subclasses override this function to allow only specific file types.
Any version should return a tuple of those file extensions
(ex: return (".csv", ".txt",))
To disallow all file upload, return an empty tuple ( return ())
By default all file types are supported
This is called before creating a specific instance; hence it is a class method.
"""
return (".*",)
@classmethod
def allow_multiple_file_upload(cls):
"""
Return whether multiple files can be uploaded.
Subclasses of BaseResource override this function to tailor file upload.
To allow multiple files to be uploaded return True, otherwise return False
Resource by default allows multiple file upload.
"""
return True
@classmethod
def can_have_multiple_files(cls):
"""Return whether this kind of resource can contain multiple files.
Subclasses of BaseResource override this function to tailor file upload.
To allow resource to have only 1 file or no file, return False
A resource by default can contain multiple files
"""
return True
def has_required_content_files(self):
"""Check whether a resource has the required content files.
Any subclass of this class may need to override this function
to apply specific requirements as it relates to resource content files
"""
if len(self.get_supported_upload_file_types()) > 0:
if self.files.all().count() > 0:
return True
else:
return False
else:
return True
@property
def readme_file(self):
"""Returns a resource file that is at the root with a file name of either
'readme.txt' or 'readme.md' (filename is case insensitive). If no such file then None
is returned. If both files exist then resource file for readme.md is returned"""
res_files_at_root = self.files.filter(file_folder='')
readme_txt_file = None
readme_md_file = None
for res_file in res_files_at_root:
if res_file.file_name.lower() == 'readme.md':
readme_md_file = res_file
elif res_file.file_name.lower() == 'readme.txt':
readme_txt_file = res_file
if readme_md_file is not None:
break
if readme_md_file is not None:
return readme_md_file
else:
return readme_txt_file
def get_readme_file_content(self):
"""Gets the content of the readme file. If both a readme.md and a readme.txt file exist,
then the content of the readme.md file is returned, otherwise None
Note: The user uploaded readme file if originally not encoded as utf-8, then any non-ascii
characters in the file will be escaped when we return the file content.
"""
readme_file = self.readme_file
if readme_file is not None:
readme_file_content = readme_file.read().decode('utf-8', 'ignore')
if readme_file.extension.lower() == '.md':
markdown_file_content = markdown(readme_file_content)
return {'content': markdown_file_content,
'file_name': readme_file.file_name, 'file_type': 'md'}
else:
return {'content': readme_file_content, 'file_name': readme_file.file_name}
return readme_file
@property
def logical_files(self):
"""Get a list of logical files for resource."""
logical_files_list = []
for res_file in self.files.all():
if res_file.logical_file is not None:
if res_file.logical_file not in logical_files_list:
logical_files_list.append(res_file.logical_file)
return logical_files_list
@property
def aggregation_types(self):
"""Gets a list of all aggregation types that currently exist in this resource"""
aggr_types = []
aggr_type_names = []
for lf in self.logical_files:
if lf.type_name not in aggr_type_names:
aggr_type_names.append(lf.type_name)
aggr_type = lf.get_aggregation_display_name().split(":")[0]
aggr_types.append(aggr_type)
return aggr_types
@property
def non_logical_files(self):
"""Get list of non-logical files for resource."""
non_logical_files_list = []
for res_file in self.files.all():
if res_file.logical_file is None:
if res_file.logical_file not in non_logical_files_list:
non_logical_files_list.append(res_file)
return non_logical_files_list
@property
def generic_logical_files(self):
"""Get list of generic logical files for resource."""
generic_logical_files_list = []
for res_file in self.files.all():
if res_file.has_generic_logical_file:
if res_file.logical_file not in generic_logical_files_list:
generic_logical_files_list.append(res_file.logical_file)
return generic_logical_files_list
    def get_logical_files(self, logical_file_class_name):
        """Get a list of logical files (aggregations) for a specified logical file class name.

        :param logical_file_class_name: class name string to match,
            e.g. "GenericLogicalFile".
        """
        # NOTE(review): type_name is invoked as a method here, while the
        # aggregation_types property reads lf.type_name as an attribute;
        # one of the two usages is likely wrong -- confirm against the
        # logical file model definition.
        logical_files_list = [lf for lf in self.logical_files if
                              lf.type_name() == logical_file_class_name]
        return logical_files_list
@property
def has_logical_spatial_coverage(self):
"""Checks if any of the logical files has spatial coverage"""
return any(lf.metadata.spatial_coverage is not None for lf in self.logical_files)
@property
def has_logical_temporal_coverage(self):
"""Checks if any of the logical files has temporal coverage"""
return any(lf.metadata.temporal_coverage is not None for lf in self.logical_files)
@property
def supports_logical_file(self):
"""Check if resource allows associating resource file objects with logical file."""
return False
def supports_folder_creation(self, folder_full_path):
"""Check if resource supports creation of folder at the specified path."""
return True
def supports_rename_path(self, src_full_path, tgt_full_path):
"""Check if file/folder rename/move is allowed by this resource."""
return True
def supports_zip(self, folder_to_zip):
"""Check if resource supports the specified folder to be zipped."""
return True
def supports_unzip(self, file_to_unzip):
"""Check if resource supports the unzipping of the specified file."""
return True
def supports_delete_folder_on_zip(self, original_folder):
"""Check if resource allows the original folder to be deleted upon zip."""
return True
@property
def storage_type(self):
if not self.is_federated:
return 'local'
userpath = '/' + os.path.join(
getattr(settings, 'HS_USER_IRODS_ZONE', 'hydroshareuserZone'),
'home',
getattr(settings, 'HS_IRODS_PROXY_USER_IN_USER_ZONE', 'localHydroProxy'))
if self.resource_federation_path == userpath:
return 'user'
else:
return 'federated'
def is_folder(self, folder_path):
"""Determine whether a given path (relative to resource root, including /data/contents/)
is a folder or not. Returns False if the path does not exist.
"""
path_split = folder_path.split('/')
while path_split[-1] == '':
path_split.pop()
dir_path = '/'.join(path_split[0:-1])
# handles federation
irods_path = os.path.join(self.root_path, dir_path)
istorage = self.get_irods_storage()
try:
listing = istorage.listdir(irods_path)
except SessionException:
return False
if path_split[-1] in listing[0]: # folders
return True
else:
return False
    class Meta:
        """Define meta properties for AbstractResource class."""
        abstract = True
        # a resource is uniquely identified by its generic-relation pair
        unique_together = ("content_type", "object_id")
def get_path(instance, filename, folder=''):
    """Compute the storage path for *filename* belonging to *instance*.

    :param instance: ResourceFile whose resource determines the root path
        (and federation, if any).
    :param filename: a single file name, without folder.
    :param folder: optional override of instance.file_folder.

    Produces an absolute, possibly federated, path. With the default
    arguments this matches normal upload behavior; passing *folder*
    lets callers compute a prospective path for a file one wishes to
    move.
    """
    effective_folder = folder if folder else instance.file_folder
    return get_resource_file_path(instance.resource, filename, effective_folder)
# TODO: make this an instance method of BaseResource.
def get_resource_file_path(resource, filename, folder=''):
    """Return the qualified storage path for a file in *resource*.

    :param resource: resource containing the file.
    :param filename: name of file without folder, or an already fully
        qualified path (returned unchanged).
    :param folder: folder of the file within the resource.

    Produces an absolute path suitable for either local or federated
    storage, based on the resource's configuration.
    """
    # folder may arrive fully qualified; strip the qualification when it
    # does. We cannot merely test for the resource.root_path prefix: a
    # relative folder can legitimately begin with the resource's uuid when
    # this resource's own bag is added and unzipped into it -- see
    # https://github.com/hydroshare/hydroshare/issues/2984
    contents_prefix = os.path.join(resource.root_path, 'data', 'contents')
    if folder is not None and folder.startswith(contents_prefix):
        # TODO: does this now start with /?
        folder = folder[len(resource.root_path):]

    # an already fully qualified filename is returned untouched
    if filename.startswith(resource.file_path):
        return filename

    # otherwise, filename is an unqualified name
    if folder:
        # place under the sub-folder
        return os.path.join(resource.file_path, folder.strip('/'), filename)
    # place under the root folder
    return os.path.join(resource.file_path, filename.strip('/'))
def path_is_allowed(path):
    """Validate a candidate file path.

    Empty paths and paths containing relative components ('/../' or
    '/./') are rejected, preventing directory-traversal tricks.

    :raises ValidationError: when *path* is the empty string.
    :raises SuspiciousFileOperation: when *path* contains a relative
        component.
    """
    if path == "":
        raise ValidationError("Empty file paths are not allowed")
    for fragment in ('/../', '/./'):
        if fragment in path:
            raise SuspiciousFileOperation(
                "File paths cannot contain '{}'".format(fragment))
class FedStorage(IrodsStorage):
    """Define wrapper class to fix Django storage object limitations for iRODS.

    The constructor of a Django storage object must have no arguments.
    This simple workaround accomplishes that by hard-wiring the
    "federated" storage mode.
    """

    def __init__(self):
        """Initialize method with no arguments for federated storage."""
        # delegate to IrodsStorage in federated mode
        super(FedStorage, self).__init__("federated")
# TODO: revise path logic for rename_resource_file_in_django for proper path.
# TODO: utilize antibugging to check that paths are coherent after each operation.
class ResourceFile(ResourceFileIRODSMixin):
"""
Represent a file in a resource.
"""
    class Meta:
        """Composite indexes for looking files up by resource (object_id)
        together with the stored path, one per storage mode."""
        index_together = [['object_id', 'resource_file'],
                          ['object_id', 'fed_resource_file'],
                          ]
# A ResourceFile is a sub-object of a resource, which can have several types.
object_id = models.PositiveIntegerField()
content_type = models.ForeignKey(ContentType)
content_object = GenericForeignKey('content_type', 'object_id')
# This is used to direct uploads to a subfolder of the root folder for the resource.
# See get_path and get_resource_file_path above.
file_folder = models.CharField(max_length=4096, null=False, default="")
# This pair of FileFields deals with the fact that there are two kinds of storage
resource_file = models.FileField(upload_to=get_path, max_length=4096,
null=True, blank=True, storage=IrodsStorage())
fed_resource_file = models.FileField(upload_to=get_path, max_length=4096,
null=True, blank=True, storage=FedStorage())
# DEPRECATED: utilize resfile.set_storage_path(path) and resfile.storage_path.
# fed_resource_file_name_or_path = models.CharField(max_length=255, null=True, blank=True)
# DEPRECATED: use native size routine
# fed_resource_file_size = models.CharField(max_length=15, null=True, blank=True)
# we are using GenericForeignKey to allow resource file to be associated with any
# HydroShare defined LogicalFile types (e.g., GeoRasterFile, NetCdfFile etc)
logical_file_object_id = models.PositiveIntegerField(null=True, blank=True)
logical_file_content_type = models.ForeignKey(ContentType,
null=True, blank=True,
related_name="files")
logical_file_content_object = GenericForeignKey('logical_file_content_type',
'logical_file_object_id')
_size = models.BigIntegerField(default=-1)
def __str__(self):
"""Return resource filename or federated resource filename for string representation."""
if self.resource.resource_federation_path:
return self.fed_resource_file.name
else:
return self.resource_file.name
    @classmethod
    def create(cls, resource, file, folder='', source=None):
        """Create custom create method for ResourceFile model.

        Create takes arguments that are invariant of storage medium.
        These are turned into a path that is suitable for the medium.
        Federation must be initialized first at the resource level.

        :param resource: resource that contains the file.
        :param file: a File or a iRODS path to an existing file already copied.
        :param folder: the folder in which to store the file.
        :param source: an iRODS path in the same zone from which to copy the file.
        :raises ValidationError: when neither or both of file/source are
            given, or when the iRODS copy fails.

        There are two main usages to this constructor:

        * uploading a file from a form or REST call:
          ResourceFile.create(r, File(...something...), folder=d)

        * copying a file internally from iRODS:
          ResourceFile.create(r, file_name, folder=d, source=s)
          where source is a full iRODS pathname of the place from which
          to copy the file.

        A third form is less common and presumes that the file already
        exists in iRODS in the proper place:

        * pointing to an existing file:
          ResourceFile.create(r, file_name, folder=d)
        """
        # bind to appropriate resource
        kwargs = {}
        if __debug__:
            assert isinstance(resource, BaseResource)
        kwargs['content_object'] = resource
        kwargs['file_folder'] = folder
        # if file is an open file, use native copy by setting appropriate variables;
        # Django's FileField performs the storage copy at objects.create below
        if isinstance(file, File):
            if resource.is_federated:
                kwargs['resource_file'] = None
                kwargs['fed_resource_file'] = file
            else:
                kwargs['resource_file'] = file
                kwargs['fed_resource_file'] = None
        else:  # if file is not an open file, then it's a basename (string)
            if file is None and source is not None:
                if __debug__:
                    assert(isinstance(source, str))
                # source is a path to an iRODS file to be copied here.
                root, newfile = os.path.split(source)  # take file from source path
                # newfile is where it should be copied to.
                target = get_resource_file_path(resource, newfile, folder=folder)
                istorage = resource.get_irods_storage()
                # verify source before and target after the copy so a silent
                # iRODS failure cannot produce a dangling record
                if not istorage.exists(source):
                    raise ValidationError("ResourceFile.create: source {} of copy not found"
                                          .format(source))
                istorage.copyFiles(source, target)
                if not istorage.exists(target):
                    raise ValidationError("ResourceFile.create: copy to target {} failed"
                                          .format(target))
            elif file is not None and source is None:
                # file points to an existing iRODS file
                # no need to verify whether the file exists in iRODS since the file
                # name is returned from iRODS ils list dir command which already
                # confirmed the file exists already in iRODS
                target = get_resource_file_path(resource, file, folder=folder)
            else:
                raise ValidationError(
                    "ResourceFile.create: exactly one of source or file must be specified")
            # we've copied or moved if necessary; now set the paths
            if resource.is_federated:
                kwargs['resource_file'] = None
                kwargs['fed_resource_file'] = target
            else:
                kwargs['resource_file'] = target
                kwargs['fed_resource_file'] = None
        # Actually create the file record
        # when file is a File, the file is copied to storage in this step
        # otherwise, the copy must precede this step.
        return ResourceFile.objects.create(**kwargs)
# TODO: automagically handle orphaned logical files
def delete(self, delete_logical_file=False):
"""Delete a resource file record and the file contents.
:param delete_logical_file: if True deletes logical file associated with resource file
model.delete does not cascade to delete files themselves,
and these must be explicitly deleted.
"""
if self.exists:
if delete_logical_file and self.logical_file is not None:
# deleting logical file metadata deletes the logical file as well
self.logical_file.metadata.delete()
if self.fed_resource_file:
self.fed_resource_file.delete()
if self.resource_file:
self.resource_file.delete()
super(ResourceFile, self).delete()
@property
def resource(self):
"""Return content_object representing the resource from a resource file."""
return self.content_object
@property
def size(self):
"""Return file size of the file.
Calculates the size first if it has not been calculated yet."""
if self._size < 0:
self.calculate_size()
return self._size
@property
def modified_time(self):
return self.resource_file.storage.get_modified_time(self.resource_file.name)
@property
def checksum(self):
return self.resource_file.storage.checksum(self.resource_file.name, force_compute=False)
# TODO: write unit test
@property
def exists(self):
"""Check existence of files for both federated and non-federated."""
istorage = self.resource.get_irods_storage()
if self.resource.is_federated:
if __debug__:
assert self.resource_file.name is None or \
self.resource_file.name == ''
return istorage.exists(self.fed_resource_file.name)
else:
if __debug__:
assert self.fed_resource_file.name is None or \
self.fed_resource_file.name == ''
return istorage.exists(self.resource_file.name)
# TODO: write unit test
def read(self):
if self.resource.is_federated:
return self.fed_resource_file.read()
else:
return self.resource_file.read()
@property
def storage_path(self):
"""Return the qualified name for a file in the storage hierarchy.
This is a valid input to IrodsStorage for manipulating the file.
The output depends upon whether the IrodsStorage instance is running
in federated mode.
"""
# instance.content_object can be stale after changes.
# Re-fetch based upon key; bypass type system; it is not relevant
resource = self.resource
if resource.is_federated: # false if None or empty
if __debug__:
assert self.resource_file.name is None or \
self.resource_file.name == ''
return self.fed_resource_file.name
else:
if __debug__:
assert self.fed_resource_file.name is None or \
self.fed_resource_file.name == ''
return self.resource_file.name
def calculate_size(self):
"""Reads the file size and saves to the DB"""
if self.resource.resource_federation_path:
if __debug__:
assert self.resource_file.name is None or \
self.resource_file.name == ''
try:
self._size = self.fed_resource_file.size
except (SessionException, ValidationError):
logger = logging.getLogger(__name__)
logger.warn("file {} not found".format(self.storage_path))
self._size = 0
else:
if __debug__:
assert self.fed_resource_file.name is None or \
self.fed_resource_file.name == ''
try:
self._size = self.resource_file.size
except (SessionException, ValidationError):
logger = logging.getLogger(__name__)
logger.warn("file {} not found".format(self.storage_path))
self._size = 0
self.save()
    # ResourceFile API handles file operations
    def set_storage_path(self, path, test_exists=True):
        """Bind this ResourceFile instance to an existing file.

        :param path: the path of the object.
        :param test_exists: if True, test for path existence in iRODS
        :raises ValidationError: if the pathname is inconsistent with
            resource configuration.

        Path can be absolute or relative:

        * absolute paths contain the full irods path to a local or
          federated object.
        * relative paths start with anything else and can start with an
          optional folder.

        It is rather important that applications call this rather than
        simply assigning resource_file = "text path", because it takes the
        trouble of making that path fully qualified so that IrodsStorage
        will work properly. This records file_folder for future possible
        uploads and searches.

        The heavy lifting in this routine is accomplished via
        path_is_acceptable and get_path, which together normalize the file
        name. Regardless of whether the internal file name is qualified or
        not, this makes it fully qualified from the point of view of the
        IrodsStorage module.
        """
        folder, base = self.path_is_acceptable(path, test_exists=test_exists)
        self.file_folder = folder
        # first save persists file_folder, which get_path reads below
        self.save()
        # self.content_object can be stale after changes. Re-fetch based upon key
        # bypass type system; it is not relevant
        resource = self.resource
        # switch FileFields based upon federation path
        if resource.is_federated:
            # uses file_folder; must come after that setting.
            self.fed_resource_file = get_path(self, base)
            self.resource_file = None
        else:
            self.fed_resource_file = None
            self.resource_file = get_path(self, base)
        self.save()
@property
def short_path(self):
"""Return the unqualified path to the file object.
* This path is invariant of where the object is stored.
* Thus, it does not change if the resource is moved.
This is the path that should be used as a key to index things such as file type.
"""
if self.resource.is_federated:
folder, base = self.path_is_acceptable(self.fed_resource_file.name, test_exists=False)
else:
folder, base = self.path_is_acceptable(self.resource_file.name, test_exists=False)
if folder is not None:
return os.path.join(folder, base)
else:
return base
def set_short_path(self, path):
"""Set a path to a given path, relative to resource root.
There is some question as to whether the short path should be stored explicitly or
derived as in short_path above. The latter is computationally expensive but results
in a single point of truth.
"""
folder, base = os.path.split(path)
self.file_folder = folder # must precede call to get_path
if self.resource.is_federated:
self.resource_file = None
self.fed_resource_file = get_path(self, base)
else:
self.resource_file = get_path(self, base)
self.fed_resource_file = None
self.save()
def parse(self):
"""Parse a path into folder and basename."""
return self.path_is_acceptable(self.storage_path, test_exists=False)
def path_is_acceptable(self, path, test_exists=True):
"""Determine whether a path is acceptable for this resource file.
Called inside ResourceFile objects to check paths
:param path: path to test
:param test_exists: if True, test for path existence in iRODS
"""
return ResourceFile.resource_path_is_acceptable(self.resource, path, test_exists)
    @classmethod
    def resource_path_is_acceptable(cls, resource, path, test_exists=True):
        """Determine whether a path is acceptable for this resource file.

        Called outside ResourceFile objects or before such an object exists.

        :param resource: resource against which to check the path
        :param path: path to test
        :param test_exists: if True, test for path existence in iRODS
        :raises ValidationError: when the path is malformed or (with
            test_exists) does not exist in iRODS.

        This has the side effect of returning the short path for the
        resource as a folder/filename pair.
        """
        if test_exists:
            storage = resource.get_irods_storage()
        # prefix of every locally-qualified path for this resource
        locpath = os.path.join(resource.short_id, "data", "contents") + "/"
        relpath = path
        fedpath = resource.resource_federation_path
        if fedpath and relpath.startswith(fedpath + '/'):
            if test_exists and not storage.exists(path):
                raise ValidationError("Federated path does not exist in irods")
            plen = len(fedpath + '/')
            relpath = relpath[plen:]  # omit fed path
            # strip resource id from path
            if relpath.startswith(locpath):
                plen = len(locpath)
                relpath = relpath[plen:]  # omit local path
            else:
                raise ValidationError("Malformed federated resource path")
        elif path.startswith(locpath):
            # strip optional local path prefix
            if test_exists and not storage.exists(path):
                raise ValidationError("Local path ({}) does not exist in irods".format(path))
            plen = len(locpath)
            relpath = relpath[plen:]  # strip local prefix, omit /
        # now we have folder/file. We could have gotten this from the input, or
        # from stripping qualification folders. Note that this can contain
        # misnamed header content misinterpreted as a folder unless one tests
        # for existence
        if '/' in relpath:
            folder, base = os.path.split(relpath)
            abspath = get_resource_file_path(resource, base, folder=folder)
            if test_exists and not storage.exists(abspath):
                raise ValidationError("Local path does not exist in irods")
        else:
            folder = ''
            base = relpath
            abspath = get_resource_file_path(resource, base, folder=folder)
            if test_exists and not storage.exists(abspath):
                raise ValidationError("Local path does not exist in irods")
        return folder, base
# classmethods do things that query or affect all files.
@classmethod
def get(cls, resource, file, folder=''):
"""Get a ResourceFile record via its short path."""
if resource.resource_federation_path:
return ResourceFile.objects.get(object_id=resource.id,
fed_resource_file=get_resource_file_path(resource,
file,
folder))
else:
return ResourceFile.objects.get(object_id=resource.id,
resource_file=get_resource_file_path(resource,
file,
folder))
    # TODO: move to BaseResource as instance method
    @classmethod
    def list_folder(cls, resource, folder, sub_folders=True):
        """List files (instances of ResourceFile) in a given folder.

        :param resource: resource for which to list the folder
        :param folder: folder listed as either short_path or fully qualified path
        :param sub_folders: if true files from sub folders of *folder* will be
            included in the list
        """
        # normalize: empty folder means the resource's contents root; a short
        # path gets qualified; a fully qualified path yields the short form
        # needed for the exact file_folder match below
        file_folder_to_match = folder
        if not folder:
            folder = resource.file_path
        elif not folder.startswith(resource.file_path):
            folder = os.path.join(resource.file_path, folder)
        else:
            file_folder_to_match = folder[len(resource.file_path) + 1:]
        if sub_folders:
            # append trailing slash to match only this folder
            # (prevents "foo" from also matching "foobar/")
            if not folder.endswith("/"):
                folder += "/"
            # prefix match on the stored path picks up nested files too
            if resource.is_federated:
                return ResourceFile.objects.filter(
                    object_id=resource.id,
                    fed_resource_file__startswith=folder)
            else:
                return ResourceFile.objects.filter(
                    object_id=resource.id,
                    resource_file__startswith=folder)
        else:
            # exact folder match excludes nested sub-folders
            return ResourceFile.objects.filter(
                object_id=resource.id,
                file_folder=file_folder_to_match)
# TODO: move to BaseResource as instance method
@classmethod
def create_folder(cls, resource, folder, migrating_resource=False):
"""Create a folder for a resource."""
# avoid import loop
from hs_core.views.utils import create_folder
path_is_allowed(folder)
# TODO: move code from location used below to here
create_folder(resource.short_id, os.path.join('data', 'contents', folder),
migrating_resource=migrating_resource)
# TODO: move to BaseResource as instance method
@classmethod
def remove_folder(cls, resource, folder, user):
    """Remove a folder for a resource.

    :param resource: resource from which to remove the folder
    :param folder: short path of the folder to remove
    :param user: user performing the removal (for permission checks downstream)
    """
    # avoid import loop
    from hs_core.views.utils import remove_folder
    # reject disallowed folder paths before touching storage
    path_is_allowed(folder)
    # TODO: move code from location used below to here
    remove_folder(user, resource.short_id, os.path.join('data', 'contents', folder))
@property
def has_logical_file(self):
    """True when a logical file is attached to this resource file."""
    logical = self.logical_file
    return logical is not None
@property
def get_or_create_logical_file(self):
    """Return the logical file, creating a GenericLogicalFile on the fly if needed.

    Only applies to composite resources; returns None for any other resource type.

    This is a temporary fix just for release 1.14. It is expected that further
    work on logical files will make this unnecessary.
    """
    # prevent import loops
    from hs_file_types.models.generic import GenericLogicalFile

    if self.content_object.resource_type == "CompositeResource":
        if not self.has_logical_file:
            logical_file = GenericLogicalFile.create()
            self.logical_file_content_object = logical_file
            self.save()
            logger = logging.getLogger(__name__)
            # fix: Logger.warn() is a deprecated alias; use warning()
            logger.warning("auto-create logical file for {}".format(self.storage_path))
        return self.logical_file
    else:
        return None
@property
def logical_file(self):
    """Return the generic relation's content object for the logical file (or None)."""
    content_object = self.logical_file_content_object
    return content_object
@property
def logical_file_type_name(self):
    """Return the class name of the attached logical file object."""
    return type(self.logical_file_content_object).__name__
@property
def aggregation_display_name(self):
    """Return a name for the logical file type (aggregation) - used in UI.

    Assumes a logical file is attached; raises AttributeError otherwise.
    """
    return self.logical_file.get_aggregation_display_name()
@property
def has_generic_logical_file(self):
    """True when the attached logical file is a GenericLogicalFile."""
    type_name = self.logical_file_type_name
    return type_name == "GenericLogicalFile"
@property
def metadata(self):
    """Return the logical file's metadata, or None when no logical file exists."""
    logical = self.logical_file
    if logical is None:
        return None
    return logical.metadata
@property
def mime_type(self):
    """Return the MIME type of the represented file, guessed from its file name."""
    from .hydroshare.utils import get_file_mime_type
    return get_file_mime_type(self.file_name)
@property
def extension(self):
    """Return the file extension (including the leading dot) of this file."""
    return os.path.splitext(self.storage_path)[1]
@property
def dir_path(self):
    """Return the directory portion of this file's storage path."""
    path = self.storage_path
    return os.path.dirname(path)
@property
def full_path(self):
    """Alias for storage_path: the full path of the resource file."""
    path = self.storage_path
    return path
@property
def file_name(self):
    """Return just the base name of this resource file."""
    path = self.storage_path
    return os.path.basename(path)
@property
def url(self):
    """Return the URL of the file contained in this ResourceFile.

    A GET of this URL simply returns the file. This URL is independent of federation.
    PUT, POST, and DELETE are not supported.

    This choice for a URL is dependent mainly upon conformance to DataOne URL standards
    that are also conformant to the format in resourcemap.xml. This url does not contain
    the site URL, which is prefixed when needed.

    This is based upon the resourcemap_urls.py entry (the file-path group name was
    garbled in the original comment -- presumably a named group over the path;
    verify against resourcemap_urls.py):

        url(r'^resource/(?P<shortkey>[0-9a-f-]+)/data/contents/(?P<filepath>.+)/$',
            views.file_download_url_mapper,
            name='get_resource_file')

    This url does NOT depend upon federation status.
    """
    return '/' + os.path.join('resource', self.public_path)
@property
def public_path(self):
    """Return the public (unqualified iRODS) path for this file.

    This corresponds to the iRODS path if the resource isn't federated:
    <resource id>/data/contents/<short path>.
    """
    parts = (self.resource.short_id, 'data', 'contents', self.short_path)
    return os.path.join(*parts)
@property
def irods_path(self):
    """Return the iRODS path for accessing this file, with any federation prefix.

    This consists of the resource id, /data/contents/, and the file path.
    """
    public = self.public_path
    if not self.resource.is_federated:
        return public
    return os.path.join(self.resource.resource_federation_path, public)
class PublicResourceManager(models.Manager):
    """Extend Django model Manager to allow for public resource access."""

    def get_queryset(self):
        """Restrict the default queryset to resources flagged public in access control."""
        return super(PublicResourceManager, self).get_queryset().filter(raccess__public=True)
class DiscoverableResourceManager(models.Manager):
    """Extend Django model Manager to filter for public or discoverable resources."""

    def get_queryset(self):
        """Restrict the default queryset to resources that are discoverable or public."""
        return super(DiscoverableResourceManager, self).get_queryset().filter(
            Q(raccess__discoverable=True) |
            Q(raccess__public=True))
# remove RichText parent class from the parameters for Django inplace editing to work;
# otherwise, get internal edit error when saving changes
class BaseResource(Page, AbstractResource):
    """Combine mezzanine Page model and AbstractResource model to establish base resource."""

    resource_type = models.CharField(max_length=50, default="GenericResource")
    # this locked_time field is added for resource versioning locking representing
    # the time when the resource is locked for a new version action. A value of null
    # means the resource is not locked
    locked_time = models.DateTimeField(null=True, blank=True)
    # this resource_federation_path is added to record where a HydroShare resource is
    # stored. The default is empty string meaning the resource is stored in HydroShare
    # zone. If a resource is stored in a federated zone, the field should store the
    # federated root path in the format of /federated_zone/home/localHydroProxy
    # TODO: change to null=True, default=None to simplify logic elsewhere
    resource_federation_path = models.CharField(max_length=100, blank=True, default='')

    objects = PublishedManager()
    public_resources = PublicResourceManager()
    discoverable_resources = DiscoverableResourceManager()

    collections = models.ManyToManyField('BaseResource', related_name='resources')

    # NOTE(review): this class attribute is shadowed by a property of the same
    # name defined further down in this class -- confirm which one is intended
    discovery_content_type = 'Generic Resource'  # used during discovery

    class Meta:
        """Define meta properties for BaseResource model."""

        verbose_name = 'Generic'
        db_table = 'hs_core_genericresource'
def can_add(self, request):
    """Pass through to abstract resource can_add function."""
    # call AbstractResource explicitly to bypass mezzanine Page's implementation
    return AbstractResource.can_add(self, request)
def can_change(self, request):
    """Pass through to abstract resource can_change function."""
    # call AbstractResource explicitly to bypass mezzanine Page's implementation
    return AbstractResource.can_change(self, request)
def can_delete(self, request):
    """Pass through to abstract resource can_delete function."""
    # call AbstractResource explicitly to bypass mezzanine Page's implementation
    return AbstractResource.can_delete(self, request)
def can_view(self, request):
    """Pass through to abstract resource can_view function."""
    # call AbstractResource explicitly to bypass mezzanine Page's implementation
    return AbstractResource.can_view(self, request)
def get_irods_storage(self):
    """Return the storage backend: FedStorage for federated resources, else IrodsStorage."""
    if not self.resource_federation_path:
        return IrodsStorage()
    return FedStorage()
@property
def is_federated(self):
    """True when the resource has a non-empty federation path."""
    path = self.resource_federation_path
    return path is not None and path != ''
# Paths relative to the resource
@property
def root_path(self):
    """Return the root folder of the iRODS structure containing resource files.

    Note that this folder doesn't directly contain the resource files;
    they are contained in ./data/contents/* instead.
    """
    if not self.is_federated:
        return self.short_id
    return os.path.join(self.resource_federation_path, self.short_id)
@property
def file_path(self):
    """Return the file path of the resource: root path plus "data/contents".

    This is the root of the folder structure for resource files.
    """
    root = self.root_path
    return os.path.join(root, "data", "contents")
@property
def scimeta_path(self):
    """Return the iRODS path to the science metadata file (resourcemetadata.xml)."""
    return os.path.join(self.root_path, "data", "resourcemetadata.xml")
@property
def resmap_path(self):
    """Return the iRODS path to the resource map file (resourcemap.xml)."""
    return os.path.join(self.root_path, "data", "resourcemap.xml")
# @property
# def sysmeta_path(self):
# """ path to system metadata file (in iRODS) """
# return os.path.join(self.root_path, "data", "systemmetadata.xml")
@property
def bag_path(self):
    """Return the unique iRODS path to the bag for the resource.

    Since this is a cache, it is stored in a different place than the resource files.
    """
    prefix = getattr(settings, 'IRODS_BAGIT_PATH', 'bags')
    postfix = getattr(settings, 'IRODS_BAGIT_POSTFIX', 'zip')
    bag_name = self.short_id + '.' + postfix
    if self.is_federated:
        return os.path.join(self.resource_federation_path, prefix, bag_name)
    return os.path.join(prefix, bag_name)
@property
def bag_url(self):
    """Return the storage URL of the resource's data bag."""
    bagit_path = getattr(settings, 'IRODS_BAGIT_PATH', 'bags')
    bagit_postfix = getattr(settings, 'IRODS_BAGIT_POSTFIX', 'zip')
    bag_path = "{}/{}.{}".format(bagit_path, self.short_id, bagit_postfix)
    istorage = self.get_irods_storage()
    return istorage.url(bag_path)
@property
def bag_checksum(self):
    """Return the stored checksum of the resource bag, or '' when not set.

    Currently only published resources have bag checksums computed and saved.
    """
    checksum = self.extra_data.get('bag_checksum', '')
    if checksum:
        return checksum.strip('\n')
    return ''
@bag_checksum.setter
def bag_checksum(self, checksum):
    """Store *checksum* for the resource bag in extra_data.

    :param checksum: checksum value to be set
    :raises ValidationError: if *checksum* is empty
    """
    if not checksum:
        # bug fix: the ValidationError was previously *returned* (from a setter,
        # where the return value is discarded) instead of raised
        raise ValidationError(
            "checksum to set on the bag of the resource {} is empty".format(self.short_id))
    extra_data = self.extra_data
    extra_data['bag_checksum'] = checksum
    self.extra_data = extra_data
    self.save()
# URIs relative to resource
# these are independent of federation strategy
# TODO: utilize "reverse" abstraction to tie this to urls.py for robustness
# add these one by one to avoid errors.
# @property
# def root_uri(self):
# pass
# @property
# def scimeta_uri(self):
# return os.path.join(self.root_uri, 'scimeta')
# @property
# def sysmeta_uri(self):
# return os.path.join(self.root_uri, 'sysmeta')
# @property
# def file_uri(self):
# return os.path.join(self.root_uri, 'files')
# create crossref deposit xml for resource publication
def get_crossref_deposit_xml(self, pretty_print=True):
    """Return XML (as a string) describing the CrossRef deposit for this resource.

    :param pretty_print: passed to lxml's tostring for indented output
    """
    # importing here to avoid circular import problem
    from .hydroshare.resource import get_activated_doi

    xsi = "http://www.w3.org/2001/XMLSchema-instance"
    schemaLocation = 'http://www.crossref.org/schema/4.3.6 ' \
                     'http://www.crossref.org/schemas/crossref4.3.6.xsd'
    ns = "http://www.crossref.org/schema/4.3.6"
    ROOT = etree.Element('{%s}doi_batch' % ns, version="4.3.6", nsmap={None: ns},
                         attrib={"{%s}schemaLocation" % xsi: schemaLocation})

    # get the resource object associated with this metadata container object - needed
    # to get the verbose_name

    # create the head sub element
    head = etree.SubElement(ROOT, 'head')
    etree.SubElement(head, 'doi_batch_id').text = self.short_id
    etree.SubElement(head, 'timestamp').text = arrow.get(self.updated)\
        .format("YYYYMMDDHHmmss")
    depositor = etree.SubElement(head, 'depositor')
    etree.SubElement(depositor, 'depositor_name').text = 'HydroShare'
    etree.SubElement(depositor, 'email_address').text = settings.DEFAULT_SUPPORT_EMAIL
    # The organization that owns the information being registered.
    etree.SubElement(head, 'registrant').text = 'Consortium of Universities for the ' \
                                                'Advancement of Hydrologic Science, Inc. ' \
                                                '(CUAHSI)'

    # create the body sub element
    body = etree.SubElement(ROOT, 'body')
    # create the database sub element
    db = etree.SubElement(body, 'database')
    # create the database_metadata sub element
    db_md = etree.SubElement(db, 'database_metadata', language="en")
    # titles is required element for database_metadata
    titles = etree.SubElement(db_md, 'titles')
    etree.SubElement(titles, 'title').text = "HydroShare Resources"
    # create the dataset sub element, dataset_type can be record or collection, set it to
    # collection for HydroShare resources
    dataset = etree.SubElement(db, 'dataset', dataset_type="collection")
    ds_titles = etree.SubElement(dataset, 'titles')
    etree.SubElement(ds_titles, 'title').text = self.metadata.title.value
    # doi_data is required element for dataset
    doi_data = etree.SubElement(dataset, 'doi_data')
    res_doi = get_activated_doi(self.doi)
    # strip any site prefix so the deposited DOI starts at the 10.4211 shoulder
    idx = res_doi.find('10.4211')
    if idx >= 0:
        res_doi = res_doi[idx:]
    etree.SubElement(doi_data, 'doi').text = res_doi
    etree.SubElement(doi_data, 'resource').text = self.metadata.identifiers.all().filter(
        name='hydroShareIdentifier')[0].url
    return '<?xml version="1.0" encoding="UTF-8"?>\n' + etree.tostring(
        ROOT, encoding='UTF-8', pretty_print=pretty_print).decode()
@property
def size(self):
    """Return the total size (bytes) of all data files in iRODS.

    This size does not include metadata. Just files. Specifically,
    resourcemetadata.xml and systemmetadata.xml are not included.

    Raises SessionException if iRODS fails.
    """
    # trigger file size read for files whose size hasn't been set yet
    for resource_file in self.files.filter(_size__lt=0):
        resource_file.calculate_size()
    # sum all file sizes; the aggregate is None when there are no files
    total = self.files.aggregate(Sum('_size'))['_size__sum']
    return total or 0
@property
def verbose_name(self):
    """Return the verbose name of the concrete content model's Meta."""
    return self.get_content_model()._meta.verbose_name
@property
def discovery_content_type(self):
    """Return the discovery content type of the concrete content model.

    NOTE(review): this property shadows the class attribute of the same name
    defined earlier on this class -- confirm that shadowing is intentional.
    """
    return self.get_content_model().discovery_content_type
@property
def can_be_published(self):
    """Determine when data and metadata are complete enough for publication.

    The property can be overriden by specific resource type which is not
    appropriate for publication such as the Web App resource.
    """
    return (not self.raccess.published) and self.can_be_public_or_discoverable
@classmethod
def get_supported_upload_file_types(cls):
    """Get supported upload types for a resource.

    This can be overridden to choose which types of file can be uploaded by a subclass.
    By default, all file types are supported.

    NOTE(review): ('.*') is a parenthesized string, not a one-element tuple --
    confirm callers expect a string here.
    """
    # TODO: this should be replaced by an instance method.
    return ('.*')
@classmethod
def can_have_multiple_files(cls):
    """Return True if multiple files can be uploaded.

    This can be overridden to choose how many files can be uploaded by a subclass.
    By default, uploads are not limited.
    """
    # TODO: this should be replaced by an instance method.
    return True
@classmethod
def can_have_files(cls):
    """Return whether the resource supports files at all.

    This can be overridden to choose whether files can be uploaded by a subclass.
    By default, uploads are allowed.
    """
    # TODO: this should be replaced by an instance method.
    return True
def get_hs_term_dict(self):
    """Return a dict of HS Terms and their values.

    Will be used to parse webapp url templates.

    NOTES FOR ANY SUBCLASS OF THIS CLASS TO OVERRIDE THIS FUNCTION:
    resource types that inherit this class should add/merge their
    resource-specific HS Terms into this dict.
    """
    terms = {
        "HS_RES_ID": self.short_id,
        "HS_RES_TYPE": self.resource_type,
    }
    # user-supplied key/value metadata is merged on top
    terms.update(self.extra_metadata.items())
    return terms
def replaced_by(self):
    """Return a list of resources that replaced this one (isReplacedBy relations)."""
    from hs_core.hydroshare import get_resource_by_shortkey, current_site_url  # prevent import loop
    replacedby = self.metadata.relations.all().filter(type=RelationTypes.isReplacedBy)
    rlist = []
    for r in replacedby:
        citation = r.value
        # the replacing resource's 32-character short id is the tail of the citation
        res_id = citation[-32:]
        # TODO: This is a mistake. This hardcodes the server on which the URI is created as its URI
        res_path = "{}/resource/{}".format(current_site_url(), res_id)
        if citation.endswith(res_path):
            try:
                rv = get_resource_by_shortkey(res_id, or_404=False)
                rlist.append(rv)
            except BaseResource.DoesNotExist:
                # the replacing resource no longer exists; skip it
                pass
    return rlist
def get_relation_version_res_url(self, rel_type):
    """Extract the resource url from the citation stored in relation metadata.

    Used for resource versioning.

    :param rel_type: type of relation (allowed types are: 'isVersionOf' and 'isReplacedBy')
    :return: the resource url, or '' when no such relation exists
    """
    relation = self.metadata.relations.filter(type=rel_type).first()
    if relation is None:
        return ''
    # the url is the last comma-separated token of the stored citation
    return relation.value.split(',')[-1]
@property
def show_in_discover(self):
    """Return True if a resource should be exhibited.

    A resource should be exhibited if it is at least discoverable
    and not replaced by anything that exists and is at least discoverable.

    A resource is hidden if there is any descendant (according to isReplacedBy)
    that is discoverable. The descendent tree is searched via breadth-first search
    with cycle elimination. Thus the search always terminates regardless of the
    complexity of descendents.
    """
    if not self.raccess.discoverable:
        return False  # not exhibitable

    replacedby = self.replaced_by()
    visited = {}
    visited[self.short_id] = True
    # breadth-first replacement search, first discoverable replacement wins;
    # note: replacedby is deliberately extended while being iterated (BFS queue)
    for r in replacedby:
        if r.raccess.discoverable:
            return False
        if r.short_id not in visited:
            replacedby.extend(r.replaced_by())
            visited[r.short_id] = True
    return True  # no reason not to show it
def update_relation_meta(self):
    """Update the citation stored in relation metadata for relation type
    'isReplacedBy', 'isPartOf' and 'hasPart' if needed.

    A relation whose target resource no longer exists is deleted; one whose
    citation is stale is rewritten. If anything changed, the bag and metadata
    are flagged dirty via AVUs.
    """
    from hs_core.hydroshare import get_resource_by_shortkey

    def _update_relation_meta(relation_meta_obj):
        # returns True when the relation element was deleted or rewritten
        relation_updated = False
        if relation_meta_obj.value and '/resource/' in relation_meta_obj.value:
            version_citation = relation_meta_obj.value
            version_res_id = version_citation.split('/resource/')[-1]
            try:
                version_res = get_resource_by_shortkey(version_res_id, or_404=False)
            except BaseResource.DoesNotExist:
                # target resource is gone -- drop the dangling relation
                relation_meta_obj.delete()
                relation_updated = True
                return relation_updated
            current_version_citation = version_res.get_citation()
            if current_version_citation != version_citation:
                # stored citation is stale -- refresh it
                relation_meta_obj.value = current_version_citation
                relation_meta_obj.save()
                relation_updated = True
        return relation_updated

    replace_relation = self.metadata.relations.all().filter(type=RelationTypes.isReplacedBy).first()
    replace_relation_updated = False
    if replace_relation is not None:
        replace_relation_updated = _update_relation_meta(replace_relation)

    part_of_relation_updated = False
    for part_of_relation in self.metadata.relations.filter(type=RelationTypes.isPartOf).all():
        if _update_relation_meta(part_of_relation):
            part_of_relation_updated = True

    has_part_relation_updated = False
    for has_part_relation in self.metadata.relations.filter(type=RelationTypes.hasPart).all():
        if _update_relation_meta(has_part_relation):
            has_part_relation_updated = True

    if any([replace_relation_updated, part_of_relation_updated, has_part_relation_updated]):
        # mark the resource bag and metadata files as needing regeneration
        self.setAVU("bag_modified", True)
        self.setAVU("metadata_dirty", True)
# TODO Deprecated
class GenericResource(BaseResource):
    """Define GenericResource model (deprecated; a proxy of BaseResource)."""

    objects = ResourceManager('GenericResource')

    @property
    def supports_folders(self):
        """Return True always."""
        return True

    discovery_content_type = 'Generic Resource'  # used during discovery

    class Meta:
        """Define meta properties for GenericResource model."""

        verbose_name = 'Generic'
        proxy = True
# keep a reference to mezzanine's original implementation for the fallback below
old_get_content_model = Page.get_content_model


def new_get_content_model(self):
    """Override mezzanine get_content_model function for pages for resources."""
    from hs_core.hydroshare.utils import get_resource_types
    content_model = self.content_model
    if content_model.endswith('resource'):
        # resolve the registered resource type whose model name matches
        rt = [rt for rt in get_resource_types() if rt._meta.model_name == content_model][0]
        return rt.objects.get(id=self.id)
    # not a resource page: defer to mezzanine's original behavior
    return old_get_content_model(self)


# monkey-patch mezzanine so Page.get_content_model resolves resource subclasses
Page.get_content_model = new_get_content_model
# This model has a one-to-one relation with the AbstractResource model
class CoreMetaData(models.Model, RDF_MetaData_Mixin):
    """Define CoreMetaData model."""

    XML_HEADER = '''<?xml version="1.0" encoding="UTF-8"?>'''

    NAMESPACES = {'rdf': "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
                  'rdfs1': "http://www.w3.org/2000/01/rdf-schema#",
                  'dc': "http://purl.org/dc/elements/1.1/",
                  'dcterms': "http://purl.org/dc/terms/",
                  'hsterms': "https://www.hydroshare.org/terms/"}

    id = models.AutoField(primary_key=True)

    # generic relations to the individual metadata element models;
    # underscore-prefixed relations are exposed via singular properties below
    _description = GenericRelation(Description)    # resource abstract
    _title = GenericRelation(Title)
    creators = GenericRelation(Creator)
    contributors = GenericRelation(Contributor)
    citation = GenericRelation(Citation)
    dates = GenericRelation(Date)
    coverages = GenericRelation(Coverage)
    formats = GenericRelation(Format)
    identifiers = GenericRelation(Identifier)
    _language = GenericRelation(Language)
    subjects = GenericRelation(Subject)
    relations = GenericRelation(Relation)
    _rights = GenericRelation(Rights)
    _type = GenericRelation(Type)
    _publisher = GenericRelation(Publisher)
    funding_agencies = GenericRelation(FundingAgency)
@property
def resource(self):
    """Return the BaseResource whose metadata this object holds."""
    candidates = BaseResource.objects.filter(object_id=self.id)
    return candidates.first()
@property
def title(self):
    """Return the single title element, or None when not set."""
    return self._title.first()
@property
def description(self):
    """Return the single description (abstract) element, or None when not set."""
    return self._description.first()
@property
def language(self):
    """Return the single language element, or None when not set."""
    return self._language.first()
@property
def rights(self):
    """Return the single rights element, or None when not set."""
    return self._rights.first()
@property
def type(self):
    """Return the single type element, or None when not set."""
    return self._type.first()
@property
def publisher(self):
    """Return the single publisher element, or None when not set."""
    return self._publisher.first()
@property
def spatial_coverage(self):
    """Return the spatial coverage element (any coverage that is not 'period')."""
    return self.coverages.exclude(type='period').first()
@property
def temporal_coverage(self):
    """Return the temporal coverage element (coverage of type 'period')."""
    return self.coverages.filter(type='period').first()
@property
def spatial_coverage_default_projection(self):
    """Return the default projection label used for spatial coverage."""
    return 'WGS 84 EPSG:4326'
@property
def spatial_coverage_default_units(self):
    """Return the default units label used for spatial coverage."""
    return 'Decimal degrees'
@property
def serializer(self):
    """Return an instance of rest_framework Serializer for self.

    Note: Subclass must override this property.
    """
    from .views.resource_metadata_rest_api import CoreMetaDataSerializer
    return CoreMetaDataSerializer(self)
def rdf_subject(self):
    """Return the RDF subject URI for the resource itself."""
    from .hydroshare import current_site_url
    return URIRef("{}/resource/{}".format(current_site_url(), self.resource.short_id))
def rdf_metadata_subject(self):
    """Return the RDF subject URI for the resource's metadata document."""
    from .hydroshare import current_site_url
    return URIRef("{}/resource/{}/data/resourcemetadata.xml".format(current_site_url(), self.resource.short_id))
def rdf_type(self):
    """Return the HSTERMS RDF type term named after the resource type."""
    return getattr(HSTERMS, self.resource.resource_type)
def ignored_generic_relations(self):
    """Override to exclude generic relations from the rdf/xml.

    This is built specifically for Format, which is the only
    AbstractMetadataElement that is on a metadata model and not included in
    the rdf/xml. Returns a list of classes to be ignored.
    """
    return [Format]
def ingest_metadata(self, graph):
    """Populate metadata from an RDF *graph*, including extended key/value metadata.

    Also removes an ingested citation that matches the auto-generated default
    citation pattern, so it is not stored as a custom citation.

    :param graph: an rdflib Graph holding the resource metadata
    """
    super(CoreMetaData, self).ingest_metadata(graph)

    subject = self.rdf_subject_from_graph(graph)

    extra_metadata = {}
    for o in graph.objects(subject=subject, predicate=HSTERMS.extendedMetadata):
        key = graph.value(subject=o, predicate=HSTERMS.key).value
        value = graph.value(subject=o, predicate=HSTERMS.value).value
        extra_metadata[key] = value

    res = self.resource
    res.extra_metadata = copy.deepcopy(extra_metadata)

    # delete ingested default citation
    # fix: raw string -- "\(", "\d", "\/" in a normal string are invalid escape
    # sequences (DeprecationWarning, later SyntaxWarning); pattern is unchanged
    citation_regex = re.compile(r"(.*) \(\d{4}\)\. (.*), http:\/\/(.*)\/[A-z0-9]{32}")
    ingested_citation = self.citation.first()
    if ingested_citation and citation_regex.match(ingested_citation.value):
        # reuse the object already fetched instead of querying .first() again
        ingested_citation.delete()
    res.save()
def get_rdf_graph(self):
    """Return the RDF graph for this metadata, extended with HydroShare-specific triples."""
    graph = super(CoreMetaData, self).get_rdf_graph()
    subject = self.rdf_subject()

    # add any key/value metadata items as blank-node extendedMetadata triples
    if len(self.resource.extra_metadata) > 0:
        for key, value in self.resource.extra_metadata.items():
            extendedMetadata = BNode()
            graph.add((subject, HSTERMS.extendedMetadata, extendedMetadata))
            graph.add((extendedMetadata, HSTERMS.key, Literal(key)))
            graph.add((extendedMetadata, HSTERMS.value, Literal(value)))

    # if custom citation does not exist, use the default citation
    if not self.citation.first():
        graph.add((subject, DCTERMS.bibliographicCitation, Literal(self.resource.get_citation())))

    from .hydroshare import current_site_url
    # describe the resource type term itself (label + defining vocabulary)
    TYPE_SUBJECT = URIRef("{}/terms/{}".format(current_site_url(), self.resource.resource_type))
    graph.add((TYPE_SUBJECT, RDFS1.label, Literal(self.resource.verbose_name)))
    graph.add((TYPE_SUBJECT, RDFS1.isDefinedBy, URIRef(HSTERMS)))
    return graph
@classmethod
def parse_for_bulk_update(cls, metadata, parsed_metadata):
    """Parse the input *metadata* dict to needed format and store it in
    *parsed_metadata* list.

    :param metadata: a dict of metadata that needs to be parsed to get the metadata in the
    format needed for updating the metadata elements supported by generic resource type
    :param parsed_metadata: a list of dicts that will be appended with parsed data
    """
    keys_to_update = list(metadata.keys())

    if 'title' in keys_to_update:
        parsed_metadata.append({"title": {"value": metadata.pop('title')}})

    if 'creators' in keys_to_update:
        # creators may arrive as a JSON string; normalize to a list first
        if not isinstance(metadata['creators'], list):
            metadata['creators'] = json.loads(metadata['creators'])
        for creator in metadata.pop('creators'):
            parsed_metadata.append({"creator": creator})

    if 'contributors' in keys_to_update:
        # contributors may arrive as a JSON string; normalize to a list first
        if not isinstance(metadata['contributors'], list):
            metadata['contributors'] = json.loads(metadata['contributors'])
        for contributor in metadata.pop('contributors'):
            parsed_metadata.append({"contributor": contributor})

    if 'coverages' in keys_to_update:
        for coverage in metadata.pop('coverages'):
            parsed_metadata.append({"coverage": coverage})

    if 'dates' in keys_to_update:
        for date in metadata.pop('dates'):
            parsed_metadata.append({"date": date})

    if 'description' in keys_to_update:
        parsed_metadata.append({"description": {"abstract": metadata.pop('description')}})

    if 'language' in keys_to_update:
        parsed_metadata.append({"language": {"code": metadata.pop('language')}})

    if 'rights' in keys_to_update:
        parsed_metadata.append({"rights": metadata.pop('rights')})

    if 'sources' in keys_to_update:
        for source in metadata.pop('sources'):
            parsed_metadata.append({"source": source})

    if 'subjects' in keys_to_update:
        for subject in metadata.pop('subjects'):
            parsed_metadata.append({"subject": {"value": subject['value']}})

    if 'funding_agencies' in keys_to_update:
        for agency in metadata.pop("funding_agencies"):
            # using fundingagency instead of funding_agency to be consistent with UI
            # add-metadata logic as well as the term for the metadata element.
            parsed_metadata.append({"fundingagency": agency})

    if 'relations' in keys_to_update:
        for relation in metadata.pop('relations'):
            parsed_metadata.append({"relation": relation})
@classmethod
def get_supported_element_names(cls):
    """Return a list of supported metadata element names (class names)."""
    return ['Description',
            'Citation',
            'Creator',
            'Contributor',
            'Coverage',
            'Format',
            'Rights',
            'Title',
            'Type',
            'Date',
            'Identifier',
            'Language',
            'Subject',
            'Relation',
            'Publisher',
            'FundingAgency']
@classmethod
def get_form_errors_as_string(cls, form):
    """Return form.errors flattened to a single "field:first-error" string.

    :param form: an instance of Django Form class
    """
    parts = ["{}:{}".format(field, errors[0]) for field, errors in form.errors.items()]
    return ", ".join(parts)
def set_dirty(self, flag):
    """Track whether metadata object is dirty.

    Subclasses that have the attribute to track whether metadata object is dirty
    should override this method to allow setting that attribute.

    :param flag: a boolean value
    :return:
    """
    # intentionally a no-op in the base class
    pass
def has_all_required_elements(self):
    """Check whether metadata has all required elements.

    This method needs to be overriden by any subclass of this class
    if they implement additional metadata elements that are required.
    """
    title = self.title
    if not title or title.value.lower() == 'untitled resource':
        return False
    description = self.description
    if not description or not description.abstract.strip():
        return False
    if self.creators.count() == 0:
        return False
    rights = self.rights
    if not rights or not rights.statement.strip():
        return False
    if self.subjects.count() == 0:
        return False
    return True
def get_required_missing_elements(self):
    """Return a list of required missing metadata elements.

    This method needs to be overriden by any subclass of this class
    if they implement additional metadata elements that are required.

    NOTE(review): unlike has_all_required_elements, this does not report
    missing creators, an empty abstract, or an empty rights statement --
    confirm that asymmetry is intended.
    """
    missing_required_elements = []

    if not self.title:
        missing_required_elements.append('Title')
    elif self.title.value.lower() == 'untitled resource':
        missing_required_elements.append('Title')
    if not self.description:
        missing_required_elements.append('Abstract')
    if not self.rights:
        missing_required_elements.append('Rights')
    if self.subjects.count() == 0:
        missing_required_elements.append('Keywords')

    return missing_required_elements
def delete_all_elements(self):
    """Delete all metadata elements.

    This method needs to be overriden by any subclass of this class if that class
    has additional metadata elements.
    """
    # single-instance elements (may be absent)
    for element in (self.title, self.description, self.language,
                    self.rights, self.publisher, self.type):
        if element:
            element.delete()

    # repeatable elements: clear each related manager wholesale
    for related in (self.creators, self.contributors, self.dates,
                    self.identifiers, self.coverages, self.formats,
                    self.subjects, self.relations, self.funding_agencies):
        related.all().delete()
def copy_all_elements_from(self, src_md, exclude_elements=None):
    """Copy all metadata elements from another metadata object into this one.

    :param src_md: source CoreMetaData (or subclass) instance to copy from
    :param exclude_elements: optional iterable of lowercase element names to skip
    """
    md_type = ContentType.objects.get_for_model(src_md)
    supported_element_names = src_md.get_supported_element_names()
    for element_name in supported_element_names:
        element_model_type = src_md._get_metadata_element_model_type(element_name)
        elements_to_copy = element_model_type.model_class().objects.filter(
            object_id=src_md.id, content_type=md_type).all()
        for element in elements_to_copy:
            element_args = model_to_dict(element)
            # strip the generic-relation bookkeeping fields; create_element
            # re-binds the copy to this metadata object
            element_args.pop('content_type')
            element_args.pop('id')
            element_args.pop('object_id')
            if exclude_elements:
                if not element_name.lower() in exclude_elements:
                    self.create_element(element_name, **element_args)
            else:
                self.create_element(element_name, **element_args)
# this method needs to be overriden by any subclass of this class
# to allow updating of extended (resource specific) metadata
def update(self, metadata, user):
    """Define custom update method for CoreMetaData model.

    Bulk-validates and then applies a list of metadata element updates inside a
    single database transaction; any validation failure rolls everything back.

    :param metadata: a list of dicts - each dict in the format of {element_name: **kwargs}
        element_name must be in lowercase.
        example of a dict in metadata list:
        {'creator': {'name': 'John Howard', 'email': 'jh@gmail.com'}}
    :param user: user who is updating metadata
        NOTE(review): 'user' is not used in this base implementation; presumably
        subclasses use it - confirm before removing.
    :return:
    """
    from .forms import TitleValidationForm, AbstractValidationForm, LanguageValidationForm, \
        RightsValidationForm, CreatorValidationForm, ContributorValidationForm, \
        RelationValidationForm, FundingAgencyValidationForm
    validation_forms_mapping = {'title': TitleValidationForm,
                                'description': AbstractValidationForm,
                                'language': LanguageValidationForm,
                                'rights': RightsValidationForm,
                                'creator': CreatorValidationForm,
                                'contributor': ContributorValidationForm,
                                'relation': RelationValidationForm,
                                'fundingagency': FundingAgencyValidationForm
                                }
    # updating non-repeatable elements
    with transaction.atomic():
        for element_name in ('title', 'description', 'language', 'rights'):
            for dict_item in metadata:
                if element_name in dict_item:
                    validation_form = validation_forms_mapping[element_name](
                        dict_item[element_name])
                    if not validation_form.is_valid():
                        err_string = self.get_form_errors_as_string(validation_form)
                        raise ValidationError(err_string)
            self.update_non_repeatable_element(element_name, metadata)
        # repeatable elements: validate every item first, then replace-all via
        # update_repeatable_element at the end of each element_name pass
        for element_name in ('creator', 'contributor', 'coverage', 'source', 'relation',
                             'subject'):
            subjects = []
            for dict_item in metadata:
                if element_name in dict_item:
                    if element_name == 'subject':
                        # subjects are only collected here; duplicate check happens below
                        subject_data = dict_item['subject']
                        if 'value' not in subject_data:
                            raise ValidationError("Subject value is missing")
                        subjects.append(dict_item['subject']['value'])
                        continue
                    if element_name == 'coverage':
                        # coverage metadata is not allowed for update for time series resource
                        if self.resource.resource_type == "TimeSeriesResource":
                            err_msg = "Coverage metadata can't be updated for {} resource"
                            err_msg = err_msg.format(self.resource.resource_type)
                            raise ValidationError(err_msg)
                        coverage_data = dict_item[element_name]
                        if 'type' not in coverage_data:
                            raise ValidationError("Coverage type data is missing")
                        if 'value' not in coverage_data:
                            raise ValidationError("Coverage value data is missing")
                        coverage_value_dict = coverage_data['value']
                        coverage_type = coverage_data['type']
                        Coverage.validate_coverage_type_value_attributes(coverage_type,
                                                                         coverage_value_dict)
                        continue
                    if element_name in ['creator', 'contributor']:
                        try:
                            party_data = dict_item[element_name]
                            if 'identifiers' in party_data:
                                if isinstance(party_data['identifiers'], dict):
                                    # convert dict to json for form validation
                                    party_data['identifiers'] = json.dumps(
                                        party_data['identifiers'])
                        except Exception:
                            raise ValidationError("Invalid identifier data for "
                                                  "creator/contributor")
                        validation_form = validation_forms_mapping[element_name](
                            party_data)
                    else:
                        # NOTE(review): 'source' has no entry in validation_forms_mapping,
                        # so a metadata item keyed 'source' would raise KeyError here -
                        # confirm whether 'source' items are expected in this code path.
                        validation_form = validation_forms_mapping[element_name](
                            dict_item[element_name])
                    if not validation_form.is_valid():
                        err_string = self.get_form_errors_as_string(validation_form)
                        err_string += " element name:{}".format(element_name)
                        raise ValidationError(err_string)
            if subjects:
                # case-insensitive duplicate detection across all subject values
                subjects_set = set([s.lower() for s in subjects])
                if len(subjects_set) < len(subjects):
                    raise ValidationError("Duplicate subject values found")
            self.update_repeatable_element(element_name=element_name, metadata=metadata)
        # allow only updating or creating date element of type valid
        element_name = 'date'
        date_list = [date_dict for date_dict in metadata if element_name in date_dict]
        if len(date_list) > 0:
            for date_item in date_list:
                if 'type' in date_item[element_name]:
                    if date_item[element_name]['type'] == 'valid':
                        self.dates.filter(type='valid').delete()
                        self.create_element(element_model_name=element_name,
                                            **date_item[element_name])
                        break
        # allow only updating or creating identifiers which does not have name value
        # 'hydroShareIdentifier'
        element_name = 'identifier'
        identifier_list = [id_dict for id_dict in metadata if element_name in id_dict]
        if len(identifier_list) > 0:
            for id_item in identifier_list:
                if 'name' in id_item[element_name]:
                    if id_item[element_name]['name'].lower() != 'hydroshareidentifier':
                        self.identifiers.filter(name=id_item[element_name]['name']).delete()
                        self.create_element(element_model_name=element_name,
                                            **id_item[element_name])
        element_name = 'fundingagency'
        identifier_list = [id_dict for id_dict in metadata if element_name in id_dict]
        if len(identifier_list) > 0:
            for id_item in identifier_list:
                validation_form = validation_forms_mapping[element_name](
                    id_item[element_name])
                if not validation_form.is_valid():
                    err_string = self.get_form_errors_as_string(validation_form)
                    raise ValidationError(err_string)
            # update_repeatable_elements will append an 's' to element_name before getattr,
            # unless property_name is provided. I'd like to remove English grammar rules from
            # our codebase, but in the interest of time, I'll just add a special case for
            # handling funding_agencies
            self.update_repeatable_element(element_name=element_name, metadata=metadata,
                                           property_name="funding_agencies")
def get_xml_legacy(self, pretty_print=True, include_format_elements=True):
    """Get metadata XML rendering.

    Builds the full Dublin Core / hsterms RDF-XML document for this resource's
    metadata and returns it as a string prefixed with XML_HEADER.

    :param pretty_print: passed through to lxml's etree.tostring
    :param include_format_elements: when False, dc:format elements are omitted
    """
    # importing here to avoid circular import problem
    from .hydroshare.utils import current_site_url, get_resource_types
    RDF_ROOT = etree.Element('{%s}RDF' % self.NAMESPACES['rdf'], nsmap=self.NAMESPACES)
    # create the Description element -this is not exactly a dc element
    rdf_Description = etree.SubElement(RDF_ROOT, '{%s}Description' % self.NAMESPACES['rdf'])
    resource_uri = self.identifiers.all().filter(name='hydroShareIdentifier')[0].url
    rdf_Description.set('{%s}about' % self.NAMESPACES['rdf'], resource_uri)
    # get the resource object associated with this metadata container object - needed to
    # get the verbose_name
    resource = BaseResource.objects.filter(object_id=self.id).first()
    rt = [rt for rt in get_resource_types()
          if rt._meta.object_name == resource.resource_type][0]
    resource = rt.objects.get(id=resource.id)
    # create the title element
    if self.title:
        dc_title = etree.SubElement(rdf_Description, '{%s}title' % self.NAMESPACES['dc'])
        dc_title.text = self.title.value
    # create the type element
    if self.type:
        dc_type = etree.SubElement(rdf_Description, '{%s}type' % self.NAMESPACES['dc'])
        dc_type.set('{%s}resource' % self.NAMESPACES['rdf'], self.type.url)
    # create the Description element (we named it as Abstract to differentiate from the parent
    # "Description" element)
    if self.description:
        dc_description = etree.SubElement(rdf_Description,
                                          '{%s}description' % self.NAMESPACES['dc'])
        dc_des_rdf_Desciption = etree.SubElement(dc_description,
                                                 '{%s}Description' % self.NAMESPACES['rdf'])
        dcterms_abstract = etree.SubElement(dc_des_rdf_Desciption,
                                            '{%s}abstract' % self.NAMESPACES['dcterms'])
        dcterms_abstract.text = clean_for_xml(self.description.abstract)
    # one hsterms:awardInfo element per funding agency
    for agency in self.funding_agencies.all():
        hsterms_agency = etree.SubElement(rdf_Description,
                                          '{%s}awardInfo' % self.NAMESPACES['hsterms'])
        hsterms_agency_rdf_Description = etree.SubElement(hsterms_agency, '{%s}Description' %
                                                          self.NAMESPACES['rdf'])
        hsterms_name = etree.SubElement(hsterms_agency_rdf_Description,
                                        '{%s}fundingAgencyName' % self.NAMESPACES['hsterms'])
        hsterms_name.text = clean_for_xml(agency.agency_name)
        if agency.agency_url:
            hsterms_agency_rdf_Description.set('{%s}about' % self.NAMESPACES['rdf'],
                                               agency.agency_url)
        if agency.award_title:
            hsterms_title = etree.SubElement(hsterms_agency_rdf_Description, '{%s}awardTitle' %
                                             self.NAMESPACES['hsterms'])
            hsterms_title.text = clean_for_xml(agency.award_title)
        if agency.award_number:
            hsterms_number = etree.SubElement(hsterms_agency_rdf_Description,
                                              '{%s}awardNumber' % self.NAMESPACES['hsterms'])
            hsterms_number.text = clean_for_xml(agency.award_number)
    # use all creators associated with this metadata object to
    # generate creator xml elements
    for creator in self.creators.all():
        self._create_person_element(etree, rdf_Description, creator)
    for contributor in self.contributors.all():
        self._create_person_element(etree, rdf_Description, contributor)
    for coverage in self.coverages.all():
        coverage.add_to_xml_container(rdf_Description)
    # dates: 'valid' dates may carry a range; all other types are single values
    for dt in self.dates.all():
        dc_date = etree.SubElement(rdf_Description, '{%s}date' % self.NAMESPACES['dc'])
        dc_term = '{%s}' + dt.type
        dc_date_dcterms = etree.SubElement(dc_date, dc_term % self.NAMESPACES['dcterms'])
        rdf_date_value = etree.SubElement(dc_date_dcterms, '{%s}value' % self.NAMESPACES['rdf'])
        if dt.type != 'valid':
            rdf_date_value.text = dt.start_date.isoformat()
        else:
            if dt.end_date:
                rdf_date_value.text = "start=%s; end=%s" % (dt.start_date.isoformat(),
                                                            dt.end_date.isoformat())
            else:
                rdf_date_value.text = dt.start_date.isoformat()
    if include_format_elements:
        for fmt in self.formats.all():
            dc_format = etree.SubElement(rdf_Description, '{%s}format' % self.NAMESPACES['dc'])
            dc_format.text = fmt.value
    for res_id in self.identifiers.all():
        dc_identifier = etree.SubElement(rdf_Description,
                                         '{%s}identifier' % self.NAMESPACES['dc'])
        dc_id_rdf_Description = etree.SubElement(dc_identifier,
                                                 '{%s}Description' % self.NAMESPACES['rdf'])
        id_hsterm = '{%s}' + res_id.name
        hsterms_hs_identifier = etree.SubElement(dc_id_rdf_Description,
                                                 id_hsterm % self.NAMESPACES['hsterms'])
        hsterms_hs_identifier.text = res_id.url
    if self.language:
        dc_lang = etree.SubElement(rdf_Description, '{%s}language' % self.NAMESPACES['dc'])
        dc_lang.text = self.language.code
    if self.publisher:
        dc_publisher = etree.SubElement(rdf_Description,
                                        '{%s}publisher' % self.NAMESPACES['dc'])
        dc_pub_rdf_Description = etree.SubElement(dc_publisher,
                                                  '{%s}Description' % self.NAMESPACES['rdf'])
        hsterms_pub_name = etree.SubElement(dc_pub_rdf_Description,
                                            '{%s}publisherName' % self.NAMESPACES['hsterms'])
        hsterms_pub_name.text = self.publisher.name
        hsterms_pub_url = etree.SubElement(dc_pub_rdf_Description,
                                           '{%s}publisherURL' % self.NAMESPACES['hsterms'])
        hsterms_pub_url.set('{%s}resource' % self.NAMESPACES['rdf'], self.publisher.url)
    # relations: hsterms namespace for HS-specific relation types, dcterms otherwise
    for rel in self.relations.all():
        dc_relation = etree.SubElement(rdf_Description, '{%s}relation' % self.NAMESPACES['dc'])
        dc_rel_rdf_Description = etree.SubElement(dc_relation,
                                                  '{%s}Description' % self.NAMESPACES['rdf'])
        if rel.type in Relation.HS_RELATION_TERMS:
            term_ns = self.NAMESPACES['hsterms']
        else:
            term_ns = self.NAMESPACES['dcterms']
        terms_type = etree.SubElement(dc_rel_rdf_Description, '{%s}%s' % (term_ns, rel.type))
        # check if the relation value starts with 'http://' or 'https://'
        if rel.value.lower().find('http://') == 0 or rel.value.lower().find('https://') == 0:
            terms_type.set('{%s}resource' % self.NAMESPACES['rdf'], rel.value)
        else:
            terms_type.text = rel.value
    if self.rights:
        dc_rights = etree.SubElement(rdf_Description, '{%s}rights' % self.NAMESPACES['dc'])
        dc_rights_rdf_Description = etree.SubElement(dc_rights,
                                                     '{%s}Description' % self.NAMESPACES['rdf'])
        hsterms_statement = etree.SubElement(dc_rights_rdf_Description,
                                             '{%s}rightsStatement' % self.NAMESPACES['hsterms'])
        hsterms_statement.text = clean_for_xml(self.rights.statement)
        if self.rights.url:
            hsterms_url = etree.SubElement(dc_rights_rdf_Description,
                                           '{%s}URL' % self.NAMESPACES['hsterms'])
            hsterms_url.set('{%s}resource' % self.NAMESPACES['rdf'], self.rights.url)
    # subjects: URL-valued subjects become rdf:resource attributes, others plain text
    for sub in self.subjects.all():
        dc_subject = etree.SubElement(rdf_Description, '{%s}subject' % self.NAMESPACES['dc'])
        if sub.value.lower().find('http://') == 0 or sub.value.lower().find('https://') == 0:
            dc_subject.set('{%s}resource' % self.NAMESPACES['rdf'], sub.value)
        else:
            dc_subject.text = sub.value
    # resource type related additional attributes
    rdf_Description_resource = etree.SubElement(RDF_ROOT,
                                                '{%s}Description' % self.NAMESPACES['rdf'])
    rdf_Description_resource.set('{%s}about' % self.NAMESPACES['rdf'], self.type.url)
    rdfs1_label = etree.SubElement(rdf_Description_resource,
                                   '{%s}label' % self.NAMESPACES['rdfs1'])
    rdfs1_label.text = resource._meta.verbose_name
    rdfs1_isDefinedBy = etree.SubElement(rdf_Description_resource,
                                         '{%s}isDefinedBy' % self.NAMESPACES['rdfs1'])
    rdfs1_isDefinedBy.text = current_site_url() + "/terms"
    # encode extended key/value arbitrary metadata
    resource = BaseResource.objects.filter(object_id=self.id).first()
    for key, value in list(resource.extra_metadata.items()):
        hsterms_key_value = etree.SubElement(
            rdf_Description, '{%s}extendedMetadata' % self.NAMESPACES['hsterms'])
        hsterms_key_value_rdf_Description = etree.SubElement(
            hsterms_key_value, '{%s}Description' % self.NAMESPACES['rdf'])
        hsterms_key = etree.SubElement(hsterms_key_value_rdf_Description,
                                       '{%s}key' % self.NAMESPACES['hsterms'])
        hsterms_key.text = key
        hsterms_value = etree.SubElement(hsterms_key_value_rdf_Description,
                                         '{%s}value' % self.NAMESPACES['hsterms'])
        hsterms_value.text = value
    return self.XML_HEADER + '\n' + etree.tostring(RDF_ROOT, encoding='UTF-8',
                                                   pretty_print=pretty_print).decode()
# TODO: (Pabitra, Dt:11/21/2016) need to delete this method and users of this method
# need to use the same method from the hydroshare.utils.py
def add_metadata_element_to_xml(self, root, md_element, md_fields):
    """Generate XML elements for a given metadata element.

    Helper function to generate xml elements for a given metadata element that belongs to
    'hsterms' namespace

    :param root: the xml document root element to which xml elements for the specified
        metadata element needs to be added
    :param md_element: the metadata element object. The term attribute of the metadata
        element object is used for naming the root xml element for this metadata element.
        If the root xml element needs to be named differently, then this needs to be a tuple
        with first element being the metadata element object and the second being the name
        for the root element.
    :param md_fields: a list of attribute names of the metadata element. A plain string item
        uses the attribute name as the xml element name; a (attribute_name, xml_name) tuple
        uses xml_name for the generated sub-element instead.
        Example: [('first_name', 'firstName'), 'phone', 'email']
    """
    from lxml import etree
    # Resolve the outer element name: explicit override (tuple form) or the
    # element's own term attribute.
    if isinstance(md_element, tuple):
        md_element, outer_name = md_element[0], md_element[1]
    else:
        outer_name = md_element.term
    container = etree.SubElement(
        root, "{{{ns}}}{new_element}".format(ns=self.NAMESPACES['hsterms'],
                                             new_element=outer_name))
    rdf_desc = etree.SubElement(
        container, "{{{ns}}}Description".format(ns=self.NAMESPACES['rdf']))
    for md_field in md_fields:
        if isinstance(md_field, tuple):
            attr_name, xml_name = md_field
        else:
            attr_name = xml_name = md_field
        value = getattr(md_element, attr_name) if hasattr(md_element, attr_name) else None
        if value:
            sub_element = etree.SubElement(
                rdf_desc, "{{{ns}}}{field}".format(ns=self.NAMESPACES['hsterms'],
                                                   field=xml_name))
            sub_element.text = str(value)
def _create_person_element(self, etree, parent_element, person):
    """Create a metadata element for a person (Creator, Contributor, etc).

    :param etree: the lxml etree module (passed in by the caller)
    :param parent_element: xml element the new dc:creator/dc:contributor is appended to
    :param person: a Creator or Contributor model instance
    """
    # importing here to avoid circular import problem
    from .hydroshare.utils import current_site_url
    # Creators become dc:creator; anything else is rendered as dc:contributor.
    if isinstance(person, Creator):
        dc_person = etree.SubElement(parent_element, '{%s}creator' % self.NAMESPACES['dc'])
    else:
        dc_person = etree.SubElement(parent_element, '{%s}contributor' % self.NAMESPACES['dc'])
    dc_person_rdf_Description = etree.SubElement(dc_person,
                                                 '{%s}Description' % self.NAMESPACES['rdf'])
    if person.name.strip():
        hsterms_name = etree.SubElement(dc_person_rdf_Description,
                                        '{%s}name' % self.NAMESPACES['hsterms'])
        hsterms_name.text = person.name
    if person.description:
        # person.description holds a site-relative path here; prefix the site URL
        dc_person_rdf_Description.set('{%s}about' % self.NAMESPACES['rdf'],
                                      current_site_url() + person.description)
    # only creators carry an ordering attribute
    if isinstance(person, Creator):
        hsterms_creatorOrder = etree.SubElement(dc_person_rdf_Description,
                                                '{%s}creatorOrder' % self.NAMESPACES['hsterms'])
        hsterms_creatorOrder.text = str(person.order)
    if person.organization:
        hsterms_organization = etree.SubElement(dc_person_rdf_Description,
                                                '{%s}organization' % self.NAMESPACES['hsterms'])
        hsterms_organization.text = person.organization
    if person.email:
        hsterms_email = etree.SubElement(dc_person_rdf_Description,
                                         '{%s}email' % self.NAMESPACES['hsterms'])
        hsterms_email.text = person.email
    if person.address:
        hsterms_address = etree.SubElement(dc_person_rdf_Description,
                                           '{%s}address' % self.NAMESPACES['hsterms'])
        hsterms_address.text = person.address
    if person.phone:
        hsterms_phone = etree.SubElement(dc_person_rdf_Description,
                                         '{%s}phone' % self.NAMESPACES['hsterms'])
        hsterms_phone.set('{%s}resource' % self.NAMESPACES['rdf'], 'tel:' + person.phone)
    if person.homepage:
        hsterms_homepage = etree.SubElement(dc_person_rdf_Description,
                                            '{%s}homepage' % self.NAMESPACES['hsterms'])
        hsterms_homepage.set('{%s}resource' % self.NAMESPACES['rdf'], person.homepage)
    # external identifiers (e.g. name -> link) rendered as hsterms:<name> with rdf:resource
    for name, link in person.identifiers.items():
        hsterms_link_type = etree.SubElement(dc_person_rdf_Description,
                                             '{%s}' % self.NAMESPACES['hsterms'] + name)
        hsterms_link_type.set('{%s}resource' % self.NAMESPACES['rdf'], link)
@property
def resource_uri(self):
    """URL of this resource's 'hydroShareIdentifier' identifier."""
    hs_identifier = self.identifiers.filter(name='hydroShareIdentifier')[0]
    return hs_identifier.url
def create_element(self, element_model_name, **kwargs):
    """Create any supported metadata element.

    Raises ValidationError when the resource is published and the requested
    element type is not allowed to be created in that state.
    """
    model_type = self._get_metadata_element_model_type(element_model_name)
    kwargs['content_object'] = self
    name_lower = element_model_name.lower()
    if self.resource.raccess.published:
        # Published resources are largely frozen: no new creators, only a
        # 'doi' identifier, and only 'modified'/'published' dates.
        if name_lower == 'creator':
            raise ValidationError(
                "{} can't be created for a published resource".format(name_lower))
        if name_lower == 'identifier' and kwargs.get('name', '') != 'doi':
            # for published resource the 'name' attribute of the identifier must be set to 'doi'
            raise ValidationError("For a published resource only a doi identifier can be created")
        if name_lower == 'date':
            date_type = kwargs.get('type', '')
            if date_type and date_type not in ('modified', 'published'):
                raise ValidationError(
                    "{} date can't be created for a published resource".format(date_type))
    return model_type.model_class().create(**kwargs)
def update_element(self, element_model_name, element_id, **kwargs):
    """Update metadata element.

    Raises ValidationError when the resource is published and the element type
    may not be updated in that state.
    """
    model_type = self._get_metadata_element_model_type(element_model_name)
    kwargs['content_object'] = self
    name_lower = element_model_name.lower()
    if self.resource.raccess.published:
        frozen_when_published = ('title', 'creator', 'rights', 'identifier', 'format', 'publisher')
        if name_lower in frozen_when_published:
            raise ValidationError(
                "{} can't be updated for a published resource".format(name_lower))
        if name_lower == 'date':
            date_type = kwargs.get('type', '')
            # only the 'modified' date may still change after publication
            if date_type and date_type != 'modified':
                raise ValidationError(
                    "{} date can't be updated for a published resource".format(date_type))
    model_type.model_class().update(element_id, **kwargs)
def delete_element(self, element_model_name, element_id):
    """Delete Metadata element.

    Raises ValidationError when the resource is published and the element type
    may not be deleted in that state.
    """
    model_type = self._get_metadata_element_model_type(element_model_name)
    name_lower = element_model_name.lower()
    deletable_when_published = ('subject', 'contributor', 'source', 'relation',
                                'fundingagency', 'format')
    if self.resource.raccess.published and name_lower not in deletable_when_published:
        raise ValidationError(
            "{} can't be deleted for a published resource".format(name_lower))
    model_type.model_class().remove(element_id)
def _get_metadata_element_model_type(self, element_model_name):
    """Get type of metadata element based on model type."""
    element_model_name = element_model_name.lower()
    if not self._is_valid_element(element_model_name):
        raise ValidationError("Metadata element type:%s is not one of the "
                              "supported in core metadata elements."
                              % element_model_name)
    unsupported_element_error = "Metadata element type:%s is not supported." \
                                % element_model_name
    # Look the model up in this app first, then fall back to hs_core.
    model_type = None
    for app_label in (self._meta.app_label, 'hs_core'):
        try:
            model_type = ContentType.objects.get(app_label=app_label,
                                                 model=element_model_name)
            break
        except ObjectDoesNotExist:
            continue
    if model_type is None:
        raise ValidationError(unsupported_element_error)
    if not issubclass(model_type.model_class(), AbstractMetaDataElement):
        raise ValidationError(unsupported_element_error)
    return model_type
def _is_valid_element(self, element_name):
    """Check whether metadata element is valid (case-insensitive)."""
    wanted = element_name.lower()
    return any(wanted == supported.lower()
               for supported in self.get_supported_element_names())
def update_non_repeatable_element(self, element_name, metadata, property_name=None):
    """Update a non-repeatable metadata element.

    Creates or updates the single metadata element named *element_name* from
    the first matching entry in *metadata*.

    :param element_name: metadata element class name (e.g. title)
    :param metadata: a list of dicts - each dict has data to update/create a specific metadata
        element (e.g. {'title': {'value': 'my resource title'}}
    :param property_name: name of the property/attribute in this class (or a subclass)
        through which the element instance is accessed; needed only when it differs
        from the element class name. Example: a subclass exposing MpMetadata through a
        'program' property calls this with element_name='mpmetadata',
        property_name='program'.
    :return:
    """
    for dict_item in metadata:
        if element_name not in dict_item:
            continue
        accessor = element_name if property_name is None else property_name
        existing_element = getattr(self, accessor, None)
        if existing_element:
            self.update_element(element_id=existing_element.id,
                                element_model_name=element_name,
                                **dict_item[element_name])
        else:
            self.create_element(element_model_name=element_name,
                                **dict_item[element_name])
def update_repeatable_element(self, element_name, metadata, property_name=None):
    """Update a repeatable metadata element.

    Replace-all semantics: when *metadata* contains any items for *element_name*,
    every existing element of that type is deleted first and the supplied items
    are created fresh.

    :param element_name: class name of the metadata element (e.g. creator)
    :param metadata: a list of dicts containing data for each of the metadata elements that
        needs to be created/updated as part of bulk update
    :param property_name: (Optional) the attribute used on this instance to access all
        objects of type *element_name*; when omitted, element_name + 's' is used.
        Example: a subclass exposing ModelInput objects through a 'model_inputs'
        property calls this with element_name='modelinput',
        property_name='model_inputs'.
    :return:
    """
    matching_items = [item for item in metadata if element_name in item]
    if not matching_items:
        return
    manager_name = (element_name + 's') if property_name is None else property_name
    related_manager = getattr(self, manager_name)
    related_manager.all().delete()
    for item in matching_items:
        self.create_element(element_model_name=element_name, **item[element_name])
class TaskNotification(models.Model):
    # NOTE(review): appears to track per-user background task state/notifications -
    # confirm against the code that creates these rows.
    # Allowed lifecycle states for a tracked task.
    TASK_STATUS_CHOICES = (
        ('progress', 'Progress'),
        ('failed', 'Failed'),
        ('aborted', 'Aborted'),
        ('completed', 'Completed'),
        ('delivered', 'Delivered'),
    )
    # Timestamp set automatically when the row is first saved.
    created = models.DateTimeField(auto_now_add=True)
    # Username of the task owner; indexed for per-user lookups.
    username = models.CharField(max_length=150, blank=True, db_index=True)
    # Unique id of the tracked task.
    task_id = models.CharField(max_length=50, unique=True)
    # Human-readable task name (may be empty).
    name = models.CharField(max_length=1000, blank=True)
    # Free-form payload string associated with the task (may be empty).
    payload = models.CharField(max_length=1000, blank=True)
    # Current state; defaults to 'progress' on creation.
    status = models.CharField(max_length=20, choices=TASK_STATUS_CHOICES, default='progress')
def resource_processor(request, page):
    """Return mezzanine page processor for resource page."""
    return page_permissions_page_processor(request, page)
@receiver(post_save)
def resource_creation_signal_handler(sender, instance, created, **kwargs):
    """Return resource update signal handler for newly created resource.

    For now this is just a placeholder for some actions to be taken when a resource gets saved
    """
    if not isinstance(instance, AbstractResource):
        return
    if not created:
        # saves of existing resources are delegated to the update handler
        resource_update_signal_handler(sender, instance, created, **kwargs)
def resource_update_signal_handler(sender, instance, created, **kwargs):
    """Do nothing (noop)."""
    # Placeholder called from resource_creation_signal_handler for saves of
    # existing resources; kept so post-update actions can be added later.
    pass
| hydroshare/hydroshare | hs_core/models.py | Python | bsd-3-clause | 207,265 | [
"NetCDF"
] | 598ede8b4bcf6e55b4f449e73e921af3bb37751fc2804842cf195ae39987ddbc |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines classes to represent the density of states, etc.
"""
import functools
import warnings
from typing import Dict, Optional
import numpy as np
from monty.json import MSONable
from scipy.constants.codata import value as _cd
from pymatgen.core.periodic_table import get_el_sp
from pymatgen.core.sites import PeriodicSite
from pymatgen.core.spectrum import Spectrum
from pymatgen.core.structure import Structure
from pymatgen.electronic_structure.core import Orbital, Spin
from pymatgen.util.coord import get_linear_interpolated_value
from pymatgen.util.typing import ArrayLike, SpeciesLike
class DOS(Spectrum):
    """
    Replacement basic DOS object. All other DOS objects are extended versions
    of this object. Work in progress.

    .. attribute: energies

        The sequence of energies

    .. attribute: densities

        A dict of spin densities, e.g., {Spin.up: [...], Spin.down: [...]}

    .. attribute: efermi

        Fermi level
    """

    XLABEL = "Energy"
    YLABEL = "Density"

    def __init__(self, energies: ArrayLike, densities: ArrayLike, efermi: float):
        """
        Args:
            energies: A sequence of energies
            densities (ndarray): Either a Nx1 or a Nx2 array. If former, it is
                interpreted as a Spin.up only density. Otherwise, the first column
                is interpreted as Spin.up and the other is Spin.down.
            efermi: Fermi level energy.
        """
        super().__init__(energies, densities, efermi)
        self.efermi = efermi

    def _channel_densities(self, spin: Optional[Spin]) -> np.ndarray:
        """Return the density array for the requested spin channel.

        When spin is None, the spin channels are summed (or the single column
        is returned for a spin-unpolarized Nx1 density).
        """
        if spin is None:
            return self.y if len(self.ydim) == 1 else np.sum(self.y, axis=1)
        if spin == Spin.up:
            return self.y[:, 0]
        return self.y[:, 1]

    def get_interpolated_gap(self, tol: float = 0.001, abs_tol: bool = False, spin: Optional[Spin] = None):
        """
        Expects a DOS object and finds the gap

        Args:
            tol: tolerance in occupations for determining the gap
            abs_tol: Set to True for an absolute tolerance and False for a
                relative one.
            spin: Possible values are None - finds the gap in the summed
                densities, Up - finds the gap in the up spin channel,
                Down - finds the gap in the down spin channel.

        Returns:
            (gap, cbm, vbm):
                Tuple of floats in eV corresponding to the gap, cbm and vbm.
        """
        tdos = self._channel_densities(spin)
        if not abs_tol:
            # relative tolerance: scale by the average density per grid point
            tol = tol * tdos.sum() / tdos.shape[0]  # type: ignore
        energies = self.x
        # indices with appreciable DOS below/above the Fermi level
        below_fermi = [i for i in range(len(energies)) if energies[i] < self.efermi and tdos[i] > tol]
        above_fermi = [i for i in range(len(energies)) if energies[i] > self.efermi and tdos[i] > tol]
        vbm_start = max(below_fermi)
        cbm_start = min(above_fermi)
        if vbm_start == cbm_start:
            return 0.0, self.efermi, self.efermi

        # Interpolate between adjacent values to find where the density
        # crosses the tolerance on each side of the gap.
        terminal_dens = tdos[vbm_start : vbm_start + 2][::-1]
        terminal_energies = energies[vbm_start : vbm_start + 2][::-1]
        start = get_linear_interpolated_value(terminal_dens, terminal_energies, tol)
        terminal_dens = tdos[cbm_start - 1 : cbm_start + 1]
        terminal_energies = energies[cbm_start - 1 : cbm_start + 1]
        end = get_linear_interpolated_value(terminal_dens, terminal_energies, tol)
        return end - start, end, start

    def get_cbm_vbm(self, tol: float = 0.001, abs_tol: bool = False, spin: Optional[Spin] = None):
        """
        Expects a DOS object and finds the cbm and vbm.

        Args:
            tol: tolerance in occupations for determining the gap
            abs_tol: An absolute tolerance (True) and a relative one (False)
            spin: Possible values are None - finds the gap in the summed
                densities, Up - finds the gap in the up spin channel,
                Down - finds the gap in the down spin channel.

        Returns:
            (cbm, vbm): float in eV corresponding to the gap
        """
        # determine tolerance
        tdos = self._channel_densities(spin)
        if not abs_tol:
            tol = tol * tdos.sum() / tdos.shape[0]  # type: ignore

        # find index of fermi energy; bound the scan so a Fermi level at or
        # above the top of the energy grid cannot walk past the end of the
        # array (the unbounded loop raised IndexError in that case)
        i_fermi = 0
        while i_fermi < len(self.x) - 1 and self.x[i_fermi] <= self.efermi:
            i_fermi += 1

        # work backwards until tolerance is reached
        i_gap_start = i_fermi
        while i_gap_start - 1 >= 0 and tdos[i_gap_start - 1] <= tol:
            i_gap_start -= 1

        # work forwards until tolerance is reached
        i_gap_end = i_gap_start
        while i_gap_end < tdos.shape[0] and tdos[i_gap_end] <= tol:
            i_gap_end += 1
        i_gap_end -= 1
        return self.x[i_gap_end], self.x[i_gap_start]

    def get_gap(self, tol: float = 0.001, abs_tol: bool = False, spin: Optional[Spin] = None):
        """
        Expects a DOS object and finds the gap.

        Args:
            tol: tolerance in occupations for determining the gap
            abs_tol: An absolute tolerance (True) and a relative one (False)
            spin: Possible values are None - finds the gap in the summed
                densities, Up - finds the gap in the up spin channel,
                Down - finds the gap in the down spin channel.

        Returns:
            gap in eV
        """
        (cbm, vbm) = self.get_cbm_vbm(tol, abs_tol, spin)
        return max(cbm - vbm, 0.0)

    def __str__(self):
        """
        Returns a string which can be easily plotted (using gnuplot).
        """
        if Spin.down in self.densities:
            stringarray = [f"#{'Energy':30s} {'DensityUp':30s} {'DensityDown':30s}"]
            for i, energy in enumerate(self.energies):
                stringarray.append(f"{energy:.5f} {self.densities[Spin.up][i]:.5f} {self.densities[Spin.down][i]:.5f}")
        else:
            stringarray = [f"#{'Energy':30s} {'DensityUp':30s}"]
            for i, energy in enumerate(self.energies):
                stringarray.append(f"{energy:.5f} {self.densities[Spin.up][i]:.5f}")
        return "\n".join(stringarray)
class Dos(MSONable):
"""
Basic DOS object. All other DOS objects are extended versions of this
object.
.. attribute: energies
The sequence of energies
.. attribute: densities
A dict of spin densities, e.g., {Spin.up: [...], Spin.down: [...]}
.. attribute: efermi
Fermi level
"""
def __init__(self, efermi: float, energies: ArrayLike, densities: Dict[Spin, ArrayLike]):
    """
    Args:
        efermi: Fermi level energy
        energies: A sequences of energies
        densities ({Spin: np.array}): representing the density of states
            for each Spin.
    """
    self.efermi = efermi
    self.energies = np.array(energies)
    # copy every spin channel into its own ndarray
    self.densities = {spin: np.array(dens) for spin, dens in densities.items()}
def get_densities(self, spin: Spin = None):
    """
    Returns the density of states for a particular spin.

    Args:
        spin: Spin

    Returns:
        Returns the density of states for a particular spin. If Spin is
        None, the sum of all spins is returned.
    """
    if self.densities is None:
        return None
    if spin is not None:
        return self.densities[spin]
    # spin unspecified: sum both channels when spin-polarized
    if Spin.down in self.densities:
        return self.densities[Spin.up] + self.densities[Spin.down]
    return self.densities[Spin.up]
def get_smeared_densities(self, sigma: float):
    """
    Returns the Dict representation of the densities, {Spin: densities},
    but with a Gaussian smearing of std dev sigma applied about the fermi
    level.

    Args:
        sigma: Std dev of Gaussian smearing function, in the same energy
            units as self.energies.

    Returns:
        Dict of Gaussian-smeared densities.
    """
    # scipy.ndimage.filters is a deprecated alias namespace (removed in
    # SciPy >= 1.10); import gaussian_filter1d from scipy.ndimage directly.
    from scipy.ndimage import gaussian_filter1d

    # sigma is given in energy units; convert to grid points using the
    # average spacing of the (assumed near-uniform) energy mesh.
    avgdiff = float(np.mean(np.diff(self.energies)))
    return {spin: gaussian_filter1d(dens, sigma / avgdiff)
            for spin, dens in self.densities.items()}
def __add__(self, other):
    """
    Adds two DOS together. Checks that energy scales are the same.
    Otherwise, a ValueError is thrown.

    Args:
        other: Another DOS object.

    Returns:
        Sum of the two DOSs.
    """
    if not all(np.equal(self.energies, other.energies)):
        raise ValueError("Energies of both DOS are not compatible!")
    summed_densities = {}
    for spin in self.densities:
        summed_densities[spin] = self.densities[spin] + other.densities[spin]
    return Dos(self.efermi, self.energies, summed_densities)
def get_interpolated_value(self, energy: float):
    """
    Returns interpolated density for a particular energy.

    Args:
        energy: Energy to return the density for.

    Returns:
        Dict of {Spin: interpolated density} for each spin channel.
    """
    return {
        spin: get_linear_interpolated_value(self.energies, dens, energy)
        for spin, dens in self.densities.items()
    }
def get_interpolated_gap(self, tol: float = 0.001, abs_tol: bool = False, spin: Spin = None):
    """
    Expects a DOS object and finds the gap

    Args:
        tol: tolerance in occupations for determining the gap
        abs_tol: Set to True for an absolute tolerance and False for a
            relative one.
        spin: Possible values are None - finds the gap in the summed
            densities, Up - finds the gap in the up spin channel,
            Down - finds the gap in the down spin channel.

    Returns:
        (gap, cbm, vbm):
            Tuple of floats in eV corresponding to the gap, cbm and vbm.
    """
    tdos = self.get_densities(spin)
    if not abs_tol:
        # Relative tolerance: scale by the mean density per grid point.
        tol = tol * tdos.sum() / tdos.shape[0]
    energies = self.energies
    # Indices with non-negligible density strictly below / above E_F.
    below_fermi = [i for i in range(len(energies)) if energies[i] < self.efermi and tdos[i] > tol]
    above_fermi = [i for i in range(len(energies)) if energies[i] > self.efermi and tdos[i] > tol]
    vbm_start = max(below_fermi)
    cbm_start = min(above_fermi)
    if vbm_start == cbm_start:
        # Occupied and unoccupied regions touch: report zero gap at E_F.
        return 0.0, self.efermi, self.efermi
    # Interpolate between adjacent values
    # VBM edge: pair reversed so the density values run in increasing
    # order for the interpolation helper.
    terminal_dens = tdos[vbm_start : vbm_start + 2][::-1]
    terminal_energies = energies[vbm_start : vbm_start + 2][::-1]
    start = get_linear_interpolated_value(terminal_dens, terminal_energies, tol)
    # CBM edge: densities already increase with energy here.
    terminal_dens = tdos[cbm_start - 1 : cbm_start + 1]
    terminal_energies = energies[cbm_start - 1 : cbm_start + 1]
    end = get_linear_interpolated_value(terminal_dens, terminal_energies, tol)
    return end - start, end, start
def get_cbm_vbm(self, tol: float = 0.001, abs_tol: bool = False, spin: Spin = None):
    """
    Expects a DOS object and finds the cbm and vbm.

    Args:
        tol: tolerance in occupations for determining the gap
        abs_tol: An absolute tolerance (True) and a relative one (False)
        spin: Possible values are None - finds the gap in the summed
            densities, Up - finds the gap in the up spin channel,
            Down - finds the gap in the down spin channel.

    Returns:
        (cbm, vbm): float in eV corresponding to the gap
    """
    # determine tolerance
    tdos = self.get_densities(spin)
    if not abs_tol:
        # Relative tolerance: scale by the mean density per grid point.
        tol = tol * tdos.sum() / tdos.shape[0]
    # find index of fermi energy
    # NOTE(review): runs off the end (IndexError) if E_F >= max(energies).
    i_fermi = 0
    while self.energies[i_fermi] <= self.efermi:
        i_fermi += 1
    # work backwards until tolerance is reached
    i_gap_start = i_fermi
    while i_gap_start - 1 >= 0 and tdos[i_gap_start - 1] <= tol:
        i_gap_start -= 1
    # work forwards until tolerance is reached
    i_gap_end = i_gap_start
    while i_gap_end < tdos.shape[0] and tdos[i_gap_end] <= tol:
        i_gap_end += 1
    i_gap_end -= 1
    # (cbm, vbm): first energy above the gap, last energy below it.
    return self.energies[i_gap_end], self.energies[i_gap_start]
def get_gap(self, tol: float = 0.001, abs_tol: bool = False, spin: Spin = None):
    """
    Expects a DOS object and finds the gap.

    Args:
        tol: tolerance in occupations for determining the gap
        abs_tol: An absolute tolerance (True) and a relative one (False)
        spin: Possible values are None - finds the gap in the summed
            densities, Up - finds the gap in the up spin channel,
            Down - finds the gap in the down spin channel.

    Returns:
        gap in eV
    """
    (cbm, vbm) = self.get_cbm_vbm(tol, abs_tol, spin)
    # Clamp at zero: metals can report cbm < vbm from the edge search.
    return max(cbm - vbm, 0.0)
def __str__(self):
    """
    Returns a string which can be easily plotted (using gnuplot).
    """
    if Spin.down in self.densities:
        # Spin-polarized: header plus three whitespace-separated columns.
        stringarray = [f"#{'Energy':30s} {'DensityUp':30s} {'DensityDown':30s}"]
        for i, energy in enumerate(self.energies):
            stringarray.append(f"{energy:.5f} {self.densities[Spin.up][i]:.5f} {self.densities[Spin.down][i]:.5f}")
    else:
        # Non-spin-polarized: energy and a single density column.
        stringarray = [f"#{'Energy':30s} {'DensityUp':30s}"]
        for i, energy in enumerate(self.energies):
            stringarray.append(f"{energy:.5f} {self.densities[Spin.up][i]:.5f}")
    return "\n".join(stringarray)
@classmethod
def from_dict(cls, d) -> "Dos":
    """
    Returns Dos object from dict representation of Dos.
    """
    # Spin keys were serialized as stringified ints ("1"/"-1") by
    # as_dict(); convert them back into Spin members.
    return Dos(
        d["efermi"],
        d["energies"],
        {Spin(int(k)): v for k, v in d["densities"].items()},
    )
def as_dict(self) -> dict:
    """
    Json-serializable dict representation of Dos.
    """
    # str(spin) yields the int value ("1"/"-1"), which from_dict reverses.
    return {
        "@module": self.__class__.__module__,
        "@class": self.__class__.__name__,
        "efermi": self.efermi,
        "energies": self.energies.tolist(),
        "densities": {str(spin): dens.tolist() for spin, dens in self.densities.items()},
    }
class FermiDos(Dos, MSONable):
    """
    This wrapper class helps relate the density of states, doping levels
    (i.e. carrier concentrations) and corresponding fermi levels. A negative
    doping concentration indicates the majority carriers are electrons
    (n-type doping); a positive doping concentration indicates holes are the
    majority carriers (p-type doping).
    """

    def __init__(
        self,
        dos: Dos,
        structure: Structure = None,
        nelecs: float = None,
        bandgap: float = None,
    ):
        """
        Args:
            dos: Pymatgen Dos object.
            structure: A structure. If not provided, the structure
                of the dos object will be used. If the dos does not have an
                associated structure object, an error will be thrown.
            nelecs: The number of electrons included in the energy range of
                dos. It is used for normalizing the densities. Default is the total
                number of electrons in the structure.
            bandgap: If set, the energy values are scissored so that the electronic
                band gap matches this value.
        """
        super().__init__(
            dos.efermi,
            energies=dos.energies,
            densities={k: np.array(d) for k, d in dos.densities.items()},
        )
        if structure is None:
            if hasattr(dos, "structure"):
                structure = dos.structure
            else:
                raise ValueError("Structure object is not provided and not present in dos")
        self.structure = structure
        self.nelecs = nelecs or self.structure.composition.total_electrons
        self.volume = self.structure.volume
        self.energies = np.array(dos.energies)
        # Forward-difference integration weights; the final grid point is
        # repeated so `de` has the same length as `energies` (last weight 0).
        self.de = np.hstack((self.energies[1:], self.energies[-1])) - self.energies
        # normalize total density of states based on integral at 0K
        tdos = np.array(self.get_densities())
        self.tdos = tdos * self.nelecs / (tdos * self.de)[self.energies <= self.efermi].sum()
        ecbm, evbm = self.get_cbm_vbm()
        self.idx_vbm = int(np.argmin(abs(self.energies - evbm)))
        self.idx_cbm = int(np.argmin(abs(self.energies - ecbm)))
        self.A_to_cm = 1e-8  # Angstrom-to-cm conversion factor
        if bandgap:
            # Scissor operation: shift valence and conduction energies apart
            # symmetrically around a reference so the gap equals `bandgap`.
            if evbm < self.efermi < ecbm:
                eref = self.efermi
            else:
                eref = (evbm + ecbm) / 2.0
            idx_fermi = int(np.argmin(abs(self.energies - eref)))
            if idx_fermi == self.idx_vbm:
                # Fermi level and vbm should be different indices
                idx_fermi += 1
            self.energies[:idx_fermi] -= (bandgap - (ecbm - evbm)) / 2.0
            self.energies[idx_fermi:] += (bandgap - (ecbm - evbm)) / 2.0

    def get_doping(self, fermi_level: float, temperature: float) -> float:
        """
        Calculate the doping (majority carrier concentration) at a given
        fermi level and temperature. A simple Left Riemann sum is used for
        integrating the density of states over energy & equilibrium Fermi-Dirac
        distribution.

        Args:
            fermi_level: The fermi_level level in eV.
            temperature: The temperature in Kelvin.

        Returns:
            The doping concentration in units of 1/cm^3. Negative values
            indicate that the majority carriers are electrons (n-type doping)
            whereas positivie values indicates the majority carriers are holes
            (p-type doping).
        """
        # Electrons occupying conduction-band states, weighted by f0.
        cb_integral = np.sum(
            self.tdos[self.idx_cbm :]
            * f0(self.energies[self.idx_cbm :], fermi_level, temperature)
            * self.de[self.idx_cbm :],
            axis=0,
        )
        # Holes in valence-band states: f0(-E, -Ef, T) == 1 - f0(E, Ef, T).
        vb_integral = np.sum(
            self.tdos[: self.idx_vbm + 1]
            * f0(-self.energies[: self.idx_vbm + 1], -fermi_level, temperature)
            * self.de[: self.idx_vbm + 1],
            axis=0,
        )
        # Convert from per unit cell (volume in A^3) to 1/cm^3.
        return (vb_integral - cb_integral) / (self.volume * self.A_to_cm**3)

    def get_fermi_interextrapolated(
        self, concentration: float, temperature: float, warn: bool = True, c_ref: float = 1e10, **kwargs
    ) -> float:
        """
        Similar to get_fermi except that when get_fermi fails to converge,
        an interpolated or extrapolated fermi is returned with the assumption
        that the fermi level changes linearly with log(abs(concentration)).

        Args:
            concentration: The doping concentration in 1/cm^3. Negative values
                represent n-type doping and positive values represent p-type
                doping.
            temperature: The temperature in Kelvin.
            warn: Whether to give a warning the first time the fermi cannot be
                found.
            c_ref: A doping concentration where get_fermi returns a
                value without error for both c_ref and -c_ref.
            **kwargs: Keyword arguments passed to the get_fermi function.

        Returns:
            The Fermi level. Note, the value is possibly interpolated or
            extrapolated and must be used with caution.
        """
        try:
            return self.get_fermi(concentration, temperature, **kwargs)
        except ValueError as e:
            if warn:
                warnings.warn(str(e))
            if abs(concentration) < c_ref:
                # Small target concentration: interpolate between the Fermi
                # levels found at +/-10x the concentration.
                if abs(concentration) < 1e-10:
                    concentration = 1e-10
                # max(10, ) is to avoid log(0<x<1) and log(1+x) both of which
                # are slow
                f2 = self.get_fermi_interextrapolated(
                    max(10, abs(concentration) * 10.0), temperature, warn=False, **kwargs
                )
                f1 = self.get_fermi_interextrapolated(
                    -max(10, abs(concentration) * 10.0), temperature, warn=False, **kwargs
                )
                c2 = np.log(abs(1 + self.get_doping(f2, temperature)))
                c1 = -np.log(abs(1 + self.get_doping(f1, temperature)))
                slope = (f2 - f1) / (c2 - c1)
                return f2 + slope * (np.sign(concentration) * np.log(abs(1 + concentration)) - c2)
            # Large target concentration: extrapolate linearly in log space
            # from a reference point that does converge.
            f_ref = self.get_fermi_interextrapolated(np.sign(concentration) * c_ref, temperature, warn=False, **kwargs)
            f_new = self.get_fermi_interextrapolated(concentration / 10.0, temperature, warn=False, **kwargs)
            clog = np.sign(concentration) * np.log(abs(concentration))
            c_newlog = np.sign(concentration) * np.log(abs(self.get_doping(f_new, temperature)))
            slope = (f_new - f_ref) / (c_newlog - np.sign(concentration) * 10.0)
            return f_new + slope * (clog - c_newlog)

    def get_fermi(
        self,
        concentration: float,
        temperature: float,
        rtol: float = 0.01,
        nstep: int = 50,
        step: float = 0.1,
        precision: int = 8,
    ):
        """
        Finds the fermi level at which the doping concentration at the given
        temperature (T) is equal to concentration. A greedy algorithm is used
        where the relative error is minimized by calculating the doping at a
        grid which continually becomes finer.

        Args:
            concentration: The doping concentration in 1/cm^3. Negative values
                represent n-type doping and positive values represent p-type
                doping.
            temperature: The temperature in Kelvin.
            rtol: The maximum acceptable relative error.
            nstep: THe number of steps checked around a given fermi level.
            step: Initial step in energy when searching for the Fermi level.
            precision: Essentially the decimal places of calculated Fermi level.

        Returns:
            The fermi level in eV.. Note that this is different from the default
            dos.efermi.
        """
        fermi = self.efermi  # initialize target fermi
        relative_error = [float("inf")]
        # Each pass scans a grid around the current best guess, then zooms
        # in by a factor of 10.
        for _ in range(precision):
            frange = np.arange(-nstep, nstep + 1) * step + fermi
            calc_doping = np.array([self.get_doping(f, temperature) for f in frange])
            relative_error = np.abs(calc_doping / concentration - 1.0)  # type: ignore
            fermi = frange[np.argmin(relative_error)]
            step /= 10.0
        if min(relative_error) > rtol:
            raise ValueError(f"Could not find fermi within {rtol * 100}% of concentration={concentration}")
        return fermi

    @classmethod
    def from_dict(cls, d) -> "FermiDos":
        """
        Returns Dos object from dict representation of Dos.
        """
        dos = Dos(
            d["efermi"],
            d["energies"],
            {Spin(int(k)): v for k, v in d["densities"].items()},
        )
        return FermiDos(dos, structure=Structure.from_dict(d["structure"]), nelecs=d["nelecs"])

    def as_dict(self) -> dict:
        """
        Json-serializable dict representation of Dos.
        """
        return {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "efermi": self.efermi,
            "energies": self.energies.tolist(),
            "densities": {str(spin): dens.tolist() for spin, dens in self.densities.items()},
            "structure": self.structure,
            "nelecs": self.nelecs,
        }
class CompleteDos(Dos):
    """
    This wrapper class defines a total dos, and also provides a list of PDos.
    Mainly used by pymatgen.io.vasp.Vasprun to create a complete Dos from
    a vasprun.xml file. You are unlikely to try to generate this object
    manually.

    .. attribute:: structure

        Structure associated with the CompleteDos.

    .. attribute:: pdos

        Dict of partial densities of the form {Site:{Orbital:{Spin:Densities}}}
    """

    def __init__(
        self, structure: Structure, total_dos: Dos, pdoss: Dict[PeriodicSite, Dict[Orbital, Dict[Spin, ArrayLike]]]
    ):
        """
        Args:
            structure: Structure associated with this particular DOS.
            total_dos: total Dos for structure
            pdoss: The pdoss are supplied as an {Site:{Orbital:{
                Spin:Densities}}}
        """
        super().__init__(
            total_dos.efermi,
            energies=total_dos.energies,
            densities={k: np.array(d) for k, d in total_dos.densities.items()},
        )
        self.pdos = pdoss
        self.structure = structure

    def get_site_orbital_dos(self, site: PeriodicSite, orbital: Orbital) -> Dos:
        """
        Get the Dos for a particular orbital of a particular site.

        Args:
            site: Site in Structure associated with CompleteDos.
            orbital: Orbital in the site.

        Returns:
            Dos containing densities for orbital of site.
        """
        return Dos(self.efermi, self.energies, self.pdos[site][orbital])

    def get_site_dos(self, site: PeriodicSite) -> Dos:
        """
        Get the total Dos for a site (all orbitals).

        Args:
            site: Site in Structure associated with CompleteDos.

        Returns:
            Dos containing summed orbital densities for site.
        """
        # Fold all of the site's orbital densities into one channel dict.
        site_dos = functools.reduce(add_densities, self.pdos[site].values())
        return Dos(self.efermi, self.energies, site_dos)

    def get_site_spd_dos(self, site: PeriodicSite) -> Dict[Orbital, Dos]:
        """
        Get orbital projected Dos of a particular site

        Args:
            site: Site in Structure associated with CompleteDos.

        Returns:
            dict of {orbital: Dos}, e.g. {"s": Dos object, ...}
        """
        # Bucket each orbital's densities by its s/p/d/f type.
        spd_dos: Dict[Orbital, Dict[Spin, ArrayLike]] = {}
        for orb, pdos in self.pdos[site].items():
            orbital_type = _get_orb_type(orb)
            if orbital_type in spd_dos:
                spd_dos[orbital_type] = add_densities(spd_dos[orbital_type], pdos)
            else:
                spd_dos[orbital_type] = pdos
        return {orb: Dos(self.efermi, self.energies, densities) for orb, densities in spd_dos.items()}

    def get_site_t2g_eg_resolved_dos(self, site: PeriodicSite) -> Dict[str, Dos]:
        """
        Get the t2g, eg projected DOS for a particular site.

        Args:
            site: Site in Structure associated with CompleteDos.

        Returns:
            A dict {"e_g": Dos, "t2g": Dos} containing summed e_g and t2g DOS
            for the site.
        """
        t2g_dos = []
        eg_dos = []
        for s, atom_dos in self.pdos.items():
            if s == site:
                for orb, pdos in atom_dos.items():
                    if orb in (Orbital.dxy, Orbital.dxz, Orbital.dyz):
                        t2g_dos.append(pdos)
                    elif orb in (Orbital.dx2, Orbital.dz2):
                        eg_dos.append(pdos)
        return {
            "t2g": Dos(self.efermi, self.energies, functools.reduce(add_densities, t2g_dos)),
            "e_g": Dos(self.efermi, self.energies, functools.reduce(add_densities, eg_dos)),
        }

    def get_spd_dos(self) -> Dict[Orbital, Dos]:
        """
        Get orbital projected Dos.

        Returns:
            dict of {orbital: Dos}, e.g. {"s": Dos object, ...}
        """
        # Sum over all sites, bucketed by orbital type.
        spd_dos = {}
        for atom_dos in self.pdos.values():
            for orb, pdos in atom_dos.items():
                orbital_type = _get_orb_type(orb)
                if orbital_type not in spd_dos:
                    spd_dos[orbital_type] = pdos
                else:
                    spd_dos[orbital_type] = add_densities(spd_dos[orbital_type], pdos)
        return {orb: Dos(self.efermi, self.energies, densities) for orb, densities in spd_dos.items()}

    def get_element_dos(self) -> Dict[SpeciesLike, Dos]:
        """
        Get element projected Dos.

        Returns:
            dict of {Element: Dos}
        """
        # Sum all orbital densities of all sites of each element.
        el_dos = {}
        for site, atom_dos in self.pdos.items():
            el = site.specie
            for pdos in atom_dos.values():
                if el not in el_dos:
                    el_dos[el] = pdos
                else:
                    el_dos[el] = add_densities(el_dos[el], pdos)
        return {el: Dos(self.efermi, self.energies, densities) for el, densities in el_dos.items()}

    def get_element_spd_dos(self, el: SpeciesLike) -> Dict[Orbital, Dos]:
        """
        Get element and spd projected Dos

        Args:
            el: Element in Structure.composition associated with CompleteDos

        Returns:
            dict of {orbital: Dos}, e.g. {"s": Dos object, ...}
        """
        el = get_el_sp(el)
        el_dos = {}
        for site, atom_dos in self.pdos.items():
            if site.specie == el:
                for orb, pdos in atom_dos.items():
                    orbital_type = _get_orb_type(orb)
                    if orbital_type not in el_dos:
                        el_dos[orbital_type] = pdos
                    else:
                        el_dos[orbital_type] = add_densities(el_dos[orbital_type], pdos)
        return {orb: Dos(self.efermi, self.energies, densities) for orb, densities in el_dos.items()}

    @property
    def spin_polarization(self) -> Optional[float]:
        """
        Calculates spin polarization at Fermi level. If the
        calculation is not spin-polarized, None will be
        returned.

        See Sanvito et al., doi: 10.1126/sciadv.1602241 for
        an example usage.

        :return (float): spin polarization in range [0, 1],
        will also return NaN if spin polarization ill-defined
        (e.g. for insulator)
        """
        n_F = self.get_interpolated_value(self.efermi)
        n_F_up = n_F[Spin.up]
        if Spin.down not in n_F:
            return None
        n_F_down = n_F[Spin.down]
        if (n_F_up + n_F_down) == 0:
            # only well defined for metals or half-metals
            return float("NaN")
        spin_polarization = (n_F_up - n_F_down) / (n_F_up + n_F_down)
        return abs(spin_polarization)

    @classmethod
    def from_dict(cls, d) -> "CompleteDos":
        """
        Returns CompleteDos object from dict representation.
        """
        tdos = Dos.from_dict(d)
        struct = Structure.from_dict(d["structure"])
        # Re-associate each serialized pdos entry with its site, in order.
        pdoss = {}
        for i in range(len(d["pdos"])):
            at = struct[i]
            orb_dos = {}
            for orb_str, odos in d["pdos"][i].items():
                orb = Orbital[orb_str]
                orb_dos[orb] = {Spin(int(k)): v for k, v in odos["densities"].items()}
            pdoss[at] = orb_dos
        return CompleteDos(struct, tdos, pdoss)

    def as_dict(self) -> dict:
        """
        Json-serializable dict representation of CompleteDos.
        """
        d = {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "efermi": self.efermi,
            "structure": self.structure.as_dict(),
            "energies": self.energies.tolist(),
            "densities": {str(spin): dens.tolist() for spin, dens in self.densities.items()},
            "pdos": [],
        }
        if len(self.pdos) > 0:
            # pdos entries are listed in site order so from_dict can zip
            # them back with the structure's sites.
            for at in self.structure:
                dd = {}
                for orb, pdos in self.pdos[at].items():
                    dd[str(orb)] = {
                        "densities": {str(int(spin)): list(dens) for spin, dens in pdos.items()}  # type: ignore
                    }
                d["pdos"].append(dd)
            d["atom_dos"] = {str(at): dos.as_dict() for at, dos in self.get_element_dos().items()}
            d["spd_dos"] = {str(orb): dos.as_dict() for orb, dos in self.get_spd_dos().items()}
        return d

    def __str__(self):
        return "Complete DOS for " + str(self.structure)
class LobsterCompleteDos(CompleteDos):
    """
    Extended CompleteDOS for Lobster
    """

    def get_site_orbital_dos(self, site: PeriodicSite, orbital: str) -> Dos:  # type: ignore
        """
        Get the Dos for a particular orbital of a particular site.

        Args:
            site: Site in Structure associated with CompleteDos.
            orbital: principal quantum number and orbital in string format, e.g. "4s".
                possible orbitals are: "s", "p_y", "p_z", "p_x", "d_xy", "d_yz", "d_z^2",
                "d_xz", "d_x^2-y^2", "f_y(3x^2-y^2)", "f_xyz",
                "f_yz^2", "f_z^3", "f_xz^2", "f_z(x^2-y^2)", "f_x(x^2-3y^2)"
                In contrast to the Cohpcar and the Cohplist objects, the strings from the Lobster files are used

        Returns:
            Dos containing densities of an orbital of a specific site.
        """
        # Validate the label after stripping the principal quantum number
        # (first character, e.g. the "4" in "4s").
        if orbital[1:] not in [
            "s",
            "p_y",
            "p_z",
            "p_x",
            "d_xy",
            "d_yz",
            "d_z^2",
            "d_xz",
            "d_x^2-y^2",
            "f_y(3x^2-y^2)",
            "f_xyz",
            "f_yz^2",
            "f_z^3",
            "f_xz^2",
            "f_z(x^2-y^2)",
            "f_x(x^2-3y^2)",
        ]:
            raise ValueError("orbital is not correct")
        return Dos(self.efermi, self.energies, self.pdos[site][orbital])  # type: ignore

    def get_site_t2g_eg_resolved_dos(self, site: PeriodicSite) -> Dict[str, Dos]:
        """
        Get the t2g, eg projected DOS for a particular site.

        Args:
            site: Site in Structure associated with CompleteDos.

        Returns:
            A dict {"e_g": Dos, "t2g": Dos} containing summed e_g and t2g DOS
            for the site.
        """
        warnings.warn("Are the orbitals correctly oriented? Are you sure?")
        t2g_dos = []
        eg_dos = []
        for s, atom_dos in self.pdos.items():
            if s == site:
                for orb, pdos in atom_dos.items():
                    # Lobster pdos keys are strings; map them to Orbital first.
                    if _get_orb_lobster(orb) in (Orbital.dxy, Orbital.dxz, Orbital.dyz):
                        t2g_dos.append(pdos)
                    elif _get_orb_lobster(orb) in (Orbital.dx2, Orbital.dz2):
                        eg_dos.append(pdos)
        return {
            "t2g": Dos(self.efermi, self.energies, functools.reduce(add_densities, t2g_dos)),
            "e_g": Dos(self.efermi, self.energies, functools.reduce(add_densities, eg_dos)),
        }

    def get_spd_dos(self) -> Dict[str, Dos]:  # type: ignore
        """
        Get orbital projected Dos.
        For example, if 3s and 4s are included in the basis of some element, they will be both summed in the orbital
        projected DOS

        Returns:
            dict of {orbital: Dos}, e.g. {"s": Dos object, ...}
        """
        spd_dos = {}
        for atom_dos in self.pdos.values():
            for orb, pdos in atom_dos.items():
                orbital_type = _get_orb_type_lobster(orb)
                if orbital_type not in spd_dos:
                    spd_dos[orbital_type] = pdos
                else:
                    spd_dos[orbital_type] = add_densities(spd_dos[orbital_type], pdos)
        return {orb: Dos(self.efermi, self.energies, densities) for orb, densities in spd_dos.items()}

    def get_element_spd_dos(self, el: SpeciesLike) -> Dict[str, Dos]:  # type: ignore
        """
        Get element and spd projected Dos

        Args:
            el: Element in Structure.composition associated with LobsterCompleteDos

        Returns:
            dict of {"S": densities, "P": densities, "D": densities}
        """
        el = get_el_sp(el)
        el_dos = {}
        for site, atom_dos in self.pdos.items():
            if site.specie == el:
                for orb, pdos in atom_dos.items():
                    orbital_type = _get_orb_type_lobster(orb)
                    if orbital_type not in el_dos:
                        el_dos[orbital_type] = pdos
                    else:
                        el_dos[orbital_type] = add_densities(el_dos[orbital_type], pdos)
        return {orb: Dos(self.efermi, self.energies, densities) for orb, densities in el_dos.items()}

    @classmethod
    def from_dict(cls, d) -> "LobsterCompleteDos":
        """
        Returns: CompleteDos object from dict representation.
        """
        tdos = Dos.from_dict(d)
        struct = Structure.from_dict(d["structure"])
        pdoss = {}
        for i in range(len(d["pdos"])):
            at = struct[i]
            orb_dos = {}
            for orb_str, odos in d["pdos"][i].items():
                # Unlike CompleteDos, orbital keys stay as Lobster strings.
                orb = orb_str
                orb_dos[orb] = {Spin(int(k)): v for k, v in odos["densities"].items()}
            pdoss[at] = orb_dos
        return LobsterCompleteDos(struct, tdos, pdoss)
def add_densities(density1: Dict[Spin, ArrayLike], density2: Dict[Spin, ArrayLike]) -> Dict[Spin, ArrayLike]:
    """
    Method to sum two densities.

    Args:
        density1: First density.
        density2: Second density.

    Returns:
        Dict of {spin: density}.
    """
    summed = {}
    # Iterate density1's spin channels; both inputs must share these keys.
    for spin in density1:
        summed[spin] = np.array(density1[spin]) + np.array(density2[spin])
    return summed
def _get_orb_type(orb):
try:
return orb.orbital_type
except AttributeError:
return orb
def f0(E, fermi, T):
    """
    Returns the equilibrium fermi-dirac.

    Args:
        E (float): energy in eV
        fermi (float): the fermi level in eV
        T (float): the temperature in kelvin

    Returns:
        float: occupation probability in [0, 1].
    """
    # `_cd` is a module-level constants lookup (presumably
    # scipy.constants.value -- confirm); it supplies k_B in eV/K.
    return 1.0 / (1.0 + np.exp((E - fermi) / (_cd("Boltzmann constant in eV/K") * T)))
def _get_orb_type_lobster(orb):
    """
    Map a Lobster orbital label to its orbital type.

    Args:
        orb: string representation of orbital, including the leading
            principal quantum number, e.g. "4s", "2p_x", "3d_xy".

    Returns:
        OrbitalType, or None (after printing a message) when the label is
        not recognized.
    """
    orb_labs = [
        "s",
        "p_y",
        "p_z",
        "p_x",
        "d_xy",
        "d_yz",
        "d_z^2",
        "d_xz",
        "d_x^2-y^2",
        "f_y(3x^2-y^2)",
        "f_xyz",
        "f_yz^2",
        "f_z^3",
        "f_xz^2",
        "f_z(x^2-y^2)",
        "f_x(x^2-3y^2)",
    ]
    try:
        # Strip the principal quantum number (first character) before lookup.
        orbital = Orbital(orb_labs.index(orb[1:]))
        return orbital.orbital_type
    except (ValueError, AttributeError):
        # list.index raises ValueError for an unknown label; the previous
        # `except AttributeError` never matched it, so bad input crashed
        # instead of reaching this fallback.
        print("Orb not in list")
        return None
def _get_orb_lobster(orb):
    """
    Map a Lobster orbital label to the corresponding Orbital.

    Args:
        orb: string representation of orbital, including the leading
            principal quantum number, e.g. "4s", "2p_x", "3d_xy".

    Returns:
        Orbital, or None (after printing a message) when the label is not
        recognized.
    """
    orb_labs = [
        "s",
        "p_y",
        "p_z",
        "p_x",
        "d_xy",
        "d_yz",
        "d_z^2",
        "d_xz",
        "d_x^2-y^2",
        "f_y(3x^2-y^2)",
        "f_xyz",
        "f_yz^2",
        "f_z^3",
        "f_xz^2",
        "f_z(x^2-y^2)",
        "f_x(x^2-3y^2)",
    ]
    try:
        # Strip the principal quantum number (first character) before lookup.
        orbital = Orbital(orb_labs.index(orb[1:]))
        return orbital
    except (ValueError, AttributeError):
        # list.index raises ValueError for an unknown label; the previous
        # `except AttributeError` never matched it, so bad input crashed
        # instead of reaching this fallback.
        print("Orb not in list")
        return None
| materialsproject/pymatgen | pymatgen/electronic_structure/dos.py | Python | mit | 39,326 | [
"DIRAC",
"Gaussian",
"VASP",
"pymatgen"
] | b976225b1f584a8f312dd1e34b714100d10d49445b7381d85f79a0c139646702 |
from common2 import *
# NAME IDEA -> pooling/random/sparse/distributed hebbian/horde/crowd/fragment/sample memory
# FEATURES:
# + boost (TODO vol?)
# + noise
# + dropout -- temporal disabling of neurons
# + decay -- move from mem to vol
# + negatives -- learning to avoid detecting some patterns
# - prune -- if input < mem perform fast decay
# - fatigue -- winner has lower score for some time
# NEXT VERSION:
# - layers -- rsm stacking
# NEXT VERSION:
# - attention
# - https://towardsdatascience.com/the-fall-of-rnn-lstm-2d1594c74ce0
# - https://towardsdatascience.com/memory-attention-sequences-37456d271992
# NEXT VERSION:
# - numpy -- faster version
# - cython -- faster version
# - gpu -- faster version
# - distributed
class rsm:
    def __init__(self,n,m,v):
        """Random Sample Memory

        n -- number of neurons
        m -- max hard connections per neuron (memory)
        v -- max soft connections per neuron (volatile memory)
        """
        self.N = n
        self.M = m
        self.V = v
        # Per-neuron sets of input feature ids: hard (mem) and soft (vol).
        self.mem = {j:set() for j in range(n)}
        self.vol = {j:set() for j in range(n)}
        self.win = set() # NEW fatigue

    def scores(self, input, boost=False, noise=False, dropout=0.0): # -> dict[i] -> scores
        """
        input -- sparse binary features (a set of feature ids)
        boost -- improve scores based on number of unconnected synapses (TODO)
        noise -- randomize scores to prevent snowballing
        dropout -- temporal disabling of neurons
        """
        mem = self.mem
        vol = self.vol
        N = self.N
        M = self.M
        V = self.V
        scores = {}
        for j in mem:
            # Hard matches count fully; all soft matches together sum to < 1.
            scores[j] = len(input & mem[j])
            scores[j] += 1.0*len(input & vol[j])/(V+1)
        if noise:
            for j in mem:
                # Tie-breaking noise, strictly smaller than one soft match.
                scores[j] += 1.0/(V+2)*random()
        if boost:
            for j in mem:
                # Neurons with free hard slots get a head start.
                scores[j] += 1+2*(M-len(mem[j])) if len(mem[j])<M else 0
        if dropout:
            k = int(round(float(dropout)*N))
            # NOTE(review): `combinations` comes from common2's star import;
            # presumably it yields k random indices out of N (itertools'
            # combinations would not accept an int here) -- confirm.
            for j in combinations(N,k):
                scores[j] = -1
        return scores

    def learn(self, input, k, decay=0.0, dropout=0.0, quick=False, negative=False):
        """
        input -- sparse binary features (a set of feature ids)
        k -- number of winning neurons
        decay -- probability of moving one unmatched hard connection to vol
        dropout -- fraction of neurons temporarily disabled
        quick -- seed empty winners with unknown inputs immediately
        negative -- unlearn `input` instead of learning it
        """
        mem = self.mem
        vol = self.vol
        M = self.M
        V = self.V
        # quick learning
        if quick and not negative:
            # Union of every feature any neuron already knows (hard or soft).
            known_inputs = set()
            for j in mem:
                known_inputs.update(mem[j])
                known_inputs.update(vol[j])
        scores = self.scores(input, boost=True, noise=True, dropout=dropout)
        winners = top(k,scores)
        for j in winners:
            # negative learning
            if negative:
                vol[j].difference_update(input)
                mem[j].difference_update(input)
                continue
            # quick learning
            if quick:
                if len(mem[j])==0:
                    # Seed a blank winner with features nobody else knows yet.
                    unknown_inputs = input - known_inputs
                    mem[j].update(self.pick(unknown_inputs, M))
                    known_inputs.update(mem[j])
            confirmed = vol[j] & input # must be done before decay
            # handle decay
            if decay and random()<decay:
                # Demote one random unmatched hard connection to vol.
                decay_candidates = mem[j] - input
                if decay_candidates:
                    d_list = list(decay_candidates)
                    shuffle(d_list)
                    d = d_list[0]
                    mem[j].remove(d)
                    if V:
                        vol[j].add(d)
            # handle confirmed
            # -> add to mem, remove from vol
            free_mem = self.M - len(mem[j])
            mem_delta = self.pick(confirmed, free_mem)
            mem[j].update(mem_delta)
            vol[j].difference_update(mem_delta)
            # handle unknown
            # -> add to vol
            known = mem[j] & input
            unknown = input - known - confirmed
            not_comfirmed = vol[j] - confirmed
            not_memorized = confirmed - set(mem_delta) # must stay in vol
            new_vol = list(unknown) + list(not_comfirmed) # TODO: proportion
            shuffle(new_vol)
            # not_memorized entries are kept first so truncation to V
            # cannot drop them.
            new_vol = list(not_memorized) + new_vol
            vol[j] = set(new_vol[:V])
        #print(scores) # XXX
        # TODO handle fatigue

    @classmethod
    def pick(self,v_set,n):
        "select n random values from a set"
        # TODO random
        out = list(v_set)
        shuffle(out)
        return out[:n]

    # auxiliary
    def score(self, input, k=1, method=1):
        """Aggregate per-neuron scores into a scalar (method 0: raw top-k list).

        Methods 1/2 average the top-k scores, 3/4 take the minimum, 5/6 the
        maximum; odd methods normalize by M+1, even ones by M and clamp to 1.
        """
        scores = self.scores(input)
        if method==0:
            return top(k, scores, values=True)
        elif method==1:
            score = 1.0*sum(top(k, scores, values=True))/(k*(self.M+1))
            return score
        elif method==2:
            score = 1.0*sum(top(k, scores, values=True))/(k*self.M)
            return min(1.0,score)
        if method==3:
            score = 1.0*min(top(k, scores, values=True))/(self.M+1)
            return score
        elif method==4:
            score = 1.0*min(top(k, scores, values=True))/self.M
            return min(1.0,score)
        if method==5:
            score = 1.0*max(top(k, scores, values=True))/(self.M+1)
            return score
        elif method==6:
            score = 1.0*max(top(k, scores, values=True))/self.M
            return min(1.0,score)

    def stats(self,prefix=''):
        """Return fill-rate statistics for mem/vol, optionally filtered by prefix."""
        vol_v = self.vol.values()
        mem_v = self.mem.values()
        out = {}
        out['m_empty'] = sum([1.0 if len(x)==0 else 0.0 for x in mem_v])/self.N
        out['m_not_empty'] = sum([1.0 if len(x)>0 else 0.0 for x in mem_v])/self.N
        out['m_full'] = sum([1.0 if len(x)==self.M else 0.0 for x in mem_v])/self.N
        out['v_empty'] = sum([1.0 if len(x)==0 else 0.0 for x in vol_v])/self.N
        out['v_not_empty'] = sum([1.0 if len(x)>0 else 0.0 for x in vol_v])/self.N
        out['v_full'] = sum([1.0 if len(x)==self.V else 0.0 for x in vol_v])/self.N
        out['m_avg'] = sum([1.0*len(x) for x in mem_v])/(self.N*self.M)
        out['v_avg'] = sum([1.0*len(x) for x in vol_v])/(self.N*self.V)
        return {k:v for k,v in out.items() if k.startswith(prefix)}
| mobarski/sandbox | rsm/v9le/v2.py | Python | mit | 5,289 | [
"NEURON"
] | 6ad0d342df384cc6daeaff3e83cf6a929ee2c9b33b39e410fe8a0709d98d9fe1 |
"""
Tools for the instructor dashboard
"""
import dateutil
import json
from django.conf import settings
from django.contrib.auth.models import User
from django.http import HttpResponseBadRequest
from django.utils.timezone import utc
from django.utils.translation import ugettext as _
from courseware.models import StudentModule
from xmodule.fields import Date
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from bulk_email.models import CourseAuthorization
DATE_FIELD = Date()
class DashboardError(Exception):
    """
    Errors arising from use of the instructor dashboard.
    """
    def response(self):
        """
        Generate an instance of HttpResponseBadRequest for this error.
        """
        # `unicode` is Python 2 only -- this module targets Python 2.
        error = unicode(self)
        return HttpResponseBadRequest(json.dumps({'error': error}))
def handle_dashboard_error(view):
    """
    Decorator which adds seamless DashboardError handling to a view. If a
    DashboardError is raised during view processing, an HttpResponseBadRequest
    is sent back to the client with JSON data about the error.
    """
    def wrapper(request, course_id):
        """
        Wrap the view.
        """
        # NOTE(review): consider functools.wraps(view) so the wrapped view
        # keeps its name/docstring for introspection.
        try:
            return view(request, course_id=course_id)
        except DashboardError, error:  # Python 2 except syntax
            return error.response()
    return wrapper
def bulk_email_is_enabled_for_course(course_id):
    """
    Staff can only send bulk email for a course if all the following conditions are true:
    1. Bulk email feature flag is on.
    2. It is a studio course.
    3. Bulk email is enabled for the course.
    """
    # NOTE(review): `== True` (rather than truthiness) is kept from the
    # original -- a truthy non-boolean flag value would disable the feature.
    bulk_email_enabled_globally = (settings.FEATURES['ENABLE_INSTRUCTOR_EMAIL'] == True)
    is_studio_course = (modulestore().get_modulestore_type(course_id) != ModuleStoreEnum.Type.xml)
    bulk_email_enabled_for_course = CourseAuthorization.instructor_email_enabled(course_id)
    # Collapse the redundant `if cond: return True / return False` into a
    # single boolean expression.
    return bool(bulk_email_enabled_globally and is_studio_course and bulk_email_enabled_for_course)
def strip_if_string(value):
    """Return *value* stripped of surrounding whitespace when it is a string;
    non-string values are passed through untouched."""
    return value.strip() if isinstance(value, basestring) else value
def get_student_from_identifier(unique_student_identifier):
    """
    Gets a student object using either an email address or username.

    Returns the student object associated with `unique_student_identifier`

    Raises User.DoesNotExist if no user object can be found.
    """
    identifier = strip_if_string(unique_student_identifier)
    # An "@" marks the identifier as an email address; anything else is
    # treated as a username.
    lookup_field = 'email' if "@" in identifier else 'username'
    return User.objects.get(**{lookup_field: identifier})
def parse_datetime(datestr):
    """
    Convert user input date string into an instance of `datetime.datetime` in
    UTC.

    Raises DashboardError (reported to the client as a 400) when the string
    cannot be parsed.
    """
    try:
        # NOTE(review): .replace(tzinfo=utc) discards any timezone present
        # in the input string instead of converting -- confirm intended.
        return dateutil.parser.parse(datestr).replace(tzinfo=utc)
    except ValueError:
        raise DashboardError(_("Unable to parse date: ") + datestr)
def find_unit(course, url):
    """
    Finds the unit (block, module, whatever the terminology is) with the given
    url in the course tree and returns the unit. Raises DashboardError if no
    unit is found.
    """
    def _search(node):
        """Depth-first search for the node whose location matches `url`."""
        if node.location.to_deprecated_string() == url:
            return node
        for child in node.get_children():
            match = _search(child)
            if match:
                return match
        return None

    unit = _search(course)
    if unit is None:
        raise DashboardError(_("Couldn't find module for url: {0}").format(url))
    return unit
def get_units_with_due_date(course):
    """
    Returns all top level units which have due dates. Does not return
    descendents of those nodes.
    """
    units = []

    def visit(node):
        """
        Visit a node.  Checks to see if node has a due date and appends to
        `units` if it does.  Otherwise recurses into children to search for
        nodes with due dates.
        """
        if getattr(node, 'due', None):
            # Stop at the first ancestor with a due date -- descendants of a
            # dated node are intentionally not collected.
            units.append(node)
        else:
            for child in node.get_children():
                visit(child)
    visit(course)
    #units.sort(key=_title_or_url)
    return units
def title_or_url(node):
    """
    Returns the `display_name` attribute of the passed in node of the course
    tree, if it has a non-empty one. Otherwise returns the node's url.
    """
    return (getattr(node, 'display_name', None)
            or node.location.to_deprecated_string())
def set_due_date_extension(course, unit, student, due_date):
    """
    Sets a due date extension.

    Stores `due_date` under the 'extended_due' key of the StudentModule
    state for `unit` and every descendant of `unit`, for the given
    `student` in `course`.
    """
    def set_due_date(node):
        """
        Recursively set the due date on a node and all of its children.
        """
        try:
            student_module = StudentModule.objects.get(
                student_id=student.id,
                course_id=course.id,
                module_state_key=node.location
            )
            state = json.loads(student_module.state)
        except StudentModule.DoesNotExist:
            # Normally, a StudentModule is created as a side effect of assigning
            # a value to a property in an XModule or XBlock which has a scope
            # of 'Scope.user_state'.  Here, we want to alter user state but
            # can't use the standard XModule/XBlock machinery to do so, because
            # it fails to take into account that the state being altered might
            # belong to a student other than the one currently logged in.  As a
            # result, in our work around, we need to detect whether the
            # StudentModule has been created for the given student on the given
            # unit and create it if it is missing, so we can use it to store
            # the extended due date.
            student_module = StudentModule.objects.create(
                student_id=student.id,
                course_id=course.id,
                module_state_key=node.location,
                module_type=node.category
            )
            # Freshly created module: start from an empty state dict.
            state = {}
        state['extended_due'] = DATE_FIELD.to_json(due_date)
        student_module.state = json.dumps(state)
        student_module.save()
        # Propagate the extension to every descendant of this node.
        for child in node.get_children():
            set_due_date(child)
    set_due_date(unit)
def dump_module_extensions(course, unit):
    """
    Dumps data about students with due date extensions for a particular module,
    specified by 'url', in a particular course.
    """
    header = [_("Username"), _("Full Name"), _("Extended Due Date")]
    rows = []
    modules = StudentModule.objects.filter(
        course_id=course.id,
        module_state_key=unit.location)
    for student_module in modules:
        raw_due = json.loads(student_module.state).get("extended_due")
        if not raw_due:
            # No extension recorded for this student on this unit.
            continue
        due = DATE_FIELD.from_json(raw_due).strftime("%Y-%m-%d %H:%M")
        rows.append(dict(zip(
            header,
            (student_module.student.username,
             student_module.student.profile.name,
             due))))
    # Present the table sorted by username.
    rows.sort(key=lambda row: row[header[0]])
    return {
        "header": header,
        "title": _("Users with due date extensions for {0}").format(
            title_or_url(unit)),
        "data": rows
    }
def dump_student_extensions(course, student):
    """
    Dumps data about the due date extensions granted for a particular student
    in a particular course.
    """
    header = [_("Unit"), _("Extended Due Date")]
    units_by_location = {
        unit.location: unit for unit in get_units_with_due_date(course)}
    rows = []
    modules = StudentModule.objects.filter(
        course_id=course.id,
        student_id=student.id)
    for student_module in modules:
        state = json.loads(student_module.state)
        # temporary hack: module_state_key is missing the run but units are
        # not. fix module_state_key
        location = student_module.module_state_key.map_into_course(
            student_module.course_id)
        if location not in units_by_location:
            continue
        raw_due = state.get("extended_due")
        if not raw_due:
            continue
        due = DATE_FIELD.from_json(raw_due).strftime("%Y-%m-%d %H:%M")
        rows.append(dict(zip(
            header, (title_or_url(units_by_location[location]), due))))
    return {
        "header": header,
        "title": _("Due date extensions for {0} {1} ({2})").format(
            student.first_name, student.last_name, student.username),
        "data": rows}
| LICEF/edx-platform | lms/djangoapps/instructor/views/tools.py | Python | agpl-3.0 | 8,753 | [
"VisIt"
] | 8ce9b8a43abeddf8e176a9d1290f9f591074b64df0e90da2963388d38b49e6db |
'''
Deep Residual Learning for Image Recognition, http://arxiv.org/abs/1512.03385
an example of a deep residual network for cifar10
commands & setups:
set following parameters in example/image-classification/train_model.py
momentum = 0.9,
wd = 0.0001,
initializer = mx.init.Xavier(rnd_type="gaussian", factor_type="in", magnitude=2.0)
set n=3(3 for 20 layers, n=9 for 56 layers) in the get_symbol function in example/image-classification/symbol_resnet-28-small.py
#first train the network with lr=0.1 for 80 epoch
python example/image-classification/train_cifar10.py --network resnet-28-small --num-examples 50000 --lr 0.1 --num-epoch 80 --model-prefix cifar10/resnet
#second train the network with lr=0.01 from epoch 81 to epoch 120, with lr=0.001 from epoch 121 to epoch 160
python example/image-classification/train_cifar10.py --network resnet-28-small --num-examples 50000 --model-prefix cifar10/resnet --load-epoch 80 --lr 0.01 --lr-factor 0.1 --lr-factor-epoch 40 --num-epoch 200
#in the paper, they train cifar10 for 160 epochs; I set num-epoch to 200 because I want to see whether it is useful to set lr=0.0001
#since it needs 160 epochs, please be patient
#and I use batch-size of 128, train the models on one GPU
accuracy:
for 20 layers resnet, accuracy=0.905+, 0.9125 in the paper
for 32 layers resnet, accuracy=0.908+, 0.9239 in the paper
for 56 layers resnet, accuracy=0.915+, 0.9303 in the paper
though the numbers are a little bit lower than the paper, but it does obey the rule: the deeper, the better
differences to the paper on cifar10 network setup
1. in the paper, the author use identity shortcut when dealing with increasing dimensions, while I use 1*1 convolutions to deal with it
2. in the paper, 4 pixels are padded on each side and a 32*32 crop is randomly sampled from the padded image, while I use the dataset provided by mxnet, so the input is 28*28, as a results for 3 different kinds of 2n layers output map sizes are 28*28, 14*14, 7*7, instead of 32*32, 16*16, 8*8 in the paper.
the above two reasons might explain why the accuracy is a bit lower than in the paper, I suppose.
Of course, there might be other reasons (for example the true network architecture may be different from my script, since my script is just my understanding of the paper); if you find out, please tell me, declanxu@gmail.com or declanxu@126.com, thanks
'''
import mxnet as mx
import find_mxnet
def conv_factory(data, num_filter, kernel, stride, pad, act_type = 'relu', conv_type = 0):
    """
    Build a Convolution -> BatchNorm (-> Activation) sub-graph.

    :param data: input mx.symbol
    :param num_filter: number of convolution filters
    :param kernel: convolution kernel size, e.g. (3, 3)
    :param stride: convolution stride, e.g. (1, 1)
    :param pad: convolution padding, e.g. (1, 1)
    :param act_type: activation type, only used when ``conv_type == 0``
    :param conv_type: 0 -> conv + batch-norm + activation,
                      1 -> conv + batch-norm only
    :return: the resulting mx.symbol
    :raises ValueError: if ``conv_type`` is neither 0 nor 1 (the previous
        implementation silently returned ``None`` in that case)
    """
    if conv_type not in (0, 1):
        raise ValueError("conv_type must be 0 or 1, got %r" % (conv_type,))
    # The convolution + batch-norm prefix is common to both variants.
    conv = mx.symbol.Convolution(data=data, num_filter=num_filter,
                                 kernel=kernel, stride=stride, pad=pad)
    bn = mx.symbol.BatchNorm(data=conv)
    if conv_type == 0:
        return mx.symbol.Activation(data=bn, act_type=act_type)
    return bn
def residual_factory(data, num_filter, dim_match):
    """
    Build one residual unit of two 3x3 convolutions plus a shortcut.

    When ``dim_match`` is True an identity shortcut is used; otherwise the
    first convolution halves the spatial size (stride 2) and a 1x1 projection
    convolution brings the shortcut to the new shape, as in the paper.
    """
    first_stride = (1, 1) if dim_match else (2, 2)
    conv1 = conv_factory(data=data, num_filter=num_filter, kernel=(3,3),
                         stride=first_stride, pad=(1,1), act_type='relu',
                         conv_type=0)
    conv2 = conv_factory(data=conv1, num_filter=num_filter, kernel=(3,3),
                         stride=(1,1), pad=(1,1), conv_type=1)
    if dim_match:
        shortcut = data  # identity shortcut, dimensions already agree
    else:
        # adopt project method in the paper when dimension increased
        shortcut = conv_factory(data=data, num_filter=num_filter, kernel=(1,1),
                                stride=(2,2), pad=(0,0), conv_type=1)
    return mx.symbol.Activation(data=shortcut + conv2, act_type='relu')
def residual_net(data, n):
    """
    Stack three stages of ``n`` residual units each, with 16, 32 and 64
    filters respectively.  The first unit of stages two and three changes
    the dimensions (downsampling + widening); all other units keep them.
    """
    for stage, num_filter in enumerate((16, 32, 64)):
        for unit in range(n):
            dims_agree = (stage == 0) or (unit != 0)
            data = residual_factory(data=data, num_filter=num_filter,
                                    dim_match=dims_agree)
    return data
def get_symbol(num_classes = 10):
    """
    Assemble the full cifar10 residual-network symbol.

    With ``n`` residual units per stage the network has 6*n+2 layers
    (n=3 -> 20 layers, n=9 -> 56 layers).
    """
    n = 3  # set n = 3 means get a model with 3*6+2=20 layers, set n = 9 means 9*6+2=56 layers
    body = conv_factory(data=mx.symbol.Variable(name='data'), num_filter=16,
                        kernel=(3,3), stride=(1,1), pad=(1,1),
                        act_type='relu', conv_type=0)
    body = residual_net(body, n)
    pool = mx.symbol.Pooling(data=body, kernel=(7,7), pool_type='avg')
    flat = mx.symbol.Flatten(data=pool, name='flatten')
    fc = mx.symbol.FullyConnected(data=flat, num_hidden=num_classes, name='fc1')
    return mx.symbol.SoftmaxOutput(data=fc, name='softmax')
| aaalgo/picpac | examples/mxnet/symbol_resnet-28-small.py | Python | bsd-2-clause | 5,340 | [
"Gaussian"
] | 9cc63d0e766d9f37034977378a384d86bce24c142aff90377f0c1118917b5bb4 |
# Copyright (c) 2009-2021 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause
# License.
"""Implement BoxResize."""
from hoomd.operation import Updater
from hoomd.box import Box
from hoomd.data.parameterdicts import ParameterDict
from hoomd.data.typeconverter import OnlyTypes, box_preprocessing
from hoomd.variant import Variant, Constant
from hoomd import _hoomd
from hoomd.filter import ParticleFilter, All
class BoxResize(Updater):
    """Linearly interpolate the simulation box between two boxes.

    While part of a `hoomd.Simulation` ``updater`` list, this updater resizes
    the box along the line between `box1` and `box2`.  The interpolation
    parameter is taken from ``variant``: at the variant's minimum the box is
    `box1`, at its maximum it is `box2`, and every value in between yields a
    box whose three lengths and tilt factors are interpolated between the two.

    Note:
        The passed `Variant` must be bounded (i.e. it must have a true minimum
        and maximum) or the behavior of the updater is undefined.

    Note:
        Currently for MPI simulations the rescaling of particles does not work
        properly in HPMC.

    Args:
        trigger (hoomd.trigger.Trigger): The trigger to activate this updater.
        box1 (hoomd.Box): The box associated with the minimum of the
            passed variant.
        box2 (hoomd.Box): The box associated with the maximum of the
            passed variant.
        variant (hoomd.variant.Variant): A variant used to interpolate between
            the two boxes.
        filter (hoomd.filter.ParticleFilter): The subset of particle positions
            to update.

    Attributes:
        box1 (hoomd.Box): The box associated with the minimum of the
            passed variant.
        box2 (hoomd.Box): The box associated with the maximum of the
            passed variant.
        variant (hoomd.variant.Variant): A variant used to interpolate between
            the two boxes.
        trigger (hoomd.trigger.Trigger): The trigger to activate this updater.
        filter (hoomd.filter.ParticleFilter): The subset of particles to
            update.
    """

    def __init__(self, trigger, box1, box2, variant, filter=All()):
        param_dict = ParameterDict(
            box1=OnlyTypes(Box, preprocess=box_preprocessing),
            box2=OnlyTypes(Box, preprocess=box_preprocessing),
            variant=Variant,
            filter=ParticleFilter,
        )
        param_dict['box1'] = box1
        param_dict['box2'] = box2
        param_dict['variant'] = variant
        param_dict['trigger'] = trigger
        param_dict['filter'] = filter
        self._param_dict.update(param_dict)
        super().__init__(trigger)

    def _attach(self):
        # Resolve the particle filter into a concrete group before handing
        # everything over to the C++ updater.
        sim_state = self._simulation.state
        group = sim_state._get_group(self.filter)
        self._cpp_obj = _hoomd.BoxResizeUpdater(sim_state._cpp_sys_def,
                                                self.box1, self.box2,
                                                self.variant, group)
        super()._attach()

    def get_box(self, timestep):
        """Get the box for a given timestep.

        Args:
            timestep (int): The timestep to use for determining the resized
                box.

        Returns:
            Box: The box used at the given timestep.
            `None` before the first call to `Simulation.run`.
        """
        if not self._attached:
            return None
        timestep = int(timestep)
        if timestep < 0:
            raise ValueError("Timestep must be a non-negative integer.")
        return Box._from_cpp(self._cpp_obj.get_current_box(timestep))

    @staticmethod
    def update(state, box, filter=All()):
        """Immediately scale the particle in the system state to the given box.

        Args:
            state (State): System state to scale.
            box (Box): New box.
            filter (hoomd.filter.ParticleFilter): The subset of particles to
                update.
        """
        group = state._get_group(filter)
        updater = _hoomd.BoxResizeUpdater(state._cpp_sys_def, state.box, box,
                                          Constant(1), group)
        communicator = state._simulation._system_communicator
        if communicator is not None:
            updater.setCommunicator(communicator)
        updater.update(state._simulation.timestep)
| joaander/hoomd-blue | hoomd/update/box_resize.py | Python | bsd-3-clause | 4,573 | [
"HOOMD-blue"
] | 56c329db4ed46de41ba1f0b069f9aed7d81ef0394e126e41b98fcb612db6ca0c |
"""
This class a wrapper around elasticsearch-py.
It is used to query Elasticsearch instances.
"""
from datetime import datetime
from datetime import timedelta
import certifi
import copy
import functools
import json
try:
from opensearchpy import OpenSearch as Elasticsearch
from opensearch_dsl import Search, Q, A
from opensearchpy.exceptions import ConnectionError, TransportError, NotFoundError, RequestError
from opensearchpy.helpers import BulkIndexError, bulk
except ImportError:
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search, Q, A
from elasticsearch.exceptions import ConnectionError, TransportError, NotFoundError, RequestError
from elasticsearch.helpers import BulkIndexError, bulk
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities import Time, DErrno
from DIRAC.FrameworkSystem.Client.BundleDeliveryClient import BundleDeliveryClient
sLog = gLogger.getSubLogger(__name__)
def ifConnected(method):
    """Decorator that short-circuits `method` with an S_ERROR when the
    wrapped object has no live connection (``self._connected`` is falsy)."""

    @functools.wraps(method)
    def inner(self, *args, **kwargs):
        if not self._connected:
            sLog.error("Not connected")
            return S_ERROR("Not connected")
        return method(self, *args, **kwargs)

    return inner
def generateDocs(data, withTimeStamp=True):
    """Generator for fast bulk indexing, yields docs

    Normalises the ``timestamp`` field of each document to integer epoch
    milliseconds; documents without a usable timestamp get the current UTC
    time instead.

    :param list data: list of dictionaries
    :param bool withTimeStamp: add the timestamps to the docs

    :return: doc
    """
    # deepcopy so the caller's dictionaries are never mutated.
    for doc in copy.deepcopy(data):
        if withTimeStamp:
            if "timestamp" not in doc:
                sLog.warn("timestamp is not given")
            # if the timestamp is not provided, we use the current utc time.
            timestamp = doc.get("timestamp", int(Time.toEpoch()))
            try:
                if isinstance(timestamp, datetime):
                    # NOTE(review): "%s" is a platform-dependent strftime
                    # directive (not available e.g. on Windows) and uses the
                    # local timezone -- confirm deployment targets support it.
                    doc["timestamp"] = int(timestamp.strftime("%s")) * 1000
                elif isinstance(timestamp, str):
                    # Only this exact format is accepted; any other string
                    # falls through to the ValueError handler below.
                    timeobj = datetime.strptime(timestamp, "%Y-%m-%d %H:%M:%S.%f")
                    doc["timestamp"] = int(timeobj.strftime("%s")) * 1000
                else:  # we assume the timestamp is an unix epoch time (integer).
                    doc["timestamp"] = timestamp * 1000
            except (TypeError, ValueError) as e:
                # in case we are not able to convert the timestamp to epoch time....
                # Deliberate best-effort fallback: use "now" rather than
                # dropping the document.
                sLog.error("Wrong timestamp", e)
                doc["timestamp"] = int(Time.toEpoch()) * 1000
        sLog.debug("yielding %s" % doc)
        yield doc
class ElasticSearchDB(object):
    """
    .. class:: ElasticSearchDB

    Thin wrapper around the elasticsearch / opensearch Python client used to
    query and administer an Elasticsearch instance.

    :param str url: the url to the database for example: el.cern.ch:9200
    :param str gDebugFile: is used to save the debug information to a file
    :param int timeout: the default time out to Elasticsearch
    :param int RESULT_SIZE: The number of data points which will be returned by the query.
    """

    __url = ""  # full connection URL, built in the constructor
    __timeout = 120  # default client request timeout (seconds)
    clusterName = ""  # cluster name, discovered after a successful ping/info
    RESULT_SIZE = 10000  # maximum number of data points returned by a query

    ########################################################################
    def __init__(
        self,
        host,
        port,
        user=None,
        password=None,
        indexPrefix="",
        useSSL=True,
        useCRT=False,
        ca_certs=None,
        client_key=None,
        client_cert=None,
    ):
        """c'tor

        :param self: self reference
        :param str host: name of the database for example: MonitoringDB
        :param str port: The full name of the database for example: 'Monitoring/MonitoringDB'
        :param str user: user name to access the db
        :param str password: if the db is password protected we need to provide a password
        :param str indexPrefix: it is the indexPrefix used to get all indexes
        :param bool useSSL: We can disable using secure connection. By default we use secure connection.
        :param bool useCRT: Use certificates.
        :param str ca_certs: CA certificates bundle.
        :param str client_key: Client key.
        :param str client_cert: Client certificate.
        """
        self.__indexPrefix = indexPrefix
        self._connected = False
        # Build the connection URL; credentials are embedded when provided.
        if user and password:
            sLog.debug("Specified username and password")
            if port:
                self.__url = "https://%s:%s@%s:%d" % (user, password, host, port)
            else:
                self.__url = "https://%s:%s@%s" % (user, password, host)
        else:
            sLog.debug("Username and password not specified")
            if port:
                self.__url = "http://%s:%d" % (host, port)
            else:
                self.__url = "http://%s" % host
        if port:
            sLog.verbose("Connecting to %s:%s, useSSL = %s" % (host, port, useSSL))
        else:
            sLog.verbose("Connecting to %s, useSSL = %s" % (host, useSSL))
        if useSSL:
            if ca_certs:
                casFile = ca_certs
            else:
                # No CA bundle supplied: try to fetch DIRAC's own CAs and
                # fall back to certifi's bundle on failure.
                bd = BundleDeliveryClient()
                retVal = bd.getCAs()
                casFile = None
                if not retVal["OK"]:
                    sLog.error("CAs file does not exists:", retVal["Message"])
                    casFile = certifi.where()
                else:
                    casFile = retVal["Value"]
            self.client = Elasticsearch(
                self.__url, timeout=self.__timeout, use_ssl=True, verify_certs=True, ca_certs=casFile
            )
        elif useCRT:
            self.client = Elasticsearch(
                self.__url,
                timeout=self.__timeout,
                use_ssl=True,
                verify_certs=True,
                ca_certs=ca_certs,
                client_cert=client_cert,
                client_key=client_key,
            )
        else:
            self.client = Elasticsearch(self.__url, timeout=self.__timeout)
        # Before we use the database we try to connect
        # and retrieve the cluster name
        try:
            if self.client.ping():
                # Returns True if the cluster is running, False otherwise
                result = self.client.info()
                self.clusterName = result.get("cluster_name", " ")  # pylint: disable=no-member
                sLog.info("Database info\n", json.dumps(result, indent=4))
                self._connected = True
            else:
                sLog.error("Cannot ping ElasticsearchDB!")
        except ConnectionError as e:
            sLog.error(repr(e))

    ########################################################################
    def getIndexPrefix(self):
        """
        It returns the DIRAC setup.
        """
        return self.__indexPrefix

    ########################################################################
    @ifConnected
    def query(self, index, query):
        """Executes a query and returns its result (uses ES DSL language).

        :param self: self reference
        :param str index: index name
        :param dict query: It is the query in ElasticSearch DSL language

        :return: S_OK with the raw search result, or S_ERROR on request error
        """
        try:
            esDSLQueryResult = self.client.search(index=index, body=query)
            return S_OK(esDSLQueryResult)
        except RequestError as re:
            return S_ERROR(re)

    @ifConnected
    def update(self, index, query=None, updateByQuery=True, id=None):
        """Executes an update of a document, and returns S_OK/S_ERROR

        :param self: self reference
        :param str index: index name
        :param dict query: It is the query in ElasticSearch DSL language
        :param bool updateByQuery: A bool to determine update by query or index values using index function.
        :param int id: ID for the document to be created.
        """
        sLog.debug("Updating %s with %s, updateByQuery=%s, id=%s" % (index, query, updateByQuery, id))
        if not index or not query:
            return S_ERROR("Missing index or query")
        try:
            if updateByQuery:
                esDSLQueryResult = self.client.update_by_query(index=index, body=query)
            else:
                esDSLQueryResult = self.client.index(index=index, body=query, id=id)
            return S_OK(esDSLQueryResult)
        except RequestError as re:
            return S_ERROR(re)

    @ifConnected
    def _Search(self, indexname):
        """
        it returns the object which can be used for retrieving certain value from the DB
        """
        return Search(using=self.client, index=indexname)

    ########################################################################
    def _Q(self, name_or_query="match", **params):
        """
        It is a wrapper to ElasticDSL Query module used to create a query object.

        :param str name_or_query is the type of the query
        """
        return Q(name_or_query, **params)

    def _A(self, name_or_agg, aggsfilter=None, **params):
        """
        It is a wrapper to ElasticDSL aggregation module, used to create an aggregation
        """
        return A(name_or_agg, aggsfilter, **params)

    ########################################################################
    @ifConnected
    def getIndexes(self, indexName=None):
        """
        It returns the available indexes...

        :param str indexName: optional prefix; defaults to the instance's
                              index prefix.
        """
        if not indexName:
            indexName = self.__indexPrefix
        sLog.debug("Getting indices alias of %s" % indexName)
        # we only return indexes which belong to a specific prefix for example 'lhcb-production' or 'dirac-production etc.
        return list(self.client.indices.get_alias("%s*" % indexName))

    ########################################################################
    @ifConnected
    def getDocTypes(self, indexName):
        """
        Returns mappings, by index.

        :param str indexName: is the name of the index...
        :return: S_OK or S_ERROR
        """
        result = []
        try:
            sLog.debug("Getting mappings for ", indexName)
            result = self.client.indices.get_mapping(indexName)
        except Exception as e:  # pylint: disable=broad-except
            sLog.exception()
            return S_ERROR(e)
        doctype = ""
        for indexConfig in result:
            if not result[indexConfig].get("mappings"):
                # there is a case when the mapping exits and the value is None...
                # this is usually an empty index or a corrupted index.
                sLog.warn("Index does not have mapping %s!" % indexConfig)
                continue
            if result[indexConfig].get("mappings"):
                doctype = result[indexConfig]["mappings"]
                break  # we suppose the mapping of all indexes are the same...
        if not doctype:
            return S_ERROR("%s does not exists!" % indexName)
        return S_OK(doctype)

    ########################################################################
    @ifConnected
    def existingIndex(self, indexName):
        """
        Checks the existence of an index, by its name

        :param str indexName: the name of the index
        :returns: S_OK/S_ERROR if the request is successful
        """
        sLog.debug("Checking existance of index %s" % indexName)
        try:
            return S_OK(self.client.indices.exists(indexName))
        except TransportError as e:
            sLog.exception()
            return S_ERROR(e)

    ########################################################################
    @ifConnected
    def createIndex(self, indexPrefix, mapping=None, period="day"):
        """
        Create an index (idempotent: an already-existing index is returned
        as a success).

        :param str indexPrefix: it is the index name.
        :param dict mapping: the configuration of the index.
        :param str period: We can specify, which kind of index will be created.
                           Currently only daily and monthly indexes are supported.
        """
        if period is not None:
            fullIndex = self.generateFullIndexName(indexPrefix, period)  # we have to create an index each period...
        else:
            sLog.warn("The period is not provided, so using non-periodic indexes names")
            fullIndex = indexPrefix
        res = self.existingIndex(fullIndex)
        if not res["OK"]:
            return res
        elif res["Value"]:
            # Index already exists: nothing to do.
            return S_OK(fullIndex)
        try:
            sLog.info("Create index: ", fullIndex + str(mapping))
            self.client.indices.create(index=fullIndex, body={"mappings": mapping})  # ES7
            return S_OK(fullIndex)
        except Exception as e:  # pylint: disable=broad-except
            sLog.error("Can not create the index:", repr(e))
            return S_ERROR("Can not create the index")

    @ifConnected
    def deleteIndex(self, indexName):
        """
        Delete an index.  A missing index is treated as a success.

        :param str indexName: the name of the index to be deleted...
        """
        sLog.info("Deleting index", indexName)
        try:
            retVal = self.client.indices.delete(indexName)
        except NotFoundError:
            sLog.warn("Index does not exist", indexName)
            return S_OK("Noting to delete")
        except ValueError as e:
            return S_ERROR(DErrno.EVALUE, e)
        if retVal.get("acknowledged"):
            # if the value exists and the value is not None
            sLog.info("Deleted index", indexName)
            return S_OK(indexName)
        return S_ERROR(retVal)

    def index(self, indexName, body=None, docID=None):
        """
        Index (insert/overwrite) a single document.

        :param str indexName: the name of the index to be used
        :param dict body: the data which will be indexed (basically the JSON)
        :param int id: optional document id
        :return: the index name in case of success.
        """
        # NOTE(review): unlike the other public methods this one is not
        # decorated with @ifConnected -- confirm whether that is intentional.
        sLog.debug("Indexing in %s body %s, id=%s" % (indexName, body, docID))
        if not indexName or not body:
            return S_ERROR("Missing index or body")
        try:
            res = self.client.index(index=indexName, body=body, id=docID)
        except (RequestError, TransportError) as e:
            sLog.exception()
            return S_ERROR(e)
        if res.get("created") or res.get("result") in ("created", "updated"):
            # the created index exists but the value can be None.
            return S_OK(indexName)
        return S_ERROR(res)

    @ifConnected
    def bulk_index(self, indexPrefix, data=None, mapping=None, period="day", withTimeStamp=True):
        """
        Bulk-insert a list of documents, creating the periodic index first
        when it does not exist yet.

        :param str indexPrefix: index name.
        :param list data: contains a list of dictionary
        :param dict mapping: the mapping used by elasticsearch
        :param str period: Accepts 'day' and 'month'. We can specify which kind of indexes will be created.
        :param bool withTimeStamp: add timestamp to data, if not there already.

        :returns: S_OK/S_ERROR
        """
        sLog.verbose("Bulk indexing", "%d records will be inserted" % len(data))
        if mapping is None:
            mapping = {}
        if period is not None:
            indexName = self.generateFullIndexName(indexPrefix, period)
        else:
            indexName = indexPrefix
        sLog.debug("Bulk indexing into %s of %s" % (indexName, data))
        res = self.existingIndex(indexName)
        if not res["OK"]:
            return res
        if not res["Value"]:
            retVal = self.createIndex(indexPrefix, mapping, period)
            if not retVal["OK"]:
                return retVal
        try:
            res = bulk(client=self.client, index=indexName, actions=generateDocs(data, withTimeStamp))
        except (BulkIndexError, RequestError) as e:
            sLog.exception()
            return S_ERROR(e)
        if res[0] == len(data):
            # we have inserted all documents...
            return S_OK(len(data))
        else:
            return S_ERROR(res)

    @ifConnected
    def getUniqueValue(self, indexName, key, orderBy=False):
        """
        :param str indexName: the name of the index which will be used for the query
        :param dict orderBy: it is a dictionary in case we want to order the result {key:'desc'} or {key:'asc'}
        :returns: a list of unique value for a certain key from the dictionary.
        """
        query = self._Search(indexName)

        # Only documents from the last 30 days are considered.
        endDate = datetime.utcnow()
        startDate = endDate - timedelta(days=30)
        timeFilter = self._Q(
            "range",
            timestamp={
                "lte": int(Time.toEpoch(endDate)) * 1000,
                "gte": int(Time.toEpoch(startDate)) * 1000,
            },
        )
        query = query.filter("bool", must=timeFilter)
        if orderBy:
            query.aggs.bucket(key, "terms", field=key, size=self.RESULT_SIZE, order=orderBy).metric(
                key, "cardinality", field=key
            )
        else:
            query.aggs.bucket(key, "terms", field=key, size=self.RESULT_SIZE).metric(key, "cardinality", field=key)
        try:
            query = query.extra(size=self.RESULT_SIZE)  # do not need the raw data.
            sLog.debug("Query", query.to_dict())
            result = query.execute()
        except TransportError as e:
            return S_ERROR(e)
        values = []
        for bucket in result.aggregations[key].buckets:
            values += [bucket["key"]]
        del query
        sLog.debug("Nb of unique rows retrieved", len(values))
        return S_OK(values)

    def pingDB(self):
        """
        Try to connect to the database

        :return: S_OK(TRUE/FALSE)
        """
        connected = False
        try:
            connected = self.client.ping()
        except ConnectionError as e:
            sLog.error("Cannot connect to the db", repr(e))
        return S_OK(connected)

    @ifConnected
    def deleteByQuery(self, indexName, query):
        """
        Delete data by query (careful!)

        :param str indexName: the name of the index
        :param str query: the JSON-formatted query for which we want to issue the delete
        """
        try:
            self.client.delete_by_query(index=indexName, body=query)
        except Exception as inst:
            sLog.error("ERROR: Couldn't delete data")
            return S_ERROR(inst)
        return S_OK("Successfully deleted data from index %s" % indexName)

    @staticmethod
    def generateFullIndexName(indexName, period):
        """
        Given an index prefix we create the actual index name.

        :param str indexName: it is the name of the index
        :param str period: We can specify which kind of indexes will be created (day, week, month, year, null).
        :returns: string with full index name
        """
        # if the period is not correct, we use no-period indexes (same as "null").
        if period.lower() not in ["day", "week", "month", "year", "null"]:
            sLog.error("Period is not correct: ", period)
            return indexName
        elif period.lower() == "day":
            today = datetime.today().strftime("%Y-%m-%d")
            return "%s-%s" % (indexName, today)
        elif period.lower() == "week":
            # NOTE(review): the ISO week number alone (without the year) means
            # weekly indexes from different years share a name -- confirm this
            # is the intended behaviour.
            week = datetime.today().isocalendar()[1]
            return "%s-%s" % (indexName, week)
        elif period.lower() == "month":
            month = datetime.today().strftime("%Y-%m")
            return "%s-%s" % (indexName, month)
        elif period.lower() == "year":
            year = datetime.today().strftime("%Y")
            return "%s-%s" % (indexName, year)
        elif period.lower() == "null":
            return indexName
| DIRACGrid/DIRAC | src/DIRAC/Core/Utilities/ElasticSearchDB.py | Python | gpl-3.0 | 19,612 | [
"DIRAC"
] | 8537fd55fcd14d67fffa71a6df6106a269b2da50c4aabaa8691cb0f82c4d50cf |
#
# Differential evolution MCMC
#
# This file is part of PINTS (https://github.com/pints-team/pints/) which is
# released under the BSD 3-clause license. See accompanying LICENSE.md for
# copyright notice and full license details.
#
import pints
import numpy as np
import warnings
class DifferentialEvolutionMCMC(pints.MultiChainMCMC):
r"""
Uses differential evolution MCMC as described in [1]_ to perform posterior
sampling from the posterior.
In each step of the algorithm ``n`` chains are evolved using the evolution
equation::
x_proposed = x[i,r] + gamma * (X[i,r1] - x[i,r2]) + epsilon
where ``r1`` and ``r2`` are random chain indices chosen (without
replacement) from the ``n`` available chains, which must not equal ``i`` or
each other, where ``i`` indicates the current time step, and
``epsilon ~ N(0,b)`` where ``d`` is the dimensionality of the parameter
vector.
If ``x_proposed / x[i,r] > u ~ U(0,1)``, then
``x[i+1,r] = x_proposed``; otherwise, ``x[i+1,r] = x[i]``.
Extends :class:`MultiChainMCMC`.
.. note::
This sampler requires a number of chains :math:`n \ge 3`, and
recommends :math:`n \ge 1.5 d`.
References
----------
.. [1] "A Markov Chain Monte Carlo version of the genetic algorithm
Differential Evolution: easy Bayesian computing for real parameter
spaces". Cajo J. F. Ter Braak (2006) Statistical Computing
https://doi.org/10.1007/s11222-006-8769-1
"""
def __init__(self, chains, x0, sigma0=None):
super(DifferentialEvolutionMCMC, self).__init__(chains, x0, sigma0)
# Need at least 3 chains
if self._n_chains < 3:
raise ValueError('Need at least 3 chains.')
# Warn user against using too few chains
if self._n_chains < 1.5 * self._n_parameters:
warnings.warn('This method should be run with n_chains >= '
'1.5 * n_parameters')
# Set initial state
self._running = False
# Current points and proposed points
self._current = None
self._current_log_pdfs = None
self._proposed = None
#
# Default settings
#
# Gamma
self._gamma = 2.38 / np.sqrt(2 * self._n_parameters)
# Gamma switch to 1 every (below) steps to help find
# modes
self._gamma_switch_rate = 10
# Error scale width
self._b = 0.001
# Mean used for scaling error process
self._mu = np.mean(self._x0, axis=0)
# Gaussian error vs uniform
self._gaussian_error = True
# Relative scaling
self._relative_scaling = True
def ask(self):
""" See :meth:`pints.MultiChainMCMC.ask()`. """
# Initialise on first call
if not self._running:
self._initialise()
# Propose new points
if self._proposed is None:
# set gamma to 1
if self._iter_count % self._gamma_switch_rate == 0:
self._gamma = 1
self._iter_count += 1
self._proposed = np.zeros(self._current.shape)
for j in range(self._n_chains):
if self._gaussian_error:
error = np.random.normal(0, self._b_star, self._mu.shape)
else:
error = np.random.uniform(-self._b_star, self._b_star,
self._mu.shape)
r1, r2 = self._r_draw(j, self._n_chains)
self._proposed[j] = (
self._current[j]
+ self._gamma * (self._current[r1] - self._current[r2])
+ error
)
# reset gamma
self._gamma = 2.38 / np.sqrt(2 * self._n_parameters)
# Set as read only
self._proposed.setflags(write=False)
# Return proposed points
return self._proposed
def gamma(self):
"""
Returns the coefficient ``gamma`` used in updating the position of each
chain.
"""
return self._gamma
def gamma_switch_rate(self):
"""
Returns the number of steps between iterations where gamma is set to 1
(then reset immediately afterwards).
"""
return self._gamma_switch_rate
def gaussian_error(self):
"""
Returns whether a Gaussian versus uniform error process is used.
"""
return self._gaussian_error
    def _initialise(self):
        """
        Initialises the routine before the first iteration: proposes the
        user-supplied starting points, fixes the scaling mean ``mu`` and the
        error-process width ``b*``, and marks the sampler as running.
        """
        if self._running:
            raise RuntimeError('Already initialised.')
        # Propose x0 as first points
        self._current = None
        self._current_log_pdfs = None
        self._proposed = self._x0
        self._proposed.setflags(write=False)
        # Set mu
        # TODO: Should this be a user setting?
        self._mu = np.mean(self._x0, axis=0)
        # Use relative or absolute scaling of error process
        if self._relative_scaling:
            # np.abs keeps the scale non-negative even if the mean of the
            # starting points is negative in some dimension
            self._b_star = np.abs(self._mu * self._b)
        else:
            self._b_star = np.repeat(self._b, self._n_parameters)
        # Gamma set to 1 counter
        self._iter_count = 0
        # Update sampler state
        self._running = True
    def n_hyper_parameters(self):
        """ See :meth:`TunableMethod.n_hyper_parameters()`.

        The five hyper-parameters are documented in
        :meth:`set_hyper_parameters`.
        """
        return 5
    def name(self):
        """ See :meth:`pints.MCMCSampler.name()`. """
        return 'Differential Evolution MCMC'
def _r_draw(self, i, num_chains):
"""
Chooses two chain indexes uniformly at random such that they are
not the same nor do they equal `i`.
"""
indexes = list(range(num_chains))
indexes.pop(i)
r1, r2 = np.random.choice(indexes, 2, replace=False)
return r1, r2
    def relative_scaling(self):
        """
        Returns whether an error process whose standard deviation scales
        relatively is used (False indicates absolute scale).
        """
        return self._relative_scaling
    def scale_coefficient(self):
        """
        Returns the scale coefficient ``b`` of the error process used in
        updating the position of each chain.
        """
        return self._b
def set_gamma(self, gamma):
"""
Sets the coefficient ``gamma`` used in updating the position of each
chain.
"""
gamma = float(gamma)
if gamma < 0:
raise ValueError('Gamma must be non-negative.')
self._gamma = gamma
def set_gamma_switch_rate(self, gamma_switch_rate):
"""
Sets the number of steps between iterations where gamma is set to 1
(then reset immediately afterwards).
"""
if gamma_switch_rate < 1:
raise ValueError('The interval number of steps between ' +
' gamma=1 iterations must equal or exceed 1.')
if not isinstance(gamma_switch_rate, int):
raise ValueError('The interval number of steps between ' +
' gamma=1 iterations must be an integer.')
self._gamma_switch_rate = gamma_switch_rate
def set_gaussian_error(self, gaussian_error):
"""
If ``True`` sets the error process to be a gaussian error,
``N(0, b*)``; if ``False``, it uses a uniform error ``U(-b*, b*)``;
where ``b* = b`` if absolute scaling used and ``b* = mu * b`` if
relative scaling is used instead.
"""
gaussian_error = bool(gaussian_error)
self._gaussian_error = gaussian_error
    def set_hyper_parameters(self, x):
        """
        The hyper-parameter vector is ``[gamma, gaussian_scale_coefficient,
        gamma_switch_rate, gamma_switch_rate, gaussian_error,
        relative_scaling]``... specifically ``[gamma,
        gaussian_scale_coefficient, gamma_switch_rate, gaussian_error,
        relative_scaling]``.
        See :meth:`TunableMethod.set_hyper_parameters()`.
        """
        self.set_gamma(x[0])
        self.set_scale_coefficient(x[1])
        # x[2] may arrive as a float (e.g. from an optimiser); convert to int
        # here so set_gamma_switch_rate's integer check passes.
        try:
            int_x2 = int(x[2])
        except (ValueError, TypeError):
            raise ValueError('The interval number of steps between ' +
                             'gamma=1 iterations must be convertable ' +
                             'to an integer.')
        self.set_gamma_switch_rate(int_x2)
        self.set_gaussian_error(x[3])
        self.set_relative_scaling(x[4])
def set_relative_scaling(self, relative_scaling):
"""
Sets whether to use an error process whose standard deviation scales
relatively (``scale = self._mu * self_b``) or absolutely
(``scale = self._b`` in all dimensions).
"""
relative_scaling = bool(relative_scaling)
self._relative_scaling = relative_scaling
if self._relative_scaling:
self._b_star = self._mu * self._b
else:
self._b_star = np.repeat(self._b, self._n_parameters)
def set_scale_coefficient(self, b):
"""
Sets the scale coefficient ``b`` of the error process used in updating
the position of each chain.
"""
b = float(b)
if b < 0:
raise ValueError('Scale coefficient must be non-negative.')
self._b = b
def tell(self, proposed_log_pdfs):
""" See :meth:`pints.MultiChainMCMC.tell()`. """
# Check if we had a proposal
if self._proposed is None:
raise RuntimeError('Tell called before proposal was set.')
# Ensure proposed_log_pdfs are numpy array
proposed_log_pdfs = np.array(proposed_log_pdfs)
# First points?
if self._current is None:
if not np.all(np.isfinite(proposed_log_pdfs)):
raise ValueError(
'Initial points for MCMC must have finite logpdf.')
# Accept
self._current = self._proposed
self._current_log_pdfs = proposed_log_pdfs
self._current_log_pdfs.setflags(write=False)
# Clear proposal
self._proposed = None
# Return first samples for chains
accepted = np.array([True] * self._n_chains)
return self._current, self._current_log_pdfs, accepted
# Perform iteration
next = np.array(self._current, copy=True)
next_log_pdfs = np.array(self._current_log_pdfs, copy=True)
# Sample uniform numbers
u = np.log(np.random.uniform(size=self._n_chains))
# Get chains to be updated
i = u < (proposed_log_pdfs - self._current_log_pdfs)
# Update
next[i] = self._proposed[i]
next_log_pdfs[i] = proposed_log_pdfs[i]
self._current = next
self._current_log_pdfs = next_log_pdfs
self._current.setflags(write=False)
self._current_log_pdfs.setflags(write=False)
# Clear proposal
self._proposed = None
# Return samples to add to chains
return self._current, self._current_log_pdfs, i
| martinjrobins/hobo | pints/_mcmc/_differential_evolution.py | Python | bsd-3-clause | 11,031 | [
"Gaussian"
] | 77def2541f1aa7cae89000342c5385a9e2f06207912c3a14f4f38a6420de3c92 |
#!/Library/Frameworks/Python.framework/Versions/2.7/bin/python
# USAGE:
# PREAMBLE:
import numpy as np
import MDAnalysis
import sys
import os
import matplotlib.pyplot as plt
traj_file ='%s' %(sys.argv[1])  # trajectory file path, taken from the first CLI argument
# ----------------------------------------
# VARIABLE DECLARATION
base1 = 1  # running base index (re-used as a loop counter throughout)
nbases = 15  # number of nucleotides in the analyzed chain
#nbases = 3
#Nsteps = 150000 # check length of the energy file; if not 150000 lines, then need to alter Nsteps value so that angle values will match up
#Nsteps = 149996
#equilib_step = 37500 # we have chosen 75 ns to be the equilib time; 75ns = 37500 frames; if energy values do not match with angle values, then equilib_step needs to be altered as well...
#equilib_step = 37496
#production = Nsteps - equilib_step
# SUBROUTINES/DEFINITIONS:
# Local aliases of numpy attributes (minor lookup speed-up in hot loops below)
arccosine = np.arccos
dotproduct = np.dot
pi = np.pi
ldtxt = np.loadtxt
zeros = np.zeros
# ----------------------------------------
# DICTIONARY DECLARATION
normals = {} # create the normals dictionary for future use
total_binaries = {} # create the total_binaries dictionary for future use
get_norm = normals.get
get_tb = total_binaries.get
# ----------------------------------------
# PLOTTING SUBROUTINES
def plotting(xdata, ydata, base):
	# Scatter the per-frame stacking metric for one base and save it as
	# 'stacking_binary.<base>.png'. The x axis is fixed to the 0-300 ns
	# window (assumes a ~300 ns trajectory -- confirm against input data).
	plt.plot(xdata, ydata, 'rx')
	plt.title('Stacking behavior of base %s over the trajectory' %(base))
	plt.xlabel('Simulation time (ns)')
	plt.ylabel('Stacking metric')
	plt.xlim((0,300))
	plt.grid( b=True, which='major', axis='both', color='k', linestyle='-')
	plt.savefig('stacking_binary.%s.png' %(base))
	plt.close()
def vdw_hist(data, base_a, base_b):
	"""Histogram the vdW energies for base pair (base_a, base_b): saves a PNG
	and writes (bin count, left bin edge) pairs to 'energy.<a>.<b>.dat'."""
	events, edges, patches = plt.hist(data, bins = 100, histtype = 'bar')
	plt.title('Distribution of vdW Energies - Base Pair %s-%s' %(base_a, base_b))
	plt.xlabel('vdW Energy ($kcal\ mol^{-1}$)')
	plt.xlim((-8,0))
	plt.ylabel('Frequency')
	plt.savefig('energy.%s.%s.png' %(base_a, base_b))
	# 'with' guarantees the file handle is closed even if a write fails.
	# zip pairs each of the 100 counts with its left bin edge (edges has
	# one extra entry), matching the original range(len(events)) loop.
	with open('energy.%s.%s.dat' %(base_a, base_b), 'w') as nf:
		for count, edge in zip(events, edges):
			nf.write('	%10.1f	%10.4f\n' %(count, edge))
	plt.close()
def angle_hist(data, base_a, base_b):
	"""Histogram the base-normal angles for pair (base_a, base_b): saves a PNG
	and writes (bin count, left bin edge) pairs to 'angle.<a>.<b>.dat'."""
	events, edges, patches = plt.hist(data, bins = 100, histtype = 'bar')
	plt.title('Distribution of Angles btw Base Pair %s-%s' %(base_a, base_b))
	plt.xlabel('Angle (Degrees)')
	plt.ylabel('Frequency')
	plt.savefig('angle.%s.%s.png' %(base_a, base_b))
	# 'with' guarantees the file handle is closed even if a write fails.
	with open('angle.%s.%s.dat' %(base_a, base_b), 'w') as nf:
		for count, edge in zip(events, edges):
			nf.write('	%10.1f	%10.4f\n' %(count, edge))
	plt.close()
def energy_angle_hist(xdata, ydata, base_a, base_b):
	"""2D histogram of angle (x) vs vdW energy (y) for base pair
	(base_a, base_b); saves the figure to 'vdw_angle.<a>.<b>.png'.
	(Removed the trailing reassignments of local names -- locals are
	discarded on return, so they had no effect.)"""
	counts, xedges, yedges, image = plt.hist2d(xdata, ydata, bins = 100)
	cb1 = plt.colorbar()
	cb1.set_label('Frequency')
	plt.title('Distribution of Base Pair interactions - %s-%s' %(base_a, base_b))
	plt.xlabel('Angle (Degrees)')
	plt.ylabel('vdW Energy ($kcal\ mol^{-1}$)')
	plt.ylim((-6,0.5))
	plt.savefig('vdw_angle.%s.%s.png' %(base_a, base_b))
	plt.close()
# MAIN PROGRAM:
# ----------------------------------------
# ATOM SELECTION - load the trajectory and select the desired nucleotide atoms to be analyzed later on
u = MDAnalysis.Universe('../nucleic_ions.pdb', traj_file, delta=2.0) # load in trajectory file
Nsteps = len(u.trajectory)
equilib_step = 37500 # first 75 ns are not to be included in total stacking metric
production = Nsteps - equilib_step
nucleic = u.selectAtoms('resid 1:15') # atom selections for nucleic chain
a1 = nucleic.selectAtoms('resid 1') # residue 1 has different atom IDs for the base atoms
a1_base = a1.atoms[10:24] # atom selections
bases = [] # make a list of the 15 bases filled with atoms
bases.append(a1_base) # add base 1 into list
for residue in nucleic.residues[1:15]: # collect the other bases into list
	residue_base = []
	residue_base = residue.atoms[12:26]
	bases.append(residue_base)
# ----------------------------------------
# DICTIONARY DEVELOPMENT - Develop the normals and total binary dictionary which contain the data for each base
# normals['normal.N'] is an (Nsteps, 3) array of base-plane normals;
# total_binaries['base.N'] accumulates the per-frame stacking counts.
while base1 <= nbases:
	normals['normal.%s' %(base1)] = get_norm('normal.%s' %(base1), np.zeros((Nsteps, 3)))
	total_binaries['base.%s' %(base1)] = get_tb('base.%s' %(base1), np.zeros(Nsteps))
	base1 += 1
# ----------------------------------------
# SIMULATION TIME - calculate the array that contains the simulation time in ns units
time = np.zeros(Nsteps)
for i in range(Nsteps):
	time[i] = i*0.002 # time units: ns
# ----------------------------------------
# NORMAL ANALYSIS for each base - loops through all bases and all timesteps of the trajectory; calculates the normal vector of the base atoms
# The third principal axis (smallest moment) is taken as the base-plane normal.
base1 = 1
while (base1 <= nbases):
	for ts in u.trajectory:
		Princ_axes = []
		Princ_axes = bases[base1 - 1].principalAxes()
		normals['normal.%s' %(base1)][ts.frame - 1] = Princ_axes[2] # ts.frame index starts at 1; add normal to dictionary with index starting at 0
	base1 += 1
# ----------------------------------------
# BASE PAIR ANALYSIS - loops through all base pairs (w/out duplicates) and performs the angle analysis as well as the binary analysis
base1 = 1 # reset the base index to start at 1
while (base1 <= nbases): # while loops to perform the base-pair analysis while avoiding performing the same analysis twice
	base2 = base1 + 1
	while (base2 <= nbases):
		os.mkdir('base%s_base%s' %(base1, base2)) # makes and moves into a directory for the base pair
		os.chdir('base%s_base%s' %(base1, base2))
		energyfile = '../../nonbond_energy/base%s_base%s/base%s_base%s.energies.dat' %(base1, base2, base1, base2)
		energies = ldtxt(energyfile) # load in the energy file to a numpy array
		vdw_energies = energies[:,2]
		binary = zeros(Nsteps)
		nf = open('binary.%s.%s.dat' %(base1, base2), 'w') # write the base pair data to a file; make sure to be writing this in a base pair directory
		# angle and binary analysis for base pair;
		for i in range(Nsteps):
			angle = 0.
			angle = arccosine(dotproduct(normals['normal.%s' %(base1)][i], normals['normal.%s' %(base2)][i]))
			angle = angle*(180./pi)
			# fold angles into [0, 90] -- the normals' sign is arbitrary
			if angle > 90.:
				angle = 180. - angle
			if vdw_energies[i] <= -3.5 and angle <= 30.: # cutoff: -3.5 kcal mol^-1 and 30 degrees
				binary[i] = 1. # assumed else binary[i] = 0.
			nf.write('	%10.3f	%10.5f	%10.5f	%10.1f\n' %(time[i], vdw_energies[i], angle, binary[i])) # check time values
			# a stacked pair contributes to the total metric of BOTH bases
			total_binaries['base.%s' %(base1)][i] = total_binaries['base.%s' %(base1)][i] + binary[i]
			total_binaries['base.%s' %(base2)][i] = total_binaries['base.%s' %(base2)][i] + binary[i]
		nf.close()
		angles = []
		energies = []
		vdw_energies = []
		os.chdir('..')
		base2 += 1
	base1 += 1
# ----------------------------------------
# TOTAL BINARY METRIC ANALYSIS - writing to file and plotting
# print out (also plot) the total binary data to an individual file for each individual base
base1 = 1 # reset the base index to start at 1
os.mkdir('total_binaries')
os.chdir('total_binaries')
while (base1 <= nbases):
	os.mkdir('base%s' %(base1))
	os.chdir('base%s' %(base1))
	nf = open('binary.%s.dat' %(base1), 'w')
	for i in range(Nsteps):
		nf.write('	%10.3f	%10.1f\n' %(time[i], total_binaries['base.%s' %(base1)][i])) # check time values
	nf.close()
	# stacking probability = fraction of post-equilibration frames in which
	# the base is stacked with at least one partner
	counts = 0
	for i in range(equilib_step, Nsteps):
		if total_binaries['base.%s' %(base1)][i] > 0.:
			counts +=1
	prob = 0.
	prob = (float(counts)/production)*100.
	nf = open('stacking.%s.dat' %(base1), 'w')
	nf.write('counts: %10.1f out of %10.1f time steps \n Probability of stacking = %10.4f ' %(counts, production, prob))
	nf.close()
	plotting(time[:], total_binaries['base.%s' %(base1)][:], base1)
	os.chdir('..')
	base1 += 1
# ----------------------------------------
# BASE PAIR PLOTTING - making histogram plots for vdW energy distributions, angle distributions, and 2d hist of vdw vs angle distributions
# Also printing out a file that contains the count of timesteps where the base pair are stacked
os.chdir('..')
base1 = 1
while (base1 <= nbases): # while loops to perform the base-pair analysis while avoiding performing the same analysis twice
	base2 = base1 + 1
	while (base2 <= nbases):
		os.chdir('base%s_base%s' %(base1, base2))
		infile = 'binary.%s.%s.dat' %(base1, base2)
		data = ldtxt(infile) # columns: data[:,0] = time, data[:,1] = vdW energy, data[:,2] = angle, data[:,3] = base pair binary metric
		# only post-equilibration rows are histogrammed
		vdw_hist(data[equilib_step:,1], base1, base2)
		angle_hist(data[equilib_step:,2], base1, base2)
		energy_angle_hist(data[equilib_step:,2], data[equilib_step:,1], base1, base2)
		nf = open('stacking.%s.%s.dat' %(base1, base2), 'w')
		bp_counts = sum(data[equilib_step:,3])
		nf.write('counts for base pair %s-%s: %10.1f' %(base1, base2, bp_counts))
		nf.close()
		data = []
		os.chdir('..')
		base2 += 1
	base1 += 1
# ----------------------------------------
# END
| rbdavid/DNA_stacking_analysis | angles_binary.py | Python | mit | 9,052 | [
"MDAnalysis"
] | aaf26dc5bd24c213721622bf24ae410aa4e09edd734063bd21d63e0e1f760c33 |
# Ehrenfest (coupled electron-ion) dynamics for an NaCl dimer with GPAW:
# first a ground-state DFT calculation written to a .gpw file, then TDDFT
# Ehrenfest propagation printing energies each step.
# NOTE: Python 2 print statements -- run with a Python 2 interpreter.
from ase import Atoms
from gpaw import GPAW
from gpaw.tddft import TDDFT
from gpaw.tddft.ehrenfest import EhrenfestVelocityVerlet
import sys
d = 4.5
atoms = Atoms('NaCl', [(0,0,0),(0,0,d)])
atoms.center(vacuum=4.5)
# Reposition the atoms to the actual starting separation inside the same box
d = 4.0
atoms.set_positions([(0,0,0),(0,0,d)])
atoms.center()
gs_calc = GPAW(nbands=4, gpts=(64,64,96), xc='LDA', setups='hgh')
atoms.set_calculator(gs_calc)
atoms.get_potential_energy()
gs_calc.write('nacl_hgh_gs.gpw', 'all')
td_calc = TDDFT('nacl_hgh_gs.gpw', propagator='ETRSCN')
evv = EhrenfestVelocityVerlet(td_calc, 0.001)
i=0
evv.get_energy()
# r = Na-Cl separation along z
r = evv.x[1][2] - evv.x[0][2]
print 'E = ', [i, r, evv.Etot, evv.Ekin, evv.Epot]
for i in range(10000):
    evv.propagate(1.0)
    evv.get_energy()
    r = evv.x[1][2] - evv.x[0][2]
    print 'E = ', [i+1, r, evv.Etot, evv.Ekin, evv.Epot]
| qsnake/gpaw | gpaw/test/ehrenfest_nacl.py | Python | gpl-3.0 | 814 | [
"ASE",
"GPAW"
] | 37249e01299da86266924cebf6ea5e44d410b79fd1f030aed21cb51de3df6012 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
import unittest
from mantid.simpleapi import logger
import AbinsModules
class AbinsLoadCRYSTALTest(unittest.TestCase, AbinsModules.GeneralLoadAbInitioTester):
    """
    Regression tests for AbinsModules.LoadCRYSTAL. Each case names a stored
    CRYSTAL output fixture and delegates to the shared
    GeneralLoadAbInitioTester.check() helper.
    """
    def tearDown(self):
        # Remove loader output files so successive test runs stay independent
        AbinsModules.AbinsTestHelpers.remove_output_files(list_of_names=["LoadCRYSTAL"])
    #  *************************** USE CASES ********************************************
    # ===================================================================================
    # | Use cases: Gamma point calculation for CRYSTAL                                  |
    # ===================================================================================
    _gamma_crystal = "crystalB3LYP_LoadCRYSTAL"
    _set_crystal = "crystal_set_key_LoadCRYSTAL"
    # ===================================================================================
    # | Use case: Molecular calculation for CRYSTAL                                     |
    # ===================================================================================
    _molecule = "toluene_molecule_LoadCRYSTAL"
    # ===================================================================================
    # | Use cases: Phonon dispersion calculation for CRYSTAL                            |
    # ===================================================================================
    _phonon_dispersion_v1 = "mgo-GX_LoadCRYSTAL"
    _phonon_dispersion_v2 = "MgO-222-DISP_LoadCRYSTAL"
    def test_gamma_crystal(self):
        self.check(name=self._gamma_crystal, loader=AbinsModules.LoadCRYSTAL)
        self.check(name=self._set_crystal, loader=AbinsModules.LoadCRYSTAL)
    def test_molecule(self):
        self.check(name=self._molecule, loader=AbinsModules.LoadCRYSTAL)
    def test_phonon_dispersion_crystal(self):
        self.check(name=self._phonon_dispersion_v1, loader=AbinsModules.LoadCRYSTAL)
        self.check(name=self._phonon_dispersion_v2, loader=AbinsModules.LoadCRYSTAL)
if __name__ == '__main__':
    unittest.main()
| mganeva/mantid | scripts/test/AbinsLoadCRYSTALTest.py | Python | gpl-3.0 | 2,341 | [
"CRYSTAL"
] | ac70207b8382043116f21b6e577b5cbec4823f8b71325e687aecb466f8c3ce6a |
# -*- coding: utf-8 -*-
"""
Unit tests for instructor.api methods.
"""
# pylint: disable=E1111
import unittest
import json
import requests
import datetime
from urllib import quote
from django.test import TestCase
from nose.tools import raises
from mock import Mock, patch, ANY
from django.conf import settings
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from django.http import HttpRequest, HttpResponse
from django_comment_common.models import FORUM_ROLE_COMMUNITY_TA, Role
from django_comment_common.utils import seed_permissions_roles
from django.core import mail
from django.utils.timezone import utc
from django.contrib.auth.models import User
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from courseware.tests.helpers import LoginEnrollmentTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from student.tests.factories import UserFactory, UserProfileFactory, UserStandingFactory
from courseware.tests.factories import StaffFactory, InstructorFactory, BetaTesterFactory
from student.roles import CourseBetaTesterRole
from student.models import CourseEnrollment, CourseEnrollmentAllowed, UserStanding
from courseware.models import StudentModule
from survey.models import SurveySubmission
from survey.tests.factories import SurveySubmissionFactory
# modules which are mocked in test cases.
import instructor_task.api
from instructor.access import allow_access
import instructor.views.api
from instructor.views.api import _split_input_list, _msk_from_problem_urlname, common_exceptions_400
from instructor_task.api_helper import AlreadyRunningError
from xmodule.exceptions import NotFoundError
from .test_tools import get_extended_due
# Minimal stand-in views used to exercise the common_exceptions_400
# decorator (see TestCommonExceptions400 below).
@common_exceptions_400
def view_success(request):  # pylint: disable=W0613
    "A dummy view for testing that returns a simple HTTP response"
    return HttpResponse('success')
@common_exceptions_400
def view_user_doesnotexist(request):  # pylint: disable=W0613
    "A dummy view that raises a User.DoesNotExist exception"
    raise User.DoesNotExist()
@common_exceptions_400
def view_alreadyrunningerror(request):  # pylint: disable=W0613
    "A dummy view that raises an AlreadyRunningError exception"
    raise AlreadyRunningError()
class TestCommonExceptions400(unittest.TestCase):
    """
    Tests for the common_exceptions_400 decorator: known exceptions must be
    converted to HTTP 400 responses, as plain text or JSON depending on
    whether the request is ajax.
    """
    def setUp(self):
        self.request = Mock(spec=HttpRequest)
        self.request.META = {}

    def _call_view(self, view, ajax):
        """Invoke *view* with the mock request configured as ajax or not."""
        self.request.is_ajax.return_value = ajax
        return view(self.request)

    def test_happy_path(self):
        response = view_success(self.request)
        self.assertEqual(response.status_code, 200)

    def test_user_doesnotexist(self):
        response = self._call_view(view_user_doesnotexist, ajax=False)
        self.assertEqual(response.status_code, 400)
        self.assertIn("User does not exist", response.content)

    def test_user_doesnotexist_ajax(self):
        response = self._call_view(view_user_doesnotexist, ajax=True)
        self.assertEqual(response.status_code, 400)
        payload = json.loads(response.content)
        self.assertIn("User does not exist", payload["error"])

    def test_alreadyrunningerror(self):
        response = self._call_view(view_alreadyrunningerror, ajax=False)
        self.assertEqual(response.status_code, 400)
        self.assertIn("Task is already running", response.content)

    def test_alreadyrunningerror_ajax(self):
        response = self._call_view(view_alreadyrunningerror, ajax=True)
        self.assertEqual(response.status_code, 400)
        payload = json.loads(response.content)
        self.assertIn("Task is already running", payload["error"])
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': False})
class TestInstructorAPIDenyLevels(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Ensure that users cannot access endpoints they shouldn't be able to.
    """
    def setUp(self):
        # A course with one enrolled (non-staff) student, plus a
        # StudentModule row so problem-centric endpoints have state to act on.
        self.course = CourseFactory.create()
        self.user = UserFactory.create()
        CourseEnrollment.enroll(self.user, self.course.id)
        self.problem_urlname = 'robot-some-problem-urlname'
        _module = StudentModule.objects.create(
            student=self.user,
            course_id=self.course.id,
            module_state_key=_msk_from_problem_urlname(
                self.course.id,
                self.problem_urlname
            ),
            state=json.dumps({'attempts': 10}),
        )
        # Endpoints that only Staff or Instructors can access
        self.staff_level_endpoints = [
            ('students_update_enrollment', {'identifiers': 'foo@example.org', 'action': 'enroll'}),
            ('get_grading_config', {}),
            ('get_students_features', {}),
            ('get_distribution', {}),
            ('get_student_progress_url', {'unique_student_identifier': self.user.username}),
            ('reset_student_attempts', {'problem_to_reset': self.problem_urlname, 'unique_student_identifier': self.user.email}),
            ('update_forum_role_membership', {'unique_student_identifier': self.user.email, 'rolename': 'Moderator', 'action': 'allow'}),
            ('list_forum_members', {'rolename': FORUM_ROLE_COMMUNITY_TA}),
            ('proxy_legacy_analytics', {'aname': 'ProblemGradeDistribution'}),
            ('send_email', {'send_to': 'staff', 'subject': 'test', 'message': 'asdf'}),
            ('list_instructor_tasks', {}),
            ('list_background_email_tasks', {}),
            ('list_report_downloads', {}),
            ('calculate_grades_csv', {}),
            ('create_pgreport_csv', {}),
            ('get_pgreport_csv', {}),
        ]
        # Endpoints that only Instructors can access
        self.instructor_level_endpoints = [
            ('bulk_beta_modify_access', {'identifiers': 'foo@example.org', 'action': 'add'}),
            ('modify_access', {'unique_student_identifier': self.user.email, 'rolename': 'beta', 'action': 'allow'}),
            ('list_course_role_members', {'rolename': 'beta'}),
            ('rescore_problem', {'problem_to_reset': self.problem_urlname, 'unique_student_identifier': self.user.email}),
        ]
    def _access_endpoint(self, endpoint, args, status_code, msg):
        """
        Asserts that accessing the given `endpoint` gets a response of `status_code`.
        endpoint: string, endpoint for instructor dash API
        args: dict, kwargs for `reverse` call
        status_code: expected HTTP status code response
        msg: message to display if assertion fails.
        """
        url = reverse(endpoint, kwargs={'course_id': self.course.id})
        # NOTE(review): `endpoint in 'send_email'` is a substring test, not
        # equality; it works here only because no other listed endpoint is a
        # substring of 'send_email'. `endpoint == 'send_email'` is what's
        # meant. send_email requires POST; everything else is tried via GET.
        if endpoint in 'send_email':
            response = self.client.post(url, args)
        else:
            response = self.client.get(url, args)
        print endpoint
        print response
        self.assertEqual(
            response.status_code,
            status_code,
            msg=msg
        )
    def test_student_level(self):
        """
        Ensure that an enrolled student can't access staff or instructor endpoints.
        """
        self.client.login(username=self.user.username, password='test')
        for endpoint, args in self.staff_level_endpoints:
            self._access_endpoint(
                endpoint,
                args,
                403,
                "Student should not be allowed to access endpoint " + endpoint
            )
        for endpoint, args in self.instructor_level_endpoints:
            self._access_endpoint(
                endpoint,
                args,
                403,
                "Student should not be allowed to access endpoint " + endpoint
            )
    def test_staff_level(self):
        """
        Ensure that a staff member can't access instructor endpoints.
        """
        staff_member = StaffFactory(course=self.course.location)
        CourseEnrollment.enroll(staff_member, self.course.id)
        self.client.login(username=staff_member.username, password='test')
        # Try to promote to forums admin - not working
        # update_forum_role(self.course.id, staff_member, FORUM_ROLE_ADMINISTRATOR, 'allow')
        for endpoint, args in self.staff_level_endpoints:
            # TODO: make these work
            if endpoint in ['update_forum_role_membership', 'proxy_legacy_analytics', 'list_forum_members', 'create_pgreport_csv', 'get_pgreport_csv']:
                continue
            self._access_endpoint(
                endpoint,
                args,
                200,
                "Staff member should be allowed to access endpoint " + endpoint
            )
        for endpoint, args in self.instructor_level_endpoints:
            self._access_endpoint(
                endpoint,
                args,
                403,
                "Staff member should not be allowed to access endpoint " + endpoint
            )
    def test_instructor_level(self):
        """
        Ensure that an instructor member can access all endpoints.
        """
        inst = InstructorFactory(course=self.course.location)
        CourseEnrollment.enroll(inst, self.course.id)
        self.client.login(username=inst.username, password='test')
        for endpoint, args in self.staff_level_endpoints:
            # TODO: make these work
            if endpoint in ['update_forum_role_membership', 'proxy_legacy_analytics', 'create_pgreport_csv', 'get_pgreport_csv']:
                continue
            self._access_endpoint(
                endpoint,
                args,
                200,
                "Instructor should be allowed to access endpoint " + endpoint
            )
        for endpoint, args in self.instructor_level_endpoints:
            # TODO: make this work
            if endpoint in ['rescore_problem']:
                continue
            self._access_endpoint(
                endpoint,
                args,
                200,
                "Instructor should be allowed to access endpoint " + endpoint
            )
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPIEnrollment(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test enrollment modification endpoint.
This test does NOT exhaustively test state changes, that is the
job of test_enrollment. This tests the response and action switch.
"""
    def setUp(self):
        # Log in as the course instructor; create one enrolled student, one
        # registered-but-not-enrolled student, one invited-but-unregistered
        # email, and one email with no account at all.
        self.course = CourseFactory.create()
        self.instructor = InstructorFactory(course=self.course.location)
        self.client.login(username=self.instructor.username, password='test')
        self.enrolled_student = UserFactory(username='EnrolledStudent', first_name='Enrolled', last_name='Student')
        CourseEnrollment.enroll(
            self.enrolled_student,
            self.course.id
        )
        self.notenrolled_student = UserFactory(username='NotEnrolledStudent', first_name='NotEnrolled', last_name='Student')
        # Create invited, but not registered, user
        cea = CourseEnrollmentAllowed(email='robot-allowed@robot.org', course_id=self.course.id)
        cea.save()
        self.allowed_email = 'robot-allowed@robot.org'
        self.notregistered_email = 'robot-not-an-email-yet@robot.org'
        self.assertEqual(User.objects.filter(email=self.notregistered_email).count(), 0)
        # uncomment to enable printing of large diffs
        # from failed assertions in the event of a test failure.
        # (comment because pylint C0103)
        # self.maxDiff = None
    def test_missing_params(self):
        """ Test missing all query parameters. """
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 400)
    def test_bad_action(self):
        """ Test with an invalid action. """
        # Only known actions (e.g. 'enroll'/'unenroll') are accepted
        action = 'robot-not-an-action'
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {'identifiers': self.enrolled_student.email, 'action': action})
        self.assertEqual(response.status_code, 400)
    def test_invalid_email(self):
        """Malformed email identifiers are reported per-identifier, not as an error status."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {'identifiers': 'percivaloctavius@', 'action': 'enroll', 'email_students': False})
        self.assertEqual(response.status_code, 200)
        # test the response data
        expected = {
            "action": "enroll",
            'auto_enroll': False,
            "results": [
                {
                    "identifier": 'percivaloctavius@',
                    "invalidIdentifier": True,
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)
    def test_invalid_username(self):
        """A username that matches no account is flagged as an invalid identifier."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {'identifiers': 'percivaloctavius', 'action': 'enroll', 'email_students': False})
        self.assertEqual(response.status_code, 200)
        # test the response data
        expected = {
            "action": "enroll",
            'auto_enroll': False,
            "results": [
                {
                    "identifier": 'percivaloctavius',
                    "invalidIdentifier": True,
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)
    def test_enroll_with_username(self):
        """A registered, not-yet-enrolled user can be enrolled by username."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {'identifiers': self.notenrolled_student.username, 'action': 'enroll', 'email_students': False})
        self.assertEqual(response.status_code, 200)
        # test the response data
        expected = {
            "action": "enroll",
            'auto_enroll': False,
            "results": [
                {
                    "identifier": self.notenrolled_student.username,
                    "before": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    },
                    "after": {
                        "enrollment": True,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    }
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)
    def test_enroll_without_email(self):
        """Enrolling with email_students=False must not send any notification."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {'identifiers': self.notenrolled_student.email, 'action': 'enroll', 'email_students': False})
        print "type(self.notenrolled_student.email): {}".format(type(self.notenrolled_student.email))
        self.assertEqual(response.status_code, 200)
        # test that the user is now enrolled
        user = User.objects.get(email=self.notenrolled_student.email)
        self.assertTrue(CourseEnrollment.is_enrolled(user, self.course.id))
        # test the response data
        expected = {
            "action": "enroll",
            "auto_enroll": False,
            "results": [
                {
                    "identifier": self.notenrolled_student.email,
                    "before": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    },
                    "after": {
                        "enrollment": True,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    }
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)
        # Check the outbox: no email may have been sent
        self.assertEqual(len(mail.outbox), 0)
    def test_enroll_with_email(self):
        """Enrolling with email_students=True sends exactly one notification email."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {'identifiers': self.notenrolled_student.email, 'action': 'enroll', 'email_students': True})
        print "type(self.notenrolled_student.email): {}".format(type(self.notenrolled_student.email))
        self.assertEqual(response.status_code, 200)
        # test that the user is now enrolled
        user = User.objects.get(email=self.notenrolled_student.email)
        self.assertTrue(CourseEnrollment.is_enrolled(user, self.course.id))
        # test the response data
        expected = {
            "action": "enroll",
            "auto_enroll": False,
            "results": [
                {
                    "identifier": self.notenrolled_student.email,
                    "before": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    },
                    "after": {
                        "enrollment": True,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    }
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)
        # Check the outbox: exact subject and body of the enrollment email
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            'You have been enrolled in Robot Super Course'
        )
        self.assertEqual(
            mail.outbox[0].body,
            "Dear NotEnrolled Student\n\nYou have been enrolled in Robot Super Course "
            "at edx.org by a member of the course staff. "
            "The course should now appear on your edx.org dashboard.\n\n"
            "To start accessing course materials, please visit "
            "https://edx.org/courses/MITx/999/Robot_Super_Course/\n\n----\n"
            "This email was automatically sent from edx.org to NotEnrolled Student"
        )
def test_enroll_with_email_not_registered(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id})
response = self.client.get(url, {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True})
self.assertEqual(response.status_code, 200)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to register for Robot Super Course'
)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join Robot Super Course at edx.org by a member of the course staff.\n\n"
"To finish your registration, please visit https://edx.org/register and fill out the registration form "
"making sure to use robot-not-an-email-yet@robot.org in the E-mail field.\n"
"Once you have registered and activated your account, "
"visit https://edx.org/courses/MITx/999/Robot_Super_Course/about to join the course.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org"
)
def test_enroll_email_not_registered_mktgsite(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id})
# Try with marketing site enabled
with patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True}):
response = self.client.get(url, {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True})
self.assertEqual(response.status_code, 200)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join Robot Super Course at edx.org by a member of the course staff.\n\n"
"To finish your registration, please visit https://edx.org/register and fill out the registration form "
"making sure to use robot-not-an-email-yet@robot.org in the E-mail field.\n"
"You can then enroll in Robot Super Course.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org"
)
    def test_enroll_with_email_not_registered_autoenroll(self):
        """
        Enrolling an unregistered email with auto_enroll set sends an
        invitation telling the student the course will appear on their
        dashboard once they register and activate an account.
        """
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True, 'auto_enroll': True})
        print "type(self.notregistered_email): {}".format(type(self.notregistered_email))
        self.assertEqual(response.status_code, 200)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            'You have been invited to register for Robot Super Course'
        )
        self.assertEqual(
            mail.outbox[0].body,
            "Dear student,\n\nYou have been invited to join Robot Super Course at edx.org by a member of the course staff.\n\n"
            "To finish your registration, please visit https://edx.org/register and fill out the registration form "
            "making sure to use robot-not-an-email-yet@robot.org in the E-mail field.\n"
            "Once you have registered and activated your account, you will see Robot Super Course listed on your dashboard.\n\n----\n"
            "This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org"
        )
    def test_unenroll_without_email(self):
        """
        Unenroll an enrolled student without email notification: expect a
        200, no remaining enrollment, the standard before/after payload,
        and an empty outbox.
        """
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {'identifiers': self.enrolled_student.email, 'action': 'unenroll', 'email_students': False})
        print "type(self.enrolled_student.email): {}".format(type(self.enrolled_student.email))
        self.assertEqual(response.status_code, 200)

        # test that the user is now unenrolled
        user = User.objects.get(email=self.enrolled_student.email)
        self.assertFalse(CourseEnrollment.is_enrolled(user, self.course.id))

        # test the response data
        expected = {
            "action": "unenroll",
            "auto_enroll": False,
            "results": [
                {
                    "identifier": self.enrolled_student.email,
                    "before": {
                        "enrollment": True,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    },
                    "after": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    }
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 0)
    def test_unenroll_with_email(self):
        """
        Unenroll an enrolled student with email notification on: expect a
        200, no remaining enrollment, the before/after payload, and one
        un-enrollment email in the outbox.
        """
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {'identifiers': self.enrolled_student.email, 'action': 'unenroll', 'email_students': True})
        print "type(self.enrolled_student.email): {}".format(type(self.enrolled_student.email))
        self.assertEqual(response.status_code, 200)

        # test that the user is now unenrolled
        user = User.objects.get(email=self.enrolled_student.email)
        self.assertFalse(CourseEnrollment.is_enrolled(user, self.course.id))

        # test the response data
        expected = {
            "action": "unenroll",
            "auto_enroll": False,
            "results": [
                {
                    "identifier": self.enrolled_student.email,
                    "before": {
                        "enrollment": True,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    },
                    "after": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    }
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            'You have been un-enrolled from Robot Super Course'
        )
        self.assertEqual(
            mail.outbox[0].body,
            "Dear Enrolled Student\n\nYou have been un-enrolled in Robot Super Course "
            "at edx.org by a member of the course staff. "
            "The course will no longer appear on your edx.org dashboard.\n\n"
            "Your other courses have not been affected.\n\n----\n"
            "This email was automatically sent from edx.org to Enrolled Student"
        )
    def test_unenroll_with_email_allowed_student(self):
        """
        Unenroll an email on the course 'allowed' list (invited but not yet
        registered): the allowed flag flips to False and the student is told
        to disregard the earlier invitation.
        """
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {'identifiers': self.allowed_email, 'action': 'unenroll', 'email_students': True})
        print "type(self.allowed_email): {}".format(type(self.allowed_email))
        self.assertEqual(response.status_code, 200)

        # test the response data
        expected = {
            "action": "unenroll",
            "auto_enroll": False,
            "results": [
                {
                    "identifier": self.allowed_email,
                    "before": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": False,
                        "allowed": True,
                    },
                    "after": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": False,
                        "allowed": False,
                    }
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            'You have been un-enrolled from Robot Super Course'
        )
        self.assertEqual(
            mail.outbox[0].body,
            "Dear Student,\n\nYou have been un-enrolled from course Robot Super Course by a member of the course staff. "
            "Please disregard the invitation previously sent.\n\n----\n"
            "This email was automatically sent from edx.org to robot-allowed@robot.org"
        )
    @patch('instructor.enrollment.uses_shib')
    def test_enroll_with_email_not_registered_with_shib(self, mock_uses_shib):
        """
        For a Shibboleth course, the invitation email sends the student
        directly to the course about page rather than the registration form.
        """
        mock_uses_shib.return_value = True

        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True})
        self.assertEqual(response.status_code, 200)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            'You have been invited to register for Robot Super Course'
        )
        self.assertEqual(
            mail.outbox[0].body,
            "Dear student,\n\nYou have been invited to join Robot Super Course at edx.org by a member of the course staff.\n\n"
            "To access the course visit https://edx.org/courses/MITx/999/Robot_Super_Course/about and register for the course.\n\n----\n"
            "This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org"
        )
    @patch('instructor.enrollment.uses_shib')
    def test_enroll_email_not_registered_shib_mktgsite(self, mock_uses_shib):
        """
        Shibboleth course plus marketing site enabled: the invitation email
        carries no course link at all.
        """
        mock_uses_shib.return_value = True

        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id})
        # Try with marketing site enabled
        with patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True}):
            response = self.client.get(url, {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True})

        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            mail.outbox[0].body,
            "Dear student,\n\nYou have been invited to join Robot Super Course at edx.org by a member of the course staff.\n\n----\n"
            "This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org"
        )
    @patch('instructor.enrollment.uses_shib')
    def test_enroll_with_email_not_registered_with_shib_autoenroll(self, mock_uses_shib):
        """
        Shibboleth course with auto_enroll: the invitation email links
        straight to the course courseware and asks the student to log in.
        """
        mock_uses_shib.return_value = True

        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True, 'auto_enroll': True})
        print "type(self.notregistered_email): {}".format(type(self.notregistered_email))
        self.assertEqual(response.status_code, 200)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            'You have been invited to register for Robot Super Course'
        )
        self.assertEqual(
            mail.outbox[0].body,
            "Dear student,\n\nYou have been invited to join Robot Super Course at edx.org by a member of the course staff.\n\n"
            "To access the course visit https://edx.org/courses/MITx/999/Robot_Super_Course/ and login.\n\n----\n"
            "This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org"
        )
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPIBulkBetaEnrollment(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test bulk beta modify access endpoint.
    """
    def setUp(self):
        """Create a course, an instructor, an enrolled beta tester, and an unenrolled student."""
        self.course = CourseFactory.create()
        self.instructor = InstructorFactory(course=self.course.location)
        self.client.login(username=self.instructor.username, password='test')

        self.beta_tester = BetaTesterFactory(course=self.course.location)
        CourseEnrollment.enroll(
            self.beta_tester,
            self.course.id
        )

        self.notenrolled_student = UserFactory(username='NotEnrolledStudent')

        # An email address guaranteed not to belong to any existing account.
        self.notregistered_email = 'robot-not-an-email-yet@robot.org'
        self.assertEqual(User.objects.filter(email=self.notregistered_email).count(), 0)

        # uncomment to enable printing of large diffs
        # from failed assertions in the event of a test failure.
        # (comment because pylint C0103)
        # self.maxDiff = None

    def test_missing_params(self):
        """ Test missing all query parameters. """
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 400)

    def test_bad_action(self):
        """ Test with an invalid action. """
        action = 'robot-not-an-action'
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {'identifiers': self.beta_tester.email, 'action': action})
        self.assertEqual(response.status_code, 400)

    def add_notenrolled(self, response, identifier):
        """
        Test Helper Method (not a test, called by other tests)

        Takes a client response from a call to bulk_beta_modify_access with 'email_students': False,
        and the student identifier (email or username) given as 'identifiers' in the request.

        Asserts the response returns cleanly, that the student was added as a beta tester, and the
        response properly contains their identifier, 'error': False, and 'userDoesNotExist': False.
        Additionally asserts no email was sent.
        """
        self.assertEqual(response.status_code, 200)
        self.assertTrue(CourseBetaTesterRole(self.course.location).has_user(self.notenrolled_student))
        # test the response data
        expected = {
            "action": "add",
            "results": [
                {
                    "identifier": identifier,
                    "error": False,
                    "userDoesNotExist": False
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 0)

    def test_add_notenrolled_email(self):
        """Adding a beta tester by email does not also enroll them."""
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': False})
        self.add_notenrolled(response, self.notenrolled_student.email)
        self.assertFalse(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))

    def test_add_notenrolled_email_autoenroll(self):
        """Adding a beta tester by email with auto_enroll also enrolls them."""
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': False, 'auto_enroll': True})
        self.add_notenrolled(response, self.notenrolled_student.email)
        self.assertTrue(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))

    def test_add_notenrolled_username(self):
        """Adding a beta tester by username does not also enroll them."""
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {'identifiers': self.notenrolled_student.username, 'action': 'add', 'email_students': False})
        self.add_notenrolled(response, self.notenrolled_student.username)
        self.assertFalse(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))

    def test_add_notenrolled_username_autoenroll(self):
        """Adding a beta tester by username with auto_enroll also enrolls them."""
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {'identifiers': self.notenrolled_student.username, 'action': 'add', 'email_students': False, 'auto_enroll': True})
        self.add_notenrolled(response, self.notenrolled_student.username)
        self.assertTrue(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))

    def test_add_notenrolled_with_email(self):
        """Adding a beta tester with notification sends the beta-test invitation email."""
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True})
        self.assertEqual(response.status_code, 200)

        self.assertTrue(CourseBetaTesterRole(self.course.location).has_user(self.notenrolled_student))
        # test the response data
        expected = {
            "action": "add",
            "results": [
                {
                    "identifier": self.notenrolled_student.email,
                    "error": False,
                    "userDoesNotExist": False
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            'You have been invited to a beta test for Robot Super Course'
        )
        self.assertEqual(
            mail.outbox[0].body,
            u"Dear {0}\n\nYou have been invited to be a beta tester "
            "for Robot Super Course at edx.org by a member of the course staff.\n\n"
            "Visit https://edx.org/courses/MITx/999/Robot_Super_Course/about to join "
            "the course and begin the beta test.\n\n----\n"
            "This email was automatically sent from edx.org to {1}".format(
                self.notenrolled_student.profile.name,
                self.notenrolled_student.email
            )
        )

    def test_add_notenrolled_with_email_autoenroll(self):
        """With notification and auto_enroll, the invitation points at the course materials."""
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id})
        response = self.client.get(
            url,
            {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True, 'auto_enroll': True}
        )
        self.assertEqual(response.status_code, 200)

        self.assertTrue(CourseBetaTesterRole(self.course.location).has_user(self.notenrolled_student))
        # test the response data
        expected = {
            "action": "add",
            "results": [
                {
                    "identifier": self.notenrolled_student.email,
                    "error": False,
                    "userDoesNotExist": False
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            'You have been invited to a beta test for Robot Super Course'
        )
        self.assertEqual(
            mail.outbox[0].body,
            u"Dear {0}\n\nYou have been invited to be a beta tester "
            "for Robot Super Course at edx.org by a member of the course staff.\n\n"
            "To start accessing course materials, please visit "
            "https://edx.org/courses/MITx/999/Robot_Super_Course/\n\n----\n"
            "This email was automatically sent from edx.org to {1}".format(
                self.notenrolled_student.profile.name,
                self.notenrolled_student.email
            )
        )

    def test_add_notenrolled_email_mktgsite(self):
        """With the marketing site enabled, the invitation tells the student to enroll via edx.org."""
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id})
        # Try with marketing site enabled
        with patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True}):
            response = self.client.get(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True})

        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            mail.outbox[0].body,
            u"Dear {0}\n\nYou have been invited to be a beta tester "
            "for Robot Super Course at edx.org by a member of the course staff.\n\n"
            "Visit edx.org to enroll in the course and begin the beta test.\n\n----\n"
            "This email was automatically sent from edx.org to {1}".format(
                self.notenrolled_student.profile.name,
                self.notenrolled_student.email
            )
        )

    def test_enroll_with_email_not_registered(self):
        """Adding an email with no matching account reports userDoesNotExist and sends no email."""
        # User doesn't exist
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {'identifiers': self.notregistered_email, 'action': 'add', 'email_students': True})
        self.assertEqual(response.status_code, 200)
        # test the response data
        expected = {
            "action": "add",
            "results": [
                {
                    "identifier": self.notregistered_email,
                    "error": True,
                    "userDoesNotExist": True
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 0)

    def test_remove_without_email(self):
        """Removing a beta tester without notification revokes the role and sends no email."""
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {'identifiers': self.beta_tester.email, 'action': 'remove', 'email_students': False})
        self.assertEqual(response.status_code, 200)

        self.assertFalse(CourseBetaTesterRole(self.course.location).has_user(self.beta_tester))
        # test the response data
        expected = {
            "action": "remove",
            "results": [
                {
                    "identifier": self.beta_tester.email,
                    "error": False,
                    "userDoesNotExist": False
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 0)

    def test_remove_with_email(self):
        """Removing a beta tester with notification revokes the role and sends the removal email."""
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {'identifiers': self.beta_tester.email, 'action': 'remove', 'email_students': True})
        self.assertEqual(response.status_code, 200)

        self.assertFalse(CourseBetaTesterRole(self.course.location).has_user(self.beta_tester))
        # test the response data
        expected = {
            "action": "remove",
            "results": [
                {
                    "identifier": self.beta_tester.email,
                    "error": False,
                    "userDoesNotExist": False
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            'You have been removed from a beta test for Robot Super Course'
        )
        self.assertEqual(
            mail.outbox[0].body,
            "Dear {full_name}\n\nYou have been removed as a beta tester for "
            "Robot Super Course at edx.org by a member of the course staff. "
            "The course will remain on your dashboard, but you will no longer "
            "be part of the beta testing group.\n\n"
            "Your other courses have not been affected.\n\n----\n"
            "This email was automatically sent from edx.org to {email_address}".format(
                full_name=self.beta_tester.profile.name,
                email_address=self.beta_tester.email
            )
        )
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPILevelsAccess(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test endpoints whereby instructors can change permissions
    of other users.

    This test does NOT test whether the actions had an effect on the
    database, that is the job of test_access.
    This tests the response and action switch.
    Actually, modify_access does not have a very meaningful
    response yet, so only the status code is tested.
    """
    def setUp(self):
        """Create a course with an instructor plus extra instructor, staff, and plain users."""
        self.course = CourseFactory.create()
        self.instructor = InstructorFactory(course=self.course.location)
        self.client.login(username=self.instructor.username, password='test')

        self.other_instructor = UserFactory()
        allow_access(self.course, self.other_instructor, 'instructor')
        self.other_staff = UserFactory()
        allow_access(self.course, self.other_staff, 'staff')
        self.other_user = UserFactory()

    def test_modify_access_noparams(self):
        """ Test missing all query parameters. """
        url = reverse('modify_access', kwargs={'course_id': self.course.id})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 400)

    def test_modify_access_bad_action(self):
        """ Test with an invalid action parameter. """
        url = reverse('modify_access', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {
            'unique_student_identifier': self.other_staff.email,
            'rolename': 'staff',
            'action': 'robot-not-an-action',
        })
        self.assertEqual(response.status_code, 400)

    def test_modify_access_bad_role(self):
        """ Test with an invalid rolename parameter. """
        url = reverse('modify_access', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {
            'unique_student_identifier': self.other_staff.email,
            'rolename': 'robot-not-a-roll',
            'action': 'revoke',
        })
        self.assertEqual(response.status_code, 400)

    def test_modify_access_allow(self):
        """ Granting a role to another user (identified by email) succeeds. """
        url = reverse('modify_access', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {
            'unique_student_identifier': self.other_instructor.email,
            'rolename': 'staff',
            'action': 'allow',
        })
        self.assertEqual(response.status_code, 200)

    def test_modify_access_allow_with_uname(self):
        """ Granting a role to another user (identified by username) succeeds. """
        url = reverse('modify_access', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {
            'unique_student_identifier': self.other_instructor.username,
            'rolename': 'staff',
            'action': 'allow',
        })
        self.assertEqual(response.status_code, 200)

    def test_modify_access_revoke(self):
        """ Revoking a role from another user (identified by email) succeeds. """
        url = reverse('modify_access', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {
            'unique_student_identifier': self.other_staff.email,
            'rolename': 'staff',
            'action': 'revoke',
        })
        self.assertEqual(response.status_code, 200)

    def test_modify_access_revoke_with_username(self):
        """ Revoking a role from another user (identified by username) succeeds. """
        url = reverse('modify_access', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {
            'unique_student_identifier': self.other_staff.username,
            'rolename': 'staff',
            'action': 'revoke',
        })
        self.assertEqual(response.status_code, 200)

    def test_modify_access_with_fake_user(self):
        """ An unknown identifier is reported back with userDoesNotExist. """
        url = reverse('modify_access', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {
            'unique_student_identifier': 'GandalfTheGrey',
            'rolename': 'staff',
            'action': 'revoke',
        })
        self.assertEqual(response.status_code, 200)
        expected = {
            'unique_student_identifier': 'GandalfTheGrey',
            'userDoesNotExist': True,
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

    def test_modify_access_with_inactive_user(self):
        """ A deactivated user account is reported back with inactiveUser. """
        self.other_user.is_active = False
        self.other_user.save()  # pylint: disable=no-member
        url = reverse('modify_access', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {
            'unique_student_identifier': self.other_user.username,
            'rolename': 'beta',
            'action': 'allow',
        })
        self.assertEqual(response.status_code, 200)
        expected = {
            'unique_student_identifier': self.other_user.username,
            'inactiveUser': True,
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

    def test_modify_access_revoke_not_allowed(self):
        """ Test revoking access that a user does not have. """
        url = reverse('modify_access', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {
            'unique_student_identifier': self.other_staff.email,
            'rolename': 'instructor',
            'action': 'revoke',
        })
        self.assertEqual(response.status_code, 200)

    def test_modify_access_revoke_self(self):
        """
        Test that an instructor cannot remove instructor privileges from themselves.
        """
        url = reverse('modify_access', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {
            'unique_student_identifier': self.instructor.email,
            'rolename': 'instructor',
            'action': 'revoke',
        })
        self.assertEqual(response.status_code, 200)
        # check response content
        expected = {
            'unique_student_identifier': self.instructor.username,
            'rolename': 'instructor',
            'action': 'revoke',
            'removingSelfAsInstructor': True,
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

    def test_list_course_role_members_noparams(self):
        """ Test missing all query parameters. """
        url = reverse('list_course_role_members', kwargs={'course_id': self.course.id})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 400)

    def test_list_course_role_members_bad_rolename(self):
        """ Test with an invalid rolename parameter. """
        url = reverse('list_course_role_members', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {
            'rolename': 'robot-not-a-rolename',
        })
        print response
        self.assertEqual(response.status_code, 400)

    def test_list_course_role_members_staff(self):
        """ Listing the 'staff' role returns the one staff member created in setUp. """
        url = reverse('list_course_role_members', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {
            'rolename': 'staff',
        })
        print response
        self.assertEqual(response.status_code, 200)

        # check response content
        expected = {
            'course_id': self.course.id,
            'staff': [
                {
                    'username': self.other_staff.username,
                    'email': self.other_staff.email,
                    'first_name': self.other_staff.first_name,
                    'last_name': self.other_staff.last_name,
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

    def test_list_course_role_members_beta(self):
        """ Listing the 'beta' role returns an empty member list for this course. """
        url = reverse('list_course_role_members', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {
            'rolename': 'beta',
        })
        print response
        self.assertEqual(response.status_code, 200)

        # check response content
        expected = {
            'course_id': self.course.id,
            'beta': []
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

    def test_update_forum_role_membership(self):
        """
        Test update forum role membership with user's email and username.
        """
        # Seed forum roles for course.
        seed_permissions_roles(self.course.id)

        # Test add discussion admin with email.
        self.assert_update_forum_role_membership(self.other_user.email, "Administrator", "allow")
        # Test revoke discussion admin with email.
        self.assert_update_forum_role_membership(self.other_user.email, "Administrator", "revoke")

        # Test add discussion moderator with username.
        self.assert_update_forum_role_membership(self.other_user.username, "Moderator", "allow")
        # Test revoke discussion moderator with username.
        self.assert_update_forum_role_membership(self.other_user.username, "Moderator", "revoke")

        # Test add discussion community TA with email.
        self.assert_update_forum_role_membership(self.other_user.email, "Community TA", "allow")
        # Test revoke discussion community TA with username.
        self.assert_update_forum_role_membership(self.other_user.username, "Community TA", "revoke")

    def assert_update_forum_role_membership(self, unique_student_identifier, rolename, action):
        """
        Test update forum role membership.

        Get unique_student_identifier, rolename and action and update forum role.
        """
        url = reverse('update_forum_role_membership', kwargs={'course_id': self.course.id})
        response = self.client.get(
            url,
            {
                'unique_student_identifier': unique_student_identifier,
                'rolename': rolename,
                'action': action,
            }
        )

        # Status code should be 200.
        self.assertEqual(response.status_code, 200)

        user_roles = self.other_user.roles.filter(course_id=self.course.id).values_list("name", flat=True)
        if action == 'allow':
            self.assertIn(rolename, user_roles)
        elif action == 'revoke':
            self.assertNotIn(rolename, user_roles)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPILevelsDataDump(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints that show data without side effects.
"""
    def setUp(self):
        """Create a course with a logged-in instructor and six enrolled students."""
        self.course = CourseFactory.create()
        self.instructor = InstructorFactory(course=self.course.location)
        self.client.login(username=self.instructor.username, password='test')

        self.students = [UserFactory() for _ in xrange(6)]
        for student in self.students:
            CourseEnrollment.enroll(student, self.course.id)
    def test_get_students_features(self):
        """
        Test that some minimum of information is formatted
        correctly in the response to get_students_features.
        """
        url = reverse('get_students_features', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {})
        res_json = json.loads(response.content)
        self.assertIn('students', res_json)
        for student in self.students:
            # Every enrolled student must appear in the dump with matching
            # username and email; [0] raises IndexError if one is missing.
            student_json = [
                x for x in res_json['students']
                if x['username'] == student.username
            ][0]
            self.assertEqual(student_json['username'], student.username)
            self.assertEqual(student_json['email'], student.email)
    def test_get_anon_ids(self):
        """
        Test the CSV output for the anonymized user ids.
        """
        url = reverse('get_anon_ids', kwargs={'course_id': self.course.id})
        with patch('instructor.views.api.unique_id_for_user') as mock_unique:
            # Pin the anonymized id so the CSV body is deterministic.
            mock_unique.return_value = '42'
            response = self.client.get(url, {})
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        # NOTE(review): user ids "2" and "7" are hard-coded; this assumes the
        # six students from setUp get a contiguous primary-key range -- confirm
        # if the test database id sequence ever changes.
        self.assertTrue(body.startswith('"User ID","Anonymized user ID"\n"2","42"\n'))
        self.assertTrue(body.endswith('"7","42"\n'))
    def test_list_report_downloads(self):
        """The endpoint renders the report store's links as url/link/name records."""
        url = reverse('list_report_downloads', kwargs={'course_id': self.course.id})
        with patch('instructor_task.models.LocalFSReportStore.links_for') as mock_links_for:
            mock_links_for.return_value = [
                ('mock_file_name_1', 'https://1.mock.url'),
                ('mock_file_name_2', 'https://2.mock.url'),
            ]
            response = self.client.get(url, {})

        expected_response = {
            "downloads": [
                {
                    "url": "https://1.mock.url",
                    "link": "<a href=\"https://1.mock.url\">mock_file_name_1</a>",
                    "name": "mock_file_name_1"
                },
                {
                    "url": "https://2.mock.url",
                    "link": "<a href=\"https://2.mock.url\">mock_file_name_2</a>",
                    "name": "mock_file_name_2"
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected_response)
    def test_calculate_grades_csv_success(self):
        """Submitting a grade report task shows the 'being generated' status message."""
        url = reverse('calculate_grades_csv', kwargs={'course_id': self.course.id})

        with patch('instructor_task.api.submit_calculate_grades_csv') as mock_cal_grades:
            mock_cal_grades.return_value = True
            response = self.client.get(url, {})
        success_status = "Your grade report is being generated! You can view the status of the generation task in the 'Pending Instructor Tasks' section."
        self.assertIn(success_status, response.content)
    def test_calculate_grades_csv_already_running(self):
        """If a grade report task is already running, the 'in progress' message is shown."""
        url = reverse('calculate_grades_csv', kwargs={'course_id': self.course.id})

        with patch('instructor_task.api.submit_calculate_grades_csv') as mock_cal_grades:
            # Simulate a duplicate submission being rejected by the task layer.
            mock_cal_grades.side_effect = AlreadyRunningError()
            response = self.client.get(url, {})
        already_running_status = "A grade report generation task is already in progress. Check the 'Pending Instructor Tasks' table for the status of the task. When completed, the report will be available for download in the table below."
        self.assertIn(already_running_status, response.content)
def test_get_students_features_csv(self):
"""
Test that some minimum of information is formatted
correctly in the response to get_students_features.
"""
url = reverse('get_students_features', kwargs={'course_id': self.course.id})
response = self.client.get(url + '/csv', {})
self.assertEqual(response['Content-Type'], 'text/csv')
    def test_get_distribution_no_feature(self):
        """
        Test that get_distribution lists available features
        when supplied no feature parameter.
        """
        url = reverse('get_distribution', kwargs={'course_id': self.course.id})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        res_json = json.loads(response.content)
        self.assertEqual(type(res_json['available_features']), list)

        # An explicitly empty 'feature' query parameter must behave the same
        # as omitting the parameter entirely.
        url = reverse('get_distribution', kwargs={'course_id': self.course.id})
        response = self.client.get(url + u'?feature=')
        self.assertEqual(response.status_code, 200)
        res_json = json.loads(response.content)
        self.assertEqual(type(res_json['available_features']), list)
def test_get_distribution_unavailable_feature(self):
"""
Test that get_distribution fails gracefully with
an unavailable feature.
"""
url = reverse('get_distribution', kwargs={'course_id': self.course.id})
response = self.client.get(url, {'feature': 'robot-not-a-real-feature'})
self.assertEqual(response.status_code, 400)
    def test_get_distribution_gender(self):
        """
        Test the gender distribution: per-choice counts and their display
        names. (The previous docstring was copy-pasted from the
        unavailable-feature test and did not describe this test.)
        """
        url = reverse('get_distribution', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {'feature': 'gender'})
        self.assertEqual(response.status_code, 200)
        res_json = json.loads(response.content)
        print res_json
        # Expects 6 students with gender 'm' — presumably created in setUp
        # (not visible here); confirm against the fixture if this changes.
        self.assertEqual(res_json['feature_results']['data']['m'], 6)
        self.assertEqual(res_json['feature_results']['choices_display_names']['m'], 'Male')
        self.assertEqual(res_json['feature_results']['data']['no_data'], 0)
        self.assertEqual(res_json['feature_results']['choices_display_names']['no_data'], 'No Data')
    def test_get_student_progress_url(self):
        """ Test that progress_url is in the successful response. """
        url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id})
        # Identify the student by URL-quoted email address.
        url += "?unique_student_identifier={}".format(
            quote(self.students[0].email.encode("utf-8"))
        )
        print url
        response = self.client.get(url)
        print response
        self.assertEqual(response.status_code, 200)
        res_json = json.loads(response.content)
        self.assertIn('progress_url', res_json)
    def test_get_student_progress_url_from_uname(self):
        """ Test that progress_url is in the successful response. """
        url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id})
        # Same as test_get_student_progress_url, but identifies the student
        # by username instead of email.
        url += "?unique_student_identifier={}".format(
            quote(self.students[0].username.encode("utf-8"))
        )
        print url
        response = self.client.get(url)
        print response
        self.assertEqual(response.status_code, 200)
        res_json = json.loads(response.content)
        self.assertIn('progress_url', res_json)
    def test_get_student_progress_url_noparams(self):
        """ Test that the endpoint returns 400 without the required query params. """
        # NOTE(review): docstring previously said "404's" but the endpoint
        # (and this assertion) uses 400 for a missing parameter.
        url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 400)
def test_get_student_progress_url_nostudent(self):
""" Test that the endpoint 400's when requesting an unknown email. """
url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id})
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPIRegradeTask(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test endpoints whereby instructors can change student grades.
    This includes resetting attempts and starting rescore tasks.

    This test does NOT test whether the actions had an effect on the
    database, that is the job of task tests and test_enrollment.
    """
    def setUp(self):
        """
        Create a course, a logged-in instructor, and one enrolled student
        with pre-existing problem state (10 attempts) to reset or rescore.
        """
        self.course = CourseFactory.create()
        self.instructor = InstructorFactory(course=self.course.location)
        self.client.login(username=self.instructor.username, password='test')
        self.student = UserFactory()
        CourseEnrollment.enroll(self.student, self.course.id)

        self.problem_urlname = 'robot-some-problem-urlname'
        # Seed state so the reset endpoints have something to act on.
        self.module_to_reset = StudentModule.objects.create(
            student=self.student,
            course_id=self.course.id,
            module_state_key=_msk_from_problem_urlname(
                self.course.id,
                self.problem_urlname
            ),
            state=json.dumps({'attempts': 10}),
        )

    def test_reset_student_attempts_deletall(self):
        """ Make sure no one can delete all students state on a problem. """
        url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id})
        # Combining all_students with delete_module must be rejected (400).
        response = self.client.get(url, {
            'problem_to_reset': self.problem_urlname,
            'all_students': True,
            'delete_module': True,
        })
        print response.content
        self.assertEqual(response.status_code, 400)

    def test_reset_student_attempts_single(self):
        """ Test reset single student attempts. """
        url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {
            'problem_to_reset': self.problem_urlname,
            'unique_student_identifier': self.student.email,
        })
        print response.content
        self.assertEqual(response.status_code, 200)
        # make sure problem attempts have been reset.
        changed_module = StudentModule.objects.get(pk=self.module_to_reset.pk)
        self.assertEqual(
            json.loads(changed_module.state)['attempts'],
            0
        )

    # mock out the function which should be called to execute the action.
    @patch.object(instructor_task.api, 'submit_reset_problem_attempts_for_all_students')
    def test_reset_student_attempts_all(self, act):
        """ Test reset all student attempts. """
        url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {
            'problem_to_reset': self.problem_urlname,
            'all_students': True,
        })
        print response.content
        self.assertEqual(response.status_code, 200)
        # The view should delegate to the (mocked) instructor-task API.
        self.assertTrue(act.called)

    def test_reset_student_attempts_missingmodule(self):
        """ Test reset for non-existant problem. """
        url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {
            'problem_to_reset': 'robot-not-a-real-module',
            'unique_student_identifier': self.student.email,
        })
        print response.content
        self.assertEqual(response.status_code, 400)

    def test_reset_student_attempts_delete(self):
        """ Test delete single student state. """
        url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {
            'problem_to_reset': self.problem_urlname,
            'unique_student_identifier': self.student.email,
            'delete_module': True,
        })
        print response.content
        self.assertEqual(response.status_code, 200)
        # make sure the module has been deleted
        self.assertEqual(
            StudentModule.objects.filter(
                student=self.module_to_reset.student,
                course_id=self.module_to_reset.course_id,
                # module_state_key=self.module_to_reset.module_state_key,
            ).count(),
            0
        )

    def test_reset_student_attempts_nonsense(self):
        """ Test failure with both unique_student_identifier and all_students. """
        url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {
            'problem_to_reset': self.problem_urlname,
            'unique_student_identifier': self.student.email,
            'all_students': True,
        })
        print response.content
        self.assertEqual(response.status_code, 400)

    @patch.object(instructor_task.api, 'submit_rescore_problem_for_student')
    def test_rescore_problem_single(self, act):
        """ Test rescoring of a single student. """
        url = reverse('rescore_problem', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {
            'problem_to_reset': self.problem_urlname,
            'unique_student_identifier': self.student.email,
        })
        print response.content
        self.assertEqual(response.status_code, 200)
        self.assertTrue(act.called)

    @patch.object(instructor_task.api, 'submit_rescore_problem_for_student')
    def test_rescore_problem_single_from_uname(self, act):
        """ Test rescoring of a single student. """
        # Same as above, but the student is identified by username.
        url = reverse('rescore_problem', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {
            'problem_to_reset': self.problem_urlname,
            'unique_student_identifier': self.student.username,
        })
        print response.content
        self.assertEqual(response.status_code, 200)
        self.assertTrue(act.called)

    @patch.object(instructor_task.api, 'submit_rescore_problem_for_all_students')
    def test_rescore_problem_all(self, act):
        """ Test rescoring for all students. """
        url = reverse('rescore_problem', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {
            'problem_to_reset': self.problem_urlname,
            'all_students': True,
        })
        print response.content
        self.assertEqual(response.status_code, 200)
        self.assertTrue(act.called)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': False})
class TestInstructorSendEmail(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Checks that only instructors have access to email endpoints, and that
    these endpoints are only accessible with courses that actually exist,
    only with valid email messages.
    """
    def setUp(self):
        """Create a course, log in as its instructor, and prepare a valid
        email payload with non-ASCII subject/message to exercise unicode."""
        self.course = CourseFactory.create()
        self.instructor = InstructorFactory(course=self.course.location)
        self.client.login(username=self.instructor.username, password='test')
        test_subject = u'\u1234 test subject'
        test_message = u'\u6824 test message'
        self.full_test_message = {
            'send_to': 'staff',
            'subject': test_subject,
            'message': test_message,
        }

    def test_send_email_as_logged_in_instructor(self):
        """A logged-in instructor with a complete payload succeeds (200)."""
        url = reverse('send_email', kwargs={'course_id': self.course.id})
        response = self.client.post(url, self.full_test_message)
        self.assertEqual(response.status_code, 200)

    def test_send_email_but_not_logged_in(self):
        """Anonymous users are rejected (403)."""
        self.client.logout()
        url = reverse('send_email', kwargs={'course_id': self.course.id})
        response = self.client.post(url, self.full_test_message)
        self.assertEqual(response.status_code, 403)

    def test_send_email_but_not_staff(self):
        """A logged-in non-staff student is rejected (403)."""
        self.client.logout()
        student = UserFactory()
        self.client.login(username=student.username, password='test')
        url = reverse('send_email', kwargs={'course_id': self.course.id})
        response = self.client.post(url, self.full_test_message)
        self.assertEqual(response.status_code, 403)

    def test_send_email_but_course_not_exist(self):
        """A nonexistent course id must not succeed."""
        url = reverse('send_email', kwargs={'course_id': 'GarbageCourse/DNE/NoTerm'})
        response = self.client.post(url, self.full_test_message)
        self.assertNotEqual(response.status_code, 200)

    def test_send_email_no_sendto(self):
        """Missing 'send_to' is a client error (400)."""
        url = reverse('send_email', kwargs={'course_id': self.course.id})
        response = self.client.post(url, {
            'subject': 'test subject',
            'message': 'test message',
        })
        self.assertEqual(response.status_code, 400)

    def test_send_email_no_subject(self):
        """Missing 'subject' is a client error (400)."""
        url = reverse('send_email', kwargs={'course_id': self.course.id})
        response = self.client.post(url, {
            'send_to': 'staff',
            'message': 'test message',
        })
        self.assertEqual(response.status_code, 400)

    def test_send_email_no_message(self):
        """Missing 'message' is a client error (400)."""
        url = reverse('send_email', kwargs={'course_id': self.course.id})
        response = self.client.post(url, {
            'send_to': 'staff',
            'subject': 'test subject',
        })
        self.assertEqual(response.status_code, 400)
class MockCompletionInfo(object):
    """Mock replacement for ``get_task_completion_info``.

    Successive calls alternate between an "errored" tuple and a
    "completed" tuple, so callers see both outcomes.
    """
    times_called = 0

    def mock_get_task_completion_info(self, *args):  # pylint: disable=unused-argument
        """Return (success, message); odd calls fail, even calls succeed."""
        self.times_called += 1
        if self.times_called % 2 != 0:
            return False, 'Task Errored In Some Way'
        return True, 'Task Completed'
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPITaskLists(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test instructor task list endpoint.
    """

    class FakeTask(object):
        """ Fake task object """
        # Attribute names the task-list view serializes for each task.
        FEATURES = [
            'task_type',
            'task_input',
            'task_id',
            'requester',
            'task_state',
            'created',
            'status',
            'task_message',
            'duration_sec'
        ]

        def __init__(self, completion):
            """Build a fake task; `completion` is a callable returning
            (success, message), used to populate status/task_message."""
            for feature in self.FEATURES:
                setattr(self, feature, 'expected')
            # created needs to be a datetime
            self.created = datetime.datetime(2013, 10, 25, 11, 42, 35)
            # set 'status' and 'task_message' attrs
            success, task_message = completion()
            if success:
                self.status = "Complete"
            else:
                self.status = "Incomplete"
            self.task_message = task_message
            # Set 'task_output' attr, which will be parsed to the 'duration_sec' attr.
            self.task_output = '{"duration_ms": 1035000}'
            self.duration_sec = 1035000 / 1000.0

        def make_invalid_output(self):
            """Munge task_output to be invalid json"""
            self.task_output = 'HI MY NAME IS INVALID JSON'
            # This should be given the value of 'unknown' if the task output
            # can't be properly parsed
            self.duration_sec = 'unknown'

        def to_dict(self):
            """ Convert fake task to dictionary representation. """
            attr_dict = {key: getattr(self, key) for key in self.FEATURES}
            attr_dict['created'] = attr_dict['created'].isoformat()
            return attr_dict

    def setUp(self):
        """
        Create a course, instructor, and enrolled student with problem state,
        plus seven fake tasks (the last with unparseable task output).
        """
        self.course = CourseFactory.create()
        self.instructor = InstructorFactory(course=self.course.location)
        self.client.login(username=self.instructor.username, password='test')
        self.student = UserFactory()
        CourseEnrollment.enroll(self.student, self.course.id)

        self.problem_urlname = 'robot-some-problem-urlname'
        self.module = StudentModule.objects.create(
            student=self.student,
            course_id=self.course.id,
            module_state_key=_msk_from_problem_urlname(
                self.course.id,
                self.problem_urlname
            ),
            state=json.dumps({'attempts': 10}),
        )
        mock_factory = MockCompletionInfo()
        self.tasks = [self.FakeTask(mock_factory.mock_get_task_completion_info) for _ in xrange(7)]
        self.tasks[-1].make_invalid_output()

    def tearDown(self):
        """
        Undo all patches.
        """
        patch.stopall()

    @patch.object(instructor_task.api, 'get_running_instructor_tasks')
    def test_list_instructor_tasks_running(self, act):
        """ Test list of all running tasks. """
        act.return_value = self.tasks
        url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id})
        # The same MockCompletionInfo alternation must be used when the view
        # recomputes completion info, so the serialized statuses match.
        mock_factory = MockCompletionInfo()
        with patch('instructor.views.api.get_task_completion_info') as mock_completion_info:
            mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
            response = self.client.get(url, {})
        self.assertEqual(response.status_code, 200)

        # check response
        self.assertTrue(act.called)
        expected_tasks = [ftask.to_dict() for ftask in self.tasks]
        actual_tasks = json.loads(response.content)['tasks']
        for exp_task, act_task in zip(expected_tasks, actual_tasks):
            self.assertDictEqual(exp_task, act_task)
        self.assertEqual(actual_tasks, expected_tasks)

    @patch.object(instructor_task.api, 'get_instructor_task_history')
    def test_list_background_email_tasks(self, act):
        """Test list of background email tasks."""
        act.return_value = self.tasks
        url = reverse('list_background_email_tasks', kwargs={'course_id': self.course.id})
        mock_factory = MockCompletionInfo()
        with patch('instructor.views.api.get_task_completion_info') as mock_completion_info:
            mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
            response = self.client.get(url, {})
        self.assertEqual(response.status_code, 200)

        # check response
        self.assertTrue(act.called)
        expected_tasks = [ftask.to_dict() for ftask in self.tasks]
        actual_tasks = json.loads(response.content)['tasks']
        for exp_task, act_task in zip(expected_tasks, actual_tasks):
            self.assertDictEqual(exp_task, act_task)
        self.assertEqual(actual_tasks, expected_tasks)

    @patch.object(instructor_task.api, 'get_instructor_task_history')
    def test_list_instructor_tasks_problem(self, act):
        """ Test list task history for problem. """
        act.return_value = self.tasks
        url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id})
        mock_factory = MockCompletionInfo()
        with patch('instructor.views.api.get_task_completion_info') as mock_completion_info:
            mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
            response = self.client.get(url, {
                'problem_urlname': self.problem_urlname,
            })
        self.assertEqual(response.status_code, 200)

        # check response
        self.assertTrue(act.called)
        expected_tasks = [ftask.to_dict() for ftask in self.tasks]
        actual_tasks = json.loads(response.content)['tasks']
        for exp_task, act_task in zip(expected_tasks, actual_tasks):
            self.assertDictEqual(exp_task, act_task)
        self.assertEqual(actual_tasks, expected_tasks)

    @patch.object(instructor_task.api, 'get_instructor_task_history')
    def test_list_instructor_tasks_problem_student(self, act):
        """ Test list task history for problem AND student. """
        act.return_value = self.tasks
        url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id})
        mock_factory = MockCompletionInfo()
        with patch('instructor.views.api.get_task_completion_info') as mock_completion_info:
            mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
            response = self.client.get(url, {
                'problem_urlname': self.problem_urlname,
                'unique_student_identifier': self.student.email,
            })
        self.assertEqual(response.status_code, 200)

        # check response
        self.assertTrue(act.called)
        expected_tasks = [ftask.to_dict() for ftask in self.tasks]
        actual_tasks = json.loads(response.content)['tasks']
        for exp_task, act_task in zip(expected_tasks, actual_tasks):
            self.assertDictEqual(exp_task, act_task)
        self.assertEqual(actual_tasks, expected_tasks)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
@override_settings(ANALYTICS_SERVER_URL="http://robotanalyticsserver.netbot:900/")
@override_settings(ANALYTICS_API_KEY="robot_api_key")
class TestInstructorAPIAnalyticsProxy(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test instructor analytics proxy endpoint.
    """

    class FakeProxyResponse(object):
        """ Fake successful requests response object. """
        def __init__(self):
            self.status_code = requests.status_codes.codes.OK
            self.content = '{"test_content": "robot test content"}'

    class FakeBadProxyResponse(object):
        """ Fake strange-failed requests response object. """
        def __init__(self):
            # Deliberately not an integer HTTP code, to exercise the
            # proxy's handling of an unexpected upstream status.
            self.status_code = 'notok.'
            self.content = '{"test_content": "robot test content"}'

    def setUp(self):
        """Create a course and log in as its instructor."""
        self.course = CourseFactory.create()
        self.instructor = InstructorFactory(course=self.course.location)
        self.client.login(username=self.instructor.username, password='test')

    @patch.object(instructor.views.api.requests, 'get')
    def test_analytics_proxy_url(self, act):
        """ Test legacy analytics proxy url generation. """
        act.return_value = self.FakeProxyResponse()

        url = reverse('proxy_legacy_analytics', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {
            'aname': 'ProblemGradeDistribution'
        })
        print response.content
        self.assertEqual(response.status_code, 200)

        # check request url: the proxy must combine the configured server
        # URL, the requested analytic name, the course id, and the API key.
        expected_url = "{url}get?aname={aname}&course_id={course_id}&apikey={api_key}".format(
            url="http://robotanalyticsserver.netbot:900/",
            aname="ProblemGradeDistribution",
            course_id=self.course.id,
            api_key="robot_api_key",
        )
        act.assert_called_once_with(expected_url)

    @patch.object(instructor.views.api.requests, 'get')
    def test_analytics_proxy(self, act):
        """
        Test legacy analytics content proxyin, actg.
        """
        act.return_value = self.FakeProxyResponse()

        url = reverse('proxy_legacy_analytics', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {
            'aname': 'ProblemGradeDistribution'
        })
        print response.content
        self.assertEqual(response.status_code, 200)

        # check response: upstream JSON is passed through unchanged.
        self.assertTrue(act.called)
        expected_res = {'test_content': "robot test content"}
        self.assertEqual(json.loads(response.content), expected_res)

    @patch.object(instructor.views.api.requests, 'get')
    def test_analytics_proxy_reqfailed(self, act):
        """ Test proxy when server reponds with failure. """
        act.return_value = self.FakeBadProxyResponse()

        url = reverse('proxy_legacy_analytics', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {
            'aname': 'ProblemGradeDistribution'
        })
        print response.content
        self.assertEqual(response.status_code, 500)

    @patch.object(instructor.views.api.requests, 'get')
    def test_analytics_proxy_missing_param(self, act):
        """ Test proxy when missing the aname query parameter. """
        act.return_value = self.FakeProxyResponse()

        url = reverse('proxy_legacy_analytics', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {})
        print response.content
        self.assertEqual(response.status_code, 400)
        # No upstream request should be made on a client error.
        self.assertFalse(act.called)
class TestInstructorAPIHelpers(TestCase):
    """ Test helpers for instructor.api """
    def test_split_input_list(self):
        """Splitting on commas/newlines/CRs yields a clean list of emails."""
        strings = []
        lists = []
        strings.append("Lorem@ipsum.dolor, sit@amet.consectetur\nadipiscing@elit.Aenean\r convallis@at.lacus\r, ut@lacinia.Sed")
        lists.append(['Lorem@ipsum.dolor', 'sit@amet.consectetur', 'adipiscing@elit.Aenean', 'convallis@at.lacus', 'ut@lacinia.Sed'])

        for (stng, lst) in zip(strings, lists):
            self.assertEqual(_split_input_list(stng), lst)

    def test_split_input_list_unicode(self):
        """str and unicode inputs split identically; non-splittable unicode
        comes back as a single-element list."""
        self.assertEqual(_split_input_list('robot@robot.edu, robot2@robot.edu'), ['robot@robot.edu', 'robot2@robot.edu'])
        self.assertEqual(_split_input_list(u'robot@robot.edu, robot2@robot.edu'), ['robot@robot.edu', 'robot2@robot.edu'])
        self.assertEqual(_split_input_list(u'robot@robot.edu, robot2@robot.edu'), [u'robot@robot.edu', 'robot2@robot.edu'])
        scary_unistuff = unichr(40960) + u'abcd' + unichr(1972)
        self.assertEqual(_split_input_list(scary_unistuff), [scary_unistuff])

    def test_msk_from_problem_urlname(self):
        """Urlnames (with or without a .xml suffix, capa or other xblock
        categories) map to the expected i4x module state keys."""
        course_id = 'RobotU/Robots101/3001_Spring'
        capa_urlname = 'capa_urlname'
        capa_urlname_xml = 'capa_urlname.xml'
        xblock_urlname = 'notaproblem/someothername'
        xblock_urlname_xml = 'notaproblem/someothername.xml'

        capa_msk = 'i4x://RobotU/Robots101/problem/capa_urlname'
        xblock_msk = 'i4x://RobotU/Robots101/notaproblem/someothername'

        for urlname in [capa_urlname, capa_urlname_xml]:
            self.assertEqual(
                _msk_from_problem_urlname(course_id, urlname),
                capa_msk
            )

        for urlname in [xblock_urlname, xblock_urlname_xml]:
            self.assertEqual(
                _msk_from_problem_urlname(course_id, urlname),
                xblock_msk
            )

    @raises(ValueError)
    def test_msk_from_problem_urlname_error(self):
        """A malformed course id makes _msk_from_problem_urlname raise."""
        args = ('notagoodcourse', 'L2Node1')
        _msk_from_problem_urlname(*args)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestDueDateExtensions(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test data dumps for reporting.
    """

    def setUp(self):
        """
        Fixtures: a course with three weekly sections (all due 2010-05-12)
        and one homework under week1, plus three users with StudentModule
        state on various combinations of those units, and an instructor.
        """
        due = datetime.datetime(2010, 5, 12, 2, 42, tzinfo=utc)
        course = CourseFactory.create()
        week1 = ItemFactory.create(due=due)
        week2 = ItemFactory.create(due=due)
        week3 = ItemFactory.create(due=due)
        course.children = [week1.location.url(), week2.location.url(),
                           week3.location.url()]

        homework = ItemFactory.create(
            parent_location=week1.location,
            due=due
        )
        week1.children = [homework.location.url()]

        # user1: state on all three weeks and the homework.
        user1 = UserFactory.create()
        StudentModule(
            state='{}',
            student_id=user1.id,
            course_id=course.id,
            module_state_key=week1.location.url()).save()
        StudentModule(
            state='{}',
            student_id=user1.id,
            course_id=course.id,
            module_state_key=week2.location.url()).save()
        StudentModule(
            state='{}',
            student_id=user1.id,
            course_id=course.id,
            module_state_key=week3.location.url()).save()
        StudentModule(
            state='{}',
            student_id=user1.id,
            course_id=course.id,
            module_state_key=homework.location.url()).save()

        # user2 and user3: state on week1 and the homework only.
        user2 = UserFactory.create()
        StudentModule(
            state='{}',
            student_id=user2.id,
            course_id=course.id,
            module_state_key=week1.location.url()).save()
        StudentModule(
            state='{}',
            student_id=user2.id,
            course_id=course.id,
            module_state_key=homework.location.url()).save()

        user3 = UserFactory.create()
        StudentModule(
            state='{}',
            student_id=user3.id,
            course_id=course.id,
            module_state_key=week1.location.url()).save()
        StudentModule(
            state='{}',
            student_id=user3.id,
            course_id=course.id,
            module_state_key=homework.location.url()).save()

        self.course = course
        self.week1 = week1
        self.homework = homework
        self.week2 = week2
        self.user1 = user1
        self.user2 = user2

        self.instructor = InstructorFactory(course=course.location)
        self.client.login(username=self.instructor.username, password='test')

    def test_change_due_date(self):
        """Granting an extension records the new due date for the student."""
        url = reverse('change_due_date', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {
            'student': self.user1.username,
            'url': self.week1.location.url(),
            'due_datetime': '12/30/2013 00:00'
        })
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(datetime.datetime(2013, 12, 30, 0, 0, tzinfo=utc),
                         get_extended_due(self.course, self.week1, self.user1))

    def test_reset_date(self):
        """Resetting removes a previously granted extension."""
        self.test_change_due_date()
        url = reverse('reset_due_date', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {
            'student': self.user1.username,
            'url': self.week1.location.url(),
        })
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(None,
                         get_extended_due(self.course, self.week1, self.user1))

    def test_show_unit_extensions(self):
        """Listing extensions for a unit returns the extended students."""
        self.test_change_due_date()
        url = reverse('show_unit_extensions',
                      kwargs={'course_id': self.course.id})
        response = self.client.get(url, {'url': self.week1.location.url()})
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(json.loads(response.content), {
            u'data': [{u'Extended Due Date': u'2013-12-30 00:00',
                       u'Full Name': self.user1.profile.name,
                       u'Username': self.user1.username}],
            u'header': [u'Username', u'Full Name', u'Extended Due Date'],
            u'title': u'Users with due date extensions for %s' %
            self.week1.display_name})

    def test_show_student_extensions(self):
        """Listing extensions for a student returns the extended units."""
        self.test_change_due_date()
        url = reverse('show_student_extensions',
                      kwargs={'course_id': self.course.id})
        response = self.client.get(url, {'student': self.user1.username})
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(json.loads(response.content), {
            u'data': [{u'Extended Due Date': u'2013-12-30 00:00',
                       u'Unit': self.week1.display_name}],
            u'header': [u'Unit', u'Extended Due Date'],
            u'title': u'Due date extensions for %s (%s)' % (
                self.user1.profile.name, self.user1.username)})
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPISurveyDownload(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test instructor survey endpoint.
    """
    def setUp(self):
        """
        Create a course, a logged-in instructor, four users with assorted
        profile data / account standings, and four survey submissions (the
        last belonging to a different course, so it must be excluded).
        """
        # NOTE(review): this local factory appears unused below — confirm
        # whether it can be removed.
        class _UserProfileFactory(UserProfileFactory):
            year_of_birth = None

        self.course = CourseFactory.create()
        self.instructor = InstructorFactory(course=self.course.location)
        self.client.login(username=self.instructor.username, password='test')
        self.user1 = UserFactory.create(
            profile__gender='m',
            profile__year_of_birth=1980,
            profile__level_of_education='p',
        )
        self.user1_standing = UserStandingFactory.create(
            user=self.user1,
            account_status=UserStanding.ACCOUNT_ENABLED,
            changed_by=self.user1,
        )
        self.user2 = UserFactory.create(
            profile__gender='foo',
            profile__year_of_birth=None,
            profile__level_of_education='bar',
        )
        self.user2_standing = UserStandingFactory.create(
            user=self.user2,
            account_status=UserStanding.ACCOUNT_DISABLED,
            changed_by=self.user2,
        )
        self.user3 = UserFactory.create(
            profile__gender=None,
            profile__year_of_birth=None,
            profile__level_of_education=None,
        )
        self.user4 = UserFactory.create()
        self.submission1 = {
            'course_id': self.course.id,
            'unit_id': '11111111111111111111111111111111',
            'user': self.user1,
            'survey_name': 'survey #1',
            'survey_answer': '{"Q1": "1", "Q2": ["1", "2"], "Q3": "submission #1"}',
        }
        self.submission2 = {
            'course_id': self.course.id,
            'unit_id': '11111111111111111111111111111111',
            'user': self.user2,
            'survey_name': 'survey #1',
            'survey_answer': '{"Q3": "submission #2", "Q1": "1", "Q2": "2"}',
        }
        self.submission3 = {
            'course_id': self.course.id,
            'unit_id': '22222222222222222222222222222222',
            'user': self.user3,
            'survey_name': 'survey #2',
            'survey_answer': '{"Q1": "", "Q2": "", "Q3": "", "Q4": "extra"}',
        }
        # Belongs to a different course and must not appear in the CSV.
        self.submission4 = {
            'course_id': 'edX/test/dummy',
            'unit_id': '22222222222222222222222222222222',
            'user': self.user4,
            'survey_name': 'survey #2',
            'survey_answer': '{"Q1": "1", "Q2": "2", "Q3": "submission #4"}',
        }

    def test_get_survey(self):
        """
        Test the CSV output for the survey result.
        """
        submission1 = SurveySubmissionFactory.create(**self.submission1)
        submission2 = SurveySubmissionFactory.create(**self.submission2)
        submission3 = SurveySubmissionFactory.create(**self.submission3)
        SurveySubmissionFactory.create(**self.submission4)
        url = reverse('get_survey', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {})
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.rstrip('\n').replace('\r', '')
        rows = body.split('\n')
        # Bug fix: was `self.assertTrue(4, len(rows))`, which always passes
        # because the second argument of assertTrue is the failure message.
        self.assertEqual(4, len(rows))
        #Note(#EDX-501): Modified temporarily.
        #self.assertEqual(rows[0], '"Unit ID","Survey Name","Created","User Name","Gender","Year of Birth","Level of Education","Disabled","Q1","Q2","Q3","Q4"')
        self.assertEqual(rows[0], '"Unit ID","Survey Name","Created","User Name","Disabled","Q1","Q2","Q3","Q4"')
        #self.assertEqual(
        #    rows[1],
        #    '"11111111111111111111111111111111","survey #1","%s","%s","Male","1980","Doctorate","","1","1,2","submission #1",""'
        #    % (submission1.created, submission1.user.username)
        #)
        #self.assertEqual(
        #    rows[2],
        #    '"11111111111111111111111111111111","survey #1","%s","%s","foo","","bar","disabled","1","2","submission #2",""'
        #    % (submission2.created, submission2.user.username)
        #)
        #self.assertEqual(
        #    rows[3],
        #    '"22222222222222222222222222222222","survey #2","%s","%s","","","","","","","","extra"'
        #    % (submission3.created, submission3.user.username)
        #)
        self.assertEqual(
            rows[1],
            '"11111111111111111111111111111111","survey #1","%s","%s","","1","1,2","submission #1",""'
            % (submission1.created, submission1.user.username)
        )
        self.assertEqual(
            rows[2],
            '"11111111111111111111111111111111","survey #1","%s","%s","disabled","1","2","submission #2",""'
            % (submission2.created, submission2.user.username)
        )
        self.assertEqual(
            rows[3],
            '"22222222222222222222222222222222","survey #2","%s","%s","","","","","extra"'
            % (submission3.created, submission3.user.username)
        )

    def test_get_survey_when_data_is_empty(self):
        """With no submissions, the CSV contains only the header row."""
        url = reverse('get_survey', kwargs={'course_id': self.course.id})
        response = self.client.get(url, {})
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.rstrip('\n').replace('\r', '')
        rows = body.split('\n')
        # Bug fix: was `self.assertTrue(1, len(rows))` (always true).
        self.assertEqual(1, len(rows))
        #Note(#EDX-501): Modified temporarily.
        #self.assertEqual(rows[0], '"Unit ID","Survey Name","Created","User Name","Gender","Year of Birth","Level of Education","Disabled"')
        self.assertEqual(rows[0], '"Unit ID","Survey Name","Created","User Name","Disabled"')
#class TestInstructorAPIProgressModules(ModuleStoreTestCase, LoginEnrollmentTestCase):
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPIProgressModules(ModuleStoreTestCase):
    """
    Test instructor progress modules endpoint.
    """
    def setUp(self):
        """Create a course and log in as its instructor."""
        self.course = CourseFactory.create()
        self.instructor = InstructorFactory(course=self.course.location)
        self.client.login(username=self.instructor.username, password='test')

    def test_create_pgreport_url(self):
        """ Test create url generation. """
        create_url = reverse('create_pgreport_csv', kwargs={'course_id': self.course.id})
        # Successful submission path.
        with patch('instructor_task.api.submit_create_pgreport_csv') as mock_task:
            mock_task.return_value = True
            response = self.client.get(create_url, {})

        mock_task.assert_called_once_with(ANY, self.course.id)
        success_status = "Report is being generated! You can view the status of the generation task in the 'Pending Instructor Tasks' section."
        self.assertEqual(response.status_code, 200)
        self.assertIn(success_status, response.content)

        # Duplicate-submission path: the task API raises AlreadyRunningError.
        with patch('instructor_task.api.submit_create_pgreport_csv') as mock_task2:
            mock_task2.side_effect = AlreadyRunningError()
            response = self.client.get(create_url)

        mock_task2.assert_called_once_with(ANY, self.course.id)
        already_running_status = "Report generation task is already in progress. Check the 'Pending Instructor Tasks' table for the status of the task. When completed, the report will be available for download in the table below."
        self.assertIn(already_running_status, response.content)

    def test_get_pgreport_url(self):
        """ Test get url generation. """
        get_url = reverse('get_pgreport_csv', kwargs={'course_id': self.course.id})
        # Fake contentstore whose find() yields streamable CSV chunks.
        cstore_mock = Mock()
        content_mock = Mock()
        content_mock.stream_data.return_value = ["row1", "row2", "row3"]
        cstore_mock.find.return_value = content_mock

        with patch('instructor.views.api.contentstore', return_value=cstore_mock) as pmock:
            response = self.client.get(get_url, {})

        self.assertEqual(response.status_code, 200)
        pmock.assert_called_once_with()
        cstore_mock.find.assert_called_once_with(ANY, throw_on_not_found=True, as_stream=True)
        content_mock.stream_data.assert_called_once_with()
        # The streamed chunks are concatenated into the response body.
        self.assertEquals(response.content, 'row1row2row3')

        # Missing report: contentstore.find raises NotFoundError -> 403.
        cstore_mock.reset_mock()
        cstore_mock.find.side_effect = NotFoundError()
        with patch('instructor.views.api.contentstore', return_value=cstore_mock) as p2mock:
            response = self.client.get(get_url, {})

        p2mock.assert_called_once_with()
        cstore_mock.find.assert_called_once_with(ANY, throw_on_not_found=True, as_stream=True)
        self.assertEqual(response.status_code, 403)
| WatanabeYasumasa/edx-platform | lms/djangoapps/instructor/tests/test_api.py | Python | agpl-3.0 | 97,192 | [
"VisIt"
] | 7be0515173bfabbce8c7903e6e90d59f2c0933c2bd44acb187937ade53daa89e |
"""
End-to-end tests for the main LMS Dashboard (aka, Student Dashboard).
"""
from common.test.acceptance.fixtures.course import CourseFixture, XBlockFixtureDesc
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.dashboard import DashboardPage
from common.test.acceptance.tests.helpers import UniqueCourseTest, generate_course_key
# str.format templates applied to a datetime passed as the ``dt`` keyword,
# e.g. DEFAULT_SHORT_DATE_FORMAT.format(dt=some_datetime) -> "Jan 5, 2024".
DEFAULT_SHORT_DATE_FORMAT = '{dt:%b} {dt.day}, {dt.year}'
# Same as above but with a zero-padded 24-hour HH:MM time appended.
TEST_DATE_FORMAT = '{dt:%b} {dt.day}, {dt.year} {dt.hour:02}:{dt.minute:02}'
class BaseLmsDashboardTestMultiple(UniqueCourseTest):
    """Base test suite for the LMS Student Dashboard with multiple courses.

    ``setUp`` installs three test courses (audit / verified / credit
    enrollment modes), enrolls one freshly-created user in each, and lands
    the authenticated user on the dashboard page.
    """

    def setUp(self):
        """
        Initializes the components (page objects, courses, users) for this test suite
        """
        # Some parameters are provided by the parent setUp() routine, such as the following:
        # self.course_id, self.course_info, self.unique_id
        super().setUp()

        # Load page objects for use by the tests
        self.dashboard_page = DashboardPage(self.browser)

        # Per-course configuration; dict insertion order (A, B, C) is the
        # order in which courses are installed and enrollments created.
        self.courses = {
            'A': {
                'org': 'test_org',
                'number': self.unique_id,
                'run': 'test_run_A',
                'display_name': 'Test Course A',
                'enrollment_mode': 'audit',
                'cert_name_long': 'Certificate of Audit Achievement'
            },
            'B': {
                'org': 'test_org',
                'number': self.unique_id,
                'run': 'test_run_B',
                'display_name': 'Test Course B',
                'enrollment_mode': 'verified',
                'cert_name_long': 'Certificate of Verified Achievement'
            },
            'C': {
                'org': 'test_org',
                'number': self.unique_id,
                'run': 'test_run_C',
                'display_name': 'Test Course C',
                'enrollment_mode': 'credit',
                'cert_name_long': 'Certificate of Credit Achievement'
            }
        }

        self.username = "test_{uuid}".format(uuid=self.unique_id[0:6])
        self.email = f"{self.username}@example.com"

        # Filled in by _install_course(), keyed like self.courses.
        self.course_keys = {}
        self.course_fixtures = {}

        for key, value in self.courses.items():
            self._install_course(key, value)

        # Navigate the authenticated, enrolled user to the dashboard page and get testing!
        self.dashboard_page.visit()

    def _install_course(self, key, value):
        """Install one course fixture and enroll the test user in it.

        Builds the course key and fixture from the ``value`` config dict,
        records both in ``self.course_keys`` / ``self.course_fixtures``
        under ``key``, and authenticates/enrolls ``self.username`` with the
        configured enrollment mode.
        """
        course_key = generate_course_key(
            value['org'],
            value['number'],
            value['run'],
        )

        course_fixture = CourseFixture(
            value['org'],
            value['number'],
            value['run'],
            value['display_name'],
        )

        course_fixture.add_advanced_settings({
            "social_sharing_url": {"value": "http://custom/course/url"},
            "cert_name_long": {"value": value['cert_name_long']}
        })

        # Minimal course skeleton: one chapter with two subsections plus a
        # staff-only (hidden) subsection to exercise visibility handling.
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section 1').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection 1,1').add_children(
                    XBlockFixtureDesc('problem', 'Test Problem 1', data='<problem>problem 1 dummy body</problem>'),
                    XBlockFixtureDesc('html', 'html 1', data="<html>html 1 dummy body</html>"),
                    XBlockFixtureDesc('problem', 'Test Problem 2', data="<problem>problem 2 dummy body</problem>"),
                    XBlockFixtureDesc('html', 'html 2', data="<html>html 2 dummy body</html>"),
                ),
                XBlockFixtureDesc('sequential', 'Test Subsection 1,2').add_children(
                    XBlockFixtureDesc('problem', 'Test Problem 3', data='<problem>problem 3 dummy body</problem>'),
                ),
                XBlockFixtureDesc(
                    'sequential', 'Test HIDDEN Subsection', metadata={'visible_to_staff_only': True}
                ).add_children(
                    XBlockFixtureDesc('problem', 'Test HIDDEN Problem', data='<problem>hidden problem</problem>'),
                ),
            )
        ).install()

        self.course_keys[key] = course_key
        self.course_fixtures[key] = course_fixture

        # Create the test user, register them for the course, and authenticate.
        # The same username/email is reused, so the first visit creates the
        # user and subsequent visits only add enrollments.
        AutoAuthPage(
            self.browser,
            username=self.username,
            email=self.email,
            course_id=course_key,
            enrollment_mode=value['enrollment_mode']
        ).visit()
class LmsDashboardA11yTest(BaseLmsDashboardTestMultiple):
    """
    Accessibility (a11y) checks for the LMS student dashboard.
    """
    a11y = True

    def test_dashboard_course_listings_a11y(self):
        """
        Audit the dashboard course listings for accessibility violations.
        """
        # Known failures tracked by open tickets; excluded until fixed.
        ignored_rules = [
            'aria-valid-attr',  # TODO: LEARNER-6611 & LEARNER-6865
            'button-name',  # TODO: AC-935
            'landmark-no-duplicate-banner',  # TODO: AC-934
            'landmark-complementary-is-top-level',  # TODO: AC-939
            'region',  # TODO: AC-932
        ]
        audit_config = self.dashboard_page.a11y_audit.config
        audit_config.set_rules({"ignore": ignored_rules})

        # All three courses installed by the base suite should be listed.
        listed_courses = self.dashboard_page.get_courses()
        assert len(listed_courses) == 3

        self.dashboard_page.a11y_audit.check_for_accessibility_errors()
| eduNEXT/edunext-platform | common/test/acceptance/tests/lms/test_lms_dashboard.py | Python | agpl-3.0 | 5,617 | [
"VisIt"
] | 58f8371c812aa759cd246b37152d952822c1533fd2b3062888656f1223a6451b |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.