| text (string, 12-1.05M) | repo_name (string, 5-86) | path (string, 4-191) | language (1 class) | license (15 values) | size (int32, 12-1.05M) | keyword (list, 1-23) | text_hash (string, 64) |
|---|---|---|---|---|---|---|---|
# Copyright (c) 2015 James Hensman
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from ..core import GP
from . import GPLVM
from .. import mappings
class BCGPLVM(GPLVM):
"""
Back constrained Gaussian Process Latent Variable Model
:param Y: observed data
:type Y: np.ndarray
:param input_dim: latent dimensionality
:type input_dim: int
:param mapping: mapping for back constraint
:type mapping: GPy.core.Mapping object
"""
def __init__(self, Y, input_dim, kernel=None, mapping=None):
if mapping is None:
mapping = mappings.MLP(input_dim=Y.shape[1],
output_dim=input_dim,
hidden_dim=10)
else:
            assert mapping.input_dim == Y.shape[1], "mapping input_dim must equal the data dimension Y.shape[1]"
            assert mapping.output_dim == input_dim, "mapping output_dim must equal the latent dimensionality input_dim"
super(BCGPLVM, self).__init__(Y, input_dim, X=mapping.f(Y), kernel=kernel, name="bcgplvm")
self.unlink_parameter(self.X)
self.mapping = mapping
self.link_parameter(self.mapping)
self.X = self.mapping.f(self.Y)
def parameters_changed(self):
self.X = self.mapping.f(self.Y)
GP.parameters_changed(self)
Xgradient = self.kern.gradients_X(self.grad_dict['dL_dK'], self.X, None)
self.mapping.update_gradients(Xgradient, self.Y)
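# Editorial sketch (not part of the original file): minimal usage, assuming the
# standard GPy API where this class is exposed as GPy.models.BCGPLVM.
#
#     import numpy as np
#     import GPy
#     Y = np.random.randn(40, 5)              # 40 observations, 5 output dims
#     m = GPy.models.BCGPLVM(Y, input_dim=2)  # MLP back constraint by default
#     m.optimize()
#     X_latent = m.mapping.f(Y)               # latent points are a function of Y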
| SheffieldML/GPy | GPy/models/bcgplvm.py | Python | bsd-3-clause | 1,475 | ["Gaussian"] | 35a0932fbb41ded2bbfb71ee578b00cb78455ba132b6ca022a73a3f02721f3ae |
"""
WhatsUP: astronomical object suggestions for Las Cumbres Observatory Global Telescope Network
Copyright (C) 2014-2015 LCOGT
models.py - Database schemas
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
"""
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext as _
APERTURES = (
    ('1m0', '1-meter'), ('2m0', '2-meter'), ('0m4', '0.4-meter'), ('any', 'Any'), ('sml', '1m and 0.4m only'))
FILTERS = (('B', 'Bessell-B'),
('I', 'Bessell-I'),
('R', 'Bessell-R'),
('V', 'Bessell-V'),
('H-Alpha', 'H Alpha'),
('H-Beta', 'H Beta'),
('OIII', 'OIII'),
('Y', 'PanSTARRS-Y'),
('zs', 'PanSTARRS-Z'),
('gp', 'SDSS-g′'),
('ip', 'SDSS-i′'),
('rp', 'SDSS-r′'),
('up', 'SDSS-u′'),
('solar', 'Solar (V+R)')
)
CATEGORIES = (
('3.6.4', 'Star cluster'),
('3.6.4.1', 'Open Cluster'),
('3.6.4.2', 'Globular Cluster'),
('4', 'Nebula'),
('4.1.2', 'Star-forming Nebula'),
('4.1.3', 'Planetary Nebula'),
('4.1.4', 'Supernova Remnant'),
    ('5', 'Galaxy'),
('5.1.1', 'Spiral Galaxy'),
('5.1.2', 'Barred Spiral Galaxy'),
('5.1.4', 'Elliptical Galaxy'),
('5.1.6', 'Irregular Galaxies'),
('5.5', 'Galaxy Groups'),
)
class Constellation(models.Model):
name = models.CharField(max_length=20)
shortname = models.CharField(max_length=3)
class Meta:
verbose_name = _('Constellation')
verbose_name_plural = _('Constellations')
def __unicode__(self):
return u"%s" % self.name
class Target(models.Model):
name = models.CharField(max_length=20)
description = models.TextField(null=True, blank=True)
ra = models.FloatField(db_index=True, default=0.0)
dec = models.FloatField(default=0.0)
avm_code = models.CharField(max_length=50, null=True, blank=True)
avm_desc = models.CharField(max_length=50, null=True, blank=True)
constellation = models.ForeignKey(Constellation, null=True, blank=True, on_delete=models.CASCADE)
best = models.BooleanField("Editor's pick", default=False)
class Meta:
verbose_name = _('Target')
verbose_name_plural = _('Targets')
ordering = ['name', ]
def __unicode__(self):
return u"%s" % self.name
class Params(models.Model):
target = models.ForeignKey(Target, related_name='parameters', on_delete=models.CASCADE)
filters = models.CharField('Filter name', choices=FILTERS, max_length=15)
exposure = models.FloatField(default=1)
aperture = models.CharField(max_length=3, choices=APERTURES, default='1m0')
class Meta:
verbose_name = _('Observation Parameter')
ordering = ['target', 'aperture']
def __unicode__(self):
return u"%s for %s" % (self.filters, self.target)
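# Editorial sketch (not part of the original file): how these models would
# typically be queried, assuming standard Django ORM usage.
#
#     picks = Target.objects.filter(best=True).select_related('constellation')
#     for t in picks:
#         for p in t.parameters.all():  # related_name='parameters' on Params
#             print(p.filters, p.exposure, p.aperture)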
| LCOGT/whatsup | whatsup/models.py | Python | gpl-3.0 | 3,519 | ["Galaxy"] | 4a56054b099592100f24ce0d63fbc4176535f27a9205d92f0d909fbb0ff52438 |
"""Reproduce by simulation of PCA estimation using wheelerdata"""
import sys, os
import numpy as np
from numpy.random import RandomState
import argparse
import json
from sklearn.decomposition import PCA
from simfMRI.noise import white
from wheelerdata.load.meta import get_data
from fmrilearn.analysis import fir
from wheelerexp.base import Space
from wheelerexp.base import DecomposeExpReproduction
# ---------------------------------------------------------------------------
# Process argv
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(
description="Calculate average FIRs using simulated Wheelerdata")
parser.add_argument("--name",
help="The basename of this experiment")
parser.add_argument("--cond",
help=("Name from data to use as labels"))
parser.add_argument("--index",
help=("Name from data for a trial index"))
parser.add_argument("--data",
help=("The name of the Wheelerdata set"))
parser.add_argument("--TR", type=float,
help=("The TR of the data"))
parser.add_argument("--trname", default="TR",
help=("Name of the TRs in data"))
parser.add_argument("--smooth", default=False,
help=("Smooth data before averaging"))
parser.add_argument("--filtfile", default=None,
        help=("Filter the labels based on a JSON file"))
parser.add_argument("--n_features", default=10, type=int,
help=("The total number of features"))
parser.add_argument("--n_univariate", default=None, type=int,
help=("The number of boxcar features"))
parser.add_argument("--n_accumulator", default=None, type=int,
help=("The number of accumulator features"))
parser.add_argument("--n_decision", default=None, type=int,
help=("The number of decision features"))
parser.add_argument("--n_noise", default=None, type=int,
help=("The number of noise features"))
parser.add_argument("--drift_noise", default=False, type=bool,
help=("Add noise to accumulator drift rate"))
parser.add_argument("--step_noise", default=False, type=bool,
        help=("Add noise to accumulator step size"))
parser.add_argument("--z_noise", default=False, type=bool,
        help=("Add noise to accumulator starting value"))
parser.add_argument("--drift_noise_param",
default='{"loc": 0, "scale" : 0.5}', type=json.loads,
help=("Modify Gaussian drift noise"))
parser.add_argument("--step_noise_param",
default='{"loc" : 0, "scale" : 0.2, "size" : 1}', type=json.loads,
help=("Modify Gaussian step noise"))
parser.add_argument("--z_noise_param",
default='{"low" : 0.01, "high" : 0.5, "size" : 1}', type=json.loads,
help=("Modify uniform start value noise"))
parser.add_argument("--seed", default=None,
        help=("Random seed value (initializes a RandomState() instance)"))
args = parser.parse_args()
prng = None
if args.seed is not None:
prng = RandomState(int(args.seed))
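# Editorial sketch (not part of the original file): a hypothetical invocation;
# the flag values are illustrative, not taken from the project's docs.
#
#     python reproduce_pca.py --name fh_pca --cond rt --index trialcount \
#         --data fh --TR 1.5 --n_features 10 --n_accumulator 2 --seed 42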
# RTs for Wheelerdata
def lookup_cond_to_dt(data):
lookup = {
"fh" : {
"fast" : 2, "slow" : 4, "nan": 0,
1 : 2, 2 : 4, 0 : 0
},
"butterfly" : {
"fast" : 4, "slow" : 6, "nan": 0,
1 : 4, 2 : 6, 0 : 0
},
"clock" : {
"fast" : 4, "slow" : 6, "nan": 0,
1 : 4, 2 : 6, 0 : 0
},
"polygon" : {
"fast" : 1, "slow" : 2, "nan": 0,
1 : 1, 2 : 2, 0 : 0
},
"redgreen" : {
"fast" : 1, "slow" : 3, "nan": 0,
1 : 1, 2 : 3, 0 : 0
},
"biasbox" : {
"fast" : 1, "slow" : 2, "nan": 0,
1 : 1, 2 : 2, 0 : 0
}
}
return lookup[data]
# Lookup cond_to_rt
cond_to_rt = lookup_cond_to_dt(args.data)
# ---------------------------------------------------------------------------
# BOLD creation vars
# ---------------------------------------------------------------------------
noise_f = white
hrf_f = None
hrf_params = None
# ---------------------------------------------------------------------------
# Setup exp and run
# ---------------------------------------------------------------------------
data = get_data(args.data)
spacetime = Space(PCA(5, whiten=True), fir, mode="decompose")
exp = DecomposeExpReproduction(spacetime, data,
window=15, nsig=3, tr=args.TR)
exp.run(args.name, args.cond, args.index, data, cond_to_rt,
smooth=args.smooth,
filtfile=args.filtfile,
TR=args.TR,
trname=args.trname,
n_features=args.n_features,
n_univariate=args.n_univariate,
n_accumulator=args.n_accumulator,
n_decision=args.n_decision,
n_noise=args.n_noise,
drift_noise=args.drift_noise,
step_noise=args.step_noise,
z_noise=args.z_noise,
drift_noise_param=args.drift_noise_param,
step_noise_param=args.step_noise_param,
z_noise_param=args.z_noise_param,
noise_f=white, hrf_f=hrf_f, hrf_params=hrf_params, prng=prng)
| parenthetical-e/wheelerexp | meta/reproduce_pca.py | Python | bsd-2-clause | 5,081 | ["Gaussian"] | 41b3a71409acfe2ed956dbbf23e547452149f5aad611360790f27dca20209cb5 |
# -*- coding: utf-8 -*-
#
# Magic-Wormhole documentation build configuration file, created by
# sphinx-quickstart on Sun Nov 12 10:24:09 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
from recommonmark.parser import CommonMarkParser
source_parsers = {
".md": CommonMarkParser,
}
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
#source_suffix = '.md'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Magic-Wormhole'
copyright = u'2017, Brian Warner'
author = u'Brian Warner'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
def _get_versions():
import os.path, sys, subprocess
here = os.path.dirname(os.path.abspath(__file__))
parent = os.path.dirname(here)
v = subprocess.check_output([sys.executable, "setup.py", "--version"],
cwd=parent)
if sys.version_info[0] >= 3:
v = v.decode()
short = ".".join(v.split(".")[:2])
long = v
return short, long
version, release = _get_versions()
# The short X.Y version.
#version = u'0.10'
# The full version, including alpha/beta/rc tags.
#release = u'0.10.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Magic-Wormholedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Magic-Wormhole.tex', u'Magic-Wormhole Documentation',
u'Brian Warner', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'magic-wormhole', u'Magic-Wormhole Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Magic-Wormhole', u'Magic-Wormhole Documentation',
author, 'Magic-Wormhole', 'One line description of project.',
'Miscellaneous'),
]
| warner/magic-wormhole | docs/conf.py | Python | mit | 5,669 | ["Brian"] | a4b0055054bb275f2823d52a681cc742992f71ac7a7500f5fa125d83545586fc |
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import os
import tempfile
from pyemma.coordinates.data import MDFeaturizer
from logging import getLogger
import pyemma.coordinates.api as api
import numpy as np
from pyemma.coordinates.data.numpy_filereader import NumPyFileReader
from pyemma.coordinates.data.py_csv_reader import PyCSVReader as CSVReader
import shutil
import pkg_resources
logger = getLogger('pyemma.'+'TestReaderUtils')
class TestApiSourceFileReader(unittest.TestCase):
@classmethod
def setUpClass(cls):
data_np = np.random.random((100, 3))
data_raw = np.arange(300 * 4).reshape(300, 4)
cls.dir = tempfile.mkdtemp("test-api-src")
cls.npy = tempfile.mktemp(suffix='.npy', dir=cls.dir)
cls.npz = tempfile.mktemp(suffix='.npz', dir=cls.dir)
cls.dat = tempfile.mktemp(suffix='.dat', dir=cls.dir)
cls.csv = tempfile.mktemp(suffix='.csv', dir=cls.dir)
cls.bs = tempfile.mktemp(suffix=".bs", dir=cls.dir)
with open(cls.bs, "w") as fh:
fh.write("meaningless\n")
fh.write("this can not be interpreted\n")
np.save(cls.npy, data_np)
np.savez(cls.npz, data_np, data_np)
np.savetxt(cls.dat, data_raw)
np.savetxt(cls.csv, data_raw)
path = pkg_resources.resource_filename(__name__, 'data') + os.path.sep
cls.bpti_pdbfile = os.path.join(path, 'bpti_ca.pdb')
extensions = ['.xtc', '.binpos', '.dcd', '.h5', '.lh5', '.nc', '.netcdf', '.trr']
cls.bpti_mini_files = [os.path.join(path, 'bpti_mini%s' % ext) for ext in extensions]
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.dir, ignore_errors=True)
def test_various_formats_source(self):
chunksizes = [0, 13]
X = None
bpti_mini_previous = None
for cs in chunksizes:
for bpti_mini in self.bpti_mini_files:
Y = api.source(bpti_mini, top=self.bpti_pdbfile).get_output(chunk=cs)
if X is not None:
np.testing.assert_array_almost_equal(X, Y, err_msg='Comparing %s to %s failed for chunksize %s'
% (bpti_mini, bpti_mini_previous, cs))
X = Y
bpti_mini_previous = bpti_mini
def test_obtain_numpy_file_reader_npy(self):
reader = api.source(self.npy)
self.assertIsNotNone(reader, "Reader object should not be none.")
self.assertTrue(
isinstance(reader, NumPyFileReader), "Should be a NumPyFileReader.")
@unittest.skip("npz currently unsupported")
def test_obtain_numpy_file_reader_npz(self):
reader = api.source(self.npz)
self.assertIsNotNone(reader, "Reader object should not be none.")
self.assertTrue(
isinstance(reader, NumPyFileReader), "Should be a NumPyFileReader.")
def test_obtain_csv_file_reader_dat(self):
reader = api.source(self.dat)
self.assertIsNotNone(reader, "Reader object should not be none.")
self.assertTrue(isinstance(reader, CSVReader), "Should be a CSVReader.")
def test_obtain_csv_file_reader_csv(self):
reader = api.source(self.csv)
self.assertIsNotNone(reader, "Reader object should not be none.")
self.assertTrue(isinstance(reader, CSVReader), "Should be a CSVReader.")
def test_bullshit_csv(self):
# this file is not parseable as tabulated float file
with self.assertRaises(Exception) as r:
api.source(self.bs)
        # depending on whether the traj info cache is switched on, we get these types of exceptions.
self.assertIsInstance(r.exception, (IOError, ValueError))
self.assertIn('could not parse', str(r.exception))
def test_source_set_chunksize(self):
x = np.zeros(10)
r = api.source(x, chunksize=1)
assert r.chunksize == 1
r2 = api.source(r, chunksize=2)
assert r2 is r
assert r2.chunksize == 2
# reset to default chunk size.
r3 = api.source(r, chunksize=None)
assert r3.chunksize is not None
def test_pdb_traj_unsupported(self):
with self.assertRaises(ValueError) as c, tempfile.NamedTemporaryFile(suffix='.pdb') as ntf:
api.source([ntf.name], top=self.bpti_pdbfile)
assert 'PDB' in c.exception.args[0]
class TestApiSourceFeatureReader(unittest.TestCase):
def setUp(self):
path = pkg_resources.resource_filename(__name__, 'data') + os.path.sep
self.pdb_file = os.path.join(path, 'bpti_ca.pdb')
self.traj_files = [
os.path.join(path, 'bpti_001-033.xtc'),
os.path.join(path, 'bpti_067-100.xtc')
]
def test_read_multiple_files_topology_file(self):
reader = api.source(self.traj_files, top=self.pdb_file)
self.assertIsNotNone(reader, "The reader should not be none.")
self.assertEqual(reader.topfile, self.pdb_file,
"Reader topology file and input topology file should coincide.")
self.assertListEqual(reader.filenames, self.traj_files, "Reader trajectories and input"
" trajectories should coincide.")
self.assertEqual(reader.featurizer.topologyfile, self.pdb_file, "Featurizers topology file and input "
"topology file should coincide.")
def test_read_multiple_files_featurizer(self):
featurizer = MDFeaturizer(self.pdb_file)
reader = api.source(self.traj_files, features=featurizer)
self.assertIsNotNone(reader, "The reader should not be none.")
self.assertEqual(reader.topfile, self.pdb_file,
"Reader topology file and input topology file should coincide.")
self.assertListEqual(reader.filenames, self.traj_files, "Reader trajectories and input"
" trajectories should coincide.")
self.assertEqual(reader.featurizer.topologyfile, self.pdb_file, "Featurizers topology file and input "
"topology file should coincide.")
def test_read_single_file_toplogy_file(self):
reader = api.source(self.traj_files[0], top=self.pdb_file)
self.assertIsNotNone(reader, "The reader should not be none.")
self.assertEqual(reader.topfile, self.pdb_file,
"Reader topology file and input topology file should coincide.")
self.assertListEqual(reader.filenames, [self.traj_files[0]], "Reader trajectories and input"
" trajectories should coincide.")
self.assertEqual(reader.featurizer.topologyfile, self.pdb_file, "Featurizers topology file and input "
"topology file should coincide.")
def test_read_single_file_featurizer(self):
featurizer = MDFeaturizer(self.pdb_file)
reader = api.source(self.traj_files[0], features=featurizer)
self.assertIsNotNone(reader, "The reader should not be none.")
self.assertEqual(reader.topfile, self.pdb_file,
"Reader topology file and input topology file should coincide.")
self.assertListEqual(reader.filenames, [self.traj_files[0]], "Reader trajectories and input"
" trajectories should coincide.")
self.assertEqual(reader.featurizer.topologyfile, self.pdb_file, "Featurizers topology file and input "
"topology file should coincide.")
def test_invalid_input(self):
# neither featurizer nor topology file given
self.assertRaises(ValueError, api.source, self.traj_files, None, None)
# no input files but a topology file
self.assertRaises(ValueError, api.source, None, None, self.pdb_file)
featurizer = MDFeaturizer(self.pdb_file)
# no input files but a featurizer
self.assertRaises(ValueError, api.source, None, featurizer, None)
# empty list of input files
self.assertRaises(ValueError, api.source, [], None, self.pdb_file)
# empty tuple of input files
self.assertRaises(ValueError, api.source, (), None, self.pdb_file)
def test_invalid_files(self):
# files do not have the same extension
self.assertRaises(ValueError, api.source, self.traj_files.append(
self.pdb_file), None, self.pdb_file)
# files list contains something else than strings
self.assertRaises(
ValueError, api.source, self.traj_files.append([2]), None, self.pdb_file)
# input file is directory
root_dir = os.path.abspath(os.sep)
self.assertRaises(
ValueError, api.source, root_dir, None, self.pdb_file)
if __name__ == "__main__":
unittest.main()
| markovmodel/PyEMMA | pyemma/coordinates/tests/test_api_source.py | Python | lgpl-3.0 | 9,871 | ["NetCDF"] | e16f2a078e7bf531f87363e4070b4872383754ad6d8404bbddc27424e48cb355 |
# $Id$
#
# Copyright (C) 2003-2006 Greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""unit testing code for the Smiles file handling stuff
"""
import unittest,sys,os
from rdkit import RDConfig
from rdkit import Chem
class TestCase(unittest.TestCase):
def setUp(self):
self.smis = ['CC','CCC','CCCCC','CCCCCC','CCCCCCC','CC','CCCCOC']
  def test1LazyReader(self):
    """ tests lazy reads """
supp = Chem.SmilesMolSupplierFromText('\n'.join(self.smis),',',0,-1,0)
for i in range(4):
m = supp.next()
assert m,'read %d failed'%i
assert m.GetNumAtoms(),'no atoms in mol %d'%i
i = len(supp)-1
m = supp[i]
assert m,'read %d failed'%i
assert m.GetNumAtoms(),'no atoms in mol %d'%i
ms = [x for x in supp]
for i in range(len(supp)):
m = ms[i]
if m:
ms[i] = Chem.MolToSmiles(m)
l = len(supp)
assert l == len(self.smis),'bad supplier length: %d'%(l)
i = len(self.smis)-3
m = supp[i-1]
assert m,'back index %d failed'%i
assert m.GetNumAtoms(),'no atoms in mol %d'%i
try:
m = supp[len(self.smis)]
except:
fail = 1
else:
fail = 0
assert fail,'out of bound read did not fail'
def test2LazyIter(self):
" tests lazy reads using the iterator interface "
supp = Chem.SmilesMolSupplierFromText('\n'.join(self.smis),',',0,-1,0)
nDone = 0
for mol in supp:
      assert mol, 'read %d failed' % nDone
      assert mol.GetNumAtoms(), 'no atoms in mol %d' % nDone
nDone += 1
assert nDone==len(self.smis),'bad number of molecules'
l = len(supp)
assert l == len(self.smis),'bad supplier length: %d'%(l)
i = len(self.smis)-3
m = supp[i-1]
assert m,'back index %d failed'%i
assert m.GetNumAtoms(),'no atoms in mol %d'%i
try:
m = supp[len(self.smis)]
except:
fail = 1
else:
fail = 0
assert fail,'out of bound read did not fail'
def test3BoundaryConditions(self):
smis = ['CC','CCOC','fail','CCO']
supp = Chem.SmilesMolSupplierFromText('\n'.join(smis),',',0,-1,0)
assert len(supp)==4
assert supp[2] is None
assert supp[3]
supp = Chem.SmilesMolSupplierFromText('\n'.join(smis),',',0,-1,0)
assert supp[2] is None
assert supp[3]
assert len(supp)==4
try:
supp[4]
except:
ok=1
else:
ok=0
assert ok
supp = Chem.SmilesMolSupplierFromText('\n'.join(smis),',',0,-1,0)
assert len(supp)==4
assert supp[3]
try:
supp[4]
except:
ok=1
else:
ok=0
assert ok
supp = Chem.SmilesMolSupplierFromText('\n'.join(smis),',',0,-1,0)
try:
supp[4]
except:
ok=1
else:
ok=0
assert ok
assert len(supp)==4
assert supp[3]
if __name__ == '__main__':
unittest.main()
| rdkit/rdkit-orig | rdkit/Chem/Suppliers/UnitTestSmilesMolSupplier.py | Python | bsd-3-clause | 3,020 | ["RDKit"] | 8c72f964cf8c2b0e8e180438e0de9d466dc1229cbf7438df92a5f0cfad33b9d1 |
from enum import Enum
from itertools import product
from psi4 import core
from psi4.driver import psifiles as psif
from psi4.driver.p4util.exceptions import ValidationError
import numpy as np
from qcelemental.util import which_import
class RemovalPolicy(Enum):
LargestError = 1
OldestAdded = 2
class StoragePolicy(Enum):
InCore = 1
OnDisk = 2
def axpy(y, alpha, x):
if isinstance(y, (core.Matrix, core.Vector)):
y.axpy(alpha, x)
elif isinstance(y, (core.dpdbuf4, core.dpdfile2)):
y.axpy_matrix(x, alpha)
else:
raise TypeError("Unrecognized object type for DIIS.")
def template_helper(*args):
template = []
for arg in args:
if isinstance(arg, core.Vector):
template.append([arg.dimpi()])
elif isinstance(arg, (core.Matrix, core.dpdfile2, core.dpdbuf4)):
template.append([arg.rowdim(), arg.coldim()])
elif isinstance(arg, float):
template.append(float(0))
else:
raise TypeError("Unrecognized object type for DIIS.")
return template
class DIIS:
def __init__(self, max_vecs: int, name: str, removal_policy = RemovalPolicy.LargestError, storage_policy = StoragePolicy.OnDisk, closed_shell = True, engines = {"diis"}):
# We don't have a good sense for how this class may need to expand, so the current structure is amorphous.
# LargestError is only _defined_ for the case of one engine and not theoretically sound for adiis/ediis:
# those methods want to traverse a wide range of solution space. As such:
if engines != {"diis"}:
self.removal_policy = RemovalPolicy.OldestAdded
elif not isinstance(removal_policy, RemovalPolicy):
raise TypeError(f"removal_policy must be a RemovalPolicy, not a {type(removal_policy)}")
else:
self.removal_policy = removal_policy
if not isinstance(storage_policy, StoragePolicy):
raise TypeError(f"storage_policy must be a StoragePolicy, not a {type(storage_policy)}")
if not which_import("scipy", return_bool=True) and ("ediis" in engines or "adiis" in engines):
raise ModuleNotFoundError("Python module scipy not found. Solve by\n" +
" (1) installing it: `conda install scipy` or `pip install scipy`, or" +
" (2) de-activating a/ediis with option: `set scf scf_initial_accelerator none`")
self.max_vecs = max_vecs
self.name = name
self.storage_policy = storage_policy
# The template matches each entry key to the expected dimensions of each of its items.
# For the simple DIIS case, there are functions to populate this. (Useful C-side.)
# For all other cases, this is set automatically the first time an entry is added.
self.template = {}
self.reset_subspace()
# Resource Acquired: Open PSIO file.
self.opened_libdiis = False
if self.storage_policy == StoragePolicy.OnDisk:
psio = core.IO.shared_object()
if not psio.open_check(psif.PSIF_LIBDIIS):
psio.open(psif.PSIF_LIBDIIS, 1) # 1 = PSIO_OPEN_OLD
self.opened_libdiis = True
self.closed_shell = closed_shell # Only needed for A/EDIIS, which doesn't allow ROHF anyways.
self.engines = engines
def __del__(self):
# RAII the PSIO file away.
if self.opened_libdiis:
psio = core.IO.shared_object()
if psio.open_check(psif.PSIF_LIBDIIS):
psio.close(psif.PSIF_LIBDIIS, 1) # 1 = KEEP
def reset_subspace(self):
""" Wipe all data from previous iterations. """
self.stored_vectors = [] # elt. i is entry i
self.iter_num = -1
# At present, we only cache for DIIS, not EDIIS or ADIIS. In principle, we could, but
# their quantities are N^2, so we assume the savings are negligible.
self.cached_dot_products = dict()
def copier(self, x, new_name: str):
""" Copy the object x and give it a new_name. Save it to disk if needed. """
if isinstance(x, (core.Matrix, core.Vector)):
copy = x.clone()
elif isinstance(x, (core.dpdbuf4, core.dpdfile2)):
copy = core.Matrix(x)
elif isinstance(x, float):
# Never cache a _number_.
return x
else:
raise TypeError("Unrecognized object type for DIIS.")
copy.name = new_name
if self.storage_policy == StoragePolicy.OnDisk:
psio = core.IO.shared_object()
if isinstance(x, core.Vector):
copy.save(psio, psif.PSIF_LIBDIIS)
else:
copy.save(psio, psif.PSIF_LIBDIIS, core.SaveType.SubBlocks)
copy = None
return copy
def get_name(self, name, entry_num, item_num):
""" This is what we'll save an object to disk with."""
return f"{self.name}: {name} Entry {entry_num}, Item {item_num}"
def load_quantity(self, name, entry_num, item_num, force_new = True):
""" Load quantity from wherever it's stored, constructing a new object if needed. """
template_object = self.template[name][item_num]
if isinstance(template_object, float) or self.storage_policy == StoragePolicy.InCore:
quantity = self.stored_vectors[entry_num][name][item_num]
try:
quantity = quantity.clone()
except AttributeError:
# The quantity must have been a float. No need to clone.
pass
elif self.storage_policy == StoragePolicy.OnDisk:
entry_dims = template_object
full_name = self.get_name(name, entry_num, item_num)
psio = core.IO.shared_object()
if len(entry_dims) == 2:
quantity = core.Matrix(full_name, *entry_dims)
quantity.load(psio, psif.PSIF_LIBDIIS, core.SaveType.SubBlocks)
elif len(entry_dims) == 1:
quantity = core.Vector(full_name, *entry_dims)
quantity.load(psio, psif.PSIF_LIBDIIS)
else:
raise Exception(f"StoragePolicy {self.storage_policy} not recognized. This is a bug: contact developers.")
return quantity
def get_dot_product(self, i: int, j: int):
""" Get a DIIS dot product. i and j represent entry numbers. """
key = frozenset([i, j])
try:
return self.cached_dot_products[key]
except KeyError:
dot_product = 0
for item_num in range(len(self.template["error"])):
Rix = self.load_quantity("error", i, item_num)
Rjx = self.load_quantity("error", j, item_num)
dot_product += Rix.vector_dot(Rjx)
self.cached_dot_products[key] = dot_product
return dot_product
def set_error_vector_size(self, *args):
""" Set the template for the DIIS error. Kept mainly for backwards compatibility. """
self.template["error"] = template_helper(*args)
def set_vector_size(self, *args):
""" Set the template for the extrapolation target. Kept mainly for backwards compatibility. """
self.template["target"] = template_helper(*args)
def build_entry(self, entry, target_index):
return {key: [self.copier(elt, self.get_name(key, target_index, i)) for i, elt in enumerate(val)] for key, val in entry.items()}
def add_entry(self, *args):
if self.max_vecs == 0:
return False
# Convert from "raw list of args" syntax to a proper entry.
# While "entry" format is more general, "raw list of args" won't break C-side code, which doesn't need the generality.
if not (len(args) == 1 and isinstance(args[0], dict)):
R_len = len(self.template.get("error", []))
T_len = len(self.template.get("target", []))
if R_len + T_len != len(args):
                raise Exception(f"Cannot build {R_len} residuals and {T_len} amplitudes from {len(args)} items.")
entry = {"error": args[:R_len], "target": args[R_len:]}
else:
entry = args[0]
self.template = {key: template_helper(*val) for key, val in entry.items()}
self.iter_num += 1
if len(self.stored_vectors) >= self.max_vecs:
if self.removal_policy == RemovalPolicy.OldestAdded:
target_index = self.iter_num % self.max_vecs
elif self.removal_policy == RemovalPolicy.LargestError:
target_index = np.argmax([self.get_dot_product(i, i) for i in range(len(self.stored_vectors))])
else:
raise Exception(f"RemovalPolicy {self.removal_policy} not recognized. This is a bug: contact developers.")
# Purge imminently-outdated values from cache.
self.cached_dot_products = {key: val for key, val in self.cached_dot_products.items() if target_index not in key}
# Set the new entry.
self.stored_vectors[target_index] = self.build_entry(entry, target_index)
else:
self.stored_vectors.append(self.build_entry(entry, self.iter_num))
return True
def diis_coefficients(self):
dim = len(self.stored_vectors) + 1
B = np.zeros((dim, dim))
for i in range(len(self.stored_vectors)):
for j in range(len(self.stored_vectors)):
B[i, j] = self.get_dot_product(i, j)
B[-1, :-1] = B[:-1, -1] = -1
rhs = np.zeros((dim))
rhs[-1] = -1
# Trick to improve numerical conditioning.
# Instead of solving B c = r, we solve D B D^-1 D c = D r, using
# D r = r. D is the diagonals ^ -1/2 matrix.
# This improves the conditioning of the problem.
diagonals = B.diagonal().copy()
diagonals[-1] = 1
if np.all(diagonals > 0):
diagonals = diagonals ** (- 0.5)
B = np.einsum("i,ij,j -> ij", diagonals, B, diagonals)
return np.linalg.lstsq(B, rhs, rcond=None)[0][:-1] * diagonals[:-1]
else:
return np.linalg.lstsq(B, rhs, rcond=None)[0][:-1]
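    # Editorial sketch (not part of the original file): the scaling above is a
    # symmetric Jacobi preconditioning. On a toy system with
    #     B = [[4, 2, -1], [2, 9, -1], [-1, -1, 0]],  rhs = [0, 0, -1],
    # D = diag(1/2, 1/3, 1) turns the error block of D B D into
    # [[1, 1/3], [1/3, 1]] (unit diagonal), cutting its condition number from
    # about 2.9 to 2.0 before the least-squares solve.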
def adiis_energy(self, x):
return np.dot(self.adiis_linear, x) + np.einsum("i,ij,j->", x, self.adiis_quadratic, x) / 2
def adiis_gradient(self, x):
return self.adiis_linear + np.einsum("i,ij->j", x, self.adiis_quadratic)
def adiis_coefficients(self):
from scipy.optimize import minimize
self.adiis_populate()
result = minimize(self.adiis_energy, np.ones(len(self.stored_vectors)), method="SLSQP",
bounds = tuple((0, 1) for i in self.stored_vectors),
constraints = [{"type": "eq", "fun": lambda x: sum(x) - 1, "jac": lambda x: np.ones_like(x)}],
jac=self.adiis_gradient, tol=1e-6, options={"maxiter": 200})
if not result.success:
raise Exception("ADIIS minimization failed. File a bug, and include your entire input and output files.")
return result.x
def adiis_populate(self):
""" Fills linear and quadratic coefficients in ADIIS energy estimate. """
# We are currently assuming that all of dD and dF fit in-core.
# These quantities are N^2, so this should be fine in most cases.
num_entries = len(self.stored_vectors)
dD = [[] for x in range(num_entries)]
dF = [[] for x in range(num_entries)]
for name, array in zip(["densities", "target"], [dD, dF]):
for item_num in range(len(self.template[name])):
latest_entry = self.load_quantity(name, len(self.stored_vectors) - 1, item_num)
for entry_num in range(num_entries):
temp = self.load_quantity(name, entry_num, item_num, force_new=True)
temp.subtract(latest_entry)
array[entry_num].append(temp)
self.adiis_linear = np.zeros((num_entries))
latest_fock = []
for item_num in range(len(self.template["target"])):
latest_fock.append(self.load_quantity("target", len(self.stored_vectors) - 1, item_num))
for i in range(num_entries):
self.adiis_linear[i] = sum(d.vector_dot(f) for d, f in zip(dD[i], latest_fock))
self.adiis_quadratic = np.zeros((num_entries, num_entries))
for i, j in product(range(num_entries), repeat = 2):
self.adiis_quadratic[i][j] = sum(d.vector_dot(f) for d, f in zip(dD[i], dF[j]))
if self.closed_shell:
self.adiis_linear *= 2
self.adiis_quadratic *= 2
def ediis_energy(self, x):
ediis_linear = np.array([entry["energy"][0] for entry in self.stored_vectors])
return np.dot(ediis_linear, x) + np.einsum("i,ij,j->", x, self.ediis_quadratic, x) / 2
def ediis_gradient(self, x):
""" Gradient of energy estimate w.r.t. input coefficient """
ediis_linear = np.array([entry["energy"][0] for entry in self.stored_vectors])
return ediis_linear + np.einsum("i,ij->j", x, self.ediis_quadratic)
def ediis_coefficients(self):
from scipy.optimize import minimize
self.ediis_populate()
result = minimize(self.ediis_energy, np.ones(len(self.stored_vectors)), method="SLSQP",
bounds = tuple((0, 1) for i in self.stored_vectors),
constraints = [{"type": "eq", "fun": lambda x: sum(x) - 1, "jac": lambda x: np.ones_like(x)}],
jac=self.ediis_gradient, tol=1e-6, options={"maxiter": 200})
if not result.success:
raise Exception("EDIIS minimization failed. File a bug, and include your entire input and output files.")
return result.x
    def ediis_populate(self):
        """ Fills quadratic coefficients in the EDIIS energy estimate. """
num_entries = len(self.stored_vectors)
self.ediis_quadratic = np.zeros((num_entries, num_entries))
for i in range(num_entries):
for item_num in range(len(self.template["densities"])):
d = self.load_quantity("densities", i, item_num)
for j in range(num_entries):
f = self.load_quantity("target", j, item_num)
self.ediis_quadratic[i][j] += d.vector_dot(f)
diag = np.diag(self.ediis_quadratic)
# D_i F_i + D_j F_j - D_i F_j - D_j F_i; First two terms use broadcasting tricks
self.ediis_quadratic = diag[:, None] + diag - self.ediis_quadratic - self.ediis_quadratic.T
self.ediis_quadratic *= -1/2
if self.closed_shell:
self.ediis_quadratic *= 2
def extrapolate(self, *args, Dnorm = None):
""" Perform extrapolation. Must be passed in an error metric to decide how to handle hybrid algorithms. """
if {"adiis", "ediis"}.intersection(self.engines) and Dnorm is None:
raise ValidationError("An extrapolation engine insists you specify the error metric.")
performed = set()
if self.engines == {"diis"}:
coeffs = self.diis_coefficients()
performed.add("DIIS")
elif len(self.engines) == 1:
blend_stop = core.get_option("SCF", "SCF_INITIAL_FINISH_DIIS_TRANSITION")
if Dnorm <= blend_stop:
return performed
elif self.engines == {"ediis"}:
coeffs = self.ediis_coefficients()
performed.add("EDIIS")
elif self.engines == {"adiis"}:
coeffs = self.adiis_coefficients()
performed.add("ADIIS")
else:
                raise Exception(f"DIIS engine not recognized: {self.engines}.")
elif self.engines == {"diis", "adiis"} or self.engines == {"diis", "ediis"}:
blend_start = core.get_option("SCF", "SCF_INITIAL_START_DIIS_TRANSITION")
blend_stop = core.get_option("SCF", "SCF_INITIAL_FINISH_DIIS_TRANSITION")
if "adiis" in self.engines:
initial_coefficient_function = self.adiis_coefficients
initial_name = "ADIIS"
else:
initial_coefficient_function = self.ediis_coefficients
initial_name = "EDIIS"
if Dnorm >= blend_start:
coeffs = initial_coefficient_function()
performed.add(initial_name)
elif Dnorm <= blend_stop:
coeffs = self.diis_coefficients()
performed.add("DIIS")
else:
m = 1 - (Dnorm - blend_start) / (blend_stop - blend_start)
coeffs = m * initial_coefficient_function() + (1 - m) * self.diis_coefficients()
performed.add("DIIS")
performed.add(initial_name)
else:
raise Exception(f"DIIS engine combination not recognized: {self.engines}")
for j, Tj in enumerate(args):
Tj.zero()
for i, ci in enumerate(coeffs):
Tij = self.load_quantity("target", i, j)
axpy(Tj, ci, Tij)
return performed
def delete_diis_file(self):
""" Purge all data in the DIIS file. """
psio = core.IO.shared_object()
if not psio.open_check(psif.PSIF_LIBDIIS):
psio.open(psif.PSIF_LIBDIIS, 1) # 1 = PSIO_OPEN_OLD
psio.close(psif.PSIF_LIBDIIS, 0) # 0 = DELETE
| psi4/psi4 | psi4/driver/procrouting/diis.py | Python | lgpl-3.0 | 17,437 | ["Psi4"] | 4385c948926ae906a49cbb0cc504c2d430e0b72ab51ba64133afe6f2a5104c5b |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAffycomp(RPackage):
"""The package contains functions that can be used to compare
expression measures for Affymetrix Oligonucleotide Arrays."""
homepage = "https://www.bioconductor.org/packages/affycomp/"
url = "https://git.bioconductor.org/packages/affycomp"
version('1.52.0', git='https://git.bioconductor.org/packages/affycomp', commit='1b97a1cb21ec93bf1e5c88d5d55b988059612790')
depends_on('r@3.4.0:3.4.9', when='@1.52.0')
depends_on('r-biobase', type=('build', 'run'))
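# Editorial sketch (not part of the original file): with this package file in a
# Spack instance's builtin repo, the pinned version would be installed via
#
#     spack install r-affycomp@1.52.0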
| lgarren/spack | var/spack/repos/builtin/packages/r-affycomp/package.py | Python | lgpl-2.1 | 1,773 | ["Bioconductor"] | 518fd0dbb7be874494140a72ac5914d412a51d120b415f543e9468b9a4f6dfaa |
"""
vtk - surface and contours
"""
import enaml
from enaml.qt.qt_application import QtApplication
def run():
with enaml.imports():
from vtk_canvas_views import Main
app = QtApplication()
view = Main(custom_title='VTK Demo Example')
view.show()
# Start the application event loop
app.start()
run()
| viz4biz/PyDataNYC2015 | tutorial/vtk_canvas_views_run.py | Python | apache-2.0 | 338 | ["VTK"] | 56c326f936789e77a8964e6ba9cd01e425a63de55f428391db7e08d10560ad9e |
################################################################
# SQUAD - superconducting quantum dot #
# Copyright (C) 2012-2019 Vladislav Pokorny; pokornyv@fzu.cz #
# homepage: github.com/pokornyv/SQUAD #
# config_squad.py - config and global variables #
# method described in #
# Sci. Rep. 5, 8821 (2015). #
# Phys. Rev. B 93, 024523 (2016). #
################################################################
from __future__ import print_function
import scipy as sp
from os import listdir
from sys import argv,exit,version_info
from time import ctime,time
from ConfigParser import SafeConfigParser
###########################################################
## reading parameters from command line ###################
U = float(argv[1])
Delta = float(argv[2])
GammaR = float(argv[3])
GammaL = float(argv[4])*GammaR
eps = float(argv[5])
P = float(argv[6])
GammaN = 0.0 ## for compatibility with functions from SSN branch (not in GitHub master branch)
## little post-processing
ed = eps-U/2.0 ## energy level shifted to symmetry point
Phi = P*sp.pi ## phase difference
GammaLR = GammaL/GammaR if GammaR != 0.0 else 1.0 ## coupling strength ratio
GammaTot = GammaL + GammaR ## total coupling strength
###########################################################
## reading config file ####################################
cfile = 'squad.in'
if cfile not in listdir('.'):
print('- Parameter file '+cfile+' missing. Exit.')
exit(1)
config = SafeConfigParser()
config.optionxform = str ## case-sensitive names
config.read(cfile)
## default values #########################################
## consult infile.md for details
M = 20
dE = 1e-4
rootf = 'brentq'
ConvN = 1e-4
ConvX = 1e-5
ConvHF = 1e-6
MuMin = -2.0
MuMax = 2.0
ABSinit_val = 0.99
HF_max_iter = 10000
offset_x = 1e-12
WriteIO = 1
Write_HFGF = 0
Write_Bubble = 0
Write_2ndSE = 0
Write_2ndGF = 1
Write_AC = 0 ## Andreev conductance, for compatibility with SSN codes
EmaxFiles = 10.0
EstepFiles = 10
## read the .in file ######################################
## [params] section
if config.has_option('params','M'):
M = int(config.get('params','M'))
if config.has_option('params','dE'):
dE = float(config.get('params','dE'))
if config.has_option('params','rootf'):
rootf = str(config.get('params','rootf'))
if config.has_option('params','ConvN'):
ConvN = float(config.get('params','ConvN'))
if config.has_option('params','ConvX'):
ConvX = float(config.get('params','ConvX'))
if config.has_option('params','ConvHF'):
ConvHF = float(config.get('params','ConvHF'))
if config.has_option('params','MuMin'):
MuMin = float(config.get('params','MuMin'))
if config.has_option('params','MuMax'):
MuMax = float(config.get('params','MuMax'))
if config.has_option('params','ABSinit_val'):
ABSinit_val = float(config.get('params','ABSinit_val'))
if config.has_option('params','HF_max_iter'):
HF_max_iter = int(config.get('params','HF_max_iter'))
if config.has_option('params','offset_x'):
offset_x = float(config.get('params','offset_x'))
## [IO] section
if config.has_option('IO','WriteIO'):
	WriteIO = bool(int(config.get('IO','WriteIO')))
if config.has_option('IO','Write_HFGF'):
Write_HFGF = bool(int(config.get('IO','Write_HFGF')))
if config.has_option('IO','Write_Bubble'):
Write_Bubble = bool(int(config.get('IO','Write_Bubble')))
if config.has_option('IO','Write_2ndSE'):
Write_2ndSE = bool(int(config.get('IO','Write_2ndSE')))
if config.has_option('IO','Write_2ndGF'):
Write_2ndGF = bool(int(config.get('IO','Write_2ndGF')))
if config.has_option('IO','Write_AC'):
Write_AC = bool(int(config.get('IO','Write_AC'))) ## compatibility with SSN codes
if config.has_option('IO','EmaxFiles'):
EmaxFiles = float(config.get('IO','EmaxFiles'))
if config.has_option('IO','EstepFiles'):
EstepFiles = int(config.get('IO','EstepFiles'))
###########################################################
## energy axis ############################################
## In case you run into RuntimeWarning: invalid value encountered in power:
## for Kramers-Kronig we need range(N)**3 array, for large N it can
## hit the limit of 2**63 = 9223372036854775808 of signed int
## large values of N also introduce instability to the calculation of ABS
N = 2**M-1 ## number of points for bubble/self-energy FFT calculation
dE_dec = int(-sp.log10(dE))
En_A = sp.around(sp.linspace(-(N-1)/2*dE,(N-1)/2*dE,N),dE_dec+2)
Nhalf = int((len(En_A)-1)/2) ## zero on the energy axis
## cannot print what is not calculated
if EmaxFiles > sp.fabs(En_A[0]): EmaxFiles = sp.fabs(En_A[0])
if any([GammaL <= 0.0,GammaR <= 0.0, U < 0.0, Delta <= 0.0]):
print('# check_params: Error: All of GammaL, GammaR, U, Delta must be positive.')
exit(1)
if Delta > En_A[-1]:
print('# Error: Delta must be smaller than the bandwidth.')
print('# Delta = {0: .5f}, Emax = {1: .5f}'.format(Delta,En_A[-1]))
exit(1)
## locate band edges in En_A
EdgePos1 = sp.nonzero(sp.around(En_A,dE_dec) == sp.around(-Delta,dE_dec))[0][0]
EdgePos2 = sp.nonzero(sp.around(En_A,dE_dec) == sp.around( Delta,dE_dec))[0][0]
## Fermi-Dirac distribution for T=0
FD_A = 1.0*sp.concatenate([sp.ones(int((N-1)/2)),[0.5],sp.zeros(int((N-1)/2))])
## config_squad.py end ##
| pokornyv/josephson | config_squad.py | Python | mit | 5,659 | ["DIRAC"] | c4f88335aef495d5d6adef6c975feaa743dcf95d709d3b1429972ac049ea82af |
# $HeadURL$
__RCSID__ = "$Id$"
from DIRAC.AccountingSystem.Client.Types.BaseAccountingType import BaseAccountingType
import DIRAC
class Job( BaseAccountingType ):
def __init__( self ):
BaseAccountingType.__init__( self )
self.definitionKeyFields = [ ( 'User', 'VARCHAR(32)' ),
( 'UserGroup', 'VARCHAR(32)' ),
( 'JobGroup', "VARCHAR(64)" ),
( 'JobType', 'VARCHAR(32)' ),
( 'JobClass', 'VARCHAR(32)' ),
( 'ProcessingType', 'VARCHAR(32)' ),
( 'Site', 'VARCHAR(32)' ),
( 'FinalMajorStatus', 'VARCHAR(32)' ),
( 'FinalMinorStatus', 'VARCHAR(64)' )
]
self.definitionAccountingFields = [ ( 'CPUTime', "INT UNSIGNED" ),
( 'NormCPUTime', "INT UNSIGNED" ),
( 'ExecTime', "INT UNSIGNED" ),
( 'InputDataSize', 'BIGINT UNSIGNED' ),
( 'OutputDataSize', 'BIGINT UNSIGNED' ),
( 'InputDataFiles', 'INT UNSIGNED' ),
( 'OutputDataFiles', 'INT UNSIGNED' ),
( 'DiskSpace', 'BIGINT UNSIGNED' ),
( 'InputSandBoxSize', 'BIGINT UNSIGNED' ),
( 'OutputSandBoxSize', 'BIGINT UNSIGNED' ),
( 'ProcessedEvents', 'INT UNSIGNED' )
]
self.bucketsLength = [ ( 86400 * 8, 3600 ), #<1w+1d = 1h
( 86400 * 35, 3600 * 4 ), #<35d = 4h
( 86400 * 30 * 6, 86400 ), #<6m = 1d
( 86400 * 365, 86400 * 2 ), #<1y = 2d
( 86400 * 600, 604800 ), #>1y = 1w
]
self.checkType()
#Fill the site
self.setValueByKey( "Site", DIRAC.siteName() )
def checkRecord( self ):
result = self.getValue( "ExecTime" )
if not result[ 'OK' ]:
return result
execTime = result[ 'Value' ]
result = self.getValue( "CPUTime" )
if not result[ 'OK' ]:
return result
cpuTime = result[ 'Value' ]
if cpuTime > execTime * 100:
      return DIRAC.S_ERROR( "Oops. CPUTime seems to be more than 100 times the ExecTime. Smells fishy!" )
if execTime > 33350400: # 1 year
      return DIRAC.S_ERROR( "Oops. More than 1 year of cpu time smells fishy!" )
return DIRAC.S_OK()
| Sbalbp/DIRAC | AccountingSystem/Client/Types/Job.py | Python | gpl-3.0 | 2,736 | ["DIRAC"] | 4030219ebfa0c287fddafc08461f27d8b778ed345b4cc73bb8e5956db57db449 |
from dataclasses import dataclass
from typing import Iterator, TypeVar, Union
from blib2to3.pytree import Node, Leaf, type_repr
from blib2to3.pgen2 import token
from black.nodes import Visitor
from black.output import out
from black.parsing import lib2to3_parse
LN = Union[Leaf, Node]
T = TypeVar("T")
@dataclass
class DebugVisitor(Visitor[T]):
tree_depth: int = 0
def visit_default(self, node: LN) -> Iterator[T]:
indent = " " * (2 * self.tree_depth)
if isinstance(node, Node):
_type = type_repr(node.type)
out(f"{indent}{_type}", fg="yellow")
self.tree_depth += 1
for child in node.children:
yield from self.visit(child)
self.tree_depth -= 1
out(f"{indent}/{_type}", fg="yellow", bold=False)
else:
_type = token.tok_name.get(node.type, str(node.type))
out(f"{indent}{_type}", fg="blue", nl=False)
if node.prefix:
# We don't have to handle prefixes for `Node` objects since
# that delegates to the first child anyway.
out(f" {node.prefix!r}", fg="green", bold=False, nl=False)
out(f" {node.value!r}", fg="blue", bold=False)
@classmethod
def show(cls, code: Union[str, Leaf, Node]) -> None:
"""Pretty-print the lib2to3 AST of a given string of `code`.
Convenience method for debugging.
"""
v: DebugVisitor[None] = DebugVisitor()
if isinstance(code, str):
code = lib2to3_parse(code)
list(v.visit(code))
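# Editorial sketch (not part of the original file): DebugVisitor.show() can be
# called directly on a source string, e.g.
#
#     from black.debug import DebugVisitor
#     DebugVisitor.show("x = 1\n")  # prints the indented lib2to3 node/leaf tree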
| psf/black | src/black/debug.py | Python | mit | 1,595 | ["VisIt"] | 05b945f4bf3c84d19bb95a3e107e92cc7a398e005b160587c8b2f5e338653b76 |
#!/usr/bin/python
import getopt
import sys
from Bio import SeqIO
from Bio.SeqUtils import GC
import time  # import time, gmtime, strftime
import os
import shutil
import pandas
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
import csv
#from datetime import datetime
import numpy as np
from scipy import stats
__author__ = "Andriy Sheremet"
#Helper functions definitions
def genome_shredder(input_dct, shear_val):
shredded = {}
for key, value in input_dct.items():
#print input_dct[i].seq
#print i
dic_name = key
rec_name = value.name
for j in range(0, len(str(value.seq)), int(shear_val)):
# print j
record = str(value.seq)[0+j:int(shear_val)+j]
shredded[dic_name+"_"+str(j)] = SeqRecord(Seq(record),rec_name+"_"+str(j),'','')
#record = SeqRecord(input_ref_records[i].seq[0+i:int(shear_val)+i],input_ref_records[i].name+"_%i"%i,"","")
return shredded
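# Editorial sketch (not part of the original file): shearing one record into
# 200 bp windows; keys gain the window offset as a suffix.
#
#     recs = {'contig1': SeqRecord(Seq('ACGT' * 100), 'contig1')}
#     pieces = genome_shredder(recs, 200)  # keys: 'contig1_0', 'contig1_200'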
def parse_contigs_ind(f_name):
"""
Returns sequences index from the input files(s)
remember to close index object after use
"""
handle = open(f_name, "rU")
record_dict = SeqIO.index(f_name,"fasta")
handle.close()
return record_dict
#returning specific sequences and overal list
def retrive_sequence(contig_lst, rec_dic):
"""
Returns list of sequence elements from dictionary/index of SeqIO objects specific to the contig_lst parameter
"""
contig_seqs = list()
#record_dict = rec_dic
#handle.close()
for contig in contig_lst:
contig_seqs.append(str(rec_dic[contig].seq))#fixing BiopythonDeprecationWarning
return contig_seqs
def filter_seq_dict(key_lst, rec_dic):
"""
Returns filtered dictionary element from rec_dic according to sequence names passed in key_lst
"""
return { key: rec_dic[key] for key in key_lst }
def unique_scaffold_topEval(dataframe):
#returns pandas series object
variables = list(dataframe.columns.values)
scaffolds=dict()
rows=list()
for row in dataframe.itertuples():
#if row[1]=='Ga0073928_10002560':
if row[1] not in scaffolds:
scaffolds[row[1]]=row
else:
if row[11]<scaffolds[row[1]][11]:
scaffolds[row[1]]=row
rows=scaffolds.values()
#variables=['quid', 'suid', 'iden', 'alen', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits']
df = pandas.DataFrame([[getattr(i,j) for j in variables] for i in rows], columns = variables)
return df
def unique_scaffold_topBits(dataframe):
#returns pandas series object
variables = list(dataframe.columns.values)
scaffolds=dict()
rows=list()
for row in dataframe.itertuples():
#if row[1]=='Ga0073928_10002560':
if row[1] not in scaffolds:
scaffolds[row[1]]=row
else:
if row[12]>scaffolds[row[1]][12]:
scaffolds[row[1]]=row
rows=scaffolds.values()
#variables=['quid', 'suid', 'iden', 'alen', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits']
df = pandas.DataFrame([[getattr(i,j) for j in variables] for i in rows], columns = variables)
return df
def close_ind_lst(ind_lst):
"""
Closes index objects supplied in input parameter list
"""
for index in ind_lst:
index.close()
def usage():
print "\nThis is the usage function\n"
# print 'Usage: '+sys.argv[0]+' -i <input_file> [-o <output>] [-l <minimum length>]'
# print 'Example: '+sys.argv[0]+' -i input.fasta -o output.fasta -l 100'
def main(argv):
#default parameters
mg_lst = []
ref_lst = []
e_val = 1e-5
alen = 50.0
alen_percent = True
alen_bp = False
iden = 95.0
name= "output"
fmt_lst = ["fasta"]
supported_formats =["fasta", "csv"]
iterations = 1
alen_increment = 5.0
iden_increment = 0.0
blast_db_Dir = ""
results_Dir = ""
input_files_Dir = ""
ref_out_0 = ""
blasted_lst = []
continue_from_previous = False #poorly supported, just keeping the directories
skip_blasting = False
debugging = False
sheared = False
shear_val = None
logfile = ""
try:
opts, args = getopt.getopt(argv, "r:m:n:e:a:i:s:f:h", ["reference=", "metagenome=", "name=", "e_value=", "alignment_length=", "identity=","shear=","format=", "iterations=", "alen_increment=", "iden_increment=","continue_from_previous","skip_blasting","debugging", "help"])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
# elif opt in ("--recover_after_failure"):
# recover_after_failure = True
# print "Recover after failure:", recover_after_failure
elif opt in ("--continue_from_previous"):
continue_from_previous = True
if debugging:
print "Continue after failure:", continue_from_previous
elif opt in ("--debugging"):
debugging = True
if debugging:
print "Debugging messages:", debugging
elif opt in ("-r", "--reference"):
if arg:
ref_lst=arg.split(',')
#infiles = arg
if debugging:
print "Reference file(s)", ref_lst
elif opt in ("-m", "--metagenome"):
if arg:
mg_lst=arg.split(',')
#infiles = arg
if debugging:
print "Metagenome file(s)", mg_lst
elif opt in ("-f", "--format"):
if arg:
fmt_lst=arg.split(',')
#infiles = arg
if debugging:
print "Output format(s)", fmt_lst
elif opt in ("-n", "--name"):
if arg.strip():
name = arg
if debugging:
print "Project name", name
elif opt in ("-e", "--e_value"):
try:
e_val = float(arg)
except:
                print "\nERROR: Please enter a numerical value as -e parameter (default: 1e-5)"
usage()
sys.exit(1)
if debugging:
print "E value", e_val
elif opt in ("-a", "--alignment_length"):
if arg.strip()[-1]=="%":
alen_bp = False
alen_percent = True
else:
alen_bp = True
alen_percent = False
try:
alen = float(arg.split("%")[0])
except:
print "\nERROR: Please enter a numerical value as -a parameter (default: 50.0)"
usage()
sys.exit(1)
if debugging:
print "Alignment length", alen
elif opt in ("-i", "--identity"):
try:
iden = float(arg)
except:
print "\nERROR: Please enter a numerical value as -i parameter (default: 95.0)"
usage()
sys.exit(1)
if debugging:
print "Alignment length", iden
elif opt in ("-s", "--shear"):
sheared = True
try:
shear_val = int(arg)
except:
print "\nERROR: Please enter an integer value as -s parameter"
usage()
sys.exit(1)
if debugging:
print "Alignment length", iden
elif opt in ("--iterations"):
try:
iterations = int(arg)
except:
print "\nWARNING: Please enter integer value as --iterations parameter (using default: 1)"
if debugging:
print "Iterations: ", iterations
elif opt in ("--alen_increment"):
try:
alen_increment = float(arg)
except:
print "\nWARNING: Please enter numerical value as --alen_increment parameter (using default: )", alen_increment
if debugging:
print "Alignment length increment: ", alen_increment
elif opt in ("--iden_increment"):
try:
iden_increment = float(arg)
except:
print "\nWARNING: Please enter numerical value as --iden_increment parameter (using default: )", iden_increment
if debugging:
print "Alignment length increment: ", iden_increment
elif opt in ("--skip_blasting"):
skip_blasting = True
if debugging:
print "Blasting step omitted; Using previous blast output."
for ref_file in [x for x in ref_lst if x]:
try:
#
with open(ref_file, "rU") as hand_ref:
pass
except:
print "\nERROR: Reference File(s) ["+ref_file+"] doesn't exist"
usage()
sys.exit(1)
for mg_file in [x for x in mg_lst if x]:
try:
#
with open(mg_file, "rU") as hand_mg:
pass
except:
print "\nERROR: Metagenome File(s) ["+mg_file+"] doesn't exist"
usage()
sys.exit(1)
for fmt in [x for x in fmt_lst if x]:
if fmt not in supported_formats:
print "\nWARNING: Output format [",fmt,"] is not supported"
print "\tUse -h(--help) option for the list of supported formats"
fmt_lst=["fasta"]
print "\tUsing default output format: ", fmt_lst[0]
project_dir = name
if not continue_from_previous:
if os.path.exists(project_dir):
shutil.rmtree(project_dir)
try:
os.mkdir(project_dir)
except OSError:
print "ERROR: Cannot create project directory: " + name
raise
print "\n\t Initial Parameters:"
print "\nProject Name: ", name,'\n'
print "Project Directory: ", os.path.abspath(name),'\n'
print "Reference File(s): ", ref_lst,'\n'
if sheared:
print "Shear Reference File(s):", str(shear_val)+"bp",'\n'
print "Metagenome File(s): ", mg_lst,'\n'
print "E Value: ", e_val, "\n"
if alen_percent:
print "Alignment Length: "+str(alen)+'%\n'
if alen_bp:
print "Alignment Length: "+str(alen)+'bp\n'
print "Sequence Identity: "+str(iden)+'%\n'
print "Output Format(s):", fmt_lst,'\n'
if iterations > 1:
print "Iterations: ", iterations, '\n'
print "Alignment Length Increment: ", alen_increment, '\n'
print "Sequence identity Increment: ", iden_increment, '\n'
#Initializing directories
blast_db_Dir = name+"/blast_db"
if not continue_from_previous:
if os.path.exists(blast_db_Dir):
shutil.rmtree(blast_db_Dir)
try:
os.mkdir(blast_db_Dir)
except OSError:
print "ERROR: Cannot create project directory: " + blast_db_Dir
raise
results_Dir = name+"/results"
if not continue_from_previous:
if os.path.exists(results_Dir):
shutil.rmtree(results_Dir)
try:
os.mkdir(results_Dir)
except OSError:
print "ERROR: Cannot create project directory: " + results_Dir
raise
input_files_Dir = name+"/input_files"
if not continue_from_previous:
if os.path.exists(input_files_Dir):
shutil.rmtree(input_files_Dir)
try:
os.mkdir(input_files_Dir)
except OSError:
print "ERROR: Cannot create project directory: " + input_files_Dir
raise
# Writing raw reference files into a specific input filename
input_ref_records = {}
for reference in ref_lst:
ref_records_ind = parse_contigs_ind(reference)
#ref_records = dict(ref_records_ind)
input_ref_records.update(ref_records_ind)
ref_records_ind.close()
#input_ref_records.update(ref_records)
ref_out_0 = input_files_Dir+"/reference0.fna"
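    # Optionally shred the pooled references into shear_val-bp fragments
    # before writing reference0.fna (genome_shredder is defined earlier in
    # this script).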
if (sheared & bool(shear_val)):
with open(ref_out_0, "w") as handle:
SeqIO.write(genome_shredder(input_ref_records, shear_val).values(), handle, "fasta")
            # no explicit close needed: the with statement closes the file automatically
else:
with open(ref_out_0, "w") as handle:
SeqIO.write(input_ref_records.values(), handle, "fasta")
# Making BLAST databases
#output fname from before used as input for blast database creation
input_ref_0 = ref_out_0
title_db = name+"_db"#add iteration functionality
    iter_db_Dir = blast_db_Dir+"/iteration"+str(iterations)
    if not os.path.exists(iter_db_Dir):
        os.mkdir(iter_db_Dir)  # makeblastdb does not create missing parent directories
    outfile_db = iter_db_Dir+"/"+name+"_db"#change into for loop
    os.system("makeblastdb -in "+input_ref_0+" -dbtype prot -title "+title_db+" -out "+outfile_db+" -parse_seqids")
# BLASTing query contigs
if not skip_blasting:
print "\nBLASTing query file(s):"
for i in range(len(mg_lst)):
database = outfile_db # adjust for iterations
blasted_lst.append(results_Dir+"/recruited_mg_"+str(i)+".tab")
start = time.time()
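            # blastx = translated nucleotide query vs. protein DB (pairs with
            # the makeblastdb -dbtype prot call above); -outfmt 6 emits the 12
            # standard tabular columns parsed into blast_cols further down.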
os_string = 'blastx -db '+database+' -query \"'+mg_lst[i]+'\" -out '+blasted_lst[i]+" -evalue "+str(e_val)+" -outfmt 6 -num_threads 8"
#print os_string
os.system(os_string)
print "\t"+mg_lst[i]+"; Time elapsed: "+str(time.time()-start)+" seconds."
else:
for i in range(len(mg_lst)):
blasted_lst.append(results_Dir+"/recruited_mg_"+str(i)+".tab")
# Parsing BLAST outputs
blast_cols = ['quid', 'suid', 'iden', 'alen', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits']
recruited_mg=[]
for i in range(len(mg_lst)):
try:
df = pandas.read_csv(blasted_lst[i] ,sep="\t", header=None)
except:
df = pandas.DataFrame(columns=blast_cols)
df.columns=blast_cols
recruited_mg.append(df)
# print len(recruited_mg[0])
# print len(recruited_mg[1])
#creating all_records entry
#! Remember to close index objects after they are no longer needed
#! Use helper function close_ind_lst()
all_records = []
all_input_recs = parse_contigs_ind(ref_out_0)
##calculating GC of the reference
if (len(all_input_recs)>1):
ref_gc_lst = np.array([GC(x.seq) for x in all_input_recs.values()])
ref_cnt = ref_gc_lst.size
ref_gc_avg = np.mean(ref_gc_lst)
ref_gc_avg_std = np.std(ref_gc_lst)
if(len(ref_gc_lst) > 0):
ref_gc_avg_sem = stats.sem(ref_gc_lst, axis=0)
else:
ref_gc_avg_sem=0
else:
if (debugging):
print "Only one reference"
ref_gc_lst = np.array([GC(x.seq) for x in all_input_recs.values()])
ref_cnt = ref_gc_lst.size
ref_gc_avg = np.mean(ref_gc_lst)
ref_gc_avg_std=0
ref_gc_avg_sem=0
#ref_gc_avg_sem = stats.sem(ref_gc_lst, axis=0)
# _ = 0
# for key, value in all_input_recs.items():
# _ +=1
# if _ < 20:
# print key, len(value)
print "\nIndexing metagenome file(s):"
for i in range(len(mg_lst)):
start = time.time()
all_records.append(parse_contigs_ind(mg_lst[i]))
print "\t"+mg_lst[i]+" Indexed in : "+str(time.time()-start)+" seconds."
# Transforming data
print "\nParsing recruited contigs:"
for i in range(len(mg_lst)):
start = time.time()
#cutoff_contigs[dataframe]=evalue_filter(cutoff_contigs[dataframe])
recruited_mg[i]=unique_scaffold_topBits(recruited_mg[i])
contig_list = recruited_mg[i]['quid'].tolist()
recruited_mg[i]['Contig_nt']=retrive_sequence(contig_list, all_records[i])
recruited_mg[i]['Contig_size']=recruited_mg[i]['Contig_nt'].apply(lambda x: len(x))
#recruited_mg[i]['Ref_nt']=recruited_mg[i]['suid'].apply(lambda x: all_input_recs[str(x)].seq)
recruited_mg[i]['Ref_size']=recruited_mg[i]['suid'].apply(lambda x: len(all_input_recs[str(x)]))
recruited_mg[i]['Ref_GC']=recruited_mg[i]['suid'].apply(lambda x: GC(all_input_recs[str(x)].seq))
#recruited_mg[i]['Coverage']=recruited_mg[i]['alen'].apply(lambda x: 100.0*float(x))/min(recruited_mg[i]['Contig_size'].apply(lambda y: y),recruited_mg[i]['Ref_size'].apply(lambda z: z))
#df.loc[:, ['B0', 'B1', 'B2']].min(axis=1)
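        # Coverage = aligned length as a % of the shorter of contig/reference;
        # Metric = Coverage scaled by % identity (both on a 0-100 scale).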
recruited_mg[i]['Coverage']=recruited_mg[i]['alen'].apply(lambda x: 100.0*float(x))/recruited_mg[i].loc[:,["Contig_size", "Ref_size"]].min(axis=1)
recruited_mg[i]['Metric']=recruited_mg[i]['Coverage']*recruited_mg[i]['iden']/100.0
try:
recruited_mg[i]['Contig_GC']=recruited_mg[i]['Contig_nt'].apply(lambda x: GC(x))
except:
recruited_mg[i]['Contig_GC']=recruited_mg[i]['Contig_nt'].apply(lambda x: None)
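        # Per-read RPKM = 1 / (reference length in kb * metagenome size in
        # millions of reads); the per-reference total is summed when logging.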
try:
recruited_mg[i]['Read_RPKM']=1.0/((recruited_mg[i]['Ref_size']/1000.0)*(len(all_records[i])/1000000.0))
except:
recruited_mg[i]['Read_RPKM']=np.nan
#recruited_mg[i] = recruited_mg[i][['quid', 'suid', 'iden', 'alen','Coverage','Metric', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits','Ref_size','Ref_GC','Ref_nt','Contig_size','Contig_GC','Contig_nt']]
recruited_mg[i] = recruited_mg[i][['quid', 'suid', 'iden', 'alen','Coverage','Metric', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits','Ref_size','Ref_GC','Contig_size','Contig_GC','Read_RPKM','Contig_nt']]
print "\tContigs from "+mg_lst[i]+" parsed in : "+str(time.time()-start)+" seconds."
# Here would go statistics functions and producing plots
#
#
#
#
#
# Quality filtering before outputting
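    # Two modes: alen given as a percentage thresholds the Coverage column;
    # alen given in bp thresholds the raw alignment length instead.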
if alen_percent:
for i in range(len(recruited_mg)):
recruited_mg[i]=recruited_mg[i][(recruited_mg[i]['iden']>=iden)&(recruited_mg[i]['Coverage']>=alen)&(recruited_mg[i]['eval']<=e_val)]
if alen_bp:
for i in range(len(recruited_mg)):
recruited_mg[i]=recruited_mg[i][(recruited_mg[i]['iden']>=iden)&(recruited_mg[i]['alen']>=alen)&(recruited_mg[i]['eval']<=e_val)]
# print len(recruited_mg[0])
# print len(recruited_mg[1])
# Batch export to outfmt (csv and/or multiple FASTA)
alen_str = ""
iden_str = "_iden_"+str(iden)+"%"
if alen_percent:
alen_str = "_alen_"+str(alen)+"%"
if alen_bp:
alen_str = "_alen_"+str(alen)+"bp"
if iterations > 1:
prefix=name+"/results/"+name.split("/")[0]+"_iter_e_"+str(e_val)+iden_str+alen_str
else:
prefix=name+"/results/"+name.split("/")[0]+"_e_"+str(e_val)+iden_str+alen_str
if sheared:
prefix = prefix+'_sheared_'+str(shear_val)+"bp"
prefix = prefix + "_recruited_mg_"
#initializing log file data
logfile=name.split("/")[0]+"/results_log.csv"
try:
        run = int(name.split("/")[-1].split("_")[-1])  # the wrapper script appends a trailing "_<run>" to the project name
except:
if name.split("/")[-1].split("_")[-1]==name:
run = 0
else:
print "Warning: Run identifier could not be written in: "+logfile
#sys.exit(1)
run = None
alen_header = "Min alen"
if alen_bp:
alen_header = alen_header+" (bp)"
if alen_percent:
alen_header = alen_header+" (%)"
shear_header = "Reference Shear (bp)"
shear_log_value = 0
if sheared:
shear_log_value = str(shear_val)
print "\nWriting files:"
for i in range(len(mg_lst)):
        records = []
        # 'ids' also feeds the log row below, so build it regardless of the
        # requested output format(s)
        ids = recruited_mg[i]['quid'].tolist()
        if "csv" in fmt_lst:
            outfile1 = prefix+str(i)+".csv"
            recruited_mg[i].to_csv(outfile1, sep='\t')
            print str(len(recruited_mg[i]))+" sequences written to "+outfile1
        if "fasta" in fmt_lst:
            #if len(ids)==len(sequences):
            for j in range(len(ids)):
                records.append(all_records[i][ids[j]])
outfile2 = prefix+str(i)+".fasta"
with open(outfile2, "w") as output_handle:
SeqIO.write(records, output_handle, "fasta")
print str(len(ids))+" sequences written to "+outfile2
#Writing logfile
try:
time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
except:
print "Warning: Time identifier could not be written in: "+logfile
metagenome = mg_lst[i]
#contig info
rpkm_lst = np.array(recruited_mg[i]['Read_RPKM'].tolist())
if(len(rpkm_lst) > 0):
rpkm = np.sum(rpkm_lst)
rpkm_std= np.std(rpkm_lst)
rpkm_sem = np.std(rpkm_lst)*np.sqrt(len(rpkm_lst))
else:
rpkm = 0
rpkm_std= 0
rpkm_sem=0
sizes_lst = np.array(recruited_mg[i]['Contig_size'].tolist())
if(len(sizes_lst) > 0):
sizes_avg = np.mean(sizes_lst)
sizes_avg_std= np.std(sizes_lst)
if(len(sizes_lst) > 1):
sizes_avg_sem = stats.sem(sizes_lst, axis=0)
else:
sizes_avg_sem = 0
else:
sizes_avg = 0
sizes_avg_std= 0
sizes_avg_sem=0
#sizes_avg_sem = stats.sem(sizes_lst, axis=0)
alen_lst = np.array(recruited_mg[i]['alen'].tolist())
if(len(alen_lst) > 0):
alen_avg = np.mean(alen_lst)
alen_avg_std = np.std(alen_lst)
if(len(alen_lst) > 1):
alen_avg_sem = stats.sem(alen_lst, axis=0)
else:
alen_avg_sem = 0
else:
alen_avg = 0
alen_avg_std = 0
alen_avg_sem=0
#alen_avg_sem = stats.sem(alen_lst, axis=0)
iden_lst = np.array(recruited_mg[i]['iden'].tolist())
if(len(iden_lst) > 0):
iden_avg = np.mean(iden_lst)
iden_avg_std = np.std(iden_lst)
if(len(iden_lst) > 1):
iden_avg_sem = stats.sem(iden_lst, axis=0)
else:
iden_avg_sem = 0
else:
iden_avg = 0
iden_avg_std = 0
iden_avg_sem=0
#iden_avg_sem = stats.sem(iden_lst, axis=0)
gc_lst = np.array(recruited_mg[i]['Contig_GC'].tolist())
if(len(gc_lst) > 0):
gc_avg = np.mean(gc_lst)
gc_avg_std = np.std(gc_lst)
if(len(gc_lst) > 1):
gc_avg_sem = stats.sem(gc_lst, axis=0)
else:
gc_avg_sem = 0
else:
gc_avg = 0
gc_avg_std = 0
gc_avg_sem=0
        if len(all_records[i]) > 0:  # guard the actual denominator (metagenome size), not ref_cnt
recr_percent = float(len(ids))/float(len(all_records[i]))*100
else:
recr_percent = 0.0
#log_header = ['Run','Project Name','Created', 'Reference(s)','Metagenome', 'No. Contigs','No. References', alen_header, "Min iden (%)", shear_header, "Mean Contig Size (bp)","STD Contig Size", "SEM Contig Size", "Mean Contig alen (bp)","STD Contig alen", "SEM Contig alen", "Mean Contig iden (bp)","STD Contig iden", "SEM Contig iden", "Mean Contig GC (%)","STD Contig GC","SEM Contig GC","Mean Reference GC (%)","STD Reference GC","SEM Reference GC"]
log_header = ['Run','Project Name','Created', 'Reference(s)', shear_header,'No. Ref. Sequences','Metagenome','No. Metagenome Contigs' , alen_header, "Min iden (%)",'No. Recruited Contigs','% Recruited Contigs', 'Total RPKM', 'RPKM STD', 'RPKM SEM', "Mean Rec. Contig Size (bp)","STD Rec. Contig Size", "SEM Rec. Contig Size", "Mean alen (bp)","STD alen", "SEM alen", "Mean Rec. Contig iden (bp)","STD Rec. Contig iden", "SEM Rec. Contig iden", "Mean Rec. Contigs GC (%)","STD Rec. Contig GC","SEM Rec. Contig GC","Mean Total Reference(s) GC (%)","STD Total Reference(s) GC","SEM Total Reference(s) GC"]
#log_row = [run,name.split("/")[0],time_str, ";".join(ref_lst), metagenome, len(ids),ref_cnt, alen, iden, shear_log_value, sizes_avg,sizes_avg_std, sizes_avg_sem, alen_avg,alen_avg_std, alen_avg_sem, iden_avg,iden_avg_std, iden_avg_sem, gc_avg,gc_avg_std, gc_avg_sem,ref_gc_avg,ref_gc_avg_std, ref_gc_avg_sem]
log_row = [run,name.split("/")[0],time_str, ";".join(ref_lst), shear_log_value,ref_cnt, metagenome,len(all_records[i]) , alen, iden,len(ids),recr_percent,rpkm, rpkm_std, rpkm_sem, sizes_avg,sizes_avg_std, sizes_avg_sem, alen_avg,alen_avg_std, alen_avg_sem, iden_avg,iden_avg_std, iden_avg_sem, gc_avg,gc_avg_std, gc_avg_sem,ref_gc_avg,ref_gc_avg_std, ref_gc_avg_sem]
if os.path.isfile(logfile):#file exists - appending
with open(logfile, "a") as log_handle:
log_writer = csv.writer(log_handle, delimiter='\t')
log_writer.writerow(log_row)
else:#no file exists - writing
with open(logfile,"w") as log_handle:
log_writer = csv.writer(log_handle, delimiter='\t')
log_writer.writerow(log_header)
log_writer.writerow(log_row)
close_ind_lst(all_records)
close_ind_lst([all_input_recs])
#run = 0
#all_records[i].close()# keep open if multiple iterations
#recruited_mg_1 = pandas.read_csv(out_name1 ,sep="\t", header=None)
#recruited_mg_1.columns=['quid', 'suid', 'iden', 'alen', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits']
#recruited_mg_2 = pandas.read_csv(out_name2 ,sep="\t", header=None)
#recruited_mg_2.columns=['quid', 'suid', 'iden', 'alen', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits']
#recruited_mg = [recruited_mg_1, recruited_mg_2]
# blast_db_Dir = ""
# results_Dir = ""
# input_files_Dir = ""
# parsed = SeqIO.parse(handle, "fasta")
#
# records = list()
#
#
# total = 0
# processed = 0
# for record in parsed:
# total += 1
# #print(record.id), len(record.seq)
# if len(record.seq) >= length:
# processed += 1
# records.append(record)
# handle.close()
#
# print "%d sequences found"%(total)
#
# try:
# output_handle = open(outfile, "w")
# SeqIO.write(records, output_handle, "fasta")
# output_handle.close()
# print "%d sequences written"%(processed)
# except:
# print "ERROR: Illegal output filename"
# sys.exit(1)
if __name__ == "__main__":
main(sys.argv[1:])
|
nyirock/mg_blast_wrapper
|
mg_xwrapser_latest.py
|
Python
|
mit
| 26,958
|
[
"BLAST"
] |
0a51ab19dcb4ec9412f0833dea526a616c7ec895ecfdfc565214595a1c6c8097
|
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework import viewsets
from .models import Vault, Recent
from .serializers import VaultSerializer, RecentSerializer, TagsListSerializer
from rest_framework.decorators import detail_route
from rest_framework.response import Response
from django.utils import timezone
from taggit.models import Tag
from rest_framework.generics import ListAPIView
from django.core import serializers
from rest_framework import status
from django.views.generic import View
from django.http import HttpResponse
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
from django.contrib import auth
from django.http import JsonResponse
from rest_framework.authtoken.models import Token
import os
import logging
@csrf_exempt
def login(request):
"""
token authentication for desktop apps
:param request:
:return:
"""
if request.method == "POST":
username = request.POST.get('username')
password = request.POST.get('password')
user = auth.authenticate(username=username, password=password)
if user:
token = Token.objects.get_or_create(user=user)
return JsonResponse({'token': str(token[0])})
else:
return HttpResponse(
"""
Username or password is not valid!
""",
status=403,
)
class BunnyAppView(View):
"""
Serves the compiled frontend entry point (only works if you have run `npm
run build`).
"""
def get(self, request):
try:
with open(os.path.join(settings.REACT_APP_DIR, 'build', 'index.html')) as f:
return HttpResponse(f.read())
except FileNotFoundError:
logging.exception('Production build of app not found')
return HttpResponse(
"""
This URL is only used when you have built the production
version of the app. Visit http://localhost:3000/ instead, or
run `npm run build` to test the production version.
""",
status=501,
)
class VaultViewSet(viewsets.ModelViewSet):
authentication_classes = (SessionAuthentication, BasicAuthentication, JSONWebTokenAuthentication)
permission_classes = (IsAuthenticated,)
queryset = Vault.objects.all()
serializer_class = VaultSerializer
def retrieve(self, request, pk=None, *args, **kwargs):
"""
Save recently accessed item to recent table
"""
instance = self.get_object()
if instance:
            obj, created = Recent.objects.get_or_create(vault=instance)
            if created:
                # cap the recents table at 20 items: when a new entry pushes it
                # over the limit, evict the oldest one
                if Recent.objects.count() > 20:
                    Recent.objects.first().delete()
            obj.accessed_at = timezone.now()
            obj.save()
serializer = self.get_serializer(instance)
return Response(serializer.data)
# def get_serializer(self, *args, **kwargs):
# """
# enable bulk creation
# """
# if "data" in kwargs:
# data = kwargs["data"]
#
# if isinstance(data, list):
# kwargs["many"] = True
# return super(VaultViewSet, self).get_serializer(*args, **kwargs)
# class TagViewSet(viewsets.ModelViewSet):
# authentication_classes = (SessionAuthentication, BasicAuthentication, JSONWebTokenAuthentication)
# permission_classes = (IsAuthenticated,)
# queryset = Tag.objects.all()
# serializer_class = TagSerializer
# @detail_route(methods=['get'])
# def tagged(self, request, *args, **kwargs):
# """
# Get all Vault item with specific tag
# """
# instance = self.get_object()
# if instance:
# hiren = Vault.objects.filter(tag=instance.id)
# data = serializers.serialize("json", hiren)
# return Response(data)
class RecentViewSet(viewsets.ModelViewSet):
authentication_classes = (SessionAuthentication, BasicAuthentication, JSONWebTokenAuthentication)
permission_classes = (IsAuthenticated,)
queryset = Recent.objects.order_by('-accessed_at')
serializer_class = RecentSerializer
class TagsListView(ListAPIView):
"""
API endpoint that return list of tags
"""
queryset = Tag.objects.all()
permission_classes = (IsAuthenticated,)
authentication_classes = (SessionAuthentication, BasicAuthentication, JSONWebTokenAuthentication)
serializer_class = TagsListSerializer
# class SecretViewset(viewsets.ModelViewSet):
# """
# API endpoint that allows secret key to be created, viewed ,edited.
# """
# authentication_classes = (SessionAuthentication, BasicAuthentication, JSONWebTokenAuthentication)
# permission_classes = (IsAuthenticated,)
# queryset = Secret.objects.all()
# serializer_class = SecretSerializer
# def create(self, request, *args, **kwargs):
# """
# Check if secret key already exists
# """
# count = Secret.objects.all().count()
# if count == 1:
# content = {'error': 'key already exits'}
# return Response(content, status.HTTP_403_FORBIDDEN)
# else:
# instance = self.request.data['key']
# Secret.objects.create(key=instance)
# response = {"done": "key created"}
# return Response(response, status.HTTP_201_CREATED)
# def destroy(self, request, pk=None, *args, **kwargs):
# bunny = {'error': 'method not supported :/'}
# return Response(bunny, status.HTTP_403_FORBIDDEN)
# def list(self, request, *args, **kwargs):
# query = Secret.objects.all()
# if query.count() == 0:
# return Response(" :P ", status.HTTP_404_NOT_FOUND)
# else:
# serializer = self.get_serializer(query, many=True)
# return Response(serializer.data)
|
pyprism/Hiren-Vault
|
password/views.py
|
Python
|
mit
| 6,310
|
[
"VisIt"
] |
9b9fcb377c4a64f8648e46c12ea1bde03294d49ff1341f934d00ba66d6373c6b
|
# This file is part of cclib (http://cclib.github.io), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2006,2007,2012,2014,2015, the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
"""Test unrestrictied single point logfiles in cclib"""
import os
import unittest
import numpy
from skip import skipForParser
from skip import skipForLogfile
__filedir__ = os.path.realpath(os.path.dirname(__file__))
class GenericSPunTest(unittest.TestCase):
"""Generic unrestricted single point unittest"""
def testnatom(self):
"""Is the number of atoms equal to 20?"""
self.assertEquals(self.data.natom, 20)
def testatomnos(self):
"""Are the atomnos correct?"""
self.failUnless(numpy.alltrue([numpy.issubdtype(atomno,int) for atomno in self.data.atomnos]))
self.assertEquals(self.data.atomnos.shape, (20,) )
self.assertEquals(sum(self.data.atomnos==6) + sum(self.data.atomnos==1), 20)
def testatomcoords(self):
"""Are the dimensions of atomcoords 1 x natom x 3?"""
self.assertEquals(self.data.atomcoords.shape,(1,self.data.natom,3))
@skipForParser('Jaguar', 'Data file does not contain enough information')
def testdimmocoeffs(self):
"""Are the dimensions of mocoeffs equal to 2 x nmo x nbasis?"""
self.assertEquals(type(self.data.mocoeffs), type([]))
self.assertEquals(len(self.data.mocoeffs), 2)
self.assertEquals(self.data.mocoeffs[0].shape,
(self.data.nmo, self.data.nbasis))
self.assertEquals(self.data.mocoeffs[1].shape,
(self.data.nmo, self.data.nbasis))
def testcharge_and_mult(self):
"""Are the charge and multiplicity correct?"""
self.assertEquals(self.data.charge, 1)
self.assertEquals(self.data.mult, 2)
def testhomos(self):
"""Are the homos correct?"""
msg = "%s != array([34,33],'i')" % numpy.array_repr(self.data.homos)
numpy.testing.assert_array_equal(self.data.homos, numpy.array([34,33],"i"), msg)
def testmoenergies(self):
"""Are the dims of the moenergies equals to 2 x nmo?"""
self.assertEquals(len(self.data.moenergies), 2)
self.assertEquals(len(self.data.moenergies[0]), self.data.nmo)
self.assertEquals(len(self.data.moenergies[1]), self.data.nmo)
@skipForParser('Molpro', '?')
@skipForParser('ORCA', 'ORCA has no support for symmetry yet')
def testmosyms(self):
"""Are the dims of the mosyms equals to 2 x nmo?"""
shape = (len(self.data.mosyms), len(self.data.mosyms[0]))
self.assertEquals(shape, (2, self.data.nmo))
class GamessUK70SPunTest(GenericSPunTest):
"""Customized unrestricted single point unittest"""
def testdimmocoeffs(self):
"""Are the dimensions of mocoeffs equal to 2 x (homos+6) x nbasis?"""
self.assertEquals(type(self.data.mocoeffs), type([]))
self.assertEquals(len(self.data.mocoeffs), 2)
# This is only an issue in version 7.0 (and before?), since in the version 8.0
# logfile all eigenvectors are happily printed.
shape_alpha = (self.data.homos[0]+6, self.data.nbasis)
shape_beta = (self.data.homos[1]+6, self.data.nbasis)
self.assertEquals(self.data.mocoeffs[0].shape, shape_alpha)
self.assertEquals(self.data.mocoeffs[1].shape, shape_beta)
def testnooccnos(self):
"""Are natural orbital occupation numbers the right size?"""
self.assertEquals(self.data.nooccnos.shape, (self.data.nmo, ))
class GamessUK80SPunTest(GenericSPunTest):
"""Customized unrestricted single point unittest"""
def testnooccnos(self):
"""Are natural orbital occupation numbers the right size?"""
self.assertEquals(self.data.nooccnos.shape, (self.data.nmo, ))
class GaussianSPunTest(GenericSPunTest):
"""Customized unrestricted single point unittest"""
def testatomnos(self):
"""Does atomnos have the right dimension (20)?"""
size = len(self.data.atomnos)
self.assertEquals(size, 20)
class JaguarSPunTest(GenericSPunTest):
"""Customized unrestricted single point unittest"""
def testmoenergies(self):
"""Are the dims of the moenergies equal to 2 x homos+11?"""
self.assertEquals(len(self.data.moenergies), 2)
self.assertEquals(len(self.data.moenergies[0]), self.data.homos[0]+11)
self.assertEquals(len(self.data.moenergies[1]), self.data.homos[1]+11)
def testmosyms(self):
"""Are the dims of the mosyms equals to 2 x nmo?"""
shape0 = (len(self.data.mosyms), len(self.data.mosyms[0]))
shape1 = (len(self.data.mosyms), len(self.data.mosyms[1]))
self.assertEquals(shape0, (2, self.data.homos[0]+11))
self.assertEquals(shape1, (2, self.data.homos[1]+11))
if __name__=="__main__":
import sys
sys.path.append(os.path.join(__filedir__, ".."))
from test_data import DataSuite
suite = DataSuite(['SPun'])
suite.testall()
|
jchodera/cclib
|
test/data/testSPun.py
|
Python
|
lgpl-2.1
| 5,462
|
[
"Jaguar",
"Molpro",
"ORCA",
"cclib"
] |
e8ecff0add3828c5b4e26083aefee2d1fb1d13f475fa13935edaceb534a9e2d5
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
from pymatgen.util.testing import PymatgenTest
from pymatgen.transformations.defect_transformations import \
DefectTransformation
from pymatgen.analysis.defects.core import Vacancy
class DefectTransformationTest(PymatgenTest):
def test_apply_transformation(self):
struc = PymatgenTest.get_structure("VO2")
vac = Vacancy(struc, struc[0], charge=1)
def_transform = DefectTransformation([2, 2, 2], vac)
trans_structure = def_transform.apply_transformation(struc)
self.assertEqual(len(trans_structure), 47)
        # confirm that the transformation raises a ValueError for bulk
        # structures slightly different from the one used for the defect object
        # scaled volume
scaled_struc = struc.copy()
scaled_struc.scale_lattice(1.1 * struc.volume)
self.assertRaises(ValueError, def_transform.apply_transformation,
scaled_struc)
# slightly different atomic positions
pert_struc = struc.copy()
pert_struc.perturb(.1)
self.assertRaises(ValueError, def_transform.apply_transformation,
pert_struc)
if __name__ == '__main__':
unittest.main()
|
mbkumar/pymatgen
|
pymatgen/transformations/tests/test_defect_transformations.py
|
Python
|
mit
| 1,327
|
[
"pymatgen"
] |
cd2f7c5eee5c909086a75bbf04560b48b4ce85dd6cefa202d5629b912863d3d1
|
# This module provides classes that represent VRML objects for use
# in data visualization applications.
#
# Written by: Konrad Hinsen <hinsen@cnrs-orleans.fr>
# With contributions from Frank Horowitz <frank@ned.dem.csiro.au>
# Last revision: 2004-7-19
#
"""This module provides definitions of simple 3D graphics objects and
VRML scenes containing them. The objects are appropriate for data
visualization, not for virtual reality modelling. Scenes can be written
to VRML files or visualized immediately using a VRML browser, whose
name is taken from the environment variable VRML2VIEWER (under Unix).
There are a few attributes that are common to all graphics objects:
material -- a Material object defining color and surface properties
comment -- a comment string that will be written to the VRML file
reuse -- a boolean flag (defaulting to false). If set to one,
the object may share its VRML definition with other
objects. This reduces the size of the VRML file, but
can yield surprising side effects in some cases.
This module used the VRML 2.0 definition, also known as VRML97. For
the original VRML 1, use the module VRML, which uses exactly the same
interface. There is another almost perfectly compatible module VMD,
which produces input files for the molecular visualization program
VMD.
Example:
>>> from Scientific.Visualization.VRML2 import *
>>> scene = Scene([])
>>> scale = ColorScale(10.)
>>> for x in range(11):
...     color = scale(x)
...     scene.addObject(Cube(Vector(x, 0., 0.), 0.2,
...                          material=Material(diffuse_color = color)))
>>> scene.view()
"""
from Scientific.IO.TextFile import TextFile
from Scientific.Geometry import Transformation, Vector, VectorModule
import Numeric
import os, string, tempfile
from Color import *
#
# VRML file
#
class SceneFile:
def __init__(self, filename, mode = 'r'):
if mode == 'r':
raise TypeError, 'Not implemented.'
self.file = TextFile(filename, 'w')
self.file.write('#VRML V2.0 utf8\n')
self.file.write('Transform { children [\n')
self.memo = {}
self.name_counter = 0
def __del__(self):
self.close()
def writeString(self, data):
self.file.write(data)
def close(self):
if self.file is not None:
self.file.write(']}\n')
self.file.close()
self.file = None
def write(self, object):
object.writeToFile(self)
def uniqueName(self):
self.name_counter = self.name_counter + 1
return 'i' + `self.name_counter`
VRMLFile = SceneFile
#
# Scene
#
class Scene:
"""VRML scene
A VRML scene is a collection of graphics objects that can be
written to a VRML file or fed directly to a VRML browser.
Constructor: Scene(|objects|=None, |cameras|=None, **|options|)
Arguments:
|objects| -- a list of graphics objects or 'None' for an empty scene
|cameras| -- a list of cameras
|options| -- options as keyword arguments (none defined at the moment;
this argument is provided for compatibility with
other modules)
"""
def __init__(self, objects = None, cameras = None, **options):
if objects is None:
self.objects = []
elif type(objects) == type([]):
self.objects = objects
else:
self.objects = [objects]
if cameras is None:
self.cameras = []
else:
self.cameras = cameras
def __len__(self):
return len(self.objects)
def __getitem__(self, item):
        return self.objects[item]
def addObject(self, object):
"Adds |object| to the list of graphics objects."
self.objects.append(object)
def addCamera(self, camera):
"Adds |camera| to the list of cameras."
self.cameras.append(camera)
def writeToFile(self, filename):
"Writes the scene to a VRML file with name |filename|."
file = VRMLFile(filename, 'w')
if self.cameras:
for camera in self.cameras:
camera.writeToFile(file)
for o in self.objects:
o.writeToFile(file)
file.close()
def view(self, *args):
"Start a VRML browser for the scene."
import sys
filename = tempfile.mktemp()+'.wrl'
if sys.platform == 'win32':
import win32api
self.writeToFile(filename)
win32api.ShellExecute(0, "open", filename, None, "", 1)
elif os.environ.has_key('VRML2VIEWER'):
self.writeToFile(filename)
if os.fork() == 0:
os.system(os.environ['VRML2VIEWER'] + ' ' + filename +
' 1> /dev/null 2>&1')
os.unlink(filename)
os._exit(0)
else:
print 'No VRML2 viewer defined'
#
# Camera class
#
class Camera:
"""Camera/viewpoint for a scene
Constructor: Camera(|position|, |orientation|, |description|,
|field_of_view|)
Arguments:
|position| -- the location of the camera (a vector)
|orientation| -- an (axis, angle) tuple in which the axis is
a vector and angle a number (in radians);
axis and angle specify a rotation with respect
to the standard orientation along the negative z axis
|description| -- a label for the viewpoint (a string)
|field_of_view| -- the field of view (a positive number)
"""
def __init__(self, position=None, orientation=None,
description=None, field_of_view=None):
self.field_of_view = field_of_view
self.orientation = orientation
self.position = position
self.description = description
def writeToFile(self, file):
file.writeString('Viewpoint {\n')
if self.field_of_view != None:
file.writeString('fieldOfView %f\n' % self.field_of_view)
if self.orientation != None:
axis, angle = self.orientation
axis = axis.normal()
file.writeString('orientation %f %f %f %f\n' % \
(axis[0], axis[1], axis[2], angle))
if self.position != None:
file.writeString('position %f %f %f\n' % \
(self.position[0], \
self.position[1], \
self.position[2]))
if self.description != None:
file.writeString('description "%s"' % \
self.description)
file.writeString('}\n')
#
# Navigation Info
#
class NavigationInfo:
"""Navigation Information
Constructor: NavigationInfo(|speed|, |type|)
Arguments:
|speed| -- walking speed in length units per second
|type| -- one of 'WALK', 'EXAMINE', 'FLY', 'NONE', 'ANY'
"""
def __init__(self, speed=100.0, type="EXAMINE"):
self.speed = speed
self.type = type
def writeToFile(self, file):
file.writeString('NavigationInfo {\n')
file.writeString('speed %f\n' % self.speed )
file.writeString('type [ ')
if self.type != "ANY":
file.writeString('"%s", ' % self.type)
file.writeString('"ANY" ]\n')
file.writeString('}\n')
#
# Base class for everything that produces nodes
#
class VRMLObject:
def __init__(self, attr):
self.attr = {}
for key, value in attr.items():
if key in self.attribute_names:
self.attr[key] = value
else:
raise AttributeError, 'illegal attribute: ' + str(key)
attribute_names = ['comment']
def __getitem__(self, attr):
try:
return self.attr[attr]
except KeyError:
return None
def __setitem__(self, attr, value):
self.attr[attr] = value
def __copy__(self):
return copy.deepcopy(self)
def writeToFile(self, file):
raise AttributeError, 'Class ' + self.__class__.__name__ + \
' does not implement file output.'
#
# Shapes
#
class ShapeObject(VRMLObject):
def __init__(self, attr, rotation, translation, reference_point):
VRMLObject.__init__(self, attr)
if rotation is None:
rotation = Transformation.Rotation(VectorModule.ez, 0.)
else:
rotation = apply(Transformation.Rotation, rotation)
if translation is None:
translation = Transformation.Translation(Vector(0.,0.,0.))
else:
translation = Transformation.Translation(translation)
self.transformation = translation*rotation
self.reference_point = reference_point
attribute_names = VRMLObject.attribute_names + ['material', 'reuse']
def __add__(self, other):
return Group([self]) + Group([other])
def writeToFile(self, file):
comment = self['comment']
if comment is not None:
file.writeString('# ' + comment + '\n')
file.writeString('Transform{\n')
vector = self.transformation.translation().displacement()
axis, angle = self.transformation.rotation().axisAndAngle()
trans_flag = vector.length() > 1.e-4
rot_flag = abs(angle) > 1.e-4
if trans_flag:
file.writeString('translation ' + `vector[0]` + ' ' + \
`vector[1]` + ' ' + `vector[2]` + '\n')
if rot_flag:
file.writeString('rotation ' + `axis[0]` + ' ' + \
`axis[1]` + ' ' + `axis[2]` + ' ' + \
`angle` + '\n')
material = self['material']
reuse = self['reuse']
file.writeString('children [\n')
if reuse:
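            # VRML sharing: the first occurrence of an identical shape is
            # DEFined under a unique name; later occurrences just USE that
            # name. The memo key combines geometry, material and class.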
key = self.memoKey() + (material, self.__class__)
if file.memo.has_key(key):
file.writeString('USE ' + file.memo[key] + '\n')
self.use(file)
if material is not None:
material.use(file)
else:
name = file.uniqueName()
file.memo[key] = name
file.writeString('DEF ' + name + ' Shape{\n')
if material is not None:
file.writeString('appearance ')
material.writeToFile(file)
file.writeString('geometry ')
self.writeSpecification(file)
file.writeString('}\n')
else:
file.writeString('Shape{')
if material is not None:
file.writeString('appearance ')
material.writeToFile(file)
file.writeString('geometry ')
self.writeSpecification(file)
file.writeString('}\n')
file.writeString(']}\n')
def use(self, file):
pass
class Sphere(ShapeObject):
"""Sphere
Constructor: Sphere(|center|, |radius|, **|attributes|)
Arguments:
|center| -- the center of the sphere (a vector)
|radius| -- the sphere radius (a positive number)
|attributes| -- any graphics object attribute
"""
def __init__(self, center, radius, **attr):
self.radius = radius
ShapeObject.__init__(self, attr, None, center, center)
def writeSpecification(self, file):
file.writeString('Sphere{radius ' + `self.radius` + '}\n')
def memoKey(self):
return (self.radius, )
class Cube(ShapeObject):
"""Cube
Constructor: Cube(|center|, |edge|, **|attributes|)
Arguments:
|center| -- the center of the cube (a vector)
|edge| -- the length of an edge (a positive number)
|attributes| -- any graphics object attribute
The edges of a cube are always parallel to the coordinate axes.
"""
def __init__(self, center, edge, **attr):
self.edge = edge
ShapeObject.__init__(self, attr, None, center, center)
def writeSpecification(self, file):
file.writeString('Box{size' + 3*(' ' + `self.edge`) + '}\n')
def memoKey(self):
return (self.edge, )
class LinearOrientedObject(ShapeObject):
def __init__(self, attr, point1, point2):
center = 0.5*(point1+point2)
axis = point2-point1
self.height = axis.length()
if self.height > 0:
axis = axis/self.height
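            # Build the rotation mapping VRML's canonical cylinder axis (+y)
            # onto the requested axis: rotate about ey x axis by the angle
            # recovered from its sine and cosine.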
rot_axis = VectorModule.ey.cross(axis)
sine = rot_axis.length()
cosine = VectorModule.ey*axis
angle = Transformation.angleFromSineAndCosine(sine, cosine)
if abs(angle) < 1.e-4 or abs(angle-2.*Numeric.pi) < 1.e-4:
rotation = None
else:
if abs(sine) < 1.e-4:
rot_axis = VectorModule.ex
rotation = (rot_axis, angle)
else:
rotation = None
ShapeObject.__init__(self, attr, rotation, center, center)
class Cylinder(LinearOrientedObject):
"""Cylinder
Constructor: Cylinder(|point1|, |point2|, |radius|,
|faces|='(1, 1, 1)', **|attributes|)
Arguments:
|point1|, |point2| -- the end points of the cylinder axis (vectors)
|radius| -- the radius (a positive number)
|attributes| -- any graphics object attribute
|faces| -- a sequence of three boolean flags, corresponding to
the cylinder hull and the two circular end pieces,
specifying for each of these parts whether it is visible
or not.
"""
def __init__(self, point1, point2, radius, faces = (1, 1, 1), **attr):
self.faces = faces
self.radius = radius
LinearOrientedObject.__init__(self, attr, point1, point2)
def writeSpecification(self, file):
file.writeString('Cylinder{')
if not self.faces[0]:
file.writeString('side FALSE ')
if not self.faces[1]:
file.writeString('bottom FALSE ')
if not self.faces[2]:
file.writeString('top FALSE ')
file.writeString('radius ' + `self.radius` + \
' height ' + `self.height` + '}\n')
def memoKey(self):
return (self.radius, self.height, self.faces)
class Cone(LinearOrientedObject):
"""Cone
Constructor: Cone(|point1|, |point2|, |radius|, |face|='1', **|attributes|)
Arguments:
|point1|, |point2| -- the end points of the cylinder axis (vectors).
|point1| is the tip of the cone.
|radius| -- the radius (a positive number)
|attributes| -- any graphics object attribute
|face| -- a boolean flag, specifying if the circular bottom is visible
"""
def __init__(self, point1, point2, radius, face = 1, **attr):
self.face = face
self.radius = radius
LinearOrientedObject.__init__(self, attr, point2, point1)
def writeSpecification(self, file):
file.writeString('Cone{')
if not self.face:
file.writeString('bottom FALSE ')
file.writeString('bottomRadius ' + `self.radius` + \
' height ' + `self.height` + '}\n')
def memoKey(self):
return (self.radius, self.height, self.face)
class Line(ShapeObject):
"""Line
Constructor: Line(|point1|, |point2|, **|attributes|)
Arguments:
|point1|, |point2| -- the end points of the line (vectors)
|attributes| -- any graphics object attribute
"""
def __init__(self, point1, point2, **attr):
self.points = (point1, point2)
center = 0.5*(point1+point2)
ShapeObject.__init__(self, attr, None, None, center)
def writeSpecification(self, file):
file.writeString('IndexedLineSet{coord Coordinate{point [' + \
`self.points[0][0]` + ' ' + `self.points[0][1]` + \
' ' + `self.points[0][2]` + ',' + \
`self.points[1][0]` + ' ' + `self.points[1][1]` + \
' ' + `self.points[1][2]` + \
']} coordIndex[0,1,-1]}\n')
def memoKey(self):
return tuple(self.points[0]) + tuple(self.points[1])
class PolyLines(ShapeObject):
"""Multiple connected lines
Constructor: PolyLines(|points|, **|attributes|)
Arguments:
|points| -- a sequence of points to be connected by lines
|attributes| -- any graphics object attribute
"""
def __init__(self, points, **attr):
self.points = points
ShapeObject.__init__(self, attr, None, None, Vector(0., 0., 0.))
def writeSpecification(self, file):
s = 'IndexedLineSet{coord Coordinate{point ['
for p in self.points:
s = s + `p[0]` + ' ' + `p[1]` + ' ' + `p[2]` + ','
file.writeString(s[:-1] + ']} coordIndex')
file.writeString(`range(len(self.points))+[-1]` + '}\n')
def memoKey(self):
return tuple(map(tuple, self.points))
class Polygons(ShapeObject):
"""Polygons
Constructor: Polygons(|points|, |index_lists|, **|attributes|)
Arguments:
|points| -- a sequence of points
|index_lists| -- a sequence of index lists, one for each polygon.
The index list for a polygon defines which points
in |points| are vertices of the polygon.
|attributes| -- any graphics object attribute
"""
def __init__(self, points, index_lists, **attr):
self.points = points
self.index_lists = index_lists
ShapeObject.__init__(self, attr, None, None, Vector(0.,0.,0.))
def writeSpecification(self, file):
file.writeString('IndexedFaceSet{coord Coordinate{point [')
for v in self.points[:-1]:
file.writeString(`v[0]` + ' ' + `v[1]` + ' ' + `v[2]` + ',')
v = self.points[-1]
file.writeString(`v[0]` + ' ' + `v[1]` + ' ' + `v[2]` + \
']} coordIndex[')
for polygon in self.index_lists:
for index in polygon:
file.writeString(`index`+',')
file.writeString('-1,')
file.writeString(']}\n')
def memoKey(self):
return (tuple(map(tuple, self.points)),
tuple(map(tuple, self.index_lists)))
#
# Groups
#
class Group:
def __init__(self, objects, **attr):
self.objects = []
for o in objects:
if isGroup(o):
self.objects = self.objects + o.objects
else:
self.objects.append(o)
for key, value in attr.items():
for o in self.objects:
o[key] = value
is_group = 1
def __len__(self):
return len(self.objects)
def __getitem__(self, item):
        return self.objects[item]
def __coerce__(self, other):
if not isGroup(other):
other = Group([other])
return (self, other)
def __add__(self, other):
return Group(self.objects + other.objects)
def writeToFile(self, file):
for o in self.objects:
o.writeToFile(file)
def isGroup(x):
return hasattr(x, 'is_group')
#
# Composite Objects
#
class Arrow(Group):
"""Arrow
An arrow consists of a cylinder and a cone.
Constructor: Arrow(|point1|, |point2|, |radius|, **|attributes|)
Arguments:
|point1|, |point2| -- the end points of the arrow (vectors).
|point2| defines the tip of the arrow.
|radius| -- the radius of the arrow shaft (a positive number)
|attributes| -- any graphics object attribute
"""
def __init__(self, point1, point2, radius, **attr):
axis = point2-point1
height = axis.length()
axis = axis/height
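        # Cap the tip at four shaft radii so short arrows keep sensible
        # proportions; the cylinder shaft fills whatever length remains.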
cone_height = min(height, 4.*radius)
cylinder_height = height - cone_height
junction = point2-axis*cone_height
cone = apply(Cone, (point2, junction, 0.75*cone_height), attr)
objects = [cone]
if cylinder_height > 0.005*radius:
cylinder = apply(Cylinder, (point1, junction, radius), attr)
objects.append(cylinder)
Group.__init__(self, objects)
#
# Materials
#
class Material(VRMLObject):
"""Material for graphics objects
A material defines the color and surface properties of an object.
Constructor: Material(**|attributes|)
The attributes are "ambient_color", "diffuse_color", "specular_color",
"emissive_color", "shininess", and "transparency".
"""
def __init__(self, **attr):
VRMLObject.__init__(self, attr)
attribute_names = VRMLObject.attribute_names + \
['ambient_color', 'diffuse_color', 'specular_color',
'emissive_color', 'shininess', 'transparency']
attribute_conversion = {'ambient_color': 'ambientColor',
'diffuse_color': 'diffuseColor',
'specular_color': 'specularColor',
'emissive_color': 'emissiveColor',
'shininess': 'shininess',
'transparency': 'transparency'}
def writeToFile(self, file):
if file.memo.has_key(self):
file.writeString('USE ' + file.memo[self] + '\n')
else:
name = file.uniqueName()
file.memo[self] = name
file.writeString('DEF '+name+' Appearance{material Material{\n')
for key, value in self.attr.items():
file.writeString(self.attribute_conversion[key] + ' ' + \
str(value) + '\n')
file.writeString('}}\n')
def use(self, file):
pass
#
# Predefined materials
#
def DiffuseMaterial(color):
"Returns a material with the 'diffuse color' attribute set to |color|."
if type(color) is type(''):
color = ColorByName(color)
try:
return diffuse_material_dict[color]
except KeyError:
m = Material(diffuse_color = color)
diffuse_material_dict[color] = m
return m
diffuse_material_dict = {}
def EmissiveMaterial(color):
"Returns a material with the 'emissive color' attribute set to |color|."
if type(color) is type(''):
color = ColorByName(color)
try:
return emissive_material_dict[color]
except KeyError:
m = Material(emissive_color = color)
emissive_material_dict[color] = m
return m
emissive_material_dict = {}
#
# Test code
#
if __name__ == '__main__':
if 1:
spheres = DiffuseMaterial('green')
links = DiffuseMaterial('red')
s1 = Sphere(VectorModule.null, 0.05, material = spheres, reuse = 1)
s2 = Sphere(VectorModule.ex, 0.05, material = spheres, reuse = 1)
s3 = Sphere(VectorModule.ey, 0.05, material = spheres, reuse = 1)
s4 = Sphere(VectorModule.ez, 0.05, material = spheres, reuse = 1)
a1 = Arrow(VectorModule.null, VectorModule.ex, 0.01, material = links)
a2 = Arrow(VectorModule.null, VectorModule.ey, 0.01, material = links)
a3 = Arrow(VectorModule.null, VectorModule.ez, 0.01, material = links)
scene = Scene([a1, a2, a3, s1, s2, s3, s4])
scene.view()
if 0:
scene = Scene([])
scale = ColorScale(10.)
for x in range(11):
color = scale(x)
m = Material(diffuse_color = color)
scene.addObject(Cube(Vector(x,0.,0.), 0.2, material=m))
scene.view()
if 0:
points = [Vector(0., 0., 0.),
Vector(0., 1., 0.),
Vector(1., 1., 0.),
Vector(1., 0., 0.),
Vector(1., 0., 1.),
Vector(1., 1., 1.)]
indices = [[0, 1, 2, 3, 0], [3, 4, 5, 2, 3]]
scene = Scene(Polygons(points, indices,
material=DiffuseMaterial('yellow')))
scene.view()
if 0:
points = [Vector(0., 0., 0.),
Vector(0., 1., 0.),
Vector(1., 1., 0.),
Vector(1., 0., 0.),
Vector(1., 0., 1.),
Vector(1., 1., 1.)]
scene = Scene(PolyLines(points, material = EmissiveMaterial('yellow')))
scene.view()
|
OS2World/DEV-PYTHON-UTIL-ScientificPython
|
src/Lib/site-packages/Scientific/Visualization/VRML2.py
|
Python
|
isc
| 21,701
|
[
"VMD"
] |
8875a10ee8c5010f26b9903d4599c70fc64b76400e41eb8372f200783b351a19
|
#
# Test the multipole transformation rules
#
import sys
from math import exp, sqrt, sin, cos, pi
from cmath import exp as cexp
import random
import numpy as np
import ase
from hotbit.coulomb.multipole import zero_moments, get_moments
from _hotbit import multipole_to_multipole, multipole_to_local, local_to_local, transform_multipole
###
### For debugging purposes
#def D(l, m, n, a, b, g):
# """
# This is Wigner's formula. For debugging purposes only.
# """
#
# h = 0.0
# for s in range(0, max(l+abs(m), l+abs(n))+1):
# f1 = l+n-s
# f2 = m-n+s
# f3 = l-m-s
# if f1 >= 0 and f2 >= 0 and f3 >= 0:
#        h += (-1.0)**(m-n-s)/(factorial(f1)*factorial(s)*factorial(f2)*factorial(f3)) * (cos(b/2))**(2*l+n-m-2*s) * (sin(b/2))**(m-n+2*s)
#
# return cexp(-1j*m*a) * cexp(-1j*n*g) * h * factorial(l+n)*factorial(l-n)
###
###
ANGLE = 10*pi/180
NRUNS = 10
NAT = 20
SX, SY, SZ = 10.0, 10.0, 10.0
CHARGE = 1.0
L_MAX = 5
TOL_MOM = 1e-6
TOL_PHI = 1e-4
TOL_PHI2 = 1e-4
TOL_ROT = 1e-9
debug = False
###
for run in range(NRUNS):
r0 = np.array( [ SX/2, SY/2, SZ/2 ] )
r0c = np.array( [ SX, SY, SZ ] )
# Random atoms and charges (charges between -1 and 1)
a = [ ]
for i in range(8):
a += [ ase.Atoms(
"%iH" % NAT,
positions = np.random.random([NAT,3])*SX,
charges = (2*np.random.random([NAT])-1)*CHARGE,
cell = [ SX, SY, SZ ]
) ]
# Compute moments
M = [ ]
for i in range(8):
M += [ get_moments(a[i].get_positions(), a[i].get_initial_charges(), L_MAX, r0) ]
# Construct a composite atoms object
# and compute the corresponding multipole
# expansion
b = ase.Atoms(cell=[ 2*SX, 2*SY, 2*SZ ])
Mc0_l, Mc_L = zero_moments(L_MAX)
for i, ( ca, ( M0_l, M_L ) ) in enumerate(zip(a, M)):
x = i % 2
        y = (i//2) % 2  # floor division keeps the octant index integral on Python 3
        z = (i//4) % 2
dr = np.array([x*SX, y*SY, z*SZ])
ca.translate(dr)
b += ca
dr = np.array([(2*x-1)*SX/2, (2*y-1)*SY/2, (2*z-1)*SZ/2])
multipole_to_multipole(dr, L_MAX, M0_l, M_L, Mc0_l, Mc_L)
# Compute the multipole moment directly
Md0_l, Md_L = get_moments(b.get_positions(), b.get_initial_charges(), L_MAX, r0c)
err_mom1 = np.max(np.abs(Mc0_l-Md0_l))
err_mom2 = np.max(np.abs(Mc_L-Md_L))
if debug:
print("err_mom1 = ", err_mom1)
print("err_mom2 = ", err_mom2)
assert err_mom1 < TOL_MOM and err_mom2 < TOL_MOM
# Now that we have verified that the moments are okay,
    # let's compute the field at a random, well-separated point.
x, y, z = np.random.random_integers(-3,3,3)
while abs(x) != 3 and abs(y) != 3 and abs(z) != 3:
x, y, z = np.random.random_integers(-3,3,3)
r0tar = ( np.array([x, y, z]) + np.random.random(3) ) * np.array([2*SX, 2*SY, 2*SZ])
L0_l, L_L = multipole_to_local(r0tar - r0c, L_MAX, Mc0_l, Mc_L)
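    # Convention exercised below: L0_l[0] is the potential at the expansion
    # center, and the l=1 terms encode the field as
    # E = (-Re L_L[0], -Im L_L[0], L0_l[1]).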
phi = 0.0
E = np.zeros(3)
for i in b:
dr = i.position - r0tar
phi += i.charge/sqrt(np.dot(dr, dr))
E -= i.charge*dr/(np.dot(dr, dr)**(3./2))
err_phi1 = abs(phi - L0_l[0])
err_phi2 = np.max(np.abs(E - np.array([ -L_L[0].real,
-L_L[0].imag,
L0_l[1] ])))
if debug:
print("err_phi1 = ", err_phi1)
print("err_phi2 = ", err_phi2)
assert err_phi1 < TOL_PHI
assert err_phi2 < TOL_PHI
# Shift the expansion somewhere else
r0tar2 = ( np.array([x, y, z]) + np.random.random(3) ) * np.array([2*SX, 2*SY, 2*SZ])
L0_l2, L_L2 = local_to_local(r0tar2 - r0tar, L_MAX, L0_l, L_L, L_MAX)
phi = 0.0
E = np.zeros(3)
for i in b:
dr = i.position - r0tar2
phi += i.charge/sqrt(np.dot(dr, dr))
E -= i.charge*dr/(np.dot(dr, dr)**(3./2))
err_phi3 = abs(phi - L0_l2[0])
err_phi4 = np.max(np.abs(E - np.array([ -L_L2[0].real,
-L_L2[0].imag,
L0_l2[1] ])))
if debug:
print("err_phi3 = ", err_phi3)
print("err_phi4 = ", err_phi4)
assert err_phi3 < TOL_PHI2
assert err_phi4 < TOL_PHI2
# Compute the multipole moment directly
Md0_l, Md_L = get_moments(b.get_positions(), b.get_initial_charges(), L_MAX, r0c)
# Now rotate the atoms, recompute the multipole expansion and compare the result
# to the rotated expansion
a1 = 2*pi*random.random()
a2 = 2*pi*random.random()
a3 = 2*pi*random.random()
Rz1 = np.array( [ [ cos(a1), -sin(a1), 0 ],
[ sin(a1), cos(a1), 0 ],
[ 0, 0, 1 ] ] )
Ry = np.array( [ [ cos(a2), 0, -sin(a2) ],
[ 0, 1, 0 ],
[ sin(a2), 0, cos(a2) ] ] )
Rz2 = np.array( [ [ cos(a3), -sin(a3), 0 ],
[ sin(a3), cos(a3), 0 ],
[ 0, 0, 1 ] ] )
R = np.dot(np.dot(Rz1, Ry), Rz2)
for i in b:
i.position = r0c + np.dot(R, i.position - r0c)
Me0_l, Me_L = get_moments(b.get_positions(), b.get_initial_charges(), L_MAX, r0c)
Mf0_l, Mf_L = transform_multipole(R, L_MAX, Md0_l, Md_L)
err_mom3 = np.max(np.abs(Me0_l-Mf0_l))
err_mom4 = np.max(np.abs(Me_L-Mf_L))
if debug:
print("err_mom3 = ", err_mom3)
print("err_mom4 = ", err_mom4)
assert err_mom3 < TOL_ROT
assert err_mom4 < TOL_ROT
|
pekkosk/hotbit
|
hotbit/test/multipole_operations.py
|
Python
|
gpl-2.0
| 5,584
|
[
"ASE"
] |
cc30ba3117e6ae3b27758082b17185f36adeeb96cbb4187ea1823889fd103de6
|
"""
This module contains unit tests for the theano autocorrelation function
"""
import unittest
import numpy as np
from mjhmc.misc.autocor import autocorrelation, slow_autocorrelation, sample_to_df
from mjhmc.misc.distributions import Gaussian, MultimodalGaussian
from mjhmc.samplers.markov_jump_hmc import MarkovJumpHMC
# np.isclose default atol is 1E-8; relaxed slightly here for sampler noise
TOL = 1E-7
class TestFastAutocorrelation(unittest.TestCase):
"""
Test class for theano autocorrelation function
"""
def setUp(self):
np.random.seed(2015)
def test_autocorrelation_good_init_full_window(self):
""" Tests that the legacy and fast ac implementations produce identical output
when the sampler is not initialized in a biased manner
(meaning we don't have to worry about variance mismatch)
runs on the full window
:returns: None
:rtype: None
"""
gaussian = Gaussian()
sample_df = sample_to_df(MarkovJumpHMC, gaussian, num_steps=1000)
slow_ac_df = slow_autocorrelation(sample_df, half_window=False)
slow_ac = slow_ac_df.autocorrelation.as_matrix()
fast_ac_df = autocorrelation(sample_df, half_window=False)
fast_ac = fast_ac_df.autocorrelation.as_matrix()
self.assertTrue(np.isclose(slow_ac, fast_ac, atol=TOL).all())
def test_autocorrelation_good_init_half_window(self):
""" Tests that the legacy and fast ac implementations produce identical output
when the sampler is not initialized in a biased manner
(meaning we don't have to worry about variance mismatch)
runs on the half window
:returns: None
:rtype: None
"""
gaussian = Gaussian()
sample_df = sample_to_df(MarkovJumpHMC, gaussian, num_steps=1000)
slow_ac_df = slow_autocorrelation(sample_df, half_window=True)
slow_ac = slow_ac_df.autocorrelation.as_matrix()
fast_ac_df = autocorrelation(sample_df, half_window=True)
fast_ac = fast_ac_df.autocorrelation.as_matrix()
self.assertTrue(np.isclose(slow_ac, fast_ac, atol=TOL).all())
def test_autocorrelation_bad_init(self):
""" Tests that the legacy and fast ac implementations produce identical output
when the sampler is initialized with a bias set of samples
:returns: None
:rtype: None
"""
gaussian = MultimodalGaussian()
sample_df = sample_to_df(MarkovJumpHMC, gaussian, num_steps=1000)
slow_ac_df = slow_autocorrelation(sample_df)
slow_ac = slow_ac_df.autocorrelation.as_matrix()
fast_ac_df = autocorrelation(sample_df)
fast_ac = fast_ac_df.autocorrelation.as_matrix()
self.assertTrue(np.isclose(slow_ac, fast_ac, atol=TOL).all())
|
rueberger/MJHMC
|
mjhmc/tests/test_fast_ac.py
|
Python
|
gpl-2.0
| 2,761
|
[
"Gaussian"
] |
0dbfa86f2ba0c024c84d6023167a0867383450179db851fec163e3044c3a4ec5
|
# -*- coding: utf-8 -*-
# @Author: Zachary Priddy
# @Date: 2016-12-24 12:39:45
# @Last Modified by: Zachary Priddy
# @Last Modified time: 2016-12-24 20:34:28
import difflib
import json
import logging
import requests
from core import ffCommand
from core.database.device_db import getDeviceList
from core.database.routine_db import getRoutineList
from core.database.device_db import getDeviceInfo
from core import configPath
import os
# TODO: Pull this info from the firefly.confg
FIREFLY_ADDRESS = 'http://localhost:6002'
HA_BRIDGE_ADDRESS = 'http://localhost:80/api/devices'
def ha_bridge_handler(p_request):
action = p_request.get('action')
if action == 'dim':
return ha_bridge_dim(p_request)
elif action == 'on':
return ha_bridge_on(p_request)
elif action == 'off':
return ha_bridge_off(p_request)
else:
return ""
def ha_bridge_dim(p_request):
ffCommand(p_request.get('device'), {'setLight' : {'level':p_request.get('level')}}, source='HA Bridge')
return ""
def ha_bridge_on(p_request):
ffCommand(p_request.get('device'), {'switch': 'on'}, source='HA Bridge')
return ""
def ha_bridge_off(p_request):
ffCommand(p_request.get('device'), {'switch': 'off'}, source='HA Bridge')
return ""
def ha_bridge_push_config():
# 1) Get the HA-Bridge config details from request.
# 2) Build of the URLS and JSON for each device and push them.
# 3) Delete existing Firefly Configs
# 4) Add the devices to the HA-Bridge
devices = getDeviceInfo(filters=['lights','light','switch','hue','dimmer','fan','group'])
device_config = []
    # TODO: make this read the ha-bridge config file
    ha_bridge_alias = {}
    with open(os.path.join(configPath,'ha_bridge_alias.json')) as alias:
        ha_bridge_alias = json.load(alias)
for device, config in devices.iteritems():
d_config = {
'name': config.get('name'),
'deviceType': config.get('type'),
'onUrl': FIREFLY_ADDRESS + '/API/habridge/command',
'offUrl': FIREFLY_ADDRESS + '/API/habridge/command',
'dimUrl': FIREFLY_ADDRESS + '/API/habridge/command',
'targetDevice': 'Firefly - Auto-added',
'httpVerb': 'POST',
'contentType': 'application/json',
'contentBody': '{"device": "' + config.get('id') + '" ,"action": "on"}',
'contentBodyOff': '{"device": "' + config.get('id') + '" ,"action": "off"}',
'contentBodyDim': '{"device": "' + config.get('id') + '" ,"action": "dim", "level":${intensity.percent}}'
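            # ${intensity.percent} is an HA-Bridge template token, expanded to
            # the requested dim level (0-100) before the bridge POSTs to Firefly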
}
        if ha_bridge_alias.get(d_config.get('name')):
            d_config['name'] = ha_bridge_alias[d_config.get('name')]
device_config.append(d_config)
current_devices = requests.get(HA_BRIDGE_ADDRESS).json()
for d in current_devices:
if d.get('targetDevice') == 'Firefly - Auto-added':
requests.delete(HA_BRIDGE_ADDRESS + '/' + str(d.get('id')))
for d in device_config:
r = requests.post(HA_BRIDGE_ADDRESS, json=d)
logging.critical('Added ' + d.get('name') + ' to HA Bridge')
    logging.critical('Done adding devices to HA Bridge.')
return str(device_config)
|
zpriddy/Firefly
|
Firefly/core/api/ha_bridge.py
|
Python
|
apache-2.0
| 3,051
|
[
"Firefly"
] |
98ff8307f665dea81cd1c2cd781c56a41064a761aacbd742c5ce76d5d25c626e
|
#!/usr/bin/python
#
# Copyright 2015 John Kendrick
#
# This file is part of PDielec
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# You should have received a copy of the MIT License
# along with this program, if not see https://opensource.org/licenses/MIT
#
"""Generic reader of output files. Actual reader should inherit from this class"""
from __future__ import print_function
import os
import sys
from PDielec.VaspOutputReader import VaspOutputReader
from PDielec.PhonopyOutputReader import PhonopyOutputReader
from PDielec.CastepOutputReader import CastepOutputReader
from PDielec.GulpOutputReader import GulpOutputReader
from PDielec.CrystalOutputReader import CrystalOutputReader
from PDielec.AbinitOutputReader import AbinitOutputReader
from PDielec.QEOutputReader import QEOutputReader
from PDielec.ExperimentOutputReader import ExperimentOutputReader
def find_program_from_name( filename ):
# Determine the program to use from the file name being used
head,tail = os.path.split(filename)
root,ext = os.path.splitext(tail)
if head == '':
head = './'
else:
head = head+'/'
if tail == 'OUTCAR':
if os.path.isfile(head+'phonopy.yaml'):
return 'phonopy'
else:
return 'vasp'
if ext == '.gout':
return 'gulp'
if ext == '.born':
return 'phonopy'
if ext == '.castep':
return 'castep'
if ext == '.out':
if os.path.isfile(head+root+'.files'):
return 'abinit'
elif os.path.isfile(head+root+'.dynG'):
return 'qe'
else:
return 'crystal'
if ext == '.dynG':
return 'qe'
if ext == '.exp':
return 'experiment'
if ext == '.py':
return 'pdgui'
return ''
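# A small, hedged self-check of the extension-to-program mapping above. The
# file names are hypothetical; these particular cases depend only on the
# extension, not on any files existing on disk.
if __name__ == '__main__':
    assert find_program_from_name('calc.castep') == 'castep'
    assert find_program_from_name('calc.gout') == 'gulp'
    assert find_program_from_name('calc.dynG') == 'qe'
    assert find_program_from_name('calc.exp') == 'experiment'
    assert find_program_from_name('script.py') == 'pdgui'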
def get_reader( name, program, qmprogram):
fulldirname = name
head,tail = os.path.split(fulldirname)
root,ext = os.path.splitext(tail)
if program == "castep":
names = [ name ]
reader = CastepOutputReader( names )
elif program == "vasp":
name1 = name
name2 = os.path.join(head,'KPOINTS')
names = [ name1, name2 ]
reader = VaspOutputReader( names )
elif program == "gulp":
names = [ name ]
reader = GulpOutputReader( names )
elif program == "crystal":
names = [ name ]
reader = CrystalOutputReader( names )
elif program == "abinit":
names = [ name ]
reader = AbinitOutputReader( names )
elif program == "qe":
tail1 = root+'.dynG'
tail2 = root+'.log'
tail3 = root+'.out'
name1 = os.path.join(head,tail2)
name2 = os.path.join(head,tail3)
name3 = os.path.join(head,tail)
        # We want the dynG entry last; rounding causes problems otherwise
name4 = os.path.join(head,tail1)
names = []
for n in [ name1, name2, name3, name4 ]:
if os.path.isfile(n):
if not n in names:
names.append(n)
reader = QEOutputReader( names )
elif program == "phonopy":
# The order is important
pname1 = os.path.join(head,'qpoints.yaml')
pname2 = os.path.join(head,'phonopy.yaml')
# Only works for VASP at the moment
vname1 = name
vname2 = os.path.join(head,'KPOINTS')
pnames = [ pname1, pname2 ]
vnames = [ vname1, vname2 ]
pnames.extend(vnames)
names = pnames
# Which QM program was used by PHONOPY?
if qmprogram == "castep":
print("Error in qmreader",qmprogram)
exit()
qmreader = CastepOutputReader(names)
elif qmprogram == "vasp":
qmreader = VaspOutputReader(vnames)
elif qmprogram == "gulp":
print("Error in qmreader",qmprogram)
exit()
qmreader = GulpOutputReader(names)
elif qmprogram == "crystal":
print("Error in qmreader",qmprogram)
exit()
qmreader = CrystalOutputReader(names)
elif qmprogram == "abinit":
print("Error in qmreader",qmprogram)
exit()
qmreader = AbinitOutputReader(names)
elif qmprogram == "qe":
print("Error in qmreader",qmprogram)
exit()
qmreader = QEOutputReader(names)
# The QM reader is used to get info about the QM calculation
reader = PhonopyOutputReader(pnames,qmreader)
else:
print('Program name not recognized',program,file=sys.stderr)
exit()
return reader
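# Hedged usage sketch for get_reader above: the file name is a placeholder,
# and the chosen output file(s) must exist on disk before the returned reader
# can parse anything.
#
#   reader = get_reader('calc.castep', 'castep', '')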
def pdgui_get_reader(program,names,qmprogram):
reader = None
program = program.lower()
qmprogram = qmprogram.lower()
#print("get_reader",names,program)
if program == "":
        # This is the old behaviour. It copes with VASP, CASTEP, GULP and Crystal
# If names[0] is a directory then we will use a vaspoutputreader
# Otherwise it is a seedname for castep, or a gulp output file, or a crystal output file
if os.path.isdir(names[0]):
print('Analysing VASP directory: {} '.format(names[0]))
outcarfile = os.path.join(names[0], "OUTCAR")
kpointsfile = os.path.join(names[0], "KPOINTS")
if not os.path.isfile(outcarfile):
print("Error: NO OUTCAR FILE IN DIRECTORY")
reader = None
return
reader = VaspOutputReader( [outcarfile, kpointsfile] )
elif names[0].find("OUTCAR") >= 0 and os.path.isfile("OUTCAR"):
reader = VaspOutputReader(names)
elif names[0].find(".gout") >= 0 and os.path.isfile(names[0]):
reader = GulpOutputReader(names)
elif names[0].find(".out") >= 0 and os.path.isfile(names[0]):
reader = CrystalOutputReader(names)
elif names[0].find(".castep") >= 0 and os.path.isfile(names[0]):
reader = CastepOutputReader(names)
elif os.path.isfile(names[0]+".castep") and os.path.isfile(names[0]+".castep"):
reader = CastepOutputReader([names[0]+".castep"])
else:
print('No valid file name has been found on the command line')
print('Try using the -program option to specify the')
print('files which will be read')
reader = None
else:
# New Specification of Program used to define the input files
# Abinit and QE need a couple of files to be specified
#
# First Check that the file(s) we requested are there
#
checkfiles = []
if program == "castep":
if names[0].find(".castep") >= 0:
seedname, ext = os.path.splitext(names[0])
else:
seedname = names[0]
checkfiles.append(seedname+".castep")
elif program == "phonopy":
# We only have a VASP / Phonopy interface
            # Create a list of phonopy files
pnames = []
head,tail = os.path.split(names[0])
pnames.append(os.path.join(head,'qpoints.yaml'))
pnames.append(os.path.join(head,'phonopy.yaml'))
            # Create a list of VASP files; NB they all have to be in the same directory
vnames = names
pnames.extend(vnames)
checkfiles = pnames
else:
checkfiles = names
#jk print("")
#jk print("Program used to perform the phonon calculation was: {}".format(program))
for f in checkfiles:
#jk print("The file containing the output is: {}".format(f))
if not os.path.isfile(f):
print("Output files created by program: {}".format(program))
print("Error: file not available: {}".format(f))
reader = None
return reader
# The files requested are available so read them
if program == "castep":
reader = CastepOutputReader(names)
elif program == "vasp":
reader = VaspOutputReader(names)
elif program == "gulp":
reader = GulpOutputReader(names)
elif program == "crystal":
reader = CrystalOutputReader(names)
elif program == "abinit":
reader = AbinitOutputReader(names)
elif program == "qe":
reader = QEOutputReader(names)
elif program == "phonopy":
# Which QM program was used by PHONOPY?
if qmprogram == "castep":
qmreader = CastepOutputReader(vnames)
elif qmprogram == "vasp":
qmreader = VaspOutputReader(vnames)
elif qmprogram == "gulp":
qmreader = GulpOutputReader(vnames)
elif qmprogram == "crystal":
qmreader = CrystalOutputReader(vnames)
elif qmprogram == "abinit":
qmreader = AbinitOutputReader(vnames)
elif qmprogram == "qe":
qmreader = QEOutputReader(vnames)
# The QM reader is used to get info about the QM calculation
reader = PhonopyOutputReader(pnames,qmreader)
elif program == "experiment":
reader = ExperimentOutputReader(names)
# endif
# end if
return reader
class Debug():
def __init__(self,debug,text,level=0):
self.debug = debug
self.text = text
self.level = level
return
def print(self,*args,level=0):
if self.debug:
if level <= self.level:
print(self.text,*args)
return
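# Minimal usage sketch of the Debug helper above; the prefix and messages are
# illustrative.
if __name__ == '__main__':
    dbg = Debug(True, 'Utilities:', level=1)
    dbg.print('always printed when debugging is on')   # message level 0 <= 1
    dbg.print('printed at verbosity 1', level=1)
    dbg.print('suppressed at verbosity 2', level=2)    # 2 > 1, so not printed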
|
JohnKendrick/PDielec
|
PDielec/Utilities.py
|
Python
|
mit
| 9,784
|
[
"ABINIT",
"CASTEP",
"CRYSTAL",
"GULP",
"VASP",
"phonopy"
] |
bddbd34967f95d01bd334c3cbdea539f54d0df913ecf573f4712c63863c6e171
|
WORDS = (
'Smith',
'Johnson',
'Williams',
'Brown',
'Jones',
'Miller',
'Davis',
'Garcia',
'Rodriguez',
'Wilson',
'Martinez',
'Anderson',
'Taylor',
'Thomas',
'Hernandez',
'Moore',
'Martin',
'Jackson',
'Thompson',
'White',
'Lopez',
'Lee',
'Gonzalez',
'Harris',
'Clark',
'Lewis',
'Robinson',
'Walker',
'Perez',
'Hall',
'Young',
'Allen',
'Sanchez',
'Wright',
'King',
'Scott',
'Green',
'Baker',
'Adams',
'Nelson',
'Hill',
'Ramirez',
'Campbell',
'Mitchell',
'Roberts',
'Carter',
'Phillips',
'Evans',
'Turner',
'Torres',
'Parker',
'Collins',
'Edwards',
'Stewart',
'Flores',
'Morris',
'Nguyen',
'Murphy',
'Rivera',
'Cook',
'Rogers',
'Morgan',
'Peterson',
'Cooper',
'Reed',
'Bailey',
'Bell',
'Gomez',
'Kelly',
'Howard',
'Ward',
'Cox',
'Diaz',
'Richardson',
'Wood',
'Watson',
'Brooks',
'Bennett',
'Gray',
'James',
'Reyes',
'Cruz',
'Hughes',
'Price',
'Myers',
'Long',
'Foster',
'Sanders',
'Ross',
'Morales',
'Powell',
'Sullivan',
'Russell',
'Ortiz',
'Jenkins',
'Gutierrez',
'Perry',
'Butler',
'Barnes',
'Fisher',
'Henderson',
'Coleman',
'Simmons',
'Patterson',
'Jordan',
'Reynolds',
'Hamilton',
'Graham',
'Kim',
'Gonzales',
'Alexander',
'Ramos',
'Wallace',
'Griffin',
'West',
'Cole',
'Hayes',
'Chavez',
'Gibson',
'Bryant',
'Ellis',
'Stevens',
'Murray',
'Ford',
'Marshall',
'Owens',
'Mcdonald',
'Harrison',
'Ruiz',
'Kennedy',
'Wells',
'Alvarez',
'Woods',
'Mendoza',
'Castillo',
'Olson',
'Webb',
'Washington',
'Tucker',
'Freeman',
'Burns',
'Henry',
'Vasquez',
'Snyder',
'Simpson',
'Crawford',
'Jimenez',
'Porter',
'Mason',
'Shaw',
'Gordon',
'Wagner',
'Hunter',
'Romero',
'Hicks',
'Dixon',
'Hunt',
'Palmer',
'Robertson',
'Black',
'Holmes',
'Stone',
'Meyer',
'Boyd',
'Mills',
'Warren',
'Fox',
'Rose',
'Rice',
'Moreno',
'Schmidt',
'Patel',
'Ferguson',
'Nichols',
'Herrera',
'Medina',
'Ryan',
'Fernandez',
'Weaver',
'Daniels',
'Stephens',
'Gardner',
'Payne',
'Kelley',
'Dunn',
'Pierce',
'Arnold',
'Tran',
'Spencer',
'Peters',
'Hawkins',
'Grant',
'Hansen',
'Castro',
'Hoffman',
'Hart',
'Elliott',
'Cunningham',
'Knight',
'Bradley',
'Carroll',
'Hudson',
'Duncan',
'Armstrong',
'Berry',
'Andrews',
'Johnston',
'Ray',
'Lane',
'Riley',
'Carpenter',
'Perkins',
'Aguilar',
'Silva',
'Richards',
'Willis',
'Matthews',
'Chapman',
'Lawrence',
'Garza',
'Vargas',
'Watkins',
'Wheeler',
'Larson',
'Carlson',
'Harper',
'George',
'Greene',
'Burke',
'Guzman',
'Morrison',
'Munoz',
'Jacobs',
'Obrien',
'Lawson',
'Franklin',
'Lynch',
'Bishop',
'Carr',
'Salazar',
'Austin',
'Mendez',
'Gilbert',
'Jensen',
'Williamson',
'Montgomery',
'Harvey',
'Oliver',
'Howell',
'Dean',
'Hanson',
'Weber',
'Garrett',
'Sims',
'Burton',
'Fuller',
'Soto',
'Mccoy',
'Welch',
'Chen',
'Schultz',
'Walters',
'Reid',
'Fields',
'Walsh',
'Little',
'Fowler',
'Bowman',
'Davidson',
'May',
'Day',
'Schneider',
'Newman',
'Brewer',
'Lucas',
'Holland',
'Wong',
'Banks',
'Santos',
'Curtis',
'Pearson',
'Delgado',
'Valdez',
'Pena',
'Rios',
'Douglas',
'Sandoval',
'Barrett',
'Hopkins',
'Keller',
'Guerrero',
'Stanley',
'Bates',
'Alvarado',
'Beck',
'Ortega',
'Wade',
'Estrada',
'Contreras',
'Barnett',
'Caldwell',
'Santiago',
'Lambert',
'Powers',
'Chambers',
'Nunez',
'Craig',
'Leonard',
'Lowe',
'Rhodes',
'Byrd',
'Gregory',
'Shelton',
'Frazier',
'Becker',
'Maldonado',
'Fleming',
'Vega',
'Sutton',
'Cohen',
'Jennings',
'Parks',
'Mcdaniel',
'Watts',
'Barker',
'Norris',
'Vaughn',
'Vazquez',
'Holt',
'Schwartz',
'Steele',
'Benson',
'Neal',
'Dominguez',
'Horton',
'Terry',
'Wolfe',
'Hale',
'Lyons',
'Graves',
'Haynes',
'Miles',
'Park',
'Warner',
'Padilla',
'Bush',
'Thornton',
'Mccarthy',
'Mann',
'Zimmerman',
'Erickson',
'Fletcher',
'Mckinney',
'Page',
'Dawson',
'Joseph',
'Marquez',
'Reeves',
'Klein',
'Espinoza',
'Baldwin',
'Moran',
'Love',
'Robbins',
'Higgins',
'Ball',
'Cortez',
'Le',
'Griffith',
'Bowen',
'Sharp',
'Cummings',
'Ramsey',
'Hardy',
'Swanson',
'Barber',
'Acosta',
'Luna',
'Chandler',
'Blair',
'Daniel',
'Cross',
'Simon',
'Dennis',
'Oconnor',
'Quinn',
'Gross',
'Navarro',
'Moss',
'Fitzgerald',
'Doyle',
'Mclaughlin',
'Rojas',
'Rodgers',
'Stevenson',
'Singh',
'Yang',
'Figueroa',
'Harmon',
'Newton',
'Paul',
'Manning',
'Garner',
'Mcgee',
'Reese',
'Francis',
'Burgess',
'Adkins',
'Goodman',
'Curry',
'Brady',
'Christensen',
'Potter',
'Walton',
'Goodwin',
'Mullins',
'Molina',
'Webster',
'Fischer',
'Campos',
'Avila',
'Sherman',
'Todd',
'Chang',
'Blake',
'Malone',
'Wolf',
'Hodges',
'Juarez',
'Gill',
'Farmer',
'Hines',
'Gallagher',
'Duran',
'Hubbard',
'Cannon',
'Miranda',
'Wang',
'Saunders',
'Tate',
'Mack',
'Hammond',
'Carrillo',
'Townsend',
'Wise',
'Ingram',
'Barton',
'Mejia',
'Ayala',
'Schroeder',
'Hampton',
'Rowe',
'Parsons',
'Frank',
'Waters',
'Strickland',
'Osborne',
'Maxwell',
'Chan',
'Deleon',
'Norman',
'Harrington',
'Casey',
'Patton',
'Logan',
'Bowers',
'Mueller',
'Glover',
'Floyd',
'Hartman',
'Buchanan',
'Cobb',
'French',
'Kramer',
'Mccormick',
'Clarke',
'Tyler',
'Gibbs',
'Moody',
'Conner',
'Sparks',
'Mcguire',
'Leon',
'Bauer',
'Norton',
'Pope',
'Flynn',
'Hogan',
'Robles',
'Salinas',
'Yates',
'Lindsey',
'Lloyd',
'Marsh',
'Mcbride',
'Owen',
'Solis',
'Pham',
'Lang',
'Pratt',
'Lara',
'Brock',
'Ballard',
'Trujillo',
'Shaffer',
'Drake',
'Roman',
'Aguirre',
'Morton',
'Stokes',
'Lamb',
'Pacheco',
'Patrick',
'Cochran',
'Shepherd',
'Cain',
'Burnett',
'Hess',
'Li',
'Cervantes',
'Olsen',
'Briggs',
'Ochoa',
'Cabrera',
'Velasquez',
'Montoya',
'Roth',
'Meyers',
'Cardenas',
'Fuentes',
'Weiss',
'Hoover',
'Wilkins',
'Nicholson',
'Underwood',
'Short',
'Carson',
'Morrow',
'Colon',
'Holloway',
'Summers',
'Bryan',
'Petersen',
'Mckenzie',
'Serrano',
'Wilcox',
'Carey',
'Clayton',
'Poole',
'Calderon',
'Gallegos',
'Greer',
'Rivas',
'Guerra',
'Decker',
'Collier',
'Wall',
'Whitaker',
'Bass',
'Flowers',
'Davenport',
'Conley',
'Houston',
'Huff',
'Copeland',
'Hood',
'Monroe',
'Massey',
'Roberson',
'Combs',
'Franco',
'Larsen',
'Pittman',
'Randall',
'Skinner',
'Wilkinson',
'Kirby',
'Cameron',
'Bridges',
'Anthony',
'Richard',
'Kirk',
'Bruce',
'Singleton',
'Mathis',
'Bradford',
'Boone',
'Abbott',
'Charles',
'Allison',
'Sweeney',
'Atkinson',
'Horn',
'Jefferson',
'Rosales',
'York',
'Christian',
'Phelps',
'Farrell',
'Castaneda',
'Nash',
'Dickerson',
'Bond',
'Wyatt',
'Foley',
'Chase',
'Gates',
'Vincent',
'Mathews',
'Hodge',
'Garrison',
'Trevino',
'Villarreal',
'Heath',
'Dalton',
'Valencia',
'Callahan',
'Hensley',
'Atkins',
'Huffman',
'Roy',
'Boyer',
'Shields',
'Lin',
'Hancock',
'Grimes',
'Glenn',
'Cline',
'Delacruz',
'Camacho',
'Dillon',
'Parrish',
'Oneill',
'Melton',
'Booth',
'Kane',
'Berg',
'Harrell',
'Pitts',
'Savage',
'Wiggins',
'Brennan',
'Salas',
'Marks',
'Russo',
'Sawyer',
'Baxter',
'Golden',
'Hutchinson',
'Liu',
'Walter',
'Mcdowell',
'Wiley',
'Rich',
'Humphrey',
'Johns',
'Koch',
'Suarez',
'Hobbs',
'Beard',
'Gilmore',
'Ibarra',
'Keith',
'Macias',
'Khan',
'Andrade',
'Ware',
'Stephenson',
'Henson',
'Wilkerson',
'Dyer',
'Mcclure',
'Blackwell',
'Mercado',
'Tanner',
'Eaton',
'Clay',
'Barron',
'Beasley',
'Oneal',
'Preston',
'Small',
'Wu',
'Zamora',
'Macdonald',
'Vance',
'Snow',
'Mcclain',
'Stafford',
'Orozco',
'Barry',
'English',
'Shannon',
'Kline',
'Jacobson',
'Woodard',
'Huang',
'Kemp',
'Mosley',
'Prince',
'Merritt',
'Hurst',
'Villanueva',
'Roach',
'Nolan',
'Lam',
'Yoder',
'Mccullough',
'Lester',
'Santana',
'Valenzuela',
'Winters',
'Barrera',
'Leach',
'Orr',
'Berger',
'Mckee',
'Strong',
'Conway',
'Stein',
'Whitehead',
'Bullock',
'Escobar',
'Knox',
'Meadows',
'Solomon',
'Velez',
'Odonnell',
'Kerr',
'Stout',
'Blankenship',
'Browning',
'Kent',
'Lozano',
'Bartlett',
'Pruitt',
'Buck',
'Barr',
'Gaines',
'Durham',
'Gentry',
'Mcintyre',
'Sloan',
'Melendez',
'Rocha',
'Herman',
'Sexton',
'Moon',
'Hendricks',
'Rangel',
'Stark',
'Lowery',
'Hardin',
'Hull',
'Sellers',
'Ellison',
'Calhoun',
'Gillespie',
'Mora',
'Knapp',
'Mccall',
'Morse',
'Dorsey',
'Weeks',
'Nielsen',
'Livingston',
'Leblanc',
'Mclean',
'Bradshaw',
'Glass',
'Middleton',
'Buckley',
'Schaefer',
'Frost',
'Howe',
'House',
'Mcintosh',
'Ho',
'Pennington',
'Reilly',
'Hebert',
'Mcfarland',
'Hickman',
'Noble',
'Spears',
'Conrad',
'Arias',
'Galvan',
'Velazquez',
'Huynh',
'Frederick',
'Randolph',
'Cantu',
'Fitzpatrick',
'Mahoney',
'Peck',
'Villa',
'Michael',
'Donovan',
'Mcconnell',
'Walls',
'Boyle',
'Mayer',
'Zuniga',
'Giles',
'Pineda',
'Pace',
'Hurley',
'Mays',
'Mcmillan',
'Crosby',
'Ayers',
'Case',
'Bentley',
'Shepard',
'Everett',
'Pugh',
'David',
'Mcmahon',
'Dunlap',
'Bender',
'Hahn',
'Harding',
'Acevedo',
'Raymond',
'Blackburn',
'Duffy',
'Landry',
'Dougherty',
'Bautista',
'Shah',
'Potts',
'Arroyo',
'Valentine',
'Meza',
'Gould',
'Vaughan',
'Fry',
'Rush',
'Avery',
'Herring',
'Dodson',
'Clements',
'Sampson',
'Tapia',
'Bean',
'Lynn',
'Crane',
'Farley',
'Cisneros',
'Benton',
'Ashley',
'Mckay',
'Finley',
'Best',
'Blevins',
'Friedman',
'Moses',
'Sosa',
'Blanchard',
'Huber',
'Frye',
'Krueger',
'Bernard',
'Rosario',
'Rubio',
'Mullen',
'Benjamin',
'Haley',
'Chung',
'Moyer',
'Choi',
'Horne',
'Yu',
'Woodward',
'Ali',
'Nixon',
'Hayden',
'Rivers',
'Estes',
'Mccarty',
'Richmond',
'Stuart',
'Maynard',
'Brandt',
'Oconnell',
'Hanna',
'Sanford',
'Sheppard',
'Church',
'Burch',
'Levy',
'Rasmussen',
'Coffey',
'Ponce',
'Faulkner',
'Donaldson',
'Schmitt',
'Novak',
'Costa',
'Montes',
'Booker',
'Cordova',
'Waller',
'Arellano',
'Maddox',
'Mata',
'Bonilla',
'Stanton',
'Compton',
'Kaufman',
'Dudley',
'Mcpherson',
'Beltran',
'Dickson',
'Mccann',
'Villegas',
'Proctor',
'Hester',
'Cantrell',
'Daugherty',
'Cherry',
'Bray',
'Davila',
'Rowland',
'Levine',
'Madden',
'Spence',
'Good',
'Irwin',
'Werner',
'Krause',
'Petty',
'Whitney',
'Baird',
'Hooper',
'Pollard',
'Zavala',
'Jarvis',
'Holden',
'Haas',
'Hendrix',
'Mcgrath',
'Bird',
'Lucero',
'Terrell',
'Riggs',
'Joyce',
'Mercer',
'Rollins',
'Galloway',
'Duke',
'Odom',
'Andersen',
'Downs',
'Hatfield',
'Benitez',
'Archer',
'Huerta',
'Travis',
'Mcneil',
'Hinton',
'Zhang',
'Hays',
'Mayo',
'Fritz',
'Branch',
'Mooney',
'Ewing',
'Ritter',
'Esparza',
'Frey',
'Braun',
'Gay',
'Riddle',
'Haney',
'Kaiser',
'Holder',
'Chaney',
'Mcknight',
'Gamble',
'Vang',
'Cooley',
'Carney',
'Cowan',
'Forbes',
'Ferrell',
'Davies',
'Barajas',
'Shea',
'Osborn',
'Bright',
'Cuevas',
'Bolton',
'Murillo',
'Lutz',
'Duarte',
'Kidd',
'Key',
'Cooke',
'Goff',
'Dejesus',
'Marin',
'Dotson',
'Bonner',
'Cotton',
'Merrill',
'Lindsay',
'Lancaster',
'Mcgowan',
'Felix',
'Salgado',
'Slater',
'Carver',
'Guthrie',
'Holman',
'Fulton',
'Snider',
'Sears',
'Witt',
'Newell',
'Byers',
'Lehman',
'Gorman',
'Costello',
'Donahue',
'Delaney',
'Albert',
'Workman',
'Rosas',
'Springer',
'Justice',
'Kinney',
'Odell',
'Lake',
'Donnelly',
'Law',
'Dailey',
'Guevara',
'Shoemaker',
'Barlow',
'Marino',
'Winter',
'Craft',
'Katz',
'Pickett',
'Espinosa',
'Daly',
'Maloney',
'Goldstein',
'Crowley',
'Vogel',
'Kuhn',
'Pearce',
'Hartley',
'Cleveland',
'Palacios',
'Mcfadden',
'Britt',
'Wooten',
'Cortes',
'Dillard',
'Childers',
'Alford',
'Dodd',
'Emerson',
'Wilder',
'Lange',
'Goldberg',
'Quintero',
'Beach',
'Enriquez',
'Quintana',
'Helms',
'Mackey',
'Finch',
'Cramer',
'Minor',
'Flanagan',
'Franks',
'Corona',
'Kendall',
'Mccabe',
'Hendrickson',
'Moser',
'Mcdermott',
'Camp',
'Mcleod',
'Bernal',
'Kaplan',
'Medrano',
'Lugo',
'Tracy',
'Bacon',
'Crowe',
'Richter',
'Welsh',
'Holley',
'Ratliff',
'Mayfield',
'Talley',
'Haines',
'Dale',
'Gibbons',
'Hickey',
'Byrne',
'Kirkland',
'Farris',
'Correa',
'Tillman',
'Sweet',
'Kessler',
'England',
'Hewitt',
'Blanco',
'Connolly',
'Pate',
'Elder',
'Bruno',
'Holcomb',
'Hyde',
'Mcallister',
'Cash',
'Christopher',
'Whitfield',
'Meeks',
'Hatcher',
'Fink',
'Sutherland',
'Noel',
'Ritchie',
'Rosa',
'Leal',
'Joyner',
'Starr',
'Morin',
'Delarosa',
'Connor',
'Hilton',
'Alston',
'Gilliam',
'Wynn',
'Wills',
'Jaramillo',
'Oneil',
'Nieves',
'Britton',
'Rankin',
'Belcher',
'Guy',
'Chamberlain',
'Tyson',
'Puckett',
'Downing',
'Sharpe',
'Boggs',
'Truong',
'Pierson',
'Godfrey',
'Mobley',
'John',
'Kern',
'Dye',
'Hollis',
'Bravo',
'Magana',
'Rutherford',
'Ng',
'Tuttle',
'Lim',
'Romano',
'Arthur',
'Trejo',
'Knowles',
'Lyon',
'Shirley',
'Quinones',
'Childs',
'Dolan',
'Head',
'Reyna',
'Saenz',
'Hastings',
'Kenney',
'Cano',
'Foreman',
'Denton',
'Villalobos',
'Pryor',
'Sargent',
'Doherty',
'Hopper',
'Phan',
'Womack',
'Lockhart',
'Ventura',
'Dwyer',
'Muller',
'Galindo',
'Grace',
'Sorensen',
'Courtney',
'Parra',
'Rodrigues',
'Nicholas',
'Ahmed',
'Mcginnis',
'Langley',
'Madison',
'Locke',
'Jamison',
'Nava',
'Gustafson',
'Sykes',
'Dempsey',
'Hamm',
'Rodriquez',
'Mcgill',
'Xiong',
'Esquivel',
'Simms',
'Kendrick',
'Boyce',
'Vigil',
'Downey',
'Mckenna',
'Sierra',
'Webber',
'Kirkpatrick',
'Dickinson',
'Couch',
'Burks',
'Sheehan',
'Slaughter',
'Pike',
'Whitley',
'Magee',
'Cheng',
'Sinclair',
'Cassidy',
'Rutledge',
'Burris',
'Bowling',
'Crabtree',
'Mcnamara',
'Avalos',
'Vu',
'Herron',
'Broussard',
'Abraham',
'Garland',
'Corbett',
'Corbin',
'Stinson',
'Chin',
'Burt',
'Hutchins',
'Woodruff',
'Lau',
'Brandon',
'Singer',
'Hatch',
'Rossi',
'Shafer',
'Ott',
'Goss',
'Gregg',
'Dewitt',
'Tang',
'Polk',
'Worley',
'Covington',
'Saldana',
'Heller',
'Emery',
'Swartz',
'Cho',
'Mccray',
'Elmore',
'Rosenberg',
'Simons',
'Clemons',
'Beatty',
'Harden',
'Herbert',
'Bland',
'Rucker',
'Manley',
'Ziegler',
'Grady',
'Lott',
'Rouse',
'Gleason',
'Mcclellan',
'Abrams',
'Vo',
'Albright',
'Meier',
'Dunbar',
'Ackerman',
'Padgett',
'Mayes',
'Tipton',
'Coffman',
'Peralta',
'Shapiro',
'Roe',
'Weston',
'Plummer',
'Helton',
'Stern',
'Fraser',
'Stover',
'Fish',
'Schumacher',
'Baca',
'Curran',
'Vinson',
'Vera',
'Clifton',
'Ervin',
'Eldridge',
'Lowry',
'Childress',
'Becerra',
'Gore',
'Seymour',
'Chu',
'Field',
'Akers',
'Carrasco',
'Bingham',
'Sterling',
'Greenwood',
'Leslie',
'Groves',
'Manuel',
'Swain',
'Edmonds',
'Muniz',
'Thomson',
'Crouch',
'Walden',
'Smart',
'Tomlinson',
'Alfaro',
'Quick',
'Goldman',
'Mcelroy',
'Yarbrough',
'Funk',
'Hong',
'Portillo',
'Lund',
'Ngo',
'Elkins',
'Stroud',
'Meredith',
'Battle',
'Mccauley',
'Zapata',
'Bloom',
'Gee',
'Givens',
'Cardona',
'Schafer',
'Robison',
'Gunter',
'Griggs',
'Tovar',
'Teague',
'Swift',
'Bowden',
'Schulz',
'Blanton',
'Buckner',
'Whalen',
'Pritchard',
'Pierre',
'Kang',
'Butts',
'Metcalf',
'Kurtz',
'Sanderson',
'Tompkins',
'Inman',
'Crowder',
'Dickey',
'Hutchison',
'Conklin',
'Hoskins',
'Holbrook',
'Horner',
'Neely',
'Tatum',
'Hollingsworth',
'Draper',
'Clement',
'Lord',
'Reece',
'Feldman',
'Kay',
'Hagen',
'Crews',
'Bowles',
'Post',
'Jewell',
'Daley',
'Cordero',
'Mckinley',
'Velasco',
'Masters',
'Driscoll',
'Burrell',
'Valle',
'Crow',
'Devine',
'Larkin',
'Chappell',
'Pollock',
'Kimball',
'Ly',
'Schmitz',
'Lu',
'Rubin',
'Self',
'Barrios',
'Pereira',
'Phipps',
'Mcmanus',
'Nance',
'Steiner',
'Poe',
'Crockett',
'Jeffries',
'Amos',
'Nix',
'Newsome',
'Dooley',
'Payton',
'Rosen',
'Swenson',
'Connelly',
'Tolbert',
'Segura',
'Esposito',
'Coker',
'Biggs',
'Hinkle',
'Thurman',
'Drew',
'Ivey',
'Bullard',
'Baez',
'Neff',
'Maher',
'Stratton',
'Egan',
'Dubois',
'Gallardo',
'Blue',
'Rainey',
'Yeager',
'Saucedo',
'Ferreira',
'Sprague',
'Lacy',
'Hurtado',
'Heard',
'Connell',
'Stahl',
'Aldridge',
'Amaya',
'Forrest',
'Erwin',
'Gunn',
'Swan',
'Butcher',
'Rosado',
'Godwin',
'Hand',
'Gabriel',
'Otto',
'Whaley',
'Ludwig',
'Clifford',
'Grove',
'Beaver',
'Silver',
'Dang',
'Hammer',
'Dick',
'Boswell',
'Mead',
'Colvin',
'Oleary',
'Milligan',
'Goins',
'Ames',
'Dodge',
'Kaur',
'Escobedo',
'Arredondo',
'Geiger',
'Winkler',
'Dunham',
'Temple',
'Babcock',
'Billings',
'Grimm',
'Lilly',
'Wesley',
'Mcghee',
'Painter',
'Siegel',
'Bower',
'Purcell',
'Block',
'Aguilera',
'Norwood',
'Sheridan',
'Cartwright',
'Coates',
'Davison',
'Regan',
'Ramey',
'Koenig',
'Kraft',
'Bunch',
'Engel',
'Tan',
'Winn',
'Steward',
'Link',
'Vickers',
'Bragg',
'Piper',
'Huggins',
'Michel',
'Healy',
'Jacob',
'Mcdonough',
'Wolff',
'Colbert',
'Zepeda',
'Hoang',
'Dugan',
'Kilgore',
'Meade',
'Guillen',
'Do',
'Hinojosa',
'Goode',
'Arrington',
'Gary',
'Snell',
'Willard',
'Renteria',
'Chacon',
'Gallo',
'Hankins',
'Montano',
'Browne',
'Peacock',
'Ohara',
'Cornell',
'Sherwood',
'Castellanos',
'Thorpe',
'Stiles',
'Sadler',
'Latham',
'Redmond',
'Greenberg',
'Cote',
'Waddell',
'Dukes',
'Diamond',
'Bui',
'Madrid',
'Alonso',
'Sheets',
'Irvin',
'Hurt',
'Ferris',
'Sewell',
'Carlton',
'Aragon',
'Blackmon',
'Hadley',
'Hoyt',
'Mcgraw',
'Pagan',
'Land',
'Tidwell',
'Lovell',
'Miner',
'Doss',
'Dahl',
'Delatorre',
'Stanford',
'Kauffman',
'Vela',
'Gagnon',
'Winston',
'Gomes',
'Thacker',
'Coronado',
'Ash',
'Jarrett',
'Hager',
'Samuels',
'Metzger',
'Raines',
'Spivey',
'Maurer',
'Han',
'Voss',
'Henley',
'Caballero',
'Caruso',
'Coulter',
'North',
'Finn',
'Cahill',
'Lanier',
'Souza',
'Mcwilliams',
'Deal',
'Schaffer',
'Urban',
'Houser',
'Cummins',
'Romo',
'Crocker',
'Bassett',
'Kruse',
'Bolden',
'Ybarra',
'Metz',
'Root',
'Mcmullen',
'Crump',
'Hagan',
'Guidry',
'Brantley',
'Kearney',
'Beal',
'Toth',
'Jorgensen',
'Timmons',
'Milton',
'Tripp',
'Hurd',
'Sapp',
'Whitman',
'Messer',
'Burgos',
'Major',
'Westbrook',
'Castle',
'Serna',
'Carlisle',
'Varela',
'Cullen',
'Wilhelm',
'Bergeron',
'Burger',
'Posey',
'Barnhart',
'Hackett',
'Madrigal',
'Eubanks',
'Sizemore',
'Hilliard',
'Hargrove',
'Boucher',
'Thomason',
'Melvin',
'Roper',
'Barnard',
'Fonseca',
'Pedersen',
'Quiroz',
'Washburn',
'Holliday',
'Yee',
'Rudolph',
'Bermudez',
'Coyle',
'Gil',
'Goodrich',
'Pina',
'Elias',
'Lockwood',
'Cabral',
'Carranza',
'Duvall',
'Cornelius',
'Mccollum',
'Street',
'Mcneal',
'Connors',
'Angel',
'Paulson',
'Hinson',
'Keenan',
'Sheldon',
'Farr',
'Eddy',
'Samuel',
'Ledbetter',
'Ring',
'Betts',
'Fontenot',
'Gifford',
'Hannah',
'Hanley',
'Person',
'Fountain',
'Levin',
'Stubbs',
'Hightower',
'Murdock',
'Koehler',
'Ma',
'Engle',
'Smiley',
'Carmichael',
'Sheffield',
'Langston',
'Mccracken',
'Yost',
'Trotter',
'Story',
'Starks',
'Lujan',
'Blount',
'Cody',
'Rushing',
'Benoit',
'Herndon',
'Jacobsen',
'Nieto',
'Wiseman',
'Layton',
'Epps',
'Shipley',
'Leyva',
'Reeder',
'Brand',
'Roland',
'Fitch',
'Rico',
'Napier',
'Cronin',
'Mcqueen',
'Paredes',
'Trent',
'Christiansen',
'Pettit',
'Spangler',
'Langford',
'Benavides',
'Penn',
'Paige',
'Weir',
'Dietz',
'Prater',
'Brewster',
'Louis',
'Diehl',
'Pack',
'Spaulding',
'Aviles',
'Ernst',
'Nowak',
'Olvera',
'Rock',
'Mansfield',
'Aquino',
'Ogden',
'Stacy',
'Rizzo',
'Sylvester',
'Gillis',
'Sands',
'Machado',
'Lovett',
'Duong',
'Hyatt',
'Landis',
'Platt',
'Bustamante',
'Hedrick',
'Pritchett',
'Gaston',
'Dobson',
'Caudill',
'Tackett',
'Bateman',
'Landers',
'Carmona',
'Gipson',
'Uribe',
'Mcneill',
'Ledford',
'Mims',
'Abel',
'Gold',
'Smallwood',
'Thorne',
'Mchugh',
'Dickens',
'Leung',
'Tobin',
'Kowalski',
'Medeiros',
'Cope',
'Kraus',
'Quezada',
'Overton',
'Montalvo',
'Staley',
'Woody',
'Hathaway',
'Osorio',
'Laird',
'Dobbs',
'Capps',
'Putnam',
'Lay',
'Francisco',
'Adair',
'Bernstein',
'Hutton',
'Burkett',
'Rhoades',
'Richey',
'Yanez',
'Bledsoe',
'Mccain',
'Beyer',
'Cates',
'Roche',
'Spicer',
'Queen',
'Doty',
'Darling',
'Darby',
'Sumner',
'Kincaid',
'Hay',
'Grossman',
'Lacey',
'Wilkes',
'Humphries',
'Paz',
'Darnell',
'Keys',
'Kyle',
'Lackey',
'Vogt',
'Locklear',
'Kiser',
'Presley',
'Bryson',
'Bergman',
'Peoples',
'Fair',
'Mcclendon',
'Corley',
'Prado',
'Christie',
'Delong',
'Skaggs',
'Dill',
'Shearer',
'Judd',
'Stapleton',
'Flaherty',
'Casillas',
'Pinto',
'Haywood',
'Youngblood',
'Toney',
'Ricks',
'Granados',
'Crum',
'Triplett',
'Soriano',
'Waite',
'Hoff',
'Anaya',
'Crenshaw',
'Jung',
'Canales',
'Cagle',
'Denny',
'Marcus',
'Berman',
'Munson',
'Ocampo',
'Bauman',
'Corcoran',
'Keen',
'Zimmer',
'Friend',
'Ornelas',
'Varner',
'Pelletier',
'Vernon',
'Blum',
'Albrecht',
'Culver',
'Schuster',
'Cuellar',
'Mccord',
'Shultz',
'Mcrae',
'Moreland',
'Calvert',
'William',
'Whittington',
'Eckert',
'Keene',
'Mohr',
'Hanks',
'Kimble',
'Cavanaugh',
'Crowell',
'Russ',
'Feliciano',
'Crain',
'Busch',
'Mccormack',
'Drummond',
'Omalley',
'Aldrich',
'Luke',
'Greco',
'Mott',
'Oakes',
'Mallory',
'Mclain',
'Burrows',
'Otero',
'Allred',
'Eason',
'Finney',
'Weller',
'Waldron',
'Champion',
'Jeffers',
'Coon',
'Rosenthal',
'Huddleston',
'Solano',
'Hirsch',
'Akins',
'Olivares',
'Song',
'Sneed',
'Benedict',
'Bain',
'Okeefe',
'Hidalgo',
'Matos',
'Stallings',
'Paris',
'Gamez',
'Kenny',
'Quigley',
'Marrero',
'Fagan',
'Dutton',
'Atwood',
'Pappas',
'Bagley',
'Mcgovern',
'Lunsford',
'Moseley',
'Read',
'Oakley',
'Ashby',
'Granger',
'Shaver',
'Hope',
'Coe',
'Burroughs',
'Helm',
'Ambrose',
'Neumann',
'Michaels',
'Prescott',
'Light',
'Dumas',
'Flood',
'Stringer',
'Currie',
'Comer',
'Fong',
'Whitlock',
'Lemus',
'Hawley',
'Ulrich',
'Staples',
'Boykin',
'Knutson',
'Grover',
'Hobson',
'Cormier',
'Doran',
'Thayer',
'Woodson',
'Whitt',
'Hooker',
'Kohler',
'Addison',
'Vandyke',
'Schrader',
'Haskins',
'Whittaker',
'Madsen',
'Gauthier',
'Burnette',
'Keating',
'Purvis',
'Aleman',
'Huston',
'Hamlin',
'Pimentel',
'Gerber',
'Hooks',
'Schwab',
'Honeycutt',
'Schulte',
'Alonzo',
'Isaac',
'Conroy',
'Adler',
'Eastman',
'Cottrell',
'Orourke',
'Hawk',
'Goldsmith',
'Crandall',
'Rader',
'Reynoso',
'Shook',
'Abernathy',
'Baer',
'Olivas',
'Grayson',
'Bartley',
'Henning',
'Parr',
'Duff',
'Brunson',
'Baum',
'Ennis',
'Laughlin',
'Foote',
'Valadez',
'Adamson',
'Begay',
'Stovall',
'Lincoln',
'Cheung',
'Malloy',
'Rider',
'Giordano',
'Jansen',
'Lopes',
'Arnett',
'Pendleton',
'Gage',
'Barragan',
'Keyes',
'Navarrete',
'Amador',
'Hoffmann',
'Hawthorne',
'Schilling',
'Perdue',
'Schreiber',
'Arevalo',
'Naylor',
'Deluca',
'Marcum',
'Altman',
'Mark',
'Chadwick',
'Doan',
'Easley',
'Ladd',
'Woodall',
'Betancourt',
'Shin',
'Maguire',
'Bellamy',
'Quintanilla',
'Ham',
'Sorenson',
'Mattson',
'Brenner',
'Means',
'Faust',
'Calloway',
'Ojeda',
'Mcnally',
'Dietrich',
'Ransom',
'Hare',
'Felton',
'Whiting',
'Burkhart',
'Clinton',
'Schwarz',
'Cleary',
'Wetzel',
'Reagan',
'Stjohn',
'Chow',
'Hauser',
'Dupree',
'Brannon',
'Lyles',
'Prather',
'Willoughby',
'Sepulveda',
'Nugent',
'Pickens',
'Joiner',
'Mosher',
'Stoner',
'Dowling',
'Trimble',
'Valdes',
'Cheek',
'Scruggs',
'Coy',
'Tilley',
'Barney',
'Saylor',
'Nagy',
'Horvath',
'Lai',
'Corey',
'Ruth',
'Sauer',
'Baron',
'Thao',
'Rowell',
'Grubbs',
'Hillman',
'Schaeffer',
'Sams',
'Hogue',
'Hutson',
'Busby',
'Nickerson',
'Bruner',
'Parham',
'Anders',
'Rendon',
'Lombardo',
'Iverson',
'Kinsey',
'Earl',
'Borden',
'Jean',
'Titus',
'Tellez',
'Beavers',
'Cornett',
'Sotelo',
'Kellogg',
'Burnham',
'Mcnair',
'Silverman',
'Jernigan',
'Escamilla',
'Barrow',
'Coats',
'London',
'Redding',
'Ruffin',
'Yi',
'Boudreaux',
'Goodson',
'Dowell',
'Fenton',
'Mock',
'Dozier',
'Bynum',
'Gale',
'Jolly',
'Beckman',
'Goddard',
'Craven',
'Whitmore',
'Leary',
'Mccloud',
'Gamboa',
'Kerns',
'Brunner',
'Hough',
'Negron',
'Cutler',
'Ledesma',
'Pyle',
'Monahan',
'Tabor',
'Burk',
'Leone',
'Stauffer',
'Hayward',
'Driver',
'Ruff',
'Talbot',
'Seals',
'Boston',
'Carbajal',
'Fay',
'Purdy',
'Mcgregor',
'Sun',
'Orellana',
'Gentile',
'Mahan',
'Brower',
'Patino',
'Thurston',
'Shipman',
'Aaron',
'Torrez',
'Call',
'Weiner',
'Wilburn',
'Oliva',
'Hairston',
'Coley',
'Hummel',
'Arreola',
'Watt',
'Sharma',
'Lentz',
'Arce',
'Power',
'Longoria',
'Wagoner',
'Burr',
'Hsu',
'Tinsley',
'Beebe',
'Wray',
'Nunn',
'Prieto',
'German',
'Rowley',
'Brito',
'Grubb',
'Royal',
'Valentin',
'Bartholomew',
'Schuler',
'Aranda',
'Flint',
'Hearn',
'Venegas',
'Unger',
'Mattingly',
'Boles',
'Barger',
'Casas',
'Julian',
'Dow',
'Dobbins',
'Vann',
'Chester',
'Strange',
'Lemon',
'Kahn',
'Mckinnon',
'Gannon',
'Waggoner',
'Conn',
'Meek',
'Cavazos',
'Skelton',
'Lo',
'Kumar',
'Toledo',
'Lorenz',
'Vallejo',
'Starkey',
'Kitchen',
'Reaves',
'Demarco',
'Farrar',
'Stearns',
'Michaud',
'Higginbotham',
'Fernandes',
'Isaacs',
'Marion',
'Guillory',
'Priest',
'Meehan',
'Oliveira',
'Palma',
'Oswald',
'Galvez',
'Loomis',
'Lind',
'Mena',
'Stclair',
'Hinds',
'Reardon',
'Alley',
'Barth',
'Crook',
'Bliss',
'Nagel',
'Banuelos',
'Parish',
'Harman',
'Douglass',
'Kearns',
'Newcomb',
'Mulligan',
'Coughlin',
'Way',
'Fournier',
'Lawler',
'Kaminski',
'Barbour',
'Sousa',
'Stump',
'Alaniz',
'Ireland',
'Rudd',
'Carnes',
'Lundy',
'Godinez',
'Pulido',
'Dennison',
'Baumann',
'Burdick',
'Dove',
'Stoddard',
'Liang',
'Dent',
'Roark',
'Bowser',
'Mcmahan',
'Parnell',
'Mayberry',
'Wakefield',
'Arndt',
'Ogle',
'Worthington',
'Durbin',
'Escalante',
'Pederson',
'Weldon',
'Vick',
'Knott',
'Ryder',
'Zarate',
'Irving',
'Clemens',
'Shelley',
'Salter',
'Jack',
'Cloud',
'Dasilva',
'Muhammad',
'Squires',
'Rapp',
'Dawkins',
'Polanco',
'Chatman',
'Maier',
'Yazzie',
'Gruber',
'Staton',
'Blackman',
'Mcdonnell',
'Dykes',
'Laws',
'Whitten',
'Pfeiffer',
'Vidal',
'Early',
'Kelsey',
'Baughman',
'Dias',
'Starnes',
'Crespo',
'Kilpatrick',
'Lombardi',
'Deaton',
'Satterfield',
'Wiles',
'Weinstein',
'Rowan',
'Delossantos',
'Hamby',
'Estep',
'Daigle',
'Elam',
'Creech',
'Chavis',
'Heck',
'Echols',
'Foss',
'Trahan',
'Strauss',
'Vanhorn',
'Winslow',
'Rea',
'Fairchild',
'Heaton',
'Minton',
'Hitchcock',
'Linton',
'Handy',
'Crouse',
'Coles',
'Foy',
'Upton',
'Herrington',
'Hwang',
'Mcclelland',
'Rector',
'Luther',
'Kruger',
'Salcedo',
'Chance',
'Gunderson',
'Tharp',
'Griffiths',
'Graf',
'Branham',
'Humphreys',
'Renner',
'Lima',
'Rooney',
'Moya',
'Almeida',
'Gavin',
'Coburn',
'Ouellette',
'Goetz',
'Seay',
'Parrott',
'Harms',
'Robb',
'Storey',
'Barbosa',
'Barraza',
'Loyd',
'Merchant',
'Donohue',
'Carrier',
'Diggs',
'Chastain',
'Sherrill',
'Whipple',
'Braswell',
'Weathers',
'Linder',
'Chapa',
'Bock',
'Oh',
'Lovelace',
'Saavedra',
'Ferrara',
'Callaway',
'Salmon',
'Templeton',
'Christy',
'Harp',
'Dowd',
'Forrester',
'Lawton',
'Epstein',
'Gant',
'Tierney',
'Seaman',
'Corral',
'Dowdy',
'Zaragoza',
'Morrissey',
'Eller',
'Chau',
'Breen',
'High',
'Newberry',
'Beam',
'Yancey',
'Jarrell',
'Cerda',
'Ellsworth',
'Lofton',
'Thibodeaux',
'Pool',
'Rinehart',
'Arteaga',
'Marlow',
'Hacker',
'Will',
'Mackenzie',
'Hook',
'Gilliland',
'Emmons',
'Pickering',
'Medley',
'Andrew',
'Willey',
'Shell',
'Randle',
'Brinkley',
'Pruett',
'Tobias',
'Edmondson',
'Grier',
'Askew',
'Batista',
'Saldivar',
'Moeller',
'Augustine',
'Chavarria',
'Troyer',
'Layne',
'Mcnulty',
'Shank',
'Desai',
'Herrmann',
'Hemphill',
'Bearden',
'Spear',
'Keener',
'Holguin',
'Culp',
'Braden',
'Briscoe',
'Bales',
'Garvin',
'Stockton',
'Abreu',
'Suggs',
'Mccartney',
'Ferrer',
'Rhoads',
'Ha',
'Nevarez',
'Singletary',
'Chong',
'Alcala',
'Cheney',
'Westfall',
'Damico',
'Snodgrass',
'Devries',
'Looney',
'Hein',
'Lyle',
'Lockett',
'Jacques',
'Barkley',
'Wahl',
'Aponte',
'Myrick',
'Bolin',
'Holm',
'Slack',
'Martino',
'Scherer',
'Bachman',
'Ely',
'Nesbitt',
'Marroquin',
'Bouchard',
'Mast',
'Jameson',
'Hills',
'Mireles',
'Bueno',
'Pease',
'Vitale',
'Alarcon',
'Linares',
'Schell',
'Lipscomb',
'Arriaga',
'Bourgeois',
'Bonds',
'Markham',
'Ivy',
'Wisniewski',
'Oldham',
'Fallon',
'Wendt',
'Joy',
'Stamper',
'Babb',
'Steinberg',
'Asher',
'Fuchs',
'Blank',
'Willett',
'Heredia',
'Croft',
'Lytle',
'Lance',
'Lassiter',
'Barrientos',
'Condon',
'Barfield',
'Darden',
'Araujo',
'Guinn',
'Noonan',
'Burleson',
'Belanger',
'Main',
'Traylor',
'Messina',
'Zeigler',
'Danielson',
'Millard',
'Kenyon',
'Radford',
'Graff',
'Beaty',
'Baggett',
'Crisp',
'Salisbury',
'Trout',
'Lorenzo',
'Parson',
'Gann',
'Garber',
'Adcock',
'Covarrubias',
'Scales',
'Acuna',
'Thrasher',
'Card',
'Van',
'Mabry',
'Mohamed',
'Montanez',
'Redd',
'Stock',
'Willingham',
'Redman',
'Zambrano',
'Gaffney',
'Herr',
'Devlin',
'Pringle',
'Schubert',
'Casper',
'Houck',
'Rees',
'Wing',
'Ebert',
'Jeter',
'Cornejo',
'Gillette',
'Shockley',
'Amato',
'Girard',
'Leggett',
'Cheatham',
'Bustos',
'Epperson',
'Dubose',
'Seitz',
'East',
'Frias',
'Schofield',
'Steen',
'Orlando',
'Myles',
'Caron',
'Grey',
'Denney',
'Ontiveros',
'Burden',
'Jaeger',
'Reich',
'Witherspoon',
'Najera',
'Frantz',
'Hammonds',
'Xu',
'Leavitt',
'Gilchrist',
'Adam',
'Barone',
'Forman',
'Ceja',
'Ragsdale',
'Sisk',
'Tubbs',
'Elizondo',
'Pressley',
'Bollinger',
'Linn',
'Huntley',
'Dewey',
'Geary',
'Carlos',
'Ragland',
'Mixon',
'Baugh',
'Mcarthur',
'Tam',
'Nobles',
'Clevenger',
'Foust',
'Lusk',
'Cooney',
'Tamayo',
'Robert',
'Longo',
'Overstreet',
'Oglesby',
'Mace',
'Churchill',
'Matson',
'Hamrick',
'Rockwell',
'Trammell',
'Wheatley',
'Carrington',
'Ferraro',
'Ralston',
'Clancy',
'Mondragon',
'Carl',
'Hu',
'Hopson',
'Breaux',
'Mccurdy',
'Mares',
'Chisholm',
'Mai',
'Matlock',
'Aiken',
'Cary',
'Lemons',
'Anguiano',
'Herrick',
'Crawley',
'Montero',
'Hassan',
'Archuleta',
'Cotter',
'Farias',
'Parris',
'Felder',
'Luu',
'Pence',
'Gilman',
'Killian',
'Naranjo',
'Duggan',
'Easter',
'Scarborough',
'Swann',
'Ricketts',
'France',
'Bello',
'Nadeau',
'Still',
'Rincon',
'Cornwell',
'Slade',
'Fierro',
'Mize',
'Christianson',
'Greenfield',
'Mcafee',
'Landrum',
'Adame',
'Dinh',
'Lankford',
'Lewandowski',
'Rust',
'Bundy',
'Waterman',
'Milner',
'Mccrary',
'Hite',
'Curley',
'Donald',
'Duckworth',
'Cecil',
'Carrera',
'Speer',
'Birch',
'Denson',
'Beckwith',
'Stack',
'Durant',
'Dorman',
'Lantz',
'Christman',
'Spann',
'Masterson',
'Hostetler',
'Kolb',
'Brink',
'Scanlon',
'Nye',
'Beverly',
'Wylie',
'Woo',
'Spurlock',
'Shelby',
'Sommer',
'Reinhardt',
'Robledo',
'Ashton',
'Bertrand',
'Cyr',
'Edgar',
'Doe',
'Harkins',
'Brubaker',
'Stoll',
'Dangelo',
'Zhou',
'Moulton',
'Hannon',
'Falk',
'Rains',
'Broughton',
'Applegate',
'Hudgins',
'Slone',
'Farnsworth',
'Yoon',
'Perales',
'Reedy',
'Milam',
'Franz',
'Ponder',
'Ricci',
'Fontaine',
'Irizarry',
'New',
'Puente',
'Selby',
'Cazares',
'Doughty',
'Moffett',
'Balderas',
'Fine',
'Smalley',
'Carlin',
'Trinh',
'Dyson',
'Galvin',
'Valdivia',
'Benner',
'Low',
'Turpin',
'Lyman',
'Billingsley',
'Mcadams',
'Cardwell',
'Fraley',
'Patten',
'Holton',
'Shanks',
'Mcalister',
'Canfield',
'Sample',
'Harley',
'Cason',
'Tomlin',
'Ahmad',
'Coyne',
'Forte',
'Riggins',
'Littlejohn',
'Forsythe',
'Brinson',
'Halverson',
'Bach',
'Stuckey',
'Falcon',
'Talbert',
'Wenzel',
'Champagne',
'Mchenry',
'Vest',
'Shackelford',
'Ordonez',
'Collazo',
'Boland',
'Sisson',
'Bigelow',
'Hyman',
'Wharton',
'Brumfield',
'Oates',
'Mesa',
'Beckett',
'Morrell',
'Reis',
'Alves',
'Chiu',
'Larue',
'Streeter',
'Grogan',
'Blakely',
'Brothers',
'Hatton',
'Kimbrough',
'Lauer',
'Wallis',
'Jett',
'Pepper',
'Hildebrand',
'Rawls',
'Mello',
'Neville',
'Bull',
'Steffen',
'Braxton',
'Cowart',
'Simpkins',
'Mcneely',
'Blalock',
'Spain',
'Shipp',
'Lindquist',
'Butterfield',
'Oreilly',
'Perrin',
'Qualls',
'Edge',
'Havens',
'Luong',
'Switzer',
'Troutman',
'Fortner',
'Tolliver',
'Monk',
'Poindexter',
'Rupp',
'Ferry',
'Negrete',
'Muse',
'Gresham',
'Beauchamp',
'Barclay',
'Schmid',
'Chun',
'Brice',
'Faulk',
'Watters',
'Briones',
'Guajardo',
'Harwood',
'Grissom',
'Harlow',
'Whelan',
'Burdette',
'Palumbo',
'Paulsen',
'Corrigan',
'Garvey',
'Levesque',
'Dockery',
'Delgadillo',
'Gooch',
'Cao',
'Mullin',
'Ridley',
'Stanfield',
'Dial',
'Noriega',
'Ceballos',
'Nunes',
'Newby',
'Baumgartner',
'Hussain',
'Wyman',
'Causey',
'Gossett',
'Ness',
'Waugh',
'Choate',
'Carman',
'Daily',
'Devore',
'Irby',
'Kong',
'Breeden',
'Whatley',
'Ellington',
'Lamar',
'Fultz',
'Bair',
'Zielinski',
'Colby',
'Houghton',
'Grigsby',
'Fortune',
'Paxton',
'Mcmillian',
'Hammons',
'Bronson',
'Keck',
'Wellman',
'Ayres',
'Whiteside',
'Menard',
'Roush',
'Warden',
'Espino',
'Strand',
'Haggerty',
'Banda',
'Fabian',
'Krebs',
'Bowie',
'Branson',
'Lenz',
'Benavidez',
'Keeler',
'Newsom',
'Ezell',
'Jeffrey',
'Pulliam',
'Clary',
'Byrnes',
'Kopp',
'Beers',
'Smalls',
'Gardiner',
'Sommers',
'Fennell',
'Mancini',
'Osullivan',
'Sebastian',
'Bruns',
'Giron',
'Parent',
'Boyles',
'Keefe',
'Muir',
'Wheat',
'Shuler',
'Vergara',
'Pemberton',
'Brownlee',
'South',
'Brockman',
'Fanning',
'Royer',
'Herzog',
'Morley',
'Bethea',
'Needham',
'Tong',
'Roque',
'Mojica',
'Bunn',
'Francois',
'Noe',
'Kuntz',
'Snowden',
'Withers',
'Harlan',
'Seibert',
'Limon',
'Kiefer',
'Bone',
'Sell',
'Allan',
'Skidmore',
'Wren',
'Dunaway',
'Finnegan',
'Moe',
'Wolford',
'Seeley',
'Kroll',
'Lively',
'Janssen',
'Montague',
'Rahman',
'Boehm',
'Nettles',
'Dees',
'Krieger',
'Peek',
'Hershberger',
'Sage',
'Custer',
'Zheng',
'Otoole',
'Elrod',
'Jaimes',
'Somers',
'Lira',
'Nagle',
'Grooms',
'Soria',
'Drury',
'Keane',
'Bostic',
'Hartmann',
'Pauley',
'Murrell',
'Agee',
'Manzo',
'Morey',
'Hamel',
'Dunning',
'Tavares',
'Mccloskey',
'Plunkett',
'Maples',
'March',
'Armenta',
'Waldrop',
'Espinal',
'Christenson',
'Fajardo',
'Robins',
'Bagwell',
'Massie',
'Leahy',
'Medlin',
'Urbina',
'Zhu',
'Pantoja',
'Barbee',
'Clawson',
'Reiter',
'Ko',
'Crider',
'Maxey',
'Worrell',
'Brackett',
'Mclemore',
'Younger',
'Her',
'Hardesty',
'Danner',
'Ragan',
'Almanza',
'Nielson',
'Graber',
'Mcintire',
'Griswold',
'Tirado',
'Seifert',
'Valles',
'Laney',
'Gupta',
'Malik',
'Libby',
'Marvin',
'Koontz',
'Marr',
'Kozlowski',
'Lemke',
'Brant',
'Phelan',
'Kemper',
'Gooden',
'Beaulieu',
'Cardoza',
'Healey',
'Hardwick',
'Zhao',
'Kitchens',
'Box',
'Stepp',
'Comstock',
'Poston',
'Sager',
'Conti',
'Borges',
'Farrow',
'Acker',
'Glaser',
'Antonio',
'Lennon',
'Gaither',
'Freitas',
'Alicea',
'Mcmillen',
'Chapin',
'Ratcliff',
'Lerma',
'Severson',
'Wilde',
'Mortensen',
'Winchester',
'Flannery',
'Villasenor',
'Centeno',
'Burkholder',
'Horan',
'Meador',
'Ingle',
'Roldan',
'Estrella',
'Pullen',
'Newkirk',
'Gaytan',
'Lindberg',
'Gatlin',
'Windham',
'Behrens',
'Stoltzfus',
'Cintron',
'Broderick',
'Jaime',
'Solorzano',
'Venable',
'Culbertson',
'Garay',
'Caputo',
'Grantham',
'Hanlon',
'Parry',
'Crist',
'Cosby',
'Shore',
'Everhart',
'Dorn',
'Eng',
'Turley',
'Valerio',
'Rand',
'Hiatt',
'Mota',
'Judge',
'Kinder',
'Colwell',
'Ashworth',
'Tejeda',
'Sikes',
'Oshea',
'Westmoreland',
'Culpepper',
'Faber',
'Logsdon',
'Fugate',
'Apodaca',
'Lindley',
'Samson',
'Liles',
'Mcclanahan',
'Burge',
'Vail',
'Etheridge',
'Boudreau',
'Andres',
'Noll',
'Higgs',
'Snead',
'Layman',
'Nolen',
'Turk',
'Wayne',
'Betz',
'Victor',
'Lafferty',
'Carbone',
'Skipper',
'Zeller',
'Kasper',
'Desantis',
'Fogle',
'Gandy',
'Mendenhall',
'Seward',
'Gulley',
'Schweitzer',
'Stine',
'Sowers',
'Duenas',
'Monson',
'Brinkman',
'Hubert',
'Motley',
'Pfeifer',
'Weinberg',
'Eggleston',
'Isom',
'Quinlan',
'Gilley',
'Jasso',
'Loya',
'Mull',
'Reichert',
'Reddy',
'Wirth',
'Hodgson',
'Stowe',
'Mccallum',
'Ahrens',
'Huey',
'Mattox',
'Dupont',
'Aguayo',
'Pak',
'Tice',
'Alba',
'Colburn',
'Currier',
'Gaskins',
'Harder',
'Cohn',
'Yoo',
'Garnett',
'Harter',
'Wenger',
'Charlton',
'Littleton',
'Minter',
'Cone',
'Henriquez',
'Vines',
'Kimmel',
'Crooks',
'Caraballo',
'Searcy',
'Peyton',
'Renfro',
'Groff',
'Moua',
'Thorn',
'Jay',
'Leigh',
'Sanborn',
'Wicker',
'Broome',
'Martens',
'Abney',
'Fisk',
'Argueta',
'Upchurch',
'Alderman',
'Tisdale',
'Castellano',
'Legg',
'Bills',
'Wilbur',
'Dix',
'Mauldin',
'Isbell',
'Mears',
'Latimer',
'Ashcraft',
'Earley',
'Tejada',
'Partridge',
'Anglin',
'Caswell',
'Easton',
'Kirchner',
'Mehta',
'Lanham',
'Blaylock',
'Binder',
'Catalano',
'Handley',
'Storm',
'Albertson',
'Free',
'Tuck',
'Keegan',
'Moriarty',
'Dexter',
'Mancuso',
'Allard',
'Pino',
'Chamberlin',
'Moffitt',
'Haag',
'Schott',
'Agnew',
'Malcolm',
'Hallman',
'Heckman',
'Karr',
'Soares',
'Alfonso',
'Tom',
'Wadsworth',
'Schindler',
'Garibay',
'Kuykendall',
'Penny',
'Littlefield',
'Mcnabb',
'Sam',
'Lea',
'Berrios',
'Murry',
'Dehart',
'Regalado',
'Mohammed',
'Counts',
'Solorio',
'Preciado',
'Armendariz',
'Martell',
'Barksdale',
'Frick',
'Haller',
'Broyles',
'Doll',
'Cable',
'Delvalle',
'Weems',
'Kelleher',
'Gagne',
'Albers',
'Kunz',
'Hawes',
'Hoy',
'Guenther',
'Johansen',
'Chaffin',
'Whitworth',
'Wynne',
'Mcmurray',
'Luce',
'Fiore',
'Straub',
'Majors',
'Mcduffie',
'Bohannon',
'Rawlings',
'Freed',
'Sutter',
'Lindstrom',
'Buss',
'Loera',
'Hoyle',
'Witte',
'Tyree',
'Luttrell',
'Andrus',
'Steed',
'Thiel',
'Cranford',
'Fulmer',
'Gable',
'Porras',
'Weis',
'Maas',
'Packard',
'Noyes',
'Kwon',
'Knoll',
'Marx',
'Feeney',
'Israel',
'Bohn',
'Cockrell',
'Glick',
'Cosgrove',
'Keefer',
'Mundy',
'Batchelor',
'Loveless',
'Horowitz',
'Haskell',
'Kunkel',
'Colson',
'Hedges',
'Staggs',
'Swisher',
'Lomeli',
'Padron',
'Cota',
'Homan',
'Musser',
'Curtin',
'Salerno',
'Segovia',
'Keeton',
'Brandenburg',
'Starling',
'Tsai',
'Mahon',
'Klinger',
'Paquette',
'Haddad',
'Mccune',
'Mathew',
'Higdon',
'Shull',
'Guest',
'Shay',
'Swafford',
'Angulo',
'Hackney',
'Evers',
'Sibley',
'Woodworth',
'Ostrander',
'Mangum',
'Smyth',
'Quarles',
'Mccarter',
'Close',
'Truitt',
'Stpierre',
'Mackay',
'Bayer',
'Timm',
'Thatcher',
'Bess',
'Trinidad',
'Jacoby',
'Proffitt',
'Concepcion',
'Parkinson',
'Carreon',
'Ramon',
'Monroy',
'Leger',
'Glynn',
'Jauregui',
'Neil',
'Taggart',
'Reddick',
'Wiese',
'Dover',
'Wicks',
'Hennessy',
'Bittner',
'Mcclung',
'Mcwhorter',
'Derrick',
'Strom',
'Beckham',
'Kee',
'Coombs',
'Holtz',
'Schrock',
'Maki',
'Willson',
'Hulsey',
'Whitson',
'Haugen',
'Lumpkin',
'Scholl',
'Gall',
'Carvalho',
'Kovach',
'Vieira',
'Irvine',
'Millan',
'Held',
'Jolley',
'Jasper',
'Cadena',
'Runyon',
'Lomax',
'Fahey',
'Hoppe',
'Bivens',
'Ruggiero',
'Hussey',
'Ainsworth',
'Hardman',
'Dugger',
'Ulloa',
'Fitzsimmons',
'Scroggins',
'Sowell',
'Toler',
'Barba',
'Biddle',
'Rafferty',
'Trapp',
'Byler',
'Brill',
'Delagarza',
'Thigpen',
'Hiller',
'Martins',
'Findley',
'Hollins',
'Jankowski',
'Stull',
'Pollack',
'Poirier',
'Bratton',
'Reno',
'Jeffery',
'Menendez',
'Mcnutt',
'Kohl',
'Forster',
'Clough',
'Deloach',
'Bader',
'Hanes',
'Sturm',
'Tafoya',
'Beall',
'Coble',
'Demers',
'Kohn',
'Santamaria',
'Vaught',
'Correia',
'Mcgrew',
'Roby',
'Sarmiento',
'Reinhart',
'Rosenbaum',
'Bernier',
'Schiller',
'Furman',
'Grabowski',
'Perryman',
'Kidwell',
'Sabo',
'Saxton',
'Noland',
'Seaton',
'Packer',
'Seal',
'Ruby',
'Smoot',
'Lavoie',
'Putman',
'Fairbanks',
'Neill',
'Florence',
'Beattie',
'Tarver',
'Stephen',
'Bolen',
'Mccombs',
'Barnhill',
'Freedman',
'Gaddis',
'Goad',
'Worden',
'Canada',
'Calvin',
'Vickery',
'Mcclintock',
'Slocum',
'Clausen',
'Mccutcheon',
'Ripley',
'Razo',
'Southard',
'Bourne',
'Aiello',
'Knudsen',
'Angeles',
'Keeney',
'Stacey',
'Neeley',
'Holly',
'Gallant',
'Eads',
'Lafleur',
'Fredrickson',
'Popp',
'Bobo',
'Pardo',
'Artis',
'Lawless',
'Shen',
'Headley',
'Pedraza',
'Pickard',
'Salvador',
'Hofmann',
'Davey',
'Szymanski',
'Dallas',
'Erb',
'Perea',
'Alcantar',
'Ashford',
'Crutchfield',
'Harry',
'Goebel',
'Ridgeway',
'Mcvey',
'Cordell',
'Florez',
'Kovacs',
'Calkins',
'Redden',
'Ricker',
'Farrington',
'Salcido',
'Reimer',
'Mullis',
'Mayhew',
'Register',
'Kaye',
'Blocker',
'Buford',
'Munguia',
'Cady',
'Burley',
'Sander',
'Robinette',
'Stubblefield',
'Shuman',
'Loy',
'Santillan',
'Deutsch',
'Sales',
'Langdon',
'Mazur',
'Clapp',
'Teal',
'Buffington',
'Elliot',
'Halstead',
'Sturgeon',
'Colley',
'Koehn',
'Bergstrom',
'Dunne',
'Pond',
'Gantt',
'Cousins',
'Viera',
'Wilks',
'Haase',
'Sweat',
'Simonson',
'Breedlove',
'Munn',
'Pitt',
'Faircloth',
'Peter',
'Wheaton',
'Howland',
'Merriman',
'Burney',
'Fusco',
'Bedford',
'Baltazar',
'Persaud',
'Gerard',
'Bourque',
'Chao',
'Slagle',
'Kirsch',
'Volk',
'Heim',
'Glasgow',
'Borders',
'Rauch',
'Goforth',
'Batson',
'Basham',
'Mount',
'Peace',
'Lazo',
'Samples',
'Amaro',
'Ibrahim',
'Slattery',
'Taft',
'Weatherford',
'Aparicio',
'Santoro',
'Jiang',
'Ritchey',
'Goble',
'Spring',
'Strain',
'Scully',
'Villareal',
'Toro',
'Duval',
'Jonas',
'Neuman',
'Dell',
'Varney',
'Wozniak',
'Conover',
'Landon',
'Sigler',
'Galbraith',
'Boss',
'Cepeda',
'Back',
'Mateo',
'Peebles',
'Arsenault',
'Cathey',
'Calabrese',
'Dodds',
'Gilbertson',
'Greenlee',
'Hoke',
'Sauceda',
'Vue',
'Lehmann',
'Lapointe',
'Laster',
'Zink',
'Moy',
'Ammons',
'Llamas',
'Foltz',
'Chew',
'Fleck',
'Amaral',
'Geer',
'Su',
'Carden',
'Nunley',
'Creel',
'Clarkson',
'Provost',
'Covey',
'Paine',
'Wofford',
'Frame',
'Dube',
'Grice',
'Tully',
'Bartels',
'Luciano',
'Molnar',
'Winstead',
'Canady',
'Moreau',
'Burnside',
'Bratcher',
'Infante',
'Peterman',
'Swope',
'Freeland',
'Vetter',
'Lanning',
'Marquis',
'Schulze',
'Thai',
'Coppola',
'Rayburn',
'Conte',
'Martz',
'Showalter',
'Bandy',
'Quinonez',
'Bunting',
'Rao',
'Belt',
'Cruse',
'Hamblin',
'Himes',
'Raney',
'Merrell',
'See',
'Gough',
'Maciel',
'Wimberly',
'Craddock',
'Marquardt',
'Wentz',
'Meeker',
'Sandberg',
'Mosier',
'Wasson',
'Hundley',
'Joe',
'Shumaker',
'Embry',
'Fortin',
'Akin',
'Olivarez',
'Seidel',
'Coons',
'Corrales',
'Earle',
'Matheny',
'Kish',
'Outlaw',
'Barnette',
'Lieberman',
'Spalding',
'Martel',
'Hargis',
'Kelso',
'Merrick',
'Fullerton',
'Fries',
'Doucette',
'Clouse',
'Prewitt',
'Hawks',
'Keaton',
'Worthy',
'Zook',
'Montez',
'Autry',
'Poore',
'Lemay',
'Forsyth',
'Shifflett',
'Briseno',
'Piazza',
'Welker',
'Tennant',
'Haggard',
'Heinz',
'Leighton',
'Brittain',
'Begley',
'Flanders',
'Hermann',
'Botello',
'Mathias',
'Hofer',
'Hutto',
'Godoy',
'Cave',
'Pagano',
'Asbury',
'Bowens',
'Withrow',
'Olivo',
'Harbin',
'Andre',
'Sandlin',
'Wertz',
'Desimone',
'Greiner',
'Heinrich',
'Whitcomb',
'Dayton',
'Petrie',
'Hair',
'Ketchum',
'Bianco',
'Cochrane',
'Heil',
'Shanahan',
'Dagostino',
'Wegner',
'Couture',
'Ling',
'Wingate',
'Arenas',
'Keel',
'Casteel',
'Boothe',
'Derosa',
'Horst',
'Rau',
'Mccorkle',
'Palermo',
'Altamirano',
'Nall',
'Shumate',
'Lightfoot',
'Creamer',
'Romeo',
'Coffin',
'Hutchings',
'Jerome',
'Hutcheson',
'Damron',
'Sorrell',
'Nickel',
'Sells',
'Pinkerton',
'Dao',
'Dion',
'Mcfarlane',
'Atwell',
'Ridenour',
'Sturgill',
'Schoen',
)
|
geradcoles/random-name
|
randomname/lists/surnames_american.py
|
Python
|
apache-2.0
| 44,764
|
[
"Dalton",
"MOE"
] |
2540284e7c1128eabb3b5ebb267b25c1649fc56aa80d2b90f9a6ecaa4e7b9890
|
# Copyright 2010-2017, The University of Melbourne
# Copyright 2010-2017, Brian May
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, unicode_literals
import six
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import python_2_unicode_compatible, smart_text
from django.utils.translation import ugettext
from django.utils.translation import ugettext_lazy as _
from karaage.middleware.threadlocals import get_current_user
try:
# Django >= 1.7
from django.contrib.contenttypes.fields import GenericForeignKey
except ImportError:
# Django < 1.7
from django.contrib.contenttypes.generic import GenericForeignKey
ADDITION = 1
CHANGE = 2
DELETION = 3
COMMENT = 4
class LogEntryManager(models.Manager):
def log_action(self, user_id, content_type_id, object_id,
object_repr, action_flag, change_message=''):
msg = self.model(None, None, user_id, content_type_id, object_id,
object_repr[:200], action_flag, change_message)
msg.save()
return msg
def log_object(self, obj, flag, message, user=None):
assert obj is not None
assert obj.pk is not None
if user is None:
user = get_current_user()
if user is None:
user_id = None
else:
user_id = user.pk
return self.log_action(
user_id=user_id,
content_type_id=ContentType.objects.get_for_model(obj).pk,
object_id=obj.pk,
object_repr=six.text_type(obj),
action_flag=flag,
change_message=message)
@python_2_unicode_compatible
class LogEntry(models.Model):
action_time = models.DateTimeField(_('action time'), auto_now_add=True)
user = models.ForeignKey(
settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL)
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.TextField(_('object id'), blank=True, null=True)
content_object = GenericForeignKey('content_type', 'object_id')
object_repr = models.CharField(_('object repr'), max_length=200)
action_flag = models.PositiveSmallIntegerField(_('action flag'))
change_message = models.TextField(_('change message'), blank=True)
objects = LogEntryManager()
class Meta:
verbose_name = _('log entry')
verbose_name_plural = _('log entries')
db_table = 'admin_log'
app_label = 'karaage'
ordering = ('-action_time', '-pk')
def __repr__(self):
return smart_text(self.action_time)
def __str__(self):
if self.action_flag == ADDITION:
return ugettext('Added "%(object)s".') % \
{'object': self.object_repr}
elif self.action_flag == CHANGE:
return ugettext('Changed "%(object)s" - %(changes)s') % {
'object': self.object_repr,
'changes': self.change_message,
}
elif self.action_flag == DELETION:
return ugettext('Deleted "%(object)s."') % \
{'object': self.object_repr}
elif self.action_flag == COMMENT:
return ugettext('Comment "%(object)s" - %(changes)s') % {
'object': self.object_repr,
'changes': self.change_message,
}
return ugettext('LogEntry Object')
def is_addition(self):
return self.action_flag == ADDITION
def is_change(self):
return self.action_flag == CHANGE
def is_deletion(self):
return self.action_flag == DELETION
def is_comment(self):
return self.action_flag == COMMENT
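# Hedged usage sketch: how the manager above is typically driven. It needs a
# configured Django project, and `person` is a hypothetical saved model
# instance, so the calls are shown as comments.
#
#   from karaage.common.models import LogEntry, CHANGE
#   LogEntry.objects.log_object(person, CHANGE, 'Changed email address')
#   # -> writes an admin_log row attributed to the current request's user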
|
brianmay/karaage
|
karaage/common/models.py
|
Python
|
gpl-3.0
| 4,374
|
[
"Brian"
] |
e7594792239fbbcbf381acbbc7631088964b5c21a1046d0bcaccf4748fc64cb0
|
import numpy as np
from .base import classifier
from .base import regressor
from numpy import asarray as arr
from numpy import asmatrix as mat
################################################################################
## KNNCLASSIFY #################################################################
################################################################################
class knnClassify(classifier):
"""A k-nearest neighbor classifier
Attributes:
Xtr,Ytr : training data (features and target classes)
classes : a list of the possible class labels
K : the number of neighbors to use in the prediction
alpha : the (inverse) "bandwidth" for a weighted prediction
0 = use unweighted data in the prediction
a = weight data point xi proportional to exp( - a * |x-xi|^2 )
"""
def __init__(self, X=None, Y=None, K=1, alpha=0):
"""Constructor for knnClassify object.
Any parameters are passed directly to train(); see train() for arguments.
"""
self.K = K
self.Xtr = []
self.Ytr = []
self.classes = []
self.alpha = alpha
if type(X) == np.ndarray and type(Y) == np.ndarray:
self.train(X, Y)
def __repr__(self):
str_rep = 'knn classifier, {} classes, K={}{}'.format(
len(self.classes), self.K, ', weighted (alpha=' + str(self.alpha) + ')'
if self.alpha else '')
return str_rep
def __str__(self):
str_rep = 'knn classifier, {} classes, K={}{}'.format(
len(self.classes), self.K, ', weighted (alpha=' + str(self.alpha) + ')'
if self.alpha else '')
return str_rep
## CORE METHODS ################################################################
def train(self, X, Y, K=None, alpha=None):
"""Train the classifier (for knn: store the input data)
Args:
X (arr): MxN array of M training examples with N features each
Y (arr): M, or M,1 array of target values associated with each datum
K (int): The number of neighbors to use for predictions.
alpha (float): Nonzero => use weighted average, Gaussian kernel with inverse scale alpha
"""
self.Xtr = np.asarray(X)
self.Ytr = np.asarray(Y)
self.classes = list(np.unique(Y))
if K is not None:
self.K = K
if alpha is not None:
self.alpha = alpha
def predictSoft(self, X):
"""This method makes a "soft" nearest-neighbor prediction on test data.
Args:
X (array): M,N array of M data points of N features to predict with
Returns:
P (array): M,C array of C class probabilities for each data point
"""
mtr,ntr = arr(self.Xtr).shape # get size of training data
mte,nte = arr(X).shape # get size of test data
if nte != ntr:
raise ValueError('Training and prediction data must have same number of features')
num_classes = len(self.classes)
prob = np.zeros((mte,num_classes)) # allocate memory for class probabilities
K = min(self.K, mtr) # (can't use more neighbors than training data points)
for i in range(mte): # for each test example...
# ...compute sum of squared differences...
dist = np.sum(np.power(self.Xtr - arr(X)[i,:], 2), axis=1)
# ...find nearest neighbors over training data and keep nearest K data points
indices = np.argsort(dist, axis=0)[0:K]
sorted_dist = dist[indices]; # = np.sort(dist, axis=0)[0:K]
wts = np.exp(-self.alpha * sorted_dist)
count = np.zeros((num_classes,));
for c in range(len(self.classes)): # find total weight of instances of that classes
count[c] = 1.0 * np.sum(wts[self.Ytr[indices] == self.classes[c]]);
prob[i,:] = count / count.sum() # save (soft) results
return prob
#def predict(self, X):
# """Not implemented; uses predictSoft. Implementation might be more efficient for large C"""
# mtr,ntr = arr(self.Xtr).shape # get size of training data
# mte,nte = arr(X).shape # get size of test data
# assert nte == ntr, 'Training and prediction data must have same number of features'
#
# num_classes = len(self.classes)
# Y_te = np.tile(self.Ytr[0], (mte, 1)) # make Y_te same data type as Ytr
# K = min(self.K, mtr) # (can't use more neighbors than training data points)
# for i in range(mte): # for each test example...
# # ...compute sum of squared differences...
# dist = np.sum(np.power(self.Xtr - arr(X)[i,:], 2), axis=1)
    # # ...find nearest neighbors over training data and keep nearest K data points
# sorted_dist = np.sort(dist, axis=0)[0:K]
# indices = np.argsort(dist, axis=0)[0:K]
# wts = np.exp(-self.alpha * sorted_dist)
# count = []
# for c in range(len(self.classes)):
    #            # total weight of instances of that class
# count.append(np.sum(wts[self.Ytr[indices] == self.classes[c]]))
# count = np.asarray(count)
# c_max = np.argmax(count) # find largest count...
# Y_te[i] = self.classes[c_max] # ...and save results
# return Y_te
################################################################################
################################################################################
################################################################################
class knnRegress(regressor):
"""A k-nearest neighbor regressor
Attributes:
Xtr,Ytr : training data (features and target values)
K : the number of neighbors to use in the prediction
alpha : the (inverse) "bandwidth" for a weighted prediction
0 = use unweighted data in the prediction
a = weight data point xi proportional to exp( - a * |x-xi|^2 )
"""
def __init__(self, X=None, Y=None, K=1, alpha=0):
"""Constructor for knnRegress object.
Any parameters are passed directly to train(); see train() for arguments.
"""
self.K = K
self.Xtr = []
self.Ytr = []
self.alpha = alpha
if X is not None and Y is not None:
self.train(X, Y)
def __repr__(self):
str_rep = 'knnRegress, K={}{}'.format(
self.K, ', weighted (alpha=' + str(self.alpha) + ')'
if self.alpha else '')
return str_rep
def __str__(self):
str_rep = 'knnRegress, K={}{}'.format(
self.K, ', weighted (alpha=' + str(self.alpha) + ')'
if self.alpha else '')
return str_rep
## CORE METHODS ################################################################
def train(self, X, Y, K=None, alpha=None):
"""Train the regressor (for knn: store the input data)
Args:
X (arr): MxN array of M training examples with N features each
Y (arr): M, or M,1 array of target values associated with each datum
K (int): The number of neighbors to use for predictions.
alpha (float): Nonzero => use weighted average, Gaussian kernel with inverse scale alpha
"""
self.Xtr = np.asarray(X)
self.Ytr = np.asarray(Y)
if K is not None:
self.K = K
if alpha is not None:
self.alpha = alpha
def predict(self, X):
"""This method makes a nearest neighbor prediction on test data X.
Args:
X : MxN numpy array containing M data points with N features each
Returns:
array : M, or M,1 numpy array of the predictions for each data point
"""
ntr,mtr = arr(self.Xtr).shape # get size of training data
nte,mte = arr(X).shape # get size of test data
if mtr != mte:
raise ValueError('knnRegress.predict: training and prediction data must have the same number of features')
Y_te = np.tile(self.Ytr[0], (nte, 1)) # make Y_te the same data type as Ytr
K = min(self.K, ntr) # can't have more than n_tr neighbors
for i in range(nte):
dist = np.sum(np.power((self.Xtr - X[i]), 2), axis=1) # compute sum of squared differences
            sorted_idx = np.argsort(dist, axis=0)[:K]            # find the nearest neighbors over Xtr and...
sorted_dist = dist[sorted_idx]; # ...keep nearest K data points
wts = np.exp(-self.alpha * sorted_dist)
Y_te[i] = arr(wts).dot(self.Ytr[sorted_idx].T) / np.sum(wts) # weighted average
return Y_te
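################################################################################
# A minimal usage sketch (added for illustration; the random data and the
# parameter values are assumptions, not part of the original module). It
# assumes this module's usual top-level imports (numpy as np and an `arr`
# alias for np.asarray) are present. Run with e.g.: python -m mltools.knn
if __name__ == '__main__':
    np.random.seed(0)
    Xtr = np.random.rand(50, 2)                       # 50 points, 2 features
    Ytr = np.sin(4 * Xtr[:, 0]) + 0.1 * np.random.randn(50)
    model = knnRegress(Xtr, Ytr, K=3, alpha=2.0)      # weighted 3-NN regression
    print(model)                                      # repr shows K and alpha
    print(model.predict(np.random.rand(5, 2)))        # 5 weighted-average predictions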
|
sameersingh/ml-discussions
|
week9/mltools/knn.py
|
Python
|
apache-2.0
| 9,133
|
[
"Gaussian"
] |
2847c6a96b44c36ec0026ef843966ada9dd42b65593ce0296cc4684e6ff8d6db
|
'''
Author information: this module extends the Utils class from the Allen Brain API examples.
The parallel wiring related functions were written by Russell Jarvis rjjarvis@asu.edu
'''
import logging
import glob
from mpi4py import MPI
import numpy as np
from copy import deepcopy
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s - %(funcName)s - %(lineno)d')
fh = logging.FileHandler('wiring.log')
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
import pdb as pdb
import pickle
import json
import os
#Numba, something I should include in the optimizer package.
#from numba import jit
#from numpy import arange
#@jit
from neuron import h
class Utils():
def __init__(self,NCELL=40,readin=0):
#super(Utils, self).__init__(description)
# To Do
# reduce the number of side effects associated with functions in this code
# by replacing a lot of instance attributes with local variables in methods.
# This will reduce the size of the interface, and increase modularity/maintainability.
        # The hocobject and its namespace are designed in such a way that modifying its state in one function
        # will always result in global side effects.
#setattr(self,'h',h)
        #h=h
##from neuron import h
self.h=h
h('objref pc, py')
h('pc = new ParallelContext()')
h('py = new PythonObject()')
setattr(self, 'readin',readin)
setattr(self, 'synapse_list',[])
setattr(self, 'namedict',{})
setattr(self, 'global_namedict',{})
setattr(self,'global_spike',tuple)
self.tvec=h.Vector()
self.gidvec=h.Vector()
self.has_cells=0
#self.readin=readin
#self.synapse_list=[]
self.stim = None
self.stim_curr = None
self.sampling_rate = None
self.cells = []
self.gidlist=[]
#TODO update initial attributes using the more pythonic setattr
self.global_vec=[]
#self.NCELL=NCELL
setattr(self,'NCELL',NCELL)
setattr(self,'celldict',{})
setattr(self,'name_list',[])
#self.celldict={}
self.COMM = MPI.COMM_WORLD
self.SIZE = self.COMM.Get_size()
self.RANK = self.COMM.Get_rank()
self.allsecs=None #global list containing all NEURON sections, initialized via mkallsecs
self.coordict=None
self.celldict={}
self.cellmorphdict={}
self.nclist = []
self.seclists=[]
self.icm = np.zeros((self.NCELL, self.NCELL))
self.ecm = np.zeros((self.NCELL, self.NCELL))
self.visited = np.zeros((self.NCELL, self.NCELL))
self.global_visited = np.zeros_like(self.icm)
self.global_icm = np.zeros_like(self.icm)
self.global_ecm = np.zeros_like(self.ecm)
self.debugdata=[]
self.names_list=np.zeros((self.NCELL, self.NCELL))
self.global_names_list=np.zeros((self.NCELL, self.NCELL))
h('load_file("nrngui.hoc")')
h('load_file("import3d.hoc")')
h('load_file("morph4.hoc")')
def prep_list(self):
'''
        Construct a new list with one in three inhibitory neurons and two in three excitatory neurons.
        It would be preferable to make an exhaustive list of all neurons;
        however, that is not practical when debugging small models composed
        of a balance between excitation and inhibition.
'''
cil = pickle.load(open('cellinfolist.p', 'rb'))
cil.remove(cil[0])#The first list element is the column titles.
        cil = [i for i in cil if len(i) > 9]
assert len(cil)!=0
markram = [i for i in cil if "Markram" in i]
aspiny=[i for i in cil if not "interneuron" in i if not "pyramid" in i]
return markram
    #def _move_cells(self):
    #    '''
    #    Not something that would typically be executed.
    #    '''
    #    for m in markram:
    #        execute_string = 'mv main/' + m[len(m)-2] + ' swcfolder'
    #        print execute_string
    #        os.system(execute_string)
    #    return markram
def both_trans(self,markram):
'''
        A private method (by convention, prepend _ to all private methods in this class).
        Make sure that the network is composed of 2/3 excitatory neurons and 1/3 inhibitory neurons.
'''
assert len(markram)!=0
bothtrans=[]
bothtrans=[i for j,i in enumerate(markram) if "pyramid" in i if (j<=2*(self.NCELL/3.0))]
interneuron=[i for j,i in enumerate(markram) if "interneuron" in i ]
bothtrans.extend(interneuron[0:int(2*(self.NCELL/3.0))])
import platform
plat=platform.platform(aliased=0, terse=0)
print 'platform of NSG', plat
if 'Darwin' in plat:
import btmorph
import os
cwd=os.getcwd()
btmorph.population_density_projection(destination=str(cwd), \
filter='*.swc', outN=str(cwd)+"/density.png", precision=[10, 10, 10],depth='Y')
#btmorph.plot_3D_SWC(filtered_fn)
return bothtrans
def make_cells(self,cell_list):
'''
Distribute cells across the hosts in a
Round robin distribution (circular dealing of cells)
https://en.wikipedia.org/wiki/Round-robin
'''
#import neuron
#from neuron import h
coords = [0 for i in xrange(0,3)]#define list as a local variable.
h('objref py')
h('py = new PythonObject()')
NCELL=self.NCELL
SIZE=self.SIZE
RANK=self.RANK
pc=h.ParallelContext()
h('objref tvec, gidvec')
h('gidvec = new Vector()')
h('tvec = new Vector()')
assert len(cell_list)!=0
d = { x: y for x,y in enumerate(cell_list)}
itergids = iter( (d[i][3],i) for i in range(RANK, NCELL, SIZE) )
        #Create a dictionary whose keys are soma centre coordinates, to check for two cells occupying the same position.
        #Since dictionary keys must be unique, this should throw an error if two cell somas occupy exactly the same coordinates.
#TODO keep rank0 free of cells, such that all the memory associated with that CPU is free for graph theory related objects.
#This would require an iterator such as the following.
for (j,i) in itergids:
            self.has_cells=1 # RANK-specific attribute simplifies later code.
cell = h.mkcell(j)
self.names_list[i]=j
print cell, j,i
cell.geom_nseg()
cell.gid1=i
cell.name=j
# Populate the dictionary with appropriate keys for the sanity check.
#excitatory neuron.
#self.test_cell(d[i])
if 'pyramid' in d[i]:
cell.pyr()
cell.polarity=1
#inhibitory neuron.
else:
cell.basket()
cell.polarity=0
            #The following 8 lines of code are drawn from:
#http://neuron.yale.edu/neuron/static/docs/neuronpython/ballandstick5.html
pc.set_gid2node(i,RANK)
nc = cell.connect2target(None)
pc.cell(i, nc) # Associate the cell with this host and gid
#### Record spikes of this cell
pc.spike_record(i, self.tvec, self.gidvec)
assert None!=pc.gid2cell(i)
self.celldict[i]=cell
self.cells.append(cell)
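    # Round-robin dealing sketch (comment-only illustration; NCELL=7 and
    # SIZE=3 are assumed example values): range(RANK, NCELL, SIZE) deals GIDs as
    #   RANK 0 -> [0, 3, 6], RANK 1 -> [1, 4], RANK 2 -> [2, 5]
    # so every host receives an (almost) equal share of cells.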
'''
def plot_cell_centre(self,cell_list):
Method string:
plot all of these cell centres in java script, and or plotly
Either run make cells or this method but not both.
import neuron
from neuron import h
coords = [0 for i in xrange(0,3)]#define list as a local variable.
h('objref py')
h('py = new PythonObject()')
NCELL=self.NCELL
SIZE=self.SIZE
RANK=self.RANK
pc=h.ParallelContext()
h('objref tvec, gidvec')
h('gidvec = new Vector()')
h('tvec = new Vector()')
print len(cell_list)
d = { x: y for x,y in enumerate(cell_list)}
itergids = iter( (d[i][3],i) for i in range(RANK, NCELL, SIZE) )
#Create a dictionary, where keys are soma centre coordinates to check for two cells occupying the same position.
#since dictionary keys have to be unique should throw error once two cell somas occupy exactly the same coordinates.
checkd={}
plotx=[]
ploty=[]
plotz=[]
#TODO keep rank0 free of cells, such that all the memory associated with that CPU is free for graph theory related objects.
#This would require an iterator such as the following.
for (j,i) in itergids:
has_cells=1#RANK specific attribute simplifies later code.
cell = h.mkcell(j)
self.names_list[i]=j
print cell, j,i
cell.geom_nseg()
cell.gid1=i
cell.name=j
# Populate the dictionary with appropriate keys for the sanity check.
sec=cell.soma[0]
sec.push()
plotx.append(neuron.h.x3d(0))
ploty.append(neuron.h.y3d(0))
plotz.append(neuron.h.z3d(0))
key_checkd=str(neuron.h.x3d(0))+str(neuron.h.y3d(0))+str(neuron.h.z3d(0))
#Dictionary values may as well be the morphology SWC name, in case of a file that commits offensive duplicating of position.
#Additionally I may as well make a plot of soma positions.
#assert !(key_checkd in checkd.keys()) #If the key is not in the dictionary, then add it and proceed with the business of cell instantiation.
checkd[key_checkd] = (j, str(neuron.h.x3d(0)),str(neuron.h.y3d(0)),str(neuron.h.z3d(0)) )
print key_checkd, checkd[key_checkd]
h.pop_section()
#excitatory neuron.
#self.test_cell(d[i])
if 'pyramid' in d[i]:
cell.pyr()
cell.polarity=1
#inhibitory neuron.
else:
cell.basket()
cell.polarity=0
#8 lines of trailing code is drawn from.
#http://neuron.yale.edu/neuron/static/docs/neuronpython/ballandstick5.html
pc.set_gid2node(i,RANK)
nc = cell.connect2target(None)
pc.cell(i, nc) # Associate the cell with this host and gid
#### Record spikes of this cell
pc.spike_record(i, self.tvec, self.gidvec)
assert None!=pc.gid2cell(i)
self.celldict[i]=cell
self.cells.append(cell)
import platform
plat=platform.platform(aliased=0, terse=0)
print 'platform of NSG', plat
if 'Darwin' in plat:
import plotly.plotly as py
import plotly.graph_objs as go
plotly.offline.plot({
trace1 = go.Scatter3d(
x=plotx,
y=ploty,
z=plotz,
mode='markers',
marker=dict(
color='rgb(127, 127, 127)',
size=12,
symbol='circle',
line=dict(
color='rgb(204, 204, 204)',
width=1
),
opacity=0.9
)
)
data=[trace1]
layout = go.Layout(
margin=dict(
l=0,
r=0,
b=0,
t=0
)
)
})
fig = go.Figure(data=data, layout=layout)
plot_url = py.plot(fig, filename='soma centre positions')
from neuronvisio.controls import Controls
controls = Controls()
checkd=None
'''
def gcs(self,NCELL):
'''
Instantiate NEURON cell Objects in the Python variable space such
that all cells have unique identifiers.
'''
NCELL=self.NCELL
SIZE=self.SIZE
RANK=self.RANK
#from neuron import h
pc=h.ParallelContext()
h('objref nc, cells')
swcdict={}
NFILE = 3175
bothtrans=self.both_trans(self.prep_list())
self.names_list=[0 for x in xrange(0,len(bothtrans))]
os.chdir(os.getcwd() + '/swclist')
self.make_cells(bothtrans)
os.chdir(os.getcwd() + '/../')
h.define_shape()
h('forall{ for(x,0){ insert xtra }}')
h('forall{ for(x,0){ insert extracellular}}')
h('xopen("interpxyz.hoc")')
h('grindaway()')
def spike_gather(self):
NCELL=self.NCELL
SIZE=self.SIZE
COMM = self.COMM
RANK=self.RANK
self.global_spike=COMM.gather([self.tvec.to_python(),self.gidvec.to_python()], root=0)
def cell_info_gather(self):
NCELL=self.NCELL
SIZE=self.SIZE
COMM = self.COMM
RANK=self.RANK
self.namedict= { key : (value.name, int(value.polarity)) for key,value in self.celldict.iteritems() }
self.global_namedict=COMM.gather(self.namedict, root=0)
if RANK==0:
self.global_namedict = {key : value for dic in self.global_namedict for key,value in dic.iteritems() }
def matrix_reduce(self, matrix=None):
'''
        Collapse many incomplete rank-specific matrices into one complete global matrix on rank 0.
        This function has side effects (it mutates shared state); this will become clearer after refactoring.
        '''
        # TODO make this method argument-based so it can handle arbitrary input
        # matrices, not just a few particular types.
        # TODO apply a function decorator.
NCELL=self.NCELL
SIZE=self.SIZE
COMM = self.COMM
RANK=self.RANK
global_matrix = np.zeros_like(matrix)
COMM.Reduce([matrix, MPI.DOUBLE], [global_matrix, MPI.DOUBLE], op=MPI.SUM,
root=0)
#if RANK==0:
# assert np.sum(global_matrix)!=0
# The icm might be zero for example.
return global_matrix
def prun(self,tstop):
#from neuron import h
pc=h.ParallelContext()
NCELL=self.NCELL
SIZE=self.SIZE
COMM = self.COMM
RANK=self.RANK
checkpoint_interval = 50000.
#The following definition body is from the open source code at:
#http://senselab.med.yale.edu/ModelDB/ShowModel.asp?model=151681
#with some minor modifications
cvode = h.CVode()
cvode.cache_efficient(1)
# pc.spike_compress(0,0,1)
pc.setup_transfer()
mindelay = pc.set_maxstep(10)
if RANK == 0:
print 'mindelay = %g' % mindelay
runtime = h.startsw()
exchtime = pc.wait_time()
inittime = h.startsw()
h.stdinit()
inittime = h.startsw() - inittime
if RANK == 0:
print 'init time = %g' % inittime
while h.t < tstop:
told = h.t
tnext = h.t + checkpoint_interval
if tnext > tstop:
tnext = tstop
pc.psolve(tnext)
if h.t == told:
if RANK == 0:
print 'psolve did not advance time from t=%.20g to tnext=%.20g\n' \
% (h.t, tnext)
break
print 'working', h.t
runtime = h.startsw() - runtime
comptime = pc.step_time()
splittime = pc.vtransfer_time(1)
gaptime = pc.vtransfer_time()
exchtime = pc.wait_time() - exchtime
if RANK == 0:
print 'runtime = %g' % runtime
print comptime, exchtime, splittime, gaptime
def pre_synapse(self,j):
'''
        Search for viable synaptic vesicle (presynaptic) sites.
'''
#from neuron import h
pc=h.ParallelContext()
shiplist=[]
h('objref coords')
h('coords = new Vector(3)')
#self.celldict.items()[0]
if j in self.celldict.keys():
seglist= iter( (seg, sec, self.celldict[j]) for sec in self.celldict[j].spk_trig_ls for seg in sec )
for (seg,sec, cellc) in seglist:
sec.push()
get_cox = str('coords.x[0]=x_xtra('
+ str(seg.x) + ')')
h(get_cox)
get_coy = str('coords.x[1]=y_xtra('
+ str(seg.x) + ')')
h(get_coy)
get_coz = str('coords.x[2]=z_xtra('
+ str(seg.x) + ')')
h(get_coz)
coordict={}
coordict['hostfrom'] = pc.id()
coordict['coords'] = np.array(h.coords.to_python(),
dtype=np.float64)
coordict['gid']= int(j)
coordict['seg']= seg.x
secnames = h.cas().name()#sec.name()
coordict['secnames'] = str(secnames)
shiplist.append(coordict)
h.pop_section()
'''
total_matrix=np.matrix(( 3,len(shiplist) ))
total_list=[ (x['coords'][0],x['coords'][1],x['coords'][2]) for x in shiplist ]
for i,j in enumerate(total_list):
print type(j)
#pdb.set_trace()
total_matrix[i][0]=j[0]
total_matrix[i][1]=j[1]
total_matrix[i][2]=j[2]
print total_array[:]
'''
return shiplist
def alloc_synapse_ff(self,r,post_syn,cellind,k,gidn,i):
NCELL=self.NCELL
SIZE=self.SIZE
COMM = self.COMM
RANK=self.RANK
pc=h.ParallelContext()
polarity = 0
polarity=int(h.Cell[int(cellind)].polarity)
if polarity==1:
#TODO pickle load the graphs here instead of making them manually.
self.ecm[i][gidn] = self.ecm[i][gidn] + 1
#self.ecg.add_edge(i,gidn,weight=r/0.4)
assert np.sum(self.ecm)!=0
else:
self.icm[i][gidn] = self.icm[i][gidn] + 1
#self.icg.add_edge(i,gidn,weight=r/0.4)
assert np.sum(self.icm)!=0
#TODO Add other edge attributes like secnames etc.
print post_syn
h('objref syn_')
h(post_syn)
syn_=h.syn_
h.syn_.cid=i
h.Cell[cellind].ampalist.append(h.syn_)
h.Cell[cellind].div.append(k['gid'])
h.Cell[cellind].gvpre.append(k['gid'])
nc=pc.gid_connect(k['gid'],syn_)
nc.threshold = -20
nc.delay=1+r/0.4
nc.weight[0]=r/0.4
self.nclist.append(nc)
def alloc_synapse(self,r,h,sec,seg,cellind,secnames,k,i,gidn):
'''
        Allocate a synaptic cleft from exhaustive collision detection.
'''
NCELL=self.NCELL
SIZE=self.SIZE
COMM = self.COMM
RANK=self.RANK
#from neuron import h
pc=h.ParallelContext()
#from neuron import h
self.visited[i][gidn] = self.visited[i][gidn] + 1
        if r < 2.5: # 2.5 micrometres.
polarity = 0
polarity=int(h.Cell[int(cellind)].polarity)
h('objref syn_')
if int(polarity) == int(0):
post_syn = secnames + ' ' + 'syn_ = new FastInhib(' + str(seg.x) + ')'
#post_syn = secnames + ' ' + 'syn_ = new GABAa(' + str(seg.x) + ')'
self.icm[i][gidn] = self.icm[i][gidn] + 1
#self.icg.add_edge(i,gidn,weight=r/0.4)
#self.icg[i][gidn]['post_loc']=secnames
#self.icg[i][gidn]['pre_loc']=k['secnames']
assert np.sum(self.icm)!=0
else:
if (k['gid']%2==0):
#TODO Find standard open source brain affiliated code for NMDA synapse
post_syn = secnames + ' ' + 'syn_ = new AmpaNmda(' + str(seg.x) + ')'
self.ecm[i][gidn] = self.ecm[i][gidn] + 1
#self.ecg.add_edge(i,gidn,weight=r/0.4)
#self.ecg[i][gidn]['post_loc']=secnames
#self.ecg[i][gidn]['pre_loc']=k['secnames']
self.seclists.append(secnames)
assert np.sum(self.ecm)!=0
else:
#TODO Find standard open source brain affiliated code for NMDA synapse
post_syn = secnames + ' ' + 'syn_ = new ExpSid(' + str(seg.x) + ')'
self.ecm[i][gidn] = self.ecm[i][gidn] + 1
#self.ecg.add_edge(i,gidn,weight=r/0.4)
#self.ecg[i][gidn]['post_loc']=secnames
#self.ecg[i][gidn]['pre_loc']=k['secnames']
self.seclists.append(secnames)
assert np.sum(self.ecm)!=0
h(post_syn)
print post_syn
self.synapse_list.append((r,post_syn,cellind,k,gidn,i))
syn_=h.syn_
h.syn_.cid=i
h.Cell[cellind].ampalist.append(h.syn_)
h.Cell[cellind].div.append(k['gid'])
h.Cell[cellind].gvpre.append(k['gid'])
nc=pc.gid_connect(k['gid'],syn_)
nc.threshold = -20
nc.delay=1+r/0.4
nc.weight[0]=r/0.4
self.nclist.append(nc)
def post_synapse(self,data):
"""
search viable post synaptic receptor sites.
This is the inner most loop of the parallel wiring algorithm.
For every GID
For every coordinate thats received from a broadcast.
for i,t in self.celldict.iteritems():
For ever GID thats on this host (in the dictionary)
if i in self.celldict.keys():
for k,i,t in iterdata :
if the gids are not the same.
Rule out self synapsing neurons (autopses), with the condition
pre GID != post GID
If the putative post synaptic gid exists on this CPU, the referen
tree.
This wiring algorithm uses HOC variables that interpolate the middl
some C libraries to achieve collision detection for synapse alloc
from neuromac.segment_distance import dist3D_segment_to_segment
from segment_distance import dist3D_segment_to_segment
and I am considering using them here also
"""
#from segment_distance import dist3D_segment_to_segment
NCELL=self.NCELL
SIZE=self.SIZE
COMM = self.COMM
RANK=self.RANK
#from neuron import h
pc=h.ParallelContext()
#from neuron import h
secnames = ''
cellind =0
polarity = 0
h('objref coords')
h('coords = new Vector(3)')
h('objref pc')
h('pc = new ParallelContext()')
h('objref coords2')
h('coords2 = new Vector(3)')
for q,s in enumerate(data):
for t in s:
                k={} #This redundant variable switching forces the dictionary k to be redeclared,
                k=t  #such that it is not prevented from updating.
itercell= ( (i,t) for i,t in self.celldict.iteritems() if i in self.celldict.keys() if int(t.gid1) != int(k['gid']) )
for i,t in itercell :
# TODO save time by checking if the somas of the two cells are reasonably close before checking every sec,seg in every neuron.
#
# t.soma[0].
iterseg=iter( (seg,sec) for sec in t.spk_rx_ls for seg in sec)
for (seg,sec) in iterseg:
segxold=seg.x
h('objref cell1')
h('cell1=pc.gid2cell('+str(i)+')')
secnames = sec.name()
cellind = int(secnames[secnames.find('Cell[') + 5:secnames.find('].')]) # This is the index of the post synaptic cell.
h(str('coords2.x[2]=') + str('z_xtra(')
+ str(seg.x) + ')')
h(str('coords2.x[1]=') + str('y_xtra(')
+ str(seg.x) + ')')
h(str('coords2.x[0]=') + str('x_xtra(')
+ str(seg.x) + ')')
h('coordsx=0.0')
h.coordsx = k['coords'][0]
h('coordsy=0.0')
h.coordsy = k['coords'][1]
h('coordsz=0.0')
h.coordsz = k['coords'][2]
                        #h('coordsx') and coordsx are not tautologous.
                        #One is a variable in the HOC space, the other is in Python.
                        #coordsx from the Python space has been broadcast over the ranks.
coordsx = float(k['coords'][0])
coordsy = float(k['coords'][1])
coordsz = float(k['coords'][2])
                        #Find the Euclidean distance between putative presynaptic segments
                        #and putative postsynaptic segments.
                        #If the Euclidean distance is below an allowable threshold in
                        #micrometers, continue on with the code responsible for assigning a
                        #synapse and a netcon. NEURON's ParallelContext class can handle the actual message passing associated with sending and receiving action potentials on different hosts.
r = 0.
import math
r=math.sqrt((h.coords2.x[0] - coordsx)**2+(h.coords2.x[1] - coordsy)**2+(h.coords2.x[2] - coordsz)**2)
gidn=k['gid']
r = float(r)
self.alloc_synapse(r,h,sec,seg,cellind,secnames,k,i,gidn)
'''
def destroy_isolated_cells(self):
To be called locally on every rank
This method is intended to do two things.
    First it finds and deletes isolated nodes (degree 0) from the 3 networkx objects.
    Then it destroys the associated HOC cell objects with degree 0,
    provided this does not prove fatal to the subsequent NEURON simulation.
import networkx as nx
self.whole_net=nx.compose(self.ecg, self.icg)
#self.whole_net.compose(self.ecg, self.icg)
isolatedlist=nx.isolates(self.whole_net)
self.whole_net.remove_nodes_from(nx.isolates(self.whole_net))
self.icg.remove_nodes_from(nx.isolates(self.icg))
self.ecg.remove_nodes_from(nx.isolates(self.ecg))
for i in isolatedlist:
print i, "isolated", celldict[i]
celldict[i]=None #hopefully this will destroy the cell.
pass
#TODO hoc object level code that destroys the cell object.
# cell=pc.gid2cell(i)
# h('objref cell')
'''
def wirecells(self):
"""This function constitutes the outermost loop of the parallel wiring algor
The function returns two adjacency matrices. One matrix whose elements are excitatory connections and another matrix of inhibitory connections"""
#from segment_distance import dist3D_segment_to_segment
import pickle
NCELL=self.NCELL
SIZE=self.SIZE
COMM = self.COMM
RANK=self.RANK
#from neuron import h
pc=h.ParallelContext()
secnames = ''
cellind =0
polarity = 0
h('objref coords')
h('coords = new Vector(3)')
h('objref pc')
h('pc = new ParallelContext()')
coordict=None
coordictlist=[]
#Iterate over all CPU ranks, iterate through all GIDs (global
#identifiers, stored in the python dictionary).
if self.readin!=1:
for s in xrange(0, SIZE):
            #Synchronise processes here: all ranks must have finished receiving the
            #transmitted material before another transmission of the coordictlist begins,
            #otherwise a coordictlist could be overwritten before it has been properly exhausted.
            COMM.barrier() # Extra synchronisation; may cost some CPU and can possibly be removed.
coordictlist=[]
if COMM.rank==s:
                print 'begin creating message for transmission on rank ', COMM.rank,' s ', s
celliter= iter(i for i in self.celldict.keys())
for i in celliter:
cell1=pc.gid2cell(i)
coordictlist.append(self.pre_synapse(i))
print 'end tx on rank ', COMM.rank
data = COMM.bcast(coordictlist, root=s) # ie root = rank
print 'checking for rx on rank ', COMM.rank
if len(data) != 0:
                print 'received rx on rank ', COMM.rank
self.post_synapse(data)
print 'using received message on rank ', COMM.rank
print len(data)
print('finished wiring of connectivity\n')
fname='synapse_list'+str(RANK)+'.p'
if COMM.rank!=0: assert len(self.synapse_list)!=0
with open(fname, 'wb') as handle:
pickle.dump(self.synapse_list, handle)
fname='visited'+str(RANK)+'.p'
with open(fname, 'wb') as handle:
pickle.dump(self.visited,handle)
#self.destroy_isolated_cells()
else:
#if COMM.rank!=0:
fname='synapse_list'+str(RANK)+'.p'
with open(fname, 'rb') as handle:
self.synapse_list=pickle.load(handle)
#for s in self.synapse_list:
for (r,post_syn,cellind,k,gidn,i) in self.synapse_list:
self.alloc_synapse_ff(r,post_syn,cellind,k,gidn,i)
#destroy_isolated_cells()
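    # Broadcast-round sketch (comment-only illustration): with SIZE hosts, each
    # rank s in turn builds its local pre-synaptic coordictlist and bcast()s it
    # from root=s; every rank then consumes the received list in post_synapse(),
    # so after SIZE rounds every (pre-host, post-host) pair has been compared.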
def tracenet(self):
'''
This method does two things.
1 Send a matrix to rank0.
2 Do a local hub node computation.
        Ideally there would be too many neurons to visualise properly in a network graph.
Future design decision. Keep rank0 free of cells, such that it has the RAM to store
big matrices.
# Then destroy after sending to rank 0.
# Maybe stimulate one cell per CPU.
#
'''
ncsize=len(h.NetCon)
NCELL=self.NCELL
SIZE=self.SIZE
COMM = self.COMM
RANK=self.RANK
        # matrix_reduce() requires a matrix argument; reducing the rank-local
        # 'visited' counts here is an assumption (any rank-local matrix would do).
        self.global_visited = self.matrix_reduce(self.visited)
lsoftup=[]
        for i, j in enumerate(h.NetCon):
            if j is not None:
                srcind=int(j.srcgid())
                tgtind=int(j.postcell().gid1)
                print srcind, tgtind, self.celldict[srcind], self.celldict[tgtind]
                lsoftup.append((srcind, tgtind, self.celldict[srcind], self.celldict[tgtind]))
return lsoftup
def dumpjson_graph(self):
assert self.COMM.rank==0
import json
d =[]
whole=self.global_ecm+self.global_icm
d.append(whole.tolist())
self.cell_info_gather()
d.append(self.global_namedict)
json.dump(d, open('web/js/global_whole_network.json','w'))
        d=json.load(open('web/js/global_whole_network.json','r'))
        #read the object back just to prove that it is readable.
        d=None #destroy the object.
        print('Wrote JSON data to web/js/global_whole_network.json')
# open URL in running web browser
#http_server.load_url('force/force.html')
def dumpjson_spike(self,tvec,gidvec):
assert self.COMM.rank==0
import json
h=self.h
d =[]
d.append(self.global_namedict)
assert (type(tvec)!=type(self.h) and type(gidvec)!=type(self.h))
d.append(tvec)
d.append(gidvec)
json.dump(d, open('web/js/spike.json','w'))
d=json.load(open('web/js/spike.json','r'))
        #read the object back just to prove that it is readable.
d=None #explicitly destroy the object, as garbage collection would do anyway.
print('Wrote JSON data to web/js/spike.json')
def generate_morphology(self, cell, morph_filename):
'''
This code is from the Allen Brain API examples.
This code is no longer executed.
morph4.hoc is executed instead.
'''
swc = h.Import3d_SWC_read()
swc.input(morph_filename)
imprt = h.Import3d_GUI(swc, 0)
h('execute("forall delete_section()",cell)')
imprt.instantiate(cell)
for seg in cell.soma[0]:
seg.area()
for sec in cell.allsec():
sec.nseg = 1 + 2 * int(sec.L / 40)
h.define_shape()
#cell.simplify_axon()
#for sec in cell.axonal:
# sec.L = 30
# sec.diam = 1
# sec.nseg = 1 + 2 * int(sec.L / 40)
#cell.axon[0].connect(cell.soma[0], 0.5, 0)
#cell.axon[1].connect(cell.axon[0], 1, 0)
def load_cell_parameters(self, cell, type_index):
#This code is from the Allen Brain API examples.
'''
This code is from the Allen Brain API examples.
This code is no longer executed.
morph4.hoc is executed instead.
It is just good py-hoc example code.
'''
#from neuron import h
passive = self.description.data['fit'][type_index]['passive'][0]
conditions = self.description.data['fit'][type_index]['conditions'][0]
genome = self.description.data['fit'][type_index]['genome']
# Set passive properties
cm_dict = dict([(c['section'], c['cm']) for c in passive['cm']])
for sec in cell.all:
sec.Ra = passive['ra']
sec.cm = cm_dict[sec.name().split(".")[1][:4]]
sec.insert('pas')
for seg in sec:
seg.pas.e = passive["e_pas"]
# Insert channels and set parameters
for p in genome:
sections = [s for s in cell.all if s.name().split(".")[1][:4] == p["section"]]
for sec in sections:
sec.push()
if p["mechanism"] != "":
print p["mechanism"]
sec.insert(p["mechanism"])
h('print psection()')
setattr(sec, p["name"], p["value"])
h.pop_section()
# Set reversal potentials
for erev in conditions['erev']:
sections = [s for s in cell.all if s.name().split(".")[1][:4] == erev["section"]]
for sec in sections:
sec.ena = erev["ena"]
sec.ek = erev["ek"]
def setup_iclamp_step(self, target_cell, amp, delay, dur):
self.stim = h.IClamp(target_cell.soma[0](0.5))
self.stim.amp = amp
self.stim.delay = delay
self.stim.dur = dur
def record_values(self):
vec = { "v": {}, #define a dictionary.
"t": h.Vector() }
for i, cell in enumerate(self.cells):
vec["v"][int(cell.gid1)]=h.Vector()
vec["v"][int(cell.gid1)].record(cell.soma[0](0.5)._ref_v)
vec["t"].record(h._ref_t)
return vec
def spikerecord(self):
'''
This method duplicates other code. I intend to keep it as a method, and delete the duplicate lines.
'''
h('objref tvec, gidvec')
h('gidvec = new Vector()')
h('tvec = new Vector()')
for cell in self.cells:
            h.pc.spike_record(int(cell.gid1), h.tvec, h.gidvec)
    #TODO use neuroelectro to test cortical pyramidal cells and basket cells before including
    #them in the network.
    #Call a method test_cell inside the make_cells function.
'''
def test_cell(self,d):#celltype='hip_pyr'):
from neuronunit.neuroelectro import NeuroElectroSummary
from neuronunit import neuroelectro
x = neuroelectro.NeuroElectroDataMap()
if 'hippocampus' in d:
summary = NeuroElectroSummary(neuron={'name':'Hippocampus CA1 Pyramidal Cell'},
ephysprop={'name':'spike width'})
observation = summary.get_observation(show=True)
#from neuronunit.tests import SpikeWidthTest
        #ca1_pyramidal_spike_width_test=SpikeWidthTest(observation=observation)
#Does not work due to problem with elephant.
#Note elephant requires pre-release version of neo.
pass
if 'neocortex' in d:
x.set_neuron(nlex_id='sao2128417084')
#pass
#x.set_neuron(nlex_id='nifext_152') # neurolex.org ID for 'Amygdala basolateral
# nucleus pyramidal neuron'.
x.set_ephysprop(id=23) # neuroelectro.org ID for 'Spike width'.
#TODO find neurolex.org ID for Vm
pass
#x.get_values() # Gets values for spike width from this paper.
#pdb.set_trace()
#width = x.val # Spike width reported in that paper.
if 'basket' in d:
x.set_neuron(nlex_id='nifext_56')
pass
if 'dg_basket' in d:
x.set_neuron(nlex_id='nlx_cell_100201')
pass
'''
    def my_decorator(self,some_function):
        def wrapper(*args, **kwargs):
            NCELL=self.NCELL
            SIZE=self.COMM.size
            RANK=self.COMM.rank
            pc=h.ParallelContext()
            return some_function(*args, **kwargs)
        return wrapper
#@my_decorator
#makecells()#I want to pass the function makecells as a function to the decorator.
#So, @my_decorator is just an easier way of saying just_some_function = my_decorator(just_some_function).
#It's how you apply a decorator to a function
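# A minimal sketch of the matrix_reduce semantics (added for illustration; the
# 3x3 shape and the per-rank fill values are assumptions, not part of the
# module). It requires this module's own imports (mpi4py, numpy, NEURON) to be
# importable. Run under MPI, e.g.: mpiexec -n 4 python utils.py
if __name__ == '__main__':
    comm = MPI.COMM_WORLD
    local = np.ones((3, 3)) * float(comm.rank)  # rank-specific partial matrix
    total = np.zeros_like(local)
    # element-wise sum of every rank's partial matrix, delivered to rank 0
    comm.Reduce([local, MPI.DOUBLE], [total, MPI.DOUBLE], op=MPI.SUM, root=0)
    if comm.rank == 0:
        print total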
|
russelljjarvis/neurogateway
|
utils.py
|
Python
|
gpl-3.0
| 40,897
|
[
"NEURON"
] |
12f6575df386aeec2472b32899bdd03a7eff85837638ce513d5abd4881988b32
|
# coding: utf-8
# Distributed under the terms of the MIT License.
from .model import AppModel
from ababe.stru.scaffold import ModifiedCell
from ababe.stru.element import Specie
from ababe.io.io import GeneralIO
import os
import numpy as np
class App(AppModel):
def __init__(self, infile, radius):
gcell = GeneralIO.from_file(infile)
self.infile = infile
self.basefname = os.path.basename(infile)
self.mcell = ModifiedCell.from_gcell(gcell)
self.radius = radius
def run(self):
self.mcell.perturb(self.radius)
gcell = self.mcell.to_gcell()
out = GeneralIO(gcell)
ofname = "{:}_PURB.vasp".format(self.basefname.split('.')[0])
print("PROCESSING: {:}".format(self.infile))
out.write_file(ofname)
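# A minimal usage sketch (illustration only; 'POSCAR' and the perturbation
# radius 0.05 are assumed example values, not part of the original module).
# Run via e.g.: python -m ababe.cmdline.apps.atomperturb
if __name__ == '__main__':
    app = App('POSCAR', 0.05)  # read a structure file and set the radius
    app.run()                  # writes a perturbed copy, e.g. POSCAR_PURB.vasp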
|
unkcpz/ababe
|
ababe/cmdline/apps/atomperturb.py
|
Python
|
mit
| 854
|
[
"MCell",
"VASP"
] |
fc6f52b653bdf2c35ab7de7c1f9e064bbd448797dcd802c0c0448631d63c2cd0
|
from __future__ import absolute_import
# #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://etetoolkit.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2015).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration.
# BMC Bioinformatics 2010, 11:24. doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit may be available in the documentation.
#
# More info at http://etetoolkit.org. Contact: huerta@embl.de
#
#
# #END_LICENSE#############################################################
import copy
from .evolevents import EvolEvent
def get_reconciled_tree(node, sptree, events):
""" Returns the recoliation gene tree with a provided species
topology """
if len(node.children) == 2:
        # First visit the children
morphed_childs = []
for ch in node.children:
mc, ev = get_reconciled_tree(ch, sptree, events)
morphed_childs.append(mc)
        # The morphed children are the reconciled children; their topology can
        # be trusted because the tree is visited in recursive post-order.
sp_child_0 = morphed_childs[0].get_species()
sp_child_1 = morphed_childs[1].get_species()
all_species = sp_child_1 | sp_child_0
        # If the children share species (duplicated species), this represents a
        # duplication: check whether both are reconciled to the same species.
if len(sp_child_0 & sp_child_1) > 0:
newnode = copy.deepcopy(node)
newnode.up = None
newnode.children = []
template = _get_expected_topology(sptree, all_species)
# replaces child0 partition on the template
newmorphed0, matchnode = _replace_on_template(template, morphed_childs[0])
# replaces child1 partition on the template
newmorphed1, matchnode = _replace_on_template(template, morphed_childs[1])
newnode.add_child(newmorphed0)
newnode.add_child(newmorphed1)
newnode.add_feature("evoltype", "D")
node.add_feature("evoltype", "D")
e = EvolEvent()
e.etype = "D"
e.inparalogs = node.children[0].get_leaf_names()
e.outparalogs = node.children[1].get_leaf_names()
e.in_seqs = node.children[0].get_leaf_names()
e.out_seqs = node.children[1].get_leaf_names()
events.append(e)
return newnode, events
# Otherwise, we need to reconciliate species at both sides
# into a single partition.
else:
# gets the topology expected by the observed species
template = _get_expected_topology(sptree, all_species)
# replaces child0 partition on the template
template, matchnode = _replace_on_template(template, morphed_childs[0] )
# replaces child1 partition on the template
template, matchnode = _replace_on_template(template, morphed_childs[1])
template.add_feature("evoltype","S")
node.add_feature("evoltype","S")
e = EvolEvent()
e.etype = "S"
e.inparalogs = node.children[0].get_leaf_names()
e.orthologs = node.children[1].get_leaf_names()
e.in_seqs = node.children[0].get_leaf_names()
e.out_seqs = node.children[1].get_leaf_names()
events.append(e)
return template, events
elif len(node.children)==0:
return copy.deepcopy(node), events
else:
raise ValueError("Algorithm can only work with binary trees.")
def _replace_on_template(orig_template, node):
template = copy.deepcopy(orig_template)
    # detect the partition within the topology that matches child1's species
nodespcs = node.get_species()
    spseed = list(nodespcs)[0] # any species name would be OK
# Set an start point
subtopo = template.search_nodes(children=[], name=spseed)[0]
# While subtopo does not cover all child species
while len(nodespcs - set(subtopo.get_leaf_names() ) )>0:
subtopo= subtopo.up
# Puts original partition on the expected topology template
nodecp = copy.deepcopy(node)
if subtopo.up is None:
return nodecp, nodecp
else:
parent = subtopo.up
parent.remove_child(subtopo)
parent.add_child(nodecp)
return template, nodecp
def _get_expected_topology(t, species):
missing_sp = set(species) - set(t.get_leaf_names())
if missing_sp:
raise KeyError("* The following species are not contained in the species tree: "+ ','.join(missing_sp) )
node = t.search_nodes(children=[], name=list(species)[0])[0]
sps = set(species)
while sps-set(node.get_leaf_names()) != set([]):
node = node.up
template = copy.deepcopy(node)
# make get_species() to work
#template._speciesFunction = _get_species_on_TOL
template.set_species_naming_function(_get_species_on_TOL)
template.detach()
for n in [template]+template.get_descendants():
n.add_feature("evoltype","L")
n.dist = 1
return template
def _get_species_on_TOL(name):
return name
def get_reconciled_tree_zmasek(gtree, sptree, inplace=False):
"""
Reconciles the gene tree with the species tree
using Zmasek and Eddy's algorithm. Details can be
found in the paper:
Christian M. Zmasek, Sean R. Eddy: A simple algorithm
to infer gene duplication and speciation events on a
gene tree. Bioinformatics 17(9): 821-828 (2001)
:argument gtree: gene tree (PhyloTree instance)
:argument sptree: species tree (PhyloTree instance)
:argument False inplace: if True, the provided gene tree instance is
modified. Otherwise a reconciled copy of the gene tree is returned.
:returns: reconciled gene tree
"""
# some cleanup operations
def cleanup(tree):
for node in tree.traverse(): node.del_feature("M")
if not inplace:
gtree = gtree.copy('deepcopy')
# check for missing species
missing_sp = gtree.get_species() - sptree.get_species()
if missing_sp:
raise KeyError("* The following species are not contained in the species tree: "+ ', '.join(missing_sp))
# initialization
sp2node = dict()
for node in sptree.get_leaves(): sp2node[node.species] = node
# set/compute the mapping function M(g) for the
# leaf nodes in the gene tree (see paper for details)
species = sptree.get_species()
for node in gtree.get_leaves():
node.add_feature("M",sp2node[node.species])
# visit each internal node in the gene tree
# and detect its event (duplication or speciation)
for node in gtree.traverse(strategy="postorder"):
if len(node.children) == 0:
continue # nothing to do for leaf nodes
if len(node.children) != 2:
cleanup(gtree)
raise ValueError("Algorithm can only work with binary trees.")
lca = node.children[0].M.get_common_ancestor(node.children[1].M) # LCA in the species tree
node.add_feature("M",lca)
node.add_feature("evoltype","S")
if id(node.children[0].M) == id(node.M) or id(node.children[1].M) == id(node.M):
node.evoltype = "D"
cleanup(gtree)
return gtree
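# A minimal usage sketch (illustration only; the newick strings and the
# "GENE_SPECIES"-style leaf naming below are assumptions, not part of the API).
# Run via e.g.: python -m ete3.phylo.reconciliation
if __name__ == '__main__':
    from ete3 import PhyloTree
    gtree = PhyloTree("(Dme_001,(Hsa_001,Ptr_001));")
    sptree = PhyloTree("(Dme,(Hsa,Ptr));")
    # map each gene-tree leaf to its species code (the part before the underscore)
    gtree.set_species_naming_function(lambda name: name.split("_")[0])
    for leaf in sptree:
        leaf.species = leaf.name
    recon = get_reconciled_tree_zmasek(gtree, sptree)
    print(recon.get_ascii(attributes=["name", "evoltype"]))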
|
karrtikr/ete
|
ete3/phylo/reconciliation.py
|
Python
|
gpl-3.0
| 8,169
|
[
"VisIt"
] |
4ee78de4848634c0d3da590a5a0a2fde973182ce2199e827132c3fd5f79afca4
|
W, B = 1, 0
def fill_enclosed(grid):
reached = set()
for r, row in enumerate(grid):
for c, col in enumerate(row):
            if (r in (0, len(grid) - 1) or c in (0, len(grid[0]) - 1)) and col == W and (r, c) not in reached:
                visit(grid, r, c, reached)
    # go through the matrix; any white cell not reached from the border is enclosed, so color it black
    print reached  # debug output
for r, row in enumerate(grid):
for c, col in enumerate(row):
if (r, c) not in reached:
grid[r][c] = B
DIR = [(0, 1), (0, -1), (1, 0), (-1, 0)]
def visit(grid, r, c, reached):
reached.add((r, c))
for a, b in DIR:
n_r, n_c = r + a, c + b
        if 0 <= n_r < len(grid) and 0 <= n_c < len(grid[0]) and (n_r, n_c) not in reached and grid[n_r][n_c] == W:
visit(grid, n_r, n_c, reached)
if __name__ == "__main__":
matrix_1 = [[1, 1, 1, 1],
[0, 0, 0, 0],
[1, 0, 1, 0],
[1, 1, 0, 1],
]
matrix_2 = [[1, 1, 1, 1],
[0, 1, 1, 0],
[1, 1, 1, 0],
]
# flip_dfs(matrix_1, (3, 0))
fill_enclosed(matrix_1)
for i in matrix_1:
print i
# _flip2(matrix_1, 3, 0)
# for i in matrix_1:
# print i
# print search(matrix_2, (2, 0), (0, 3))
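    # Expected result for matrix_1 (an illustration; verify by running this
    # file): only the white cell at (2, 2) is fully fenced in by black cells,
    # so it flips to 0 while every border-reachable white cell stays 1.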
|
misscindy/Interview
|
Graph/19_03_enclosed_region.py
|
Python
|
cc0-1.0
| 1,326
|
[
"VisIt"
] |
b0c0f2aeac3c31be3c3818e19da6ba83488d1b0c1a828d3b8f3f2158fb5dd684
|
"""Galaxy (ansible-galaxy) plugin for integration tests."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import tempfile
from . import (
CloudProvider,
CloudEnvironment,
CloudEnvironmentConfig,
)
from ..util import (
find_executable,
display,
)
from ..docker_util import (
docker_command,
docker_run,
docker_start,
docker_rm,
docker_inspect,
docker_pull,
get_docker_container_id,
get_docker_hostname,
get_docker_container_ip,
get_docker_preferred_network_name,
is_docker_user_defined_network,
)
# We add BasicAuthentication, to make the tasks that deal with
# direct API access easier to deal with across galaxy_ng and pulp
SETTINGS = b'''
CONTENT_ORIGIN = 'http://ansible-ci-pulp:80'
ANSIBLE_API_HOSTNAME = 'http://ansible-ci-pulp:80'
ANSIBLE_CONTENT_HOSTNAME = 'http://ansible-ci-pulp:80/pulp/content'
TOKEN_AUTH_DISABLED = True
GALAXY_REQUIRE_CONTENT_APPROVAL = False
GALAXY_AUTHENTICATION_CLASSES = [
"rest_framework.authentication.SessionAuthentication",
"rest_framework.authentication.TokenAuthentication",
"rest_framework.authentication.BasicAuthentication",
]
'''
SET_ADMIN_PASSWORD = b'''#!/usr/bin/execlineb -S0
foreground {
redirfd -w 1 /dev/null
redirfd -w 2 /dev/null
export DJANGO_SETTINGS_MODULE pulpcore.app.settings
export PULP_CONTENT_ORIGIN localhost
s6-setuidgid postgres
if { /usr/local/bin/django-admin reset-admin-password --password password }
if { /usr/local/bin/pulpcore-manager create-group system:partner-engineers --users admin }
}
'''
# There are 2 overrides here:
# 1. Change the gunicorn bind address from 127.0.0.1 to 0.0.0.0 because Galaxy NG does not allow us to access the
#    Pulp API through it.
# 2. Grant access allowing us to DELETE a namespace in Galaxy NG. This is because CI deletes and recreates repos and
#    distributions in Pulp, which breaks the namespace in Galaxy NG. Recreating it is the "simple" fix to get it
#    working again.
# These may not be needed in the future, especially if 1 becomes configurable by an env var, but for now they must be
# done.
OVERRIDES = b'''#!/usr/bin/execlineb -S0
foreground {
sed -i "0,/\\"127.0.0.1:24817\\"/s//\\"0.0.0.0:24817\\"/" /etc/services.d/pulpcore-api/run
}
# This sed calls changes the first occurrence to "allow" which is conveniently the delete operation for a namespace.
# https://github.com/ansible/galaxy_ng/blob/master/galaxy_ng/app/access_control/statements/standalone.py#L9-L11.
backtick NG_PREFIX { python -c "import galaxy_ng; print(galaxy_ng.__path__[0], end='')" }
importas ng_prefix NG_PREFIX
foreground {
sed -i "0,/\\"effect\\": \\"deny\\"/s//\\"effect\\": \\"allow\\"/" ${ng_prefix}/app/access_control/statements/standalone.py
}'''
class GalaxyProvider(CloudProvider):
"""Galaxy plugin.
Sets up pulp (ansible-galaxy) servers for tests.
The pulp source itself resides at: https://github.com/pulp/pulp-oci-images
"""
def __init__(self, args):
"""
:type args: TestConfig
"""
super(GalaxyProvider, self).__init__(args)
self.pulp = os.environ.get(
'ANSIBLE_PULP_CONTAINER',
'docker.io/pulp/pulp-galaxy-ng@sha256:b79a7be64eff86d8f58db9ca83ed4967bd8b4e45c99addb17a91d11926480cf1'
)
self.containers = []
def filter(self, targets, exclude):
"""Filter out the tests with the necessary config and res unavailable.
:type targets: tuple[TestTarget]
:type exclude: list[str]
"""
docker_cmd = 'docker'
docker = find_executable(docker_cmd, required=False)
if docker:
return
skip = 'cloud/%s/' % self.platform
skipped = [target.name for target in targets if skip in target.aliases]
if skipped:
exclude.append(skip)
display.warning('Excluding tests marked "%s" which require the "%s" command: %s'
% (skip.rstrip('/'), docker_cmd, ', '.join(skipped)))
def setup(self):
"""Setup cloud resource before delegation and reg cleanup callback."""
super(GalaxyProvider, self).setup()
container_id = get_docker_container_id()
p_results = docker_inspect(self.args, 'ansible-ci-pulp')
if p_results and not p_results[0].get('State', {}).get('Running'):
docker_rm(self.args, 'ansible-ci-pulp')
p_results = []
display.info('%s ansible-ci-pulp docker container.'
% ('Using the existing' if p_results else 'Starting a new'),
verbosity=1)
galaxy_port = 80
pulp_port = 24817
if not p_results:
if self.args.docker or container_id:
publish_ports = []
else:
# publish the simulator ports when not running inside docker
publish_ports = [
'-p', ':'.join((str(galaxy_port),) * 2),
'-p', ':'.join((str(pulp_port),) * 2),
]
docker_pull(self.args, self.pulp)
# Create the container, don't run it, we need to inject configs before it starts
stdout, _dummy = docker_run(
self.args,
self.pulp,
['--name', 'ansible-ci-pulp'] + publish_ports,
create_only=True
)
pulp_id = stdout.strip()
injected_files = {
'/etc/pulp/settings.py': SETTINGS,
'/etc/cont-init.d/111-postgres': SET_ADMIN_PASSWORD,
'/etc/cont-init.d/000-ansible-test-overrides': OVERRIDES,
}
for path, content in injected_files.items():
with tempfile.NamedTemporaryFile() as temp_fd:
temp_fd.write(content)
temp_fd.flush()
docker_command(self.args, ['cp', temp_fd.name, '%s:%s' % (pulp_id, path)])
# Start the container
docker_start(self.args, 'ansible-ci-pulp', [])
self.containers.append('ansible-ci-pulp')
if self.args.docker:
pulp_host = 'ansible-ci-pulp'
elif container_id:
pulp_host = self._get_simulator_address('ansible-ci-pulp')
display.info('Found Galaxy simulator container address: %s' % pulp_host, verbosity=1)
else:
pulp_host = get_docker_hostname()
self._set_cloud_config('PULP_HOST', pulp_host)
self._set_cloud_config('PULP_PORT', str(pulp_port))
self._set_cloud_config('GALAXY_PORT', str(galaxy_port))
self._set_cloud_config('PULP_USER', 'admin')
self._set_cloud_config('PULP_PASSWORD', 'password')
def get_docker_run_options(self):
"""Get additional options needed when delegating tests to a container.
:rtype: list[str]
"""
network = get_docker_preferred_network_name(self.args)
if not is_docker_user_defined_network(network):
return ['--link', 'ansible-ci-pulp']
return []
def cleanup(self):
"""Clean up the resource and temporary configs files after tests."""
for container_name in self.containers:
docker_rm(self.args, container_name)
super(GalaxyProvider, self).cleanup()
def _get_simulator_address(self, container_name):
return get_docker_container_ip(self.args, container_name)
class GalaxyEnvironment(CloudEnvironment):
"""Galaxy environment plugin.
Updates integration test environment after delegation.
"""
def get_environment_config(self):
"""
:rtype: CloudEnvironmentConfig
"""
pulp_user = self._get_cloud_config('PULP_USER')
pulp_password = self._get_cloud_config('PULP_PASSWORD')
pulp_host = self._get_cloud_config('PULP_HOST')
galaxy_port = self._get_cloud_config('GALAXY_PORT')
pulp_port = self._get_cloud_config('PULP_PORT')
return CloudEnvironmentConfig(
ansible_vars=dict(
pulp_user=pulp_user,
pulp_password=pulp_password,
pulp_v2_server='http://%s:%s/pulp_ansible/galaxy/published/api/' % (pulp_host, pulp_port),
pulp_v3_server='http://%s:%s/pulp_ansible/galaxy/published/api/' % (pulp_host, pulp_port),
pulp_api='http://%s:%s' % (pulp_host, pulp_port),
galaxy_ng_server='http://%s:%s/api/galaxy/' % (pulp_host, galaxy_port),
),
env_vars=dict(
PULP_USER=pulp_user,
PULP_PASSWORD=pulp_password,
PULP_V2_SERVER='http://%s:%s/pulp_ansible/galaxy/published/api/' % (pulp_host, pulp_port),
PULP_V3_SERVER='http://%s:%s/pulp_ansible/galaxy/published/api/' % (pulp_host, pulp_port),
GALAXY_NG_SERVER='http://%s:%s/api/galaxy/' % (pulp_host, galaxy_port),
),
)
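# Endpoint layout sketch (comment-only illustration; 'localhost' is an assumed
# host, 80/24817 are this provider's default galaxy/pulp ports):
#
#   pulp api:     http://localhost:24817
#   galaxy v2/v3: http://localhost:24817/pulp_ansible/galaxy/published/api/
#   galaxy ng:    http://localhost:80/api/galaxy/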
|
agaffney/ansible
|
test/lib/ansible_test/_internal/cloud/galaxy.py
|
Python
|
gpl-3.0
| 9,011
|
[
"Galaxy"
] |
e56e64cf71408e05665f1db2fbcb2322f539416690ae94093b9e3d846c802def
|
"""
Tests GeoTsRepositoryCollection
"""
from __future__ import print_function
from __future__ import absolute_import
from os import path
# import random
import unittest
# import numpy as np
from shyft import api
from shyft import shyftdata_dir
from shyft.repository.geo_ts_repository_collection import GeoTsRepositoryCollection
from shyft.repository.geo_ts_repository_collection import GeoTsRepositoryCollectionError
from shyft.repository.netcdf import AromeDataRepository
from shyft.repository.netcdf import AromeDataRepositoryError
class GeoTsRepositoryCollectionTestCase(unittest.TestCase):
@property
def arome_epsg_bbox(self):
"""A slice of test-data located in shyft-data repository/arome."""
EPSG = 32632
x0 = 436100.0 # lower left
        y0 = 6823000.0  # lower left
nx = 74
ny = 24
dx = 1000.0
dy = 1000.0
return EPSG, ([x0, x0 + nx * dx, x0 + nx * dx, x0], [y0, y0, y0 + ny * dy, y0 + ny * dy])
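    # The tests below exercise GeoTsRepositoryCollection's two reduce modes:
    # the default "replace" merges sources from the listed repositories, while
    # "add" is not yet supported for these queries and must raise
    # GeoTsRepositoryCollectionError ("Only replace is supported yet").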
def test_get_timeseries_collection(self):
tc= api.YMDhms(2015, 8, 24, 6)
n_hours = 30
dt = api.deltahours(1)
utc = api.Calendar() # No offset gives Utc
t0 = utc.time(tc)
period = api.UtcPeriod(t0, t0 + api.deltahours(n_hours))
date_str = "{}{:02}{:02}_{:02}".format(tc.year, tc.month, tc.day, tc.hour)
epsg, bbox = self.arome_epsg_bbox
base_dir = path.join(shyftdata_dir, "repository", "arome_data_repository")
f1 = "arome_metcoop_red_default2_5km_{}.nc".format(date_str)
f2 = "arome_metcoop_red_test2_5km_{}.nc".format(date_str)
ar1 = AromeDataRepository(epsg, base_dir, filename=f1, allow_subset=True)
ar2 = AromeDataRepository(epsg, base_dir, filename=f2, elevation_file=f1, allow_subset=True)
geo_ts_repository = GeoTsRepositoryCollection([ar1, ar2])
sources = geo_ts_repository.get_timeseries(("temperature", "radiation"),
period, geo_location_criteria=bbox)
with self.assertRaises(GeoTsRepositoryCollectionError) as context:
GeoTsRepositoryCollection([ar1, ar2], reduce_type="foo")
geo_ts_repository = GeoTsRepositoryCollection([ar1, ar2], reduce_type="add")
with self.assertRaises(GeoTsRepositoryCollectionError) as context:
sources = geo_ts_repository.get_timeseries(("temperature", "radiation"),
period, geo_location_criteria=bbox)
def test_get_forecast_collection(self):
n_hours = 30
dt = api.deltahours(1)
utc = api.Calendar() # No offset gives Utc
tc = api.YMDhms(2015, 8, 24, 6)
t0 = utc.time(tc)
period = api.UtcPeriod(t0, t0 + api.deltahours(n_hours))
date_str = "{}{:02}{:02}_{:02}".format(tc.year, tc.month, tc.day, tc.hour)
epsg, bbox = self.arome_epsg_bbox
base_dir = path.join(shyftdata_dir, "repository", "arome_data_repository")
f1 = "arome_metcoop_red_default2_5km_{}.nc".format(date_str)
f2 = "arome_metcoop_red_test2_5km_{}.nc".format(date_str)
ar1 = AromeDataRepository(epsg, base_dir, filename=f1, allow_subset=True)
ar2 = AromeDataRepository(epsg, base_dir, filename=f2, elevation_file=f1, allow_subset=True)
geo_ts_repository = GeoTsRepositoryCollection([ar1, ar2])
source_names = ("temperature", "radiation")
sources = geo_ts_repository.get_forecast(source_names, period, t0,
geo_location_criteria=bbox)
self.assertTrue(all([x in source_names for x in sources]))
geo_ts_repository = GeoTsRepositoryCollection([ar1, ar2], reduce_type="add")
with self.assertRaises(GeoTsRepositoryCollectionError) as context:
sources = geo_ts_repository.get_forecast(("temperature", "radiation"),
period, t0, geo_location_criteria=bbox)
def test_get_ensemble_forecast_collection(self):
EPSG = 32633
upper_left_x = 436100.0
upper_left_y = 7417800.0
nx = 74
ny = 94
dx = 1000.0
dy = 1000.0
t0 = api.YMDhms(2015, 7, 26, 0)
n_hours = 30
utc = api.Calendar() # No offset gives Utc
period = api.UtcPeriod(utc.time(t0), utc.time(t0) + api.deltahours(n_hours))
t_c = utc.time(t0) + api.deltahours(1)
base_dir = path.join(shyftdata_dir, "netcdf", "arome")
pattern = "fc2015072600.nc"
bbox = ([upper_left_x, upper_left_x + nx * dx,
upper_left_x + nx * dx, upper_left_x],
[upper_left_y, upper_left_y,
upper_left_y - ny * dy, upper_left_y - ny * dy])
try:
ar1 = AromeDataRepository(EPSG, base_dir, filename=pattern, bounding_box=bbox)
ar2 = AromeDataRepository(EPSG, base_dir, filename=pattern, bounding_box=bbox)
repos = GeoTsRepositoryCollection([ar1, ar2])
data_names = ("temperature", "wind_speed", "relative_humidity")
ensemble = repos.get_forecast_ensemble(data_names, period, t_c, None)
self.assertTrue(isinstance(ensemble, list))
self.assertEqual(len(ensemble), 10)
with self.assertRaises(GeoTsRepositoryCollectionError) as context:
repos = GeoTsRepositoryCollection([ar1, ar2], reduce_type="add")
repos.get_forecast_ensemble(data_names, period, t_c, None)
self.assertEqual("Only replace is supported yet", context.exception.args[0])
except AromeDataRepositoryError as adre:
self.skipTest("(test inconclusive- missing arome-data {0})".format(adre))
if __name__ == '__main__':
unittest.main()
|
felixmatt/shyft
|
shyft/tests/test_geo_ts_repository_collection.py
|
Python
|
lgpl-3.0
| 5,798
|
[
"NetCDF"
] |
29f44b47dfe48ea1d26d155fae00aa5d65268b22e9513fc77a901b23f886e7f5
|
# -*- coding: utf-8 -*-
#
# Author: Sam Rushing <rushing@nightmare.com>
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
"""
.. module:: astm.asynclib
:synopsis: Forked version of asyncore mixed with asynchat.
.. moduleauthor:: Sam Rushing <rushing@nightmare.com>
.. sectionauthor:: Christopher Petrilli <petrilli@amber.org>
.. sectionauthor:: Steve Holden <sholden@holdenweb.com>
.. heavily adapted from original documentation by Sam Rushing
"""
import heapq
import logging
import os
import select
import socket
import sys
import time
from collections import deque
from errno import (
EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, EINVAL,
ENOTCONN, ESHUTDOWN, EINTR, EISCONN, EBADF, ECONNABORTED, EPIPE, EAGAIN,
errorcode
)
from .compat import long, b, bytes, buffer
class ExitNow(Exception):
pass
_DISCONNECTED = frozenset((ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED, EPIPE,
EBADF))
_RERAISEABLE_EXC = (ExitNow, KeyboardInterrupt, SystemExit)
_SOCKET_MAP = {}
_SCHEDULED_TASKS = []
log = logging.getLogger(__name__)
def _strerror(err):
try:
return os.strerror(err)
except (ValueError, OverflowError, NameError):
if err in errorcode:
return errorcode[err]
return "Unknown error %s" % err
def read(obj):
"""Triggers ``handle_read_event`` for specified object."""
try:
obj.handle_read_event()
except _RERAISEABLE_EXC:
raise
except Exception:
obj.handle_error()
def write(obj):
"""Triggers ``handle_write_event`` for specified object."""
try:
obj.handle_write_event()
except _RERAISEABLE_EXC:
raise
except Exception:
obj.handle_error()
def exception(obj):
"""Triggers ``handle_exception_event`` for specified object."""
try:
obj.handle_exception_event()
except _RERAISEABLE_EXC:
raise
except Exception:
obj.handle_error()
def readwrite(obj, flags):
try:
if flags & select.POLLIN:
obj.handle_read_event()
if flags & select.POLLOUT:
obj.handle_write_event()
if flags & select.POLLPRI:
obj.handle_exception_event()
if flags & (select.POLLHUP | select.POLLERR | select.POLLNVAL):
obj.handle_close()
except socket.error as e:
if e.args[0] not in _DISCONNECTED:
obj.handle_error()
else:
obj.handle_close()
except _RERAISEABLE_EXC:
raise
except Exception:
obj.handle_error()
def poll(timeout=0.0, map=None):
if map is None:
map = _SOCKET_MAP
if map:
r = []; w = []; e = []
for fd, obj in map.items():
is_r = obj.readable()
is_w = obj.writable()
if is_r:
r.append(fd)
# accepting sockets should not be writable
if is_w and not obj.accepting:
w.append(fd)
if is_r or is_w:
e.append(fd)
if [] == r == w == e:
time.sleep(timeout)
return
try:
r, w, e = select.select(r, w, e, timeout)
except select.error as err:
if err.args[0] != EINTR:
raise
else:
return
for fd in r:
obj = map.get(fd)
if obj is None:
continue
read(obj)
for fd in w:
obj = map.get(fd)
if obj is None:
continue
write(obj)
for fd in e:
obj = map.get(fd)
if obj is None:
continue
exception(obj)
def scheduler(tasks=None):
if tasks is None:
tasks = _SCHEDULED_TASKS
now = time.time()
while tasks and now >= tasks[0].timeout:
call = heapq.heappop(tasks)
if call.repush:
heapq.heappush(tasks, call)
call.repush = False
continue
try:
call.call()
finally:
if not call.cancelled:
call.cancel()
def loop(timeout=30.0, map=None, tasks=None, count=None):
"""
Enter a polling loop that terminates after count passes or all open
channels have been closed. All arguments are optional. The *count*
parameter defaults to None, resulting in the loop terminating only when all
channels have been closed. The *timeout* argument sets the timeout
parameter for the appropriate :func:`select` or :func:`poll` call, measured
in seconds; the default is 30 seconds. The *use_poll* parameter, if true,
indicates that :func:`poll` should be used in preference to :func:`select`
(the default is ``False``).
The *map* parameter is a dictionary whose items are the channels to watch.
As channels are closed they are deleted from their map. If *map* is
omitted, a global map is used. Channels (instances of
:class:`asyncore.dispatcher`, :class:`asynchat.async_chat` and subclasses
thereof) can freely be mixed in the map.
"""
if map is None:
map = _SOCKET_MAP
if tasks is None:
tasks = _SCHEDULED_TASKS
if count is None:
while map or tasks:
if map:
poll(timeout, map)
if tasks:
scheduler()
else:
while (map or tasks) and count > 0:
if map:
poll(timeout, map)
if tasks:
scheduler()
count -= 1
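# A minimal driving sketch (names are illustrative): create one or more
# channels, optionally schedule tasks, then run the loop until done.
#
#     channel = SomeDispatcherSubclass()       # hypothetical subclass
#     call_later(30.0, channel.handle_close)   # e.g. a watchdog timeout
#     loop(timeout=1.0)                        # returns once map and tasks are empty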
class call_later:
"""Calls a function at a later time.
It can be used to asynchronously schedule a call within the polling
loop without blocking it. The instance returned is an object that
can be used to cancel or reschedule the call.
"""
def __init__(self, seconds, target, *args, **kwargs):
"""
- seconds: the number of seconds to wait
- target: the callable object to call later
- args: the arguments to call it with
- kwargs: the keyword arguments to call it with
- _tasks: a reserved keyword to specify a different list to
store the delayed call instances.
"""
assert callable(target), "%s is not callable" % target
assert seconds >= 0, \
"%s is not greater than or equal to 0 seconds" % (seconds)
self.__delay = seconds
self.__target = target
self.__args = args
# pop the reserved keyword first so it is never forwarded to the target
self.__tasks = kwargs.pop('_tasks', _SCHEDULED_TASKS)
self.__kwargs = kwargs
# seconds from the epoch at which to call the function
self.timeout = time.time() + self.__delay
self.repush = False
self.cancelled = False
heapq.heappush(self.__tasks, self)
def __lt__(self, other):
return self.timeout <= other.timeout
def call(self):
"""Call this scheduled function."""
assert not self.cancelled, "Already cancelled"
self.__target(*self.__args, **self.__kwargs)
def reset(self):
"""Reschedule this call resetting the current countdown."""
assert not self.cancelled, "Already cancelled"
self.timeout = time.time() + self.__delay
self.repush = True
def delay(self, seconds):
"""Reschedule this call for a later time."""
assert not self.cancelled, "Already cancelled."
assert seconds >= 0, \
"%s is not greater than or equal to 0 seconds" % (seconds)
self.__delay = seconds
newtime = time.time() + self.__delay
if newtime > self.timeout:
self.timeout = newtime
self.repush = True
else:
# XXX - slow, can be improved
self.timeout = newtime
heapq.heapify(self.__tasks)
def cancel(self):
"""Unschedule this call."""
assert not self.cancelled, "Already cancelled"
self.cancelled = True
del self.__target, self.__args, self.__kwargs
if self in self.__tasks:
pos = self.__tasks.index(self)
if pos == 0:
heapq.heappop(self.__tasks)
elif pos == len(self.__tasks) - 1:
self.__tasks.pop(pos)
else:
self.__tasks[pos] = self.__tasks.pop()
heapq._siftup(self.__tasks, pos)
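# Usage sketch for ``call_later`` (the callback is illustrative):
#
#     def keepalive():
#         print('ping')
#
#     task = call_later(5.0, keepalive)   # fire once, ~5 seconds from now
#     task.reset()                        # restart the 5 second countdown
#     task.delay(10.0)                    # or reschedule further out
#     task.cancel()                       # or drop it from the task heap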
class Dispatcher(object):
"""
The :class:`Dispatcher` class is a thin wrapper around a low-level socket
object. To make it more useful, it has a few methods for event-handling
which are called from the asynchronous loop. Otherwise, it can be treated
as a normal non-blocking socket object.
The firing of low-level events at certain times or in certain connection
states tells the asynchronous loop that certain higher-level events have
taken place. For example, if we have asked for a socket to connect to
another host, we know that the connection has been made when the socket
becomes writable for the first time (at this point you know that you may
write to it with the expectation of success). The implied higher-level
events are:
+----------------------+----------------------------------------+
| Event | Description |
+======================+========================================+
| ``handle_connect()`` | Implied by the first read or write |
| | event |
+----------------------+----------------------------------------+
| ``handle_close()`` | Implied by a read event with no data |
| | available |
+----------------------+----------------------------------------+
| ``handle_accept()`` | Implied by a read event on a listening |
| | socket |
+----------------------+----------------------------------------+
During asynchronous processing, each mapped channel's :meth:`readable` and
:meth:`writable` methods are used to determine whether the channel's socket
should be added to the list of channels :c:func:`select`\ ed or
:c:func:`poll`\ ed for read and write events.
"""
connected = False
accepting = False
addr = None
def __init__(self, sock=None, map=None):
if map is None:
self._map = _SOCKET_MAP
else:
self._map = map
self._fileno = None
if sock:
# Set to nonblocking just to make sure for cases where we
# get a socket from a blocking source.
sock.setblocking(0)
self.set_socket(sock, map)
self.connected = True
# The constructor no longer requires that the socket
# passed be connected.
try:
self.addr = sock.getpeername()
except socket.error as err:
if err.args[0] == ENOTCONN:
# To handle the case where we got an unconnected
# socket.
self.connected = False
else:
# The socket is broken in some unknown way, alert
# the user and remove it from the map (to prevent
# polling of broken sockets).
self._del_channel(map)
raise
else:
self.socket = None
def __repr__(self):
status = [self.__class__.__module__ + '.' + self.__class__.__name__]
if self.accepting and self.addr:
status.append('listening')
elif self.connected:
status.append('connected')
if self.addr is not None:
try:
status.append('%s:%d' % self.addr)
except TypeError:
status.append(repr(self.addr))
return '<%s at %#x>' % (' '.join(status), id(self))
__str__ = __repr__
def _add_channel(self, map=None):
log.debug('Adding channel %s' % self)
if map is None:
map = self._map
map[self._fileno] = self
def _del_channel(self, map=None):
fd = self._fileno
if map is None:
map = self._map
if fd in map:
log.debug('Closing channel %d:%s' % (fd, self))
del map[fd]
self._fileno = None
def create_socket(self, family, type):
"""
This is identical to the creation of a normal socket, and will use
the same options for creation. Refer to the :mod:`socket` documentation
for information on creating sockets.
"""
self.family_and_type = family, type
sock = socket.socket(family, type)
sock.setblocking(0)
self.set_socket(sock)
def set_socket(self, sock, map=None):
self.socket = sock
self._fileno = sock.fileno()
self._add_channel(map)
def set_reuse_addr(self):
try:
self.socket.setsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR,
self.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR) | 1
)
except socket.error:
pass
def readable(self):
"""
Called each time around the asynchronous loop to determine whether a
channel's socket should be added to the list on which read events can
occur. The default method simply returns ``True``, indicating that by
default, all channels will be interested in read events."""
return True
def writable(self):
"""
Called each time around the asynchronous loop to determine whether a
channel's socket should be added to the list on which write events can
occur. The default method simply returns ``True``, indicating that by
default, all channels will be interested in write events.
"""
return True
def listen(self, num):
"""Listen for connections made to the socket.
The `num` argument specifies the maximum number of queued connections
and should be at least 1; the maximum value is system-dependent
(usually 5)."""
self.accepting = True
if os.name == 'nt' and num > 5:
num = 5
return self.socket.listen(num)
def bind(self, address):
"""Bind the socket to `address`.
The socket must not already be bound. The format of `address` depends
on the address family --- refer to the :mod:`socket` documentation for
more information. To mark the socket as re-usable (setting the
:const:`SO_REUSEADDR` option), call the :class:`Dispatcher` object's
:meth:`set_reuse_addr` method.
"""
self.addr = address
return self.socket.bind(address)
def connect(self, address):
"""
As with the normal socket object, `address` is a tuple with the first
element the host to connect to, and the second the port number.
"""
self.connected = False
self.addr = address
err = self.socket.connect_ex(address)
if err in (EINPROGRESS, EALREADY, EWOULDBLOCK)\
or err == EINVAL and os.name in ('nt', 'ce'):
return
if err in (0, EISCONN):
self.handle_connect_event()
else:
raise socket.error(err, errorcode[err])
def accept(self):
"""Accept a connection.
The socket must be bound to an address and listening for connections.
The return value can be either ``None`` or a pair ``(conn, address)``
where `conn` is a *new* socket object usable to send and receive data on
the connection, and *address* is the address bound to the socket on the
other end of the connection.
When ``None`` is returned it means the connection didn't take place, in
which case the server should just ignore this event and keep listening
for further incoming connections.
"""
try:
conn, addr = self.socket.accept()
except TypeError:
return None
except socket.error as err:
if err.args[0] in (EWOULDBLOCK, ECONNABORTED, EAGAIN):
return None
else:
raise
else:
return conn, addr
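# The canonical pattern in a listening channel's ``handle_accept`` looks
# like this (``SomeChannel`` is a hypothetical Dispatcher/AsyncChat subclass):
#
#     def handle_accept(self):
#         pair = self.accept()
#         if pair is not None:
#             conn, addr = pair
#             SomeChannel(conn)   # wrap the new connection in a channel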
def send(self, data):
"""Send `data` to the remote end-point of the socket."""
try:
log.debug('[%s:%d] <<< %r', self.addr[0], self.addr[1], str(data))
result = self.socket.send(data)
return result
except socket.error as err:
if err.args[0] == EWOULDBLOCK:
return 0
elif err.args[0] in _DISCONNECTED:
self.handle_close()
return 0
else:
raise
def recv(self, buffer_size):
"""Read at most `buffer_size` bytes from the socket's remote end-point.
An empty string implies that the channel has been closed from the other
end.
"""
try:
data = self.socket.recv(buffer_size)
log.debug('[%s:%d] >>> %r', self.addr[0], self.addr[1], data)
if not data:
# a closed connection is indicated by signaling
# a read condition, and having recv() return 0.
self.handle_close()
return b''
else:
return data
except socket.error as err:
# winsock sometimes throws ENOTCONN
if err.args[0] in _DISCONNECTED:
self.handle_close()
return b''
else:
raise
def close(self):
"""Close the socket.
All future operations on the socket object will fail.
The remote end-point will receive no more data (after queued data is
flushed). Sockets are automatically closed when they are
garbage-collected.
"""
self.connected = False
self.accepting = False
self._del_channel()
try:
self.socket.close()
except socket.error as err:
if err.args[0] not in (ENOTCONN, EBADF):
raise
def handle_read_event(self):
if self.accepting:
# accepting sockets are never connected, they "spawn" new
# sockets that are connected
self.handle_accept()
elif not self.connected:
self.handle_connect_event()
self.handle_read()
else:
self.handle_read()
def handle_connect_event(self):
err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
raise socket.error(err, _strerror(err))
self.handle_connect()
self.connected = True
def handle_write_event(self):
if self.accepting:
# Accepting sockets shouldn't get a write event.
# We will pretend it didn't happen.
return
if not self.connected:
#check for errors
err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
raise socket.error(err, _strerror(err))
self.handle_connect_event()
self.handle_write()
def handle_exception_event(self):
# handle_exception_event() is called if there might be an error on the
# socket, or if there is OOB data
# check for the error condition first
err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
# we can get here when select.select() says that there is an
# exceptional condition on the socket
# since there is an error, we'll go ahead and close the socket
# like we would in a subclassed handle_read() that received no
# data
self.handle_close()
else:
self.handle_exception()
def handle_error(self):
"""
Called when an exception is raised and not otherwise handled.
The default version prints a condensed traceback.
"""
try:
self_repr = repr(self)
except Exception:
self_repr = '<__repr__(self) failed for object at %0x>' % id(self)
log.exception('Uncaught Python exception, closing channel %s',
self_repr)
self.handle_close()
def handle_exception(self):
log.exception('Unknown error')
def handle_read(self):
log.debug('Unhandled read event')
def handle_write(self):
"""
Called when the asynchronous loop detects that a writable socket can be
written. Often this method will implement the necessary buffering for
performance. For example::
def handle_write(self):
sent = self.send(self.buffer)
self.buffer = self.buffer[sent:]
"""
log.debug('Unhandled write event')
def handle_connect(self):
"""
Called when the active opener's socket actually makes a connection.
Might send a "welcome" banner, or initiate a protocol negotiation with
the remote endpoint, for example.
"""
log.info('[%s:%d] Connection established', self.addr[0], self.addr[1])
def handle_accept(self):
"""
Called on listening channels (passive openers) when a connection can be
established with a new remote endpoint that has issued a :meth:`connect`
call for the local endpoint.
"""
log.info('[%s:%d] Connection accepted', self.addr[0], self.addr[1])
def handle_close(self):
"""Called when the socket is closed."""
log.info('[%s:%d] Connection closed', self.addr[0], self.addr[1])
self.close()
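# A minimal Dispatcher subclass sketch: a client that connects, sends one
# request and logs whatever comes back (host, port and protocol are
# illustrative, not part of this module's API):
#
#     class OneShotClient(Dispatcher):
#         def __init__(self, host, port):
#             Dispatcher.__init__(self)
#             self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
#             self.connect((host, port))
#         def handle_connect(self):
#             self.send(b'PING\r\n')
#         def handle_read(self):
#             log.info('got %r', self.recv(4096))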
def close_all(map=None, tasks=None, ignore_all=False):
if map is None:
map = _SOCKET_MAP
if tasks is None:
tasks = _SCHEDULED_TASKS
for x in list(map.values()):
try:
x.close()
except OSError as err:
if err.args[0] == EBADF:
pass
elif not ignore_all:
raise
except _RERAISEABLE_EXC:
raise
except Exception:
if not ignore_all:
raise
map.clear()
for x in tasks:
try:
x.cancel()
except _RERAISEABLE_EXC:
raise
except Exception:
if not ignore_all:
raise
del tasks[:]
class AsyncChat(Dispatcher):
"""
This class is an abstract subclass of :class:`Dispatcher`. To make
practical use of the code you must subclass :class:`AsyncChat`, providing
a meaningful :meth:`found_terminator` method.
The :class:`Dispatcher` methods can be used, although not all make
sense in a message/response context.
Like :class:`Dispatcher`, :class:`AsyncChat` defines a set of
events that are generated by an analysis of socket conditions after a
:c:func:`select` call. Once the polling loop has been started the
:class:`AsyncChat` object's methods are called by the event-processing
framework with no action on the part of the programmer.
"""
# these are overridable defaults
#: The asynchronous input buffer size.
recv_buffer_size = 4096
#: The asynchronous output buffer size.
send_buffer_size = 4096
#: Encoding usage is not enabled by default, because that is a
#: sign of an application bug that we don't want to pass silently.
use_encoding = False
#: Default encoding.
encoding = 'latin-1'
#: Remove terminator from the result data.
strip_terminator = True
_terminator = None
def __init__(self, sock=None, map=None):
# for string terminator matching
self._input_buffer = b''
self.inbox = deque()
self.outbox = deque()
super(AsyncChat, self).__init__(sock, map)
self.collect_incoming_data = self.pull
self.initiate_send = self.flush
def pull(self, data):
"""Puts `data` into incoming queue. Also available by alias
`collect_incoming_data`.
"""
self.inbox.append(data)
def found_terminator(self):
"""
Called when the incoming data stream matches the :attr:`terminator`
condition. The default method, which must be overridden, raises a
:exc:`NotImplementedError` exception. The buffered input data should be
available via an instance attribute.
"""
raise NotImplementedError("must be implemented in subclass")
def _set_terminator(self, term):
self._terminator = term
def _get_terminator(self):
return self._terminator
#: The input delimiter and the terminating condition to be recognized on the
#: channel. May be any of three types of value, corresponding to three
#: different ways to handle incoming protocol data.
#:
#: +-----------+---------------------------------------------+
#: | term | Description |
#: +===========+=============================================+
#: | *string* | Will call :meth:`found_terminator` when the |
#: | | string is found in the input stream |
#: +-----------+---------------------------------------------+
#: | *integer* | Will call :meth:`found_terminator` when the |
#: | | indicated number of characters have been |
#: | | received |
#: +-----------+---------------------------------------------+
#: | ``None`` | The channel continues to collect data |
#: | | forever |
#: +-----------+---------------------------------------------+
#:
#: Note that any data following the terminator will be available for reading
#: by the channel after :meth:`found_terminator` is called.
terminator = property(_get_terminator, _set_terminator)
def handle_read(self):
try:
data = self.recv(self.recv_buffer_size)
except socket.error as err:
self.handle_error()
return
if self.use_encoding and not isinstance(data, bytes):
data = data.decode(self.encoding)
self._input_buffer += data
while self._input_buffer:
terminator = self.terminator
if not terminator:
handler = self._lookup_none_terminator
elif isinstance(terminator, (int, long)):
handler = self._lookup_int_terminator
elif isinstance(terminator, str):
handler = self._lookup_str_terminator
else:
handler = self._lookup_list_terminator
res = handler(self.terminator)
if res is None:
break
def _lookup_none_terminator(self, terminator):
self.pull(self._input_buffer)
self._input_buffer = b''
return False
def _lookup_int_terminator(self, terminator):
if len(self._input_buffer) < terminator:
self.pull(self._input_buffer)
self._input_buffer = b''
return False
else:
self.pull(self._input_buffer[:terminator])
self._input_buffer = self._input_buffer[terminator:]
self.found_terminator()
return True
def _lookup_list_terminator(self, terminator):
for item in terminator:
if self._input_buffer.find(item) != -1:
return self._lookup_str_terminator(item)
return self._lookup_none_terminator(terminator)
def _lookup_str_terminator(self, terminator):
# 3 cases:
# 1) end of buffer matches terminator exactly:
# collect data, transition
# 2) end of buffer matches some prefix:
# collect data to the prefix
# 3) end of buffer does not match any prefix:
# collect data
terminator_len = len(terminator)
index = self._input_buffer.find(terminator)
if index != -1:
# we found the terminator
if self.strip_terminator and index > 0:
self.pull(self._input_buffer[:index])
elif not self.strip_terminator:
self.pull(self._input_buffer[:index+terminator_len])
self._input_buffer = self._input_buffer[index+terminator_len:]
# This does the Right Thing if the terminator is changed here.
self.found_terminator()
return True
else:
# check for a prefix of the terminator
index = find_prefix_at_end(self._input_buffer, terminator)
if index:
if index != len(self._input_buffer):
# we found a prefix, collect up to the prefix
self.pull(self._input_buffer[:-index])
self._input_buffer = self._input_buffer[-index:]
return None
else:
# no prefix, collect it all
self.pull(self._input_buffer)
self._input_buffer = b''
return False
def handle_write(self):
self.flush()
def push(self, data):
"""
Pushes data onto the channel's FIFO to ensure its transmission.
This is all you need to do to have the channel write the data out to
the network.
"""
sabs = self.send_buffer_size
if len(data) > sabs:
for i in range(0, len(data), sabs):
self.outbox.append(data[i:i+sabs])
else:
self.outbox.append(data)
return self.flush()
def push_with_producer(self, producer):
self.outbox.append(producer)
return self.flush()
def readable(self):
"""Predicate for inclusion in the readable for select()"""
return True
def writable(self):
"""Predicate for inclusion in the writable for select()"""
# For nonblocking sockets connect() will not set self.connected flag,
# due to EINPROGRESS socket error which is actually promise for
# successful connection.
return bool(self.outbox or not self.connected)
def close_when_done(self):
"""Automatically close this channel once the outgoing queue is empty."""
self.outbox.append(None)
def flush(self):
"""Sends all data from outgoing queue."""
while self.outbox and self.connected:
self._send_chunky(self.outbox.popleft())
def _send_chunky(self, data):
"""Sends data as chunks sized by ``send_buffer_size`` value.
Returns ``True`` on success, ``False`` on error and ``None`` on closing
event.
"""
if self.use_encoding and not isinstance(data, bytes):
data = data.encode(self.encoding)
while True:
if data is None:
self.handle_close()
return
obs = self.send_buffer_size
bdata = buffer(data, 0, obs)
try:
num_sent = self.send(bdata)
except socket.error:
self.handle_error()
return False
if num_sent and num_sent < len(bdata) or obs < len(data):
data = data[num_sent:]
else:
return True
def discard_buffers(self):
"""In emergencies this method will discard any data held in the input
and output buffers."""
self.discard_input_buffers()
self.discard_output_buffers()
def discard_input_buffers(self):
self._input_buffer = b('')
self.inbox.clear()
def discard_output_buffers(self):
self.outbox.clear()
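# A minimal AsyncChat subclass sketch showing the terminator machinery
# (the line-based protocol here is illustrative):
#
#     class LineEcho(AsyncChat):
#         def __init__(self, sock=None):
#             AsyncChat.__init__(self, sock)
#             self.terminator = '\r\n'        # collect one line at a time
#         def found_terminator(self):
#             line = b''.join(self.inbox)     # everything pulled so far
#             self.inbox.clear()
#             self.push(line + b'\r\n')       # echo the line back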
def find_prefix_at_end(haystack, needle):
l = len(needle) - 1
while l and not haystack.endswith(needle[:l]):
l -= 1
return l
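# e.g. find_prefix_at_end(b'hello\r', b'\r\n') == 1: the buffer ends with a
# proper prefix of the terminator, so the channel holds that byte back until
# the next chunk can show whether it completes the terminator.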
|
eddiep1101/python-astm
|
build/lib/astm/asynclib.py
|
Python
|
bsd-3-clause
| 32,188
|
[
"Amber"
] |
9d66837527c5dd05f3b22bd0fd7152f99005b0e4806fe1ddc94484715b47d3f5
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, as shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'Palo Alto Networks Ansible Galaxy Role'
copyright = '2019, Palo Alto Networks'
author = 'Palo Alto Networks'
# The full version, including alpha/beta/rc tags
release = '2.1.0'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'recommonmark',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
def setup(app):
app.add_stylesheet('css/partial_theme.css')
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# Leave this undefined to use the RTD default theme.
#html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
|
PaloAltoNetworks-BD/ansible-pan
|
docs/conf.py
|
Python
|
isc
| 2,088
|
[
"Galaxy"
] |
216e44a1be2501ec67a2339b6c5eed98aa6cb0d1f226959923893ae445d98765
|
'''
Script to mosaic TopoWx tiles into single CONUS-wide netCDF files
'''
from twx.interp import TileMosaic
from twx.utils import TwxConfig, mkdir_p
import fnmatch
import numpy as np
import os
if __name__ == '__main__':
np.seterr(all='raise')
np.seterr(under='ignore')
twx_cfg = TwxConfig(os.getenv('TOPOWX_INI'))
fpath_mask = os.path.join(twx_cfg.path_predictor_rasters, 'mask.nc')
tile_names = fnmatch.filter(os.listdir(twx_cfg.path_tile_out),
"h[0-9][0-9]v[0-9][0-9]")
elems = ['tmin', 'tmax']
mtwx = TileMosaic(fpath_mask, tile_size_y=250, tile_size_x=250,
chk_size_y=50, chk_size_x=50)
# Create normals mosaics
print "Creating normals mosaics..."
for a_elem in elems:
fpath_mosaic_out = os.path.join(twx_cfg.path_mosaic_norms,
'normals_%s.nc'%a_elem)
print "Mosaicing %d %s tiles to: %s" % (len(tile_names), a_elem, fpath_mosaic_out)
mtwx.create_normals_mosaic(tile_names, a_elem, twx_cfg.path_tile_out,
fpath_mosaic_out, twx_cfg.twx_data_version)
# Create daily mosaics
print "Creating daily mosaics..."
for a_elem in elems:
path_mosaic_out = os.path.join(twx_cfg.path_mosaic_daily, a_elem)
mkdir_p(path_mosaic_out)
mtwx.create_dly_ann_mosaics(tile_names, a_elem, twx_cfg.path_tile_out,
path_mosaic_out, twx_cfg.interp_start_date.year,
twx_cfg.interp_end_date.year, twx_cfg.twx_data_version,
chunk_cache_size = 250000000.0) #250 MB
|
jaredwo/topowx
|
scripts/step26_mosaic_tiles.py
|
Python
|
gpl-3.0
| 1,767
|
[
"NetCDF"
] |
f91c9f2a6206474e0e977a03462b43599f38dd032c74952b8a22c4dd404a54ec
|
"""setuptools.command.bdist_egg
Build .egg distributions"""
# This module should be kept compatible with Python 2.3
import sys, os, marshal
from setuptools import Command
from distutils.dir_util import remove_tree, mkpath
try:
from distutils.sysconfig import get_python_version, get_python_lib
except ImportError:
from sysconfig import get_python_version
from distutils.sysconfig import get_python_lib
from distutils import log
from distutils.errors import DistutilsSetupError
from pkg_resources import get_build_platform, Distribution, ensure_directory
from pkg_resources import EntryPoint
from types import CodeType
from setuptools.extension import Library
def strip_module(filename):
if '.' in filename:
filename = os.path.splitext(filename)[0]
if filename.endswith('module'):
filename = filename[:-6]
return filename
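# e.g. strip_module('foomodule.so') -> 'foo': the stub loader for an
# extension "foomodule.so" is written out as "foo.py" (see write_stub below).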
def write_stub(resource, pyfile):
f = open(pyfile,'w')
f.write('\n'.join([
"def __bootstrap__():",
" global __bootstrap__, __loader__, __file__",
" import sys, pkg_resources, imp",
" __file__ = pkg_resources.resource_filename(__name__,%r)"
% resource,
" __loader__ = None; del __bootstrap__, __loader__",
" imp.load_dynamic(__name__,__file__)",
"__bootstrap__()",
"" # terminal \n
]))
f.close()
# stub __init__.py for packages distributed without one
NS_PKG_STUB = '__import__("pkg_resources").declare_namespace(__name__)'
class bdist_egg(Command):
description = "create an \"egg\" distribution"
user_options = [
('bdist-dir=', 'b',
"temporary directory for creating the distribution"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_build_platform()),
('exclude-source-files', None,
"remove all .py files from the generated egg"),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
]
boolean_options = [
'keep-temp', 'skip-build', 'exclude-source-files'
]
def initialize_options (self):
self.bdist_dir = None
self.plat_name = None
self.keep_temp = 0
self.dist_dir = None
self.skip_build = 0
self.egg_output = None
self.exclude_source_files = None
def finalize_options(self):
ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info")
self.egg_info = ei_cmd.egg_info
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'egg')
if self.plat_name is None:
self.plat_name = get_build_platform()
self.set_undefined_options('bdist',('dist_dir', 'dist_dir'))
if self.egg_output is None:
# Compute filename of the output egg
basename = Distribution(
None, None, ei_cmd.egg_name, ei_cmd.egg_version,
get_python_version(),
self.distribution.has_ext_modules() and self.plat_name
).egg_name()
self.egg_output = os.path.join(self.dist_dir, basename+'.egg')
def do_install_data(self):
# Hack for packages that install data to install's --install-lib
self.get_finalized_command('install').install_lib = self.bdist_dir
site_packages = os.path.normcase(os.path.realpath(get_python_lib()))
old, self.distribution.data_files = self.distribution.data_files,[]
for item in old:
if isinstance(item,tuple) and len(item)==2:
if os.path.isabs(item[0]):
realpath = os.path.realpath(item[0])
normalized = os.path.normcase(realpath)
if normalized==site_packages or normalized.startswith(
site_packages+os.sep
):
item = realpath[len(site_packages)+1:], item[1]
# XXX else: raise ???
self.distribution.data_files.append(item)
try:
log.info("installing package data to %s" % self.bdist_dir)
self.call_command('install_data', force=0, root=None)
finally:
self.distribution.data_files = old
def get_outputs(self):
return [self.egg_output]
def call_command(self,cmdname,**kw):
"""Invoke reinitialized command `cmdname` with keyword args"""
for dirname in INSTALL_DIRECTORY_ATTRS:
kw.setdefault(dirname,self.bdist_dir)
kw.setdefault('skip_build',self.skip_build)
kw.setdefault('dry_run', self.dry_run)
cmd = self.reinitialize_command(cmdname, **kw)
self.run_command(cmdname)
return cmd
def run(self):
# Generate metadata first
self.run_command("egg_info")
# We run install_lib before install_data, because some data hacks
# pull their data path from the install_lib command.
log.info("installing library code to %s" % self.bdist_dir)
instcmd = self.get_finalized_command('install')
old_root = instcmd.root; instcmd.root = None
cmd = self.call_command('install_lib', warn_dir=0)
instcmd.root = old_root
all_outputs, ext_outputs = self.get_ext_outputs()
self.stubs = []
to_compile = []
for (p,ext_name) in enumerate(ext_outputs):
filename,ext = os.path.splitext(ext_name)
pyfile = os.path.join(self.bdist_dir, strip_module(filename)+'.py')
self.stubs.append(pyfile)
log.info("creating stub loader for %s" % ext_name)
if not self.dry_run:
write_stub(os.path.basename(ext_name), pyfile)
to_compile.append(pyfile)
ext_outputs[p] = ext_name.replace(os.sep,'/')
to_compile.extend(self.make_init_files())
if to_compile:
cmd.byte_compile(to_compile)
if self.distribution.data_files:
self.do_install_data()
# Make the EGG-INFO directory
archive_root = self.bdist_dir
egg_info = os.path.join(archive_root,'EGG-INFO')
self.mkpath(egg_info)
if self.distribution.scripts:
script_dir = os.path.join(egg_info, 'scripts')
log.info("installing scripts to %s" % script_dir)
self.call_command('install_scripts',install_dir=script_dir,no_ep=1)
self.copy_metadata_to(egg_info)
native_libs = os.path.join(egg_info, "native_libs.txt")
if all_outputs:
log.info("writing %s" % native_libs)
if not self.dry_run:
ensure_directory(native_libs)
libs_file = open(native_libs, 'wt')
libs_file.write('\n'.join(all_outputs))
libs_file.write('\n')
libs_file.close()
elif os.path.isfile(native_libs):
log.info("removing %s" % native_libs)
if not self.dry_run:
os.unlink(native_libs)
write_safety_flag(
os.path.join(archive_root,'EGG-INFO'), self.zip_safe()
)
if os.path.exists(os.path.join(self.egg_info,'depends.txt')):
log.warn(
"WARNING: 'depends.txt' will not be used by setuptools 0.6!\n"
"Use the install_requires/extras_require setup() args instead."
)
if self.exclude_source_files:
self.zap_pyfiles()
# Make the archive
make_zipfile(self.egg_output, archive_root, verbose=self.verbose,
dry_run=self.dry_run, mode=self.gen_header())
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
# Add to 'Distribution.dist_files' so that the "upload" command works
getattr(self.distribution,'dist_files',[]).append(
('bdist_egg',get_python_version(),self.egg_output))
def zap_pyfiles(self):
log.info("Removing .py files from temporary directory")
for base,dirs,files in walk_egg(self.bdist_dir):
for name in files:
if name.endswith('.py'):
path = os.path.join(base,name)
log.debug("Deleting %s", path)
os.unlink(path)
def zip_safe(self):
safe = getattr(self.distribution,'zip_safe',None)
if safe is not None:
return safe
log.warn("zip_safe flag not set; analyzing archive contents...")
return analyze_egg(self.bdist_dir, self.stubs)
def make_init_files(self):
"""Create missing package __init__ files"""
init_files = []
for base,dirs,files in walk_egg(self.bdist_dir):
if base==self.bdist_dir:
# don't put an __init__ in the root
continue
for name in files:
if name.endswith('.py'):
if '__init__.py' not in files:
pkg = base[len(self.bdist_dir)+1:].replace(os.sep,'.')
if self.distribution.has_contents_for(pkg):
log.warn("Creating missing __init__.py for %s",pkg)
filename = os.path.join(base,'__init__.py')
if not self.dry_run:
f = open(filename,'w'); f.write(NS_PKG_STUB)
f.close()
init_files.append(filename)
break
else:
# not a package, don't traverse to subdirectories
dirs[:] = []
return init_files
def gen_header(self):
epm = EntryPoint.parse_map(self.distribution.entry_points or '')
ep = epm.get('setuptools.installation',{}).get('eggsecutable')
if ep is None:
return 'w' # not an eggsecutable, do it the usual way.
if not ep.attrs or ep.extras:
raise DistutilsSetupError(
"eggsecutable entry point (%r) cannot have 'extras' "
"or refer to a module" % (ep,)
)
pyver = sys.version[:3]
pkg = ep.module_name
full = '.'.join(ep.attrs)
base = ep.attrs[0]
basename = os.path.basename(self.egg_output)
header = (
"#!/bin/sh\n"
'if [ `basename $0` = "%(basename)s" ]\n'
'then exec python%(pyver)s -c "'
"import sys, os; sys.path.insert(0, os.path.abspath('$0')); "
"from %(pkg)s import %(base)s; sys.exit(%(full)s())"
'" "$@"\n'
'else\n'
' echo $0 is not the correct name for this egg file.\n'
' echo Please rename it back to %(basename)s and try again.\n'
' exec false\n'
'fi\n'
) % locals()
if not self.dry_run:
mkpath(os.path.dirname(self.egg_output), dry_run=self.dry_run)
f = open(self.egg_output, 'w')
f.write(header)
f.close()
return 'a'
def copy_metadata_to(self, target_dir):
"Copy metadata (egg info) to the target_dir"
# normalize the path (so that a forward-slash in egg_info will
# match using startswith below)
norm_egg_info = os.path.normpath(self.egg_info)
prefix = os.path.join(norm_egg_info,'')
for path in self.ei_cmd.filelist.files:
if path.startswith(prefix):
target = os.path.join(target_dir, path[len(prefix):])
ensure_directory(target)
self.copy_file(path, target)
def get_ext_outputs(self):
"""Get a list of relative paths to C extensions in the output distro"""
all_outputs = []
ext_outputs = []
paths = {self.bdist_dir:''}
for base, dirs, files in os.walk(self.bdist_dir):
for filename in files:
if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS:
all_outputs.append(paths[base]+filename)
for filename in dirs:
paths[os.path.join(base,filename)] = paths[base]+filename+'/'
if self.distribution.has_ext_modules():
build_cmd = self.get_finalized_command('build_ext')
for ext in build_cmd.extensions:
if isinstance(ext,Library):
continue
fullname = build_cmd.get_ext_fullname(ext.name)
filename = build_cmd.get_ext_filename(fullname)
if not os.path.basename(filename).startswith('dl-'):
if os.path.exists(os.path.join(self.bdist_dir,filename)):
ext_outputs.append(filename)
return all_outputs, ext_outputs
NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split())
def walk_egg(egg_dir):
"""Walk an unpacked egg's contents, skipping the metadata directory"""
walker = os.walk(egg_dir)
base,dirs,files = walker.next()
if 'EGG-INFO' in dirs:
dirs.remove('EGG-INFO')
yield base,dirs,files
for bdf in walker:
yield bdf
def analyze_egg(egg_dir, stubs):
# check for existing flag in EGG-INFO
for flag,fn in safety_flags.items():
if os.path.exists(os.path.join(egg_dir,'EGG-INFO',fn)):
return flag
if not can_scan(): return False
safe = True
for base, dirs, files in walk_egg(egg_dir):
for name in files:
if name.endswith('.py') or name.endswith('.pyw'):
continue
elif name.endswith('.pyc') or name.endswith('.pyo'):
# always scan, even if we already know we're not safe
safe = scan_module(egg_dir, base, name, stubs) and safe
return safe
def write_safety_flag(egg_dir, safe):
# Write or remove zip safety flag file(s)
for flag,fn in safety_flags.items():
fn = os.path.join(egg_dir, fn)
if os.path.exists(fn):
if safe is None or bool(safe) != flag:
os.unlink(fn)
elif safe is not None and bool(safe)==flag:
f=open(fn,'wt'); f.write('\n'); f.close()
safety_flags = {
True: 'zip-safe',
False: 'not-zip-safe',
}
def scan_module(egg_dir, base, name, stubs):
"""Check whether module possibly uses unsafe-for-zipfile stuff"""
filename = os.path.join(base,name)
if filename[:-1] in stubs:
return True # Extension module
pkg = base[len(egg_dir)+1:].replace(os.sep,'.')
module = pkg+(pkg and '.' or '')+os.path.splitext(name)[0]
f = open(filename,'rb'); f.read(8) # skip magic & date
code = marshal.load(f); f.close()
safe = True
symbols = dict.fromkeys(iter_symbols(code))
for bad in ['__file__', '__path__']:
if bad in symbols:
log.warn("%s: module references %s", module, bad)
safe = False
if 'inspect' in symbols:
for bad in [
'getsource', 'getabsfile', 'getsourcefile', 'getfile',
'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
'getinnerframes', 'getouterframes', 'stack', 'trace'
]:
if bad in symbols:
log.warn("%s: module MAY be using inspect.%s", module, bad)
safe = False
if '__name__' in symbols and '__main__' in symbols and '.' not in module:
if sys.version[:3]=="2.4": # -m works w/zipfiles in 2.5
log.warn("%s: top-level module may be 'python -m' script", module)
safe = False
return safe
def iter_symbols(code):
"""Yield names and strings used by `code` and its nested code objects"""
for name in code.co_names: yield name
for const in code.co_consts:
if isinstance(const,basestring):
yield const
elif isinstance(const,CodeType):
for name in iter_symbols(const):
yield name
def can_scan():
if not sys.platform.startswith('java') and sys.platform != 'cli':
# CPython, PyPy, etc.
return True
log.warn("Unable to analyze compiled code on this platform.")
log.warn("Please ask the author to include a 'zip_safe'"
" setting (either True or False) in the package's setup.py")
# Attribute names of options for commands that might need to be convinced to
# install to the egg build directory
INSTALL_DIRECTORY_ATTRS = [
'install_lib', 'install_dir', 'install_data', 'install_base'
]
def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=None,
mode='w'
):
"""Create a zip file from all the files under 'base_dir'. The output
zip file will be named 'base_dir' + ".zip". Uses either the "zipfile"
Python module (if available) or the InfoZIP "zip" utility (if installed
and found on the default search path). If neither tool is available,
raises DistutilsExecError. Returns the name of the output zip file.
"""
import zipfile
mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)
def visit(z, dirname, names):
for name in names:
path = os.path.normpath(os.path.join(dirname, name))
if os.path.isfile(path):
p = path[len(base_dir)+1:]
if not dry_run:
z.write(path, p)
log.debug("adding '%s'" % p)
if compress is None:
compress = (sys.version>="2.4") # avoid 2.3 zipimport bug when 64 bits
compression = [zipfile.ZIP_STORED, zipfile.ZIP_DEFLATED][bool(compress)]
if not dry_run:
z = zipfile.ZipFile(zip_filename, mode, compression=compression)
for dirname, dirs, files in os.walk(base_dir):
visit(z, dirname, files)
z.close()
else:
for dirname, dirs, files in os.walk(base_dir):
visit(None, dirname, files)
return zip_filename
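# Usage sketch (the paths are illustrative):
#
#     make_zipfile('dist/example-1.0-py2.7.egg', 'build/bdist.linux/egg',
#                  verbose=1, mode='w')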
#
|
diego-d5000/MisValesMd
|
env/lib/python2.7/site-packages/distribute-0.6.24-py2.7.egg/setuptools/command/bdist_egg.py
|
Python
|
mit
| 18,884
|
[
"VisIt"
] |
d60c7b14414aba0bb640c889a45d6e0b208b751fd64b3bb1bcf8bb5d809f93c3
|
# -*- coding: utf-8 -*-
"""
jinja2.meta
~~~~~~~~~~~
This module implements various functions that exposes information about
templates that might be interesting for various kinds of applications.
:copyright: (c) 2010 by the Jinja Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from jinja2 import nodes
from jinja2.compiler import CodeGenerator
from jinja2._compat import string_types
class TrackingCodeGenerator(CodeGenerator):
"""We abuse the code generator for introspection."""
def __init__(self, environment):
CodeGenerator.__init__(self, environment, '<introspection>',
'<introspection>')
self.undeclared_identifiers = set()
def write(self, x):
"""Don't write."""
def pull_locals(self, frame):
"""Remember all undeclared identifiers."""
self.undeclared_identifiers.update(frame.identifiers.undeclared)
def find_undeclared_variables(ast):
"""Returns a set of all variables in the AST that will be looked up from
the context at runtime. Because at compile time it's not known which
variables will be used depending on the path the execution takes at
runtime, all variables are returned.
>>> from jinja2 import Environment, meta
>>> env = Environment()
>>> ast = env.parse('{% set foo = 42 %}{{ bar + foo }}')
>>> meta.find_undeclared_variables(ast) == set(['bar'])
True
.. admonition:: Implementation
Internally the code generator is used for finding undeclared variables.
This is good to know because the code generator might raise a
:exc:`TemplateAssertionError` during compilation and as a matter of
fact this function can currently raise that exception as well.
"""
codegen = TrackingCodeGenerator(ast.environment)
codegen.visit(ast)
return codegen.undeclared_identifiers
def find_referenced_templates(ast):
"""Finds all the referenced templates from the AST. This will return an
iterator over all the hardcoded template extensions, inclusions and
imports. If dynamic inheritance or inclusion is used, `None` will be
yielded.
>>> from jinja2 import Environment, meta
>>> env = Environment()
>>> ast = env.parse('{% extends "layout.html" %}{% include helper %}')
>>> list(meta.find_referenced_templates(ast))
['layout.html', None]
This function is useful for dependency tracking. For example if you want
to rebuild parts of the website after a layout template has changed.
"""
for node in ast.find_all((nodes.Extends, nodes.FromImport, nodes.Import,
nodes.Include)):
if not isinstance(node.template, nodes.Const):
# a tuple with some non consts in there
if isinstance(node.template, (nodes.Tuple, nodes.List)):
for template_name in node.template.items:
# something const, only yield the strings and ignore
# non-string consts that really just make no sense
if isinstance(template_name, nodes.Const):
if isinstance(template_name.value, string_types):
yield template_name.value
# something dynamic in there
else:
yield None
# something dynamic we don't know about here
else:
yield None
continue
# constant is a basestring, direct template name
if isinstance(node.template.value, string_types):
yield node.template.value
# a tuple or list (latter *should* not happen) made of consts,
# yield the consts that are strings. We could warn here for
# non string values
elif isinstance(node, nodes.Include) and \
isinstance(node.template.value, (tuple, list)):
for template_name in node.template.value:
if isinstance(template_name, string_types):
yield template_name
# something else we don't care about, we could warn here
else:
yield None
|
fancasy/final
|
lib/jinja2/meta.py
|
Python
|
apache-2.0
| 4,301
|
[
"VisIt"
] |
451412f982c4da29e088a58dac0733016b6f6c205f0bd23fcd9a178c59f48825
|
#!/usr/bin/python
import pylab
import sys
import argparse
import numpy as np
import matplotlib.pyplot as plt
import readpng as rpng
import tikhonov as tik
import scipy.sparse.linalg
import time
def create_data_and_designmatrix(img,N=500,width=10,p=1.0,deltaa=None,ixrand=None,iyrand=None,Mx=None,My=None):
if Mx is None or My is None:
Mx=img.shape[0]
My=img.shape[1]
if ixrand is None or iyrand is None:
ixrand=np.array(list(map(int,np.random.rand(N)*Mx*p))) # x-position of beams
iyrand=np.array(list(map(int,np.random.rand(N)*My))) # y-position of beams
if deltaa is None:
deltaa=np.array(list(map(int,np.random.rand(Mx*My)*width))).reshape(Mx,My) #width of beams
d=[]
g=[]
imgext=np.zeros((Mx,My))
imgext[:,:]=None
for i in range(0,N):
ix=ixrand[i]
iy=iyrand[i]
delta=deltaa[ix,iy]
ix1=max(ix-delta,0)
ix2=min(Mx,ix+delta)
iy1=max(iy-delta,0)
iy2=min(My,iy+delta)
val=np.sum(img[ix1:ix2,iy1:iy2])
d.append(val)
gzero=np.zeros((Mx,My),dtype="float")
gzero[ix1:ix2,iy1:iy2]=1.0
g.append(gzero.flatten())
imgext[ix,iy]=np.mean(img[ix1:ix2,iy1:iy2])
d=np.array(d)
g=np.array(g)
return d, g, imgext
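# The function above builds the linear forward model d = G m: each row of G
# integrates the flattened image over one randomly placed square beam, and
# imgext stores the per-beam mean for visual comparison. A usage sketch
# (shapes are illustrative):
#
#     img = np.random.rand(64, 64)
#     d, G, img_avg = create_data_and_designmatrix(img, N=200, width=8)
#     # d.shape == (200,), G.shape == (200, 64 * 64)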
def plot_lcurve(residualseq,modelnormseq,curveseq,imax):
fig = plt.figure()
ax = fig.add_subplot(121)
pylab.xscale('log')
pylab.yscale('log')
pylab.ylabel("Norm of Model")
pylab.xlabel("Norm of Prediction Error")
ax.plot(residualseq,modelnormseq,marker=".",c="green")
ax.plot([residualseq[imax]],[modelnormseq[imax]],marker="o",c="red")
ax2 = fig.add_subplot(122)
pylab.xscale('log')
pylab.ylabel("Curvature")
pylab.xlabel("Norm of Prediction Error")
ax2.plot(residualseq,curveseq,marker=".",c="green")
ax2.plot([residualseq[imax]],[curveseq[imax]],marker="o",c="red")
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Inverse Problem test program by Hajime Kawahara')
parser.add_argument('-f', nargs=1, required=True, help='png file')
parser.add_argument('-n', nargs=1, default=[1000], help='number of light beams', type=int)
parser.add_argument('-l', nargs=1, default=[0.001], help='lambda', type=float)
parser.add_argument('-L', nargs=2, help='L curve criterion ON. Input search area', type=float)
parser.add_argument('-nl', nargs=1, default=[40], help='number of lambda grid for L curve', type=int)
parser.add_argument('-w', nargs=1, default=[20.0], help='mean beam diameter', type=float)
parser.add_argument('-p', nargs=1, default=[1.0], help='beams probe on upper 100 p percent area ', type=float)
parser.add_argument('-lim', nargs=1, default=[1.e-12], help='NGIM or TSVD limit (singular values below this are regarded as zero)', type=float)
parser.add_argument('-s', nargs=1, help='STD of gaussian noise', type=float)
parser.add_argument('-save', nargs=1, help='save G and d of search light', type=str)
parser.add_argument('-load', nargs=1, help='load G and d of search light', type=str)
parser.add_argument('-solver', nargs=1, default=["fullsvd"], help='SVD solver. fullsvd or iterative', type=str)
args = parser.parse_args()
solver=args.solver[0]
img=rpng.get_bwimg(args.f[0])
lamb=args.l[0]
width=args.w[0]
N=args.n[0]
p=args.p[0]
Mx=img.shape[0]
My=img.shape[1]
M=Mx*My
print("# of Data = ", N, "# of Model = ", M)
if args.load:
#load d,g,imgext
print("Load G and d from ",args.load[0])
data=np.load(args.load[0]+".npz")
d=data["arr_0"]
g=data["arr_1"]
imgext=data["arr_2"]
else:
#create eclipse curves
d,g,imgext=create_data_and_designmatrix(img,N,width,p)
if args.s:
sigma=args.s[0]
print("noise injection to data: sigma=",sigma)
d=d+np.random.normal(0.0,sigma,N)
if args.save:
print("Save G and d to ",args.save[0])
np.savez(args.save[0],d,g,imgext)
print("compute svd")
if solver == "fullsvd":
start = time.time()
U,S,VT=np.linalg.svd(g)
elapsed_time = time.time() - start
print("solved by np.linalg.svd: time=",elapsed_time)
p=None # reuses 'p' as the SVD rank for the regularizer (None = keep all singular values)
elif solver == "iterative":
nk=min(M,N)-1
print(nk, M, N)
start = time.time()
U,S,VT=scipy.sparse.linalg.svds(g,k=nk)
elapsed_time = time.time() - start
#convert values in decending order
S=S[::-1]
U=U[:,::-1]
VT=VT[::-1,:]
#remove nan
mask=(S==S)
S=S[mask]
U=U[:,mask]
VT=VT[mask,:]
print("solved by scipy.sparse.linalg.svds: time=",elapsed_time)
p=len(S)
else:
sys.exit("invalid solver option. specify fullsvd or iterative. EXIT.")
mprior=np.zeros(M)
if args.L:
method="Choose adequate lambda by L-curve criterion"
print(method)
modelnormseq=[]
residualseq=[]
curveseq=[]
nlcurve=args.nl[0]
lamseq=np.logspace(np.log10(args.L[0]),np.log10(args.L[1]),num=nlcurve)
print("lamb", "curvature")
for lamb in lamseq:
mest,dpre,residual,modelnorm,curv_lcurve=tik.tikhonov_regularization(g,d,mprior,U,VT,S,lamb,p=p)
modelnormseq.append(modelnorm)
residualseq.append(residual)
curveseq.append(curv_lcurve)
print(lamb, curv_lcurve)
residualseq=np.array(residualseq)
modelnormseq=np.array(modelnormseq)
imax=np.argmax(curveseq)
lamb=lamseq[imax]
print("Best lambda=",lamb)
plot_lcurve(residualseq,modelnormseq,curveseq,imax)
if lamb==0:
method="NGIM/TSVD"
lim=args.lim[0]
print(method)
fig =plt.figure()
ax=fig.add_subplot(111)
ax.plot(np.arange(0,len(S),1),S,".")
pylab.yscale("log")
pylab.axhline(lim,color="red")
pylab.xlabel("i")
pylab.ylabel("Singular Value")
plt.show()
mest,dpre,residual,modelnorm=tik.NGIM_regularization(g,d,mprior,U,VT,S,lim)
cap="# of beams = "+str(N)+", Mean beam diameter = "+str(width)+" pixels,"+" singular value cutoff = "+str(lim)
else:
method="Tikhonov regularization"
print(method)
mest,dpre,residual,modelnorm,curv_lcurve=tik.tikhonov_regularization(g,d,mprior,U,VT,S,lamb,p=p)
cap="# of beams = "+str(N)+", Mean beam diameter = "+str(width)+" pixels, $\lambda$ = "+str(lamb)
imgest=mest.reshape(Mx,My)
fig =plt.figure()
ax=fig.add_subplot(131)
ax.imshow(img,cmap="gray")
pylab.title("input")
ax=fig.add_subplot(132)
ax.imshow(imgext,cmap="gray")
pylab.title("data (averaged)")
ax.annotate(method, xy=(0.5, 1.15), xycoords='axes fraction',horizontalalignment="center", fontsize=16)
ax.annotate(cap, xy=(0.5, -0.2), xycoords='axes fraction',horizontalalignment="center", fontsize=12,color="gray")
ax=fig.add_subplot(133)
ax.imshow(imgest,cmap="gray")
pylab.title("estimate")
plt.show()
|
HajimeKawahara/pinvprob
|
pinvprob/random_light.py
|
Python
|
gpl-2.0
| 7,271
|
[
"Gaussian"
] |
4c43d7ba17f93f624b3d408479ab806a288f9fc56128f90967199e1b150f42d3
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007 Johan Gonqvist <johan.gronqvist@gmail.com>
# Copyright (C) 2007-2009 Gary Burton <gary.burton@zen.co.uk>
# Copyright (C) 2007-2009 Stephane Charette <stephanecharette@gmail.com>
# Copyright (C) 2008-2009 Brian G. Matherly
# Copyright (C) 2008 Jason M. Simanek <jason@bohemianalps.com>
# Copyright (C) 2008-2011 Rob G. Healey <robhealey1@gmail.com>
# Copyright (C) 2010 Doug Blank <doug.blank@gmail.com>
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2010- Serge Noiraud
# Copyright (C) 2011 Tim G L Lyons
# Copyright (C) 2013 Benny Malengier
# Copyright (C) 2016 Allen Crider
# Copyright (C) 2018 Theo van Rijn
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Narrative Web Page generator.
Classes:
NavWebReport - main class that produces the report. Entry point to produce
the report is write_report
NavWebOptions - class that defines the options and provides the handling
interface
"""
#------------------------------------------------
# python modules
#------------------------------------------------
import logging
from functools import partial
import os
import sys
import time
import shutil
import tarfile
from io import BytesIO, TextIOWrapper
from collections import defaultdict
from decimal import getcontext
#------------------------------------------------
# Gramps module
#------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
from gramps.gen.lib import (EventType, Name,
Person,
Family, Event, Place, PlaceName, Source,
Citation, Media, Repository, Note, Tag)
from gramps.gen.lib.date import Today
from gramps.gen.plug.menu import (PersonOption, NumberOption, StringOption,
BooleanOption, EnumeratedListOption,
FilterOption, NoteOption, MediaOption,
DestinationOption)
from gramps.gen.plug.report import Report
from gramps.gen.plug.report import utils
from gramps.gen.plug.report import MenuReportOptions
from gramps.gen.plug.report import stdoptions
from gramps.gen.constfunc import win, get_curr_dir
from gramps.gen.config import config
from gramps.gen.datehandler import displayer as _dd
from gramps.gen.display.name import displayer as _nd
from gramps.gen.display.place import displayer as _pd
from gramps.gen.proxy import CacheProxyDb
from gramps.plugins.lib.libhtmlconst import _CHARACTER_SETS, _CC, _COPY_OPTIONS
from gramps.gen.relationship import get_relationship_calculator
#------------------------------------------------
# specific narrative web import
#------------------------------------------------
from gramps.plugins.webreport.basepage import BasePage
from gramps.plugins.webreport.person import PersonPages
from gramps.plugins.webreport.family import FamilyPages
from gramps.plugins.webreport.event import EventPages
from gramps.plugins.webreport.media import MediaPages
from gramps.plugins.webreport.place import PlacePages
from gramps.plugins.webreport.source import SourcePages
from gramps.plugins.webreport.repository import RepositoryPages
from gramps.plugins.webreport.citation import CitationPages
from gramps.plugins.webreport.surnamelist import SurnameListPage
from gramps.plugins.webreport.surname import SurnamePage
from gramps.plugins.webreport.thumbnail import ThumbnailPreviewPage
from gramps.plugins.webreport.statistics import StatisticsPage
from gramps.plugins.webreport.updates import UpdatesPage
from gramps.plugins.webreport.multilang import IndexPage
from gramps.plugins.webreport.home import HomePage
from gramps.plugins.webreport.contact import ContactPage
from gramps.plugins.webreport.download import DownloadPage
from gramps.plugins.webreport.introduction import IntroductionPage
from gramps.plugins.webreport.addressbook import AddressBookPage
from gramps.plugins.webreport.addressbooklist import AddressBookListPage
from gramps.plugins.webreport.calendar import CalendarPage
from gramps.plugins.webreport.common import (get_gendex_data,
HTTP, HTTPS, _WEB_EXT, CSS,
_NARRATIVESCREEN, _NARRATIVEPRINT,
_WRONGMEDIAPATH, sort_people)
LOG = logging.getLogger(".NarrativeWeb")
_ = glocale.translation.sgettext
getcontext().prec = 8
#------------------------------------------------
# constants
#------------------------------------------------
_DEFAULT_MAX_IMG_WIDTH = 800 # resize images that are wider than this
_DEFAULT_MAX_IMG_HEIGHT = 600 # resize images that are taller than this
# The two values above are settable in options.
class NavWebReport(Report):
"""
Create WebReport object that produces the report.
"""
def __init__(self, database, options, user):
"""
@param: database -- The Gramps database instance
@param: options -- Instance of the Options class for this report
@param: user -- Instance of a gen.user.User()
"""
Report.__init__(self, database, options, user)
self.user = user
menu = options.menu
self.link_prefix_up = True
self.options = {}
for optname in menu.get_all_option_names():
menuopt = menu.get_option_by_name(optname)
self.options[optname] = menuopt.get_value()
self.set_locale(options.menu.get_option_by_name('trans').get_value())
stdoptions.run_date_format_option(self, menu)
self.rlocale = self._locale
self._ = self.rlocale.translation.sgettext
self.the_lang = self.rlocale.language[0]
stdoptions.run_private_data_option(self, menu)
stdoptions.run_living_people_option(self, menu)
self.database = CacheProxyDb(self.database)
self._db = self.database
filters_option = menu.get_option_by_name('filter')
self.filter = filters_option.get_filter()
self.copyright = self.options['cright']
self.target_path = self.options['target']
self.ext = self.options['ext']
self.css = self.options['css']
self.navigation = self.options["navigation"]
self.citationreferents = self.options['citationreferents']
self.inc_tags = self.options['inc_tags']
self.title = self.options['title']
self.inc_gallery = self.options['gallery']
self.inc_unused_gallery = self.options['unused']
self.create_thumbs_only = self.options['create_thumbs_only']
self.create_thumbs_index = self.options['create_thumbs_index']
self.create_images_index = self.options['create_images_index']
self.opts = self.options
self.inc_contact = self.opts['contactnote'] or self.opts['contactimg']
# name format options
self.name_format = self.options['name_format']
# include families or not?
self.inc_families = self.options['inc_families']
# create event pages or not?
self.inc_events = self.options['inc_events']
# create places pages or not?
self.inc_places = self.options['inc_places']
# create sources pages or not?
self.inc_sources = self.options['inc_sources']
# include repository page or not?
self.inc_repository = self.options['inc_repository']
# include GENDEX page or not?
self.inc_gendex = self.options['inc_gendex']
# Download Options Tab
self.inc_download = self.options['incdownload']
self.nb_download = self.options['nbdownload']
self.dl_descr = {}
self.dl_fname = {}
for count in range(1, self.nb_download+1):
fnamex = 'down_fname%d' % count
descrx = 'dl_descr%d' % count
self.dl_fname[count] = self.options[fnamex]
self.dl_descr[count] = self.options[descrx]
self.encoding = self.options['encoding']
self.use_archive = self.options['archive']
self.use_intro = self.options['intronote'] or self.options['introimg']
self.use_home = self.options['homenote'] or self.options['homeimg']
self.use_contact = self.opts['contactnote'] or self.opts['contactimg']
self.inc_stats = self.opts['inc_stats']
self.inc_updates = self.opts['updates']
self.create_unused_media = self.opts['unused']
# Do we need to include this in a CMS?
self.usecms = self.options['usecms']
self.target_uri = self.options['cmsuri']
# Do we add an extra page?
# extrapage is the URI
# extrapagename is the visible name in the navigation bar.
self.extrapage = self.options['extrapage']
self.extrapagename = self.options['extrapagename']
# Do we need to include web calendar?
self.usecal = self.options['usecal']
self.calendar = None
# include the ancestor tree (gender-colored) graphics or not?
self.ancestortree = self.options['ancestortree']
# whether to display children in birth order or in entry order?
self.birthorder = self.options['birthorder']
# get option for Internet Address Book
self.inc_addressbook = self.options["inc_addressbook"]
# Place Map tab options
self.placemappages = self.options['placemappages']
self.familymappages = self.options['familymappages']
self.mapservice = self.options['mapservice']
self.googleopts = self.options['googleopts']
self.googlemapkey = self.options['googlemapkey']
self.stamenopts = self.options['stamenopts']
self.reference_sort = self.options['reference_sort']
if self.use_home:
self.index_fname = "index"
self.surname_fname = "surnames"
self.intro_fname = "introduction"
elif self.use_intro:
self.index_fname = None
self.surname_fname = "surnames"
self.intro_fname = "index"
else:
self.index_fname = None
self.surname_fname = "index"
self.intro_fname = None
self.archive = None
self.cur_fname = None # Internal use. The name of the output file,
# to be used for the tar archive.
self.string_io = None
if self.use_archive:
self.html_dir = None
else:
self.html_dir = self.target_path
self.warn_dir = True # Only give warning once.
self.obj_dict = None
self.visited = None
self.bkref_dict = None
self.rel_class = None
self.tab = None
self.fam_link = {}
if self.options['securesite']:
self.secure_mode = HTTPS
else:
self.secure_mode = HTTP
self.languages = None
self.default_lang = None
self.the_title = None
def write_report(self):
"""
The first method called to write the Narrative Web after loading options
"""
# begin performance check initialization
#import cProfile, pstats, io
#pr = cProfile.Profile()
#pr.enable()
# end performance check
global _WRONGMEDIAPATH
_WRONGMEDIAPATH = []
if not self.use_archive:
dir_name = self.target_path
if dir_name is None:
dir_name = get_curr_dir()
elif not os.path.isdir(dir_name):
parent_dir = os.path.dirname(dir_name)
if not os.path.isdir(parent_dir):
msg = _("Neither %(current)s nor %(parent)s "
"are directories") % {
'current': dir_name, 'parent': parent_dir}
self.user.notify_error(msg)
return
else:
try:
os.mkdir(dir_name)
except IOError as value:
msg = _("Could not create the directory: %s"
) % dir_name + "\n" + value.strerror
self.user.notify_error(msg)
return
except Exception as exception:
LOG.exception(exception)
msg = _("Could not create the directory: %s") % dir_name
self.user.notify_error(msg)
return
try:
image_dir_name = os.path.join(dir_name, 'images')
if not os.path.isdir(image_dir_name):
os.mkdir(image_dir_name)
image_dir_name = os.path.join(dir_name, 'thumb')
if not os.path.isdir(image_dir_name):
os.mkdir(image_dir_name)
except IOError as value:
msg = _("Could not create the directory: %s"
) % image_dir_name + "\n" + value.strerror
self.user.notify_error(msg)
return
except Exception as exception:
LOG.exception(exception)
msg = _("Could not create the directory: %s"
) % image_dir_name + "\n" + str(exception)
self.user.notify_error(msg)
return
else:
if os.path.isdir(self.target_path):
self.user.notify_error(
_('Invalid file name'),
_('The archive file must be a file, not a directory'))
return
try:
self.archive = tarfile.open(self.target_path, "w:gz")
except (OSError, IOError) as value:
self.user.notify_error(
_("Could not create %s") % self.target_path,
str(value))
return
config.set('paths.website-directory',
os.path.dirname(self.target_path) + os.sep)
if self.usecms:
config.set('paths.website-cms-uri',
os.path.dirname(self.target_uri))
# for use with discovering biological, half, and step siblings for use
# in display_ind_parents()...
self.rel_class = get_relationship_calculator(reinit=True,
clocale=self.rlocale)
#################################################
#
# Pass 0 Initialise the plug-ins
#
#################################################
# FIXME: The whole of this section of code should be implemented by the
# registration process for the Web Page plugins.
# Note that by use of a dictionary we ensure that at most one Web Page
# plugin is provided for any object class
self.tab = {}
# FIXME: Initialising self.tab in this way means that this code has to
# run before the Web Page registration - I am not sure whether this is
# possible, in which case an alternative approach to providing the
# mapping of object class to Web Page plugin will be needed.
for obj_class in ("Person", "Family", "Source", "Citation", "Place",
"Event", "Media", "Repository"):
# FIXME: Would it be better if the Web Page plugins used a different
# base class rather than BasePage, which is really just for each web
# page
self.tab[obj_class] = BasePage(self, None, None)
# Note that by not initialising any Web Page plugins that are not going
# to generate pages, we ensure that there is no performance penalty
# for such plugins.
self.tab["Person"] = PersonPages(self, None, None)
if self.inc_families:
self.tab["Family"] = FamilyPages(self, None, None)
if self.inc_events:
self.tab["Event"] = EventPages(self, None, None)
if self.inc_gallery:
self.tab["Media"] = MediaPages(self, None, None)
self.tab["Place"] = PlacePages(self, None, None)
self.tab["Source"] = SourcePages(self, None, None)
self.tab["Repository"] = RepositoryPages(self, None, None)
self.tab["Citation"] = CitationPages(self, None, None)
# FIXME: The following routines that are not run in two passes have not
# yet been converted to a form suitable for separation into Web Page
# plugins: SurnamePage, SurnameListPage, IntroductionPage, HomePage,
# ThumbnailPreviewPage, DownloadPage, ContactPage,AddressBookListPage,
# AddressBookPage
#################################################
#
# Pass 1 Build the lists of objects to be output
#
#################################################
self._build_obj_dict()
#################################################
#
# Add images for home, contact and introduction pages
# if they are not associated to any used objects.
#
#################################################
if self.use_home:
img = self.options['homeimg']
if img:
media = self._db.get_media_from_gramps_id(img)
if media:
self._add_media(media.handle, Media, media.handle)
if self.inc_contact:
img = self.options['contactimg']
if img:
media = self._db.get_media_from_gramps_id(img)
if media:
self._add_media(media.handle, Media, media.handle)
if self.use_intro:
img = self.options['introimg']
if img:
media = self._db.get_media_from_gramps_id(img)
if media:
self._add_media(media.handle, Media, media.handle)
#################################################
#
# Pass 2 Generate the web pages
#
#################################################
self.languages = []
self.default_lang = self.options['trans']
if self.default_lang == "default":
self.default_lang = self.rlocale.language[0]
self.languages.append((self.default_lang, self.options['title']))
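# self.languages becomes a list of (lang, title) pairs, e.g. (values
# hypothetical) [("en", "My Family Tree"), ("fr", "Ma famille")] when a
# second translation is configured below.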
if self.options['multitrans']:
for idx in range(2, 7):
lang = "lang%c" % str(idx)
titl = "title%c" % str(idx)
if self.options[lang] != "default":
cur_lang = self.options[lang]
cur_title = self.options[titl]
self.languages.append((cur_lang, cur_title))
self.visited = []
if len(self.languages) > 1:
IndexPage(self, self.languages)
for the_lang, the_title in self.languages:
if len(self.languages) == 1:
the_lang = None
the_title = self.title
if the_lang == "default":
the_lang = self.rlocale.language[0]
self.the_lang = the_lang
self.the_title = the_title
self.base_pages()
# build classes IndividualListPage and IndividualPage
self.tab["Person"].display_pages(the_lang, the_title)
self.build_gendex(self.obj_dict[Person], the_lang)
# build classes SurnameListPage and SurnamePage
self.surname_pages(self.obj_dict[Person], the_lang, the_title)
# build classes FamilyListPage and FamilyPage
if self.inc_families:
self.tab["Family"].display_pages(the_lang, the_title)
# build classes EventListPage and EventPage
if self.inc_events:
self.tab["Event"].display_pages(the_lang, the_title)
# build classes PlaceListPage and PlacePage
self.tab["Place"].display_pages(the_lang, the_title)
# build classes RepositoryListPage and RepositoryPage
if self.inc_repository:
self.tab["Repository"].display_pages(the_lang, the_title)
# build classes MediaListPage and MediaPage
if self.inc_gallery:
if not self.create_thumbs_only:
self.tab["Media"].display_pages(the_lang, the_title)
# build Thumbnail Preview Page...
self.thumbnail_preview_page()
# build classes AddressBookListPage and AddressBookPage
if self.inc_addressbook:
self.addressbook_pages(self.obj_dict[Person])
# build classes SourceListPage and SourcePage
self.tab["Source"].display_pages(the_lang, the_title)
# build calendar for the current year
if self.usecal:
self.calendar = CalendarPage(self, None, None)
self.calendar.display_pages(the_lang, the_title)
# build classes StatisticsPage
if self.inc_stats:
self.statistics_preview_page()
# build classes Updates
if self.inc_updates:
self.updates_preview_page()
# copy all of the necessary files
self.copy_narrated_files()
# if an archive is being used, close it
if self.archive:
self.archive.close()
if _WRONGMEDIAPATH:
error = '\n'.join([
_('ID=%(grampsid)s, path=%(dir)s') % {
'grampsid' : x[0],
'dir' : x[1]} for x in _WRONGMEDIAPATH[:10]])
if len(_WRONGMEDIAPATH) > 10:
error += '\n ...'
self.user.warn(_("Missing media objects:"), error)
self.database.clear_cache()
# begin print performance check
#pr.disable()
#pr.print_stats()
# end print performance check
def _build_obj_dict(self):
"""
Construct the dictionaries of objects to be included in the reports.
There are two dictionaries, which have the same structure: they are
two-level dictionaries, the first key is the class of object
(e.g. gen.lib.Person).
The second key is the handle of the object.
For the obj_dict, the value is a tuple containing the file name for
the display, the text name for the object, and the gramps_id.
For the bkref_dict, the value is a set of tuples, each containing the
class and handle of an object that refers to the 'key' object, plus a
role or sort key.
"""
_obj_class_list = (Person, Family, Event, Place, Source, Citation,
Media, Repository, Note, Tag, PlaceName)
# setup a dictionary of the required structure
self.obj_dict = defaultdict(lambda: defaultdict(set))
self.bkref_dict = defaultdict(lambda: defaultdict(set))
# initialise the dictionary to empty in case no objects of any
# particular class are included in the web report
for obj_class in _obj_class_list:
self.obj_dict[obj_class] = defaultdict(set)
ind_list = self._db.iter_person_handles()
ind_list = self.filter.apply(self._db, ind_list, user=self.user)
message = _('Constructing list of other objects...')
pgr_title = self.pgrs_title(None)
with self.user.progress(pgr_title, message,
sum(1 for _ in ind_list)) as step:
index = 1
for handle in ind_list:
self._add_person(handle, "", "")
step()
index += 1
LOG.debug("final object dictionary \n" +
"".join(("%s: %s\n" % item)
for item in self.obj_dict.items()))
LOG.debug("final backref dictionary \n" +
"".join(("%s: %s\n" % item)
for item in self.bkref_dict.items()))
def _add_person(self, person_handle, bkref_class, bkref_handle):
"""
Add person_handle to the obj_dict, and recursively all referenced
objects
@param: person_handle -- The handle for the person to add
@param: bkref_class -- The class associated to this handle (person)
@param: bkref_handle -- The handle associated to this person
"""
if self.obj_dict[Person][person_handle]:
# This person is already in the list of selected people.
# This can happen with associated people.
return
person = self._db.get_person_from_handle(person_handle)
if person:
person_name = self.get_person_name(person)
person_fname = self.build_url_fname(person_handle, "ppl",
False, init=True) + self.ext
self.obj_dict[Person][person_handle] = (person_fname, person_name,
person.gramps_id)
self.bkref_dict[Person][person_handle].add((bkref_class,
bkref_handle,
""))
############### Header section ##############
for citation_handle in person.get_citation_list():
self._add_citation(citation_handle, Person, person_handle)
############### Name section ##############
for name in [person.get_primary_name()
] + person.get_alternate_names():
for citation_handle in name.get_citation_list():
self._add_citation(citation_handle, Person, person_handle)
############### Events section ##############
# Now tell the events tab to display the individual events
evt_ref_list = person.get_event_ref_list()
if evt_ref_list:
for evt_ref in evt_ref_list:
role = evt_ref.get_role().xml_str()
event = self._db.get_event_from_handle(evt_ref.ref)
if event:
self._add_event(evt_ref.ref, Person, person_handle,
role)
place_handle = event.get_place_handle()
if place_handle:
self._add_place(place_handle, Person,
person_handle, event)
# If event pages are not being output, then tell the
# media tab to display the person's event media. If
# events are being displayed, then the media are linked
# from the event tab
if not self.inc_events:
for media_ref in event.get_media_list():
media_handle = media_ref.get_reference_handle()
self._add_media(media_handle, Person,
person_handle)
for citation_handle in event.get_citation_list():
self._add_citation(citation_handle, Person,
person_handle)
############### Families section ##############
# Tell the families tab to display this individual's families
family_handle_list = person.get_family_handle_list()
if family_handle_list:
for family_handle in person.get_family_handle_list():
self._add_family(family_handle, Person, person_handle)
# Tell the events tab to display the family events which
# are referenced from the individual page.
family = self._db.get_family_from_handle(family_handle)
if family:
family_evt_ref_list = family.get_event_ref_list()
if family_evt_ref_list:
for evt_ref in family_evt_ref_list:
role = evt_ref.get_role().xml_str()
event = self._db.get_event_from_handle(
evt_ref.ref)
if event:
self._add_event(evt_ref.ref, Person,
person_handle, "Primary")
place_handle = event.get_place_handle()
if place_handle:
self._add_place(place_handle, Person,
person_handle, event)
for cite_hdl in event.get_citation_list():
self._add_citation(cite_hdl, Person,
person_handle)
# add the family media and the family event media if the
# families page is not being displayed (If it is displayed,
# the media are linked from the families page)
if not self.inc_families:
for m_ref in event.get_media_list():
m_hdl = m_ref.get_reference_handle()
self._add_media(m_hdl, Person,
person_handle)
for lds_ord in family.get_lds_ord_list():
for citation_handle in lds_ord.get_citation_list():
self._add_citation(citation_handle,
Person, person_handle)
for attr in family.get_attribute_list():
for citation_handle in attr.get_citation_list():
self._add_citation(citation_handle,
Person, person_handle)
if not self.inc_families:
for media_ref in family.get_media_list():
media_handle = media_ref.get_reference_handle()
self._add_media(media_handle, Person,
person_handle)
############### Associations section ##############
for assoc in person.get_person_ref_list():
self._add_person(assoc.ref, "", "")
############### LDS Ordinance section ##############
for lds_ord in person.get_lds_ord_list():
for citation_handle in lds_ord.get_citation_list():
self._add_citation(citation_handle, Person, person_handle)
############### Attribute section ##############
for attr in person.get_attribute_list():
for citation_handle in attr.get_citation_list():
self._add_citation(citation_handle, Person, person_handle)
############### Address section ##############
for addr in person.get_address_list():
for addr_handle in addr.get_citation_list():
self._add_citation(addr_handle, Person, person_handle)
############### Media section ##############
# Now tell the Media tab which media objects to display
# First the person's media objects
for media_ref in person.get_media_list():
media_handle = media_ref.get_reference_handle()
self._add_media(media_handle, Person, person_handle)
def get_person_name(self, person):
"""
Return a string containing the person's primary name in the name
format chosen in the web report options
@param: person -- person object from database
"""
name_format = self.options['name_format']
primary_name = person.get_primary_name()
name = Name(primary_name)
name.set_display_as(name_format)
return _nd.display_name(name)
def _add_family(self, family_handle, bkref_class, bkref_handle):
"""
Add family to the Family object list
@param: family_handle -- The handle for the family to add
@param: bkref_class -- The class associated to this handle (family)
@param: bkref_handle -- The handle associated to this family
"""
family = self._db.get_family_from_handle(family_handle)
family_name = self.get_family_name(family)
if self.inc_families:
family_fname = self.build_url_fname(family_handle, "fam",
False, init=True) + self.ext
else:
family_fname = ""
self.obj_dict[Family][family_handle] = (family_fname, family_name,
family.gramps_id)
self.bkref_dict[Family][family_handle].add((bkref_class,
bkref_handle,
""))
if self.inc_gallery:
for media_ref in family.get_media_list():
media_handle = media_ref.get_reference_handle()
self._add_media(media_handle, Family, family_handle)
############### Events section ##############
for evt_ref in family.get_event_ref_list():
role = evt_ref.get_role().xml_str()
event = self._db.get_event_from_handle(evt_ref.ref)
place_handle = event.get_place_handle()
if place_handle:
self._add_place(place_handle, Family, family_handle, event)
if self.inc_events:
# detail for family events are displayed on the events pages as
# well as on this family page
self._add_event(evt_ref.ref, Family, family_handle, role)
else:
# There is no event page. Family events are displayed on the
# family page, but the associated family event media may need to
# be displayed on the media page
if self.inc_gallery:
for media_ref in event.get_media_list():
media_handle = media_ref.get_reference_handle()
self._add_media(media_handle, Family, family_handle)
############### LDS Ordinance section ##############
for lds_ord in family.get_lds_ord_list():
for citation_handle in lds_ord.get_citation_list():
self._add_citation(citation_handle, Family, family_handle)
############### Attributes section ##############
for attr in family.get_attribute_list():
for citation_handle in attr.get_citation_list():
self._add_citation(citation_handle, Family, family_handle)
############### Sources section ##############
for citation_handle in family.get_citation_list():
self._add_citation(citation_handle, Family, family_handle)
def get_family_name(self, family):
"""
Return a string containing the name of the family (e.g. 'Family of John
Doe and Jane Doe')
@param: family -- family object from database
"""
self.rlocale = self.set_locale(self.the_lang)
self._ = self.rlocale.translation.sgettext
husband_handle = family.get_father_handle()
spouse_handle = family.get_mother_handle()
if husband_handle:
husband = self._db.get_person_from_handle(husband_handle)
else:
husband = None
if spouse_handle:
spouse = self._db.get_person_from_handle(spouse_handle)
else:
spouse = None
if husband and spouse:
husband_name = self.get_person_name(husband)
spouse_name = self.get_person_name(spouse)
title_str = self._("Family of %(husband)s and %(spouse)s"
) % {'husband' : husband_name,
'spouse' : spouse_name}
elif husband:
husband_name = self.get_person_name(husband)
# Only the name of the husband is known
title_str = self._("Family of %s") % husband_name
elif spouse:
spouse_name = self.get_person_name(spouse)
# Only the name of the wife is known
title_str = self._("Family of %s") % spouse_name
else:
title_str = ''
return title_str
def _add_event(self, event_handle, bkref_class, bkref_handle, role):
"""
Add event to the Event object list
@param: event_handle -- The handle for the event to add
@param: bkref_class -- The class associated to this handle (event)
@param: bkref_handle -- The handle associated to this event
@param: role -- The role of the referring object in this event
"""
event = self._db.get_event_from_handle(event_handle)
event_name = event.get_description()
# The event description can be Y on import from GEDCOM. See the
# following quote from the GEDCOM spec: "The occurrence of an event is
# asserted by the presence of either a DATE tag and value or a PLAC tag
# and value in the event structure. When neither the date value nor the
# place value are known then a Y(es) value on the parent event tag line
# is required to assert that the event happened."
if event_name == "" or event_name is None or event_name == 'Y':
event_name = str(event.get_type())
# begin add generated descriptions to media pages
# (request 7074 : acrider)
ref_name = ""
for reference in self._db.find_backlink_handles(event_handle):
ref_class, ref_handle = reference
if ref_class == 'Person':
person = self._db.get_person_from_handle(ref_handle)
ref_name = self.get_person_name(person)
elif ref_class == 'Family':
family = self._db.get_family_from_handle(ref_handle)
ref_name = self.get_family_name(family)
if ref_name != "":
# TODO for Arabic, should the next line's comma be translated?
event_name += ", " + ref_name
# end descriptions to media pages
if self.inc_events:
event_fname = self.build_url_fname(event_handle, "evt",
False, init=True) + self.ext
else:
event_fname = ""
self.obj_dict[Event][event_handle] = (event_fname, event_name,
event.gramps_id)
self.bkref_dict[Event][event_handle].add((bkref_class, bkref_handle,
role))
############### Attribute section ##############
for attr in event.get_attribute_list():
for citation_handle in attr.get_citation_list():
self._add_citation(citation_handle, Event, event_handle)
############### Source section ##############
for citation_handle in event.get_citation_list():
self._add_citation(citation_handle, Event, event_handle)
############### Media section ##############
if self.inc_gallery:
for media_ref in event.get_media_list():
media_handle = media_ref.get_reference_handle()
self._add_media(media_handle, Event, event_handle)
def _add_place(self, place_handle, bkref_class, bkref_handle, event):
"""
Add place to the Place object list
@param: place_handle -- The handle for the place to add
@param: bkref_class -- The class associated to this handle (place)
@param: bkref_handle -- The handle associated to this place
@param: event -- The event which refers to this place
"""
place = self._db.get_place_from_handle(place_handle)
if place is None:
return
if bkref_class == Person:
person = self._db.get_person_from_handle(bkref_handle)
name = _nd.display(person)
else:
family = self._db.get_family_from_handle(bkref_handle)
husband_handle = family.get_father_handle()
if husband_handle:
person = self._db.get_person_from_handle(husband_handle)
name = _nd.display(person)
else:
name = ""
if config.get('preferences.place-auto'):
place_name = _pd.display_event(self._db, event)
pplace_name = _pd.display(self._db, place)
else:
place_name = place.get_title()
pplace_name = place_name
if event:
if self.reference_sort:
role_or_date = name
else:
date = event.get_date_object()
# calendar is the original date calendar
calendar = str(date.get_calendar())
# convert date to Gregorian for a correct sort
_date = str(date.to_calendar("gregorian"))
role_or_date = calendar + ":" + _date
else:
role_or_date = ""
place_fname = self.build_url_fname(place_handle, "plc",
False, init=True) + self.ext
self.obj_dict[Place][place_handle] = (place_fname, place_name,
place.gramps_id, event)
self.obj_dict[PlaceName][place_name] = (place_handle, place_name,
place.gramps_id, event)
if place_name != pplace_name:
self.obj_dict[PlaceName][pplace_name] = (place_handle, pplace_name,
place.gramps_id, event)
self.bkref_dict[Place][place_handle].add((bkref_class, bkref_handle,
role_or_date
))
############### Media section ##############
if self.inc_gallery:
for media_ref in place.get_media_list():
media_handle = media_ref.get_reference_handle()
self._add_media(media_handle, Place, place_handle)
############### Sources section ##############
for citation_handle in place.get_citation_list():
self._add_citation(citation_handle, Place, place_handle)
def _add_source(self, source_handle, bkref_class, bkref_handle):
"""
Add source to the Source object list
@param: source_handle -- The handle for the source to add
@param: bkref_class -- The class associated to this handle (source)
@param: bkref_handle -- The handle associated to this source
"""
if self.obj_dict[Source][source_handle]:
for bkref in self.bkref_dict[Source][source_handle]:
if bkref_handle == bkref[1]:
return
source = self._db.get_source_from_handle(source_handle)
source_name = source.get_title()
source_fname = self.build_url_fname(source_handle, "src",
False, init=True) + self.ext
self.obj_dict[Source][source_handle] = (source_fname, source_name,
source.gramps_id)
self.bkref_dict[Source][source_handle].add((bkref_class,
bkref_handle,
"" # no role
))
############### Media section ##############
if self.inc_gallery:
for media_ref in source.get_media_list():
media_handle = media_ref.get_reference_handle()
self._add_media(media_handle, Source, source_handle)
############### Repository section ##############
if self.inc_repository:
for repo_ref in source.get_reporef_list():
repo_handle = repo_ref.get_reference_handle()
self._add_repository(repo_handle, Source, source_handle)
def _add_citation(self, citation_handle, bkref_class, bkref_handle):
"""
Add citation to the Citation object list
@param: citation_handle -- The handle for the citation to add
@param: bkref_class -- The class associated to this handle
@param: bkref_handle -- The handle associated to this citation
"""
if self.obj_dict[Citation][citation_handle]:
for bkref in self.bkref_dict[Citation][citation_handle]:
if bkref_handle == bkref[1]:
return
citation = self._db.get_citation_from_handle(citation_handle)
# If the page is None, we want to make sure that a tuple is generated
# for the source backreference
citation_name = citation.get_page() or ""
source_handle = citation.get_reference_handle()
self.obj_dict[Citation][citation_handle] = ("", citation_name,
citation.gramps_id)
self.bkref_dict[Citation][citation_handle].add((bkref_class,
bkref_handle,
"" # no role
))
############### Source section ##############
self._add_source(source_handle, Citation, citation_handle)
############### Media section ##############
if self.inc_gallery:
for media_ref in citation.get_media_list():
media_handle = media_ref.get_reference_handle()
self._add_media(media_handle, Citation, citation_handle)
def _add_media(self, media_handle, bkref_class, bkref_handle):
"""
Add media to the Media object list
@param: media_handle -- The handle for the media to add
@param: bkref_class -- The class associated to this handle (media)
@param: bkref_handle -- The handle associated to this media
"""
if self.obj_dict[Media][media_handle]:
for bkref in self.bkref_dict[Media][media_handle]:
if bkref_handle == bkref[1]:
return
media_refs = self.bkref_dict[Media].get(media_handle)
if media_refs and (bkref_class, bkref_handle, "") in media_refs:
return
media = self._db.get_media_from_handle(media_handle)
# use media title (request 7074 acrider)
media_name = media.get_description()
if media_name is None or media_name == "":
media_name = "Media"
#end media title
if self.inc_gallery:
media_fname = self.build_url_fname(media_handle, "img",
False, init=True) + self.ext
else:
media_fname = ""
self.obj_dict[Media][media_handle] = (media_fname, media_name,
media.gramps_id)
self.bkref_dict[Media][media_handle].add((bkref_class, bkref_handle,
"" # no role for a media
))
############### Attribute section ##############
for attr in media.get_attribute_list():
for citation_handle in attr.get_citation_list():
self._add_citation(citation_handle, Media, media_handle)
############### Sources section ##############
for citation_handle in media.get_citation_list():
self._add_citation(citation_handle, Media, media_handle)
def _add_repository(self, repos_handle, bkref_class, bkref_handle):
"""
Add repository to the Repository object list
@param: repos_handle -- The handle for the repository to add
@param: bkref_class -- The class associated to this handle (source)
@param: bkref_handle -- The handle associated to this source
"""
if self.obj_dict[Repository][repos_handle]:
for bkref in self.bkref_dict[Repository][repos_handle]:
if bkref_handle == bkref[1]:
return
repos = self._db.get_repository_from_handle(repos_handle)
repos_name = repos.name
if self.inc_repository:
repos_fname = self.build_url_fname(repos_handle, "repo",
False, init=True) + self.ext
else:
repos_fname = ""
self.obj_dict[Repository][repos_handle] = (repos_fname, repos_name,
repos.gramps_id)
self.bkref_dict[Repository][repos_handle].add((bkref_class,
bkref_handle,
"" # no role
))
def copy_narrated_files(self):
"""
Copy all of the CSS, image, and javascript files for Narrative Web
"""
imgs = []
# copy all screen style sheets
for css_f in CSS:
already_done = []
for css_fn in ("UsEr_", "Basic", "Mainz", "Nebraska", "Vis"):
if css_fn in css_f and css_f not in already_done:
already_done.append(css_f)
fname = CSS[css_f]["filename"]
# add images for this css
imgs += CSS[css_f]["images"]
css_f = css_f.replace("UsEr_", "")
self.copy_file(fname, css_f + ".css", "css")
# copy screen style sheet
if CSS[self.css]["filename"]:
fname = CSS[self.css]["filename"]
self.copy_file(fname, _NARRATIVESCREEN, "css")
# copy printer style sheet
fname = CSS["Print-Default"]["filename"]
self.copy_file(fname, _NARRATIVEPRINT, "css")
# copy ancestor tree style sheet if tree is being created?
if self.ancestortree:
fname = CSS["ancestortree"]["filename"]
self.copy_file(fname, "ancestortree.css", "css")
# copy behaviour style sheet
fname = CSS["behaviour"]["filename"]
self.copy_file(fname, "behaviour.css", "css")
# copy Menu Layout Style Sheet if Blue or Visually is being
# used as the stylesheet?
if CSS[self.css]["navigation"]:
if self.navigation == "Horizontal":
fname = CSS["Horizontal-Menus"]["filename"]
elif self.navigation == "Vertical":
fname = CSS["Vertical-Menus"]["filename"]
elif self.navigation == "Fade":
fname = CSS["Fade-Menus"]["filename"]
elif self.navigation == "dropdown":
fname = CSS["DropDown-Menus"]["filename"]
self.copy_file(fname, "narrative-menus.css", "css")
# copy narrative-maps Style Sheet if Place or Family Map pages
# are being created?
if self.placemappages or self.familymappages:
fname = CSS["NarrativeMaps"]["filename"]
self.copy_file(fname, "narrative-maps.css", "css")
# Copy the Creative Commons icon if the Creative Commons
# license is requested
if 0 < self.copyright <= len(_CC):
imgs += [CSS["Copyright"]["filename"]]
# copy Gramps favorite icon #2
imgs += [CSS["favicon2"]["filename"]]
# we need the blank image gif required by behaviour.css
# add the document.png file for media other than photos
imgs += CSS["All Images"]["images"]
# copy Ancestor Tree graphics if needed
if self.ancestortree:
imgs += CSS["ancestortree"]["images"]
# Anything css-specific:
imgs += CSS[self.css]["images"]
# copy all to images subdir:
for from_path in imgs:
dummy_fdir, fname = os.path.split(from_path)
self.copy_file(from_path, fname, "images")
# copy Gramps marker icon for openstreetmap
fname = CSS["marker"]["filename"]
self.copy_file(fname, "marker.png", "images")
def build_gendex(self, ind_list, the_lang):
"""
Create a gendex file
@param: ind_list -- The list of persons to use
@param: the_lang -- The lang to process
"""
if self.inc_gendex:
message = _('Creating GENDEX file')
pgr_title = self.pgrs_title(the_lang)
with self.user.progress(pgr_title, message, len(ind_list)) as step:
fp_gendex, gendex_io = self.create_file("gendex", ext=".txt")
date = 0
index = 1
for person_handle in ind_list:
step()
index += 1
person = self._db.get_person_from_handle(person_handle)
datex = person.get_change_time()
if datex > date:
date = datex
if self.archive:
self.write_gendex(gendex_io, person)
else:
self.write_gendex(fp_gendex, person)
self.close_file(fp_gendex, gendex_io, date)
def write_gendex(self, filep, person):
"""
Reference|SURNAME|given name /SURNAME/|date of birth|place of birth|
date of death|place of death|
* field 1: file name of web page referring to the individual
* field 2: surname of the individual
* field 3: full name of the individual
* field 4: date of birth or christening (optional)
* field 5: place of birth or christening (optional)
* field 6: date of death or burial (optional)
* field 7: place of death or burial (optional)
@param: filep -- The GENDEX output file object
@param: person -- The person to use for GENDEX file
"""
url = self.build_url_fname_html(person.handle, "ppl")
surname = person.get_primary_name().get_surname()
fullname = person.get_primary_name().get_gedcom_name()
# get birth info:
dob, pob = get_gendex_data(self._db, person.get_birth_ref())
# get death info:
dod, pod = get_gendex_data(self._db, person.get_death_ref())
linew = '|'.join((url, surname, fullname, dob, pob, dod, pod)) + '|\n'
if self.archive:
filep.write(bytes(linew, "utf8"))
else:
filep.write(linew)
def surname_pages(self, ind_list, the_lang, the_title):
"""
Generates the surname-related pages from the list of individual
people.
@param: ind_list -- The list of persons to use
@param: the_lang -- The lang to process
@param: the_title -- The title page for the lang
"""
local_list = sort_people(self._db, ind_list, self.rlocale)
message = _("Creating surname pages")
pgr_title = self.pgrs_title(the_lang)
with self.user.progress(pgr_title, message, len(local_list)) as step:
SurnameListPage(self, the_lang, the_title, ind_list,
SurnameListPage.ORDER_BY_NAME, self.surname_fname)
SurnameListPage(self, the_lang, the_title, ind_list,
SurnameListPage.ORDER_BY_COUNT, "surnames_count")
index = 1
for (surname, handle_list) in local_list:
SurnamePage(self, the_lang, the_title, surname,
sorted(handle_list))
step()
index += 1
def thumbnail_preview_page(self):
"""
creates the thumbnail preview page
"""
if self.create_unused_media:
media_count = len(self._db.get_media_handles())
else:
media_count = len(self.obj_dict[Media])
pgr_title = self.pgrs_title(self.the_lang)
with self.user.progress(pgr_title,
_("Creating thumbnail preview page..."),
media_count) as step:
ThumbnailPreviewPage(self, self.the_lang, self.the_title, step)
def statistics_preview_page(self):
"""
creates the statistics preview page
"""
pgr_title = self.pgrs_title(self.the_lang)
with self.user.progress(pgr_title,
_("Creating statistics page..."),
1) as step:
StatisticsPage(self, self.the_lang, self.the_title, step)
def updates_preview_page(self):
"""
creates the updates page
"""
pgr_title = self.pgrs_title(self.the_lang)
with self.user.progress(pgr_title,
_("Creating updates page..."),
1):
UpdatesPage(self, self.the_lang, self.the_title)
def addressbook_pages(self, ind_list):
"""
Create a webpage with a list of address availability for each person
and the associated individual address pages.
@param: ind_list -- The list of persons to use
"""
url_addr_res = []
for person_handle in ind_list:
person = self._db.get_person_from_handle(person_handle)
addrlist = person.get_address_list()
evt_ref_list = person.get_event_ref_list()
urllist = person.get_url_list()
add = addrlist or None
url = urllist or None
res = []
for event_ref in evt_ref_list:
event = self._db.get_event_from_handle(event_ref.ref)
if event.get_type() == EventType.RESIDENCE:
res.append(event)
if add or res or url:
primary_name = person.get_primary_name()
sort_name = ''.join([primary_name.get_surname(), ", ",
primary_name.get_first_name()])
url_addr_res.append((sort_name, person_handle, add, res, url))
url_addr_res.sort()
AddressBookListPage(self, self.the_lang, self.the_title, url_addr_res)
# begin Address Book pages
addr_size = len(url_addr_res)
message = _("Creating address book pages ...")
pgr_title = self.pgrs_title(self.the_lang)
with self.user.progress(pgr_title, message, addr_size) as step:
index = 1
for (sort_name, person_handle, add, res, url) in url_addr_res:
AddressBookPage(self, self.the_lang, self.the_title,
person_handle, add, res, url)
step()
index += 1
def base_pages(self):
"""
creates HomePage, ContactPage, DownloadPage and IntroductionPage
if requested by options in plugin
"""
if self.use_home:
HomePage(self, self.the_lang, self.the_title)
if self.inc_contact:
ContactPage(self, self.the_lang, self.the_title)
if self.inc_download:
DownloadPage(self, self.the_lang, self.the_title)
if self.use_intro:
IntroductionPage(self, self.the_lang, self.the_title)
def build_subdirs(self, subdir, fname, uplink=False, image=False,
init=False):
"""
If subdir is given, then two extra levels of subdirectory are inserted
between 'subdir' and the filename. The reason is to prevent directories
with too many entries.
For example, this may return "8/1/aec934857df74d36618"
@param: subdir -- The subdirectory name to use
@param: fname -- The file name for which we need to build the path
@param: uplink -- If True, then "../../../" is inserted in front of the
result.
If uplink is None, './' is prepended (used by EventListPage).
@param: image -- We are processing a thumbnail or an image
@param: init -- We are building the objects table.
Don't try to manage the lang.
"""
subdirs = []
if subdir:
subdirs.append(subdir)
subdirs.append(fname[-1].lower())
subdirs.append(fname[-2].lower())
if init:
return subdirs
nb_dir = 0
if self.the_lang and image:
nb_dir = 1
if self.usecms:
if subdir:
if self.the_lang and subdir not in ["css", "images", "thumb"]:
subdirs = [self.target_uri] + [self.the_lang] + subdirs
else:
subdirs = [self.target_uri] + subdirs
elif self.target_uri not in fname:
if self.the_lang and subdir not in ["css", "images", "thumb"]:
subdirs = [self.target_uri] + [self.the_lang] + [fname]
else:
subdirs = [self.target_uri] + [fname]
else:
subdirs = []
else:
if self.the_lang and image and uplink != 2:
if subdir and subdir[0:3] not in ["css", "ima", "thu"]:
subdirs = [self.the_lang] + subdirs
if uplink is True:
nb_dir += 3
subdirs = ['..']*nb_dir + subdirs
elif uplink == 2:
# special case for the add_image method
if subdir and subdir[0:3] in ["css", "ima", "thu"]:
if nb_dir == 1:
subdirs = ['..'] + subdirs
elif uplink is None:
# added for use in EventListPage
subdirs = ['.'] + subdirs
return subdirs
def build_path(self, subdir, fname, uplink=False, image=False):
"""
Return the name of the subdirectory.
Notice that we DO use os.path.join() here.
@param: subdir -- The subdirectory name to use
@param: fname -- The file name for which we need to build the path
@param: uplink -- If True, then "../../../" is inserted in front of the
result.
@param: image -- We are processing a thumbnail or an image
"""
return os.path.join(*self.build_subdirs(subdir, fname, uplink, image))
def build_url_lang(self, fname, subdir=None, uplink=False):
"""
builds a url for an extra language
@param: fname -- The file name for which we need to build the path
@param: subdir -- The subdirectory name to use
@param: uplink -- If True, then "../../../" is inserted in front of the
result.
"""
subdirs = []
if uplink:
nb_dir = 4 if self.the_lang else 3
else:
nb_dir = 1
if self.usecms:
# remove self.target_uri
fname = fname.replace(self.target_uri + "/", "")
# remove the lang
(dummy_1_field, dummy_sep, second_field) = fname.partition("/")
fname = second_field
elif self.the_lang:
(first_field, dummy_sep, second_field) = fname.partition("/")
if [(lang, title) for lang, title in self.languages
if lang == first_field]:
# remove the lang
fname = second_field
if subdir:
subdirs.append(subdir)
if self.usecms:
if self.target_uri not in subdirs:
subdirs = [self.target_uri] + subdirs
else:
subdirs = ['..']*nb_dir + subdirs
nname = "/".join(subdirs + [fname])
if win():
nname = nname.replace('\\', "/")
return nname
def build_url_image(self, fname, subdir=None, uplink=False):
"""
builds a url from an image
@param: fname -- The file name for which we need to build the path
@param: subdir -- The subdirectory name to use
@param: uplink -- If True, then "../../../" is inserted in front of the
result.
"""
subdirs = []
if uplink:
nb_dir = 4 if self.the_lang else 3
else:
nb_dir = 1
if subdir:
subdirs.append(subdir)
if self.usecms:
if self.target_uri not in fname:
subdirs = [self.target_uri] + subdirs
else:
if uplink:
subdirs = ['..']*nb_dir + subdirs
nname = "/".join(subdirs + [fname])
if win():
nname = nname.replace('\\', "/")
return nname
def build_url_fname_html(self, fname, subdir=None, uplink=False):
"""
builds a url filename from html
@param: fname -- The file name to create
@param: subdir -- The subdirectory name to use
@param: uplink -- If True, then "../../../" is inserted in front of the
result.
"""
return self.build_url_fname(fname, subdir, uplink) + self.ext
def build_link(self, prop, handle, obj_class):
"""
Build a link to an item.
@param: prop -- Property
@param: handle -- The handle for which we need to build a link
@param: obj_class -- The class of the related object.
"""
if prop == "gramps_id":
func = self._db.method('get_%s_from_gramps_id', obj_class)
if func:
obj = func(handle)
if obj:
handle = obj.handle
else:
raise AttributeError("gramps_id '%s' not found in '%s'" %
(handle, obj_class))
else:
raise AttributeError("invalid gramps_id lookup "
"in table name '%s'" % obj_class)
uplink = self.link_prefix_up
# handle, ppl
if obj_class == "Person":
if self.person_in_webreport(handle):
return self.build_url_fname(handle, "ppl", uplink) + self.ext
else:
return None
elif obj_class == "Source":
subdir = "src"
elif obj_class == "Place":
subdir = "plc"
elif obj_class == "Event":
subdir = "evt"
elif obj_class == "Media":
subdir = "img"
elif obj_class == "Repository":
subdir = "repo"
elif obj_class == "Family":
subdir = "fam"
else:
print("NarrativeWeb ignoring link type '%s'" % obj_class)
return None
return self.build_url_fname(handle, subdir, uplink) + self.ext
def build_url_fname(self, fname, subdir=None, uplink=False,
image=False, init=False):
"""
Create part of the URL given the filename and optionally the
subdirectory. If the subdirectory is given, then two extra levels of
subdirectory are inserted between 'subdir' and the filename.
The reason is to prevent directories with too many entries.
@param: fname -- The filename to create
@param: subdir -- The subdirectory name to use
@param: uplink -- if True, then "../../../" is inserted in front of the
result.
@param: image -- We are processing a thumbnail or an image
@param: init -- We are building the objects table.
Don't try to manage the lang.
Note that the extension is not appended here; callers add it.
Notice that we do NOT use os.path.join() because we're creating a URL.
"""
if not fname:
return ""
if win():
fname = fname.replace('\\', "/")
if init:
subdirs = self.build_subdirs(subdir, fname, False, init=init)
return "/".join(subdirs + [fname])
fname = fname.replace(self.target_uri + "/", "")
if self.usecms:
if self.the_lang:
if subdir:
subdirs = self.build_subdirs(subdir, fname,
False, image)
if self.target_uri in subdirs and image:
subdirs.remove(self.target_uri)
if subdir[0:3] in ["css", "img", "ima", "thu"]:
subdirs = [self.target_uri] + subdirs
else:
if fname[0:3] in ["css", "img", "ima", "thu"]:
subdirs = [self.target_uri]
elif fname[3:6] in ["css", "img", "ima", "thu"]:
subdirs = [self.target_uri]
fname = fname[3:]
else:
subdirs = [self.target_uri] + [self.the_lang]
else:
if subdir:
subdirs = self.build_subdirs(subdir, fname,
False, image)
else:
subdirs = [self.target_uri]
# remove None values in subdirs; this is related to the lang
if isinstance(subdirs, list):
subdirs = [val for val in subdirs if val is not None]
elif self.the_lang:
(dummy_1_field, separator, second_field) = fname.partition("/")
if separator == "/" and second_field[0:3] in ["ima", "thu"]:
fname = second_field
subdirs = self.build_subdirs(subdir, second_field,
uplink, image)
if not uplink:
subdirs = [".."] + subdirs
else:
subdirs = self.build_subdirs(subdir, fname, uplink, image)
else:
subdirs = self.build_subdirs(subdir, fname, uplink, image)
return "/".join(subdirs + [fname])
def create_file(self, fname, subdir=None, ext=None):
"""
will create the file with the given name
@param: fname -- Filename to be created
@param: subdir -- A subdir to be added to filename
@param: ext -- An extension to be added to filename
"""
if ext is None:
ext = self.ext
if self.usecms and not subdir:
if self.the_lang:
if ext != "index":
target = os.path.join(self.target_uri, self.the_lang)
self.cur_fname = os.path.join(target, fname) + ext
else:
self.cur_fname = os.path.join(self.target_uri,
fname) + self.ext
else:
self.cur_fname = os.path.join(self.target_uri, fname) + ext
else:
if self.the_lang and self.archive:
if subdir:
if not self.usecms:
subdir = os.path.join(self.the_lang, subdir)
elif ext != "index":
fname = os.path.join(self.the_lang, fname)
if subdir:
subdir = self.build_path(subdir, fname)
self.cur_fname = os.path.join(subdir, fname) + ext
else:
if ext == "index":
self.cur_fname = fname + self.ext
else:
self.cur_fname = fname + ext
if self.archive:
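# Archive mode: render the page into an in-memory bytes buffer; the
# TextIOWrapper handles the encoding and close_file() later adds the
# buffer to the tarball.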
string_io = BytesIO()
output_file = TextIOWrapper(string_io, encoding=self.encoding,
errors='xmlcharrefreplace')
else:
string_io = None
if subdir:
if self.the_lang:
subdir = os.path.join(self.html_dir, self.the_lang, subdir)
else:
subdir = os.path.join(self.html_dir, subdir)
else:
if self.the_lang:
subdir = os.path.join(self.html_dir, self.the_lang)
else:
subdir = os.path.join(self.html_dir)
if self.the_lang:
if ext == "index":
self.cur_fname = fname + self.ext
fname = os.path.join(self.html_dir, self.cur_fname)
else:
fname = os.path.join(self.html_dir, self.the_lang,
self.cur_fname)
else:
fname = os.path.join(self.html_dir, self.cur_fname)
dir_name = os.path.dirname(fname)
if not os.path.isdir(dir_name):
os.makedirs(dir_name)
output_file = open(fname, 'w', encoding=self.encoding,
errors='xmlcharrefreplace')
return (output_file, string_io)
def close_file(self, output_file, string_io, date):
"""
will close any file passed to it
@param: output_file -- The output file to flush
@param: string_io -- The string IO used when we are in archive mode
@param: date -- The last modification date for this object
If we have "zero", we use the current time.
This is related to bug #8950 and very useful
when we use rsync.
"""
if self.archive:
if self.cur_fname not in self.archive.getnames():
# The current file is not already archived.
output_file.flush()
tarinfo = tarfile.TarInfo(self.cur_fname)
tarinfo.size = len(string_io.getvalue())
tarinfo.mtime = date if date != 0 else time.time()
if not win():
tarinfo.uid = os.getuid()
tarinfo.gid = os.getgid()
string_io.seek(0)
self.archive.addfile(tarinfo, string_io)
output_file.close()
else:
output_file.close()
if date is not None and date > 0:
os.utime(output_file.name, (date, date))
def prepare_copy_media(self, photo):
"""
prepares a media object to copy
@param: photo -- The photo for which we need a real path
and a thumbnail path
"""
handle = photo.get_handle()
ext = os.path.splitext(photo.get_path())[1]
real_path = os.path.join(self.build_path('images', handle,
uplink=2, image=True),
handle + ext)
thumb_path = os.path.join(self.build_path('thumb', handle,
uplink=2, image=True),
handle + '.png')
return real_path, thumb_path
def copy_file(self, from_fname, to_fname, to_dir=''):
"""
Copy a file from a source to a (report) destination.
If to_dir is not present and if the target is not an archive,
then the destination directory will be created.
@param: from_fname -- The path of the file to copy.
@param: to_fname -- Will be just a filename, without directory path.
@param: to_dir -- Is the relative path name in the destination root.
It will be prepended before 'to_fname'.
"""
if self.usecms:
to_dir = "/".join([self.target_uri, to_dir])
LOG.debug("copying '%s' to '%s/%s'", from_fname, to_dir, to_fname)
mtime = os.stat(from_fname).st_mtime
if self.archive:
def set_mtime(tarinfo):
"""
For each file, we set the last modification time.
We could also set uid, gid, uname, gname and mode
#tarinfo.uid = os.getuid()
#tarinfo.mode = 0660
#tarinfo.uname = tarinfo.gname = "www-data"
"""
tarinfo.mtime = mtime
return tarinfo
dest = os.path.join(to_dir, to_fname)
if dest not in self.archive.getnames():
# The current file is not already archived.
self.archive.add(from_fname, dest, filter=set_mtime)
else:
dest = os.path.join(self.html_dir, to_dir, to_fname)
destdir = os.path.dirname(dest)
if not os.path.isdir(destdir):
os.makedirs(destdir)
if from_fname != dest:
if not os.path.exists(dest):
try:
shutil.copyfile(from_fname, dest)
os.utime(dest, (mtime, mtime))
except Exception as exception:
LOG.exception(exception)
print("Copying error: %s" % sys.exc_info()[1])
print("Continuing...")
elif self.warn_dir:
self.user.warn(
_("Possible destination error") + "\n" +
_("You appear to have set your target directory "
"to a directory used for data storage. This "
"could create problems with file management. "
"It is recommended that you consider using "
"a different directory to store your generated "
"web pages."))
self.warn_dir = False
def person_in_webreport(self, person_handle):
"""
Return the handle if we created a page for this person.
@param: person_handle -- The person we are looking for
"""
return person_handle in self.obj_dict[Person]
def pgrs_title(self, the_lang):
"""Set the user progress popup message depending on the lang."""
if the_lang:
languages = glocale.get_language_dict()
lang = "???"
for language in languages:
if languages[language] == the_lang:
lang = language
break
return _("Narrative Website Report for the %s language") % lang
else:
return _("Narrative Website Report")
#################################################
#
# Creates the NarrativeWeb Report Menu Options
#
#################################################
class NavWebOptions(MenuReportOptions):
"""
Defines options and provides handling interface.
"""
def __init__(self, name, dbase):
"""
@param: name -- The name of the report
@param: dbase -- The Gramps database instance
"""
self.__db = dbase
self.__archive = None
self.__target = None
self.__target_uri = None
self.__pid = None
self.__filter = None
self.__graph = None
self.__graphgens = None
self.__living = None
self.__yearsafterdeath = None
self.__usecms = None
self.__cms_uri = None
self.__usecal = None
self.__calendar_uri = None
self.__create_thumbs_only = None
self.__create_images_index = None
self.__create_thumbs_index = None
self.__mapservice = None
self.__maxinitialimagewidth = None
self.__citationreferents = None
self.__incdownload = None
self.__max_download = 4 # one more than the real maximum: in reality 3 downloads
self.__nbdownload = None
self.__dl_descr = {}
self.__down_fname = {}
self.__placemappages = None
self.__familymappages = None
self.__stamenopts = None
self.__googleopts = None
self.__googlemapkey = None
self.__ancestortree = None
self.__css = None
self.__gallery = None
self.__updates = None
self.__maxdays = None
self.__maxupdates = None
self.__unused = None
self.__navigation = None
self.__securesite = False
self.__extra_page_name = None
self.__extra_page = None
self.__relation = False
self.__prevnext = False
self.__multitrans = False
self.__lang_2 = None
self.__lang_3 = None
self.__lang_4 = None
self.__lang_5 = None
self.__lang_6 = None
self.__titl_2 = None
self.__titl_3 = None
self.__titl_4 = None
self.__titl_5 = None
self.__titl_6 = None
self.__start_dow = None
self.__maiden_name = None
self.__makeoneday = None
self.__birthdays = None
self.__anniv = None
self.__alive = None
self.__toggle = None
self.__death_anniv = None
self.__after_year = None
self.__ext = None
self.__phpnote = None
db_options = name + ' ' + dbase.get_dbname()
MenuReportOptions.__init__(self, db_options, dbase)
def add_menu_options(self, menu):
"""
Add options to the menu for the website.
@param: menu -- The menu for which we add options
"""
self.__add_report_options(menu)
self.__add_report_html(menu)
self.__add_report_display(menu)
self.__add_page_generation_options(menu)
self.__add_more_pages(menu)
self.__add_images_generation_options(menu)
self.__add_download_options(menu)
self.__add_advanced_options(menu)
self.__add_advanced_options_2(menu)
self.__add_place_map_options(menu)
self.__add_others_options(menu)
self.__add_translations(menu)
self.__add_calendar_options(menu)
def __add_report_options(self, menu):
"""
Options on the "Report Options" tab.
@param: menu -- The menu for which we add options
"""
category_name = _("Report Options")
addopt = partial(menu.add_option, category_name)
self.__archive = BooleanOption(_('Store website in .tar.gz archive'),
False)
self.__archive.set_help(_('Whether to store the website in an '
'archive file'))
addopt("archive", self.__archive)
self.__archive.connect('value-changed', self.__archive_changed)
dbname = self.__db.get_dbname()
default_dir = dbname + "_" + "NAVWEB"
self.__target = DestinationOption(
_("Destination"),
os.path.join(config.get('paths.website-directory'),
default_dir))
self.__target.set_help(_("The destination directory for the web "
"files"))
addopt("target", self.__target)
self.__archive_changed()
title = StringOption(_("Website title"), _('My Family Tree'))
title.set_help(_("The title of the website"))
addopt("title", title)
self.__filter = FilterOption(_("Filter"), 0)
self.__filter.set_help(
_("Select filter to restrict people that appear on the website"))
addopt("filter", self.__filter)
self.__filter.connect('value-changed', self.__filter_changed)
self.__pid = PersonOption(_("Filter Person"))
self.__pid.set_help(_("The center person for the filter"))
addopt("pid", self.__pid)
self.__pid.connect('value-changed', self.__update_filters)
self.__relation = BooleanOption(_("Show the relationship between the "
"current person and the active person"
), False)
self.__relation.set_help(_("For each person page, show the relationship"
" between this person and the active person."
))
addopt("relation", self.__relation)
self.__update_filters()
stdoptions.add_living_people_option(menu, category_name)
stdoptions.add_private_data_option(menu, category_name, default=False)
def __add_report_html(self, menu):
"""
Html Options for the Report.
@param: menu -- The menu for which we add options
"""
category_name = _("Html options")
addopt = partial(menu.add_option, category_name)
self.__ext = EnumeratedListOption(_("File extension"), ".html")
for etype in _WEB_EXT:
self.__ext.add_item(etype, etype)
self.__ext.set_help(_("The extension to be used for the web files"))
addopt("ext", self.__ext)
self.__ext.connect("value-changed", self.__ext_changed)
cright = EnumeratedListOption(_('Copyright'), 0)
for index, copt in enumerate(_COPY_OPTIONS):
cright.add_item(index, copt)
cright.set_help(_("The copyright to be used for the web files"))
addopt("cright", cright)
self.__css = EnumeratedListOption(_('StyleSheet'),
CSS["Basic-Ash"]["id"])
for (dummy_fname, gid) in sorted(
[(CSS[key]["translation"], CSS[key]["id"])
for key in list(CSS.keys())]):
if CSS[gid]["user"]:
self.__css.add_item(CSS[gid]["id"], CSS[gid]["translation"])
self.__css.set_help(_('The default stylesheet to be used for'
' the pages'))
addopt("css", self.__css)
self.__css.connect("value-changed", self.__stylesheet_changed)
_nav_opts = [
(_("Horizontal -- Default"), "Horizontal"),
(_("Vertical -- Left Side"), "Vertical"),
(_("Fade -- WebKit Browsers Only"), "Fade"),
(_("Drop-Down -- WebKit Browsers Only"), "dropdown")
]
self.__navigation = EnumeratedListOption(_("Navigation Menu Layout"),
_nav_opts[0][1])
for layout in _nav_opts:
self.__navigation.add_item(layout[1], layout[0])
self.__navigation.set_help(_("Choose which layout "
"for the Navigation Menus."))
addopt("navigation", self.__navigation)
self.__stylesheet_changed()
_cit_opts = [
(_("Normal Outline Style"), "Outline"),
(_("Drop-Down -- WebKit Browsers Only"), "DropDown")
]
self.__citationreferents = EnumeratedListOption(
_("Citation Referents Layout"), _cit_opts[0][1])
for layout in _cit_opts:
self.__citationreferents.add_item(layout[1], layout[0])
self.__citationreferents.set_help(
_("Determine the default layout for the "
"Source Page's Citation Referents section"))
addopt("citationreferents", self.__citationreferents)
self.__ancestortree = BooleanOption(_("Include ancestor's tree"), True)
self.__ancestortree.set_help(_('Whether to include an ancestor '
'graph on each individual page'))
addopt("ancestortree", self.__ancestortree)
self.__ancestortree.connect('value-changed', self.__graph_changed)
self.__prevnext = BooleanOption(_("Add previous/next"), False)
self.__prevnext.set_help(_("Add previous/next to the navigation bar."))
addopt("prevnext", self.__prevnext)
self.__securesite = BooleanOption(_("This is a secure site (HTTPS)"),
False)
self.__securesite.set_help(_('Whether to use http:// or https://'))
addopt("securesite", self.__securesite)
self.__toggle = BooleanOption(_("Toggle sections"), False)
self.__toggle.set_help(_('Check it if you want to open/close'
' a section'))
addopt("toggle", self.__toggle)
def __add_more_pages(self, menu):
"""
Add more extra pages to the report
@param: menu -- The menu for which we add options
"""
category_name = _("Extra pages")
addopt = partial(menu.add_option, category_name)
default_path_name = config.get('paths.website-extra-page-name')
self.__extra_page_name = StringOption(_("Extra page name"),
default_path_name)
        self.__extra_page_name.set_help(
            _("Your extra page name as it is shown in the menu bar"))
self.__extra_page_name.connect('value-changed',
self.__extra_page_name_changed)
addopt("extrapagename", self.__extra_page_name)
default_path = config.get('paths.website-extra-page-uri')
self.__extra_page = DestinationOption(_("Your extra page path"),
default_path)
self.__extra_page.set_help(
_("Your extra page path without extension"))
self.__extra_page.connect('value-changed', self.__extra_page_changed)
addopt("extrapage", self.__extra_page)
def __add_report_display(self, menu):
"""
        How to display names, dates, ...
@param: menu -- The menu for which we add options
"""
category_name = _("Display")
addopt = partial(menu.add_option, category_name)
stdoptions.add_name_format_option(menu, category_name)
self.__multitrans = BooleanOption(
_('Do we use multiple translations?'), False)
self.__multitrans.set_help(
_('Whether to display the narrative web in multiple languages.'
'\nSee the translation tab to add new languages to the default'
' one defined in the next field.'))
addopt("multitrans", self.__multitrans)
self.__multitrans.connect('value-changed',
self.__activate_translations)
locale_opt = stdoptions.add_localization_option(menu, category_name)
stdoptions.add_date_format_option(menu, category_name, locale_opt)
stdoptions.add_gramps_id_option(menu, category_name)
stdoptions.add_tags_option(menu, category_name)
birthorder = BooleanOption(
_('Sort all children in birth order'), False)
birthorder.set_help(
_('Whether to display children in birth order or in entry order.'))
addopt("birthorder", birthorder)
coordinates = BooleanOption(
_('Do we display coordinates in the places list?'), False)
coordinates.set_help(
_('Whether to display latitude/longitude in the places list.'))
addopt("coordinates", coordinates)
reference_sort = BooleanOption(
_('Sort places references either by date or by name'), False)
reference_sort.set_help(
_('Sort the places references by date or by name.'
' Not set means by date.'))
addopt("reference_sort", reference_sort)
self.__graphgens = NumberOption(_("Graph generations"), 4, 2, 20)
self.__graphgens.set_help(_("The number of generations to include in "
"the ancestor graph"))
addopt("graphgens", self.__graphgens)
self.__graph_changed()
notes = BooleanOption(
_('Include narrative notes just after name, gender'), True)
notes.set_help(
_('Include narrative notes just after name, gender and'
' age at death (default) or include them just before'
' attributes.'))
addopt("notes", notes)
def __add_page_generation_options(self, menu):
"""
Options on the "Page Generation" tab.
@param: menu -- The menu for which we add options
"""
category_name = _("Page Generation")
addopt = partial(menu.add_option, category_name)
homenote = NoteOption(_('Home page note'))
homenote.set_help(_("A note to be used on the home page"))
addopt("homenote", homenote)
homeimg = MediaOption(_('Home page image'))
homeimg.set_help(_("An image to be used on the home page"))
addopt("homeimg", homeimg)
intronote = NoteOption(_('Introduction note'))
intronote.set_help(_("A note to be used as the introduction"))
addopt("intronote", intronote)
introimg = MediaOption(_('Introduction image'))
introimg.set_help(_("An image to be used as the introduction"))
addopt("introimg", introimg)
contactnote = NoteOption(_("Publisher contact note"))
contactnote.set_help(_("A note to be used as the publisher contact."
"\nIf no publisher information is given,"
"\nno contact page will be created")
)
addopt("contactnote", contactnote)
contactimg = MediaOption(_("Publisher contact image"))
contactimg.set_help(_("An image to be used as the publisher contact."
"\nIf no publisher information is given,"
"\nno contact page will be created")
)
addopt("contactimg", contactimg)
headernote = NoteOption(_('HTML user header'))
headernote.set_help(_("A note to be used as the page header"
" or a PHP code to insert."))
addopt("headernote", headernote)
footernote = NoteOption(_('HTML user footer'))
footernote.set_help(_("A note to be used as the page footer"))
addopt("footernote", footernote)
# This option will be available only if you select ".php" in the
# "File extension" from the "Html" tab
self.__phpnote = NoteOption(_('PHP user session'))
self.__phpnote.set_help(_("A note to use for starting the php session."
"\nThis option will be available only if "
"the .php file extension is selected."))
addopt("phpnote", self.__phpnote)
def __add_images_generation_options(self, menu):
"""
Options on the "Page Generation" tab.
@param: menu -- The menu for which we add options
"""
category_name = _("Images Generation")
addopt = partial(menu.add_option, category_name)
self.__gallery = BooleanOption(_("Include images and media objects"),
True)
self.__gallery.set_help(_('Whether to include '
'a gallery of media objects'))
addopt("gallery", self.__gallery)
self.__gallery.connect('value-changed', self.__gallery_changed)
self.__create_images_index = BooleanOption(
_("Create the images index"), False)
self.__create_images_index.set_help(
_("This option allows you to create the images index"))
addopt("create_images_index", self.__create_images_index)
self.__create_images_index.connect("value-changed",
self.__gallery_changed)
self.__unused = BooleanOption(
_("Include unused images and media objects"), False)
self.__unused.set_help(_('Whether to include unused or unreferenced'
' media objects'))
addopt("unused", self.__unused)
self.__create_thumbs_only = BooleanOption(
_("Create and only use thumbnail- sized images"), False)
self.__create_thumbs_only.set_help(
_("This option allows you to create only thumbnail images "
"instead of the full-sized images on the Media Page. "
"This will allow you to have a much "
"smaller total upload size to your web hosting site."))
addopt("create_thumbs_only", self.__create_thumbs_only)
self.__create_thumbs_only.connect("value-changed",
self.__gallery_changed)
self.__create_thumbs_index = BooleanOption(
_("Create the thumbnail index"), False)
self.__create_thumbs_index.set_help(
_("This option allows you to create the thumbnail index"))
addopt("create_thumbs_index", self.__create_thumbs_index)
self.__create_thumbs_index.connect("value-changed",
self.__gallery_changed)
self.__maxinitialimagewidth = NumberOption(
_("Max width of initial image"), _DEFAULT_MAX_IMG_WIDTH, 0, 2000)
self.__maxinitialimagewidth.set_help(
_("This allows you to set the maximum width "
"of the image shown on the media page."))
addopt("maxinitialimagewidth", self.__maxinitialimagewidth)
self.__gallery_changed()
def __add_download_options(self, menu):
"""
Options for the download tab ...
@param: menu -- The menu for which we add options
"""
category_name = _("Download")
addopt = partial(menu.add_option, category_name)
self.__incdownload = BooleanOption(_("Include download page"), False)
self.__incdownload.set_help(
_('Whether to include a database download option'))
addopt("incdownload", self.__incdownload)
self.__incdownload.connect('value-changed', self.__download_changed)
self.__nbdownload = NumberOption(_("How many downloads"),
2, 1, self.__max_download-1)
self.__nbdownload.set_help(_("The number of download files to include "
"in the download page"))
addopt("nbdownload", self.__nbdownload)
self.__nbdownload.connect('value-changed', self.__download_changed)
for count in range(1, self.__max_download):
            fnamex = 'down_fname%d' % count
            descrx = 'dl_descr%d' % count
wdir = os.path.join(config.get('paths.website-directory'), "")
__down_fname = DestinationOption(_("Download Filename #%c") %
str(count), wdir)
__down_fname.set_help(
_("File to be used for downloading of database"))
addopt(fnamex, __down_fname)
self.__down_fname[count] = __down_fname
__dl_descr = StringOption(_("Description for download"),
_('Family Tree #%c') % str(count))
__dl_descr.set_help(_('Give a description for this file.'))
addopt(descrx, __dl_descr)
self.__dl_descr[count] = __dl_descr
self.__download_changed()
def __add_advanced_options(self, menu):
"""
Options on the "Advanced" tab.
@param: menu -- The menu for which we add options
"""
category_name = _("Advanced Options")
addopt = partial(menu.add_option, category_name)
encoding = EnumeratedListOption(_('Character set encoding'),
_CHARACTER_SETS[0][1])
for eopt in _CHARACTER_SETS:
encoding.add_item(eopt[1], eopt[0])
encoding.set_help(_("The encoding to be used for the web files"))
addopt("encoding", encoding)
linkhome = BooleanOption(
_('Include link to active person on every page'), False)
linkhome.set_help(
_('Include a link to the active person (if they have a webpage)'))
addopt("linkhome", linkhome)
showbirth = BooleanOption(
_("Include a column for birth dates on the index pages"), True)
showbirth.set_help(_('Whether to include a birth column'))
addopt("showbirth", showbirth)
showdeath = BooleanOption(
_("Include a column for death dates on the index pages"), False)
showdeath.set_help(_('Whether to include a death column'))
addopt("showdeath", showdeath)
showpartner = BooleanOption(_("Include a column for partners on the "
"index pages"), False)
showpartner.set_help(_('Whether to include a partners column'))
        addopt("showpartner", showpartner)
showparents = BooleanOption(_("Include a column for parents on the "
"index pages"), False)
showparents.set_help(_('Whether to include a parents column'))
addopt("showparents", showparents)
showallsiblings = BooleanOption(
_("Include half and/or step-siblings on the individual pages"),
False)
showallsiblings.set_help(
_("Whether to include half and/or "
"step-siblings with the parents and siblings"))
addopt('showhalfsiblings', showallsiblings)
def __add_advanced_options_2(self, menu):
"""
Continue options on the "Advanced" tab.
@param: menu -- The menu for which we add options
"""
category_name = _("Include")
addopt = partial(menu.add_option, category_name)
inc_families = BooleanOption(_("Include family pages"), False)
inc_families.set_help(_("Whether or not to include family pages."))
addopt("inc_families", inc_families)
inc_events = BooleanOption(_('Include event pages'), False)
inc_events.set_help(
_('Add a complete events list and relevant pages or not'))
addopt("inc_events", inc_events)
inc_places = BooleanOption(_('Include place pages'), False)
inc_places.set_help(
_('Whether or not to include the place pages.'))
addopt("inc_places", inc_places)
inc_uplaces = BooleanOption(_('Include unused place pages'), False)
inc_uplaces.set_help(
_('Whether or not to include the unused place pages.'))
addopt("inc_uplaces", inc_uplaces)
inc_sources = BooleanOption(_('Include source pages'), False)
inc_sources.set_help(
_('Whether or not to include the source pages.'))
addopt("inc_sources", inc_sources)
inc_repository = BooleanOption(_('Include repository pages'), False)
inc_repository.set_help(
_('Whether or not to include the repository pages.'))
addopt("inc_repository", inc_repository)
inc_gendex = BooleanOption(
_('Include GENDEX file (/gendex.txt)'), False)
inc_gendex.set_help(_('Whether to include a GENDEX file or not'))
addopt("inc_gendex", inc_gendex)
inc_addressbook = BooleanOption(_("Include address book pages"), False)
        inc_addressbook.set_help(_("Whether or not to add Address Book pages, "
                                   "which can include e-mail and website "
                                   "addresses and personal address/residence "
                                   "events."))
addopt("inc_addressbook", inc_addressbook)
inc_statistics = BooleanOption(_("Include the statistics page"), False)
inc_statistics.set_help(_("Whether or not to add statistics page"))
addopt("inc_stats", inc_statistics)
def __add_place_map_options(self, menu):
"""
options for the Place Map tab.
@param: menu -- The menu for which we add options
"""
category_name = _("Place Map Options")
addopt = partial(menu.add_option, category_name)
mapopts = [
[_("OpenStreetMap"), "OpenStreetMap"],
[_("Stamen Map"), "StamenMap"],
[_("Google"), "Google"]]
self.__mapservice = EnumeratedListOption(_("Map Service"),
mapopts[0][1])
for trans, opt in mapopts:
self.__mapservice.add_item(opt, trans)
        self.__mapservice.set_help(_("Choose which map service to use for "
                                     "creating the Place Map Pages."))
self.__mapservice.connect("value-changed", self.__placemap_options)
addopt("mapservice", self.__mapservice)
self.__placemappages = BooleanOption(
_("Include Place map on Place Pages"), False)
self.__placemappages.set_help(
_("Whether to include a place map on the Place Pages, "
"where Latitude/ Longitude are available."))
self.__placemappages.connect("value-changed", self.__placemap_options)
addopt("placemappages", self.__placemappages)
self.__familymappages = BooleanOption(_("Include Family Map Pages with "
"all places shown on the map"),
False)
self.__familymappages.set_help(
_("Whether or not to add an individual page map "
"showing all the places on this page. "
"This will allow you to see how your family "
"traveled around the country."))
self.__familymappages.connect("value-changed", self.__placemap_options)
addopt("familymappages", self.__familymappages)
googleopts = [
(_("Family Links"), "FamilyLinks"),
(_("Drop"), "Drop"),
(_("Markers"), "Markers")]
        self.__googleopts = EnumeratedListOption(_("Google/FamilyMap Option"),
googleopts[0][1])
for trans, opt in googleopts:
self.__googleopts.add_item(opt, trans)
        self.__googleopts.set_help(
            _("Select which option you would like for the "
              "Google Maps family-map pages."))
addopt("googleopts", self.__googleopts)
self.__googlemapkey = StringOption(_("Google maps API key"), "")
self.__googlemapkey.set_help(_("The API key used for the Google maps"))
addopt("googlemapkey", self.__googlemapkey)
stamenopts = [
(_("Toner"), "toner"),
(_("Terrain"), "terrain"),
(_("WaterColor"), "watercolor")]
self.__stamenopts = EnumeratedListOption(_("Stamen Option"),
stamenopts[0][1])
for trans, opt in stamenopts:
self.__stamenopts.add_item(opt, trans)
        self.__stamenopts.set_help(
            _("Select which option you would like for the "
              "Stamen map pages."))
addopt("stamenopts", self.__stamenopts)
self.__placemap_options()
def __add_others_options(self, menu):
"""
Options for the cms tab, web calendar inclusion, PHP ...
@param: menu -- The menu for which we add options
"""
category_name = _("Other inclusion (CMS, web calendar, PHP)")
addopt = partial(menu.add_option, category_name)
self.__usecms = BooleanOption(
_("Do we include these pages in a CMS web?"), False)
self.__usecms.connect('value-changed', self.__usecms_changed)
addopt("usecms", self.__usecms)
default_dir = "/NAVWEB"
self.__cms_uri = DestinationOption(_("URI"),
os.path.join(
config.get(
'paths.website-cms-uri'),
default_dir))
self.__cms_uri.set_help(
_("Where do you place your website? default = /NAVWEB"))
self.__cms_uri.connect('value-changed', self.__cms_uri_changed)
addopt("cmsuri", self.__cms_uri)
self.__cms_uri_changed()
self.__graph_changed()
self.__updates = BooleanOption(_("Include the news and updates page"),
True)
self.__updates.set_help(_('Whether to include '
'a page with the last updates'))
self.__updates.connect('value-changed', self.__updates_changed)
addopt("updates", self.__updates)
self.__maxdays = NumberOption(_("Max days for updates"), 1, 1, 300)
        self.__maxdays.set_help(_("The number of days back to look"
                                  " for updates"))
addopt("maxdays", self.__maxdays)
self.__maxupdates = NumberOption(_("Max number of updates per object"
" to show"), 1, 1, 100)
        self.__maxupdates.set_help(_("The maximum number of updates to show"
                                     " per object"))
addopt("maxupdates", self.__maxupdates)
def __add_translations(self, menu):
"""
Options for selecting multiple languages. The default one is
displayed in the display tab. If the option "use multiple
languages" is not selected, all the fields in this menu will be
grayed out.
@param: menu -- The menu for which we add options
"""
category_name = _("Translations")
addopt = partial(menu.add_option, category_name)
mess = _("second language")
self.__lang_2 = stdoptions.add_extra_localization_option(menu,
category_name,
mess, "lang2")
self.__titl_2 = StringOption(_("Site name for your second language"),
_('This site title'))
self.__titl_2.set_help(_('Enter a title in the respective language'))
addopt("title2", self.__titl_2)
mess = _("third language")
self.__lang_3 = stdoptions.add_extra_localization_option(menu,
category_name,
mess, "lang3")
self.__titl_3 = StringOption(_("Site name for your third language"),
_('This site title'))
self.__titl_3.set_help(_('Enter a title in the respective language'))
addopt("title3", self.__titl_3)
mess = _("fourth language")
self.__lang_4 = stdoptions.add_extra_localization_option(menu,
category_name,
mess, "lang4")
self.__titl_4 = StringOption(_("Site name for your fourth language"),
_('This site title'))
self.__titl_4.set_help(_('Enter a title in the respective language'))
addopt("title4", self.__titl_4)
mess = _("fifth language")
self.__lang_5 = stdoptions.add_extra_localization_option(menu,
category_name,
mess, "lang5")
self.__titl_5 = StringOption(_("Site name for your fifth language"),
_('This site title'))
self.__titl_5.set_help(_('Enter a title in the respective language'))
addopt("title5", self.__titl_5)
mess = _("sixth language")
self.__lang_6 = stdoptions.add_extra_localization_option(menu,
category_name,
mess, "lang6")
self.__titl_6 = StringOption(_("Site name for your sixth language"),
_('This site title'))
self.__titl_6.set_help(_('Enter a title in the respective language'))
addopt("title6", self.__titl_6)
def __activate_translations(self):
"""
Make the possible extra languages selectable.
"""
status = self.__multitrans.get_value()
self.__lang_2.set_available(status)
self.__titl_2.set_available(status)
self.__lang_3.set_available(status)
self.__titl_3.set_available(status)
self.__lang_4.set_available(status)
self.__titl_4.set_available(status)
self.__lang_5.set_available(status)
self.__titl_5.set_available(status)
self.__lang_6.set_available(status)
self.__titl_6.set_available(status)
def __updates_changed(self):
"""
        Handle the change of the 'include updates page' option
"""
_updates_option = self.__updates.get_value()
if _updates_option:
self.__maxupdates.set_available(True)
self.__maxdays.set_available(True)
else:
self.__maxupdates.set_available(False)
self.__maxdays.set_available(False)
def __ext_changed(self):
"""
The file extension changed.
If .php selected, we must set the PHP user session available
"""
if self.__ext.get_value()[:4] == ".php":
self.__phpnote.set_available(True)
else:
self.__phpnote.set_available(False)
def __usecms_changed(self):
"""
We need to use CMS or not
If we use a CMS, the storage must be an archive
"""
if self.__usecms.get_value():
self.__archive.set_value(True)
self.__target_uri = self.__cms_uri.get_value()
def __cms_uri_changed(self):
"""
        Update the target URI when the CMS URI changes
"""
self.__target_uri = self.__cms_uri.get_value()
def __extra_page_name_changed(self):
"""
Update the change of the extra page name
"""
extra_page_name = self.__extra_page_name.get_value()
if extra_page_name != "":
config.set('paths.website-extra-page-name', extra_page_name)
def __extra_page_changed(self):
"""
Update the change of the extra page without extension
"""
extra_page = self.__extra_page.get_value()
if extra_page != "":
config.set('paths.website-extra-page-uri', extra_page)
def __archive_changed(self):
"""
Update the change of storage: archive or directory
"""
        if self.__archive.get_value():
self.__target.set_extension(".tar.gz")
self.__target.set_directory_entry(False)
else:
self.__target.set_directory_entry(True)
# We don't use an archive. If usecms is True, set it to False
            if self.__usecms.get_value():
self.__usecms.set_value(False)
def __update_filters(self):
"""
Update the filter list based on the selected person
"""
gid = self.__pid.get_value()
person = self.__db.get_person_from_gramps_id(gid)
filter_list = utils.get_person_filters(person, include_single=False)
self.__filter.set_filters(filter_list)
def __filter_changed(self):
"""
Handle filter change. If the filter is not specific to a person,
disable the "Person" option
"""
filter_value = self.__filter.get_value()
if filter_value == 0: # "Entire Database" (as "include_single=False")
self.__pid.set_available(False)
else:
# The other filters need a center person (assume custom ones too)
self.__pid.set_available(True)
def __stylesheet_changed(self):
"""
Handles the changing nature of the stylesheet
"""
css_opts = self.__css.get_value()
if CSS[css_opts]["navigation"]:
self.__navigation.set_available(True)
else:
self.__navigation.set_available(False)
self.__navigation.set_value("Horizontal")
def __graph_changed(self):
"""
Handle enabling or disabling the ancestor graph
"""
self.__graphgens.set_available(self.__ancestortree.get_value())
def __gallery_changed(self):
"""
Handles the changing nature of gallery
"""
_gallery_option = self.__gallery.get_value()
_create_thumbs_only_option = self.__create_thumbs_only.get_value()
        # images and media objects are to be used; make all options available...
if _gallery_option:
self.__create_thumbs_only.set_available(True)
self.__maxinitialimagewidth.set_available(True)
self.__create_images_index.set_available(True)
self.__create_thumbs_index.set_available(True)
self.__unused.set_available(True)
# thumbnail-sized images only...
if _create_thumbs_only_option:
self.__maxinitialimagewidth.set_available(False)
            # full-sized images and Media Pages will be created...
else:
self.__maxinitialimagewidth.set_available(True)
# no images or media objects are to be used...
else:
self.__create_thumbs_only.set_available(False)
self.__maxinitialimagewidth.set_available(False)
self.__create_images_index.set_available(False)
self.__create_thumbs_index.set_available(False)
self.__unused.set_available(False)
def __download_changed(self):
"""
Handles the changing nature of include download page
"""
if self.__incdownload.get_value():
self.__nbdownload.set_available(True)
for count in range(1, self.__max_download):
if count <= self.__nbdownload.get_value():
self.__down_fname[count].set_available(True)
self.__dl_descr[count].set_available(True)
else:
self.__down_fname[count].set_available(False)
self.__dl_descr[count].set_available(False)
        else:
            self.__nbdownload.set_available(False)
            for count in range(1, self.__max_download):
                self.__down_fname[count].set_available(False)
                self.__dl_descr[count].set_available(False)
def __placemap_options(self):
"""
Handles the changing nature of the "Place map" options
"""
# get values for all Place Map Options tab...
place_active = self.__placemappages.get_value()
family_active = self.__familymappages.get_value()
mapservice_opts = self.__mapservice.get_value()
#google_opts = self.__googleopts.get_value()
if place_active or family_active:
self.__mapservice.set_available(True)
else:
self.__mapservice.set_available(False)
if mapservice_opts == "StamenMap":
self.__stamenopts.set_available(True)
else:
self.__stamenopts.set_available(False)
if family_active and mapservice_opts == "Google":
self.__googleopts.set_available(True)
else:
self.__googleopts.set_available(False)
if (place_active or family_active) and mapservice_opts == "Google":
self.__googlemapkey.set_available(True)
else:
self.__googlemapkey.set_available(False)
def __add_calendar_options(self, menu):
"""
Options on the "Calendar Options" tab.
"""
category_name = _("Calendar Options")
addopt = partial(menu.add_option, category_name)
# set to today's date for use in menu, etc.
today = Today()
self.__usecal = BooleanOption(
_("Do we include the web calendar ?"), False)
        self.__usecal.set_help(_('Whether to include '
                                 'a calendar for year %s') % today.get_year())
self.__usecal.connect('value-changed', self.__usecal_changed)
addopt("usecal", self.__usecal)
self.__start_dow = EnumeratedListOption(_("First day of week"), 1)
for count in range(1, 8):
self.__start_dow.add_item(count, _dd.long_days[count].capitalize())
self.__start_dow.set_help(_("Select the first day of the week "
"for the calendar"))
menu.add_option(category_name, "start_dow", self.__start_dow)
maiden_name = EnumeratedListOption(_("Birthday surname"), "own")
maiden_name.add_item('spouse_first', _("Wives use husband's surname "
"(from first family listed)"))
maiden_name.add_item('spouse_last', _("Wives use husband's surname "
"(from last family listed)"))
maiden_name.add_item("own", _("Wives use their own surname"))
maiden_name.set_help(_("Select married women's displayed surname"))
menu.add_option(category_name, "maiden_name", maiden_name)
self.__maiden_name = maiden_name
self.__makeoneday = BooleanOption(_('Create one day event pages for'
' Year At A Glance calendar'),
False)
self.__makeoneday.set_help(_('Whether to create one day pages or not'))
menu.add_option(category_name, 'makeoneday', self.__makeoneday)
self.__birthdays = BooleanOption(_("Include birthdays"), True)
self.__birthdays.set_help(_("Include birthdays in the calendar"))
menu.add_option(category_name, "birthdays", self.__birthdays)
self.__anniv = BooleanOption(_("Include anniversaries"), True)
self.__anniv.set_help(_("Include anniversaries in the calendar"))
menu.add_option(category_name, "anniversaries", self.__anniv)
self.__death_anniv = BooleanOption(_('Include death dates'), False)
self.__death_anniv.set_help(_('Include death anniversaries in '
'the calendar'))
menu.add_option(category_name, 'death_anniv', self.__death_anniv)
self.__alive = BooleanOption(_("Include only living people"), True)
self.__alive.set_help(_("Include only living people in the calendar"))
menu.add_option(category_name, "alive", self.__alive)
default_before = config.get('behavior.max-age-prob-alive')
self.__after_year = NumberOption(_('Show data only after year'),
(today.get_year() - default_before),
0, today.get_year())
        self.__after_year.set_help(_("Show data only after this year."
                                     " Default is current year minus"
                                     " 'maximum age probably alive' which is"
                                     " defined in the dates preference tab."))
menu.add_option(category_name, 'after_year', self.__after_year)
def __usecal_changed(self):
"""
Do we need to choose calendar options ?
"""
if self.__usecal.get_value():
self.__start_dow.set_available(True)
self.__maiden_name.set_available(True)
self.__makeoneday.set_available(True)
self.__birthdays.set_available(True)
self.__anniv.set_available(True)
self.__alive.set_available(True)
self.__death_anniv.set_available(True)
self.__after_year.set_available(True)
else:
self.__start_dow.set_available(False)
self.__maiden_name.set_available(False)
self.__makeoneday.set_available(False)
self.__birthdays.set_available(False)
self.__anniv.set_available(False)
self.__alive.set_available(False)
self.__death_anniv.set_available(False)
self.__after_year.set_available(False)
# See: http://www.gramps-project.org/bugs/view.php?id=4423
# Contraction data taken from CLDR 22.1. Only the default variant is considered.
# The languages included below are, by no means, all the languages that have
# contractions - just a sample of languages that have been supported
# At the time of writing (Feb 2013), the following languages have greater than
# 50% coverage of translation of Gramps: bg Bulgarian, ca Catalan, cs Czech, da
# Danish, de German, el Greek, en_GB, es Spanish, fi Finnish, fr French, he
# Hebrew, hr Croatian, hu Hungarian, it Italian, ja Japanese, lt Lithuanian, nb
# Norwegian Bokmål, nn Norwegian Nynorsk, nl Dutch, pl Polish, pt_BR Portuguese
# (Brazil), pt_PT Portuguese (Portugal), ru Russian, sk Slovak, sl Slovenian, sv
# Swedish, vi Vietnamese, zh_CN Chinese.
# Key is the language (or language and country), Value is a list of
# contractions. Each contraction consists of a tuple. First element of the
# tuple is the list of characters, second element is the string to use as the
# index entry.
# The DUCET contractions (e.g. LATIN CAPITAL LETTER L, MIDDLE DOT) are ignored,
# as are the suppressContractions in some locales.
CONTRACTIONS_DICT = {
# bg Bulgarian validSubLocales="bg_BG" no contractions
# ca Catalan validSubLocales="ca_AD ca_ES"
"ca" : [(("l·", "L·"), "L")],
# Czech, validSubLocales="cs_CZ" Czech_Czech Republic
"cs" : [(("ch", "cH", "Ch", "CH"), "CH")],
# Danish validSubLocales="da_DK" Danish_Denmark
"da" : [(("aa", "Aa", "AA"), "Å")],
# de German validSubLocales="de_AT de_BE de_CH de_DE de_LI de_LU" no
# contractions in standard collation.
# el Greek validSubLocales="el_CY el_GR" no contractions.
# es Spanish validSubLocales="es_419 es_AR es_BO es_CL es_CO es_CR es_CU
# es_DO es_EA es_EC es_ES es_GQ es_GT es_HN es_IC es_MX es_NI es_PA es_PE
# es_PH es_PR es_PY es_SV es_US es_UY es_VE" no contractions in standard
# collation.
# fi Finnish validSubLocales="fi_FI" no contractions in default (phonebook)
# collation.
# fr French no collation data.
# he Hebrew validSubLocales="he_IL" no contractions
# hr Croatian validSubLocales="hr_BA hr_HR"
"hr" : [(("dž", "Dž"), "dž"),
(("lj", "Lj", 'LJ'), "LJ"),
(("Nj", "NJ", "nj"), "NJ")],
# Hungarian hu_HU for two and three character contractions.
"hu" : [(("cs", "Cs", "CS"), "CS"),
(("dzs", "Dzs", "DZS"), "DZS"), # order is important
(("dz", "Dz", "DZ"), "DZ"),
(("gy", "Gy", "GY"), "GY"),
(("ly", "Ly", "LY"), "LY"),
(("ny", "Ny", "NY"), "NY"),
(("sz", "Sz", "SZ"), "SZ"),
(("ty", "Ty", "TY"), "TY"),
(("zs", "Zs", "ZS"), "ZS")
],
# it Italian no collation data.
# ja Japanese unable to process the data as it is too complex.
# lt Lithuanian no contractions.
# Norwegian Bokmål
"nb" : [(("aa", "Aa", "AA"), "Å")],
# nn Norwegian Nynorsk validSubLocales="nn_NO"
"nn" : [(("aa", "Aa", "AA"), "Å")],
# nl Dutch no collation data.
# pl Polish validSubLocales="pl_PL" no contractions
# pt Portuguese no collation data.
# ru Russian validSubLocales="ru_BY ru_KG ru_KZ ru_MD ru_RU ru_UA" no
# contractions
# Slovak, validSubLocales="sk_SK" Slovak_Slovakia
# having DZ in Slovak as a contraction was rejected in
# http://unicode.org/cldr/trac/ticket/2968
"sk" : [(("ch", "cH", "Ch", "CH"), "Ch")],
# sl Slovenian validSubLocales="sl_SI" no contractions
# sv Swedish validSubLocales="sv_AX sv_FI sv_SE" default collation is
# "reformed" no contractions.
# vi Vietnamese validSubLocales="vi_VN" no contractions.
# zh Chinese validSubLocales="zh_Hans zh_Hans_CN zh_Hans_SG" no contractions
# in Latin characters the others are too complex.
}
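# A minimal sketch (added for illustration, not part of the original module)
# of how the contraction data above could be applied when choosing the index
# entry for a name: the first matching character sequence wins, otherwise the
# upper-cased first letter is used. The locale key and the sample name below
# are illustrative assumptions.
def _index_letter_example(name, lang="hu"):
    """Return the index entry for a name, honouring locale contractions."""
    for chars, entry in CONTRACTIONS_DICT.get(lang, []):
        # str.startswith accepts a tuple of candidate prefixes
        if name.startswith(chars):
            return entry
    return name[:1].upper()
# Under these assumptions, _index_letter_example("Szabó") yields "SZ".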
# The comment below from the glibc locale sv_SE in
# localedata/locales/sv_SE :
#
# % The letter w is normally not present in the Swedish alphabet. It
# % exists in some names in Swedish and foreign words, but is accounted
# % for as a variant of 'v'. Words and names with 'w' are in Swedish
# % ordered alphabetically among the words and names with 'v'. If two
# % words or names are only to be distinguished by 'v' or 'w', 'v' is
# % placed before 'w'.
#
# See: http://www.gramps-project.org/bugs/view.php?id=2933
#
# HOWEVER: the characters V and W in Swedish are not considered as a special
# case for several reasons. (1) The default collation for Swedish (called the
# 'reformed' collation type) regards the difference between 'v' and 'w' as a
# primary difference. (2) 'v' and 'w' in the 'standard' (non-default) collation
# type are not a contraction, just a case where the difference is secondary
# rather than primary. (3) There are plenty of other languages where a
# difference that is primary in other languages is secondary, and those are not
# specially handled.
|
Nick-Hall/gramps
|
gramps/plugins/webreport/narrativeweb.py
|
Python
|
gpl-2.0
| 129,918
|
[
"Brian"
] |
d95cdd3c228857437be314ef69d53704b32b4c0285964ae9f214788e393f6ec0
|
# Copyright (C) 2016 Collin Capano, Christopher M. Biwer, Alex Nitz
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This module provides classes and functions for drawing and calculating the
probability density function of distributions.
"""
# imports needed for functions below
from six.moves import configparser as _ConfigParser
from pycbc.distributions import constraints
from pycbc import VARARGS_DELIM as _VARARGS_DELIM
# Promote some classes/functions to the distributions name space
from pycbc.distributions.angular import UniformAngle, SinAngle, CosAngle, \
UniformSolidAngle
from pycbc.distributions.arbitrary import Arbitrary, FromFile
from pycbc.distributions.gaussian import Gaussian
from pycbc.distributions.power_law import UniformPowerLaw, UniformRadius
from pycbc.distributions.sky_location import UniformSky
from pycbc.distributions.uniform import Uniform
from pycbc.distributions.uniform_log import UniformLog10
from pycbc.distributions.spins import IndependentChiPChiEff
from pycbc.distributions.qnm import UniformF0Tau
from pycbc.distributions.joint import JointDistribution
from pycbc.distributions.external import External
from pycbc.distributions.fixedsamples import FixedSamples
from pycbc.distributions.mass import MchirpfromUniformMass1Mass2, \
QfromUniformMass1Mass2
# a dict of all available distributions
distribs = {
IndependentChiPChiEff.name : IndependentChiPChiEff,
Arbitrary.name : Arbitrary,
FromFile.name : FromFile,
Gaussian.name : Gaussian,
UniformPowerLaw.name : UniformPowerLaw,
UniformRadius.name : UniformRadius,
Uniform.name : Uniform,
UniformAngle.name : UniformAngle,
CosAngle.name : CosAngle,
SinAngle.name : SinAngle,
UniformSolidAngle.name : UniformSolidAngle,
UniformSky.name : UniformSky,
UniformLog10.name : UniformLog10,
UniformF0Tau.name : UniformF0Tau,
External.name: External,
FixedSamples.name: FixedSamples,
MchirpfromUniformMass1Mass2.name: MchirpfromUniformMass1Mass2,
QfromUniformMass1Mass2.name: QfromUniformMass1Mass2
}
def read_distributions_from_config(cp, section="prior"):
"""Returns a list of PyCBC distribution instances for a section in the
given configuration file.
Parameters
----------
    cp : WorkflowConfigParser
An open config file to read.
section : {"prior", string}
Prefix on section names from which to retrieve the distributions.
Returns
-------
list
A list of the parsed distributions.
"""
dists = []
variable_args = []
for subsection in cp.get_subsections(section):
name = cp.get_opt_tag(section, "name", subsection)
dist = distribs[name].from_config(cp, section, subsection)
if set(dist.params).isdisjoint(variable_args):
dists.append(dist)
variable_args += dist.params
else:
raise ValueError("Same parameter in more than one distribution.")
return dists
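# A hedged, self-contained sketch (not part of the original module) of the
# kind of object read_distributions_from_config() ultimately builds for a
# single [prior-mass1] section with "name = uniform": the `distribs` registry
# maps the name to a class, which is instantiated with parameter bounds. The
# parameter name and bounds below are illustrative assumptions.
def _example_manual_prior():
    # Equivalent by hand: a uniform prior on a parameter named 'mass1'
    prior = distribs['uniform'](mass1=(10., 80.))
    return JointDistribution(['mass1'], prior)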
def _convert_liststring_to_list(lstring):
"""Checks if an argument of the configuration file is a string of a list
and returns the corresponding list (of strings).
The argument is considered to be a list if it starts with '[' and ends
with ']'. List elements should be comma separated. For example, passing
`'[foo bar, cat]'` will result in `['foo bar', 'cat']` being returned. If
the argument does not start and end with '[' and ']', the argument will
just be returned as is.
"""
    if lstring[0] == '[' and lstring[-1] == ']':
        lstring = [item.strip().strip("'")
                   for item in lstring[1:-1].split(',')]
return lstring
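# Hedged sanity checks for the helper above (inputs are illustrative; wrapped
# in a function so nothing runs at import time):
def _liststring_examples():
    assert _convert_liststring_to_list("[foo bar, cat]") == ['foo bar', 'cat']
    assert _convert_liststring_to_list("plain") == "plain"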
def read_params_from_config(cp, prior_section='prior',
vargs_section='variable_args',
sargs_section='static_args'):
"""Loads static and variable parameters from a configuration file.
Parameters
----------
cp : WorkflowConfigParser
An open config parser to read from.
prior_section : str, optional
        Check that priors exist in the given section. Default is 'prior'.
vargs_section : str, optional
The section to get the parameters that will be varied/need priors
defined for them. Default is 'variable_args'.
sargs_section : str, optional
The section to get the parameters that will remain fixed. Default is
'static_args'.
Returns
-------
variable_args : list
The names of the parameters to vary in the PE run.
static_args : dict
Dictionary of names -> values giving the parameters to keep fixed.
"""
# sanity check that each parameter in [variable_args] has a priors section
variable_args = cp.options(vargs_section)
subsections = cp.get_subsections(prior_section)
tags = set([p for tag in subsections for p in tag.split('+')])
missing_prior = set(variable_args) - tags
if any(missing_prior):
raise KeyError("You are missing a priors section in the config file "
"for parameter(s): {}".format(', '.join(missing_prior)))
# sanity check that each parameter with a priors section is in
# [variable_args]
missing_variable = tags - set(variable_args)
if any(missing_variable):
raise KeyError("Prior section found for parameter(s) {} but not "
"listed as variable parameter(s)."
.format(', '.join(missing_variable)))
# get static args
try:
static_args = dict([(key, cp.get_opt_tags(sargs_section, key, []))
for key in cp.options(sargs_section)])
except _ConfigParser.NoSectionError:
static_args = {}
# sanity check that each parameter in [variable_args]
# is not repeated in [static_args]
for arg in variable_args:
if arg in static_args:
raise KeyError("Parameter {} found both in static_args and in "
"variable_args sections.".format(arg))
# try converting values to float
for key in static_args:
val = static_args[key]
try:
# the following will raise a ValueError if it cannot be cast to
# float (as we would expect for string arguments)
static_args[key] = float(val)
except ValueError:
# try converting to a list of strings; this function will just
# return val if it does not begin (end) with [ (])
static_args[key] = _convert_liststring_to_list(val)
return variable_args, static_args
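# Illustrative config layout for the function above (section and option names
# are assumptions following the defaults; each variable also needs a matching
# [prior-...] section to pass the sanity check):
#
#   [variable_args]          [static_args]
#   mass1 =                  approximant = IMRPhenomD
#                            f_lower = 20
#
# read_params_from_config(cp) would then return
#   (['mass1'], {'approximant': 'IMRPhenomD', 'f_lower': 20.0})
# after float conversion is attempted on each static value.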
def read_constraints_from_config(cp, transforms=None,
constraint_section='constraint'):
"""Loads parameter constraints from a configuration file.
Parameters
----------
cp : WorkflowConfigParser
An open config parser to read from.
transforms : list, optional
List of transforms to apply to parameters before applying constraints.
constraint_section : str, optional
The section to get the constraints from. Default is 'constraint'.
Returns
-------
list
List of ``Constraint`` objects. Empty if no constraints were provided.
"""
cons = []
for subsection in cp.get_subsections(constraint_section):
name = cp.get_opt_tag(constraint_section, "name", subsection)
constraint_arg = cp.get_opt_tag(
constraint_section, "constraint_arg", subsection)
# get any other keyword arguments
kwargs = {}
section = constraint_section + "-" + subsection
extra_opts = [key for key in cp.options(section)
if key not in ["name", "constraint_arg"]]
for key in extra_opts:
val = cp.get(section, key)
if key == "required_parameters":
val = val.split(_VARARGS_DELIM)
else:
try:
val = float(val)
except ValueError:
pass
kwargs[key] = val
cons.append(constraints.constraints[name](constraint_arg,
transforms=transforms,
**kwargs))
return cons
|
cdcapano/pycbc
|
pycbc/distributions/__init__.py
|
Python
|
gpl-3.0
| 8,988
|
[
"Gaussian"
] |
e64199c1b32b7a592e604ed01ad0bdd69d97d475b6672b9bd9ee991b0e741896
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import random
import time
from numpy import random as nprand
import moose
def make_network():
size = 1024
timestep = 0.2
runtime = 100.0
delayMin = timestep
delayMax = 4
weightMax = 0.02
Vmax = 1.0
thresh = 0.2
tau = 1 # Range of tau
tau0 = 0.5 # minimum tau
refr = 0.3
refr0 = 0.2
connectionProbability = 0.1
random.seed( 123 )
nprand.seed( 456 )
t0 = time.time()
clock = moose.element( '/clock' )
    network = moose.IntFire( 'network', size, 1 )
network.vec.bufferTime = [delayMax * 2] * size
moose.le( '/network' )
network.vec.numSynapses = [1] * size
# Interesting. This fails because we haven't yet allocated
# the synapses. I guess it is fair to avoid instances of objects that
# don't have allocations.
#synapse = moose.element( '/network/synapse' )
sv = moose.vec( '/network/synapse' )
print('before connect t = ', time.time() - t0)
mid = moose.connect( network, 'spikeOut', sv, 'addSpike', 'Sparse')
print('after connect t = ', time.time() - t0)
#print mid.destFields
m2 = moose.element( mid )
m2.setRandomConnectivity( connectionProbability, 5489 )
print('after setting connectivity, t = ', time.time() - t0)
network.vec.Vm = [(Vmax*random.random()) for r in range(size)]
network.vec.thresh = thresh
network.vec.refractoryPeriod = [( refr0 + refr * random.random()) for r in range( size) ]
network.vec.tau = [(tau0 + tau*random.random()) for r in range(size)]
numSynVec = network.vec.numSynapses
print('Middle of setup, t = ', time.time() - t0)
numTotSyn = sum( numSynVec )
for item in network.vec:
neuron = moose.element( item )
neuron.synapse.delay = [ (delayMin + random.random() * delayMax) for r in range( len( neuron.synapse ) ) ]
neuron.synapse.weight = nprand.rand( len( neuron.synapse ) ) * weightMax
print('after setup, t = ', time.time() - t0, ", numTotSyn = ", numTotSyn)
"""
netvec = network.vec
for i in range( size ):
synvec = netvec[i].synapse.vec
synvec.weight = [ (random.random() * weightMax) for r in range( synvec.len )]
synvec.delay = [ (delayMin + random.random() * delayMax) for r in range( synvec.len )]
"""
#moose.useClock( 9, '/postmaster', 'process' )
moose.useClock( 0, '/network', 'process' )
moose.setClock( 0, timestep )
moose.setClock( 9, timestep )
t1 = time.time()
moose.reinit()
print('reinit time t = ', time.time() - t1)
network.vec.Vm = [(Vmax*random.random()) for r in range(size)]
print('setting Vm , t = ', time.time() - t1)
t1 = time.time()
print('starting')
moose.start(runtime)
print('runtime, t = ', time.time() - t1)
print('Vm100:103', network.vec.Vm[100:103])
print('Vm900:903', network.vec.Vm[900:903])
    print('delays 100:', network.vec[100].synapse.delay[0:5])
    print('delays 900:', network.vec[900].synapse.delay[0:5])
make_network()
|
upibhalla/moose-core
|
tests/python/mpi/recurrentIntFire.py
|
Python
|
gpl-3.0
| 2,839
|
[
"MOOSE",
"NEURON"
] |
2bca83131ffe565ceb6da18f5a7bca7dd640e813f03649b06789c2fcf064c731
|
"""
Support for managing apps (as created with "0install add").
@since: 1.9
"""
# Copyright (C) 2012, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from zeroinstall import _, SafeException, logger
from zeroinstall.support import basedir, portable_rename
from zeroinstall.injector import namespaces, selections, qdom, model
import re, os, time, tempfile
# Avoid characters that are likely to cause problems (reject : and ; everywhere
# so that apps can be portable between POSIX and Windows).
valid_name = re.compile(r'''^[^./\\:=;'"][^/\\:=;'"]*$''')
def validate_name(name):
if valid_name.match(name): return
raise SafeException("Invalid application name '{name}'".format(name = name))
def _export(name, value):
"""Try to guess the command to set an environment variable."""
shell = os.environ.get('SHELL', '?')
if 'csh' in shell:
return "setenv %s %s" % (name, value)
return "export %s=%s" % (name, value)
def find_bin_dir(paths = None):
"""Find the first writable path in the list (default $PATH),
skipping /bin, /sbin and everything under /usr except /usr/local/bin"""
if paths is None:
paths = os.environ['PATH'].split(os.pathsep)
for path in paths:
if path.startswith('/usr/') and not path.startswith('/usr/local/bin'):
# (/usr/local/bin is OK if we're running as root)
pass
elif path.startswith('/bin') or path.startswith('/sbin'):
pass
elif os.path.realpath(path).startswith(basedir.xdg_cache_home):
pass # print "Skipping cache", first_path
elif not os.access(path, os.W_OK):
pass # print "No access", first_path
else:
break
else:
path = os.path.expanduser('~/bin/')
logger.warn('%s is not in $PATH. Add it with:\n%s' % (path, _export('PATH', path + ':$PATH')))
if not os.path.isdir(path):
os.makedirs(path)
return path
_command_template = """#!/bin/sh
exec 0install run {app} "$@"
"""
class AppScriptInfo:
"""@since: 1.12"""
name = None
command = None
def parse_script_header(stream):
"""If stream is a shell script for an application, return the app details.
@param stream: the executable file's stream (will seek)
@type stream: file-like object
@return: the app details, if any
@rtype: L{AppScriptInfo} | None
@since: 1.12"""
try:
stream.seek(0)
template_header = _command_template[:_command_template.index("{app}")]
actual_header = stream.read(len(template_header))
stream.seek(0)
if template_header == actual_header:
# If it's a launcher script, it should be quite short!
rest = stream.read()
line = rest.split('\n')[1]
else:
return None
except UnicodeDecodeError as ex:
logger.info("Not an app script '%s': %s", stream, ex)
return None
info = AppScriptInfo()
info.name = line.split()[3]
return info
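# A hedged round-trip sketch (added for illustration): write the launcher
# template for an app name and read it back through parse_script_header().
# io.StringIO stands in for the opened script file; the app name is assumed.
def _script_roundtrip_example():
	import io
	stream = io.StringIO(_command_template.format(app = 'myapp'))
	info = parse_script_header(stream)
	return info.name if info else None   # -> 'myapp'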
class App:
def __init__(self, config, path):
self.config = config
self.path = path
def set_selections(self, sels, set_last_checked = True):
"""Store a new set of selections. We include today's date in the filename
so that we keep a history of previous selections (max one per day), in case
		we want to roll back later."""
date = time.strftime('%Y-%m-%d')
sels_file = os.path.join(self.path, 'selections-{date}.xml'.format(date = date))
dom = sels.toDOM()
tmp = tempfile.NamedTemporaryFile(prefix = 'selections.xml-', dir = self.path, delete = False, mode = 'wt')
try:
dom.writexml(tmp, addindent=" ", newl="\n", encoding = 'utf-8')
except:
tmp.close()
os.unlink(tmp.name)
raise
tmp.close()
portable_rename(tmp.name, sels_file)
sels_latest = os.path.join(self.path, 'selections.xml')
if os.path.exists(sels_latest):
os.unlink(sels_latest)
os.symlink(os.path.basename(sels_file), sels_latest)
if set_last_checked:
self.set_last_checked()
def get_selections(self, snapshot_date = None, may_update = False):
"""Load the selections.
@param may_update: whether to check for updates
@type may_update: bool
@param snapshot_date: get a historical snapshot
@type snapshot_date: (as returned by L{get_history}) | None
@return: the selections
@rtype: L{selections.Selections}"""
if snapshot_date:
sels_file = os.path.join(self.path, 'selections-' + snapshot_date + '.xml')
else:
sels_file = os.path.join(self.path, 'selections.xml')
with open(sels_file, 'rb') as stream:
sels = selections.Selections(qdom.parse(stream))
if may_update:
sels = self._check_for_updates(sels)
return sels
def get_history(self):
"""Get the dates of the available snapshots, starting with the most recent.
@rtype: [str]"""
		date_re = re.compile(r'selections-(\d\d\d\d-\d\d-\d\d)\.xml')
snapshots = []
for f in os.listdir(self.path):
match = date_re.match(f)
if match:
snapshots.append(match.group(1))
snapshots.sort(reverse = True)
return snapshots
def download_selections(self, sels):
"""Download any missing implementations.
@return: a blocker which resolves when all needed implementations are available
@rtype: L{tasks.Blocker} | None"""
return sels.download_missing(self.config) # TODO: package impls
def _check_for_updates(self, sels):
"""Check whether the selections need to be updated.
If any input feeds have changed, we re-run the solver. If the
new selections require a download, we schedule one in the
background and return the old selections. Otherwise, we return the
new selections. If we can select better versions without downloading,
we update the app's selections and return the new selections.
We also schedule a background update from time-to-time anyway.
@return: the selections to use
@rtype: L{selections.Selections}"""
need_solve = False # Rerun solver (cached feeds have changed)
need_update = False # Update over the network
utime = self._get_mtime('last-checked', warn_if_missing = True)
last_solve = max(self._get_mtime('last-solve', warn_if_missing = False), utime)
# Ideally, this would return all the files which were inputs into the solver's
# decision. Currently, we approximate with:
# - the previously selected feed files (local or cached)
# - configuration files for the selected interfaces
# - the global configuration
# We currently ignore feeds and interfaces which were
# considered but not selected.
# Can yield None (ignored), paths or (path, mtime) tuples.
# If this throws an exception, we will log it and resolve anyway.
def get_inputs():
for sel in sels.selections.values():
logger.info("Checking %s", sel.feed)
feed = iface_cache.get_feed(sel.feed)
if not feed:
raise IOError("Input %s missing; update" % sel.feed)
else:
if feed.local_path:
yield feed.local_path
else:
yield (feed.url, feed.last_modified)
# Per-feed configuration
yield basedir.load_first_config(namespaces.config_site, namespaces.config_prog,
'interfaces', model._pretty_escape(sel.interface))
# Global configuration
yield basedir.load_first_config(namespaces.config_site, namespaces.config_prog, 'global')
# If any of the feeds we used have been updated since the last check, do a quick re-solve
iface_cache = self.config.iface_cache
try:
for item in get_inputs():
if not item: continue
if isinstance(item, tuple):
path, mtime = item
else:
path = item
mtime = os.stat(path).st_mtime
if mtime and mtime > last_solve:
logger.info("Triggering update to %s because %s has changed", self, path)
need_solve = True
break
except Exception as ex:
logger.info("Error checking modification times: %s", ex)
need_solve = True
need_update = True
# Is it time for a background update anyway?
if not need_update:
staleness = time.time() - utime
logger.info("Staleness of app %s is %d hours", self, staleness / (60 * 60))
freshness_threshold = self.config.freshness
if freshness_threshold > 0 and staleness >= freshness_threshold:
need_update = True
if need_solve:
from zeroinstall.injector.driver import Driver
driver = Driver(config = self.config, requirements = self.get_requirements())
if driver.need_download():
# Continue with the current (hopefully cached) selections while we download
need_update = True
else:
old_sels = sels
sels = driver.solver.selections
from zeroinstall.support import xmltools
if not xmltools.nodes_equal(sels.toDOM(), old_sels.toDOM()):
self.set_selections(sels, set_last_checked = False)
self._touch('last-solve')
# If we tried to check within the last hour, don't try again.
if need_update:
last_check_attempt = self._get_mtime('last-check-attempt', warn_if_missing = False)
if last_check_attempt and last_check_attempt + 60 * 60 > time.time():
logger.info("Tried to check within last hour; not trying again now")
need_update = False
if need_update:
self.set_last_check_attempt()
from zeroinstall.injector import background
r = self.get_requirements()
background.spawn_background_update2(r, False, self)
return sels
def set_requirements(self, requirements):
import json
tmp = tempfile.NamedTemporaryFile(prefix = 'tmp-requirements-', dir = self.path, delete = False, mode = 'wt')
try:
json.dump(dict((key, getattr(requirements, key)) for key in requirements.__slots__), tmp)
except:
tmp.close()
os.unlink(tmp.name)
raise
tmp.close()
reqs_file = os.path.join(self.path, 'requirements.json')
portable_rename(tmp.name, reqs_file)
def get_requirements(self):
import json
from zeroinstall.injector import requirements
r = requirements.Requirements(None)
reqs_file = os.path.join(self.path, 'requirements.json')
with open(reqs_file, 'rt') as stream:
values = json.load(stream)
for k, v in values.items():
setattr(r, k, v)
return r
def set_last_check_attempt(self):
self._touch('last-check-attempt')
def set_last_checked(self):
self._touch('last-checked')
def _touch(self, name):
timestamp_path = os.path.join(self.path, name)
fd = os.open(timestamp_path, os.O_WRONLY | os.O_CREAT, 0o644)
os.close(fd)
os.utime(timestamp_path, None) # In case file already exists
def _get_mtime(self, name, warn_if_missing = True):
timestamp_path = os.path.join(self.path, name)
try:
return os.stat(timestamp_path).st_mtime
except Exception as ex:
if warn_if_missing:
logger.warn("Failed to get time-stamp of %s: %s", timestamp_path, ex)
return 0
def get_last_checked(self):
"""Get the time of the last successful check for updates.
@return: the timestamp (or None on error)
@rtype: float | None"""
return self._get_mtime('last-checked', warn_if_missing = True)
def get_last_check_attempt(self):
"""Get the time of the last attempted check.
@return: the timestamp, or None if we updated successfully.
@rtype: float | None"""
last_check_attempt = self._get_mtime('last-check-attempt', warn_if_missing = False)
if last_check_attempt:
last_checked = self.get_last_checked()
if last_checked < last_check_attempt:
return last_check_attempt
return None
def destroy(self):
# Check for shell command
# TODO: remember which commands we own instead of guessing
name = self.get_name()
bin_dir = find_bin_dir()
launcher = os.path.join(bin_dir, name)
expanded_template = _command_template.format(app = name)
if os.path.exists(launcher) and os.path.getsize(launcher) == len(expanded_template):
with open(launcher, 'r') as stream:
contents = stream.read()
if contents == expanded_template:
#print "rm", launcher
os.unlink(launcher)
# Remove the app itself
import shutil
shutil.rmtree(self.path)
def integrate_shell(self, name):
# TODO: remember which commands we create
if not valid_name.match(name):
raise SafeException("Invalid shell command name '{name}'".format(name = name))
bin_dir = find_bin_dir()
launcher = os.path.join(bin_dir, name)
if os.path.exists(launcher):
raise SafeException("Command already exists: {path}".format(path = launcher))
with open(launcher, 'w') as stream:
stream.write(_command_template.format(app = self.get_name()))
# Make new script executable
os.chmod(launcher, 0o111 | os.fstat(stream.fileno()).st_mode)
def get_name(self):
return os.path.basename(self.path)
def __str__(self):
return '<app ' + self.get_name() + '>'
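# A minimal, hypothetical sketch (not part of the 0install API) of the
# atomic-write pattern App.set_requirements uses above: write the JSON to a
# temporary file in the destination directory, then rename it over the target
# so readers never observe a half-written file. The helper name is invented,
# and os.replace stands in for zeroinstall's portable_rename (Python 3.3+).
def _atomic_json_write_sketch(data, dest_path):
	import json, os, tempfile
	tmp = tempfile.NamedTemporaryFile(prefix = 'tmp-', dir = os.path.dirname(dest_path) or '.', delete = False, mode = 'wt')
	try:
		json.dump(data, tmp)
	except:
		tmp.close()
		os.unlink(tmp.name)	# don't leave a partial temp file behind
		raise
	tmp.close()
	os.replace(tmp.name, dest_path)	# same-directory rename; atomic on POSIX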
class AppManager:
def __init__(self, config):
self.config = config
def create_app(self, name, requirements):
validate_name(name)
apps_dir = basedir.save_config_path(namespaces.config_site, "apps")
app_dir = os.path.join(apps_dir, name)
if os.path.isdir(app_dir):
raise SafeException(_("Application '{name}' already exists: {path}").format(name = name, path = app_dir))
os.mkdir(app_dir)
app = App(self.config, app_dir)
app.set_requirements(requirements)
app.set_last_checked()
return app
def lookup_app(self, name, missing_ok = False):
"""Get the App for name.
Returns None if name is not an application (doesn't exist or is not a valid name).
Since / and : are not valid name characters, it is generally safe to try this
before calling L{injector.model.canonical_iface_uri}."""
if not valid_name.match(name):
if missing_ok:
return None
else:
raise SafeException("Invalid application name '{name}'".format(name = name))
app_dir = basedir.load_first_config(namespaces.config_site, "apps", name)
if app_dir:
return App(self.config, app_dir)
if missing_ok:
return None
else:
raise SafeException("No such application '{name}'".format(name = name))
|
dabrahams/0install
|
zeroinstall/apps.py
|
Python
|
lgpl-2.1
| 13,519
|
[
"VisIt"
] |
98ab00a9ba97cd523970f0b7463d276f248cd2efdec73277c13dc5195bcdb4e6
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""A visitor that pauses the print at a layer
"""
from gcodeutils.visit.visitor import GCodeVisitor
__author__ = "wireddown"
PAUSE_COMMAND = "M226"
class PauseAtLayer(GCodeVisitor):
"""This class inserts a pause command at the start of each layer specified
"""
def __init__(self, pause_layer_list):
self.__pause_layer_list = pause_layer_list
def did_visit_layer(self, layer_as_pyline_list, gcode_iterator_info):
"""Inserts a pause command if the layer matches the list
"""
is_printed = gcode_iterator_info.is_printed
is_pause_layer = gcode_iterator_info.layer_number in self.__pause_layer_list
should_pause = is_printed and is_pause_layer
if should_pause:
gcode = gcode_iterator_info.gcode
layer_index = gcode_iterator_info.layer_index
commands_to_prepend = [PAUSE_COMMAND]
gcode.prepend_to_layer(commands_to_prepend, layer_index)
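# A minimal usage sketch, not part of gcodeutils: the Stub* classes below are
# hypothetical stand-ins for the objects the visitor framework normally passes
# to did_visit_layer, here only to show when the pause is prepended.
if __name__ == '__main__':
    class StubGCode(object):
        def prepend_to_layer(self, commands, layer_index):
            print("layer %d gets %s" % (layer_index, commands))

    class StubInfo(object):
        def __init__(self, gcode, layer_number, layer_index, is_printed):
            self.gcode = gcode
            self.layer_number = layer_number
            self.layer_index = layer_index
            self.is_printed = is_printed

    pauser = PauseAtLayer([2, 5])
    gcode = StubGCode()
    for number in range(1, 7):
        pauser.did_visit_layer([], StubInfo(gcode, number, number, is_printed=True))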
|
zeograd/gcodeutils
|
gcodeutils/visit/pause_at_layer.py
|
Python
|
gpl-2.0
| 1,005
|
[
"VisIt"
] |
b6311dac738616605e64a073a553c0281c529045c3f168af9d105cb5dd9b04d1
|
from django.db import connection
from django.db.models import Sum, Q
from datetime import date, timedelta
from . import TemplateFileReport, register_report
REPORT_TYPE = 'clinic'
TEMPLATE_DIR = 'tracking/reports/clinic'
class ClinicReport(TemplateFileReport):
''' Base class for Email Reports'''
def __init__(self, clinic=None, logger=None):
self.clinic = clinic
super(ClinicReport, self).__init__(logger=logger)
def __str__(self):
        return '<{0}(clinic={1})>'.format(self.name, self.clinic)
@register_report(REPORT_TYPE, name='visit_history')
class VisitHistoryReport(ClinicReport):
subject_tpl = 'Patient visit history report'
template_file = '{}/visit_history.html'.format(TEMPLATE_DIR)
def get_extra_context(self):
from tracking.models import PatientVisit
assert self.clinic, 'clinic should be set!'
today = date.today()
visit_date = today - timedelta(days=1)
patient_visits = PatientVisit.objects.filter(
clinic=self.clinic,
visit_date=visit_date).order_by('-visit_date')
extra = {
'patient_visits': patient_visits,
'visit_date': visit_date.strftime('%B %d, %Y'),
}
return extra
@register_report(REPORT_TYPE, name='weekly_visit')
class WeeklyVisitReport(ClinicReport):
subject_tpl = 'Weekly visit report ({{from_date}} - {{to_date}})'
template_file = '{}/weekly_visit.html'.format(TEMPLATE_DIR)
def get_extra_context(self):
from tracking.models import PatientVisit, TreatingProvider
assert self.clinic, 'clinic should be set!'
yesterday = date.today() - timedelta(days=1)
start_week = yesterday - timedelta(yesterday.weekday())
end_week = start_week + timedelta(4)
start_month = yesterday.replace(day=1)
end_month = (yesterday.replace(day=10) + timedelta(days=30)
).replace(day=1) - timedelta(days=1)
start_date = min(start_month, start_week)
end_date = max(end_month, end_week)
week_days = [start_week + timedelta(days=i) for i in range(5)]
query = PatientVisit.objects.filter(
referring_entity__organization__org_type__isnull=False,
clinic=self.clinic, visit_date__range=(start_date, end_date)
).extra(select={
'is_current': "tracking_organization.org_type='INT'"}
).values('visit_date', 'treating_provider__provider_name',
'is_current').annotate(total=Sum('visit_count')).all()
data = {}
has_no_name = False
UNKNOWN_PVD = '<Unknown>'
CLINIC_TOTAL = 'Clinic totals'
MONTH_RANGE = 'Month'
for r in query:
provider = r['treating_provider__provider_name']
visit_date = r['visit_date']
if not provider:
has_no_name = True
provider = UNKNOWN_PVD
val_idx = 0 if r['is_current'] else 1
if visit_date >= start_week and visit_date <= end_week:
vals = data.setdefault(visit_date,
{}).setdefault(provider, [0, 0])
vals[val_idx] += r['total']
if visit_date >= start_month and visit_date <= end_month:
vals = data.setdefault(MONTH_RANGE,
{}).setdefault(provider, [0, 0])
vals[val_idx] += r['total']
providers = [t.provider_name for t in TreatingProvider.objects.filter(
clinic=self.clinic).order_by('id').all()]
if has_no_name:
providers.append(UNKNOWN_PVD)
records = []
clinic_total = [[0, 0, 0] for i in range(len(week_days)+1)]
for pvd in providers:
row = [pvd]
for i in range(len(week_days)+1):
if i == len(week_days):
dt = MONTH_RANGE
else:
dt = week_days[i]
val = data.get(dt, {}).get(pvd, None) or [0, 0]
val.append(val[0]+val[1])
row.append(val)
clinic_total[i][0] += val[0]
clinic_total[i][1] += val[1]
clinic_total[i][2] += val[2]
records.append(row)
records.insert(0, [CLINIC_TOTAL] + clinic_total)
self.logger.info("records: %s", records)
extra = {
'from_date': start_week.strftime('%b %d, %Y'),
'to_date': end_week.strftime('%b %d, %Y'),
'week_days': week_days,
'records': records,
}
return extra
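# A standalone sketch (not used by the reports) of the month-window arithmetic
# in get_extra_context above: day 10 plus 30 days always lands in the next
# month, so stepping back to day 1 and subtracting one day yields the last day
# of the starting month.
def _last_day_of_month_sketch(d):
    return (d.replace(day=10) + timedelta(days=30)).replace(day=1) - timedelta(days=1)
# e.g. _last_day_of_month_sketch(date(2020, 2, 14)) == date(2020, 2, 29)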
@register_report(REPORT_TYPE, name='monthly_visit')
class MonthlyVisitReport(ClinicReport):
subject_tpl = 'Monthly visit report ({{from_date}} - {{to_date}})'
template_file = '{}/monthly_visit.html'.format(TEMPLATE_DIR)
def get_extra_context(self):
from tracking.models import PatientVisit, TreatingProvider
assert self.clinic, 'clinic should be set!'
today = date.today()
end_cur_year = today.replace(day=1) - timedelta(days=1)
start_cur_year = date(end_cur_year.year, 1, 1)
start_prev_year = start_cur_year.replace(year=start_cur_year.year-1)
end_prev_year = end_cur_year.replace(year=end_cur_year.year-1)
year_months = [start_cur_year.replace(month=i+1) for i in
range(end_cur_year.month)]
CUR_YEAR = start_cur_year.year
PREV_YEAR = start_prev_year.year
month_partial = connection.ops.date_trunc_sql('month', 'visit_date')
date_range = Q(visit_date__range=(start_cur_year, end_cur_year)) | \
Q(visit_date__range=(start_prev_year, end_prev_year))
query = PatientVisit.objects.filter(
date_range,
referring_entity__organization__org_type__isnull=False,
clinic=self.clinic,
).extra(select={
'month': month_partial,
'is_current': "tracking_organization.org_type='INT'"}
).values('month', 'treating_provider__provider_name',
'is_current').annotate(total=Sum('visit_count')).all()
data = {}
has_no_name = False
UNKNOWN_PVD = '<Unknown>'
CLINIC_TOTAL = 'Clinic totals'
for r in query:
provider = r['treating_provider__provider_name']
month_date = r['month'].date()
if not provider:
has_no_name = True
provider = UNKNOWN_PVD
val_idx = 0 if r['is_current'] else 1
if month_date.year == CUR_YEAR:
vals = data.setdefault(month_date,
{}).setdefault(provider, [0, 0])
vals[val_idx] += r['total']
else:
vals = data.setdefault(PREV_YEAR,
{}).setdefault(provider, [0, 0])
vals[val_idx] += r['total']
providers = [t.provider_name for t in TreatingProvider.objects.filter(
clinic=self.clinic).order_by('id').all()]
if has_no_name:
providers.append(UNKNOWN_PVD)
records = []
clinic_total = [[0, 0, 0] for i in range(len(year_months)+1)]
for pvd in providers:
row = [pvd]
year_total = [0, 0, 0]
for i in range(len(year_months)+1):
if i == len(year_months):
dt = PREV_YEAR
else:
dt = year_months[i]
val = data.get(dt, {}).get(pvd, None) or [0, 0]
val.append(val[0]+val[1])
row.append(val)
clinic_total[i][0] += val[0]
clinic_total[i][1] += val[1]
clinic_total[i][2] += val[2]
if i < len(year_months):
year_total[0] += val[0]
year_total[1] += val[1]
year_total[2] += val[2]
row.insert(len(row)-1, year_total)
records.append(row)
clinic_total.insert(len(clinic_total)-1,
list(map(sum, zip(*clinic_total[:-1]))))
records.insert(0, [CLINIC_TOTAL] + clinic_total)
self.logger.info("data: %s", data)
self.logger.info("records: %s", records)
extra = {
'from_date': start_cur_year.strftime('%b %Y'),
'to_date': end_cur_year.strftime('%b %Y'),
'year_months': year_months,
'current_year': CUR_YEAR,
'previous_year': PREV_YEAR,
'records': records,
}
return extra
|
Heteroskedastic/Dr-referral-tracker
|
tracking/reports/clinic_reports.py
|
Python
|
mit
| 8,655
|
[
"VisIt"
] |
91d6a641ad39a13fe1cd4560497d581f2ec5487f33a7f723254c33380a725b3a
|
#!/usr/bin/env python
#=============================================================================
#
# Gradient Noise Generation
#
# TODO:
#
# Add a more intelligent noise filter that is aware of the number of samples
# needed between the gradient positions, and expands the filter envelope to
# more evenly (Gaussian) distribute the noise between samples.
#
#=============================================================================
"""
Gradient Noise Generation
=========================
Low-delta gradients cause stair-step transitions (banding). The typical
solution to hide the banding is to introduce general-purpose dithering of the
entire image space. This is an attempt to see if a special-purpose gradient
noise function can produce good-enough results.
"""
import logging
import random
import sys
import mpng
__version__ = '0.0.0'
#=============================================================================
class Gradient( object ):
"""
Gradient objects.
"""
#=========================================================================
def __init__( self, start_color = 0x000000FF, end_color = 0xFFFFFFFF ):
"""
Initializes a Gradient object.
"""
self.stops = []
self.add_stop( start_color, 0.0 )
self.add_stop( end_color, 1.0 )
self.channel_filter = round
#=========================================================================
def add_stop( self, color, position ):
"""
Add new color stop to gradient.
"""
self.stops.append( Stop( color, position ) )
self.stops.sort( key = lambda o : o.position )
#=========================================================================
def get( self, position = 0.5 ):
"""
Retrieves a color value at a position along the gradient.
Note: Does not currently support gradient extrapolation.
Note: Does not currently support gradient clipping.
"""
# don't mess around with non-float positions
position = float( position )
# start of gradient
if position <= 0.0:
return self.stops[ 0 ].color
# end of gradient
elif position >= 1.0:
return self.stops[ -1 ].color
# set last stop
last_stop = self.stops[ 0 ]
# scan stops in gradient
for stop in self.stops:
# check for the first stop past the requested position
if stop.position > position:
break
# update last stop
last_stop = stop
# determine localized position between stops
delta = stop.position - last_stop.position
if delta > 0:
position = ( position - last_stop.position ) / delta
else:
position = 0.0
# compute color channel deltas
ar = float( ( last_stop.color >> 24 ) & 0xFF )
ag = float( ( last_stop.color >> 16 ) & 0xFF )
ab = float( ( last_stop.color >> 8 ) & 0xFF )
aa = float( ( last_stop.color >> 0 ) & 0xFF )
br = float( ( stop.color >> 24 ) & 0xFF )
bg = float( ( stop.color >> 16 ) & 0xFF )
bb = float( ( stop.color >> 8 ) & 0xFF )
ba = float( ( stop.color >> 0 ) & 0xFF )
dr = br - ar
dg = bg - ag
db = bb - ab
da = ba - aa
# compute channel values at this position
gr = ar + ( dr * position )
gg = ag + ( dg * position )
gb = ab + ( db * position )
ga = aa + ( da * position )
# filter each channel
fr = int( self.channel_filter( gr ) )
fg = int( self.channel_filter( gg ) )
fb = int( self.channel_filter( gb ) )
fa = int( self.channel_filter( ga ) )
# test filter output for issues
if fr > 255:
logging.warn( 'Exceeded 8-bit channel value: R={}'.format( fr ) )
if fg > 255:
logging.warn( 'Exceeded 8-bit channel value: G={}'.format( fg ) )
if fb > 255:
logging.warn( 'Exceeded 8-bit channel value: B={}'.format( fb ) )
if fa > 255:
logging.warn( 'Exceeded 8-bit channel value: A={}'.format( fa ) )
# construct the color output value
cr = ( fr & 0xFF ) << 24
cg = ( fg & 0xFF ) << 16
cb = ( fb & 0xFF ) << 8
ca = ( fa & 0xFF ) << 0
color = cr | cg | cb | ca
# return the color value
return color
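#=============================================================================
# A short usage sketch, not part of the original module: sample a three-stop
# gradient at a few positions. 0xFF0000FF is opaque red, 0x00FF00FF opaque
# green, 0x0000FFFF opaque blue; sampled colors print as RGBA hex.
def _gradient_demo():
    g = Gradient( 0xFF0000FF, 0x0000FFFF )
    g.add_stop( 0x00FF00FF, 0.5 )
    for p in ( 0.0, 0.25, 0.5, 0.75, 1.0 ):
        print( '{:1.2f} -> {:08X}'.format( p, g.get( p ) ) )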
#=============================================================================
class Stop( object ):
"""
Gradient stop objects.
"""
#=========================================================================
def __init__( self, color, position ):
"""
Initializes a Stop object.
"""
self.color = color
self.position = position
#=============================================================================
def simple_random_filter( value ):
"""
Simple channel randomization filter.
"""
# how much relative noise to introduce
jitter = 0.2
# window potentially clipped noise (ZIH - not sure I like this yet)
#min_value = value - jitter
#max_value = value + jitter
#if min_value < 0.0:
# value -= min_value
#elif max_value > 1.0:
# value -= 1.0 - max_value
# make some noise centered around a central value
noise = ( jitter * random.random() ) - ( jitter / 2.0 )
# add the noise to the input to generate the output
output = value + noise
# the value should be pushed to the nearest integer
return round( output )
#=============================================================================
def test_simple():
"""
Tests simple gradient noise.
"""
w = 320
h = 1024
start = 0x08262AFF
stop = 0x001620FF
### DEBUGGING
dfh = open( 'dbg.csv', 'w' )
# generate the pixel data for the simple gradient noise
def gen_simple( w, h ):
# create some low-delta gradients
rg = Gradient( start, stop )
ng = Gradient( start, stop )
ng.channel_filter = simple_random_filter
# create a row of pixels
for row in range( h ):
# list of pixel channel value in this row
pixels = []
if row == 0:
position = 1.0
elif row >= ( h - 1 ):
position = 0.0
else:
position = ( h - row ) / float( h )
### DEBUGGING
dbg = [ '{:03},{:1.4f}'.format( row, position ) ]
# reference gradient
color = rg.get( position )
values = [
( ( color >> 24 ) & 0xFF ),
( ( color >> 16 ) & 0xFF ),
( ( color >> 8 ) & 0xFF )
]
pixels.extend( values * ( w // 2 ) )
dbg.append( '{:08X}'.format( color ) )
# noise gradient
for col in range( w // 2, w ):
color = ng.get( position )
values = [
( ( color >> 24 ) & 0xFF ),
( ( color >> 16 ) & 0xFF ),
( ( color >> 8 ) & 0xFF )
]
pixels.extend( values )
if col % 32 == 0:
dbg.append( '{:08X}'.format( color ) )
dfh.write( '{}\n'.format( ','.join( dbg ) ) )
# yield a full row of pixel data
yield pixels
# generate the test image
with open( 'test_simple.png', 'wb' ) as ifh:
png_writer = mpng.Writer( w, h )
png_writer.write( ifh, gen_simple( w, h ) )
### DEBUGGING
dfh.close()
#=============================================================================
def main( argv ):
"""
Script execution entry point
@param argv List of arguments passed to the script
@return Shell exit code (0 = success)
"""
# imports when using this as a script
import argparse
# create and configure an argument parser
parser = argparse.ArgumentParser(
description = 'Gradient Noise Generation',
add_help = False
)
parser.add_argument(
'-h',
'--help',
default = False,
help = 'Display this help message and exit.',
action = 'help'
)
parser.add_argument(
'-v',
'--version',
default = False,
help = 'Display script version and exit.',
action = 'version',
version = __version__
)
# parse the arguments
args = parser.parse_args( argv[ 1 : ] )
# run the simple test
test_simple()
# return success
return 0
#=============================================================================
if __name__ == "__main__":
sys.exit( main( sys.argv ) )
|
zhester/hzpy
|
development/gradnoise.py
|
Python
|
bsd-2-clause
| 8,891
|
[
"Gaussian"
] |
9d53781c1116910f6788fc59d48268c90fa2f4beba5ee9fc7d65a4f73304d8d6
|
# -*- coding: utf-8 -*-
"""
Enrich BEL graphs
=================
In the current build it is possible to enrich BEL graphs containing metabolites with associated
disease or protein information and to enrich BEL graphs containing disease or protein information with associated metabolites.
This can be done with the functions further explained in `BEL Serialization`_
.. _BEL Serialization: bel_serialization.html
2. Enriching BEL graphs
-----------------------
A BEL graph with metabolites (represented using the `HMDB namespace`_) can be enriched with disease and protein information from HMDB.
.. _HMDB namespace: construct_namspaces.html
2.1 Metabolites-Proteins
~~~~~~~~~~~~~~~~~~~~~~~~
For a graph containing metabolites:
>>> enrich_metabolites_proteins(bel_graph, manager)
The result of this will be a BEL graph which now includes relations between the metabolites and proteins.
For a graph containing proteins (named using uniprot identifiers):
>>> enrich_proteins_metabolites(bel_graph, manager)
This will result in a BEL graph where the proteins are linked to associated metabolites.
2.2 Metabolites-Diseases
~~~~~~~~~~~~~~~~~~~~~~~~
For a graph containing metabolites:
>>> enrich_metabolites_diseases(bel_graph, manager)
The result of this will be a BEL graph which now includes relations between the metabolites and diseases.
For a graph containing diseases (named using HMDB identifiers):
>>> enrich_diseases_metabolites(bel_graph, manager)
This will result in a BEL graph where the diseases are linked to associated metabolites.
"""
import logging
from typing import Optional
from pybel import BELGraph
from pybel.constants import (
ABUNDANCE, ANNOTATIONS, ASSOCIATION, CITATION, CITATION_REFERENCE, CITATION_TYPE, CITATION_TYPE_PUBMED, EVIDENCE,
FUNCTION, NAME, NAMESPACE, PATHOLOGY, PROTEIN, RELATION,
)
from pybel.struct.pipeline.decorators import in_place_transformation
from .manager import Manager
log = logging.getLogger(__name__)
def _check_namespaces(data, bel_function, bel_namespace):
"""Makes code more structured and reusable."""
if data[FUNCTION] != bel_function:
return False
if NAMESPACE not in data:
return False
    if data[NAMESPACE] == bel_namespace:
        return True
    log.warning("Unable to map namespace: %s", data[NAMESPACE])
    return False
# enrich proteins and metabolites
@in_place_transformation
def enrich_metabolites_proteins(graph: BELGraph, manager: Optional[Manager] = None):
"""Enrich a given BEL graph, which includes metabolites with proteins, that are associated to the metabolites."""
if manager is None:
manager = Manager()
for node in list(graph):
if _check_namespaces(node, ABUNDANCE, 'HMDB'):
metabolite_protein_interactions = manager.query_metabolite_associated_proteins(node[NAME])
else:
continue
if not metabolite_protein_interactions:
log.warning("Unable to find node: %s", node)
continue
for association in metabolite_protein_interactions:
protein_data = association.protein.serialize_to_bel()
protein_tuple = graph.add_node_from_data(protein_data)
graph.add_edge(protein_tuple, node, attr_dict={
RELATION: ASSOCIATION,
EVIDENCE: None,
CITATION: {
CITATION_TYPE: None,
CITATION_REFERENCE: None,
},
ANNOTATIONS: {
'name': association.protein.name,
'protein_type': association.protein.protein_type
}
})
@in_place_transformation
def enrich_proteins_metabolites(graph: BELGraph, manager: Optional[Manager] = None):
"""Enrich a given BEL graph, which includes uniprot proteins with HMDB metabolites,
that are associated to the proteins.
"""
if manager is None:
manager = Manager()
for node in list(graph):
if _check_namespaces(node, PROTEIN, 'UP'):
protein_metabolite_interactions = manager.query_protein_associated_metabolites(node[NAME])
else:
continue
if protein_metabolite_interactions is None:
log.warning("Unable to find node: %s", node)
continue
for association in protein_metabolite_interactions:
metabolite_data = association.metabolite.serialize_to_bel()
metabolite_tuple = graph.add_node_from_data(metabolite_data)
graph.add_edge(metabolite_tuple, node, attr_dict={
RELATION: ASSOCIATION,
EVIDENCE: None,
CITATION: {
CITATION_TYPE: None,
CITATION_REFERENCE: None,
},
ANNOTATIONS: {
'name': association.protein.name,
'protein_type': association.protein.protein_type
}
})
# enrich diseases and metabolites
@in_place_transformation
def enrich_metabolites_diseases(graph: BELGraph, manager: Optional[Manager] = None):
"""Enrich a given BEL graph, which includes metabolites with diseases, to which the metabolites are associated."""
if manager is None:
manager = Manager()
for data in list(graph):
if _check_namespaces(data, ABUNDANCE, 'HMDB'):
metabolite_disease_interactions = manager.query_metabolite_associated_diseases(data[NAME])
else:
continue
if metabolite_disease_interactions is None:
log.warning("Unable to find node: %s", data)
continue
# add edges and collect all the references for this edge
i = 0
while i < len(metabolite_disease_interactions):
association = metabolite_disease_interactions[i]
references = [] # list for storing the reference articles
old_disease = association.disease
while True: # collect the references for the metabolite disease interaction
try:
if old_disease != metabolite_disease_interactions[i].disease:
break # break if disease has changed
references.append(metabolite_disease_interactions[i].reference.pubmed_id)
i += 1
except IndexError:
break
# add disease node and construct edge
disease_data = association.disease.serialize_to_bel()
disease_tuple = graph.add_node_from_data(disease_data)
graph.add_edge(disease_tuple, data, attr_dict={
RELATION: ASSOCIATION,
EVIDENCE: None,
CITATION: {
CITATION_TYPE: CITATION_TYPE_PUBMED,
CITATION_REFERENCE: references[0],
},
ANNOTATIONS: {
'omim_id': association.disease.omim_id,
'additional_references': references[1::]
}
})
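def _group_references_sketch(interactions):
    """A hedged sketch, not used by the enrichment functions: assuming the
    association list is ordered by disease (as the index-walking loop above
    already assumes), itertools.groupby collapses consecutive rows that share
    a disease and collects their PubMed ids in one pass."""
    from itertools import groupby
    return {
        disease: [a.reference.pubmed_id for a in group]
        for disease, group in groupby(interactions, key=lambda a: a.disease)
    }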
@in_place_transformation
def enrich_diseases_metabolites(graph: BELGraph, manager: Optional[Manager] = None):
"""Enrich a given BEL graph, which includes HMDB diseases with HMDB metabolites, which are associated to the
diseases."""
if manager is None:
manager = Manager()
for data in list(graph):
if _check_namespaces(data, PATHOLOGY, 'HMDB_D'):
disease_metabolite_interactions = manager.query_disease_associated_metabolites(data[NAME])
else:
continue
if not disease_metabolite_interactions:
log.warning("Unable to find node: %s", data)
continue
# add edges and collect all the references for this edge
i = 0
while i < len(disease_metabolite_interactions):
association = disease_metabolite_interactions[i]
references = [] # list for storing the reference articles
old_metabolite = association.metabolite
while True: # collect the references for the metabolite disease interaction
try:
if old_metabolite != disease_metabolite_interactions[i].metabolite:
                        break # break if metabolite has changed
references.append(disease_metabolite_interactions[i].reference.pubmed_id)
i += 1
except IndexError:
break
# add disease node and construct edge
metabolite_data = association.metabolite.serialize_to_bel()
metabolite_tuple = graph.add_node_from_data(metabolite_data)
graph.add_edge(metabolite_tuple, data, attr_dict={
RELATION: ASSOCIATION,
EVIDENCE: None,
CITATION: {
CITATION_TYPE: CITATION_TYPE_PUBMED,
CITATION_REFERENCE: references[0],
},
ANNOTATIONS: {
'omim_id': association.disease.omim_id,
'additional_references': references[1::]
}
})
|
bio2bel/hmdb
|
src/bio2bel_hmdb/enrich.py
|
Python
|
mit
| 9,194
|
[
"Pybel"
] |
2d1e184f9f8a20d840edfb5244d7faeebe831757b0a8b495f81539619315a28f
|
#!/usr/bin/env python
"""
Simple little script that allows one to automatically update DNS records through Fastmail's GUI.
I could not find an official API, so this relies on Beautiful Soup to navigate through Fastmail's
HTML pages.
"""
import requests, re, os, sys
import json
from bs4 import BeautifulSoup
FASTMAIL_URL = "https://www.fastmail.com"
class FastmailUpdater():
""" The fastmail updater manages the state necessary to navigate Fastmail's
Web interface. By maintaining an internal session, you can perform multiple
DNS updates, but only need to perform a login once. """
def __init__(self):
self.sess = requests.session()
self.logged_in = False
self.user_id = None
def login(self, username, password):
""" Attempts to login to fastmail with the given username and password.
If we fail to login to fastmail, this method will throw a RuntimeError """
response = self.sess.get(FASTMAIL_URL)
session_key = re.search('<input value="([0-9a-g]*)" name="sessionKey"', response.content, re.IGNORECASE).group(1)
form = {"sessionKey": session_key,
"dologin": 1,
"hasPushState": 1,
"interface": 'text',
'username': username,
'password': password,
'screenSize': 'desktop',
}
response = self.sess.post(FASTMAIL_URL, form, headers={'referer':FASTMAIL_URL})
result = re.search("u=([0-9a-g]*)&", response.content)
if result is None:
raise ValueError("Could not find user_id! (Wrong username/password?)")
else:
self.user_id = result.group(1)
self.logged_in = True
def parse_static_fields(self, page):
""" Parses out all of the constant fields present in the page that are
hidden input fields with fixed values. We don't change these, but the
server-side probably validates whether or not they are present/valid.
Most of them are probably related to CSRF, authentication, or other security issues.
"""
inputs = []
for input_field in page.body.form.find_all('input'):
if 'value' in input_field.attrs and 'name' in input_field.attrs:
inputs.append([input_field.attrs['name'], input_field.attrs['value']])
f_to_extract = ("MLS", 'SCD-DM', "MSS", "MSignalFeedback", "MSessionKey", "MSessionKeySeed", "FCD-DM")
static_inputs = [inp for inp in inputs if "CKS" in inp[0] or inp[0] in f_to_extract]
results = {'FCD-HasCustomDNS': '1'}
for inp in static_inputs:
results[inp[0]] = inp[1]
return results
def parse_domain(self, row):
""" Parses out the domain name from the given DNS record's row.
Returns ('domains', '<domainname>') """
# The domain name is split up in the first three columns
# Column one is the subdomain, column two is just a period,
# and three is the TLD itself.
# Ex. 'mail' | '.' | 'example.com'
text = [col.text for col in row[0:3]]
if text[0] == '':
# There is no subdomain, so skip the subdomain column and period
# I.E. ('', '.', 'example.com') -> 'example.com'
text = text[2]
else:
text = "".join(text)
return "domain", text
def parse_ttl(self, row):
""" Parses out the ttl value of the given DNS record's row.
This is a little sketchy, since fastmail configures TTL in the UI with a combo box.
Returns ('ttl', ('<inputname>', '<ttlvalue>')) """
column_index = 3
select_box = row[column_index].find('select')
for select_option in select_box.find_all('option'):
if 'selected' in select_option.attrs:
return 'ttl', (select_box.attrs['name'], select_option.attrs['value'])
raise NameError("Unable to find TTL option!")
def parse_rec_type(self, row):
""" Parses out the record type of the given DNS record's row. (A, MX, NS, TXT, etc.)
Returns ('type', '<recordtype>') """
column_index = 4
return "type", row[column_index].text
def parse_rec_data(self, row):
""" Parses out the actual data value contained in the given DNS record's row.
The contents vary depending on row type. (IP addresses for A records, strings for CNAMEs, etc.)
Returns ('data', ('<inputname>', '<recordvalue')) """
column_index = 5
column = row[column_index]
return 'data', (column.input.attrs['name'], column.input.attrs['value'])
def parse_active(self, row):
""" Parses out the active checkbox from the given DNS record's row.
Returns ('active', ('<inputname>', 'on'|'off')) """
column_index = 6
column = row[column_index]
return 'active', (column.input.attrs['name'], 'on' if column.input.attrs['checked'] == 'checked' else 'off')
def get_dns_records(self, page):
""" Gets a list of all the DNS records in the DNS config pages' table. """
table = page.body.form.find(class_="contentTable")
rows = table.find_all("tr")
# The first row is just column headers, the last is for adding new records. We skip both.
rows = rows[1:-1]
rows = [row.find_all('td') for row in rows] # Break up individual columns
records = []
for row in rows:
record = {}
record.update([self.parse_domain(row)])
record.update([self.parse_ttl(row)])
record.update([self.parse_rec_type(row)])
record.update([self.parse_rec_data(row)])
record.update([self.parse_active(row)])
records.append(record)
return records
def dns_update(self, node_key, new_value, types_to_change=('A',)):
""" Checks to see if the records of type 'types_to_change' and domain 'node_key' need
to be updated. If so, it submits fastmail's configuration page. Only performs
an update if it detects a change. """
if not self.logged_in:
raise RuntimeError("Please log in first")
dns_page = FASTMAIL_URL + "/html/?MLS=ASE-*&u=%s&MSignal=CD-*U-1" % self.user_id
response = self.sess.get(dns_page)
page = BeautifulSoup(response.content)
static_inps = self.parse_static_fields(page)
records = self.get_dns_records(page)
form = {}
form_needs_update = False
form.update(static_inps) #These are hidden inputs at the top of the page. Probably for CSRF or something
for record in records:
form.update([record['ttl']])
form.update([record['active']])
data_key, original_data_val = record['data']
if record['domain'] == node_key and record['type'] in types_to_change:
if str(original_data_val).strip() != str(new_value).strip():
print("Update: The '%s' record for domain '%s' needs updating" % (record['type'], record['domain']))
print("Old value: '%s', New Value: '%s'" % (original_data_val, new_value))
form[data_key] = str(new_value)
form_needs_update = True
else:
form[data_key] = original_data_val
if form_needs_update:
print("One or more records need updating. Submitting request...")
response = self.sess.post(FASTMAIL_URL + "/html/?u=%s" % self.user_id, form)
if not response.ok:
print("Failed to submit request: %s" % response.text)
response.raise_for_status() #Should raise an HTTPError with error code/msg
else:
print("Found no changes in records %s for domain '%s'." % (types_to_change, node_key))
def get_ip():
""" Simple helper method that retrieves the current hosts outward facing IP address.
Uses the free service at ifconfig.me """
ip = requests.get("http://whatismyip.akamai.com").text
return ip.strip()
if __name__ == "__main__":
if len(sys.argv) > 1:
print("Using configuration file: %s" % sys.argv[1])
conf = sys.argv[1]
else:
conf = "/etc/ddfastmail.conf"
if not os.path.exists(conf):
raise RuntimeError("File does not exist! %s" % conf)
if os.stat(conf).st_mode & 044 > 0:
raise RuntimeError("Config permissions too open! Please use mode 400 or 600 (go-rwx).")
with open(conf, 'r') as conf_in:
config = json.load(conf_in)
updater = FastmailUpdater()
updater.login(config['username'], config['password'])
#configuration constants defined here:
current_ip = get_ip()
#pylint: disable=W0123
for domain in config['domains']:
dns_records = config['domains'][domain]
if type(dns_records) == dict:
# Multiple record types for single domain stored in a dict
for record_type in dns_records:
if dns_records[record_type].startswith('$'):
dns_records[record_type] = eval(dns_records[record_type][1:])
updater.dns_update(domain, dns_records[record_type], types_to_change=record_type)
elif type(dns_records) in (str, unicode):
# Default record type for a domain ('A' record), stored in a single string
if dns_records.startswith('$'):
dns_records = eval(dns_records[1:])
updater.dns_update(domain, dns_records)
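# A hypothetical /etc/ddfastmail.conf, inferred from the parsing logic above
# (no example ships with this script). Values that begin with '$' are eval'd,
# so "$current_ip" substitutes the address returned by get_ip():
#
# {
#     "username": "user@example.com",
#     "password": "secret",
#     "domains": {
#         "example.com": "$current_ip",
#         "home.example.com": {"A": "$current_ip"}
#     }
# }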
|
ZachAnders/ddfastmail
|
ddfastmail.py
|
Python
|
bsd-3-clause
| 8,377
|
[
"ASE"
] |
467455bc8671cf6d7a5176dca82334caadb066eb7339e2e66ab690964d76d0a9
|
import re
from collections import defaultdict, namedtuple
from datetime import datetime, timedelta
from dateutil.parser import parse as dt_parse
from hashlib import md5
from optparse import make_option
from user_agents import parse
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import reset_queries
from django.db.models.aggregates import Sum, Count
from djanalytics import models
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option(
'-s', '--start', type='string',
help='Beginning date for request events. If not provided, '
'request events from the beginning of time will be '
'selected.'
),
make_option(
'-e', '--end', type='string',
help='End date for request events. If not provided, '
'up to the most recent request event will be selected.'
),
make_option(
'-a', '--max-age', type='int', dest='max_age', default=30,
help='Max number of days in the past to look for Visit and '
'PageVisit objects. Default is 30.'
)
)
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
self.device_type_cache = {}
def handle(self, start=None, end=None, max_age=30, **options):
self.max_age = max_age
self.create_page_visits(start, end)
self.calculate_page_durations()
self.collect_visit_data()
def create_page_visits(self, start, end):
page_pattern_cache = defaultdict(list)
for page_pattern in models.PagePattern.objects.order_by('client'):
page_pattern_cache[page_pattern.client.pk].append(
namedtuple('CachedPagePattern', 'pattern,page_type')(
re.compile(page_pattern.pattern, flags=re.IGNORECASE),
page_pattern.page_type
)
)
web_property_cache = {
domain.name: domain.web_property
for domain in models.Domain.objects.select_related()
}
device_cache = {}
visitor_cache = {}
visit_cache = {}
page_cache = {}
created_page_visits = []
query_dict = {
'pagevisit': None,
'domain__in': web_property_cache.keys(),
}
if start:
start = dt_parse(start)
query_dict['created__gte'] = start
if end:
end = dt_parse(end)
query_dict['created__lte'] = end
query = models.RequestEvent.objects.filter(
**query_dict
).select_related().order_by('tracking_user_id', 'created')
total_events = query.count()
query = query.iterator()
self.stdout.write(
'Processing %s RequestEvents - Start: %s\tEnd: %s\n' % (
total_events,
start.strftime('%m/%d/%Y') if start else 'None',
end.strftime('%m/%d/%Y') if end else 'None'
)
)
for cnt, request_event in enumerate(query):
if cnt % 1000 == 0:
self.stdout.write('Processed %s of %s\n' % (cnt, total_events))
# in DEBUG mode, django holds on to all database queries
# this can cause memory issues in pre-production environments
# with large sets of data
if settings.DEBUG:
reset_queries()
if cnt % 20000 == 0:
self.stdout.write('Clearing caches')
visit_cache = {}
visitor_cache = {}
page_cache = {}
device_cache = {}
models.PageVisit.objects.bulk_create(
created_page_visits
)
created_page_visits = []
user_agent_md5 = device = None
if request_event.user_agent:
user_agent_md5 = md5(request_event.user_agent.encode('utf-8')).hexdigest()
device = device_cache.get(user_agent_md5)
if user_agent_md5 and not device:
user_agent = parse(request_event.user_agent)
device, created = models.Device.objects.get_or_create(
user_agent_md5=user_agent_md5,
defaults={
'user_agent': request_event.user_agent,
'screen_width': request_event.screen_width,
'screen_height': request_event.screen_height,
'os': user_agent.os.family,
'os_version': user_agent.os.version_string,
'browser': user_agent.browser.family,
'browser_version': user_agent.browser.version_string,
'device': user_agent.device.family,
'device_type': self._get_device_type(user_agent),
}
)
                device_cache[user_agent_md5] = device # key by the md5 digest so the lookup above hits the cache
if not device:
continue
visitor = visitor_cache.get(request_event.tracking_user_id)
if not visitor:
visitor, _created = models.Visitor.objects.get_or_create(
uuid=request_event.tracking_user_id,
)
visitor_cache[request_event.tracking_user_id] = visitor
visitor.clients.add(request_event.client)
page_key = (request_event.path, request_event.client)
page = page_cache.get(page_key)
if not page:
page_type = None
for page_pattern in page_pattern_cache[request_event.client.pk]:
if page_pattern.pattern.match(request_event.path):
page_type = page_pattern.page_type
break
page, created = models.Page.objects.get_or_create(
path=request_event.path,
client=request_event.client,
defaults={"page_type": page_type}
)
page_cache[page_key] = page
web_property = web_property_cache.get(request_event.domain.lower())
if not web_property:
self.stdout.write(
'No WebProperty found for domain: %s - skipping' % request_event.domain
)
continue
visit = visit_cache.get(request_event.tracking_key)
if not visit:
visit, _created = models.Visit.objects.get_or_create(
uuid=request_event.tracking_key,
visitor=visitor,
defaults={
'first_page': page,
'device': device,
'visit_date': request_event.created.date(),
'web_property': web_property,
},
)
visit_cache[request_event.tracking_key] = visit
created_page_visits.append(
models.PageVisit(
page=page,
visit=visit,
request_event=request_event
)
)
if created_page_visits:
models.PageVisit.objects.bulk_create(created_page_visits)
def calculate_page_durations(self):
# finding all page visits with no duration
# from the last 'max_age' days
query = models.PageVisit.objects.filter(
duration=None,
request_event__created__gte=datetime.now() - timedelta(
days=self.max_age
),
)
# This looks a little messy, but makes use of python to get a distinct list
# as opposed to the database. At least with MySQL, the 'distinct' can be
# a performance hit.
all_visits = list(set(
query.values_list(
'request_event__tracking_key', flat=True
)
))
total_visits = len(all_visits)
start_idx = 0
subset = 100000 if total_visits > 100000 else total_visits
while start_idx < total_visits:
self.stdout.write('\n')
self.stdout.write('Processing %s to %s of %s total '
'page visits for duration\n' % (
start_idx,
start_idx + subset
if start_idx + subset < total_visits else total_visits,
total_visits
)
)
tracking_keys = all_visits[start_idx: start_idx+subset]
start_idx += subset
# finding sessions with more than one request event
events = list(
models.RequestEvent.objects.filter(
tracking_key__in=tracking_keys
).values(
'tracking_key'
).annotate(
Count('tracking_key')
).order_by().filter(
tracking_key__count__gt=1
).values_list('tracking_key', flat=True).distinct()
)
# limit the page visits to just those with sessions that
# have more than one request event
subquery = query.filter(
request_event__tracking_key__in=events
).select_related('request_event')
total_page_visits = subquery.count()
self.stdout.write('Processing %s page visits for durations\n' % total_page_visits)
for cnt, page_visit in enumerate(subquery):
if cnt % 1000 == 0:
self.stdout.write(
'Processed %s of %s page visits\n' % (cnt, total_page_visits)
)
if settings.DEBUG:
reset_queries()
try:
# get the next chronological request event
next_event = models.RequestEvent.objects.filter(
tracking_key=page_visit.request_event.tracking_key,
created__gt=page_visit.request_event.created
).earliest()
elapsed_delta = next_event.created - page_visit.request_event.created
page_visit.duration = round(elapsed_delta.total_seconds())
page_visit.save()
except models.RequestEvent.DoesNotExist:
# nothing newer than the current PageVisit
pass
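    @staticmethod
    def _durations_sketch(created_times):
        # A minimal sketch (not used by this command) of the duration rule in
        # calculate_page_durations above: each page visit lasts until the
        # session's next request event, and the session's final event gets no
        # duration. Plain datetimes stand in for RequestEvent rows.
        ordered = sorted(created_times)
        return [round((b - a).total_seconds())
                for a, b in zip(ordered, ordered[1:])]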
def collect_visit_data(self):
# Visit data
page_pattern_cache = {}
for pattern in models.PagePattern.objects.select_related().filter(
page_type__code__in=(
models.PageType.CONVERSION,
models.PageType.FUNNEL
)
).order_by('client','page_type').distinct():
if not pattern.client.pk in page_pattern_cache:
page_pattern_cache[pattern.client.pk] = defaultdict(list)
page_pattern_cache[pattern.client.pk][pattern.page_type.code].append(
re.compile(pattern.pattern, flags=re.IGNORECASE),
)
query = models.Visit.objects.filter(
visit_date__gte=(datetime.now() - timedelta(days=self.max_age))
).distinct().annotate(
calc_duration=Sum('pagevisit__duration')
).prefetch_related(
'pagevisit_set__page__page_type', 'pagevisit_set__page__client',
'pagevisit_set__request_event'
)
total_visits = query.count()
start_idx = 0
subset = 100000 if total_visits > 100000 else total_visits
while start_idx < total_visits:
self.stdout.write('\n')
self.stdout.write('Processing %s to %s of %s total '
'page visits for other data\n' % (
start_idx,
start_idx + subset
if start_idx + subset < total_visits else total_visits,
total_visits
)
)
visit_subset = query[start_idx: start_idx+subset]
start_idx += subset
for cnt, visit in enumerate(visit_subset):
if cnt % 1000 == 0:
self.stdout.write(
'Processed %s of %s visits\n' % (cnt, len(visit_subset))
)
if settings.DEBUG:
reset_queries()
update_fields = []
pagevisits = sorted(
visit.pagevisit_set.all(),
cmp=lambda x, y: cmp(x.request_event.created, y.request_event.created)
)
if not visit.last_page or visit.last_page.pk != pagevisits[-1].pk:
visit.last_page = pagevisits[-1].page
update_fields.append('last_page')
order_ids = []
conversion_count = 0
funnel_found = False
for page_visit in pagevisits:
page_patterns = page_pattern_cache.get(page_visit.page.client.pk, {})
# in order to count a conversion, the visitor has to have hit
# at least one funnel page
for pattern in page_patterns.get(models.PageType.FUNNEL, []):
if pattern.match(page_visit.page.path):
funnel_found = True
break
for pattern in page_patterns.get(models.PageType.CONVERSION, []):
m = pattern.match(page_visit.page.path)
if m and len(m.groups()) > 0 and funnel_found:
order_ids.append(m.group(1))
funnel_found = False
break
conversion_count = len(set(order_ids))
if visit.conversion_count != conversion_count:
visit.conversion_count = conversion_count
update_fields.append('conversion_count')
if visit.duration != visit.calc_duration:
visit.duration = visit.calc_duration
update_fields.append('duration')
if update_fields:
visit.save(update_fields=update_fields)
def _get_device_type(self, user_agent):
code = models.DeviceType.UNKNOWN
if user_agent.is_mobile:
code = models.DeviceType.MOBILE
elif user_agent.is_tablet:
code = models.DeviceType.TABLET
elif user_agent.is_pc:
code = models.DeviceType.DESKTOP
elif user_agent.is_bot:
code = models.DeviceType.BOT
device_type = self.device_type_cache.get(code)
if not device_type:
device_type, _created = models.DeviceType.objects.get_or_create(
code=code,
defaults={'name': code.capitalize()}
)
self.device_type_cache[code] = device_type
return device_type
|
analytehealth/django-analytics
|
djanalytics/management/commands/collect_reporting_stats.py
|
Python
|
bsd-2-clause
| 15,156
|
[
"VisIt"
] |
74e96b383a680243c6272913845209f74cd654e3932eda50b651f4fe4bedcdbf
|
#!/usr/bin/python
from scipy.stats import cauchy
import random
import math
import csv
import numpy as np
#import netCDF4 as nc
import argparse
#import matplotlib.pyplot as plt
#from mpl_toolkits.mplot3d import Axes3D
#import plotly.plotly as py
#import plotly.graph_objs as go
#####################################################
## TAKE IN NUMBER OF HII REGIONS FROM COMMAND LINE ##
#####################################################
parser = argparse.ArgumentParser()
parser.add_argument("numberRegions", type=int,
help="Number of HII Regions to Populate in Model")
args = parser.parse_args()
numRegions = args.numberRegions # Number of HII regions, taken from the command line
############
## SETUP ##
############
useTremblin = False # Use the Tremblin 2014 model to determine HII region sizes
plot3D = False # Use Plotly to create interactive 3D plots of the HII region distribution
if useTremblin == True :
import netCDF4 as nc
ff=nc.Dataset('/Users/Marvin/Research/Projects/GalSims/3D/larson_radius_hypercube.ncdf') # Import data cube from Tremblin et. al. 2014
region = 1 # Start count of regions from 1 to NumRegions
HiiList = [] # Initialize list to store Hii data
(galRad,xRot,yRot,z,mass,lum,age,radius)=(0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0)
(diffLum,barLum,ThreekpcLum,sprLum,totLum)=(0.0,0.0,0.0,0.0,0.0)
(diffCount,barCount,ThreekpcCount,sprCount,totCount)=(0,0,0,0,0)
###############################################
## TURN ON / OFF VARIOUS GALACTIC STRUCTURES ##
###############################################
# The following definitions determine which structures will
# be present in the galaxy and what their relative proportion is.
# See Hughes et al. ApJ April 2013 for relative proportion in M51
diffuse = True
bar = True
ThreekpcArm = True
spiral = True
diffusePercent = 20
barPercent = 5
ThreekpcArmPercent = 10
spiralPercent = 100 - (diffusePercent + barPercent +
ThreekpcArmPercent)
###########################
## STRUCTURAL PARAMETERS ##
###########################
extentOfBar = 4.4 # Length of bar in kiloparsecs.
# See Benjamin et al. ApJ Sept 2005.
cutoff = 3.87#3.41#4.1 # Looking to (cutoff)x the bar length.
# Max value ~6.86 due to model limitation (Tremblin, below)
galRange = extentOfBar*cutoff
sunPos = 8.4 # Distance of Sun from GC (Reid 2009)
sunHeight = 0.02 # Distance of Sun above galactic plane (kpc) (Humphreys 1995)
circRot = 220 # Solar circular rotation speed. Carroll & Ostlie (24.36)
v0 = 0 # Initial velocity of source. Only relevant to 3kpc arm.
galRot = 44.0*math.pi/180.0 # Rotates entire galaxy by (x) degrees.
# See Benjamin et al. ApJ Sept 2005.
random.seed( 1 ) # Seed random number generator. (ARBITRARY)
numSpirals = 4 # Determines Number of Spiral arms
pitchAngle = 12.*math.pi/180. # Determines curvature of arms
# 7.3 deg --> See Wu et al. A&A April 2014 for pitch angle estimate in Sagitarrius arm
# Vallee 2014 gives pitch angle of 12 deg.
warpParam = math.pi/2 # Determines degree of Galactic warp
# DEFINE/CONVERT TO AS ANGLE?
warpHeight = 0.08 # BY INSPECTION
maxSpiralRevolutions = 1.0 # Range for number of spiral revs. (ARBITRARY)
maxCluster = 2 # Maximum number of regions in a given cluster (ARBITRARY)
avgCluster = 1 # Most commonly found number of regions in cluster (ARBITRARY)
clusterRange = 20/1000. # Sets clustered regions to be within 20 pc of each other (float division; 20/1000 truncates to 0 in Python 2)
# See Motte et al. ApJ 2002
sigma = 0.8/2.35 # Sets FWHM of spiral arms to (x) kpc
# 200 pc See Wu et al. A&A April 2014 for deviation
# from spiral estimate in Sagitarrius arm.
# Vallee 2014 gives width of 400 pc "from mid arm to dust lane"
# Therefore, FWHM would be 800 pc and sigma = .800/2.35
zmax = .15/5 # Sets max height in z as +/- (x) kpc
gamma = 0 # 0.01365 # Sets spread of Cauchy-Lorentz Z-distribution of regions
alpha = 0 # Sets HII region drop off as r^-alpha(after bar)
# Mass Limits, In Units of Stellar Mass. Sets lower bound for ionizing star
#(lowerMass, upperMass) = (10, 90)
(lowerMass, upperMass) = (9, 90)
(log_lowerMass, log_upperMass) = (math.log(lowerMass), math.log(upperMass))
while region <= numRegions :
########################
## RESET INDICES, ETC ##
########################
v0 = 0
i = 1
# Reset i each time to force a region to be populated
# if all requirements are met.
selectionParam = random.random()
# Determines if Hii region is kept or thrown away.
# Forces population of regions to follow linear trend
# to end of bar and power law drop-off after bar.
numCluster = 1
numClusterTot = random.randrange(1,maxCluster,1)
whereIsRegion = random.randrange(1, diffusePercent + barPercent
+ ThreekpcArmPercent
+ spiralPercent, 1)
# Determines location of one individual region.
##################
## DIFFUSE HALO ##
##################
# HII Region will be randomly populated in Galaxy, but will not be
# be placed in central region (within bar radius).
if (whereIsRegion <= diffusePercent) and (diffuse == True) :
while i != 0 : # This loop forces an Hii region to be populated diffusely
x = random.gauss(0,galRange/2) # Sets diffuse population to have
# FWHM of galRange/2
y = random.gauss(0,galRange/2)
theta = math.atan(x/y)
galRad = pow(pow(x,2)+pow(y,2),.5)# Region's distance from center
if galRad > 11 :
galWarp = ((galRad-11)/6)*math.sin(theta)+0.3*(((galRad-11)/6)**2)*(1-math.cos(2*theta))
else :
galWarp = 0
zpos = cauchy.rvs(loc=0,scale=zmax,size=1,random_state=None)[0]
z = zpos + galWarp # Produces Cauchy-Lorentz z distribution
i += 1
if (abs(x) > extentOfBar + random.gauss(0,sigma)) \
and (galRad < galRange + random.gauss(0,sigma)) \
and (selectionParam < pow(extentOfBar,alpha)/pow(galRad,alpha)):
region += numClusterTot # Increase region count
i = 0 # Escape loop
elif (abs(x) < extentOfBar + random.gauss(0,sigma)) \
and (extentOfBar < galRad < galRange + random.gauss(0,sigma)) \
and (selectionParam < pow(extentOfBar,alpha)/pow(galRad,alpha)):
region += numClusterTot # Increase region count
i = 0 # Escape loop
##################
## GALACTIC BAR ##
##################
elif (whereIsRegion > diffusePercent) \
and (whereIsRegion <= (diffusePercent + barPercent)) \
and (bar == True) :
while i != 0 : # This loop forces an Hii region to be populated in bar
        x = random.uniform(-extentOfBar,extentOfBar) + random.gauss(0,sigma) # Uniform along the bar, plus Gaussian scatter
y = random.gauss(0,sigma) # Sets thickness of bar to (sigma) kpc
theta = math.atan(x/y)
galRad = pow(pow(x,2)+pow(y,2),.5)# Region's distance from center
galWarp = 0 # No warp assigned within R_Gal = 11 kpc
zPos = cauchy.rvs(loc=0,scale=zmax,size=1,random_state=None)[0]
z = galWarp + zPos
# Produces Cauchy-Lorentz z distribution
galRad = pow(pow(x,2)+pow(y,2),.5) # Region's distance from center
i += 1
if (selectionParam < galRad/(extentOfBar)) \
and (galRad < galRange) :
region += numClusterTot # Increase region count
i = 0 # Escape loop
# Note: Distribution was slightly higher than observed. Dropped with 0.9 factor.
######################
## 3 KILOPARSEC ARM ##
######################
elif (whereIsRegion > (diffusePercent + barPercent)) \
and (whereIsRegion <= (diffusePercent + barPercent + ThreekpcArmPercent)) \
and (ThreekpcArm == True) :
yInt = extentOfBar/2
ySign = random.randrange(-1,1)
while i != 0 : # This loop forces an Hii region to be populated in 3 kpc arm
xCart = random.uniform(-extentOfBar,extentOfBar)
yCart = math.copysign(yInt*pow(1-pow(xCart,2)/pow(extentOfBar,2),.5),ySign) # Produces 3 kpc arm structure
x = xCart + random.gauss(0, sigma) # Gaussian distribution around 3 kpc arm
y = yCart + random.gauss(0, sigma)
theta = math.atan(x/y)
zPos = cauchy.rvs(loc=0,scale=zmax,size=1,random_state=None)[0]
galWarp = 0 # No warp assigned within R_Gal = 11 kpc
z = galWarp + zPos # EDIT TO Produces Cauchy-Lorentz z distribution
galRad = pow(pow(x,2)+pow(y,2),.5) # Region's distance from center
i += 1
if (selectionParam < galRad/extentOfBar) \
and (galRad < galRange) :
v0 = 53 # Expansion of 3kpc arm
region += numClusterTot # Increase region count
i = 0 # Escape loop
#################
## SPIRAL ARMS ##
#################
elif (whereIsRegion > (diffusePercent + barPercent + ThreekpcArmPercent)) \
and (whereIsRegion <= (diffusePercent + barPercent + ThreekpcArmPercent + spiralPercent)) \
and (spiral == True):
while i != 0 : # This loop forces an Hii region to be populated in arms
whichArm = random.randint(0,numSpirals-1)
theta = random.uniform(0,2*np.pi*maxSpiralRevolutions)
if whichArm == 0:
phi0 = 223.*math.pi/180
elif whichArm == 1:
phi0 = 108.*math.pi/180
elif whichArm == 2:
phi0 = 43.*math.pi/180
elif whichArm == 3:
phi0 = 288.*math.pi/180
r = extentOfBar*math.exp(pitchAngle*theta)
xCart = r*math.cos(theta-phi0)
yCart = r*math.sin(theta-phi0)
x = xCart + random.gauss(0,sigma) # Gaussian distribution around spiral
y = yCart + random.gauss(0,sigma)
#theta = math.atan(x/y)
galRad = pow(pow(x,2)+pow(y,2),.5)# Region's distance from center
if galRad > 11 :
galWarp = ((galRad-11)/6)*math.sin(theta)+0.3*(((galRad-11)/6)**2)*(1-math.cos(2*theta))
else :
galWarp = 0
zPos = cauchy.rvs(loc=0,scale=zmax,size=1,random_state=None)[0]
z = galWarp + zPos
galRad = pow(pow(x,2)+pow(y,2),.5) # Region's distance from center in kpc
i += 1
if (galRad < galRange) \
and (selectionParam < pow(extentOfBar,alpha)/pow(galRad,alpha)) :
region += numClusterTot # Increase region count
i = 0 # Escape Loop
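    # Illustrative note: r = extentOfBar*exp(pitchAngle*theta) traces a
    # logarithmic spiral. Strictly, an arm of pitch angle psi grows as
    # exp(tan(psi)*theta); with psi = 12 deg, tan(psi) = 0.2126 versus
    # psi = 0.2094 rad, so using the angle itself is a ~1.5 percent
    # small-angle approximation.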
############################################
## DETERMINE INDIVIDUAL REGION PARAMETERS ##
############################################
while (i == 0) and (numCluster <= numClusterTot) :
#######################################
## UPDATE REGION POSITION / DISTANCE ##
#######################################
# Rotate galaxy to match Milky Way's rotation
xRot = x*math.cos(galRot) - y*math.sin(galRot)
yRot = x*math.sin(galRot) + y*math.cos(galRot)
# Determine Distance and Galactic Coordinates
dist = pow(pow(xRot,2)+pow(yRot-sunPos,2),0.5)
l = math.copysign(math.acos((pow(dist,2)+pow(sunPos,2)-pow(galRad,2))/(2*sunPos*dist))*180/math.pi,xRot)
b = math.atan((z-sunHeight)/dist)
# Set velocity of source
omega = circRot/galRad # Assume flat rotation curve.
omega0 = circRot/sunPos
if (whereIsRegion > diffusePercent) \
and (whereIsRegion <= (diffusePercent + barPercent)) \
and (bar == True) :
vR = galRad/extentOfBar*((omega - omega0)*sunPos*math.sin(l*math.pi/180)+v0*math.cos(theta))
elif (whereIsRegion > (diffusePercent + barPercent)) \
and (whereIsRegion <= (diffusePercent + barPercent + ThreekpcArmPercent)) \
and (ThreekpcArm == True) :
vR = galRad/extentOfBar*((omega - omega0)*sunPos*math.sin(l*math.pi/180)+v0*math.cos(theta))
else :
vR = (omega - omega0)*sunPos*math.sin(l*math.pi/180)+v0*math.cos(theta)
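        # Illustrative note: for a flat rotation curve this is the standard
        # LSR radial velocity, v_r = (omega - omega_0)*R_0*sin(l), plus a
        # v0*cos(theta) expansion term that is nonzero only for the 3 kpc arm
        # (v0 = 53 km/s); bar and 3 kpc arm sources are additionally scaled
        # by galRad/extentOfBar.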
######################
## AGE DISTRIBUTION ##
######################
# Set Age Distribution
timeParam = random.randint(0,99)
        age = timeParam*.127 # Age in Myr (12.7 Myr limit) in Tremblin model
##########################
## ELECTRON TEMPERATURE ##
##########################
# Set Electron Temperature Distribution
# Relationship taken from Balser et.al. 2011, put in range accepted by Tremblin model
# Tremblin model ranges from 5000 K to 15000 K in 1000 K increments
T_e = 5756 + 303*random.uniform(-1,1) + galRad*(299 + 31*random.uniform(-1,1))
# T_e = 6080 + galRad*378 # Averaged value suggested in Tremblin 2014
TeParam = int(round(T_e,-3)/1000 - 5)
###################################
## NEUTRAL HYDROGEN DISTRIBUTION ##
###################################
# Set Neutral Hydrogen Density Distribution
# Tremblin model ranges from 1700 cm-3 to 5100 cm-3 in 340 cm-3 increments
densityParam = random.randint(0,10)
#######################
## MASS DISTRIBUTION ##
#######################
# Set Host Star Mass Distribution
massParam = random.random() # Used in forcing powerlaw fit
while massParam > 0. :
log_mass = random.uniform(log_lowerMass,log_upperMass)
mass = math.exp(log_mass)
# Compute likelihood of candidate from Salpeter IMF
likelihood = math.pow(mass, 1.0 - 2.35)
maxLikelihood = math.pow(lowerMass, 1.0 - 2.35)
massParam = random.uniform(0,maxLikelihood)
IMF = pow(lowerMass,2.35-1)*pow(mass,1-2.35)
#lifetime = 10000.*pow(mass,-2.5) # 10 billion years for Sun, less for higher mass stars
# L~M^3.5. Lifetime ~ M/L ~ M^(1-3.5) ~ M^(-2.5)
#print str(mass) + " : " + str(massParam) + " <? " + str(IMF) + " : " + str(age) + " <? " + str(lifetime)
            if (massParam < likelihood): #and (age < lifetime): # Makes power law fit
massParam = 0. # Escape loop
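        # Illustrative note on why this rejection loop yields a Salpeter IMF:
        # mass is drawn log-uniformly, so p(m) ~ 1/m before rejection.
        # Accepting with probability proportional to m^(1-2.35), normalized by
        # its maximum at lowerMass, multiplies in m^(1-2.35) and gives
        # p(m) ~ m^-2.35 overall, the Salpeter slope.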
#########################
## IONIZING LUMINOSITY ##
#########################
'''
lumPowerLaw = 3.5 # Used 1.94 previously (WHY?)
lumMin = math.log10(pow(lowerMass,lumPowerLaw))
lumMax = math.log10(pow(upperMass,lumPowerLaw))
lumParam = int(round((math.log10(pow(mass,lumPowerLaw))-lumMin)/(lumMax-lumMin)*16,0)) # Use this line to access all values of Lum from 10^47 - 10^51
# fluxParam = int(round((math.log10(pow(mass,1.94))-fluxMin)/(fluxMax-fluxMin)*12,0)+4)
'''
# Set Host Star Ionizing Luminosity Distribution
# Tremblin model ranges from 10^47 to 10^51 in quarter-dec increments
# In practice these are given as 47 to 51 in steps of 0.25
# B-star mass ranges come from Silaj et. al 2014 and Armentrout et al. 2017
# O-star mass ranges comes from Loren Anderson's Thesis (Eq 6.1, Boston University 2009)
'''
if mass < 18:
N_ly = 43.4818+0.231166*mass
else :
N_ly = 46.95*math.pow(mass-16.27,7./500.) # Fit to Sternberg 2003 by Anderson 2010
'''
# Set Host Star Ionizing Luminosity Distribution
# Tremblin model ranges from 10^47 to 10^51 in quarter-dec increments
# In practice these are given as 47 to 51 in steps of 0.25
# B-star mass ranges come from Silaj et. al 2014 and Armentrout et al. 2017
# O-star mass ranges comes from Sternberg 2003 and Armentrout et al. 2017
if mass < 9.11 :
N_ly = 45.57 # B2
elif mass < 10.135 : # interpolated
N_ly = 45.835
elif mass < 11.16 : # (13.21+9.11)/2., interpolated
N_ly = 46.1 # B1.5
elif mass < 12.185: # interpolated
N_ly = 46.3
elif mass < 13.21 :
N_ly = 46.5 # B1
elif mass < 14.1575: # interpolated
N_ly = 46.75
elif mass < 15.105 : # (13.21+17.)/2., interpolated
N_ly = 47. # B0.5
elif mass < 16.0525: # interpolated
N_ly = 47.2
elif mass < 17. :
N_ly = 47.4 # B0
elif mass < 20.15: # interpolated
N_ly = 47.48
elif mass < 23.3 :
N_ly = 47.56 # O9.5
elif mass < 24.35: # interpolated
N_ly = 47.73
elif mass < 25.4:
N_ly = 47.9 # O9
elif mass < 26.7 : # interpolated
N_ly = 48
elif mass < 28 :
N_ly = 48.1 # O8.5
elif mass < 29.4 : # interpolated
N_ly = 48.195
elif mass < 30.8 :
N_ly = 48.29 # O8
elif mass < 32.45 : # interpolated
N_ly = 48.365
elif mass < 34.1:
N_ly = 48.44 # O7.5
elif mass < 35.9 : # interpolated
N_ly = 48.535
elif mass < 37.7 :
N_ly = 48.63 # O7
elif mass < 39.35 : # interpolated
N_ly = 48.715
elif mass < 41 :
N_ly = 48.80 # O6.5
elif mass < 43.1 : # interpolated
N_ly = 48.88
elif mass < 45.2 :
N_ly = 48.96 # O6
elif mass < 47.8 : # interpolated
N_ly = 49.035
elif mass < 50.4 :
N_ly = 49.11 # O5.5
elif mass < 53.5 : # interpolated
N_ly = 49.185
elif mass < 56.6 :
N_ly = 49.26 # O5
elif mass < 62.75 : # interpolated
N_ly = 49.365
elif mass < 68.9 :
N_ly = 49.47 # O4
elif mass < 78.25 : # interpolated
N_ly = 49.55
else :
N_ly = 49.63 # O3
# Conform ionizing luminosities to fit Tremblin model
# Round ionizing luminosities to the nearest quarter dec
if N_ly < 47 :
lumParam = 47
else :
lumParam = round(4.*N_ly)/4
freq_GHz = 10
regionLum = pow(10,N_ly)*pow(T_e,0.45)*pow(freq_GHz,-0.1)/(6.3*pow(10,52)) # Derived from Eq. 4 in Armentrout et al. 2017
regionFlux = regionLum/(4*math.pi*dist**2) # UNITS?
####################
## SIZE OF REGION ##
####################
# From Distributions, Determine HII Region Radius
if useTremblin == True :
# Using Pascal Tremblin's hypercube data
# TESTING. TAKE THESE OUT.
timeParam=2
lumParam = 47.25
TeParam = int(round((5756 + 303*random.uniform(-1,1)) + galRad*(299 + 31*random.uniform(-1,1)),-3)/1000 - 5)
densityParam = 2
radius = ff.variables['radius'][timeParam,lumParam,TeParam,densityParam]
else :
alpha_h = 3.*pow(10.,-13.)
n_e = 10.**3.
age_sec = age*10**6.*3.154*10**7.
soundSpeed = 20000 # in cm/s (0.2 km/s) Tremblin 14
rad_initial = pow(3.*pow(10.,N_ly)/(4.*math.pi*alpha_h*pow(n_e,2.)),(1./3.)) #radius in cm
radius= rad_initial*pow(1+7*age_sec*soundSpeed/(4*rad_initial),4./7.)*3.24*pow(10,-19.) #radius in pc, time evolution from Spitzer 1968
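# Reviewer note: the two lines above are the Stromgren radius
# r_i = (3*N_ly / (4*pi*alpha_B*n_e**2))**(1/3) followed by the Spitzer (1968)
# pressure-driven expansion r(t) = r_i*(1 + 7*c_s*t/(4*r_i))**(4/7);
# the trailing factor 3.24e-19 converts cm to pc.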
#############
## TESTING ##
#############
# This section allows the user to test various parameters for easy
# output to terminal (e.g. luminosity of various features, counts
# of regions in spiral versus bar, etc.)
if (whereIsRegion <= diffusePercent) \
and (diffuse == True) :
diffLum = diffLum + lum
diffCount += 1
regNum = 1
elif (whereIsRegion > diffusePercent) \
and (whereIsRegion <= (diffusePercent + barPercent)) \
and (bar == True) :
barLum = barLum + lum
barCount += 1
regNum = 2
elif (whereIsRegion > (diffusePercent + barPercent)) \
and (whereIsRegion <= (diffusePercent + barPercent + ThreekpcArmPercent)) \
and (ThreekpcArm == True) :
ThreekpcLum = ThreekpcLum + lum
ThreekpcCount += 1
regNum = 3
elif (whereIsRegion > (diffusePercent + barPercent + ThreekpcArmPercent)) \
and (whereIsRegion <= (diffusePercent + barPercent + ThreekpcArmPercent + spiralPercent)) \
and (spiral == True):
sprLum = sprLum + lum
sprCount += 1
if whichArm == 0 :
regNum = 5
elif whichArm == 1 :
regNum = 6
elif whichArm == 2 :
regNum = 7
elif whichArm == 3 :
regNum = 8
totLum = totLum + lum
#print region
#####################
## APPEND TO ARRAY ##
#####################
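# HiiList columns, in order (units as assumed above): galactocentric radius,
# rotated x, rotated y, z height, host-star mass [Msun], log10 ionizing photon
# rate, age [Myr], region radius [pc], galactic longitude l [deg], radial
# velocity vR, region-type flag regNum, galactic latitude b [deg], region flux.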
HiiList.append([galRad,xRot,yRot,z,mass,N_ly,age,radius,l,vR,regNum,b,regionFlux])
numCluster += 1
######################
## PLOT DATA POINTS ##
######################
if plot3D == True :
i = 0
(xlist,ylist,zlist)=(list(),list(),list())
while i < len(HiiList):
xlist.append(HiiList[i][1])
ylist.append(HiiList[i][2])
zlist.append(HiiList[i][3])
i+=1
trace = go.Scatter3d(x=xlist,y=ylist,z=zlist,mode='markers',marker=dict(size=5,line=dict(color='rgba(217,217,217,0.14)',width=0.5),opacity=0.8))
data=[trace]
layout = go.Layout(margin=dict(l=0,r=0,b=0,t=0))
fig = go.Figure(data=data,layout=layout)
py.iplot(fig,filename='3dscatter')
###################
## WRITE TO FILE ##
###################
with open("HIIregion_popSynthesis.csv", "wb") as f:
writer = csv.writer(f)
writer.writerows(HiiList)
'''
print "Diffuse Luminosity : " + str(diffLum*100/totLum) + "% (" + str(diffCount) + " Regions)"
print "Bar Luminosity : " + str(barLum*100/totLum) + "% (" + str(barCount) + " Regions)"
print "3 kpc Arm Luminosity : " + str(ThreekpcLum*100/totLum) + "% ("+ str(ThreekpcCount) + " Regions)"
print "Spiral Luminosity : " + str(sprLum*100/totLum) + "% (" + str(sprCount) + " Regions)"
print "Total Luminosity : " + str((barLum+ThreekpcLum+sprLum+diffLum)*100/totLum) + "% (" + str(barCount+ThreekpcCount+sprCount+diffCount) + " Regions)"
'''
|
WillArmentrout/galSims
|
simulate/old/Simulate_Draft_ChangeArmPositions_6.5.18.py
|
Python
|
gpl-2.0
| 22,890
|
[
"Galaxy",
"Gaussian"
] |
0ecbd341466ff0c57fe0d38dd653c134c875f2e1068a7ac86c1c45d10bb4741f
|
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
import itertools
import unittest as ut
import unittest_decorators as utx
import espressomd
import espressomd.interactions
import espressomd.virtual_sites
@utx.skipIfMissingFeatures(['VIRTUAL_SITES_INERTIALESS_TRACERS'])
class IBM(ut.TestCase):
'''Test IBM implementation with a Langevin thermostat.'''
system = espressomd.System(box_l=3 * [8.])
system.time_step = 0.06
system.cell_system.skin = 0.1
def tearDown(self):
self.system.part.clear()
self.system.actors.clear()
self.system.thermostat.turn_off()
def compute_dihedral_angle(self, pos0, pos1, pos2, pos3):
        # unit normals of the two triangles sharing the edge (pos0, pos2)
n1 = np.cross((pos1 - pos0), (pos2 - pos0))
n2 = np.cross((pos2 - pos0), (pos3 - pos0))
norm1 = np.linalg.norm(n1)
norm2 = np.linalg.norm(n2)
n1 = n1 / norm1
n2 = n2 / norm2
cos_alpha = min(1, np.dot(n1, n2))
alpha = np.arccos(cos_alpha)
return alpha
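    # Sanity sketch (reviewer note): for four coplanar points the two triangle
    # normals are parallel, so np.dot(n1, n2) = 1 and the returned angle is 0;
    # the min() clamp only guards against round-off pushing cos_alpha above 1.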
def test_tribend(self):
# two triangles with bending interaction
# move nodes, should relax back
system = self.system
system.virtual_sites = espressomd.virtual_sites.VirtualSitesInertialessTracers()
system.thermostat.set_langevin(kT=0, gamma=10, seed=1)
# Add four particles
p0 = system.part.add(pos=[4, 4, 4])
p1 = system.part.add(pos=[4, 4, 5])
p2 = system.part.add(pos=[4, 5, 5])
p3 = system.part.add(pos=[4, 5, 4])
# Add first triel, weak modulus
tri1 = espressomd.interactions.IBM_Triel(
ind1=p0.id, ind2=p1.id, ind3=p2.id, elasticLaw="Skalak", k1=0.1, k2=0, maxDist=2.4)
system.bonded_inter.add(tri1)
p0.add_bond((tri1, p1, p2))
# Add second triel, strong modulus
tri2 = espressomd.interactions.IBM_Triel(
ind1=p0.id, ind2=p2.id, ind3=p3.id, elasticLaw="Skalak", k1=10, k2=0, maxDist=2.4)
system.bonded_inter.add(tri2)
p0.add_bond((tri2, p2, p3))
# Add bending
tribend = espressomd.interactions.IBM_Tribend(
ind1=p0.id, ind2=p1.id, ind3=p2.id, ind4=p3.id, kb=1, refShape="Initial")
system.bonded_inter.add(tribend)
p0.add_bond((tribend, p1, p2, p3))
# twist
system.part.all().pos = system.part.all().pos + np.random.random((4, 3))
# Perform integration
system.integrator.run(200)
angle = self.compute_dihedral_angle(p0.pos, p1.pos, p2.pos, p3.pos)
self.assertLess(angle, 2E-2)
# IBM doesn't implement energy and pressure kernels.
energy = self.system.analysis.energy()
pressure = self.system.analysis.pressure()
self.assertAlmostEqual(energy['bonded'], 0., delta=1e-10)
self.assertAlmostEqual(pressure['bonded'], 0., delta=1e-10)
def test_triel(self):
system = self.system
system.virtual_sites = espressomd.virtual_sites.VirtualSitesInertialessTracers()
system.thermostat.set_langevin(kT=0, gamma=1, seed=1)
# Add particles: 0-2 are not bonded, 3-5 are bonded
non_bound = system.part.add(pos=[[4, 4, 4], [4, 4, 5], [4, 5, 5]])
p3 = system.part.add(pos=[1, 4, 4])
p4 = system.part.add(pos=[1, 4, 5])
p5 = system.part.add(pos=[1, 5, 5])
all_partcls = system.part.all()
# Add triel for 3-5
tri = espressomd.interactions.IBM_Triel(
ind1=p3.id, ind2=p4.id, ind3=p5.id, elasticLaw="Skalak", k1=15,
k2=0, maxDist=2.4)
system.bonded_inter.add(tri)
p3.add_bond((tri, p4, p5))
all_partcls.pos = all_partcls.pos + np.array((
(0, 0, 0), (1, -.2, .3), (1, 1, 1),
(0, 0, 0), (1, -.2, .3), (1, 1, 1)))
distorted_pos = np.copy(non_bound.pos)
system.integrator.run(110)
dist1bound = system.distance(p3, p4)
dist2bound = system.distance(p3, p5)
# check bonded particles. Distance should restore to initial config
self.assertAlmostEqual(dist1bound, 1, delta=0.05)
self.assertAlmostEqual(dist2bound, np.sqrt(2), delta=0.05)
# check not bonded particles. Positions should still be distorted
np.testing.assert_allclose(np.copy(non_bound.pos), distorted_pos)
def test_volcons(self):
'''Check volume conservation forces on a simple mesh (cube).'''
system = self.system
system.virtual_sites = espressomd.virtual_sites.VirtualSitesOff()
system.thermostat.set_langevin(kT=0, gamma=1, seed=1)
# Place particles on a cube.
positions = list(itertools.product((0, 1), repeat=3))
positions = positions[:4] + positions[6:] + positions[4:6]
positions = np.array(positions) - 0.5
mesh_center_ref = np.copy(system.box_l) / 2.
partcls = system.part.add(pos=positions + mesh_center_ref)
# Divide the cube. All triangle normals must point inside the mesh.
# Use the right hand rule to determine the order of the indices.
triangles = [(0, 1, 2), (1, 3, 2),
(2, 3, 4), (3, 5, 4),
(4, 5, 6), (5, 7, 6),
(6, 7, 0), (7, 1, 0),
(0, 2, 4), (0, 4, 6),
(1, 5, 3), (1, 7, 5)]
# Add triangle bonds that don't contribute to the force (infinite
# elasticity). These bonds are needed to calculate the mesh volume.
for id1, id2, id3 in triangles:
bond = espressomd.interactions.IBM_Triel(
ind1=id3, ind2=id2, ind3=id1, elasticLaw="Skalak", k1=0., k2=0., maxDist=3)
system.bonded_inter.add(bond)
system.part.by_id(id1).add_bond((bond, id2, id3))
# Add volume conservation force.
KAPPA_V = 0.01
volCons = espressomd.interactions.IBM_VolCons(
softID=15, kappaV=KAPPA_V)
system.bonded_inter.add(volCons)
for p in system.part:
p.add_bond((volCons,))
# Run the integrator to initialize the mesh reference volume.
system.integrator.run(0, recalc_forces=True)
self.assertAlmostEqual(volCons.current_volume(), 1., delta=1e-10)
# The restorative force is zero at the moment.
np.testing.assert_almost_equal(np.copy(partcls.f), 0.)
# Double the cube dimensions. The volume increases by a factor of 8.
partcls.pos = 2. * positions + mesh_center_ref
system.integrator.run(0, recalc_forces=True)
self.assertAlmostEqual(volCons.current_volume(), 8., delta=1e-10)
# Reference forces for that particular mesh geometry.
ref_forces = 1.75 * KAPPA_V * np.array(
[(1, 2, 2), (2, 1, -2), (2, -1, 1), (1, -2, -1),
(-1, -2, 2), (-2, -1, -2), (-2, 1, 1), (-1, 2, -1)])
np.testing.assert_almost_equal(
np.copy(partcls.f), ref_forces)
# IBM doesn't implement energy and pressure kernels.
energy = self.system.analysis.energy()
pressure = self.system.analysis.pressure()
self.assertAlmostEqual(energy['bonded'], 0., delta=1e-10)
self.assertAlmostEqual(pressure['bonded'], 0., delta=1e-10)
# Check the cube is shrinking. The geometrical center shouldn't move.
volume_diff_ref = 0.1 # arbitrary, but should work for the given setup
# warmup
system.integrator.run(10)
# sampling
previous_volume = volCons.current_volume()
for _ in range(10):
system.integrator.run(5)
current_volume = volCons.current_volume()
volume_diff = previous_volume - current_volume
self.assertLess(current_volume, previous_volume)
self.assertGreater(volume_diff, volume_diff_ref)
previous_volume = current_volume
mesh_center = np.mean(partcls.pos, axis=0)
np.testing.assert_allclose(mesh_center, mesh_center_ref, rtol=1e-3)
# Halve the cube dimensions. The volume decreases by a factor of 8.
partcls.pos = 0.5 * positions + mesh_center_ref
system.integrator.run(0, recalc_forces=True)
self.assertAlmostEqual(volCons.current_volume(), 1. / 8., delta=1e-10)
# Check the cube is expanding. The geometrical center shouldn't move.
volume_diff_ref = 0.005 # arbitrary, but should work for the given setup
# warmup
system.integrator.run(40)
# sampling
previous_volume = volCons.current_volume()
for _ in range(10):
system.integrator.run(5)
current_volume = volCons.current_volume()
volume_diff = current_volume - previous_volume
self.assertGreater(current_volume, previous_volume)
self.assertGreater(volume_diff, volume_diff_ref)
previous_volume = current_volume
mesh_center = np.mean(partcls.pos, axis=0)
np.testing.assert_allclose(mesh_center, mesh_center_ref, rtol=1e-3)
if __name__ == "__main__":
ut.main()
|
espressomd/espresso
|
testsuite/python/ibm.py
|
Python
|
gpl-3.0
| 9,741
|
[
"ESPResSo"
] |
dee9bee47fb42885ab3bca7f83d74a2f5889734344f13e5ce92bec51a9c0d720
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from django.db import models, migrations
from django.contrib.staticfiles.finders import find
from django.core.files import File
DEFAULT_ICON = ('default.jpg', 'Default')
ICON_CHOICES = (
DEFAULT_ICON,
('blake.jpg', 'Blake'),
('johnny.jpg', 'Johnny'),
('josh.jpg', 'Josh'),
('kit.jpg', 'Kit'),
('marla.jpg', 'Marla'),
('piper.jpg', 'Piper'),
('than.jpg', 'Than'),
('brian.jpg', 'Brian'),
('jon.jpg', 'Jon'),
('kevin.jpg', 'Kevin'),
('kyle.jpg', 'Kyle'),
('nathan.jpg', 'Nathan'),
('remi.jpg', 'Remi'),
('yoav.jpg', 'Yoav'),
)
def populate_player_icons(apps, schema_editor):
Player = apps.get_model("bughouse", "Player")
for player in Player.objects.all():
static_file_name = "{0}.jpg".format(player.name.lower())
static_image_path = find(os.path.join('images', 'player-icons', static_file_name))
        if static_image_path:
            static_file = open(static_image_path, 'rb')  # binary mode for image data
        else:
            static_file = open(find(os.path.join('images', 'player-icons', 'default.jpg')), 'rb')
image_file = File(static_file)
player.icon.save(static_file_name, image_file, save=True)
class Migration(migrations.Migration):
dependencies = [
('bughouse', '0002_auto_20150207_1845'),
]
operations = [
migrations.RunPython(populate_player_icons),
]
|
simpleenergy/bughouse-ranking
|
bughouse/migrations/0003_auto_20150207_1846.py
|
Python
|
mit
| 1,458
|
[
"Brian"
] |
2830706a18fa9d081278e6a9b7c04861dee71e38933483ef6af137883bce559e
|
# -*- coding: utf-8 -*-
# Frozen Gaussian and quantum trajectories
# The GWPs is used to approximate the amplitude instead of the wavefunction
# Quantum force is computed from the approximated amplitude
# A problem is that the quantum force obtained from the approximate amplitude is not accurate at the tail
#import urllib2 as ur
#import re, time
#import os
#import pandas
import matplotlib.pyplot as plt
#from matplotlib import animation
import numpy as np
import numba
#from vmat import evolve
#import matplotlib.dates as mdates
#import datetime as dt
#from matplotlib.figure import Figure
#import matplotlib as mpl
import sys
def integrate(x,px,aj,y,py,ak):
dp = py - px
dq = y - x
    return (aj*ak)**0.25 * np.sqrt(2./(aj+ak)) * np.exp( \
            -0.5 * aj*ak/(aj+ak) * (dp**2/aj/ak + dq**2 \
            + 2.0*1j* (px/aj + py/ak) *dq)
            )
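# Reviewer note: integrate() evaluates the frozen-Gaussian overlap <g_j|g_k>
# for g(q) ~ (a/pi)**0.25 * exp(-a*(q-x)**2/2 + i*p*(q-x)); the same closed
# form reappears in overlap() below for the basis Gram matrix.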
def integrate_x(x,px,aj,y,py,beta):
d = integrate(x,px,aj,y,py,beta)
return d*(-1j*(px-py) + aj*x+y*beta)/(aj+beta)
def integrate_x2(x,px,aj,y,py,beta):
z = -1j*(px-py) + aj*x + y*beta
return z*z*integrate(x,px,aj,y,py,beta)
# def value(z):
# return np.sqrt(np.sqrt(self.alpha/np.pi))*np.exp(-self.alpha*(z-self.x)**2/2.0)
#def set_x(self,z):
#
# self.x = z
#
#def plot(self,ax,xmin=-4,xmax=4,Np=200):
#
# x = np.linspace(-4,4,Np)
#
# ax.plot(x,np.abs(self.value(x))**2)
#w = np.array([1.0/float(Ntraj) for i in range(Ntraj)])
cut = 1e-6
#xmin = x0-np.sqrt(-np.log(cut/np.sqrt(self.alpha/np.pi))/self.alpha)
#self.xmax = x0+np.sqrt(-np.log(cut/np.sqrt(self.alpha/np.pi))/self.alpha)
#pow = 4
#self.xmin = self.x - np.sqrt(2.0*pow/self.alpha)
#self.xmax = self.x + np.sqrt(2.0*pow/self.alpha)
#dx = (self.xmax-self.xmin)/(Ntraj-1)
#self.grid = np.linspace(self.xmin,self.xmax,Ntraj)
def overlap(x):
S = np.zeros((Ntraj,Ntraj),dtype=np.complex128)
for j in range(Ntraj):
aj,qj,pj = alpha, x[j], 0.0
for k in range(Ntraj):
ak, qk, pk = alpha, x[k], 0.0
dq = qk - qj
dp = pk - pj
S[j,k] = (aj*ak)**0.25 * np.sqrt(2./(aj+ak)) * np.exp( \
-0.5 * aj*ak/(aj+ak) * (dp**2/aj/ak + dq**2 \
+ 2.0*1j* (pj/aj + pk/ak) *dq) )
return S
def projection(x):
S = overlap(x)
b= np.array([integrate(x[i],0.0,alpha,x0,0.0,a0) for i in range(Ntraj)])
try:
c = np.linalg.solve(S, b)
except:
print("Error: ill-conditioned overlap matrix of initial basis.")
sys.exit()
return c
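# Usage sketch (reviewer note): projection() expands the initial Gaussian
# (centre x0, width a0) in the frozen basis by solving S c = b, so that
# psi(q) ~ sum_k c[k]*g_k(q); it is called as c = projection(x) near the
# bottom of this file.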
# def get_p(self, beta, smooth=True):
# """
# compute p = grad S at grid points
# """
# g = self.basis
#
# dq = 0.005
#
# p = np.zeros(Ntraj)
# dp = np.zeros(Ntraj)
#
# #print 'x = ',self.grid
# #print 'c = ',self.c
#
# if smooth:
#
# for j in range(Ntraj):
#
# dz, z = 0.0, 0.0
# dz1, z1 = 0.0, 0.0
# dz0, z0 = 0.0, 0.0
#
#
# q = g[j].x
#
# q1 = q + dq
# q0 = q - dq
#
# for k in range(Ntraj):
#
#
# qk, ak = g[k].x, g[k].alpha
#
# y = q - qk
#
# alfa = (ak*beta)/(ak+beta)
#
# an = (ak/np.pi)**0.25 * np.sqrt(beta/(ak+beta))
#
# z = z + self.c[k] * np.exp(-0.5*alfa*y**2) *an
#
# dz = dz - self.c[k] * alfa * y * np.exp(-0.5*alfa*y**2) * an
#
# #ddz = ddz + self.c[k] * (- alfa + alfa**2 * y**2) * np.exp(-0.5*alfa*y**2) * an
#
#
# p[j] = (dz/z).imag
# #dp.append((-dz/z**2+ddz/z).imag)
#
# for k in range(Ntraj):
#
#
# qk, ak = g[k].x, g[k].alpha
#
# y0, y1 = q0 - qk, q1 - qk
#
# alfa = (ak*beta)/(ak+beta)
#
# an = (ak/np.pi)**0.25 * np.sqrt(beta/(ak+beta))
#
# z0 = z0 + self.c[k] * np.exp(-0.5*alfa*y0**2) *an
# z1 = z1 + self.c[k] * np.exp(-0.5*alfa*y1**2) *an
#
# dz0 = dz0 - self.c[k] * alfa * y0 * np.exp(-0.5*alfa*y0**2) * an
# dz1 = dz1 - self.c[k] * alfa * y1 * np.exp(-0.5*alfa*y1**2) * an
#
#
# dp[j] = (((dz1/z1).imag-(dz0/z0).imag)/2./dq)
#
# #print 'p = ',p
#
# return p, dp
#
# def norm(self):
#
# c = self.c
# S = self.overlap()
#
# z = np.vdot(c,S.dot(c))
# #for j in range(Ntraj):
# # for k in range(Ntraj):
# # z += np.conj(c[j]) * S[j,k] * c[k]
# return z.real
#
# def overlap(self):
#
# g = self.basis
#
# S = np.zeros((Ntraj,Ntraj),dtype=np.complex128)
#
# for j in range(Ntraj):
#
# aj,qj,pj = g[j].alpha,g[j].x,g[j].p
#
# for k in range(Ntraj):
#
# ak, qk, pk = g[k].alpha, g[k].x, g[k].p
#
# dq = qk - qj
# dp = pk - pj
#
# S[j,k] = (aj*ak)**0.25 * np.sqrt(2./(aj+ak)) * np.exp( \
# -0.5 * aj*ak/(aj+ak) * (dp**2/aj/ak + dq**2 \
# + 2.0*1j* (pj/aj + pk/ak) *dq) )
#
# return S
class Tdse(Psi):
def __init__(self,c,g, **kwargs):
Psi.__init__(self,c,g)
try:
self.dt = kwargs['dt']
self.Nt = kwargs['Nt']
self.am = kwargs['am']
self.modelName = kwargs['modelName']
self.beta = kwargs['beta']
except:
print("ERROR: missing args for Tdse.")
def solve(self):
gx = np.array([basis.x for basis in self.basis])
gp = [x.p for x in self.basis]
alpha = [x.alpha for x in self.basis]
am = self.am
S = self.overlap()
p, dp = self.get_p(beta = self.beta)
D = dmat(am,gx,gp,p,dp,alpha,S)
V = vmat(gx,gp,alpha,S)
K = kmat(am,gx,gp,alpha,S)
c = self.c
#enk = np.vdot(c,K.dot(c))
#env = np.vdot(c,V.dot(c))
H = (K+V)-1j*D
b = np.dot(H,c)
b = -1j*b
#print 'ham = ', H
S = self.overlap()
#print 'overlap',S
try:
dc = np.linalg.solve(S, b)
except:
print("Error: ill-conditioned overlap matrix for dc")
print(S)
sys.exit()
return dc
def propagate_x(self):
"""
update quantum trajectories using momentum obtained from current wavefunction
also construct new GWP basis
"""
dt = self.dt
am = self.am
g = self.basis
p, dp = self.get_p(beta = self.beta)
self.grid = self.grid + p*dt/am
#g = [gwp(self.grid[i],0.0) for i in range(Ntraj)]
#print 'p',p
#print 'dp',dp
#print '\n'
for k in range(Ntraj):
g[k].set_x(self.grid[k])
# g[k].alpha = g[k].alpha - dp[k]/am * g[k].alpha * dt
def evolve(self, integrator='SOD'):
"""
SOD : second-order difference
Euler : first-order integration
"""
dt = self.dt
Nt = self.Nt
am = self.am
g = self.basis
f = open('x.dat','w')
f1 = open('cor.dat','w')
f2 = open('energy.dat','w')
t = 0.
# if integrator == 'Euler':
# for i in range(Nt):
#
# t = t + dt
#
# dc = self.solve()
# self.c = self.c + dc*dt
# self.propagate_x()
#
#
#
#
#
# f.write(str(t) + ' ' + ' '.join([str(self.grid[i]) for i in range(Ntraj)]) +'\n')
#
# elif integrator == 'SOD':
cold = self.c
dc= self.solve()
self.c = self.c + dc*dt
        for i in range(Nt):
            t = t + dt
            #self.propagate_x()
            p, dp = self.get_p(beta = self.beta)  # restore: momenta are needed below
            for k in range(Ntraj):
                g[k].x += p[k]*dt/am
#g[k].alpha = g[k].alpha - dp[k]/am * g[k].alpha * dt
dc = self.solve()
cnew = cold + dc*2.0*dt
cold = self.c
self.c = cnew
#self.c = self.c/np.sqrt(self.norm())
            if i % int(Nt/3) == 0:
            #if False:
                print('\n ------- step {:2d} --------\n'.format(i))
                print('norm {:6.2f} \n'.format(self.norm()))
#print 'energy Ek = {:6.2f}, Ev = {:6.2f} '.format(enk.real,env.real)
#f.write(str(t) + ' ' + ' '.join([str(self.grid[i]) for i in range(Ntraj)]) +'\n')
z = self.corr()
f1.write('{:12.6f} {:12.6f} {:12.6f} \n'.format(t, z.real, z.imag))
#f2.write('{:12.6f} {:12.6f} {:12.6f}'.format(enk,env,enk+env))
f.close()
f1.close()
f2.close()
def corr(self):
c = self.c
S = self.overlap()
z = c.dot(S.dot(c))
return z
def quantum_force(x,c):
    """
    Compute the gradient of the quantum potential from the approximate
    amplitude A(q) = sum_k c_k g_k(q) (hbar = m = 1 assumed):
    u = -A''/(2A), du/dq = -0.5*(A'''/A - A''*A'/A**2).
    """
    u = np.zeros(len(x))
    du = np.zeros(len(x))
    for j in range(len(x)):
        q = x[j]
        z, d1z, d2z, d3z = 0.0, 0.0, 0.0, 0.0
        for k in range(len(x)):
            qk, ak = x[k], alpha
            y = q - qk
            an = (ak/np.pi)**0.25
            g = an * np.exp(-0.5*ak*y**2)
            z = z + c[k] * g                                     # amplitude
            d1z = d1z - c[k] * ak*y * g                          # first derivative
            d2z = d2z + c[k] * (ak**2 * y**2 - ak) * g           # second derivative
            d3z = d3z - c[k] * ak**2 * y * (ak*y**2 - 3.0) * g   # third derivative
        u[j] = -0.5 * d2z/z
        du[j] = -0.5 * (d3z/z - d2z*d1z/z**2)
    return du
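# Sanity sketch (reviewer note, hbar = m = 1): for a single basis function the
# amplitude is one Gaussian, whose quantum force vanishes at the centre by
# symmetry, so quantum_force(np.array([0.]), np.array([1.])) returns ~[0.].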
# global params
x0 = 0.0
a0 = 1.0
p0 = 0.0
Ntraj = 16
Ntraj = 20
alpha = 8.0 # basis width
x = np.random.randn(Ntraj) / np.sqrt(2.0 * a0) + x0
print('initial wavefunction x0 = {:6.2f}, p0={:6.2f}, a0 = {:6.2f} \n'.format(x0,p0,a0))
print('particle mass {:6.2f} \n'.format(am))
print('number of basis {:4d} \n'.format(Ntraj))
print('initial grid \n')
print(x)
print('time interval = {:6.2f} time steps = {:4d} \n'.format(dt,Nt))
c = projection(x)
print('c(0) = ',c)
print('trajectory interval {:6.4f}'.format(x[1]-x[0]))
print('variation of basis {:6.4f}'.format(np.sqrt(1./alpha)))
if x[1] - x[0] < np.sqrt(1./alpha):
print("WARNING: grid spacing is too small.")
# expanded initial wavefunction
print('\n initialization succeed ... \n')
# propagate quantum trajectories
p = np.zeros(Ntraj)  # initial momenta (assumed zero, matching p0 = 0)
for kt in range(Nt):
    x += p * dt/am
    # obtain quantum force from approximated wavefunction
    du = quantum_force(x,c)
    dV = dv(x)[0]  # renamed to avoid shadowing dv(); note dv() returns (V, V''), not a gradient
    p += (-dV - du) * dt
# update the amplitude
dp = linear_regression(p)
dc = update_amplitude(x,c,dp)
c += dc * dt
# output final data
Np = 200
x = np.linspace(-5,5,Np)
with open('pot.dat','w') as f:
    for i in range(Np):
f.write('{:12.6f} {:12.6f} \n'.format(x[i], dv(x[i])[0]))
with open('wft.dat','w') as f:
    for i in range(Np):
f.write('{:12.6f} {:12.6f} {:12.6f} \n'.format(x[i],np.abs(wf.value(x[i]))**2, \
np.abs(se.value(x[i]))**2 ))
# final wavefunction
ax = plt.gca()
ax.plot(x,np.abs(se.value(x))**2,'k--')
#plt.legend(loc=0)
plt.show()
@numba.jit
def dv(x):
pot = 'Double_well'
if pot == 'Morse':
a, x0, De = 1.02, 1.4, 0.176
d = (1.0-np.exp(-a*(x-x0)))
v0 = De*d**2
ddv = 2.0 * De * (-d*np.exp(-a*((x-x0)))*a**2 + (np.exp(-a*(x-x0)))**2*a**2)
elif pot == 'Double_well':
eta = 1.3544
v0 = x**4/16.0/eta - x**2/2.0
ddv = 3./4./eta * x**2 - 1.0
elif pot == 'Harmonic':
v0 = x**2/2.0
ddv = 1.0
else:
print "ERROR: there is no such potential."
return v0,ddv
@numba.jit
def vmat(x,p,alpha,S):
"""
local harmonic approximation (LHA)
v(x) = v0 + v'(x-x0) + v''(x-x0)**2/2
x0 = (alpha*x1+beta*x2)/(alpha+beta)
"""
Ntraj = len(x)
l = np.zeros((Ntraj,Ntraj),dtype=np.complex128)
for j in range(Ntraj):
aj,qj,pj = alpha[j], x[j], p[j]
for k in range(Ntraj):
ak,qk,pk = alpha[k], x[k], p[k]
x0 = (aj*qj+ak*qk)/(aj+ak)
v0,ddv = dv(x0)
d0 = v0 + ddv/2.0*((pj-pk)**2/(aj+ak)**2 + 1.0/(aj+ak))
l[j,k] = d0*S[j,k]
return l
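# Reviewer note: under the LHA the element reduces to
# <g_j|V|g_k> ~ [V(x0) + V''(x0)/2 * m2] * S[j,k], where the Gaussian moment
# m2 = (p_j-p_k)**2/(a_j+a_k)**2 + 1/(a_j+a_k) is the d0 factor computed above.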
@numba.jit
def kmat(am,x,p,alpha,S):
Ntraj = len(x)
l = np.zeros((Ntraj,Ntraj),dtype=np.complex128)
for j in range(Ntraj):
aj,qj,pj = alpha[j], x[j], p[j]
for k in range(Ntraj):
ak,qk,pk = alpha[k], x[k], p[k]
p0 = (aj*pk + ak*pj)/(aj+ak)
d0 = 0.5/am * ( (p0+1j*aj*ak/(aj+ak)*(qj-qk))**2 + aj*ak/(aj+ak) )
l[j,k] = d0*S[j,k]
return l
@numba.jit
def dmat(am,x,gp,p,dp,alpha,S):
"""
time-derivative matrix D = <gj|i d/dt | gk> = <gj| -dp/(2m)*ak*(x-qk)^2 + ak*pk/m * (x-qk) - dp/(4m) | gk>
the trajectory momentum p is different from the momentum for basis (gp)
"""
Ntraj = len(x)
l = np.zeros((Ntraj,Ntraj),dtype=np.complex128)
for j in range(Ntraj):
aj,qj,pj = alpha[j], x[j], gp[j]
for k in range(Ntraj):
ak,qk,pk = alpha[k], x[k], gp[k]
d2 = dp[k]/2./am * ak * 1./(aj+ak)**2 * ( 2j*aj*(pj-pk) * (qj-qk) - aj**2 * (qj-qk)**2 + (pj-pk)**2 - (aj+ak) )
d1 = ak*p[k]/am * (-(1j*(pj-pk)-aj*(qj-qk)))/(aj+ak)
d0 = - dp[k]/4.0/am
l[j,k] = (d0+d1+d2)*S[j,k]
return l
d = {'dt': 0.01, 'Nt': 300, 'am': 1.0, 'modelName': 'Double_well', 'beta': 1.0}
np.set_printoptions(precision=4,threshold=5)
|
binghongcha08/pyQMD
|
GWP/QTGB/Amplitude_GWP/gwp.py
|
Python
|
gpl-3.0
| 16,058
|
[
"Gaussian"
] |
e49dcdd167a38294214907475ccc4441a4f9b479bf0056627e97149c866b8fe5
|
from __future__ import unicode_literals
import io
import re
import ast
from .exceptions import MoultScannerError
from .compat import str_
from . import utils, log
_fallback_re = re.compile(r'''
^[\ \t]*(
from[\ \t]+[\w\.]+[\ \t]+import\s+\([\s\w,]+\)|
from[\ \t]+[\w\.]+[\ \t]+import[\ \t\w,]+|
import[\ \t]+\([\s\w,]+\)|
import[\ \t]+[\ \t\w,]+
)
''', re.VERBOSE | re.MULTILINE | re.UNICODE)
def ast_value(val, scope, return_name=False):
'''Recursively parse out an AST value. This makes no attempt to load
modules or reconstruct functions on purpose. We do not want to
inadvertently call destructive code.
'''
# :TODO: refactor the hell out of this
try:
if isinstance(val, (ast.Assign, ast.Delete)):
if hasattr(val, 'value'):
value = ast_value(val.value, scope)
else:
value = None
for t in val.targets:
name = ast_value(t, scope, return_name=True)
if isinstance(t.ctx, ast.Del):
if name in scope:
scope.pop(name)
elif isinstance(t.ctx, ast.Store):
scope[name] = value
return
elif isinstance(val, ast.Expr) and isinstance(val.value, ast.Name):
return ast_value(val.value)
if isinstance(val, ast.Name):
if isinstance(val.ctx, ast.Load):
if val.id == 'None':
return None
elif val.id == 'True':
return True
elif val.id == 'False':
return False
if val.id in scope:
return scope[val.id]
if return_name:
return val.id
elif isinstance(val.ctx, ast.Store):
if return_name:
return val.id
return None
if isinstance(val, ast.Subscript):
toslice = ast_value(val.value, scope)
theslice = ast_value(val.slice, scope)
return toslice[theslice]
elif isinstance(val, ast.Index):
return ast_value(val.value, scope)
elif isinstance(val, ast.Slice):
lower = ast_value(val.lower)
upper = ast_value(val.upper)
step = ast_value(val.step)
return slice(lower, upper, step)
if isinstance(val, list):
return [ast_value(x, scope) for x in val]
elif isinstance(val, tuple):
return tuple(ast_value(x, scope) for x in val)
if isinstance(val, ast.Attribute):
name = ast_value(val.value, scope, return_name=True)
if isinstance(val.ctx, ast.Load):
return '.'.join((name, val.attr))
if return_name:
return name
elif isinstance(val, ast.keyword):
return {val.arg: ast_value(val.value, scope)}
elif isinstance(val, ast.List):
return [ast_value(x, scope) for x in val.elts]
elif isinstance(val, ast.Tuple):
return tuple(ast_value(x, scope) for x in val.elts)
elif isinstance(val, ast.Dict):
return dict(zip([ast_value(x, scope) for x in val.keys],
[ast_value(x, scope) for x in val.values]))
elif isinstance(val, ast.Num):
return val.n
elif isinstance(val, ast.Str):
return val.s
elif hasattr(ast, 'Bytes') and isinstance(val, ast.Bytes):
return bytes(val.s)
except Exception:
# Don't care, just return None
pass
return None
def flatten_call_args(args, kwlist, starargs, kwargs):
if starargs:
args.extend(starargs)
keywords = {}
for kw in kwlist:
keywords.update(kw)
    if kwargs:
        keywords.update(kwargs)
return args, keywords
def get_args(args, kwargs, arg_names):
'''Get arguments as a dict.
'''
n_args = len(arg_names)
if len(args) + len(kwargs) > n_args:
raise MoultScannerError('Too many arguments supplied. Expected: {}'.format(n_args))
out_args = {}
for i, a in enumerate(args):
out_args[arg_names[i]] = a
for a in arg_names:
if a not in out_args:
out_args[a] = None
out_args.update(kwargs)
return out_args
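# Example (reviewer sketch):
#   get_args(['os.path'], {'fromlist': ['join']},
#            ['name', 'globals', 'locals', 'fromlist', 'level'])
#   -> {'name': 'os.path', 'globals': None, 'locals': None,
#       'fromlist': ['join'], 'level': None}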
def parse_programmatic_import(node, scope):
name = ast_value(node.func, scope, return_name=True)
if not name:
return []
args, kwargs = flatten_call_args(ast_value(node.args, scope),
ast_value(node.keywords, scope),
ast_value(node.starargs, scope),
ast_value(node.kwargs, scope))
imports = []
if name.endswith('__import__'):
func_args = get_args(args, kwargs, ['name', 'globals', 'locals',
'fromlist', 'level'])
log.debug('Found `__import__` with args: {}'.format(func_args))
if not func_args['name']:
raise MoultScannerError('No name supplied for __import__')
if func_args['fromlist']:
if not hasattr(func_args['fromlist'], '__iter__'):
raise MoultScannerError('__import__ fromlist is not iterable type')
for fromname in func_args['fromlist']:
imports.append((func_args['name'], fromname))
else:
imports.append((None, func_args['name']))
elif name.endswith('import_module'):
func_args = get_args(args, kwargs, ['name', 'package'])
log.debug('Found `import_module` with args: {}'.format(func_args))
if not func_args['name']:
raise MoultScannerError('No name supplied for import_module')
if func_args['package'] and not isinstance(func_args['package'], (bytes, str_)):
raise MoultScannerError('import_module package not string type')
imports.append((func_args['package'], func_args['name']))
return imports
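# Example (reviewer sketch): a call node for __import__('os.path',
# fromlist=['join']) yields [('os.path', 'join')], while
# import_module('.views', package='app') yields [('app', '.views')];
# add_import() later resolves the relative parts.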
class ResolvedImport(object):
def __init__(self, import_path, import_root):
module = import_path.split('.', 1)[0]
self.module = module
self.import_path = import_path
self.is_stdlib = utils.is_stdlib(module)
self.filename = None
if not self.is_stdlib:
self.filename = utils.file_containing_import(import_path, import_root)
def __repr__(self):
return '<ResolvedImport {} ({})>'.format(self.import_path, self.filename)
class ImportNodeVisitor(ast.NodeVisitor):
'''A simplistic AST visitor that looks for easily identified imports.
It can resolve simple assignment variables defined within the module.
'''
def reset(self, filename):
self.filename = filename
self.import_path, self.import_root = utils.import_path_from_file(filename)
def add_import(self, *names):
for module, name in names:
if module and module.startswith('.'):
module = utils.resolve_import(module, self.import_path)
elif not module:
module = ''
module = '.'.join((module, name.strip('.'))).strip('.')
if module not in self._imports:
self._imports.add(module)
self.imports.append(ResolvedImport(module, self.import_root))
def visit_Module(self, node):
log.debug('Resetting AST visitor with module path: %s', self.import_path)
self._imports = set()
self.imports = []
self.scope = {}
if node:
self.generic_visit(node)
def visit_Import(self, node):
for n in node.names:
self.add_import((n.name, ''))
self.generic_visit(node)
def visit_ImportFrom(self, node):
module = '{}{}'.format('.' * node.level, str_(node.module or ''))
for n in node.names:
self.add_import((module, n.name))
self.generic_visit(node)
def visit_Expr(self, node):
if isinstance(node.value, ast.Call):
try:
self.add_import(*parse_programmatic_import(node.value, self.scope))
except MoultScannerError as e:
log.debug('%s, File: %s', e, self.filename)
elif isinstance(node.value, ast.Name):
ast_value(node.value, self.scope)
self.generic_visit(node)
def visit_Assign(self, node):
ast_value(node, self.scope)
def visit_Delete(self, node):
ast_value(node, self.scope)
def visit(self, node):
super(ImportNodeVisitor, self).visit(node)
ast_visitor = ImportNodeVisitor()
def _ast_scan_file_re(filename):
try:
with io.open(filename, 'rt', encoding='utf8') as fp:
script = fp.read()
normalized = ''
for imp in _fallback_re.finditer(script):
imp_line = imp.group(1)
try:
imp_line = imp_line.decode('utf8')
except AttributeError:
pass
except UnicodeEncodeError:
log.warn('Unicode import failed: %s', imp_line)
continue
imp_line = re.sub(r'[\(\)]', '', imp_line)
normalized += ' '.join(imp_line.split()).strip(',') + '\n'
log.debug('Normalized imports:\n%s', normalized)
try:
root = ast.parse(normalized, filename=filename)
except SyntaxError:
log.error('Could not parse file using regex scan: %s', filename)
log.info('Exception:', exc_info=True)
return None, None
log.debug('Starting AST Scan (regex): %s', filename)
ast_visitor.reset(filename)
ast_visitor.visit(root)
return ast_visitor.scope, ast_visitor.imports
except IOError:
log.warn('Could not open file: %s', filename)
return None, None
def ast_scan_file(filename, re_fallback=True):
'''Scans a file for imports using AST.
In addition to normal imports, try to get imports via `__import__`
or `import_module` calls. The AST parser should be able to resolve
simple variable assignments in cases where these functions are called
with variables instead of strings.
'''
try:
with io.open(filename, 'rb') as fp:
try:
root = ast.parse(fp.read(), filename=filename)
except (SyntaxError, IndentationError):
if re_fallback:
log.debug('Falling back to regex scanner')
return _ast_scan_file_re(filename)
else:
log.error('Could not parse file: %s', filename)
log.info('Exception:', exc_info=True)
return None, None
log.debug('Starting AST Scan: %s', filename)
ast_visitor.reset(filename)
ast_visitor.visit(root)
log.debug('Project path: %s', ast_visitor.import_root)
return ast_visitor.scope, ast_visitor.imports
except IOError:
log.warn('Could not open file: %s', filename)
return None, None
|
tweekmonster/moult
|
moult/ast_scanner.py
|
Python
|
mit
| 11,216
|
[
"VisIt"
] |
c4947cb82b2cabdcc88ebce1e25a55d23f32677dd8c47b55575dd0ce3121c0d7
|
"""
Common 2D and 3D plot, quiver, text functions
2013 (BSD-3) California Institute of Technology
"""
from __future__ import division
from warnings import warn
#import numpy as np
from matplotlib import pyplot as plt
from vectorized_meshgrid import vec2meshgrid
def dimension(ndarray):
"""dimension of ndarray
- ndim == 1:
dimension = 1
- ndim == 2:
dimension = shape[0]
"""
if ndarray.ndim < 2:
return ndarray.ndim
return ndarray.shape[0]
def plot(x, ax=None, **kwargs):
"""Plot column of matrix as points.
Plot points in matrix x in axes ax
passing kwargs to matplotlib.plot.
usage example: plot 10 random 3D points
>>> import numpy as np
>>> from matplotlib import pyplot as plt
>>> from pyvectorized import plot
    >>> ax = plt.gca()
    >>> ndim = 3
    >>> npoints = 10
    >>> x = np.random.rand(ndim, npoints)
    >>> h = plot(x, ax, color='r', marker='o')
@param x: matrix of points to plot
@type x: [#dim x #pnts] numpy.ndarray
@param ax: axes object handle(s)
@type ax: [1 x #axes] (same plot in each axes pair)
| numpy.NaN (to turn off plotting)
| else no plotting and warning
@param args: plot formatting
@return h: handle to plotted object(s)
"""
# copy to multiple axes ?
try:
lines = [plot(x, i, **kwargs) for i in ax]
return lines
except:
pass
if not ax:
ax = plt.gca()
dim = dimension(x)
# >3D ?
if dim > 3:
warn('plot: ndim = ' +str(x.ndim) +
' > 3, plotting only 3D component.')
# select 2D or 3D
if dim < 1:
raise Exception('x.ndim == 0')
elif dim < 2:
line = ax.plot(x, **kwargs)
elif dim < 3:
line = ax.plot(x[0, :], x[1, :], **kwargs)
else:
line = ax.plot(x[0, :], x[1, :], x[2, :], **kwargs)
return line
def quiver(x, v, ax=None, **kwargs):
"""Multi-dimensional quiver.
Plot v columns at points in columns of x
in axes ax with plot formatting options in kwargs.
>>> import numpy as np
    >>> from matplotlib import pyplot as plt
    >>> from pyvectorized import quiver, dom2vec
    >>> x = dom2vec([0, 10, 0, 11], [20, 20])
    >>> v = np.vstack([np.sin(x[0, :]), np.cos(x[1, :])])
    >>> quiver(x, v, plt.gca())
see also
matplotlib.quiver, mayavi.quiver3
@param x: points where vectors are based
each column is a coordinate tuple
@type x: 2d lil | numpy.ndarray
@param v: vectors which to base at points x
@type v: 2d lil | numpy.ndarray
@param ax: axes handle, e.g., ax = gca())
@param x: matrix of points where vectors are plotted
@type x: [#dim x #points]
@param v: matrix of column vectors to plot at points x
@type v: [#dim x #points]
@param kwargs: plot formatting
@return: handle to plotted object(s)
"""
# multiple axes ?
try:
fields = [quiver(x, v, i, **kwargs) for i in ax]
return fields
except:
pass
if not ax:
ax = plt.gca()
dim = dimension(x)
if dim < 2:
raise Exception('ndim < 2')
elif dim < 3:
h = ax.quiver(x[0, :], x[1, :],
v[0, :], v[1, :], **kwargs)
else:
raise NotImplementedError
from mayavi.mlab import quiver3d
if ax:
print('axes arg ignored, mayavi used')
h = quiver3d(x[0, :], x[1, :], x[2, :],
v[0, :], v[1, :], v[2, :], **kwargs)
if dim > 3:
warn('quiver:ndim #dimensions > 3,' +
'plotting only 3D component.')
return h
def text(x, string, ax=None, **kwargs):
"""Text annotation in 2D or 3D.
text position x:
- x.ndim == 1: single point:
- 2-tuple: 2d plot
- n-tuple: nd plot
- x.ndim > 1: each column a point:
- [2 x #points]: 2D plot
- [n x #points]: nd plot
see also
matplotlib.pyplot.text,
mpl_toolkits.mplot3d.Axes3D.text
@param x: point where text is placed
@type x: [#dim x 1]
@param str: annotation text string
"""
# multiple axes ?
try:
        h = [text(x, string, ax=i, **kwargs) for i in ax]
        return h
    except:
        pass
if not ax:
ax = plt.gca()
dim = dimension(x)
if dim < 2:
raise Exception('ndim < 2')
elif dim < 3:
h = ax.text(x[0, :], x[1, :], string, **kwargs)
else:
if dim > 3:
print('>3 dimensions, only first 3 plotted')
h = ax.text(x[0, :], x[1, :], x[2, :],
string, **kwargs)
return h
def vtext(q, num=None, ax=None, **kwargs):
"""Label points in q with numbers from num.
@param ax: axes object handle | []
@param q: matrix whose each column stores point coordinates
@type q: [#dim x #points]
@param num: array of numbers to use for annotation
@type num: [1 x #points]
@param kwargs: additional arguments passed to text
"""
# axes ?
if not ax:
ax = plt.gca()
# numbers ?
if not num:
num = range(1, (q.shape[1] +1))
    # plot one label per point
    for i, n in enumerate(num):
        text(q[:, i:i + 1], str(n), ax=ax, **kwargs)
def streamplot(x, v, res, ax, **kwargs):
"""Streamplot in 2d or 3d.
@param x: points
@type x: 2d array, each column a point
@param v: vectors based at points in x
@type v: 2d array, each column a vector
@param ax: axes
@param kwargs: passed to mpl streamplot
"""
dim = dimension(x)
if dim == 2:
(X, Y) = vec2meshgrid(x, res)
(U, V) = vec2meshgrid(v, res)
ax.streamplot(X[0, :], Y[:, 1],
U, V, **kwargs)
else:
raise NotImplementedError
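# Usage sketch (reviewer note), with points laid out as columns:
#   x = dom2vec([0, 10, 0, 11], [20, 20])
#   v = np.vstack([np.sin(x[0, :]), np.cos(x[1, :])])
#   streamplot(x, v, [20, 20], plt.gca())
# vec2meshgrid reshapes the columns back into meshgrids before plotting.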
|
johnyf/pyvectorized
|
pyvectorized/multidim_plot.py
|
Python
|
bsd-3-clause
| 5,949
|
[
"Mayavi"
] |
569691bc33e6ee229dd5987cf70e288e1b7c0017a4e5bf74e0b44b283e89de54
|
#!/usr/bin/env python
#
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""
Standard setup script.
"""
from __future__ import absolute_import
from __future__ import print_function
import glob
import os
import pkg_resources
import platform
import sys
from distutils.command.install_data import install_data
from distutils.command.sdist import sdist
from distutils.version import LooseVersion
from setuptools import version as setuptools_version
from setuptools import setup
from buildbot import version
if "bdist_wheel" in sys.argv:
BUILDING_WHEEL = True
else:
BUILDING_WHEEL = False
def include(d, e):
"""Generate a pair of (directory, file-list) for installation.
'd' -- A directory
'e' -- A glob pattern"""
return (d, [f for f in glob.glob('%s/%s' % (d, e)) if os.path.isfile(f)])
def include_statics(d):
r = []
for root, ds, fs in os.walk(d):
r.append((root, [os.path.join(root, f) for f in fs]))
return r
class install_data_twisted(install_data):
"""make sure data files are installed in package.
this is evil.
copied from Twisted/setup.py.
"""
def finalize_options(self):
self.set_undefined_options('install',
('install_lib', 'install_dir'),
)
install_data.finalize_options(self)
def run(self):
install_data.run(self)
# ensure there's a buildbot/VERSION file
fn = os.path.join(self.install_dir, 'buildbot', 'VERSION')
open(fn, 'w').write(version)
self.outfiles.append(fn)
class our_sdist(sdist):
def make_release_tree(self, base_dir, files):
sdist.make_release_tree(self, base_dir, files)
# ensure there's a buildbot/VERSION file
fn = os.path.join(base_dir, 'buildbot', 'VERSION')
open(fn, 'w').write(version)
# ensure that NEWS has a copy of the latest release notes, with the
# proper version substituted
src_fn = os.path.join('docs', 'relnotes/index.rst')
with open(src_fn) as f:
src = f.read()
src = src.replace('|version|', version)
dst_fn = os.path.join(base_dir, 'NEWS')
with open(dst_fn, 'w') as f:
f.write(src)
def define_plugin_entry(name, module_name):
"""
helper to produce lines suitable for setup.py's entry_points
"""
if isinstance(name, tuple):
entry, name = name
else:
entry = name
return '%s = %s:%s' % (entry, module_name, name)
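# Example (reviewer sketch):
#   define_plugin_entry('GitPoller', 'buildbot.changes.gitpoller')
#     -> 'GitPoller = buildbot.changes.gitpoller:GitPoller'
#   define_plugin_entry(('svn.split_file_branches', 'split_file_branches'),
#                       'buildbot.changes.svnpoller')
#     -> 'svn.split_file_branches = buildbot.changes.svnpoller:split_file_branches'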
def concat_dicts(*dicts):
result = dict()
for d in dicts:
result.update(d)
return result
def define_plugin_entries(groups):
"""
helper to all groups for plugins
"""
result = dict()
for group, modules in groups:
tempo = []
for module_name, names in modules:
tempo.extend([define_plugin_entry(name, module_name)
for name in names])
result[group] = tempo
return result
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as long_d_f:
long_description = long_d_f.read()
setup_args = {
'name': "buildbot",
'version': version,
'description': "The Continuous Integration Framework",
'long_description': long_description,
'author': "Brian Warner",
'author_email': "warner-buildbot@lothar.com",
'maintainer': "Dustin J. Mitchell",
'maintainer_email': "dustin@v.igoro.us",
'url': "http://buildbot.net/",
'license': "GNU GPL",
'classifiers': [
'Development Status :: 5 - Production/Stable',
'Environment :: No Input/Output (Daemon)',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Testing',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6'
],
'packages': [
"buildbot",
"buildbot.buildslave",
"buildbot.worker",
"buildbot.worker.protocols",
"buildbot.changes",
"buildbot.clients",
"buildbot.data",
"buildbot.db",
"buildbot.db.migrate.versions",
"buildbot.db.types",
"buildbot.monkeypatches",
"buildbot.mq",
"buildbot.plugins",
"buildbot.process",
"buildbot.process.users",
"buildbot.reporters",
"buildbot.schedulers",
"buildbot.scripts",
"buildbot.secrets",
"buildbot.statistics",
"buildbot.statistics.storage_backends",
"buildbot.status",
"buildbot.steps",
"buildbot.steps.package",
"buildbot.steps.package.deb",
"buildbot.steps.package.rpm",
"buildbot.steps.source",
"buildbot.util",
"buildbot.wamp",
"buildbot.www",
"buildbot.www.hooks",
"buildbot.www.authz",
] + ([] if BUILDING_WHEEL else [ # skip tests for wheels (save 50% of the archive)
"buildbot.test",
"buildbot.test.util",
"buildbot.test.fake",
"buildbot.test.fuzz",
"buildbot.test.integration",
"buildbot.test.regressions",
"buildbot.test.unit",
]),
'data_files': [
("buildbot", [
"buildbot/buildbot.png",
]),
include("buildbot/reporters/templates", "*.txt"),
("buildbot/db/migrate", [
"buildbot/db/migrate/migrate.cfg",
]),
include("buildbot/db/migrate/versions", "*.py"),
("buildbot/scripts", [
"buildbot/scripts/sample.cfg",
"buildbot/scripts/buildbot_tac.tmpl",
]),
include("buildbot/spec", "*.raml"),
include("buildbot/spec/types", "*.raml"),
include("buildbot/test/unit/test_templates_dir", "*.html"),
include("buildbot/test/unit/test_templates_dir/plugin", "*.*"),
] + include_statics("buildbot/www/static"),
'cmdclass': {'install_data': install_data_twisted,
'sdist': our_sdist},
'entry_points': concat_dicts(define_plugin_entries([
('buildbot.changes', [
('buildbot.changes.mail', [
'MaildirSource', 'CVSMaildirSource',
'SVNCommitEmailMaildirSource',
'BzrLaunchpadEmailMaildirSource']),
('buildbot.changes.bitbucket', ['BitbucketPullrequestPoller']),
('buildbot.changes.github', ['GitHubPullrequestPoller']),
('buildbot.changes.bonsaipoller', ['BonsaiPoller']),
('buildbot.changes.gerritchangesource', ['GerritChangeSource']),
('buildbot.changes.gitpoller', ['GitPoller']),
('buildbot.changes.hgpoller', ['HgPoller']),
('buildbot.changes.p4poller', ['P4Source']),
('buildbot.changes.pb', ['PBChangeSource']),
('buildbot.changes.svnpoller', ['SVNPoller'])
]),
('buildbot.schedulers', [
('buildbot.schedulers.basic', [
'SingleBranchScheduler', 'AnyBranchScheduler']),
('buildbot.schedulers.dependent', ['Dependent']),
('buildbot.schedulers.triggerable', ['Triggerable']),
('buildbot.schedulers.forcesched', ['ForceScheduler']),
('buildbot.schedulers.timed', [
'Periodic', 'Nightly', 'NightlyTriggerable']),
('buildbot.schedulers.trysched', [
'Try_Jobdir', 'Try_Userpass'])
]),
('buildbot.worker', [
('buildbot.worker.base', ['Worker']),
('buildbot.worker.ec2', ['EC2LatentWorker']),
('buildbot.worker.libvirt', ['LibVirtWorker']),
('buildbot.worker.openstack', ['OpenStackLatentWorker']),
('buildbot.worker.docker', ['DockerLatentWorker']),
('buildbot.worker.hyper', ['HyperLatentWorker']),
('buildbot.worker.local', ['LocalWorker']),
]),
('buildbot.steps', [
('buildbot.process.buildstep', ['BuildStep']),
('buildbot.steps.cmake', ['CMake']),
('buildbot.steps.cppcheck', ['Cppcheck']),
('buildbot.steps.http', [
'HTTPStep', 'POST', 'GET', 'PUT', 'DELETE', 'HEAD',
'OPTIONS']),
('buildbot.steps.master', [
'MasterShellCommand', 'SetProperty', 'SetProperties', 'LogRenderable']),
('buildbot.steps.maxq', ['MaxQ']),
('buildbot.steps.mswin', ['Robocopy']),
('buildbot.steps.mtrlogobserver', ['MTR']),
('buildbot.steps.package.deb.lintian', ['DebLintian']),
('buildbot.steps.package.deb.pbuilder', [
'DebPbuilder', 'DebCowbuilder', 'UbuPbuilder',
'UbuCowbuilder']),
('buildbot.steps.package.rpm.mock', [
'Mock', 'MockBuildSRPM', 'MockRebuild']),
('buildbot.steps.package.rpm.rpmbuild', ['RpmBuild']),
('buildbot.steps.package.rpm.rpmlint', ['RpmLint']),
('buildbot.steps.package.rpm.rpmspec', ['RpmSpec']),
('buildbot.steps.python', [
'BuildEPYDoc', 'PyFlakes', 'PyLint', 'Sphinx']),
('buildbot.steps.python_twisted', [
'HLint', 'Trial', 'RemovePYCs']),
('buildbot.steps.shell', [
'ShellCommand', 'TreeSize', 'SetPropertyFromCommand',
'Configure', 'WarningCountingShellCommand', 'Compile',
'Test', 'PerlModuleTest']),
('buildbot.steps.shellsequence', ['ShellSequence']),
('buildbot.steps.source.bzr', ['Bzr']),
('buildbot.steps.source.cvs', ['CVS']),
('buildbot.steps.source.darcs', ['Darcs']),
('buildbot.steps.source.gerrit', ['Gerrit']),
('buildbot.steps.source.git', ['Git']),
('buildbot.steps.source.github', ['GitHub']),
('buildbot.steps.source.mercurial', ['Mercurial']),
('buildbot.steps.source.mtn', ['Monotone']),
('buildbot.steps.source.p4', ['P4']),
('buildbot.steps.source.repo', ['Repo']),
('buildbot.steps.source.svn', ['SVN']),
('buildbot.steps.subunit', ['SubunitShellCommand']),
('buildbot.steps.transfer', [
'FileUpload', 'DirectoryUpload', 'MultipleFileUpload',
'FileDownload', 'StringDownload', 'JSONStringDownload',
'JSONPropertiesDownload']),
('buildbot.steps.trigger', ['Trigger']),
('buildbot.steps.vstudio', [
'VC6', 'VC7', 'VS2003', 'VC8', 'VS2005', 'VCExpress9', 'VC9',
'VS2008', 'VC10', 'VS2010', 'VC11', 'VS2012', 'VC12', 'VS2013',
'VC14', 'VS2015', 'MsBuild4', 'MsBuild', 'MsBuild12', 'MsBuild14']),
('buildbot.steps.worker', [
'SetPropertiesFromEnv', 'FileExists', 'CopyDirectory',
'RemoveDirectory', 'MakeDirectory']),
]),
('buildbot.reporters', [
('buildbot.reporters.mail', ['MailNotifier']),
('buildbot.reporters.message', ['MessageFormatter']),
('buildbot.reporters.gerrit', ['GerritStatusPush']),
('buildbot.reporters.gerrit_verify_status',
['GerritVerifyStatusPush']),
('buildbot.reporters.http', ['HttpStatusPush']),
('buildbot.reporters.github', ['GitHubStatusPush', 'GitHubCommentPush']),
('buildbot.reporters.gitlab', ['GitLabStatusPush']),
('buildbot.reporters.stash', ['StashStatusPush']),
('buildbot.reporters.bitbucket', ['BitbucketStatusPush']),
('buildbot.reporters.irc', ['IRC']),
]),
('buildbot.util', [
# Connection seems to be a way too generic name, though
('buildbot.worker.libvirt', ['Connection']),
('buildbot.changes.filter', ['ChangeFilter']),
('buildbot.changes.gerritchangesource', ['GerritChangeFilter']),
('buildbot.changes.svnpoller', [
('svn.split_file_projects_branches',
'split_file_projects_branches'),
('svn.split_file_branches', 'split_file_branches'),
('svn.split_file_alwaystrunk', 'split_file_alwaystrunk')]),
('buildbot.config', ['BuilderConfig']),
('buildbot.locks', [
'MasterLock',
'WorkerLock',
]),
('buildbot.manhole', [
'AuthorizedKeysManhole', 'PasswordManhole', 'TelnetManhole']),
('buildbot.process.builder', [
'enforceChosenWorker',
]),
('buildbot.process.factory', [
'BuildFactory', 'GNUAutoconf', 'CPAN', 'Distutils', 'Trial',
'BasicBuildFactory', 'QuickBuildFactory', 'BasicSVN']),
('buildbot.process.logobserver', ['LogLineObserver']),
('buildbot.process.properties', [
'FlattenList', 'Interpolate', 'Property', 'Transform',
'WithProperties', 'renderer']),
('buildbot.process.properties', [
'CommandlineUserManager']),
('buildbot.revlinks', ['RevlinkMatch']),
('buildbot.reporters.utils', ['URLForBuild']),
('buildbot.schedulers.forcesched', [
'AnyPropertyParameter', 'BooleanParameter',
'ChoiceStringParameter',
'CodebaseParameter', 'FixedParameter', 'InheritBuildParameter',
'IntParameter', 'NestedParameter', 'ParameterGroup',
'StringParameter', 'TextParameter', 'UserNameParameter',
'WorkerChoiceParameter',
]),
('buildbot.process.results', [
'Results', 'SUCCESS', 'WARNINGS', 'FAILURE', 'SKIPPED',
'EXCEPTION', 'RETRY', 'CANCELLED']),
('buildbot.steps.mtrlogobserver', ['EqConnectionPool']),
('buildbot.steps.source.repo', [
('repo.DownloadsFromChangeSource',
'RepoDownloadsFromChangeSource'),
('repo.DownloadsFromProperties',
'RepoDownloadsFromProperties')]),
('buildbot.steps.shellsequence', ['ShellArg']),
('buildbot.www.avatar', ['AvatarGravatar']),
('buildbot.www.auth', [
'UserPasswordAuth', 'HTPasswdAuth', 'RemoteUserAuth']),
('buildbot.www.ldapuserinfo', ['LdapUserInfo']),
('buildbot.www.oauth2', [
'GoogleAuth', 'GitHubAuth', 'GitLabAuth', 'BitbucketAuth']),
('buildbot.db.dbconfig', [
'DbConfig']),
('buildbot.www.authz', [
'Authz', 'fnmatchStrMatcher', 'reStrMatcher']),
('buildbot.www.authz.roles', [
'RolesFromEmails', 'RolesFromGroups', 'RolesFromOwner', 'RolesFromUsername']),
('buildbot.www.authz.endpointmatchers', [
'AnyEndpointMatcher', 'StopBuildEndpointMatcher', 'ForceBuildEndpointMatcher',
'RebuildBuildEndpointMatcher', 'AnyControlEndpointMatcher', 'EnableSchedulerEndpointMatcher']),
])
]), {
'console_scripts': [
'buildbot=buildbot.scripts.runner:run',
# this will also be shipped on non windows :-(
'buildbot_windows_service=buildbot.scripts.windows_service:HandleCommandLine',
]}
)
}
# set zip_safe to false to force Windows installs to always unpack eggs
# into directories, which seems to work better --
# see http://buildbot.net/trac/ticket/907
if sys.platform == "win32":
setup_args['zip_safe'] = False
py_26 = sys.version_info[0] > 2 or (
sys.version_info[0] == 2 and sys.version_info[1] >= 6)
if not py_26:
raise RuntimeError("Buildbot master requires at least Python-2.6")
# pip<1.4 doesn't have the --pre flag, and will thus attempt to install alpha
# and beta versions of Buildbot. Prevent that from happening.
VERSION_MSG = """
This is a pre-release version of Buildbot, which can only be installed with
pip-1.4 or later. Try installing the latest stable version of Buildbot instead:
pip install buildbot==0.8.12
See https://pypi.python.org/pypi/buildbot to verify the current stable version.
"""
if 'a' in version or 'b' in version:
try:
pip_dist = pkg_resources.get_distribution('pip')
except pkg_resources.DistributionNotFound:
pip_dist = None
if pip_dist:
if LooseVersion(pip_dist.version) < LooseVersion('1.4'):
raise RuntimeError(VERSION_MSG)
if sys.version_info[0] >= 3:
twisted_ver = ">= 17.1.0"
else:
twisted_ver = ">= 14.0.1"
autobahn_ver = ">= 0.16.0"
txaio_ver = ">= 2.2.2"
bundle_version = version.split("-")[0]
# dependencies
setup_args['install_requires'] = [
'setuptools >= 8.0',
'Twisted ' + twisted_ver,
'Jinja2 >= 2.1',
# required for tests, but Twisted requires this anyway
'zope.interface >= 4.1.1',
# python-future required for py2/3 compatibility
'future',
'sqlalchemy>=0.8.0',
'sqlalchemy-migrate>=0.9',
'python-dateutil>=1.5',
'txaio ' + txaio_ver,
'autobahn ' + autobahn_ver,
'PyJWT',
]
# based on https://discourse.numenta.org/t/setup-py-error-invalid-environment-marker/1298/4
if LooseVersion(setuptools_version.__version__) >= LooseVersion("20.2.2"):
    setup_args['install_requires'].append('distro;platform_system=="Linux"')
elif platform.system() == 'Linux':
setup_args['install_requires'].append('distro')
# Unit test dependencies.
test_deps = [
# http client libraries
'treq',
'txrequests',
# pyjade required for custom templates tests
'pyjade',
# boto3 and moto required for running EC2 tests
'boto3',
'moto',
# txgithub required to run buildbot.status.github module tests
'txgithub',
'ramlfications',
'mock>=2.0.0',
]
if sys.platform != 'win32':
test_deps += [
# LZ4 fails to build on Windows:
# https://github.com/steeve/python-lz4/issues/27
# lz4 required for log compression tests.
'lz4',
]
setup_args['tests_require'] = test_deps
setup_args['extras_require'] = {
'test': [
'setuptools_trial',
'isort',
# spellcheck introduced in version 1.4.0
'pylint>=1.4.0',
'pyenchant',
'flake8~=2.6.0',
] + test_deps,
'bundle': [
"buildbot-www=={0}".format(bundle_version),
"buildbot-worker=={0}".format(bundle_version),
"buildbot-waterfall-view=={0}".format(bundle_version),
"buildbot-console-view=={0}".format(bundle_version),
],
'tls': [
'Twisted[tls] ' + twisted_ver,
# There are bugs with extras inside extras:
# <https://github.com/pypa/pip/issues/3516>
# so we explicitly include Twisted[tls] dependencies.
'pyopenssl >= 16.0.0',
'service_identity',
'idna >= 0.6',
],
'docs': [
'docutils<0.13.0',
'sphinx>1.4.0',
'sphinxcontrib-blockdiag',
'sphinxcontrib-spelling',
'pyenchant',
'docutils>=0.8',
'ramlfications',
'sphinx-jinja',
'towncrier'
],
}
if '--help-commands' in sys.argv or 'trial' in sys.argv or 'test' in sys.argv:
setup_args['setup_requires'] = [
'setuptools_trial',
]
if os.getenv('NO_INSTALL_REQS'):
setup_args['install_requires'] = None
setup_args['extras_require'] = None
setup(**setup_args)
# Local Variables:
# fill-column: 71
# End:
|
grembo/buildbot
|
master/setup.py
|
Python
|
gpl-2.0
| 20,409
|
[
"Brian"
] |
229800312cc1257ec48933fc16f5a072c923304158cdabb86df2110f73178910
|
from __future__ import print_function
import random
import time
import threading
import thread
from hashlib import md5
from DIRAC.Core.Utilities.ReturnValues import S_ERROR, S_OK
from DIRAC.Core.Utilities.DIRACSingleton import DIRACSingleton
class LockRing( object ):
__metaclass__ = DIRACSingleton
def __init__( self ):
random.seed()
self.__locks = {}
self.__events = {}
def __genName( self, container ):
name = md5( str( time.time() + random.random() ) ).hexdigest()
retries = 10
while name in container and retries:
name = md5( str( time.time() + random.random() ) ).hexdigest()
retries -= 1
return name
def getLock( self, lockName = "", recursive = False ):
if not lockName:
lockName = self.__genName( self.__locks )
try:
return self.__locks[ lockName ]
except KeyError:
if recursive:
self.__locks[ lockName ] = threading.RLock()
else:
self.__locks[ lockName ] = threading.Lock()
return self.__locks[ lockName ]
def getEvent( self, evName = "" ):
if not evName:
evName = self.__genName( self.__events )
try:
return self.__events[ evName ]
except KeyError:
self.__events[ evName ] = threading.Event()
return self.__events[ evName ]
def acquire( self, lockName ):
try:
self.__locks[ lockName ].acquire()
    except KeyError:
return S_ERROR( "No lock named %s" % lockName )
return S_OK()
def release( self, lockName ):
try:
self.__locks[ lockName ].release()
    except KeyError:
return S_ERROR( "No lock named %s" % lockName )
return S_OK()
def _openAll( self ):
"""
WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
DO NOT USE EXCEPT IN JUST SPAWNED NEW CHILD PROCESSES!!!!!!!!
NEVER IN THE PARENT PROCESS!!!!!!
WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
"""
for lockName in self.__locks.keys():
try:
self.__locks[ lockName ].release()
except ( RuntimeError, thread.error, KeyError ):
pass
def _setAllEvents( self ):
"""
WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
DO NOT USE EXCEPT IN JUST SPAWNED NEW CHILD PROCESSES!!!!!!!!
NEVER IN THE PARENT PROCESS!!!!!!
WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
"""
for evName in self.__events.keys():
try:
self.__events[ evName ].set()
except KeyError:
pass
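# Illustrative usage sketch (editorial addition, not part of DIRAC): in a
# program that forks, the child should clear state inherited from the
# parent before touching the ring, e.g.
#
#   pid = os.fork()
#   if pid == 0:  # just-spawned child process
#     LockRing()._openAll()       # drop any locks the parent held
#     LockRing()._setAllEvents()  # wake anything waiting on events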
if __name__ == "__main__":
lr = LockRing()
lock = lr.getLock( "test1" )
print("ACQUIRING LOCK", lock)
lock.acquire()
print("IS THE SAME LOCK? ", lock == lr.getLock("test1"))
print("OPENING ALL LOCKS")
lr._openAll()
print("REACQUIRING LOCK", lock)
lr.acquire( "test1" )
print("RELEASING LOCK")
lr.release( "test1" )
print("IS SINGLETON", lr == LockRing())
ev = lr.getEvent( "POT" )
ev.set()
lr._setAllEvents()
print("ALL OK")
|
fstagni/DIRAC
|
Core/Utilities/LockRing.py
|
Python
|
gpl-3.0
| 2,987
|
[
"DIRAC"
] |
72eb085d053732ff87377ed750c87b6ec2da8eef0e020f043031146dce1d7219
|
"""
Base classes used by studio tests.
"""
from bok_choy.page_object import XSS_INJECTION
from common.test.acceptance.fixtures.course import CourseFixture
from common.test.acceptance.fixtures.library import LibraryFixture
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.studio.overview import CourseOutlinePage
from common.test.acceptance.pages.studio.utils import verify_ordering
from common.test.acceptance.tests.helpers import AcceptanceTest, UniqueCourseTest
class StudioCourseTest(UniqueCourseTest):
"""
Base class for all Studio course tests.
"""
def setUp(self, is_staff=False, test_xss=True): # pylint: disable=arguments-differ
"""
Install a course with no content using a fixture.
"""
super(StudioCourseTest, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
self.test_xss = test_xss
self.install_course_fixture(is_staff)
def install_course_fixture(self, is_staff=False):
"""
Install a course fixture
"""
self.course_fixture = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name'],
)
if self.test_xss:
xss_injected_unique_id = XSS_INJECTION + self.unique_id
test_improper_escaping = {u"value": xss_injected_unique_id}
self.course_fixture.add_advanced_settings({
"advertised_start": test_improper_escaping,
"info_sidebar_name": test_improper_escaping,
"cert_name_short": test_improper_escaping,
"cert_name_long": test_improper_escaping,
"display_organization": test_improper_escaping,
"display_coursenumber": test_improper_escaping,
})
self.course_info['display_organization'] = xss_injected_unique_id
self.course_info['display_coursenumber'] = xss_injected_unique_id
self.populate_course_fixture(self.course_fixture)
self.course_fixture.install()
self.user = self.course_fixture.user
self.log_in(self.user, is_staff)
def populate_course_fixture(self, course_fixture):
"""
Populate the children of the test course fixture.
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
def log_in(self, user, is_staff=False):
"""
Log in as the user that created the course. The user will be given instructor access
to the course and enrolled in it. By default the user will not have staff access unless
is_staff is passed as True.
Args:
user(dict): dictionary containing user data: {'username': ..., 'email': ..., 'password': ...}
is_staff(bool): register this user as staff
"""
self.auth_page = AutoAuthPage( # lint-amnesty, pylint: disable=attribute-defined-outside-init
self.browser,
staff=is_staff,
username=user.get('username'),
email=user.get('email'),
password=user.get('password')
)
self.auth_page.visit()
class ContainerBase(StudioCourseTest):
"""
Base class for tests that do operations on the container page.
"""
def setUp(self, is_staff=False): # lint-amnesty, pylint: disable=arguments-differ
"""
Create a unique identifier for the course used in this test.
"""
# Ensure that the superclass sets up
super(ContainerBase, self).setUp(is_staff=is_staff) # lint-amnesty, pylint: disable=super-with-arguments
self.outline = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
def go_to_nested_container_page(self):
"""
Go to the nested container page.
"""
unit = self.go_to_unit_page()
# The 0th entry is the unit page itself.
container = unit.xblocks[1].go_to_container()
return container
def go_to_unit_page(self, section_name='Test Section', subsection_name='Test Subsection', unit_name='Test Unit'):
"""
Go to the test unit page.
"""
self.outline.visit()
subsection = self.outline.section(section_name).subsection(subsection_name) # lint-amnesty, pylint: disable=no-member
return subsection.expand_subsection().unit(unit_name).go_to()
def do_action_and_verify(self, action, expected_ordering):
"""
Perform the supplied action and then verify the resulting ordering.
"""
container = self.go_to_nested_container_page()
action(container)
verify_ordering(self, container, expected_ordering)
# Reload the page to see that the change was persisted.
container = self.go_to_nested_container_page()
verify_ordering(self, container, expected_ordering)
class StudioLibraryTest(AcceptanceTest):
"""
Base class for all Studio library tests.
"""
as_staff = True
def setUp(self):
"""
Install a library with no content using a fixture.
"""
super(StudioLibraryTest, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
fixture = LibraryFixture(
'test_org',
self.unique_id,
u'Test Library {}'.format(self.unique_id),
)
self.populate_library_fixture(fixture)
fixture.install()
self.library_fixture = fixture
self.library_info = fixture.library_info
self.library_key = fixture.library_key
self.user = fixture.user
self.log_in(self.user, self.as_staff)
def populate_library_fixture(self, library_fixture):
"""
        Populate the children of the test library fixture.
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
def log_in(self, user, is_staff=False):
"""
Log in as the user that created the library.
By default the user will not have staff access unless is_staff is passed as True.
"""
auth_page = AutoAuthPage(
self.browser,
staff=is_staff,
username=user.get('username'),
email=user.get('email'),
password=user.get('password')
)
auth_page.visit()
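# Illustrative sketch (hypothetical, editorial addition): a concrete test
# subclasses one of the bases above and overrides the populate hook, e.g.
#
#   class MyLibraryContentTest(StudioLibraryTest):
#       def populate_library_fixture(self, library_fixture):
#           # add XBlocks to the fixture here, before it is installed
#           ...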
|
stvstnfrd/edx-platform
|
common/test/acceptance/tests/studio/base_studio_test.py
|
Python
|
agpl-3.0
| 6,561
|
[
"VisIt"
] |
f03dd54a245aa485f5a0f3ecf61262402fc295bb374f4dea864e7e1e1be28a33
|
##-------------------------------------------------------------------------
## Author: Owen Arnold @ ISIS/Tessella
## Date: 24/03/2011
## Purpose: Show signal cell data as surface plot. Sets color range between 0 and 3 for signal value.
##
##-------------------------------------------------------------------------
try: paraview.simple
except: from paraview.simple import *
paraview.simple._DisableFirstRenderCameraReset()
activeSource = GetActiveSource()
display = GetDisplayProperties(activeSource)
lookupTable = GetLookupTableForArray( "signal", 1, RGBPoints=[0.0, 0.23000000000000001, 0.29899999999999999, 0.754, 3.0, 0.70599999999999996, 0.016, 0.14999999999999999], VectorMode='Magnitude', NanColor=[0.25, 0.0, 0.0], ColorSpace='Diverging', ScalarRangeInitialized=1.0, LockScalarRange=1 )
a1_signal_PiecewiseFunction = CreatePiecewiseFunction()
display.Representation = 'Surface'
display.ColorArrayName = 'signal'
display.LookupTable = lookupTable
display.ColorAttributeType = 'CELL_DATA'
Render()
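# Editorial note (not part of the original macro): RGBPoints is a flat
# [value, r, g, b, value, r, g, b] list, so the locked scalar range is set
# by its first and fifth entries (0.0 and 3.0 here); edit those two values
# to rescale the color map while LockScalarRange=1 keeps it pinned.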
|
dymkowsk/mantid
|
qt/paraview_ext/PVPlugins/Macros/ShowSignal.py
|
Python
|
gpl-3.0
| 1,010
|
[
"ParaView"
] |
238dac2707de088bfc73c9650cda5ea61644a6f5855b1e60a3c80d06d7950a0c
|
# This file does statistical analysis of the results.
# from collections import Counter
from Bio.Blast import NCBIXML
from Bio import SeqIO
import os
from combine_files import *
#============analyse the output sam file : notchok1_virus.final.sort.sam==========
#=================================================================================
def sam_result(filename):
result = file(filename).readlines()
data = [] # stores each line of result into tab separated
for i in range(len(result)):
data.append(result[i].split("\t"))
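    # SAM columns used below: field 0 = QNAME (read name), field 2 = RNAME
    # (reference genome name), field 9 = SEQ (read sequence)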
ref = [] # stores duplicated reference genome names in sam file
#-----------------list unique reference genomes------------------------------
for i in range(len(data)):
ref.append(data[i][2])
num_ref = list(set(ref)) # unique reference names
#---------------calculate duplication of each genome-------------------------
# dupli = [] # stores how many reads mapping to unique reference genome
# stat = Counter(ref) # generate a dictionary count the number
# for reference in num_ref:
# dupli.append(stat[reference])
#-----------calculate how many reads map to reference uniquely---------------
reads = [] # stores number of reads mapping to each of num_ref
uniq_read = [] # stores number of uniq reads mapping to each of num_ref
uniq_seq = [] # store number of uniq seq mapping to each of num_ref
for i in range(len(num_ref)):
inter = []
inter_read = [] # read names
inter_seq = []
for j in range(len(ref)):
if num_ref[i] == ref[j]:
inter.append(num_ref[i])
inter_read.append(data[j][0])
inter_seq.append(data[j][9])
reads.append(len(inter_read))
uniq_read.append(len(list(set(inter_read))))
uniq_seq.append(len(list(set(inter_seq))))
return [num_ref,[reads,uniq_read,uniq_seq]]
##==============================================================================
##=====analyse the blast output tabular file: neither_chok1_virus1.txt======
##==============================================================================
def single_blast_result(filename,orig_file):
#filename is blast tabular file
#orig_file is neitherchok1_virus.fa
result = file(filename).readlines()
data = [] # stores each tab separated line of result
for i in range(len(result)):
data.append(result[i].split("\t"))
ref = [] # stores all duplicated reference
#-------------------------unique reference genome----------------------------
for i in range(len(data)):
ref.append(data[i][1])
num_ref = list(set(ref)) # stores unique reference
#--------------------calculate duplication of each genome--------------------
# dupli = []
# stat = Counter(ref)
# for reference in num_ref:
# dupli.append(stat[reference])
#---------------calculate how many reads map to reference uniquely-----------
reads = []
reads_name = []
uniq_read = [] # store number of uniq reads mapping to each of num_ref
uniq_read_name = []
for i in range(len(num_ref)):
inter_read = []
for j in range(len(ref)):
if num_ref[i] == ref[j]:
inter_read.append(data[j][0])
reads.append(len(inter_read))
reads_name.append(inter_read)
uniq_read.append(len(list(set(inter_read))))
uniq_read_name.append(list(set(inter_read)))
#--------------find number of unique sequences mapping to each reference----------------
result = SeqIO.parse(open(orig_file,"rU"),"fasta")
orig_read = []
orig_seq = []
uniq_seq = []
#-------------store orig_file sequence and read name into two variables
for item in result:
orig_read.append(item.id)
orig_seq.append(str(item.seq))
for i in range(len(num_ref)):
inter_seq = []
for j in range(len(uniq_read_name[i])):
index = orig_read.index(uniq_read_name[i][j])
inter_seq.append(orig_seq[index])
uniq_seq.append(len(list(set(inter_seq))))
return [num_ref,[reads,uniq_read,uniq_seq]]
#------------analyze two blast results---------------------------------------
def pair_blast_result(path,file1,file2,origion1,origion2):
    # file1/file2 are two blast tabular results
    # origion1/origion2 are the two neither_chok1_virus fasta files
    combine_files(path + '/output.txt', file1,file2)
    combine_files(path + '/origion.fa',origion1,origion2)
    [num_ref,[dupli,uniq_read,uniq_seq]] = single_blast_result(path + '/output.txt', path + '/origion.fa')
    os.remove(path + '/origion.fa')
    os.remove(path + '/output.txt')
    return [num_ref,[dupli,uniq_read,uniq_seq]]
#============================================================================
#===============combine sam result and blastresult===========================
def combine_results(fir_ref,fir_stat,sec_ref,sec_stat):
    # fir_ref = a list of reference names. fir_stat = the number of
    # reads mapped to each of the references. They have the same length.
ref = fir_ref
sam = fir_stat
blast_dupli = sec_stat[0]
blast_read =sec_stat[1]
blast_seq = sec_stat[2]
dupli = [0]*len(ref) # these three stores the statistic results
read = [0] * len(ref)
sequence = [0] * len(ref)
for reference in sec_ref:
if reference in fir_ref:
index = fir_ref.index(reference) # index of blast reference in sam reference
n = sec_ref.index(reference) # index of blast reference in blast reference
dupli[index] = blast_dupli[n]
read[index] = blast_read[n]
sequence[index] = blast_seq[n]
else:
ref.append(reference)
for i in range(len(sam)):
sam[i].append(0)
index = sec_ref.index(reference)
dupli.append(blast_dupli[index])
read.append(blast_read[index])
sequence.append(blast_seq[index])
sam.append(dupli); sam.append(read);sam.append(sequence)
sam.insert(0,ref)
return sam
##========================get final results============================================
##=====================================================================================
def get_final_results(output,*paths):
#this function combines all the results into one variable
sam_file = 'notchok1_virus.final.sort.sam'
blast_file1 = 'neither_chok1_virus1.txt'
blast_file2 = 'neither_chok1_virus2.txt'
orig1 = 'neither_chok1_virus1.fa'
orig2 = 'neither_chok1_virus2.fa'
[sam_ref,sam_stat] = sam_result(paths[0] + '/' + sam_file)
print 'sam result done'
[blast_ref,blast_stat] = pair_blast_result(paths[0],paths[0] + '/' + blast_file1,paths[0] + '/' + blast_file2, paths[0]+'/'+ orig1,paths[0]+'/'+orig2)
print 'blast result done'
result = combine_results(sam_ref,sam_stat,blast_ref,blast_stat)
for i in range(1,len(paths)):
[sam_ref,sam_stat] = sam_result(paths[i] + '/' + sam_file)
print 'sam result done'
result = combine_results(result[0],result[1:],sam_ref,sam_stat)
print 'sam combine done'
[blast_ref,blast_stat] = pair_blast_result(paths[i],paths[i] + '/' + blast_file1,paths[i] + '/' + blast_file2, paths[i]+'/'+ orig1,paths[i]+'/'+orig2)
print 'blast result done'
result = combine_results(result[0],result[1:],blast_ref,blast_stat)
print 'blast combine done'
    # get name and molecule type for each reference
    integrate_name_type(result, 0, 'nucleotide')  # db name assumed; see get_name()
# get results for simplified version
simp_result = result[:3]
n = len(result)
iteration = (n - 3)/6
for i in range(iteration):
simp_dupli = [result[i*6 + 3 ][j] + result[i*6 + 6][j] for j in range(len(result[0]))]
simp_read = [result[i*6 + 4][j] + result[i*6 + 7][j] for j in range(len(result[0]))]
simp_seq = [result[i*6 + 5][j] + result[i*6 +8][j] for j in range(len(result[0]))]
simp_result.append(simp_dupli)
simp_result.append(simp_read)
simp_result.append(simp_seq)
write2file(output,result,simp_result)
return [result,simp_result]
#======================================================================================
##====================================================================================
#------------get gene name and type---------------------------------
from Bio import Entrez
Entrez.email = "shangzhong0619@gmail.com"
def get_name(refname,datatype):
index = refname.replace('|','x',1).index('|')
ncid = refname[3:index]
handle = Entrez.efetch(db=datatype,id=ncid,rettype='gb')
record = handle.read()
gene_result = record.split()
    definition = gene_result.index("DEFINITION")
    accession = gene_result.index("ACCESSION")
    gene_name = " ".join(gene_result[definition+1:accession])
if datatype == 'nucleotide':
gene_type = gene_result[4]
else:
if 'accession' in gene_result:
nt_accession = gene_result.index('accession')
ncid = gene_result[nt_accession + 1]
handle = Entrez.efetch(db='nucleotide',id=ncid, rettype='gb')
record = handle.read()
gene_result = record.split()
gene_type = gene_result[4]
else:
gene_type ='linear'
return [gene_name,gene_type]
# #============================================================================
##---integrate genome and type------------
def integrate_name_type(result,column,datatype):
    # result is the array holding the statistics results; column indicates
    # which column of result stores the name information
    # each item in result represents a column in the matrix
ref_name = []
ref_type = []
name = result[column]
for i in range(len(name)):
[gene_name,gene_type]=get_name(name[i],datatype)
ref_name.append(gene_name)
ref_type.append(gene_type)
result.insert(1,ref_name)
result.insert(1,ref_type)
#------output the result------------
def write2file(output,result,simp_result):
stat_result = open(output + '/' + "stat_result.txt",'w')
for i in range(len(result[0])):
for j in range(len(result)):
stat_result.write(str(result[j][i])+"\t")
stat_result.write("\n")
stat_result.close()
stat_result = open(output + '/' + "simp_result.txt",'w')
for i in range(len(simp_result[0])):
for j in range(len(simp_result)):
stat_result.write(str(simp_result[j][i])+"\t")
stat_result.write("\n")
stat_result.close()
def write_file(outputfile,result):
    stat_result = open(outputfile,'w')
    for i in range(len(result[0])):
        for j in range(len(result)):
            stat_result.write('\t' + str(result[j][i]))
        stat_result.write("\n")
    stat_result.close()
#============================================================================
# get stat of reads used for assembly =========
#============================================================================
# the sam result would be the same, the blast result is different.
def stat_assem(sam1,sam2,blast1,blast2):
    # blast1/blast2 are the final blast paired-read files: blast_reads1.fa, blast_reads2.fa
sam_stat1 = list(SeqIO.parse(open(sam1,"rU"),"fasta"))
    sam_stat2 = list(SeqIO.parse(open(sam2,"rU"),"fasta"))
sam_sequence = []
sam_reads = len(sam_stat1) # number of uniq pairs
for item in sam_stat1:
sam_sequence.append(str(item.seq))
for item in sam_stat2:
sam_sequence.append(str(item.seq))
sam_uniq_seq = len(list(set(sam_sequence))) # number of uniq sequence
# blast results
blast_stat1 = list(SeqIO.parse(open(blast1,"rU"),"fasta"))
blast_stat2 = list(SeqIO.parse(open(blast2,"rU"),"fasta"))
blast_seq = []
blast_reads = len(blast_stat1)
    for item in blast_stat1:
        blast_seq.append(str(item.seq))
    for item in blast_stat2:
        blast_seq.append(str(item.seq))
blast_uniq_seq = len(list(set(blast_seq)))
uniq_seq = sam_uniq_seq + blast_uniq_seq
uniq_reads = sam_reads + blast_reads
return [uniq_reads, uniq_seq]
def stat4allassem(output,*paths):
n = len(paths)
sam1 = 'notchok1_virus1.fa'
sam2 = 'notchok1_virus2.fa'
blast1 = 'blast_reads1.fa'
blast2 = 'blast_reads2.fa'
result = []
for i in range(n):
[uniq_reads,uniq_seq] = stat_assem(paths[i] + '/' + sam1, paths[i] + '/' + sam2, paths[i] + '/' + blast1, paths[i] + '/' + blast2)
result.append(uniq_reads)
result.append(uniq_seq)
return result
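# Illustrative usage sketch (hypothetical paths, editorial addition):
#
#   [result, simp_result] = get_final_results('/path/to/stats_out',
#                                             '/path/to/sample1_dir',
#                                             '/path/to/sample2_dir')
#
# where each sample directory contains the sam/blast files named at the
# top of get_final_results().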
|
shl198/Pipeline
|
DetectVirus/reads_stat.py
|
Python
|
mit
| 12,645
|
[
"BLAST"
] |
1cc938183561e416a6a9161f7bf17a24075ff071d79c2061edfc1f5f478046ef
|
#!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
# ---------------------------------------------------------------------------
#
# Copyright (C) 1998-2003 Markus Franz Xaver Johannes Oberhumer
# Copyright (C) 2003 Mt. Hood Playing Card Co.
# Copyright (C) 2005-2009 Skomoroh
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------
import math
import time
import traceback
from pickle import Pickler, Unpickler, UnpicklingError
import attr
from pysol_cards.cards import ms_rearrange
from pysol_cards.random import random__int2str
from pysollib.game.dump import pysolDumpGame
from pysollib.gamedb import GI
from pysollib.help import help_about
from pysollib.hint import DefaultHint
from pysollib.mfxutil import Image, ImageTk, USE_PIL
from pysollib.mfxutil import Struct, SubclassResponsibility, destruct
from pysollib.mfxutil import format_time, print_err
from pysollib.mfxutil import uclock, usleep
from pysollib.move import AFlipAllMove
from pysollib.move import AFlipAndMoveMove
from pysollib.move import AFlipMove
from pysollib.move import AMoveMove
from pysollib.move import ANextRoundMove
from pysollib.move import ASaveSeedMove
from pysollib.move import ASaveStateMove
from pysollib.move import AShuffleStackMove
from pysollib.move import ASingleCardMove
from pysollib.move import ASingleFlipMove
from pysollib.move import ATurnStackMove
from pysollib.move import AUpdateStackMove
from pysollib.mygettext import _
from pysollib.mygettext import ungettext
from pysollib.pysolrandom import LCRandom31, PysolRandom, construct_random
from pysollib.pysoltk import CURSOR_WATCH
from pysollib.pysoltk import Card
from pysollib.pysoltk import EVENT_HANDLED, EVENT_PROPAGATE
from pysollib.pysoltk import MfxCanvasLine, MfxCanvasRectangle, MfxCanvasText
from pysollib.pysoltk import MfxExceptionDialog, MfxMessageDialog
from pysollib.pysoltk import after, after_cancel, after_idle
from pysollib.pysoltk import bind, wm_map
from pysollib.settings import DEBUG
from pysollib.settings import PACKAGE, TITLE, TOOLKIT, TOP_SIZE
from pysollib.settings import VERSION, VERSION_TUPLE
from pysollib.struct_new import NewStruct
import random2
import six
from six import BytesIO
from six.moves import range
if TOOLKIT == 'tk':
from pysollib.ui.tktile.solverdialog import reset_solver_dialog
else:
from pysollib.pysoltk import reset_solver_dialog
# See: https://github.com/shlomif/PySolFC/issues/159 .
# 'factory=' is absent from older versions.
assert getattr(attr, '__version_info__', (0, 0, 0)) >= (18, 2, 0), (
"Newer version of https://pypi.org/project/attrs/ is required.")
PLAY_TIME_TIMEOUT = 200
S_PLAY = 0x40
# ************************************************************************
# * Base class for all solitaire games
# *
# * Handles:
# * load/save
# * undo/redo (using a move history)
# * hints/demo
# ************************************************************************
def _updateStatus_process_key_val(tb, sb, k, v):
if k == "gamenumber":
if v is None:
if sb:
sb.updateText(gamenumber="")
# self.top.wm_title("%s - %s"
# % (TITLE, self.getTitleName()))
return
if isinstance(v, six.string_types):
if sb:
sb.updateText(gamenumber=v)
# self.top.wm_title("%s - %s %s" % (TITLE,
# self.getTitleName(), v))
return
if k == "info":
# print 'updateStatus info:', v
if v is None:
if sb:
sb.updateText(info="")
return
if isinstance(v, str):
if sb:
sb.updateText(info=v)
return
if k == "moves":
if v is None:
# if tb: tb.updateText(moves="Moves\n")
if sb:
sb.updateText(moves="")
return
if isinstance(v, tuple):
# if tb: tb.updateText(moves="Moves\n%d/%d" % v)
if sb:
sb.updateText(moves="%d/%d" % v)
return
if isinstance(v, int):
# if tb: tb.updateText(moves="Moves\n%d" % v)
if sb:
sb.updateText(moves="%d" % v)
return
if isinstance(v, str):
# if tb: tb.updateText(moves=v)
if sb:
sb.updateText(moves=v)
return
if k == "player":
if v is None:
if tb:
tb.updateText(player=_("Player\n"))
return
if isinstance(v, six.string_types):
if tb:
# if self.app.opt.toolbar_size:
if tb.getSize():
tb.updateText(player=_("Player\n") + v)
else:
tb.updateText(player=v)
return
if k == "stats":
if v is None:
if sb:
sb.updateText(stats="")
return
if isinstance(v, tuple):
t = "%d: %d/%d" % (v[0]+v[1], v[0], v[1])
if sb:
sb.updateText(stats=t)
return
if k == "time":
        if v is None:
            if sb:
                sb.updateText(time='')
            return
        if isinstance(v, six.string_types):
            if sb:
                sb.updateText(time=v)
            return
if k == 'stuck':
if sb:
sb.updateText(stuck=v)
return
raise AttributeError(k)
def _stats__is_perfect(stats):
"""docstring for _stats__is_perfect"""
return (stats.undo_moves == 0 and
stats.goto_bookmark_moves == 0 and
# stats.quickplay_moves == 0 and
stats.highlight_piles == 0 and
stats.highlight_cards == 0 and
stats.shuffle_moves == 0)
def _highlightCards__calc_item(canvas, delta, cw, ch, s, c1, c2, color):
assert c1 in s.cards and c2 in s.cards
tkraise = False
if c1 is c2:
# highlight single card
sx0, sy0 = s.getOffsetFor(c1)
x1, y1 = s.getPositionFor(c1)
x2, y2 = x1, y1
if c1 is s.cards[-1]:
# last card in the stack (for Pyramid-like games)
tkraise = True
else:
# highlight pile
if len(s.CARD_XOFFSET) > 1:
sx0 = 0
else:
sx0 = s.CARD_XOFFSET[0]
if len(s.CARD_YOFFSET) > 1:
sy0 = 0
else:
sy0 = s.CARD_YOFFSET[0]
x1, y1 = s.getPositionFor(c1)
x2, y2 = s.getPositionFor(c2)
if sx0 != 0 and sy0 == 0:
# horizontal stack
y2 += ch
if c2 is s.cards[-1]: # top card
x2 += cw
else:
if sx0 > 0:
# left to right
x2 += sx0
else:
# right to left
x1 += cw
x2 += cw + sx0
elif sx0 == 0 and sy0 != 0:
# vertical stack
x2 += cw
if c2 is s.cards[-1]: # top card
y2 += ch
else:
if sy0 > 0:
# up to down
y2 = y2 + sy0
else:
# down to up
y1 += ch
y2 += ch + sy0
else:
x2 += cw
y2 += ch
tkraise = True
# print c1, c2, x1, y1, x2, y2
x1, x2 = x1-delta[0], x2+delta[1]
y1, y2 = y1-delta[2], y2+delta[3]
if TOOLKIT == 'tk':
r = MfxCanvasRectangle(canvas, x1, y1, x2, y2,
width=4, fill=None, outline=color)
if tkraise:
r.tkraise(c2.item)
elif TOOLKIT == 'kivy':
r = MfxCanvasRectangle(canvas, x1, y1, x2, y2,
width=4, fill=None, outline=color)
if tkraise:
r.tkraise(c2.item)
elif TOOLKIT == 'gtk':
r = MfxCanvasRectangle(canvas, x1, y1, x2, y2,
width=4, fill=None, outline=color,
group=s.group)
if tkraise:
i = s.cards.index(c2)
for c in s.cards[i+1:]:
c.tkraise(1)
return r
@attr.s
class StackGroups(NewStruct):
dropstacks = attr.ib(factory=list)
hp_stacks = attr.ib(factory=list) # for getHightlightPilesStacks()
openstacks = attr.ib(factory=list)
reservestacks = attr.ib(factory=list)
talonstacks = attr.ib(factory=list)
def to_tuples(self):
"""docstring for to_tuples"""
self.openstacks = [s for s in self.openstacks
if s.cap.max_accept >= s.cap.min_accept]
self.hp_stacks = [s for s in self.dropstacks
if s.cap.max_move >= 2]
self.openstacks = tuple(self.openstacks)
self.talonstacks = tuple(self.talonstacks)
self.dropstacks = tuple(self.dropstacks)
self.reservestacks = tuple(self.reservestacks)
self.hp_stacks = tuple(self.hp_stacks)
@attr.s
class StackRegions(NewStruct):
# list of tuples(stacks, rect)
info = attr.ib(factory=list)
# list of stacks in no region
remaining = attr.ib(factory=list)
data = attr.ib(factory=list)
# init info (at the start)
init_info = attr.ib(factory=list)
def calc_info(self, xf, yf, widthpad=0, heightpad=0):
"""docstring for calc_info"""
info = []
for stacks, rect in self.init_info:
newrect = (int(round((rect[0] + widthpad) * xf)),
int(round((rect[1] + heightpad) * yf)),
int(round((rect[2] + widthpad) * xf)),
int(round((rect[3] + heightpad) * yf)))
info.append((stacks, newrect))
self.info = tuple(info)
def optimize(self, remaining):
"""docstring for optimize"""
# sort data by priority
self.data.sort()
self.data.reverse()
# copy (stacks, rect) to info
self.info = []
for d in self.data:
self.info.append((d[2], d[3]))
self.info = tuple(self.info)
# determine remaining stacks
for stacks, rect in self.info:
for stack in stacks:
while stack in remaining:
remaining.remove(stack)
self.remaining = tuple(remaining)
self.init_info = self.info
@attr.s
class GameStacks(NewStruct):
talon = attr.ib(default=None)
waste = attr.ib(default=None)
foundations = attr.ib(factory=list)
rows = attr.ib(factory=list) # for getHightlightPilesStacks()
reserves = attr.ib(factory=list)
internals = attr.ib(factory=list)
def to_tuples(self):
self.foundations = tuple(self.foundations)
self.rows = tuple(self.rows)
self.reserves = tuple(self.reserves)
self.internals = tuple(self.internals)
@attr.s
class GameDrag(NewStruct):
event = attr.ib(default=None)
timer = attr.ib(default=None)
start_x = attr.ib(default=0)
start_y = attr.ib(default=0)
index = attr.ib(default=-1)
stack = attr.ib(default=None)
shade_stack = attr.ib(default=None)
shade_img = attr.ib(default=None)
cards = attr.ib(factory=list)
canshade_stacks = attr.ib(factory=list)
noshade_stacks = attr.ib(factory=list)
shadows = attr.ib(factory=list)
@attr.s
class GameTexts(NewStruct):
info = attr.ib(default=None)
help = attr.ib(default=None)
misc = attr.ib(default=None)
score = attr.ib(default=None)
base_rank = attr.ib(default=None)
list = attr.ib(factory=list)
@attr.s
class GameHints(NewStruct):
list = attr.ib(default=None)
index = attr.ib(default=-1)
level = attr.ib(default=-1)
@attr.s
class GameStatsStruct(NewStruct):
hints = attr.ib(default=0) # number of hints consumed
# number of highlight piles consumed
highlight_piles = attr.ib(default=0)
# number of highlight matching cards consumed
highlight_cards = attr.ib(default=0)
# number of highlight same rank consumed
highlight_samerank = attr.ib(default=0)
undo_moves = attr.ib(default=0) # number of undos
redo_moves = attr.ib(default=0) # number of redos
# number of total moves in this game
total_moves = attr.ib(default=0)
player_moves = attr.ib(default=0) # number of moves
# number of moves while in demo mode
demo_moves = attr.ib(default=0)
autoplay_moves = attr.ib(default=0) # number of moves
quickplay_moves = attr.ib(default=0) # number of quickplay moves
goto_bookmark_moves = attr.ib(default=0) # number of goto bookmark
shuffle_moves = attr.ib(default=0) # number of shuffles (Mahjongg)
# did this game already update the demo stats ?
demo_updated = attr.ib(default=0)
update_time = attr.ib()
@update_time.default
def _foofoo(self):
return time.time() # for updateTime()
elapsed_time = attr.ib(default=0.0)
pause_start_time = attr.ib(default=0.0)
def _reset_statistics(self):
"""docstring for _reset_stats"""
self.undo_moves = 0
self.redo_moves = 0
self.player_moves = 0
self.demo_moves = 0
self.total_moves = 0
self.quickplay_moves = 0
self.goto_bookmark_moves = 0
_GLOBAL_U_PLAY = 0
@attr.s
class GameGlobalStatsStruct(NewStruct):
holded = attr.ib(default=0) # is this a holded game
# number of times this game was loaded
loaded = attr.ib(default=0)
# number of times this game was saved
saved = attr.ib(default=0)
# number of times this game was restarted
restarted = attr.ib(default=0)
goto_bookmark_moves = attr.ib(default=0) # number of goto bookmark
# did this game already update the player stats ?
updated = attr.ib(default=_GLOBAL_U_PLAY)
start_time = attr.ib()
@start_time.default
def _foofoo(self):
return time.time() # for updateTime()
total_elapsed_time = attr.ib(default=0.0)
start_player = attr.ib(default=None)
@attr.s
class GameWinAnimation(NewStruct):
timer = attr.ib(default=None)
images = attr.ib(factory=list)
tk_images = attr.ib(factory=list) # saved tk images
saved_images = attr.ib(factory=dict) # saved resampled images
canvas_images = attr.ib(factory=list) # ids of canvas images
frame_num = attr.ib(default=0) # number of the current frame
width = attr.ib(default=0)
height = attr.ib(default=0)
@attr.s
class GameMoves(NewStruct):
current = attr.ib(factory=list)
history = attr.ib(factory=list)
index = attr.ib(default=0)
state = attr.ib(default=S_PLAY)
# used when loading a game
@attr.s
class GameLoadInfo(NewStruct):
ncards = attr.ib(default=0)
stacks = attr.ib(factory=list)
talon_round = attr.ib(default=1)
# global saveinfo survives a game restart
@attr.s
class GameGlobalSaveInfo(NewStruct):
bookmarks = attr.ib(factory=dict)
comment = attr.ib(default="")
# Needed for saving a game
@attr.s
class GameSaveInfo(NewStruct):
stack_caps = attr.ib(factory=list)
_Game_LOAD_CLASSES = [GameGlobalSaveInfo, GameGlobalStatsStruct, GameMoves,
GameSaveInfo, GameStatsStruct, ]
class Game(object):
# for self.gstats.updated
U_PLAY = _GLOBAL_U_PLAY
U_WON = -2
U_LOST = -3
U_PERFECT = -4
# for self.moves.state
S_INIT = 0x00
S_DEAL = 0x10
S_FILL = 0x20
S_RESTORE = 0x30
S_UNDO = 0x50
S_PLAY = S_PLAY
S_REDO = 0x60
# for loading and saving - subclasses should override if
# the format for a saved game changed (see also canLoadGame())
GAME_VERSION = 1
# only basic initialization here
def __init__(self, gameinfo):
self.preview = 0
self.random = None
self.gameinfo = gameinfo
self.id = gameinfo.id
assert self.id > 0
self.busy = 0
self.pause = False
self.finished = False
self.version = VERSION
self.version_tuple = VERSION_TUPLE
self.cards = []
self.stackmap = {} # dict with (x,y) tuples as key
self.allstacks = []
        self.sn_groups = []  # snapshot groups; list of lists of similar stacks
self.snapshots = []
self.failed_snapshots = []
self.stackdesc_list = []
self.demo_logo = None
self.pause_logo = None
self.s = GameStacks()
self.sg = StackGroups()
self.regions = StackRegions()
self.init_size = (0, 0)
self.center_offset = (0, 0)
self.event_handled = False # if click event handled by Stack (???)
self.reset()
# main constructor
def create(self, app):
# print 'Game.create'
old_busy = self.busy
self.__createCommon(app)
self.setCursor(cursor=CURSOR_WATCH)
# print 'gameid:', self.id
self.top.wm_title(TITLE + " - " + self.getTitleName())
self.top.wm_iconname(TITLE + " - " + self.getTitleName())
# create the game
if self.app.intro.progress:
self.app.intro.progress.update(step=1)
self.createGame()
# set some defaults
self.createSnGroups()
# convert stackgroups to tuples (speed)
self.allstacks = tuple(self.allstacks)
self.sg.to_tuples()
self.s.to_tuples()
# init the stack view
for stack in self.allstacks:
stack.prepareStack()
stack.assertStack()
if self.s.talon:
assert hasattr(self.s.talon, "round")
assert hasattr(self.s.talon, "max_rounds")
if DEBUG:
self._checkGame()
self.optimizeRegions()
# create cards
if not self.cards:
self.cards = self.createCards(progress=self.app.intro.progress)
self.initBindings()
# self.top.bind('<ButtonPress>', self.top._sleepEvent)
# self.top.bind('<3>', self.top._sleepEvent)
# update display properties
self.canvas.busy = True
# geometry
mycond = (self.app.opt.save_games_geometry and
self.id in self.app.opt.games_geometry)
if mycond:
# restore game geometry
w, h = self.app.opt.games_geometry[self.id]
self.canvas.config(width=w, height=h)
        if USE_PIL:
if self.app.opt.auto_scale:
w, h = self.app.opt.game_geometry
self.canvas.setInitialSize(w, h, margins=False,
scrollregion=False)
# self.canvas.config(width=w, height=h)
# dx, dy = self.canvas.xmargin, self.canvas.ymargin
# self.canvas.config(scrollregion=(-dx, -dy, dx, dy))
else:
if not mycond:
w = int(round(self.width * self.app.opt.scale_x))
h = int(round(self.height * self.app.opt.scale_y))
self.canvas.setInitialSize(w, h)
self.top.wm_geometry("") # cancel user-specified geometry
# preserve texts positions
for t in ('info', 'help', 'misc', 'score', 'base_rank'):
item = getattr(self.texts, t)
if item:
coords = self.canvas.coords(item)
setattr(self.init_texts, t, coords)
#
for item in self.texts.list:
coords = self.canvas.coords(item)
self.init_texts.list.append(coords)
# resize
self.resizeGame()
# fix coords of cards (see self.createCards)
x, y = self.s.talon.x, self.s.talon.y
for c in self.cards:
c.moveTo(x, y)
else:
# no PIL
self.canvas.setInitialSize(self.width, self.height)
self.top.wm_geometry("") # cancel user-specified geometry
# done; update view
self.top.update_idletasks()
self.canvas.busy = False
if DEBUG >= 4:
MfxCanvasRectangle(self.canvas, 0, 0, self.width, self.height,
width=2, fill=None, outline='green')
#
self.stats.update_time = time.time()
self.showHelp() # just in case
hint_class = self.getHintClass()
if hint_class is not None:
self.Stuck_Class = hint_class(self, 0)
self.busy = old_busy
def _checkGame(self):
class_name = self.__class__.__name__
if self.s.foundations:
ncards = 0
for stack in self.s.foundations:
ncards += stack.cap.max_cards
if ncards != self.gameinfo.ncards:
print_err('invalid sum of foundations.max_cards: '
'%s: %s %s' %
(class_name, ncards, self.gameinfo.ncards),
2)
if self.s.rows:
from pysollib.stack import AC_RowStack, UD_AC_RowStack, \
SS_RowStack, UD_SS_RowStack, \
RK_RowStack, UD_RK_RowStack, \
Spider_AC_RowStack, Spider_SS_RowStack
r = self.s.rows[0]
for c, f in (
((Spider_AC_RowStack, Spider_SS_RowStack),
(self._shallHighlightMatch_RK,
self._shallHighlightMatch_RKW)),
((AC_RowStack, UD_AC_RowStack),
(self._shallHighlightMatch_AC,
self._shallHighlightMatch_ACW)),
((SS_RowStack, UD_SS_RowStack),
(self._shallHighlightMatch_SS,
self._shallHighlightMatch_SSW)),
((RK_RowStack, UD_RK_RowStack),
(self._shallHighlightMatch_RK,
self._shallHighlightMatch_RKW)),):
if isinstance(r, c):
if self.shallHighlightMatch not in f:
print_err('shallHighlightMatch is not valid: '
' %s, %s' % (class_name, r.__class__), 2)
if r.cap.mod == 13 and self.shallHighlightMatch != f[1]:
print_err('shallHighlightMatch is not valid (wrap): '
' %s, %s' % (class_name, r.__class__), 2)
break
if self.s.talon.max_rounds > 1 and self.s.talon.texts.rounds is None:
print_err('max_rounds > 1, but talon.texts.rounds is None: '
'%s' % class_name, 2)
elif (self.s.talon.max_rounds <= 1 and
self.s.talon.texts.rounds is not None):
print_err('max_rounds <= 1, but talon.texts.rounds is not None: '
'%s' % class_name, 2)
def _calcMouseBind(self, binding_format):
"""docstring for _calcMouseBind"""
return self.app.opt.calcCustomMouseButtonsBinding(binding_format)
def initBindings(self):
# note: a Game is only allowed to bind self.canvas and not to self.top
# bind(self.canvas, "<Double-1>", self.undoHandler)
bind(self.canvas,
self._calcMouseBind("<{mouse_button1}>"), self.undoHandler)
bind(self.canvas,
self._calcMouseBind("<{mouse_button2}>"), self.dropHandler)
bind(self.canvas,
self._calcMouseBind("<{mouse_button3}>"), self.redoHandler)
bind(self.canvas, '<Unmap>', self._unmapHandler)
bind(self.canvas, '<Configure>', self._configureHandler, add=True)
def __createCommon(self, app):
self.busy = 1
self.app = app
self.top = app.top
self.canvas = app.canvas
self.filename = ""
self.drag = GameDrag()
if self.gstats.start_player is None:
self.gstats.start_player = self.app.opt.player
# optional MfxCanvasText items
self.texts = GameTexts()
# initial position of the texts
self.init_texts = GameTexts()
def createPreview(self, app):
old_busy = self.busy
self.__createCommon(app)
self.preview = max(1, self.canvas.preview)
# create game
self.createGame()
# set some defaults
self.sg.openstacks = [s for s in self.sg.openstacks
if s.cap.max_accept >= s.cap.min_accept]
self.sg.hp_stacks = [s for s in self.sg.dropstacks
if s.cap.max_move >= 2]
# init the stack view
for stack in self.allstacks:
stack.prepareStack()
stack.assertStack()
self.optimizeRegions()
# create cards
self.cards = self.createCards()
#
self.canvas.setInitialSize(self.width, self.height)
self.busy = old_busy
def destruct(self):
# help breaking circular references
for obj in self.cards:
destruct(obj)
for obj in self.allstacks:
obj.destruct()
destruct(obj)
# Do not destroy game structure (like stacks and cards) here !
def reset(self, restart=0):
self.filename = ""
self.demo = None
self.solver = None
self.hints = GameHints()
self.saveinfo = GameSaveInfo()
self.loadinfo = GameLoadInfo()
self.snapshots = []
self.failed_snapshots = []
# local statistics are reset on each game restart
self.stats = GameStatsStruct()
self.startMoves()
if restart:
return
# global statistics survive a game restart
self.gstats = GameGlobalStatsStruct()
self.gsaveinfo = GameGlobalSaveInfo()
# some vars for win animation
self.win_animation = GameWinAnimation()
def getTitleName(self):
return self.app.getGameTitleName(self.id)
def getGameNumber(self, format):
s = self.random.getSeedAsStr()
if format:
return "# " + s
return s
# this is called from within createGame()
def setSize(self, w, h):
self.width, self.height = int(round(w)), int(round(h))
dx, dy = self.canvas.xmargin, self.canvas.ymargin
self.init_size = self.width+2*dx, self.height+2*dy
def setCursor(self, cursor):
if self.canvas:
self.canvas.config(cursor=cursor)
# self.canvas.update_idletasks()
# if self.app and self.app.toolbar:
# self.app.toolbar.setCursor(cursor=cursor)
def newGame(self, random=None, restart=0, autoplay=1, shuffle=True,
dealer=None):
self.finished = False
old_busy, self.busy = self.busy, 1
self.setCursor(cursor=CURSOR_WATCH)
self.stopWinAnimation()
self.disableMenus()
if shuffle:
self.redealAnimation()
self.reset(restart=restart)
self.resetGame()
self.createRandom(random)
if shuffle:
self.shuffle()
assert len(self.s.talon.cards) == self.gameinfo.ncards
for stack in self.allstacks:
stack.updateText()
self.updateText()
self.updateStatus(
player=self.app.opt.player,
gamenumber=self.getGameNumber(format=1),
moves=(0, 0),
stats=self.app.stats.getStats(
self.app.opt.player,
self.id),
stuck='')
reset_solver_dialog()
# unhide toplevel when we use a progress bar
if not self.preview:
wm_map(self.top, maximized=self.app.opt.wm_maximized)
self.top.busyUpdate()
if TOOLKIT == 'gtk':
# FIXME
if self.top:
self.top.update_idletasks()
self.top.show_now()
self.stopSamples()
self.moves.state = self.S_INIT
if dealer:
dealer()
else:
if not self.preview:
self.resizeGame()
self.startGame()
self.startMoves()
for stack in self.allstacks:
stack.updateText()
self.updateSnapshots()
self.updateText()
self.updateStatus(moves=(0, 0))
self.updateMenus()
self.stopSamples()
if autoplay:
self.autoPlay()
self.stats.player_moves = 0
self.setCursor(cursor=self.app.top_cursor)
self.stats.update_time = time.time()
if not self.preview:
self.startPlayTimer()
self.busy = old_busy
def restoreGame(self, game, reset=1):
old_busy, self.busy = self.busy, 1
if reset:
self.reset()
self.resetGame()
# 1) copy loaded variables
self.filename = game.filename
self.version = game.version
self.version_tuple = game.version_tuple
self.random = game.random
self.moves = game.moves
self.stats = game.stats
self.gstats = game.gstats
# 2) copy extra save-/loadinfo
self.saveinfo = game.saveinfo
self.gsaveinfo = game.gsaveinfo
self.s.talon.round = game.loadinfo.talon_round
self.finished = game.finished
self.snapshots = game.snapshots
# 3) move cards to stacks
assert len(self.allstacks) == len(game.loadinfo.stacks)
old_state = game.moves.state
game.moves.state = self.S_RESTORE
for i in range(len(self.allstacks)):
for t in game.loadinfo.stacks[i]:
card_id, face_up = t
card = self.cards[card_id]
if face_up:
card.showFace()
else:
card.showBack()
self.allstacks[i].addCard(card)
game.moves.state = old_state
# 4) update settings
for stack_id, cap in self.saveinfo.stack_caps:
# print stack_id, cap
self.allstacks[stack_id].cap.update(cap.__dict__)
# 5) subclass settings
self._restoreGameHook(game)
# 6) update view
for stack in self.allstacks:
stack.updateText()
self.updateText()
self.updateStatus(
player=self.app.opt.player,
gamenumber=self.getGameNumber(format=1),
moves=(self.moves.index, self.stats.total_moves),
stats=self.app.stats.getStats(self.app.opt.player, self.id))
if not self.preview:
self.updateMenus()
wm_map(self.top, maximized=self.app.opt.wm_maximized)
self.setCursor(cursor=self.app.top_cursor)
self.stats.update_time = time.time()
self.busy = old_busy
# wait for canvas is mapped
after(self.top, 200, self._configureHandler)
if TOOLKIT == 'gtk':
# FIXME
if self.top:
self.top.update_idletasks()
self.top.show_now()
self.startPlayTimer()
def restoreGameFromBookmark(self, bookmark):
old_busy, self.busy = self.busy, 1
file = BytesIO(bookmark)
p = Unpickler(file)
game = self._undumpGame(p, self.app)
assert game.id == self.id
self.restoreGame(game, reset=0)
destruct(game)
self.busy = old_busy
def resetGame(self):
self.hints.list = None
self.s.talon.removeAllCards()
for stack in self.allstacks:
stack.resetGame()
if TOOLKIT == 'gtk':
# FIXME (pyramid like games)
stack.group.tkraise()
if self.preview <= 1:
for t in (self.texts.score, self.texts.base_rank,):
if t:
t.config(text="")
def nextGameFlags(self, id, random=None):
f = 0
if id != self.id:
f |= 1
if self.app.nextgame.cardset is not self.app.cardset:
f |= 2
if random is not None:
if ((random.__class__ is not self.random.__class__) or
random.initial_seed != self.random.initial_seed):
f |= 16
return f
# quit to outer mainloop in class App, possibly restarting
# with another game from there
def quitGame(self, id=0, random=None, loadedgame=None,
startdemo=0, bookmark=0, holdgame=0):
self.updateTime()
if bookmark:
id, random = self.id, self.random
f = BytesIO()
self._dumpGame(Pickler(f, 1), bookmark=1)
self.app.nextgame.bookmark = f.getvalue()
if id > 0:
self.setCursor(cursor=CURSOR_WATCH)
self.app.nextgame.id = id
self.app.nextgame.random = random
self.app.nextgame.loadedgame = loadedgame
self.app.nextgame.startdemo = startdemo
self.app.nextgame.holdgame = holdgame
self.updateStatus(time=None, moves=None, gamenumber=None, stats=None)
self.top.mainquit()
# This should be called directly before newGame(),
# restoreGame(), restoreGameFromBookmark() and quitGame().
def endGame(self, restart=0, bookmark=0, holdgame=0):
if self.preview:
return
self.app.wm_save_state()
if self.pause:
self.doPause()
if holdgame:
return
if bookmark:
return
if restart:
if self.moves.index > 0 and self.getPlayerMoves() > 0:
self.gstats.restarted += 1
return
self.updateStats()
stats = self.app.stats
if self.shallUpdateBalance():
b = self.getGameBalance()
if b:
stats.total_balance[self.id] = \
stats.total_balance.get(self.id, 0) + b
stats.session_balance[self.id] = \
stats.session_balance.get(self.id, 0) + b
stats.gameid_balance = stats.gameid_balance + b
def restartGame(self):
self.endGame(restart=1)
self.newGame(restart=1, random=self.random)
def resizeImages(self, manually=False):
self.center_offset = (0, 0)
if self.canvas.winfo_ismapped():
# apparent size of canvas
vw = self.canvas.winfo_width()
vh = self.canvas.winfo_height()
else:
            # we don't have the real size of the canvas yet
            # (winfo_width / winfo_reqwidth),
            # so we use a saved size
vw, vh = self.app.opt.game_geometry
if not vw:
# first run of the game
return 1, 1, 1, 1
# requested size of canvas (createGame -> setSize)
iw, ih = self.init_size
# resizing images and cards
if (self.app.opt.auto_scale or
(self.app.opt.spread_stacks and not manually)):
# calculate factor of resizing
xf = float(vw)/iw
yf = float(vh)/ih
if (self.app.opt.preserve_aspect_ratio
and not self.app.opt.spread_stacks):
xf = yf = min(xf, yf)
else:
xf, yf = self.app.opt.scale_x, self.app.opt.scale_y
self.center_offset = self.getCenterOffset(vw, vh, iw, ih, xf, yf)
if (not self.app.opt.spread_stacks or manually):
# images
self.app.images.resize(xf, yf)
# cards
for card in self.cards:
card.update(card.id, card.deck, card.suit, card.rank, self)
return xf, yf, self.app.images._xfactor, self.app.images._yfactor
def getCenterOffset(self, vw, vh, iw, ih, xf, yf):
if (not self.app.opt.center_layout or self.app.opt.spread_stacks or
(self.app.opt.auto_scale and not
self.app.opt.preserve_aspect_ratio)):
return 0, 0
if ((vw > iw and vh > ih) or self.app.opt.auto_scale):
return (vw / xf - iw) / 2, (vh / yf - ih) / 2
elif (vw >= iw and vh < ih):
return (vw / xf - iw) / 2, 0
elif (vw < iw and vh >= ih):
return 0, (vh / yf - ih) / 2
else:
return 0, 0
def resizeGame(self, card_size_manually=False):
# if self.busy:
# return
if not USE_PIL:
return
self.deleteStackDesc()
xf, yf, xf0, yf0 = \
self.resizeImages(manually=card_size_manually)
cw, ch = self.center_offset[0], self.center_offset[1]
for stack in self.allstacks:
if (self.app.opt.spread_stacks):
# Do not move Talons
# (because one would need to reposition
# 'empty cross' and 'redeal' figures)
                # But in that case, games whose talon is not placed in the
                # top-left corner will get it misplaced when auto_scale is
                # on (e.g. Suit Elevens)
                # => the player can fix that issue by setting auto_scale
                # to false
if stack is self.s.talon:
# stack.init_coord=(x, y)
if card_size_manually:
stack.resize(xf, yf0, widthpad=cw, heightpad=ch)
else:
stack.resize(xf0, yf0, widthpad=cw, heightpad=ch)
else:
stack.resize(xf, yf0, widthpad=cw, heightpad=ch)
else:
stack.resize(xf, yf, widthpad=cw, heightpad=ch)
stack.updatePositions()
self.regions.calc_info(xf, yf, widthpad=cw, heightpad=ch)
# texts
for t in ('info', 'help', 'misc', 'score', 'base_rank'):
init_coord = getattr(self.init_texts, t)
if init_coord:
item = getattr(self.texts, t)
x, y = int(round((init_coord[0] + cw) * xf)), \
int(round((init_coord[1] + ch) * yf))
self.canvas.coords(item, x, y)
for i in range(len(self.texts.list)):
init_coord = self.init_texts.list[i]
item = self.texts.list[i]
x, y = int(round((init_coord[0] + cw) * xf)), \
int(round((init_coord[1] + ch) * yf))
self.canvas.coords(item, x, y)
def createRandom(self, random):
if random is None:
if isinstance(self.random, PysolRandom):
state = self.random.getstate()
self.app.gamerandom.setstate(state)
# we want at least 17 digits
seed = self.app.gamerandom.randrange(
int('10000000000000000'),
PysolRandom.MAX_SEED
)
self.random = PysolRandom(seed)
self.random.origin = self.random.ORIGIN_RANDOM
else:
self.random = random
self.random.reset()
def enterState(self, state):
old_state = self.moves.state
if state < old_state:
self.moves.state = state
return old_state
def leaveState(self, old_state):
self.moves.state = old_state
def getSnapshot(self):
        # generate a snapshot id (a hash of the current card layout)
sn = []
for stack in self.allstacks:
s = []
for card in stack.cards:
s.append('%d%03d%d' % (card.suit, card.rank, card.face_up))
sn.append(''.join(s))
sn = '-'.join(sn)
# optimisation
sn = hash(sn)
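        # (hash() of a str is salted per interpreter process on Python 3,
        # so snapshot ids are not stable across runs)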
return sn
def createSnGroups(self):
# group stacks by class and cap
sg = {}
for s in self.allstacks:
for k in sg:
if s.__class__ is k.__class__ and \
s.cap.__dict__ == k.cap.__dict__:
g = sg[k]
g.append(s.id)
break
else:
# new group
sg[s] = [s.id]
sg = list(sg.values())
self.sn_groups = sg
def updateSnapshots(self):
sn = self.getSnapshot()
if sn in self.snapshots:
# self.updateStatus(snapshot=True)
pass
else:
self.snapshots.append(sn)
# self.updateStatus(snapshot=False)
# Create all cards for the game.
def createCards(self, progress=None):
gi = self.gameinfo
pstep = 0
if progress:
pstep = (100.0 - progress.percent) / gi.ncards
cards = []
id = [0]
x, y = self.s.talon.x, self.s.talon.y
for deck in range(gi.decks):
def _iter_ranks(ranks, suit):
for rank in ranks:
card = self._createCard(id[0], deck, suit, rank, x=x, y=y)
if card is None:
continue
cards.append(card)
id[0] += 1
if progress:
progress.update(step=pstep)
for suit in gi.suits:
_iter_ranks(gi.ranks, suit)
_iter_ranks(gi.trumps, len(gi.suits))
if progress:
progress.update(percent=100)
assert len(cards) == gi.ncards
return cards
def _createCard(self, id, deck, suit, rank, x, y):
return Card(id, deck, suit, rank, game=self, x=x, y=y)
# shuffle cards
def shuffle(self):
# get a fresh copy of the original game-cards
cards = list(self.cards)
# init random generator
if isinstance(self.random, LCRandom31):
cards = ms_rearrange(cards)
self.random.reset() # reset to initial seed
# shuffle
self.random.shuffle(cards)
# subclass hook
cards = self._shuffleHook(cards)
# finally add the shuffled cards to the Talon
for card in cards:
self.s.talon.addCard(card, update=0)
card.showBack(unhide=0)
# shuffle cards, but keep decks together
def shuffleSeparateDecks(self):
cards = []
self.random.reset()
n = self.gameinfo.ncards // self.gameinfo.decks
for deck in range(self.gameinfo.decks):
i = deck * n
deck_cards = list(self.cards)[i:i+n]
self.random.shuffle(deck_cards)
cards.extend(deck_cards)
cards = self._shuffleHook(cards)
for card in cards:
self.s.talon.addCard(card, update=0)
card.showBack(unhide=0)
# subclass overrideable (must use self.random)
def _shuffleHook(self, cards):
return cards
# utility for use by subclasses
def _shuffleHookMoveToTop(self, cards, func, ncards=999999):
# move cards to top of the Talon (i.e. first cards to be dealt)
cards, scards = self._shuffleHookMoveSorter(cards, func, ncards)
return cards + scards
def _shuffleHookMoveToBottom(self, cards, func, ncards=999999):
# move cards to bottom of the Talon (i.e. last cards to be dealt)
cards, scards = self._shuffleHookMoveSorter(cards, func, ncards)
return scards + cards
def _shuffleHookMoveSorter(self, cards, cb, ncards):
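        # cb(card) must return a (select, sort_key) pair: selected cards
        # are extracted and returned as a second list, ordered by
        # descending sort_key; unselected cards keep their shuffled order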
extracted, i, new = [], len(cards), []
for c in cards:
select, ord_ = cb(c)
if select:
extracted.append((ord_, i, c))
if len(extracted) >= ncards:
new += cards[(len(cards)-i+1):]
break
else:
new.append(c)
i -= 1
return new, [x[2] for x in reversed(sorted(extracted))]
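    # Illustrative sketch (hypothetical, editorial addition): a game that
    # wants all aces dealt first could override the hook as
    #
    #   def _shuffleHook(self, cards):
    #       # rank 0 is the ace in PySol's card model
    #       return self._shuffleHookMoveToTop(
    #           cards, lambda c: (c.rank == 0, c.suit))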
def _finishDrag(self):
if self.demo:
self.stopDemo()
if self.busy:
return 1
if self.drag.stack:
self.drag.stack.finishDrag()
return 0
def _cancelDrag(self, break_pause=True):
self.stopWinAnimation()
if self.demo:
self.stopDemo()
if break_pause and self.pause:
self.doPause()
self.interruptSleep()
self.deleteStackDesc()
if self.busy:
return 1
if self.drag.stack:
self.drag.stack.cancelDrag()
return 0
def updateMenus(self):
if not self.preview:
self.app.menubar.updateMenus()
def disableMenus(self):
if not self.preview:
self.app.menubar.disableMenus()
def _defaultHandler(self, event):
if not self.app:
return True # FIXME (GTK)
if not self.app.opt.mouse_undo:
return True
if self.pause:
self.app.menubar.mPause()
return True
if not self.event_handled and self.stopWinAnimation():
return True
self.interruptSleep()
if self.deleteStackDesc():
# delete piles descriptions
return True
if self.demo:
self.stopDemo()
return True
if not self.event_handled and self.drag.stack:
self.drag.stack.cancelDrag(event)
return True
return False # continue this event
def dropHandler(self, event):
if not self._defaultHandler(event) and not self.event_handled:
self.app.menubar.mDrop()
self.event_handled = False
return EVENT_PROPAGATE
def undoHandler(self, event):
if not self._defaultHandler(event) and not self.event_handled:
self.app.menubar.mUndo()
self.event_handled = False
return EVENT_PROPAGATE
def redoHandler(self, event):
if not self._defaultHandler(event) and not self.event_handled:
self.app.menubar.mRedo()
self.event_handled = False
return EVENT_PROPAGATE
def updateStatus(self, **kw):
if self.preview:
return
tb, sb = self.app.toolbar, self.app.statusbar
for k, v in six.iteritems(kw):
_updateStatus_process_key_val(tb, sb, k, v)
def _unmapHandler(self, event):
# pause game if root window has been iconified
if self.app and not self.pause:
self.app.menubar.mPause()
_resizeHandlerID = None
def _resizeHandler(self):
self._resizeHandlerID = None
self.resizeGame()
def _configureHandler(self, event=None):
if False: # if not USE_PIL:
return
if not self.app:
return
if not self.canvas:
return
if (not self.app.opt.auto_scale and
not self.app.opt.spread_stacks and
not self.app.opt.center_layout):
return
if self.preview:
return
if self._resizeHandlerID:
self.canvas.after_cancel(self._resizeHandlerID)
self._resizeHandlerID = self.canvas.after(250, self._resizeHandler)
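    # (_configureHandler/_resizeHandler debounce <Configure> events: each
    #  new event cancels the pending callback and re-arms a 250 ms timer,
    #  so resizeGame() runs once after resizing settles)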
def playSample(self, name, priority=0, loop=0):
if name.startswith('deal'):
sampleopt = 'deal'
elif name not in self.app.opt.sound_samples:
sampleopt = 'extra'
else:
sampleopt = name
if sampleopt in self.app.opt.sound_samples and \
not self.app.opt.sound_samples[sampleopt]:
return 0
if self.app.audio:
return self.app.audio.playSample(
name,
priority=priority,
loop=loop)
return 0
def stopSamples(self):
if self.app.audio:
self.app.audio.stopSamples()
def stopSamplesLoop(self):
if self.app.audio:
self.app.audio.stopSamplesLoop()
def startDealSample(self, loop=999999):
a = self.app.opt.animations
if a and not self.preview:
self.canvas.update_idletasks()
if self.app.audio and self.app.opt.sound:
if a in (1, 2, 3, 10):
self.playSample("deal01", priority=100, loop=loop)
elif a == 4:
self.playSample("deal04", priority=100, loop=loop)
elif a == 5:
self.playSample("deal08", priority=100, loop=loop)
def areYouSure(self, title=None, text=None, confirm=-1, default=0):
if TOOLKIT == 'kivy':
return True
if self.preview:
return True
if confirm < 0:
confirm = self.app.opt.confirm
if confirm:
if not title:
title = TITLE
if not text:
text = _("Discard current game?")
self.playSample("areyousure")
d = MfxMessageDialog(self.top, title=title, text=text,
bitmap="question",
strings=(_("&OK"), _("&Cancel")))
if d.status != 0 or d.button != 0:
return False
return True
def notYetImplemented(self):
MfxMessageDialog(self.top, title="Not yet implemented",
text="This function is\nnot yet implemented.",
bitmap="error")
# main animation method
def animatedMoveTo(self, from_stack, to_stack, cards, x, y,
tkraise=1, frames=-1, shadow=-1):
# available values of app.opt.animations:
# 0 - without animations
# 1 - very fast (without timer)
# 2 - fast (default)
# 3 - medium (2/3 of fast speed)
# 4 - slow (1/4 of fast speed)
# 5 - very slow (1/8 of fast speed)
# 10 - used internally in game preview
if self.app.opt.animations == 0 or frames == 0:
return
# init timer - need a high resolution for this to work
clock, delay, skip = None, 1, 1
if self.app.opt.animations >= 2:
clock = uclock
SPF = 0.15 / 8 # animation speed - seconds per frame
if frames < 0:
frames = 8
assert frames >= 2
if self.app.opt.animations == 3: # medium
frames *= 3
SPF /= 2
elif self.app.opt.animations == 4: # slow
frames *= 8
SPF /= 2
elif self.app.opt.animations == 5: # very slow
frames *= 16
SPF /= 2
elif self.app.opt.animations == 10:
# this is used internally in game preview to speed up
# the initial dealing
# if self.moves.state == self.S_INIT and frames > 4:
# frames //= 2
return
if shadow < 0:
shadow = self.app.opt.shadow
shadows = ()
# start animation
if TOOLKIT == 'kivy':
c0 = cards[0]
dx, dy = (x - c0.x), (y - c0.y)
for card in cards:
base = float(self.app.opt.animations)
duration = base*0.1
card.animatedMove(dx, dy, duration)
return
if tkraise:
for card in cards:
card.tkraise()
c0 = cards[0]
dx, dy = (x - c0.x) / float(frames), (y - c0.y) / float(frames)
tx, ty = 0, 0
i = 1
if clock:
starttime = clock()
while i < frames:
mx, my = int(round(dx * i)) - tx, int(round(dy * i)) - ty
tx, ty = tx + mx, ty + my
if i == 1 and shadow and from_stack:
# create shadows in the first frame
sx, sy = self.app.images.SHADOW_XOFFSET, \
self.app.images.SHADOW_YOFFSET
shadows = from_stack.createShadows(cards, sx, sy)
for s in shadows:
s.move(mx, my)
for card in cards:
card.moveBy(mx, my)
self.canvas.update_idletasks()
step = 1
if clock:
endtime = starttime + i*SPF
sleep = endtime - clock()
if delay and sleep >= 0.005:
# we're fast - delay
# print "Delay frame", i, sleep
usleep(sleep)
elif skip and sleep <= -0.75*SPF:
# we're slow - skip 1 or 2 frames
# print "Skip frame", i, sleep
step += 1
if frames > 4 and sleep < -1.5*SPF:
step += 1
# print i, step, mx, my; time.sleep(0.5)
i += step
# last frame: delete shadows, move card to final position
for s in shadows:
s.delete()
dx, dy = x - c0.x, y - c0.y
for card in cards:
card.moveBy(dx, dy)
self.canvas.update_idletasks()
def doAnimatedFlipAndMove(self, from_stack, to_stack=None, frames=-1):
if self.app.opt.animations == 0 or frames == 0:
return False
if not from_stack.cards:
return False
if TOOLKIT == 'gtk':
return False
if not Image:
return False
canvas = self.canvas
card = from_stack.cards[-1]
im1 = card._active_image._pil_image
if card.face_up:
im2 = card._back_image._pil_image
else:
im2 = card._face_image._pil_image
w, h = im1.size
id = card.item.id
SPF = 0.1/8 # animation speed - seconds per frame
frames = 4.0 # num frames for each step
if self.app.opt.animations == 3: # medium
SPF = 0.1/8
frames = 7.0
elif self.app.opt.animations == 4: # slow
SPF = 0.1/8
frames = 12.0
elif self.app.opt.animations == 5: # very slow
SPF = 0.1/8
frames = 24.0
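        # The flip illusion works in two passes: "step 1" below shrinks
        # the currently visible image horizontally to a sliver while
        # lifting it slightly; "step 2" grows the reverse side (im2)
        # back to full width while settling it at the destination.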
if to_stack is None:
x0, y0 = from_stack.getPositionFor(card)
x1, y1 = x0, y0
dest_x, dest_y = 0, 0
else:
x0, y0 = from_stack.getPositionFor(card)
x1, y1 = to_stack.getPositionForNextCard()
dest_x, dest_y = x1-x0, y1-y0
if dest_x == 0 and dest_y == 0:
# flip
# ascent_dx, ascent_dy = 0, self.app.images.SHADOW_YOFFSET/frames
ascent_dx, ascent_dy = 0, h/10.0/frames
min_size = w/10
shrink_dx = (w-min_size) / (frames-1)
shrink_dy = 0
elif dest_y == 0:
# move to left/right waste
# ascent_dx, ascent_dy = 0, self.app.images.SHADOW_YOFFSET/frames
ascent_dx, ascent_dy = 0, h/10.0/frames
min_size = w/10
shrink_dx = (w-min_size) / (frames-1)
shrink_dy = 0
elif dest_x == 0:
# move to top/bottom waste
if 0:
ascent_dx, ascent_dy = 0, h/10.0/frames
min_size = w/10
shrink_dx = (w-min_size) / (frames-1)
shrink_dy = 0
elif 0:
ascent_dx, ascent_dy = 0, 0
min_size = h/10
shrink_dx = 0
shrink_dy = (h-min_size) / (frames-1)
else:
return False
else:
# dest_x != 0 and dest_y != 0
return False
move_dx = dest_x / frames / 2
move_dy = dest_y / frames / 2
xpos, ypos = float(x0), float(y0)
card.tkraise()
# step 1
d_x = shrink_dx/2+move_dx-ascent_dx
d_y = shrink_dy/2+move_dy-ascent_dy
nframe = 0
while nframe < frames:
starttime = uclock()
# resize img
ww = w - nframe*shrink_dx
hh = h - nframe*shrink_dy
tmp = im1.resize((int(ww), int(hh)))
tk_tmp = ImageTk.PhotoImage(image=tmp)
canvas.itemconfig(id, image=tk_tmp)
# move img
xpos += d_x
ypos += d_y
card.moveTo(int(round(xpos)), int(round(ypos)))
canvas.update_idletasks()
nframe += 1
t = (SPF-(uclock()-starttime))*1000 # milliseconds
if t > 0:
usleep(t/1000)
# else:
# nframe += 1
# xpos += d_x
# ypos += d_y
# step 2
d_x = -shrink_dx/2+move_dx+ascent_dx
d_y = -shrink_dy/2+move_dy+ascent_dy
nframe = 0
while nframe < frames:
starttime = uclock()
# resize img
ww = w - (frames-nframe-1)*shrink_dx
hh = h - (frames-nframe-1)*shrink_dy
tmp = im2.resize((int(ww), int(hh)))
tk_tmp = ImageTk.PhotoImage(image=tmp)
canvas.itemconfig(id, image=tk_tmp)
# move img
xpos += d_x
ypos += d_y
card.moveTo(int(round(xpos)), int(round(ypos)))
canvas.update_idletasks()
nframe += 1
t = (SPF-(uclock()-starttime))*1000 # milliseconds
if t > 0:
usleep(t/1000)
# else:
# nframe += 1
# xpos += d_x
# ypos += d_y
card.moveTo(x1, y1)
# canvas.update_idletasks()
return True
def animatedFlip(self, stack):
if not self.app.opt.flip_animation:
return False
return self.doAnimatedFlipAndMove(stack)
def animatedFlipAndMove(self, from_stack, to_stack, frames=-1):
if not self.app.opt.flip_animation:
return False
return self.doAnimatedFlipAndMove(from_stack, to_stack, frames)
def winAnimationEvent(self):
# based on code from pygtk-demo
FRAME_DELAY = 80
CYCLE_LEN = 60
starttime = uclock()
images = self.win_animation.images
saved_images = self.win_animation.saved_images # cached images
canvas = self.canvas
canvas.delete(*self.win_animation.canvas_images)
self.win_animation.canvas_images = []
x0 = int(int(canvas.cget('width'))*(canvas.xview()[0]))
y0 = int(int(canvas.cget('height'))*(canvas.yview()[0]))
width, height = self.win_animation.width, self.win_animation.height
cw = self.canvas.winfo_width()
ch = self.canvas.winfo_height()
x0 -= (width-cw)/2
y0 -= (height-ch)/2
tmp_tk_images = []
raised_images = []
n_images = len(images)
xmid = width / 2.0
ymid = height / 2.0
radius = min(xmid, ymid) / 2.0
f = float(self.win_animation.frame_num % CYCLE_LEN) / float(CYCLE_LEN)
r = radius + (radius / 3.0) * math.sin(f * 2.0 * math.pi)
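        # The cards travel around a circle whose radius "breathes" with
        # the cycle phase f in [0, 1); each card is also scaled by a
        # factor k in [0.4, 1] derived from sin/cos of the phase, and
        # the larger (nearer) cards are raised above the smaller ones.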
img_index = 0
for im in images:
iw, ih = im.size
ang = 2.0 * math.pi * img_index / n_images - f * 2.0 * math.pi
xpos = x0 + int(xmid + r * math.cos(ang) - iw / 2.0)
ypos = y0 + int(ymid + r * math.sin(ang) - ih / 2.0)
k = (math.sin if img_index & 1 else math.cos)(f * 2.0 * math.pi)
k = max(0.4, k ** 2)
round_k = int(round(k*100))
if img_index not in saved_images:
saved_images[img_index] = {}
if round_k in saved_images[img_index]:
tk_tmp = saved_images[img_index][round_k]
else:
new_size = (int(iw*k), int(ih*k))
if round_k == 100:
tmp = im
else:
tmp = im.resize(new_size, resample=Image.BILINEAR)
tk_tmp = ImageTk.PhotoImage(image=tmp)
saved_images[img_index][round_k] = tk_tmp
id = canvas.create_image(xpos, ypos, image=tk_tmp, anchor='nw')
self.win_animation.canvas_images.append(id)
if k > 0.6:
raised_images.append(id)
tmp_tk_images.append(tk_tmp)
img_index += 1
for id in raised_images:
canvas.tag_raise(id)
self.win_animation.frame_num = \
(self.win_animation.frame_num+1) % CYCLE_LEN
self.win_animation.tk_images = tmp_tk_images
canvas.update_idletasks()
# loop
t = FRAME_DELAY-int((uclock()-starttime)*1000)
if t > 0:
self.win_animation.timer = after(canvas, t, self.winAnimationEvent)
else:
self.win_animation.timer = after_idle(
canvas,
self.winAnimationEvent)
def stopWinAnimation(self):
if self.win_animation.timer:
after_cancel(self.win_animation.timer) # stop loop
self.win_animation.timer = None
self.canvas.delete(*self.win_animation.canvas_images)
self.win_animation.canvas_images = []
self.win_animation.tk_images = [] # delete all images
            self.win_animation.saved_images = {}
self.canvas.showAllItems()
return True
return False
def winAnimation(self, perfect=0):
if self.preview:
return
if not self.app.opt.win_animation:
return
if TOOLKIT == 'gtk':
return
if not Image:
return
self.canvas.hideAllItems()
# select some random cards
cards = self.cards[:]
scards = []
ncards = min(10, len(cards))
for i in range(ncards):
c = self.app.miscrandom.choice(cards)
scards.append(c)
cards.remove(c)
for c in scards:
self.win_animation.images.append(c._face_image._pil_image)
# compute visible geometry
self.win_animation.width = self.canvas.winfo_width()
self.win_animation.height = self.canvas.winfo_height()
# run win animation in background
# after_idle(self.canvas, self.winAnimationEvent)
after(self.canvas, 200, self.winAnimationEvent)
return
def redealAnimation(self):
if self.preview:
return
if not self.app.opt.animations or not self.app.opt.redeal_animation:
return
cards = []
for s in self.allstacks:
if s is not self.s.talon:
for c in s.cards:
cards.append((c, s))
if not cards:
return
self.setCursor(cursor=CURSOR_WATCH)
self.top.busyUpdate()
self.canvas.update_idletasks()
old_a = self.app.opt.animations
if old_a == 0:
self.app.opt.animations = 1 # very fast
elif old_a == 3: # medium
self.app.opt.animations = 2 # fast
elif old_a == 4: # very slow
self.app.opt.animations = 3 # slow
# select some random cards
acards = []
scards = cards[:]
for i in range(8):
c, s = self.app.miscrandom.choice(scards)
if c not in acards:
acards.append(c)
scards.remove((c, s))
if not scards:
break
# animate
sx, sy = self.s.talon.x, self.s.talon.y
w, h = self.width, self.height
while cards:
# get and un-tuple a random card
t = self.app.miscrandom.choice(cards)
c, s = t
s.removeCard(c, update=0)
# animation
if c in acards or len(cards) <= 2:
self.animatedMoveTo(
s, None, [c], w//2, h//2, tkraise=0, shadow=0)
self.animatedMoveTo(s, None, [c], sx, sy, tkraise=0, shadow=0)
else:
c.moveTo(sx, sy)
cards.remove(t)
self.app.opt.animations = old_a
def sleep(self, seconds):
# if 0 and self.canvas:
# self.canvas.update_idletasks()
if seconds > 0:
if self.top:
self.top.interruptSleep()
self.top.sleep(seconds)
else:
time.sleep(seconds)
def interruptSleep(self):
if self.top:
self.top.interruptSleep()
def getCardFaceImage(self, deck, suit, rank):
return self.app.images.getFace(deck, suit, rank)
def getCardBackImage(self, deck, suit, rank):
return self.app.images.getBack()
def getCardShadeImage(self):
return self.app.images.getShade()
def _getClosestStack(self, cx, cy, stacks, dragstack):
closest, cdist = None, 999999999
# Since we only compare distances,
# we don't bother to take the square root.
for stack in stacks:
dist = (stack.x - cx)**2 + (stack.y - cy)**2
if dist < cdist:
closest, cdist = stack, dist
return closest
def getClosestStack(self, card, dragstack):
cx, cy = card.x, card.y
for stacks, rect in self.regions.info:
if cx >= rect[0] and cx < rect[2] \
and cy >= rect[1] and cy < rect[3]:
return self._getClosestStack(cx, cy, stacks, dragstack)
return self._getClosestStack(cx, cy, self.regions.remaining, dragstack)
# define a region for use in getClosestStack()
def setRegion(self, stacks, rect, priority=0):
assert len(stacks) > 0
assert len(rect) == 4 and rect[0] < rect[2] and rect[1] < rect[3]
if DEBUG >= 2:
xf, yf = self.app.images._xfactor, self.app.images._yfactor
MfxCanvasRectangle(self.canvas,
xf*rect[0], yf*rect[1], xf*rect[2], yf*rect[3],
width=2, fill=None, outline='red')
for s in stacks:
assert s and s in self.allstacks
# verify that the stack lies within the rectangle
r = rect
if USE_PIL:
x, y = s.init_coord
else:
x, y = s.x, s.y
assert r[0] <= x <= r[2] and r[1] <= y <= r[3]
# verify that the stack is not already in another region
# with the same priority
for d in self.regions.data:
if priority == d[0]:
assert s not in d[2]
# add to regions
self.regions.data.append(
(priority, -len(self.regions.data), tuple(stacks), tuple(rect)))
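    # Illustrative call from a game's createGame() (hypothetical
    # coordinates, not taken from this file):
    #   self.setRegion(self.s.rows, (-999, y0 - CH // 2, 999999, 999999))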
# as getClosestStack() is called within the mouse motion handler
# event it is worth optimizing a little bit
def optimizeRegions(self):
return self.regions.optimize(list(self.sg.openstacks))
def getInvisibleCoords(self):
# for InvisibleStack, etc
# x, y = -500, -500 - len(game.allstacks)
cardw, cardh = self.app.images.CARDW, self.app.images.CARDH
xoffset = self.app.images.CARD_XOFFSET
yoffset = self.app.images.CARD_YOFFSET
x = cardw + xoffset + self.canvas.xmargin
y = cardh + yoffset + self.canvas.ymargin
return -x-10, -y-10
#
# Game - subclass overridable actions - IMPORTANT FOR GAME LOGIC
#
# create the game (create stacks, texts, etc.)
def createGame(self):
raise SubclassResponsibility
# start the game (i.e. deal initial cards)
def startGame(self):
raise SubclassResponsibility
# can we deal cards ?
def canDealCards(self):
# default: ask the Talon
return self.s.talon and self.s.talon.canDealCards()
# deal cards - return number of cards dealt
def dealCards(self, sound=True):
# default: set state to deal and pass dealing to Talon
if self.s.talon and self.canDealCards():
self.finishMove()
old_state = self.enterState(self.S_DEAL)
n = self.s.talon.dealCards(sound=sound)
self.leaveState(old_state)
self.finishMove()
if not self.checkForWin():
self.autoPlay()
return n
return 0
# fill a stack if rules require it (e.g. Picture Gallery)
def fillStack(self, stack):
pass
# redeal cards (used in RedealTalonStack; all cards already in talon)
def redealCards(self):
pass
# the actual hint class (or None)
Hint_Class = DefaultHint
Solver_Class = None
Stuck_Class = None
def getHintClass(self):
return self.Hint_Class
def getStrictness(self):
return 0
def canSaveGame(self):
return True
def canLoadGame(self, version_tuple, game_version):
return self.GAME_VERSION == game_version
def canSetBookmark(self):
return self.canSaveGame()
def canUndo(self):
return True
def canRedo(self):
return self.canUndo()
# Mahjongg
def canShuffle(self):
return False
# game changed - i.e. should we ask the player to discard the game
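    # return values: 0 = nothing worth confirming, 1 = always ask (the
    # game was restarted or a bookmark was used), 2 = changed by play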
def changed(self, restart=False):
if self.gstats.updated < 0:
return 0 # already won or lost
# if self.gstats.loaded > 0:
# return 0 # loaded games account for no stats
if not restart:
if self.gstats.restarted > 0:
return 1 # game was restarted - always ask
if self.gstats.goto_bookmark_moves > 0:
return 1
if self.moves.index == 0 or self.getPlayerMoves() == 0:
return 0
return 2
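    # returns a tuple (won, status, updated) where status is
    # 0 = lost (hints or demo moves were used), 1 = won, 2 = perfect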
def getWinStatus(self):
won = self.isGameWon() != 0
if not won or self.stats.hints > 0 or self.stats.demo_moves > 0:
# sorry, you lose
return won, 0, self.U_LOST
if _stats__is_perfect(self.stats):
return won, 2, self.U_PERFECT
return won, 1, self.U_WON
# update statistics when a game was won/ended/canceled/...
def updateStats(self, demo=0):
if self.preview:
return ''
if not demo:
self.stopPlayTimer()
won, status, updated = self.getWinStatus()
if demo and self.getPlayerMoves() == 0:
if not self.stats.demo_updated:
# a pure demo game - update demo stats
self.stats.demo_updated = updated
self.app.stats.updateStats(None, self, won)
return ''
elif self.changed():
# must update player stats
self.gstats.updated = updated
if self.app.opt.update_player_stats:
ret = self.app.stats.updateStats(
self.app.opt.player, self, status)
self.updateStatus(
stats=self.app.stats.getStats(
self.app.opt.player, self.id))
top_msg = ''
if ret:
if ret[0] and ret[1]:
top_msg = _(
'\nYou have reached\n# %(timerank)d in the top ' +
'%(tops)d of playing time\nand # %(movesrank)d ' +
'in the top %(tops)d of moves.') % {
'timerank': ret[0],
'movesrank': ret[1],
'tops': TOP_SIZE}
elif ret[0]: # playing time
top_msg = _(
'\nYou have reached\n# %(timerank)d in the top ' +
'%(tops)d of playing time.') % {
'timerank': ret[0],
'tops': TOP_SIZE}
elif ret[1]: # moves
top_msg = _(
'\nYou have reached\n# %(movesrank)d in the top ' +
                        '%(tops)d of moves.') % {
'movesrank': ret[1],
'tops': TOP_SIZE}
return top_msg
elif not demo:
# only update the session log
if self.app.opt.update_player_stats:
if self.gstats.loaded:
self.app.stats.updateStats(self.app.opt.player, self, -2)
elif self.gstats.updated == 0 and self.stats.demo_updated == 0:
self.app.stats.updateStats(self.app.opt.player, self, -1)
return ''
def checkForWin(self):
won, status, updated = self.getWinStatus()
if not won:
return False
self.finishMove() # just in case
if self.preview:
return True
if self.finished:
return True
if self.demo:
return status
if TOOLKIT == 'kivy':
if not self.app.opt.display_win_message:
return True
self.top.waitAnimation()
if status == 2:
top_msg = self.updateStats()
time = self.getTime()
self.finished = True
self.playSample("gameperfect", priority=1000)
self.winAnimation(perfect=1)
text = ungettext('Your playing time is %(time)s\nfor %(n)d move.',
'Your playing time is %(time)s\nfor %(n)d moves.',
self.moves.index)
text = text % {'time': time, 'n': self.moves.index}
congrats = _('Congratulations, this\nwas a truly perfect game!')
d = MfxMessageDialog(
self.top, title=_("Game won"),
text='\n' + congrats + '\n\n' + text + '\n' + top_msg + '\n',
strings=(_("&New game"), None, _("&Back to game"),
_("&Cancel")),
image=self.app.gimages.logos[5])
elif status == 1:
top_msg = self.updateStats()
time = self.getTime()
self.finished = True
self.playSample("gamewon", priority=1000)
self.winAnimation()
text = ungettext('Your playing time is %(time)s\nfor %(n)d move.',
'Your playing time is %(time)s\nfor %(n)d moves.',
self.moves.index)
text = text % {'time': time, 'n': self.moves.index}
congrats = _('Congratulations, you did it!')
d = MfxMessageDialog(
self.top, title=_("Game won"),
text='\n' + congrats + '\n\n' + text + '\n' + top_msg + '\n',
strings=(_("&New game"), None, _("&Back to game"),
_("&Cancel")),
image=self.app.gimages.logos[4])
elif self.gstats.updated < 0:
self.finished = True
self.playSample("gamefinished", priority=1000)
d = MfxMessageDialog(
self.top, title=_("Game finished"), bitmap="info",
text=_("\nGame finished\n"),
strings=(_("&New game"), None, None, _("&Close")))
else:
self.finished = True
self.playSample("gamelost", priority=1000)
d = MfxMessageDialog(
self.top, title=_("Game finished"), bitmap="info",
text=_("\nGame finished, but not without my help...\n"),
strings=(_("&New game"), _("&Restart"), None, _("&Cancel")))
self.updateMenus()
if TOOLKIT == 'kivy':
return True
if d.status == 0 and d.button == 0:
# new game
self.endGame()
self.newGame()
elif d.status == 0 and d.button == 1:
# restart game
self.restartGame()
elif d.status == 0 and d.button == 2:
self.stopWinAnimation()
return True
#
# Game - subclass overridable methods (but usually not)
#
def isGameWon(self):
# default: all Foundations must be filled
return sum([len(s.cards) for s in self.s.foundations]) == \
len(self.cards)
def getFoundationDir(self):
for s in self.s.foundations:
if len(s.cards) >= 2:
return s.getRankDir()
return 0
# determine the real number of player_moves
def getPlayerMoves(self):
return self.stats.player_moves
def updateTime(self):
if self.finished or self.pause:
return
t = time.time()
d = t - self.stats.update_time
if d > 0:
self.stats.elapsed_time += d
self.gstats.total_elapsed_time += d
self.stats.update_time = t
def getTime(self):
self.updateTime()
t = int(self.stats.elapsed_time)
return format_time(t)
#
# Game - subclass overridable intelligence
#
def getAutoStacks(self, event=None):
# returns (flipstacks, dropstacks, quickplaystacks)
# default: sg.dropstacks
return (self.sg.dropstacks, self.sg.dropstacks, self.sg.dropstacks)
# handles autofaceup, autodrop and autodeal
def autoPlay(self, autofaceup=-1, autodrop=-1, autodeal=-1, sound=True):
if self.demo:
return 0
old_busy, self.busy = self.busy, 1
if autofaceup < 0:
autofaceup = self.app.opt.autofaceup
if autodrop < 0:
autodrop = self.app.opt.autodrop
if autodeal < 0:
autodeal = self.app.opt.autodeal
moves = self.stats.total_moves
n = self._autoPlay(autofaceup, autodrop, autodeal, sound=sound)
self.finishMove()
self.stats.autoplay_moves += (self.stats.total_moves - moves)
self.busy = old_busy
return n
def _autoPlay(self, autofaceup, autodrop, autodeal, sound):
flipstacks, dropstacks, quickstacks = self.getAutoStacks()
done_something = 1
while done_something:
done_something = 0
# a) flip top cards face-up
if autofaceup and flipstacks:
for s in flipstacks:
if s.canFlipCard():
if sound:
self.playSample("autoflip", priority=5)
# ~s.flipMove()
s.flipMove(animation=True)
done_something = 1
# each single flip is undo-able unless opt.autofaceup
self.finishMove()
if self.checkForWin():
return 1
# b) drop cards
if autodrop and dropstacks:
for s in dropstacks:
to_stack, ncards = s.canDropCards(self.s.foundations)
if to_stack:
# each single drop is undo-able (note that this call
# is before the actual move)
self.finishMove()
if sound:
self.playSample("autodrop", priority=30)
s.moveMove(ncards, to_stack)
done_something = 1
if self.checkForWin():
return 1
# c) deal
if autodeal:
if self._autoDeal(sound=sound):
done_something = 1
self.finishMove()
if self.checkForWin():
return 1
return 0
def _autoDeal(self, sound=True):
# default: deal a card to the waste if the waste is empty
w = self.s.waste
if w and len(w.cards) == 0 and self.canDealCards():
return self.dealCards(sound=sound)
return 0
def autoDrop(self, autofaceup=-1):
old_a = self.app.opt.animations
if old_a == 3: # medium
self.app.opt.animations = 2 # fast
self.autoPlay(autofaceup=autofaceup, autodrop=1)
self.app.opt.animations = old_a
# for find_card_dialog
def highlightCard(self, suit, rank):
if not self.app:
return None
col = self.app.opt.colors['samerank_1']
info = []
for s in self.allstacks:
for c in s.cards:
if c.suit == suit and c.rank == rank:
if s.basicShallHighlightSameRank(c):
info.append((s, c, c, col))
return self._highlightCards(info, 0)
# highlight all moveable piles
def getHighlightPilesStacks(self):
# default: dropstacks with min pile length = 2
if self.sg.hp_stacks:
return ((self.sg.hp_stacks, 2),)
return ()
def _highlightCards(self, info, sleep=1.5, delta=(1, 1, 1, 1)):
if not info:
return 0
if self.pause:
return 0
self.stopWinAnimation()
cw, ch = self.app.images.getSize()
items = []
for s, c1, c2, color in info:
items.append(
_highlightCards__calc_item(
self.canvas, delta, cw, ch, s, c1, c2, color))
if not items:
return 0
self.canvas.update_idletasks()
if sleep:
self.sleep(sleep)
items.reverse()
for r in items:
r.delete()
self.canvas.update_idletasks()
return EVENT_HANDLED
else:
# remove items later (find_card_dialog)
return items
def highlightNotMatching(self):
if self.demo:
return
if not self.app.opt.highlight_not_matching:
return
# compute visible geometry
x = int(int(self.canvas.cget('width'))*(self.canvas.xview()[0]))
y = int(int(self.canvas.cget('height'))*(self.canvas.yview()[0]))
w, h = self.canvas.winfo_width(), self.canvas.winfo_height()
color = self.app.opt.colors['not_matching']
width = 6
xmargin, ymargin = self.canvas.xmargin, self.canvas.ymargin
if self.preview:
width = 4
xmargin, ymargin = 0, 0
x0, y0 = x+width//2-xmargin, y+width//2-ymargin
x1, y1 = x+w-width//2-xmargin, y+h-width//2-ymargin
r = MfxCanvasRectangle(self.canvas, x0, y0, x1, y1,
width=width, fill=None, outline=color)
if TOOLKIT == "kivy":
r.canvas.canvas.ask_update()
r.delete_deferred(self.app.opt.timeouts['highlight_cards'])
return
self.canvas.update_idletasks()
self.sleep(self.app.opt.timeouts['highlight_cards'])
r.delete()
self.canvas.update_idletasks()
def highlightPiles(self, sleep=1.5):
stackinfo = self.getHighlightPilesStacks()
if not stackinfo:
self.highlightNotMatching()
return 0
col = self.app.opt.colors['piles']
hi = []
for si in stackinfo:
for s in si[0]:
pile = s.getPile()
if pile and len(pile) >= si[1]:
hi.append((s, pile[0], pile[-1], col))
if not hi:
self.highlightNotMatching()
return 0
return self._highlightCards(hi, sleep)
#
# highlight matching cards
#
def shallHighlightMatch(self, stack1, card1, stack2, card2):
return False
def _shallHighlightMatch_AC(self, stack1, card1, stack2, card2):
# by alternate color
return card1.color != card2.color and abs(card1.rank-card2.rank) == 1
def _shallHighlightMatch_ACW(self, stack1, card1, stack2, card2):
# by alternate color with wrapping (only for french games)
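        # (wrapping: a King, rank 12, also matches an Ace, rank 0)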
return (card1.color != card2.color and
((card1.rank + 1) % 13 == card2.rank or
(card2.rank + 1) % 13 == card1.rank))
def _shallHighlightMatch_SS(self, stack1, card1, stack2, card2):
# by same suit
return card1.suit == card2.suit and abs(card1.rank-card2.rank) == 1
def _shallHighlightMatch_SSW(self, stack1, card1, stack2, card2):
# by same suit with wrapping (only for french games)
return (card1.suit == card2.suit and
((card1.rank + 1) % 13 == card2.rank or
(card2.rank + 1) % 13 == card1.rank))
def _shallHighlightMatch_RK(self, stack1, card1, stack2, card2):
# by rank
return abs(card1.rank-card2.rank) == 1
def _shallHighlightMatch_RKW(self, stack1, card1, stack2, card2):
# by rank with wrapping (only for french games)
return ((card1.rank + 1) % 13 == card2.rank or
(card2.rank + 1) % 13 == card1.rank)
def _shallHighlightMatch_BO(self, stack1, card1, stack2, card2):
# by any suit but own
return card1.suit != card2.suit and abs(card1.rank-card2.rank) == 1
def _shallHighlightMatch_BOW(self, stack1, card1, stack2, card2):
# by any suit but own with wrapping (only for french games)
return (card1.suit != card2.suit and
((card1.rank + 1) % 13 == card2.rank or
(card2.rank + 1) % 13 == card1.rank))
def _shallHighlightMatch_SC(self, stack1, card1, stack2, card2):
# by same color
return card1.color == card2.color and abs(card1.rank-card2.rank) == 1
def _shallHighlightMatch_SCW(self, stack1, card1, stack2, card2):
# by same color with wrapping (only for french games)
return (card1.color == card2.color and
((card1.rank + 1) % 13 == card2.rank or
(card2.rank + 1) % 13 == card1.rank))
def getQuickPlayScore(self, ncards, from_stack, to_stack):
if to_stack in self.s.reserves:
# if to_stack in reserves prefer empty stack
# return 1000 - len(to_stack.cards)
return 1000 - int(len(to_stack.cards) != 0)
# prefer non-empty piles in to_stack
return 1001 + int(len(to_stack.cards) != 0)
def _getSpiderQuickPlayScore(self, ncards, from_stack, to_stack):
if to_stack in self.s.reserves:
# if to_stack in reserves prefer empty stack
return 1000-len(to_stack.cards)
# for spider-type stacks
if to_stack.cards:
# check suit
same_suit = (from_stack.cards[-ncards].suit ==
to_stack.cards[-1].suit)
return int(same_suit)+1002
return 1001
#
# Score (I really don't like scores in Patience games...)
#
# update game-related canvas texts (i.e. self.texts)
def updateText(self):
pass
def getGameScore(self):
return None
# casino type scoring
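    # (start at -N for the N cards in play; each card moved to a
    #  foundation earns 5 points, so e.g. a fully played 52-card
    #  deck scores 4 * 52 = 208)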
def getGameScoreCasino(self):
v = -len(self.cards)
for s in self.s.foundations:
v = v + 5 * len(s.cards)
return v
def shallUpdateBalance(self):
# Update the balance unless this is a loaded game or
# a manually selected game number.
if self.gstats.loaded:
return False
if self.random.origin == self.random.ORIGIN_SELECTED:
return False
return True
def getGameBalance(self):
return 0
# compute all hints for the current position
# this is the only method that actually uses class Hint
def getHints(self, level, taken_hint=None):
if level == 3:
# if self.solver is None:
# return None
return self.solver.getHints(taken_hint)
hint_class = self.getHintClass()
if hint_class is None:
return None
hint = hint_class(self, level) # call constructor
return hint.getHints(taken_hint) # and return all hints
# give a hint
def showHint(self, level=0, sleep=1.5, taken_hint=None):
if self.getHintClass() is None:
self.highlightNotMatching()
return None
# reset list if level has changed
if level != self.hints.level:
self.hints.level = level
self.hints.list = None
# compute all hints
if self.hints.list is None:
self.hints.list = self.getHints(level, taken_hint)
# print self.hints.list
self.hints.index = 0
# get next hint from list
if not self.hints.list:
self.highlightNotMatching()
return None
h = self.hints.list[self.hints.index]
self.hints.index = self.hints.index + 1
if self.hints.index >= len(self.hints.list):
self.hints.index = 0
# paranoia - verify hint
score, pos, ncards, from_stack, to_stack, text_color, forced_move = h
assert from_stack and len(from_stack.cards) >= ncards
if ncards == 0:
# a deal move, should not happen with level=0/1
assert level >= 2
assert from_stack is self.s.talon
return h
elif from_stack == to_stack:
# a flip move, should not happen with level=0/1
assert level >= 2
assert ncards == 1 and len(from_stack.cards) >= ncards
return h
else:
# a move move
assert to_stack
assert 1 <= ncards <= len(from_stack.cards)
if DEBUG:
if not to_stack.acceptsCards(
from_stack, from_stack.cards[-ncards:]):
print('*fail accepts cards*', from_stack, to_stack, ncards)
if not from_stack.canMoveCards(from_stack.cards[-ncards:]):
print('*fail move cards*', from_stack, ncards)
# assert from_stack.canMoveCards(from_stack.cards[-ncards:])
# FIXME: Pyramid
assert to_stack.acceptsCards(
from_stack, from_stack.cards[-ncards:])
if sleep <= 0.0:
return h
info = (level == 1) or (level > 1 and DEBUG)
if info and self.app.statusbar and self.app.opt.statusbar:
self.app.statusbar.configLabel(
"info", text=_("Score %6d") % (score), fg=text_color)
else:
info = 0
self.drawHintArrow(from_stack, to_stack, ncards, sleep)
if info:
self.app.statusbar.configLabel("info", text="", fg="#000000")
return h
def drawHintArrow(self, from_stack, to_stack, ncards, sleep):
# compute position for arrow
images = self.app.images
x1, y1 = from_stack.getPositionFor(from_stack.cards[-ncards])
x2, y2 = to_stack.getPositionFor(to_stack.getCard())
cw, ch = images.getSize()
dx, dy = images.getDelta()
x1, y1 = x1 + dx, y1 + dy
x2, y2 = x2 + dx, y2 + dy
if ncards == 1:
x1 += cw // 2
y1 += ch // 2
elif from_stack.CARD_XOFFSET[0]:
x1 += from_stack.CARD_XOFFSET[0] // 2
y1 += ch // 2
else:
x1 += cw // 2
y1 += from_stack.CARD_YOFFSET[0] // 2
x2 += cw // 2
y2 += ch // 2
# draw the hint
arrow = MfxCanvasLine(self.canvas, x1, y1, x2, y2, width=7,
fill=self.app.opt.colors['hintarrow'],
arrow="last", arrowshape=(30, 30, 10))
self.canvas.update_idletasks()
# wait
if TOOLKIT == "kivy":
arrow.delete_deferred(sleep)
return
# wait
self.sleep(sleep)
# delete the hint
if arrow is not None:
arrow.delete()
self.canvas.update_idletasks()
#
# Demo - uses showHint()
#
def startDemo(self, mixed=1, level=2):
assert level >= 2 # needed for flip/deal hints
if not self.top:
return
self.demo = Struct(
level=level,
mixed=mixed,
sleep=self.app.opt.timeouts['demo'],
last_deal=[],
snapshots=[],
hint=None,
keypress=None,
start_demo_moves=self.stats.demo_moves,
info_text=None,
)
self.hints.list = None
self.createDemoInfoText()
self.createDemoLogo()
after_idle(self.top, self.demoEvent) # schedule first move
def stopDemo(self, event=None):
if not self.demo:
return
self.canvas.setTopImage(None)
self.demo_logo = None
self.demo = None
self.updateMenus()
# demo event - play one demo move and check for win/loss
def demoEvent(self):
# note: other events are allowed to stop self.demo at any time
if not self.demo or self.demo.keypress:
self.stopDemo()
# self.updateMenus()
return
finished = self.playOneDemoMove(self.demo)
self.finishMove()
self.top.update_idletasks()
self.hints.list = None
player_moves = self.getPlayerMoves()
d, status = None, 0
bitmap = "info"
timeout = 10000
        if player_moves == 0:
timeout = 5000
if self.demo and self.demo.level == 3:
timeout = 0
if self.isGameWon():
self.updateTime()
finished = 1
self.finished = True
self.stopPlayTimer()
if not self.top.winfo_ismapped():
status = 2
elif player_moves == 0:
self.playSample("autopilotwon", priority=1000)
s = self.app.miscrandom.choice((_("&Great"), _("&Cool"),
_("&Yeah"), _("&Wow")))
text = ungettext('\nGame solved in %d move.\n',
'\nGame solved in %d moves.\n',
self.moves.index)
text = text % self.moves.index
d = MfxMessageDialog(self.top,
title=_("%s Autopilot") % TITLE,
text=text,
image=self.app.gimages.logos[4],
strings=(s,),
separator=True,
timeout=timeout)
status = d.status
else:
# s = self.app.miscrandom.choice((_("&OK"), _("&OK")))
s = _("&OK")
text = _("\nGame finished\n")
if DEBUG:
text += "\nplayer_moves: %d\ndemo_moves: %d\n" % \
(self.stats.player_moves, self.stats.demo_moves)
d = MfxMessageDialog(self.top,
title=_("%s Autopilot") % TITLE,
text=text, bitmap=bitmap, strings=(s,),
padx=30, timeout=timeout)
status = d.status
elif finished:
# self.stopPlayTimer()
if not self.top.winfo_ismapped():
status = 2
else:
if player_moves == 0:
self.playSample("autopilotlost", priority=1000)
s = self.app.miscrandom.choice(
(_("&Oh well"), _("&That's life"), _("&Hmm")))
# ??? accelerators
d = MfxMessageDialog(self.top,
title=_("%s Autopilot") % TITLE,
text=_("\nThis won't come out...\n"),
bitmap=bitmap, strings=(s,),
padx=30, timeout=timeout)
status = d.status
if finished:
self.updateStats(demo=1)
if not DEBUG and self.demo and status == 2:
# timeout in dialog
if self.stats.demo_moves > self.demo.start_demo_moves:
# we only increase the splash-screen counter if the last
# demo actually made a move
self.app.demo_counter += 1
if self.app.demo_counter % 3 == 0:
if self.top.winfo_ismapped():
status = help_about(self.app, timeout=10000)
if self.demo and status == 2:
# timeout in dialog - start another demo
demo = self.demo
id = self.id
            if demo.mixed and DEBUG:
# debug - advance game id to make sure we hit all games
gl = self.app.gdb.getGamesIdSortedById()
# gl = self.app.gdb.getGamesIdSortedByName()
gl = list(gl)
index = (gl.index(self.id) + 1) % len(gl)
id = gl[index]
elif demo.mixed:
# choose a random game
gl = self.app.gdb.getGamesIdSortedById()
while len(gl) > 1:
id = self.app.getRandomGameId()
                    if id != self.id:  # force change of game
break
if self.nextGameFlags(id) == 0:
self.endGame()
self.newGame(autoplay=0)
self.startDemo(mixed=demo.mixed)
else:
self.endGame()
self.stopDemo()
self.quitGame(id, startdemo=1)
else:
self.stopDemo()
if DEBUG >= 10:
# debug - only for testing winAnimation()
self.endGame()
self.winAnimation()
self.newGame()
else:
# game not finished yet
self.top.busyUpdate()
if self.demo:
after_idle(self.top, self.demoEvent) # schedule next move
# play one demo move while in the demo event
def playOneDemoMove(self, demo):
if self.moves.index > 2000:
# we're probably looping because of some bug in the hint code
return 1
sleep = demo.sleep
# first try to deal cards to the Waste (unless there was a forced move)
if not demo.hint or not demo.hint[6]:
if self._autoDeal(sound=False):
return 0
# display a hint
h = self.showHint(demo.level, sleep, taken_hint=demo.hint)
demo.hint = h
if not h:
return 1
# now actually play the hint
score, pos, ncards, from_stack, to_stack, text_color, forced_move = h
if ncards == 0:
# a deal-move
# do not let games like Klondike and Canfield deal forever
if self.dealCards() == 0:
return 1
if 0: # old version, based on dealing card
c = self.s.talon.getCard()
if c in demo.last_deal:
# We went through the whole Talon. Give up.
return 1
# Note that `None' is a valid entry in last_deal[]
# (this means that all cards are on the Waste).
demo.last_deal.append(c)
else: # new version, based on snapshots
# check snapshot
sn = self.getSnapshot()
if sn in demo.snapshots:
# not unique
return 1
demo.snapshots.append(sn)
elif from_stack == to_stack:
# a flip-move
from_stack.flipMove(animation=True)
demo.last_deal = []
else:
# a move-move
from_stack.moveMove(ncards, to_stack, frames=-1)
demo.last_deal = []
return 0
def createDemoInfoText(self):
        # TODO - the text placement is not fully correct yet
if DEBUG:
self.showHelp('help', self.getDemoInfoText())
return
if not self.demo or self.demo.info_text or self.preview:
return
tinfo = [
("sw", 8, self.height - 8),
("se", self.width - 8, self.height - 8),
("nw", 8, 8),
("ne", self.width - 8, 8),
]
ta = self.getDemoInfoTextAttr(tinfo)
if ta:
# font = self.app.getFont("canvas_large")
font = self.app.getFont("default")
self.demo.info_text = MfxCanvasText(self.canvas, ta[1], ta[2],
anchor=ta[0], font=font,
text=self.getDemoInfoText())
def getDemoInfoText(self):
        h = 'None' if self.Hint_Class is None else self.Hint_Class.__name__
return '%s (%s)' % (self.gameinfo.short_name, h)
def getDemoInfoTextAttr(self, tinfo):
items1, items2 = [], []
for s in self.allstacks:
if s.is_visible:
items1.append(s)
items1.extend(list(s.cards))
if not s.cards and s.cap.max_accept > 0:
items2.append(s)
else:
items2.extend(list(s.cards))
ti = self.__checkFreeSpaceForDemoInfoText(items1)
if ti < 0:
ti = self.__checkFreeSpaceForDemoInfoText(items2)
if ti < 0:
return None
return tinfo[ti]
def __checkFreeSpaceForDemoInfoText(self, items):
CW, CH = self.app.images.CARDW, self.app.images.CARDH
# note: these are translated by (-CW/2, -CH/2)
x1, x2 = 3*CW//2, self.width - 5*CW//2
y1, y2 = CH//2, self.height - 3*CH//2
#
m = [1, 1, 1, 1]
for c in items:
cx, cy = c.x, c.y
if cy >= y2:
if cx <= x1:
m[0] = 0
elif cx >= x2:
m[1] = 0
elif cy <= y1:
if cx <= x1:
m[2] = 0
elif cx >= x2:
m[3] = 0
        for i, mm in enumerate(m):
            if mm:
                return i
return -1
def createDemoLogo(self):
if not self.app.gimages.demo:
return
if self.demo_logo or not self.app.opt.demo_logo:
return
if self.width <= 100 or self.height <= 100:
return
# self.demo_logo = self.app.miscrandom.choice(self.app.gimages.demo)
n = self.random.initial_seed % len(self.app.gimages.demo)
self.demo_logo = self.app.gimages.demo[int(n)]
self.canvas.setTopImage(self.demo_logo)
def getStuck(self):
h = self.Stuck_Class.getHints(None)
if h:
self.failed_snapshots = []
return True
if not self.canDealCards():
return False
# can deal cards: do we have any hints in previous deals ?
sn = self.getSnapshot()
if sn in self.failed_snapshots:
return False
self.failed_snapshots.append(sn)
return True
def updateStuck(self):
# stuck
if self.finished:
return
if self.Stuck_Class is None:
return
if self.getStuck():
text = ''
else:
text = 'x'
# self.playSample("autopilotlost", priority=1000)
self.updateStatus(stuck=text)
#
# Handle moves (with move history for undo/redo)
# Actual move is handled in a subclass of AtomicMove.
#
# Note:
# All playing moves (user actions, demo games) must get routed
# to Stack.moveMove() because the stack may add important
# triggers to a move (most notably fillStack and updateModel).
#
# Only low-level game (Game.startGame, Game.dealCards, Game.fillStack)
# or stack methods (Stack.moveMove) should call the functions below
# directly.
#
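    # A single user move may thus record several atomic moves in one
    # history entry, e.g. a moveMove() followed by a fillStack()
    # triggered flipMove(), all closed by one finishMove() call.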
def startMoves(self):
self.moves = GameMoves()
self.stats._reset_statistics()
def __storeMove(self, am):
if self.S_DEAL <= self.moves.state <= self.S_PLAY:
self.moves.current.append(am)
# move type 1
def moveMove(self, ncards, from_stack, to_stack, frames=-1, shadow=-1):
assert from_stack and to_stack and from_stack is not to_stack
assert 0 < ncards <= len(from_stack.cards)
am = AMoveMove(ncards, from_stack, to_stack, frames, shadow)
self.__storeMove(am)
am.do(self)
self.hints.list = None
# move type 2
def flipMove(self, stack):
assert stack
am = AFlipMove(stack)
self.__storeMove(am)
am.do(self)
self.hints.list = None
def singleFlipMove(self, stack):
# flip with animation (without "moveMove" in this move)
assert stack
am = ASingleFlipMove(stack)
self.__storeMove(am)
am.do(self)
self.hints.list = None
def flipAndMoveMove(self, from_stack, to_stack, frames=-1):
assert from_stack and to_stack and (from_stack is not to_stack)
am = AFlipAndMoveMove(from_stack, to_stack, frames)
self.__storeMove(am)
am.do(self)
self.hints.list = None
# move type 3
def turnStackMove(self, from_stack, to_stack):
assert from_stack and to_stack and (from_stack is not to_stack)
assert len(to_stack.cards) == 0
am = ATurnStackMove(from_stack, to_stack)
self.__storeMove(am)
am.do(self)
self.hints.list = None
# move type 4
def nextRoundMove(self, stack):
assert stack
am = ANextRoundMove(stack)
self.__storeMove(am)
am.do(self)
self.hints.list = None
# move type 5
def saveSeedMove(self):
am = ASaveSeedMove(self)
self.__storeMove(am)
am.do(self)
# self.hints.list = None
# move type 6
def shuffleStackMove(self, stack):
assert stack
am = AShuffleStackMove(stack, self)
self.__storeMove(am)
am.do(self)
self.hints.list = None
# move type 7
def updateStackMove(self, stack, flags):
assert stack
am = AUpdateStackMove(stack, flags)
self.__storeMove(am)
am.do(self)
# #self.hints.list = None
# move type 8
def flipAllMove(self, stack):
assert stack
am = AFlipAllMove(stack)
self.__storeMove(am)
am.do(self)
self.hints.list = None
# move type 9
def saveStateMove(self, flags):
am = ASaveStateMove(self, flags)
self.__storeMove(am)
am.do(self)
# self.hints.list = None
# for ArbitraryStack
def singleCardMove(self, from_stack, to_stack, position,
frames=-1, shadow=-1):
am = ASingleCardMove(from_stack, to_stack, position, frames, shadow)
self.__storeMove(am)
am.do(self)
self.hints.list = None
# Finish the current move.
def finishMove(self):
current, moves, stats = self.moves.current, self.moves, self.stats
if not current:
return 0
# invalidate hints
self.hints.list = None
# update stats
if self.demo:
stats.demo_moves += 1
if moves.index == 0:
stats.player_moves = 0 # clear all player moves
else:
stats.player_moves += 1
if moves.index == 0:
stats.demo_moves = 0 # clear all demo moves
stats.total_moves += 1
# try to detect a redo move in order to keep our history
redo = 0
if moves.index + 1 < len(moves.history):
mylen, m = len(current), moves.history[moves.index]
if mylen == len(m):
for i in range(mylen):
a1 = current[i]
a2 = m[i]
if a1.__class__ is not a2.__class__ or \
a1.cmpForRedo(a2) != 0:
break
else:
redo = 1
# add current move to history (which is a list of lists)
if redo:
# print "detected redo:", current
# overwrite existing entry because minor things like
# shadow/frames may have changed
moves.history[moves.index] = current
moves.index += 1
else:
# resize (i.e. possibly shorten list from previous undos)
moves.history[moves.index:] = [current]
moves.index += 1
assert moves.index == len(moves.history)
moves.current = []
self.updateSnapshots()
# update view
self.updateText()
self.updateStatus(moves=(moves.index, self.stats.total_moves))
self.updateMenus()
self.updatePlayTime(do_after=0)
self.updateStuck()
reset_solver_dialog()
return 1
def undo(self):
assert self.canUndo()
assert self.moves.state == self.S_PLAY and len(self.moves.current) == 0
assert 0 <= self.moves.index <= len(self.moves.history)
if self.moves.index == 0:
return
self.moves.index -= 1
self.moves.state = self.S_UNDO
for atomic_move in reversed(self.moves.history[self.moves.index]):
atomic_move.undo(self)
self.moves.state = self.S_PLAY
self.stats.undo_moves += 1
self.stats.total_moves += 1
self.hints.list = None
self.updateSnapshots()
self.updateText()
self.updateStatus(moves=(self.moves.index, self.stats.total_moves))
self.updateMenus()
self.updateStatus(stuck='')
self.failed_snapshots = []
reset_solver_dialog()
def redo(self):
assert self.canRedo()
assert self.moves.state == self.S_PLAY and len(self.moves.current) == 0
assert 0 <= self.moves.index <= len(self.moves.history)
if self.moves.index == len(self.moves.history):
return
m = self.moves.history[self.moves.index]
self.moves.index += 1
self.moves.state = self.S_REDO
for atomic_move in m:
atomic_move.redo(self)
self.moves.state = self.S_PLAY
self.stats.redo_moves += 1
self.stats.total_moves += 1
self.hints.list = None
self.updateSnapshots()
self.updateText()
self.updateStatus(moves=(self.moves.index, self.stats.total_moves))
self.updateMenus()
self.updateStuck()
reset_solver_dialog()
#
# subclass hooks
#
def setState(self, state):
# restore saved vars (from undo/redo)
pass
def getState(self):
# save vars (for undo/redo)
return []
#
# bookmarks
#
def setBookmark(self, n, confirm=1):
self.finishMove() # just in case
if not self.canSetBookmark():
return 0
if confirm < 0:
confirm = self.app.opt.confirm
if confirm and self.gsaveinfo.bookmarks.get(n):
if not self.areYouSure(
_("Set bookmark"),
_("Replace existing bookmark %d?") % (n+1)):
return 0
f = BytesIO()
try:
self._dumpGame(Pickler(f, 1), bookmark=2)
bm = (f.getvalue(), self.moves.index)
except Exception:
pass
else:
self.gsaveinfo.bookmarks[n] = bm
return 1
return 0
def gotoBookmark(self, n, confirm=-1, update_stats=1):
self.finishMove() # just in case
bm = self.gsaveinfo.bookmarks.get(n)
if not bm:
return
if confirm < 0:
confirm = self.app.opt.confirm
if confirm:
if not self.areYouSure(_("Goto bookmark"),
_("Goto bookmark %d?") % (n+1)):
return
try:
s, moves_index = bm
self.setCursor(cursor=CURSOR_WATCH)
file = BytesIO(s)
p = Unpickler(file)
game = self._undumpGame(p, self.app)
assert game.id == self.id
# save state for undoGotoBookmark
self.setBookmark(-1, confirm=0)
except Exception:
del self.gsaveinfo.bookmarks[n]
self.setCursor(cursor=self.app.top_cursor)
else:
if update_stats:
self.stats.goto_bookmark_moves += 1
self.gstats.goto_bookmark_moves += 1
self.restoreGame(game, reset=0)
destruct(game)
def undoGotoBookmark(self):
self.gotoBookmark(-1, update_stats=0)
def loadGame(self, filename):
if self.changed():
if not self.areYouSure(_("Open game")):
return
self.finishMove() # just in case
game = None
self.setCursor(cursor=CURSOR_WATCH)
self.disableMenus()
try:
game = self._loadGame(filename, self.app)
game.gstats.holded = 0
except AssertionError:
self.updateMenus()
self.setCursor(cursor=self.app.top_cursor)
MfxMessageDialog(
self.top, title=_("Load game error"), bitmap="error",
text=_(
"Error while loading game.\n\n" +
"Probably the game file is damaged,\n" +
"but this could also be a bug you might want to report."))
traceback.print_exc()
except UnpicklingError as ex:
self.updateMenus()
self.setCursor(cursor=self.app.top_cursor)
MfxExceptionDialog(self.top, ex, title=_("Load game error"),
text=_("Error while loading game"))
except Exception:
self.updateMenus()
self.setCursor(cursor=self.app.top_cursor)
MfxMessageDialog(
self.top, title=_("Load game error"),
bitmap="error", text=_(
"""Internal error while loading game.\n\n""" +
"Please report this bug."))
traceback.print_exc()
else:
if self.pause:
# unselect pause-button
self.app.menubar.mPause()
self.filename = filename
game.filename = filename
# now start the new game
# print game.__dict__
if self.nextGameFlags(game.id) == 0:
self.endGame()
self.restoreGame(game)
destruct(game)
else:
self.endGame()
self.quitGame(game.id, loadedgame=game)
def saveGame(self, filename, protocol=-1):
self.finishMove() # just in case
self.setCursor(cursor=CURSOR_WATCH)
try:
self._saveGame(filename, protocol)
except Exception as ex:
self.setCursor(cursor=self.app.top_cursor)
MfxExceptionDialog(self.top, ex, title=_("Save game error"),
text=_("Error while saving game"))
else:
self.filename = filename
self.setCursor(cursor=self.app.top_cursor)
#
# low level load/save
#
def _loadGame(self, filename, app):
game = None
with open(filename, "rb") as f:
game = self._undumpGame(Unpickler(f), app)
game.gstats.loaded += 1
return game
def _undumpGame(self, p, app):
self.updateTime()
#
err_txt = _("Invalid or damaged %s save file") % PACKAGE
#
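        # save file layout, in pickle order: package name, version
        # string, version tuple, bookmark flag, game version, game id,
        # initial seed, random state, per-stack (card_id, face_up)
        # lists, talon round, finished flag, save info structs, moves,
        # snapshots, stats structs, game-specific hook data and a
        # trailing "EOF" sentinel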
def pload(t=None, p=p):
obj = p.load()
if isinstance(t, type):
if not isinstance(obj, t):
# accept old storage format in case:
if t in _Game_LOAD_CLASSES:
assert isinstance(obj, Struct), err_txt
else:
assert False, err_txt
return obj
def validate(v, txt):
if not v:
raise UnpicklingError(txt)
#
package = pload(str)
validate(package == PACKAGE, err_txt)
version = pload(str)
# validate(isinstance(version, str) and len(version) <= 20, err_txt)
version_tuple = pload(tuple)
validate(
version_tuple >= (1, 0),
_('Cannot load games saved with\n%(app)s version %(ver)s') % {
'app': PACKAGE,
'ver': version})
game_version = 1
bookmark = pload(int)
validate(0 <= bookmark <= 2, err_txt)
game_version = pload(int)
validate(game_version > 0, err_txt)
#
id = pload(int)
validate(id > 0, err_txt)
if id not in GI.PROTECTED_GAMES:
game = app.constructGame(id)
if game:
if not game.canLoadGame(version_tuple, game_version):
destruct(game)
game = None
validate(
game is not None,
_('Cannot load this game from version %s\n' +
'as the game rules have changed\n' +
'in the current implementation.') % version)
game.version = version
game.version_tuple = version_tuple
#
initial_seed = random__int2str(pload(int))
game.random = construct_random(initial_seed)
state = pload()
if (game.random is not None and
not isinstance(game.random, random2.Random) and
isinstance(state, int)):
game.random.setstate(state)
# if not hasattr(game.random, "origin"):
# game.random.origin = game.random.ORIGIN_UNKNOWN
game.loadinfo.stacks = []
game.loadinfo.ncards = 0
nstacks = pload(int)
validate(1 <= nstacks, err_txt)
for i in range(nstacks):
stack = []
ncards = pload(int)
validate(0 <= ncards <= 1024, err_txt)
for j in range(ncards):
card_id = pload(int)
face_up = pload(int)
stack.append((card_id, face_up))
game.loadinfo.stacks.append(stack)
game.loadinfo.ncards = game.loadinfo.ncards + ncards
validate(game.loadinfo.ncards == game.gameinfo.ncards, err_txt)
game.loadinfo.talon_round = pload()
game.finished = pload()
if 0 <= bookmark <= 1:
saveinfo = pload(GameSaveInfo)
game.saveinfo.__dict__.update(saveinfo.__dict__)
gsaveinfo = pload(GameGlobalSaveInfo)
game.gsaveinfo.__dict__.update(gsaveinfo.__dict__)
moves = pload(GameMoves)
game.moves.__dict__.update(moves.__dict__)
snapshots = pload(list)
game.snapshots = snapshots
if 0 <= bookmark <= 1:
gstats = pload(GameGlobalStatsStruct)
game.gstats.__dict__.update(gstats.__dict__)
stats = pload(GameStatsStruct)
game.stats.__dict__.update(stats.__dict__)
game._loadGameHook(p)
dummy = pload(str)
validate(dummy == "EOF", err_txt)
if bookmark == 2:
# copy back all variables that are not saved
game.stats = self.stats
game.gstats = self.gstats
game.saveinfo = self.saveinfo
game.gsaveinfo = self.gsaveinfo
return game
def _saveGame(self, filename, protocol=-1):
if self.canSaveGame():
with open(filename, "wb") as f:
self._dumpGame(Pickler(f, protocol))
def _dumpGame(self, p, bookmark=0):
return pysolDumpGame(self, p, bookmark)
def startPlayTimer(self):
self.updateStatus(time=None)
self.stopPlayTimer()
self.play_timer = after(
self.top, PLAY_TIME_TIMEOUT, self.updatePlayTime)
def stopPlayTimer(self):
if hasattr(self, 'play_timer') and self.play_timer:
after_cancel(self.play_timer)
self.play_timer = None
self.updatePlayTime(do_after=0)
def updatePlayTime(self, do_after=1):
if not self.top:
return
if self.pause or self.finished:
return
if do_after:
self.play_timer = after(
self.top, PLAY_TIME_TIMEOUT, self.updatePlayTime)
d = time.time() - self.stats.update_time + self.stats.elapsed_time
self.updateStatus(time=format_time(d))
def doPause(self):
if self.finished:
return
if self.demo:
self.stopDemo()
if not self.pause:
self.updateTime()
self.pause = not self.pause
if self.pause:
# self.updateTime()
self.canvas.hideAllItems()
n = self.random.initial_seed % len(self.app.gimages.pause)
self.pause_logo = self.app.gimages.pause[int(n)]
self.canvas.setTopImage(self.pause_logo)
else:
self.stats.update_time = time.time()
self.updatePlayTime()
self.canvas.setTopImage(None)
self.pause_logo = None
self.canvas.showAllItems()
def showHelp(self, *args):
if self.preview:
return
kw = dict([(args[i], args[i+1]) for i in range(0, len(args), 2)])
if not kw:
kw = {'info': '', 'help': ''}
if 'info' in kw and self.app.opt.statusbar and self.app.opt.num_cards:
self.app.statusbar.updateText(info=kw['info'])
if 'help' in kw and self.app.opt.helpbar:
self.app.helpbar.updateText(info=kw['help'])
#
# Piles descriptions
#
def showStackDesc(self):
from pysollib.pysoltk import StackDesc
from pysollib.stack import InitialDealTalonStack
sd_list = []
for s in self.allstacks:
sd = (s.__class__.__name__, s.cap.base_rank, s.cap.dir)
if sd in sd_list:
                # show only one of each unique pile type
continue
if isinstance(s, InitialDealTalonStack):
continue
self.stackdesc_list.append(StackDesc(self, s))
sd_list.append(sd)
def deleteStackDesc(self):
if self.stackdesc_list:
for sd in self.stackdesc_list:
sd.delete()
self.stackdesc_list = []
return True
return False
# for find_card_dialog
def canFindCard(self):
return self.gameinfo.category == GI.GC_FRENCH
#
# subclass hooks
#
def _restoreGameHook(self, game):
pass
def _loadGameHook(self, p):
pass
def _saveGameHook(self, p):
pass
def _dealNumRows(self, n):
for i in range(n):
self.s.talon.dealRow(frames=0)
def _startDealNumRows(self, n):
self._dealNumRows(n)
self.startDealSample()
def _startDealNumRowsAndDealSingleRow(self, n):
self._startDealNumRows(n)
self.s.talon.dealRow()
def _startAndDealRow(self):
self._startDealNumRowsAndDealSingleRow(0)
def _startDealNumRowsAndDealRowAndCards(self, n):
self._startDealNumRowsAndDealSingleRow(n)
self.s.talon.dealCards()
def _startAndDealRowAndCards(self):
self._startAndDealRow()
self.s.talon.dealCards()
class StartDealRowAndCards(object):
def startGame(self):
self._startAndDealRowAndCards()
|
shlomif/PySolFC
|
pysollib/game/__init__.py
|
Python
|
gpl-3.0
| 123,237
|
[
"CASINO"
] |
79fb2dd4f9b4d568735c71954cb07968fdacafbc8e5b6ac64bd262aa03ec7550
|
"""Factor Analysis.
A latent linear variable model.
FactorAnalysis is similar to the probabilistic PCA implemented by PCA.score.
While PCA assumes Gaussian noise with the same variance for each
feature, the FactorAnalysis model assumes different variances for
each of them.
This implementation is based on David Barber's Book,
Bayesian Reasoning and Machine Learning,
http://www.cs.ucl.ac.uk/staff/d.barber/brml,
Algorithm 21.1
"""
# Author: Christian Osendorfer <osendorf@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Denis A. Engemann <d.engemann@fz-juelich.de>
# Licence: BSD3
import warnings
from math import sqrt, log
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array, check_random_state
from ..utils.extmath import fast_logdet, fast_dot, randomized_svd, squared_norm
from ..utils import ConvergenceWarning
class FactorAnalysis(BaseEstimator, TransformerMixin):
"""Factor Analysis (FA)
A simple linear generative model with Gaussian latent variables.
The observations are assumed to be caused by a linear transformation of
lower dimensional latent factors and added Gaussian noise.
Without loss of generality the factors are distributed according to a
Gaussian with zero mean and unit covariance. The noise is also zero mean
and has an arbitrary diagonal covariance matrix.
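    In equations, for an observation ``x`` with latent factors ``z``,
    loading matrix ``W``, mean ``mu`` and noise ``eps``::
        x = W z + mu + eps,   z ~ N(0, I),   eps ~ N(0, diag(psi))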
    If we further restricted the model by assuming that the Gaussian
    noise is isotropic (all diagonal entries are the same), we would
    obtain :class:`PPCA`.
FactorAnalysis performs a maximum likelihood estimate of the so-called
`loading` matrix, the transformation of the latent variables to the
observed ones, using expectation-maximization (EM).
Parameters
----------
n_components : int | None
Dimensionality of latent space, the number of components
of ``X`` that are obtained after ``transform``.
If None, n_components is set to the number of features.
tol : float
Stopping tolerance for EM algorithm.
copy : bool
Whether to make a copy of X. If ``False``, the input X gets overwritten
during fitting.
max_iter : int
Maximum number of iterations.
verbose : int | bool
Print verbose output.
noise_variance_init : None | array, shape=(n_features,)
The initial guess of the noise variance for each feature.
If None, it defaults to np.ones(n_features)
svd_method : {'lapack', 'randomized'}
Which SVD method to use. If 'lapack' use standard SVD from
scipy.linalg, if 'randomized' use fast ``randomized_svd`` function.
Defaults to 'randomized'. For most applications 'randomized' will
be sufficiently precise while providing significant speed gains.
Accuracy can also be improved by setting higher values for
`iterated_power`. If this is not sufficient, for maximum precision
you should choose 'lapack'.
iterated_power : int, optional
Number of iterations for the power method. 3 by default. Only used
if ``svd_method`` equals 'randomized'
random_state : int or RandomState
Pseudo number generator state used for random sampling. Only used
if ``svd_method`` equals 'randomized'
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
loglike_ : list, [n_iterations]
The log likelihood at each iteration.
noise_variance_ : array, shape=(n_features,)
The estimated noise variance for each feature.
n_iter_ : int
Number of iterations run.
References
----------
.. David Barber, Bayesian Reasoning and Machine Learning,
Algorithm 21.1
.. Christopher M. Bishop: Pattern Recognition and Machine Learning,
Chapter 12.2.4
See also
--------
PCA: Principal component analysis is also a latent linear variable model
which however assumes equal noise variance for each feature.
This extra assumption makes probabilistic PCA faster as it can be
computed in closed form.
FastICA: Independent component analysis, a latent variable model with
non-Gaussian latent variables.
"""
def __init__(self, n_components=None, tol=1e-2, copy=True, max_iter=1000,
verbose=0, noise_variance_init=None, svd_method='randomized',
iterated_power=3, random_state=0):
self.n_components = n_components
self.copy = copy
self.tol = tol
self.max_iter = max_iter
if svd_method not in ['lapack', 'randomized']:
            raise ValueError('SVD method %s is not supported. Please consult'
                             ' the documentation' % svd_method)
self.svd_method = svd_method
if verbose:
warnings.warn('The `verbose` parameter has been deprecated and '
'will be removed in 0.16. To reduce verbosity '
'silence Python warnings instead.',
DeprecationWarning)
self.verbose = verbose
self.noise_variance_init = noise_variance_init
self.iterated_power = iterated_power
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the FactorAnalysis model to X using EM
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
self
"""
X = check_array(X, copy=self.copy, dtype=np.float)
n_samples, n_features = X.shape
n_components = self.n_components
if n_components is None:
n_components = n_features
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
# some constant terms
nsqrt = sqrt(n_samples)
llconst = n_features * log(2. * np.pi) + n_components
var = np.var(X, axis=0)
if self.noise_variance_init is None:
psi = np.ones(n_features, dtype=X.dtype)
else:
if len(self.noise_variance_init) != n_features:
raise ValueError("noise_variance_init dimension does not "
"with number of features : %d != %d" %
(len(self.noise_variance_init), n_features))
psi = np.array(self.noise_variance_init)
loglike = []
old_ll = -np.inf
SMALL = 1e-12
# we'll modify svd outputs to return unexplained variance
# to allow for unified computation of loglikelihood
if self.svd_method == 'lapack':
def my_svd(X):
_, s, V = linalg.svd(X, full_matrices=False)
return (s[:n_components], V[:n_components],
squared_norm(s[n_components:]))
elif self.svd_method == 'randomized':
random_state = check_random_state(self.random_state)
def my_svd(X):
_, s, V = randomized_svd(X, n_components,
random_state=random_state,
n_iter=self.iterated_power)
return s, V, squared_norm(X) - squared_norm(s)
else:
            raise ValueError('SVD method %s is not supported. Please consult'
                             ' the documentation' % self.svd_method)
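        # EM via SVD (a sketch of the scheme below, following Barber's
        # Algorithm 21.1): scale X by the current noise estimate, take a
        # truncated SVD, and read off the updated loadings W; psi is then
        # re-estimated from the residual per-feature variance.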
for i in xrange(self.max_iter):
# SMALL helps numerics
sqrt_psi = np.sqrt(psi) + SMALL
s, V, unexp_var = my_svd(X / (sqrt_psi * nsqrt))
s **= 2
# Use 'maximum' here to avoid sqrt problems.
W = np.sqrt(np.maximum(s - 1., 0.))[:, np.newaxis] * V
del V
W *= sqrt_psi
# loglikelihood
ll = llconst + np.sum(np.log(s))
ll += unexp_var + np.sum(np.log(psi))
ll *= -n_samples / 2.
loglike.append(ll)
if (ll - old_ll) < self.tol:
break
old_ll = ll
psi = np.maximum(var - np.sum(W ** 2, axis=0), SMALL)
else:
warnings.warn('FactorAnalysis did not converge.' +
' You might want' +
' to increase the number of iterations.',
ConvergenceWarning)
self.components_ = W
self.noise_variance_ = psi
self.loglike_ = loglike
self.n_iter_ = i + 1
return self
def transform(self, X):
"""Apply dimensionality reduction to X using the model.
Compute the expected mean of the latent variables.
See Barber, 21.2.33 (or Bishop, 12.66).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
The latent variables of X.
"""
X = check_array(X)
Ih = np.eye(len(self.components_))
X_transformed = X - self.mean_
Wpsi = self.components_ / self.noise_variance_
cov_z = linalg.inv(Ih + np.dot(Wpsi, self.components_.T))
tmp = fast_dot(X_transformed, Wpsi.T)
X_transformed = fast_dot(tmp, cov_z)
return X_transformed
def get_covariance(self):
"""Compute data covariance with the FactorAnalysis model.
``cov = components_.T * components_ + diag(noise_variance)``
Returns
-------
cov : array, shape (n_features, n_features)
Estimated covariance of data.
"""
cov = np.dot(self.components_.T, self.components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the FactorAnalysis model.
Returns
-------
precision : array, shape (n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components == 0:
return np.diag(1. / self.noise_variance_)
if self.n_components == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
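        # Woodbury identity, with Sigma = W^T W + Psi and W = components_:
        #   Sigma^{-1} = Psi^{-1}
        #       - Psi^{-1} W^T (I + W Psi^{-1} W^T)^{-1} W Psi^{-1}
        # so only an (n_components x n_components) matrix is inverted.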
components_ = self.components_
precision = np.dot(components_ / self.noise_variance_, components_.T)
precision.flat[::len(precision) + 1] += 1.
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= self.noise_variance_[:, np.newaxis]
precision /= -self.noise_variance_[np.newaxis, :]
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
def score_samples(self, X):
"""Compute the log-likelihood of each sample
Parameters
----------
X: array, shape (n_samples, n_features)
The data
Returns
-------
ll: array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
Xr = X - self.mean_
precision = self.get_precision()
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi)
- fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Compute the average log-likelihood of the samples
Parameters
----------
X: array, shape (n_samples, n_features)
The data
Returns
-------
ll: float
Average log-likelihood of the samples under the current model
"""
return np.mean(self.score_samples(X))
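# A minimal usage sketch (illustrative only; the data here is random):
#
#   import numpy as np
#   from sklearn.decomposition import FactorAnalysis
#
#   X = np.random.RandomState(0).randn(100, 10)
#   fa = FactorAnalysis(n_components=3).fit(X)
#   Z = fa.transform(X)          # (100, 3) posterior means of the factors
#   cov = fa.get_covariance()    # model covariance of the data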
|
soulmachine/scikit-learn
|
sklearn/decomposition/factor_analysis.py
|
Python
|
bsd-3-clause
| 12,046
|
[
"Gaussian"
] |
653d5c96c05806fe581c433639474468c576cc2b8f58e142f9bb60ecbc34c22c
|
"""
pyparticleprobe.io.read_p3_2dimage_probe
=========================
A grouping of scripts designed to read in various file formats containing data
from 2-D precipitation and cloud imaging probes (PIP and CIP) aboard
the NOAA WP-3D aircraft.
Created by Nick Guy.
"""
# HISTORY::
# 8 Jan 2014 - Nick Guy. NRC, NOAA/NSSL (nick.guy@noaa.gov)
# Converted NCL functions below to Python
# 19 Mar 2014 - NG: Added functionality for processing UCSC netcdf
# FUNCTIONS::
# rab_netcdf_vars - Pulls out a number of variables from NetCDF file created
# using R Black results.
# ucsc_netcdf_vars - Pulls out variables from UCSC NetCDF file
# ucsc_flt_lev_vars - Pulls out variables from flight level data to complement
#                     the probe data read by the previous function
# rab_netcdf - Opens the R Black NetCDF file
# ucsc_netcdf - Opens the UCSC NetCDF data file
# dbaum_spectrum_ascii - Open the Baumgardner ascii file (Not operable!!!)
#-------------------------------------------------------------------
# Load the needed packages
from scipy.io import FortranFile
import numpy as np
from netCDF4 import Dataset # , num2date, date2num
import netCDF4 as n4
import matplotlib.dates as mdate
import scipy.interpolate as si
from . import read_p3_flight as p3FltVar
#-------------------------------------------------------------------
# Define various constants that may be used for calculations
#===============================================================
# BEGIN FUNCTIONS
#===============================================================
def rab_netcdf_vars(FilePathString, Subset=False, StartT=None, EndT=None):
"""Read a NetCDF file containing distribution data generated by Bob
Black at the NOAA AOML. The original Fortran binary has been converted
into easier to use NetCDF format.
INPUT::
FilePathString = Long string path to NetCDF file to be read
OPTIONAL
Subset = Set True if subset of data by time is desired
StartT = Start time for subsetting [matlab date number instance]
EndT = End time for subsetting [matlab date number instance]
    OUTPUT::
     data = Dictionary with the following keys:
      Sizebins = The mid-point of size bins [um]
      Time = Time [in matplotlib date number]
      Conc_water = Water species concentration [m^-3]
      Conc_ice = Ice species concentration [m^-3]
      RhoAir = Air density [g/m^3]
      w_vel_air = Aircraft vertical velocity [m/s]
    USAGE::
     data = rab_netcdf_vars(FilePathString, Subset=True, StartT=StartT, EndT=EndT)
"""
# HISTORY::
# 13 Feb 2014 - Nick Guy NOAA/NSSL/WRDD, NRC
# NOTES::
# The original binary files provided concentration data in units of L^-1,
# this has been converted to units of m^-3 in the NetCDF files by
# the p3pip2nc_rab.ncl function.
# Time units in the NetCDF are in units of 'seconds since 1970-1-1 00:00:00'
#---------------------------------------
# Read the NetCDF file
Data = n4.Dataset(FilePathString, 'r')
# Read the size bins
    Sizebins = Data.variables['Sizebins'][:]
    # First pull out the time variable
    Time = mdate.epoch2num(Data.variables['EpochTime'][:])
    # Check whether to subset by time and find the range
    if Subset:
        inSub = (Time >= StartT) & (Time <= EndT)  # Indices of time subset
    else:
        inSub = np.arange(0, len(Time))  # No subset, take entire array
# Pull out the subsetted variables
Tm = +Time[inSub] # Time
Water = Data.variables['Water'][inSub,:] # Water conc
Ice = Data.variables['Ice'][inSub,:]
RhoAir = Data.variables['RhoAir'][inSub]
W = Data.variables['vertVel'][inSub]
del Time
# Create a dictionary to transfer the data
data = {'Sizebins': Sizebins,
'Time': Tm,
'Conc_water': Water,
'Conc_ice': Ice,
'RhoAir': RhoAir,
'w_vel_air': W}
return data
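# A minimal usage sketch (hypothetical path; StartT/EndT are matplotlib
# date numbers):
#
#   data = rab_netcdf_vars('/path/to/rab_pip.nc', Subset=True,
#                          StartT=t0, EndT=t1)
#   water = data['Conc_water']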
#====================================================
def ucsc_netcdf_vars(FilePathString, Instr, YYYY, MM, DD, flCDF,
Subset=True, SubType=None, Smin=None, Smax=None,
AvDat=False, RunAv=6):
"""Read a NetCDF file containing distribution data generated by the
University of California at Santa Cruz. Group there is led by
Patrick Chuang (pchuang@ucsc.edu).
INPUT::
FilePathString = Long string path to UCSC NetCDF file to be read
Instr = Name of instrument to process ('cip' or 'pip' or '')
YYYY = String year
MM = String month
DD = String day
flCDF = Long string path to flight level NetCDF file to be read
OPTIONAL
Subset = Set True if subset of data by time is desired
SubType = Subset to apply min and max either 'time' or 'altitude'
Smin = Start time [matlab date number instance] or lower altitude [float]
for subsetting
Smax = End time [matlab date number instance] or upper altitude [float]
for subsetting
    AvDat = Set True if averaging and data reduction is desired
    RunAv = Number of points in the running average / subsample step (default 6)
    OUTPUT::
     data = Dictionary with the following keys:
      Sizebins = Mid-point size of bin [micron]
      Time = Time [matplotlib date object, fractional day]
      Conc = Concentration of liquid water particles [m^-3]
      RhoAir = Density of air [kg/m^3]
      W_vel_air = Vertical velocity [m/s]
      Altitude = Aircraft altitude [m]
      Bin_edges = Edges of the size bins [micron]
    USAGE::
     data = ucsc_netcdf_vars(FilePathString, Instr, YYYY, MM, DD, flCDF, [**args])
NOTES::
The NetCDF from UCSC has data with the following units, some of which are
converted in the following program.
    The Concentration is provided as cm^-3, and converted to m^-3.
    Note the 10 minute buffer required when subsetting the data. Though the idea
    would remain the same for any function used, this is specific in format to the
    matplotlib.dates.date2num type object, which is days.{fraction of day} since 0001.
"""
# HISTORY::
# 18 Jul 2013 - Nick Guy NOAA/NSSL/WRDD, NRC
#---------------------------------------
# Read the NetCDF file
Data = n4.Dataset(FilePathString, 'r')
# Read the size bins
Sizebins = Data.variables[Instr.upper() + 'corr_bin_mid'][:]
Bin_edges = np.hstack((Data.variables[Instr.upper() + 'corr_bin_min'][0],
Data.variables[Instr.upper() + 'corr_bin_max']))
# First pull out the time variable
    HHMMSS = Data.variables['time'][:]
# Now makes this an "unaware" datetime object converted back into a number
# of seconds since beginning of day. Necessary because of the way the
# variable is saved in NetCDF
Time = mdate.date2num(
n4.num2date(HHMMSS, 'seconds since ' + YYYY + '-' + MM + '-' + DD + ' 00:00:00'))
# Pull in the aircraft variables of interest
FltTime, Fltrhoair, FltVertW, FltAlt = ucsc_flt_lev_vars(flCDF)
# Map the data from the aircraft variables to the PIP variables
altitude = si.griddata(FltTime, FltAlt, Time)
rhoair = si.griddata(FltTime, Fltrhoair, Time)
VertVel = si.griddata(FltTime, FltVertW, Time)
# Check whether to subset and the range
if Subset:
# Check which type of subsetting to apply
if SubType.lower() == 'time':
inSub = (Time >= Smin) & (Time <= Smax) # Indices of time subset
elif SubType.lower() == 'altitude':
# Indices of altitude subset
inSub = (altitude >= Smin) & (altitude <= Smax)
else:
inSub = np.arange(0, len(Time)) # No subset, take entire array
# Pull out the subsetted variables
Tm = +Time[inSub] # Time
# Water conc
Water = Data.variables[Instr.upper() + 'corr_conc'][:, inSub] * 1E6
Altitude = altitude[inSub]
RhoAir = rhoair[inSub]
W = VertVel[inSub]
Alt = altitude[inSub]
del Time, FltTime, Fltrhoair, FltVertW, inSub, rhoair, VertVel
# Create a running average of the data and subset to every RunAv
if AvDat:
weights = np.repeat(1., RunAv) / RunAv
Tm = np.convolve(Tm, weights, 'valid')[::RunAv]
RhoAir = np.convolve(RhoAir, weights, 'valid')[::RunAv]
W = np.convolve(W, weights, 'valid')[::RunAv]
Alt = np.convolve(Alt, weights, 'valid')[::RunAv]
        # Since Water is a 2D array, apply the running average row by row
WatTmp = np.empty([Water.shape[0], Water.shape[1] - RunAv + 1])
for num in range(Water.shape[0]):
WatTmp[num,:] = np.convolve(Water[num,:], weights, 'valid')
del Water
Water = WatTmp[:, ::RunAv]
# Create a dictionary to transfer the data
data = {'Sizebins': Sizebins,
'Time': Tm,
'Conc': Water.T,
'RhoAir': RhoAir,
'W_vel_air': W,
'Altitude': Alt,
            'Bin_edges': Bin_edges}
    # Note: the concentration was transposed above because the source files
    # were written with Matlab (column major) ordering
    return data
#====================================================
def ucsc_flt_lev_vars(fname):
"""Read in data from NetCDF file containing P3 flight level data created
by NOAA AOC. Pull out the needed variables for flight track info.
INPUT::
fname = Filename [string]
OUTPUT::
Time = Aircraft time array [Datetime object]
Rhoair = Air density [kg/m^3]
VertVel = Vertical Wind [m/s]
Altitude = Aircraft altitude [m]
USAGE::
Time,Rhoair,VertVel = ucsc_flt_lev_vars(fname)
NOTES::
The variables are masked of bad values
"""
# HISTORY::
# 7 Mar 2014 - Nick Guy NOAA/NSSL/WRDD, NRC
#---------------------------------------------------
# Set a sea level density value
sea_level_dens = 1.2250 #[kg/m^3]
# Read the NetCDF
ncFile = n4.Dataset(fname, 'r')
    # Retrieve vertical velocity
    # VertVel = ncFile.variables['WSZ_DPJ'][:]
    VertVel = ncFile.variables['UWZ.1'][:]
    VertVel = np.ma.masked_invalid(VertVel)
    # Retrieve variables to calculate Air density
    Temp = ncFile.variables['TA.1'][:] + 273.15  # Convert from C to K
    Temp = np.ma.masked_invalid(Temp)
    Alt = ncFile.variables['AltGPS.3'][:]
    Alt = np.ma.masked_invalid(Alt)
    RhoAir = sea_level_dens * np.exp(-0.04 * Alt / Temp)
del Temp
# Pull out the start time
StartTime = ncFile.StartTime
    # Create a time array (assumes 1 Hz flight-level data)
    TimeSec = StartTime + np.arange(len(RhoAir))
Time = mdate.epoch2num(TimeSec)
return Time, RhoAir, VertVel, Alt
#**====================================================
def rab_netcdf(FilePathString):
"""Read a NetCDF file containing distribution data generated by Bob
Black at the NOAA AOML. The original Fortran binary has been converted
into easier to use NetCDF format.
INPUT::
FilePathString = Long string path to NetCDF file to be read
OUTPUT::
Data = Data array containing ascii data
USAGE::
DatOut = rab_netcdf(filepathString,FillVal)
"""
# HISTORY::
# 13 Feb 2014 - Nick Guy NOAA/NSSL/WRDD, NRC
#---------------------------------------
# Read the NetCDF file
Data = n4.Dataset(FilePathString, 'r')
return Data
#====================================================
def ucsc_netcdf(FilePathString, Instr):
"""Read a NetCDF file containing distribution data generated by the
University of California at Santa Cruz. Group there is led by
Patrick Chuang (pchuang@ucsc.edu).
INPUT::
FilePathString = Long string path to NetCDF file to be read
Instr = Name of instrument to process ('cip' or 'pip')
OUTPUT::
Data = Data array containing ascii data
USAGE::
DatOut = ucsc_netcdf(filepathString,FillVal)
NOTES::
The NetCDF files were generated by p3_pip_cip_ucsc_ascii2nc.ncl,
which read the original ascii files and created a NetCDF containing
all data, found to be easier for processing.
This file was constructed when UCSC processed 1D data. It was later
learned through conversations with DMT that the 1D data is "useless"
and that only the 2D image data should be used. Make sure that 2D data
has been used to construct
"""
# MODIFICATION HISTORY::
# 18 Jul 2013 - Nick Guy NOAA/NSSL/WRDD, NRC
#---------------------------------------
# Read the NetCDF file
Data = n4.Dataset(FilePathString, 'r')
return Data
#====================================================
def dbaum_spectrum_ascii(FilePathString, Instr, WorI, Missing):
"""Read ASCII files containing distribution data generated by the
CIP/PIP processing code written by Darrel Baumgardner at DMT.
(darrel.baumgardner@gmail.com).
INPUT::
filepathString = Long string path to ascii file to be read
Instr = String containing instrument name, "cip" or "pip"
WorI = String containing which concentration file to use
can be set to "water" or "ice"
FillVal = Value for missing data
OUTPUT::
Data = Data array containing ascii data
USAGE::
DatOut = dbaum_spectrum_ascii(FilePathString,Instr,WorI,FillVal)
NOTES::
The CIP instrument (2D-C) has a range of 12.5 micron -1.55 mm with
0.025 mm resolution.
The PIP instrument (2D-P) has a range of 0.100-6.4 mm with 0.100
mm resolution
There are 61 columns in these ascii files.
    Column 1 = Absolute time in seconds of day (UTC or local depends
    upon PADS setup).
    Columns 2-61 = Drop concentration in number per Liter, over 60
    bins beginning at 25 micron and increasing by 25 micron each bin.
    Each bin has been averaged over the number of seconds indicated
    in the initialization file.
    A MODIFICATION of the output is necessary to make this script work.
    The first line contains column labels, but because the labels start with
    numbers, NCL does not read these correctly. Therefore the column
    label line must be deleted to properly read.
"""
# HISTORY::
# 13 Aug 2013 - Nick Guy NOAA/NSSL/WRDD, NRC Postdoc
#
# 6 Nov 2013 - The readAsciiTable function is now used to read the data
# This avoids requiring user interaction to manually remove
# header information created by ImageProcessor program.
#---------------------------------------
# Check that "pip" or "cip" is entered
# if (str_strip(str_lower(Instr)).eq."pip" .or. str_strip(str_lower(Instr)).ne."cip") then
# print("Need to enter "+quote+"cip"+quote+" or "+quote+"pip"+quote+" for 2nd variable")
# return
# end if
# Check that "water" or "ice" is entered
# if (str_strip(str_lower(WorI)).eq."ice" .or. str_strip(str_lower(WorI)).ne."water") then
# print("Need to enter "+quote+"water"+quote+" or "+quote+"ice"+quote+" for 3rd variable")
# return
# end if
# Set filename
fname = Instr.upper() + WorI.lower() + "Spectra.dat"
# Read the concentration spectra file
Data = np.loadtxt(FilePathString + fname, dtype=float, skiprows=1)
    # Convert concentration data from L^-1 to m^-3 (for SI units);
    # columns 1-60 hold the 60 concentration bins (column 0 is time)
    Data[:, 1:61] = Data[:, 1:61] * 1000.
return Data
#====================================================
def dbaum_pbp_ascii(FilePathString, Instr, Missing):
"""Read ASCII files containing distribution data generated by the
CIP/PIP processing code written by Darrel Baumgardner at DMT.
(darrel.baumgardner@gmail.com).
INPUT::
filepathString = Long string path to ascii file to be read
Instr = String containing instrument name, "cip" or "pip"
FillVal = Value for missing data
OUTPUT::
Data = Data array containing ascii data
USAGE::
DatOut = read_dbaum_pbp_ascii(FilePathString,Instr,FillVal)
NOTES::
The CIP instrument (2D-C) has a range of 12.5 micron -1.55 mm with
0.025 mm resolution.
The PIP instrument (2D-P) has a range of 0.100-6.4 mm with 0.100
mm resolution
There are 61 columns in these ascii files.
    Column 1 = Absolute time in seconds of day (UTC or local depends
    upon PADS setup).
    Columns 2-61 = Drop concentration in number per Liter, over 60
    bins beginning at 25 micron and increasing by 25 micron each bin.
    Each bin has been averaged over the number of seconds indicated
    in the initialization file.
    A MODIFICATION of the output is necessary to make this script work.
    The first line contains column labels, but because the labels start with
    numbers, NCL does not read these correctly. Therefore the column
    label line must be deleted to properly read.
"""
# MODIFICATION HISTORY::
# 13 Aug 2013 - Nick Guy NOAA/NSSL/WRDD, NRC Postdoc
#---------------------------------------
# Check that "pip" or "cip" is entered
# if (str_strip(str_lower(Instr)).eq."pip" .or. str_strip(str_lower(Instr)).ne."cip") then
# print("Need to enter "+quote+"cip"+quote+" or "+quote+"pip"+quote+" for 2nd variable")
# return
# end if
# Check that "water" or "ice" is entered
# if (str_strip(str_lower(WorI)).eq."ice" .or. str_strip(str_lower(WorI)).ne."water") then
# print("Need to enter "+quote+"water"+quote+" or "+quote+"ice"+quote+" for 3rd variable")
# return
# end if
# Set filename
fname = Instr.upper() + "pbpStatistics.dat"
# Read the concentration spectra file
Data = np.loadtxt(FilePathString + fname, dtype=float, skiprows=1)
return Data
#====================================================
def bblack_binary(FilePathString, Rec, DatDims, DatType, FillVal):
"""Read in data from binary files created by Bob Black at NOAA HRD
Examination of .txt files or knowledge of the length of files is required
to create the DatDims array. More details regarding the data structure
of the binary file records are given below.
INPUT::
filepathString = Long string path to binary file to be read
Rec = Record number of binary file (0)
DatDims = Array dimensioned to shape of data
DatType = Type of data in record (e.g. "float")
FillVal = Value for missing data
OUTPUT::
Data = Data array containing binary record data
USAGE::
DatOut = read_bblack_binary(filepathString,Rec,DatDims,DatType,FillVal)
------------
NOTES::
The CIP instrument (2D-C) has a range of 12.5 micron -1.55 mm with
0.025 mm resolution.
The PIP instrument (2D-P) has a range of 0.100-6.4 mm with 0.100 mm resolution
Data file structure::
The number of lines (measurements) were determined via the
Dyn_20111124I_cip.txt and Dyn_20111124I_pip.txt files, respectively.
The data were saved to Fortran-produced, direct accesss files on a Big
Endian machine.
There are 7 records 256 bytes long consisting of 64 4-byte IEEE
Real numbers. The 64 numbers correspond to the numbers of elements
in the diode array, and hence are sizing bins.
The number of records is legacy from the older PMS 2D-C/2D-P combination
system aboard the aircrafts. They are in the following format:
Record 1 = Time and other computed parameters (see below)
Record 2 = PIP Water (empty in CIP file)
Record 3 = PIP Ice (empty in CIP file)
Record 4 = PIP Graupel / CIP
Record 5 = PIP Needle / CIP
Record 6 = CIP Graupel (empty in PIP file)
Record 7 = CIP Needle (empty in PIP file)
Number concentrations are in n/Liter
Record 1 format for :
CIP file PIP file
1 = YYMMDD 1 = YYMMDD
2 = HHMMSS 2 = HHMMSS
3 = CIP Rain rate [mm/hr] 3 = PIP Rain rate [mm/hr]
4 = 4 = LWC [g/m^3]
5 = CIP IWC 5 = IWC [g/m^3]
6 = 6 = PIP dBZ
7 = Attenuation [dBZ/km] 7 = Attenuation [dBZ/km]
8 = 8 =
9 = 9 =
10 = 10 = Repeat of 4
11 = 11 = Repeat of 5
12 = CIP LWC 12 =
13 = Repeat of 5? 13 =
14 = CIP svol [l] 14 =
15 = 15 =
16 = Repeat of 3 16 =
17 = 17 = Repeat of 3
18 = 18 =
19 = CIP dBZ 19 =
20 = 20 =
21 = VTC 21 =
22 = 22 =
23 = 23 =
24 = 24 =
25 = 25 =
26 = 26 =
27 = 27 = PIP sumxsa
28 = 28 = PIP avexsa
29 = 29 =
30 = 30 =
31 = YYYY 31 = YYYY
32 = RhoI [g/m^3] 32 = RhoI [g/m^3]
33 = # Accept 33 =
34 = # Reject 34 =
35 = % Accept 35 =
36 = Streakers 36 =
37 = % Small time (elt) 37 =
38 = % Zero A 38 =
39 = % Y-gap 39 =
40 = % Long-X On edge 40 =
41 = % Incomplete 41 =
42 = % Long Time 42 =
43 = % X-gap (multiple image) 43 =
44 = Air Density [kg m^-3] 44 = Air Density [kg m^-3]*10.
45 = Vertical wind, w [m/s] 45 = Vertical wind, w [m/s]
46 = Temp [C] 46 = Temp [C]
47 = Pressure [mb] 47 = Pressure [mb]
48 = Latitude [deg] 48 = Latitude [deg]
49 = Longitude [deg] 49 = Longitude [deg]
50 = Wind speed [m/s] 50 = Wind speed [m/s]
51 = Wind direction [deg] 51 = Wind direction [deg]
52 = True Air Speed 52 =
53 = Elapsed time 2D records 53 =
54 = 54 =
55 = 55 =
56 = 56 =
57 = 57 =
58 = 58 =
59 = 59 =
60 = 60 =
61 = 61 =
62 = 62 =
63 = 63 =
64 = 64 =
"""
# MODIFICATION HISTORY::
# 18 Jan 2013 - Nick Guy NOAA/NSSL/WRDD, NRC Postdoc
#---------------------------------------------------
    # A Python way to read a Fortran direct-access binary file.
    # (Rec, DatDims and DatType are kept for API compatibility; the whole
    # file is read and reshaped at once.)
    nRec = 7  # Number of records in binary file
    nDiodes = 64  # Number of bins (diodes in this case)
    f = open(FilePathString, 'rb')  # Open the file
    Data = np.fromfile(f, dtype='>f4', count=-1)  # Read all big-endian data
    f.close()
    nLines = len(Data) // (nRec * nDiodes)  # Find the number of lines of data
    Data = Data.reshape(nLines, nRec, nDiodes)  # Reshape the array
    # Mask the bad data values
    Data = np.ma.masked_values(Data, FillVal)
    # Convert concentration data from L^-1 to m^-3 (for SI units);
    # records 2-7 (indices 1-6) hold number concentrations
    Data[:, 1:7, :] = Data[:, 1:7, :] * 1000.
    return Data
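# A minimal usage sketch (hypothetical path; record layout per the notes above):
#
#   DatOut = bblack_binary('/path/Dyn_20111124I_pip.bin', 0,
#                          (7, 64), 'float', -999.)
#   pip_water = DatOut[:, 1, :]  # record 2: PIP water concentration [m^-3]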
#====================================================
|
nguy/pyparticleprobe
|
pyparticleprobe/io/read_p3_2dimage_probe.py
|
Python
|
lgpl-2.1
| 23,133
|
[
"NetCDF"
] |
b1048f549b173286e037af05cf74ed1eaac7c1ff932f9d0f806a3c843ced8044
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Subscription.is_active'
db.add_column(u'visits_subscription', 'is_active',
self.gf('django.db.models.fields.BooleanField')(default=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Subscription.is_active'
db.delete_column(u'visits_subscription', 'is_active')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'pastimes.pastime': {
'Meta': {'object_name': 'Pastime'},
'desc': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
u'users.referer': {
'Meta': {'object_name': 'Referer'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'users.user': {
'Meta': {'object_name': 'User'},
'arrive_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'bio': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'birth_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_available': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'need_sleeping': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'pastimes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "u'users'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['pastimes.Pastime']"}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '22', 'null': 'True', 'blank': 'True'}),
'referer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.Referer']", 'null': 'True', 'blank': 'True'}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "u'Asia/Bangkok'", 'max_length': '30'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'userpic_origin': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'visits.payment': {
'Meta': {'object_name': 'Payment'},
'administrant': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'payments'", 'to': u"orm['users.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pay_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 2, 2, 0, 0)'}),
'subscription': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['visits.Subscription']"}),
'value': ('django.db.models.fields.IntegerField', [], {})
},
u'visits.plan': {
'Meta': {'object_name': 'Plan'},
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'daily_fees': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'days': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'price': ('django.db.models.fields.IntegerField', [], {}),
'visits': ('django.db.models.fields.IntegerField', [], {})
},
u'visits.subscription': {
'Meta': {'object_name': 'Subscription'},
'administrant': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subscribers'", 'to': u"orm['users.User']"}),
'expire_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['visits.Plan']"}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 2, 2, 0, 0)'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subscriptions'", 'to': u"orm['users.User']"})
},
u'visits.visit': {
'Meta': {'object_name': 'Visit'},
'administrant': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'visitors'", 'to': u"orm['users.User']"}),
'arrival_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 2, 2, 0, 0)'}),
'departure_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pastimes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['pastimes.Pastime']", 'null': 'True', 'blank': 'True'}),
'subscription': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['visits.Subscription']"})
}
}
complete_apps = ['visits']
|
clincher/phuketstash
|
visits/migrations/0005_auto__add_field_subscription_is_active.py
|
Python
|
bsd-3-clause
| 9,061
|
[
"VisIt"
] |
17a35705d28dae5085424c7c2f55ac5fca220ccc707d8609bd3baba6901524ec
|
"""
Alpha diversity measures (:mod:`skbio.diversity.alpha`)
=======================================================
.. currentmodule:: skbio.diversity.alpha
This package provides implementations of various alpha diversity measures,
including measures of richness, dominance, and evenness. Some functions
generate confidence intervals (CIs). These functions have the suffix ``_ci``.
All alpha diversity measures accept a vector of counts within a single sample,
where each count is, for example, the number of observations of a particular
Operational Taxonomic Unit, or OTU. We use the term "OTU" here very loosely, as
these could be counts of any type of feature/observation (e.g., bacterial
species). We'll refer to this vector as the *counts vector* or simply *counts*
throughout the documentation.
The counts vector must be one-dimensional and contain integers representing the
number of individuals seen (or *counted*) for a particular OTU. Negative values
are not allowed; the counts vector may only contain integers greater than or
equal to zero.
The counts vector is `array_like`: anything that can be converted into a 1-D
numpy array is acceptable input. For example, you can provide a numpy array or
a native Python list and the results should be identical.
If the input to an alpha diversity measure does not meet the above
requirements, the function will raise either a ``ValueError`` or a
``TypeError``, depending on the condition that is violated.
.. note:: There are different ways that samples are represented in the
ecological literature and in related software. The alpha diversity measures
provided here *always* assume that the input contains abundance data: each
count represents the number of individuals seen for a particular OTU in the
sample. For example, if you have two OTUs, where 3 individuals were observed
from one of the OTUs and only a single individual was observed from the
other, you could represent this data in the following forms (among others):
As a vector of counts. This is the expected type of input for the alpha
diversity measures in this module. There are 3 individuals from the OTU at
index 0, and 1 individual from the OTU at index 1:
>>> counts = [3, 1]
As a vector of indices. The OTU at index 0 is observed 3 times, while the
OTU at index 1 is observed 1 time:
>>> indices = [0, 0, 0, 1]
As a vector of frequencies. We have 1 OTU that is a singleton and 1 OTU that
is a tripleton. We do not have any 0-tons or doubletons:
>>> frequencies = [0, 1, 0, 1]
Always use the first representation (a counts vector) with this module.
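   If your data is in one of the other two forms, it can be converted to a
   counts vector first; for example (a sketch, assuming numpy is available):

   >>> import numpy as np
   >>> list(np.bincount(indices))
   [3, 1]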
Functions
---------
.. autosummary::
:toctree: generated/
ace
berger_parker_d
brillouin_d
chao1
chao1_ci
dominance
doubles
enspie
equitability
esty_ci
fisher_alpha
gini_index
goods_coverage
heip_e
kempton_taylor_q
lladser_ci
lladser_pe
margalef
mcintosh_d
mcintosh_e
menhinick
michaelis_menten_fit
observed_otus
osd
robbins
shannon
simpson
simpson_e
singles
strong
Examples
--------
>>> import numpy as np
Assume we have the following abundance data for a sample, represented as a
counts vector:
>>> counts = [1, 0, 0, 4, 1, 2, 3, 0]
We can count the number of OTUs:
>>> observed_otus(counts)
5
Note that OTUs with counts of zero are ignored.
In the previous example, we provided a Python list as input. We can also
provide other types of input that are `array_like`:
>>> observed_otus((1, 0, 0, 4, 1, 2, 3, 0)) # tuple
5
>>> observed_otus(np.array([1, 0, 0, 4, 1, 2, 3, 0])) # numpy array
5
All of the alpha diversity measures work in this manner.
Other metrics include ``singles``, which tells us how many OTUs are observed
exactly one time (i.e., are *singleton* OTUs), and ``doubles``, which tells us
how many OTUs are observed exactly two times (i.e., are *doubleton* OTUs).
Let's see how many singletons and doubletons there are in the sample:
>>> singles(counts)
2
>>> doubles(counts)
1
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from skbio.util import TestRunner
from ._ace import ace
from ._chao1 import chao1, chao1_ci
from ._base import (
berger_parker_d, brillouin_d, dominance, doubles, enspie, equitability,
esty_ci, fisher_alpha, goods_coverage, heip_e, kempton_taylor_q, margalef,
mcintosh_d, mcintosh_e, menhinick, michaelis_menten_fit, observed_otus,
osd, robbins, shannon, simpson, simpson_e, singles, strong)
from ._gini import gini_index
from ._lladser import lladser_pe, lladser_ci
__all__ = ['ace', 'chao1', 'chao1_ci', 'berger_parker_d', 'brillouin_d',
'dominance', 'doubles', 'enspie', 'equitability', 'esty_ci',
'fisher_alpha', 'goods_coverage', 'heip_e', 'kempton_taylor_q',
'margalef', 'mcintosh_d', 'mcintosh_e', 'menhinick',
'michaelis_menten_fit', 'observed_otus', 'osd', 'robbins',
'shannon', 'simpson', 'simpson_e', 'singles', 'strong',
'gini_index', 'lladser_pe', 'lladser_ci']
test = TestRunner(__file__).test
|
jensreeder/scikit-bio
|
skbio/diversity/alpha/__init__.py
|
Python
|
bsd-3-clause
| 5,395
|
[
"scikit-bio"
] |
dbe62e49f8533cb5fc80ed0cec2610431480ae661460a1c73153abbe1f6efe00
|
# This test calculates derivatives of lcao overlap matrices such as
#
#        a          ~a
#       P      = < p  | Phi   >
#        i mu       i      mu
#
# and compares to finite difference results.
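#
# The reference used here is the forward finite difference
#     dX/dR ~= (X(R + dR) - X(R)) / dR,
# with dR in Angstrom; multiplying by Bohr converts the derivative to
# per-Bohr (atomic) units so it can be compared with the tci arrays.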
from ase.data.molecules import molecule
from ase.units import Bohr
from gpaw import GPAW
from gpaw.atom.basis import BasisMaker
obasis = BasisMaker('O').generate(2, 1)
hbasis = BasisMaker('H').generate(2, 1)
basis = {'O' : obasis, 'H' : hbasis}
system1 = molecule('H2O')
system1.center(vacuum=2.0)
system1.positions[1] -= 0.2
system2 = system1.copy()
system2.set_cell((3., 3., 3.))
system2.set_pbc(1)
def runcheck(system, dR, kpts=None):
    if kpts is None:
        calc = GPAW(mode='lcao', basis=basis, txt=None)
    else:
        calc = GPAW(mode='lcao', basis=basis, txt=None, kpts=kpts)
    system.set_calculator(calc)
calc.initialize(system)
calc.set_positions(system)
wfs = calc.wfs
tci = wfs.tci
tci.lcao_forces = True
calc.initialize(system)
calc.set_positions(system)
a = 0
c = 2
na = len(system)
rna = range(na)
T1 = wfs.T_qMM.copy()
P1_a = [wfs.P_aqMi[b].copy() for b in rna]
S1 = wfs.S_qMM.copy()
Theta1 = tci.Theta_qMM.copy()
dTdR_tci = tci.dTdR_kcmm[0, c].copy()
dPdR_tci_a = [tci.dPdR_akcmi[b][0, c].copy() for b in rna]
dSdR_tci = tci.dSdR_kcmm[0, c].copy()
dThetadR_tci = tci.dThetadR_kcmm[0, c].copy()
system.positions[a,c] += dR
calc.initialize(system)
calc.set_positions(system)
T2 = wfs.T_qMM.copy()
P2_a = [wfs.P_aqMi[b].copy() for b in rna]
S2 = wfs.S_qMM.copy()
Theta2 = tci.Theta_qMM.copy()
dTdR_fd = (T2 - T1) / dR * Bohr
dPdR_fd_a = [(p2 - p1) / dR * Bohr for p2, p1 in zip(P2_a, P1_a)]
dSdR_fd = (S2 - S1) / dR * Bohr
dThetadR_fd = (Theta2 - Theta1) / dR * Bohr
dPdRa_ami = wfs.get_projector_derivatives(tci, a, c, 0)
dSdR_real = wfs.get_overlap_derivatives(tci, a, c, dPdRa_ami, 0)
errs = [abs(dTdR_tci * tci.mask_amm[a] - dTdR_fd).max(),
abs(dSdR_real - dSdR_fd).max(),
max([abs(dPdRa_ami[b] - dPdR_fd_a[b]).max() for b in rna])]
print 'err dTdR', errs[0]
print 'err dSdR', errs[1]
print 'err dPdR', errs[2]
for err in errs:
assert err < 2 * dR
for dR in [1e-5]: #[1e-3, 1e-5, 1e-7]:
# Other values of dR should work fine, but we like short tests
print 'dR =', dR
print '---------------'
print 'Gamma point'
runcheck(system1, dR)
print
print 'Arbitrary k point'
runcheck(system2, dR, kpts=[(0.2, 0.3, 0.1)])
print
|
qsnake/gpaw
|
oldtest/tci_derivative.py
|
Python
|
gpl-3.0
| 2,468
|
[
"ASE",
"GPAW"
] |
1837f0c05cdac672ebf4b9c8f9d8523f73b0fd903ba9501aa2b82b1f502f77bd
|
# Orca
#
# Copyright (C) 2010 Joanmarie Diggs
# Copyright (C) 2011-2012 Igalia, S.L.
#
# Author: Joanmarie Diggs <jdiggs@igalia.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2010 Joanmarie Diggs" \
"Copyright (c) 2011-2012 Igalia, S.L."
__license__ = "LGPL"
import pyatspi
import orca.keynames as keynames
import orca.object_properties as object_properties
import orca.settings as settings
import orca.settings_manager as settings_manager
import orca.speech_generator as speech_generator
_settingsManager = settings_manager.getManager()
########################################################################
# #
# Custom SpeechGenerator #
# #
########################################################################
class SpeechGenerator(speech_generator.SpeechGenerator):
"""Provides a speech generator specific to WebKitGtk widgets."""
def __init__(self, script):
speech_generator.SpeechGenerator.__init__(self, script)
def getVoiceForString(self, obj, string, **args):
voice = settings.voices[settings.DEFAULT_VOICE]
if string.isupper():
voice = settings.voices[settings.UPPERCASE_VOICE]
return voice
def _generateLabel(self, obj, **args):
result = \
speech_generator.SpeechGenerator._generateLabel(self, obj, **args)
if result:
return result
role = args.get('role', obj.getRole())
inferRoles = [pyatspi.ROLE_CHECK_BOX,
pyatspi.ROLE_COMBO_BOX,
pyatspi.ROLE_ENTRY,
pyatspi.ROLE_LIST,
pyatspi.ROLE_PASSWORD_TEXT,
pyatspi.ROLE_RADIO_BUTTON]
        if role not in inferRoles:
return result
label = self._script.labelInference.infer(obj)
if label:
result.append(label)
result.extend(self.voice(speech_generator.DEFAULT))
return result
def __generateHeadingRole(self, obj):
result = []
role = pyatspi.ROLE_HEADING
level = self._script.utilities.headingLevel(obj)
if level:
result.append(object_properties.ROLE_HEADING_LEVEL_SPEECH % {
'role': self.getLocalizedRoleName(obj, role),
'level': level})
else:
result.append(self.getLocalizedRoleName(obj, role))
return result
def _generateRoleName(self, obj, **args):
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(speech_generator.SYSTEM)
role = args.get('role', obj.getRole())
force = args.get('force', False)
doNotSpeak = [pyatspi.ROLE_UNKNOWN]
if not force:
doNotSpeak.extend([pyatspi.ROLE_FORM,
pyatspi.ROLE_LABEL,
pyatspi.ROLE_MENU_ITEM,
pyatspi.ROLE_LIST_ITEM,
pyatspi.ROLE_PARAGRAPH,
pyatspi.ROLE_SECTION,
pyatspi.ROLE_TABLE_CELL])
        if role not in doNotSpeak:
docRoles = [pyatspi.ROLE_DOCUMENT_FRAME, pyatspi.ROLE_DOCUMENT_WEB]
if role == pyatspi.ROLE_IMAGE:
link = self._script.utilities.ancestorWithRole(
obj, [pyatspi.ROLE_LINK], docRoles)
if link:
result.append(self.getLocalizedRoleName(link))
elif role == pyatspi.ROLE_HEADING:
result.extend(self.__generateHeadingRole(obj))
else:
result.append(self.getLocalizedRoleName(obj, role))
if obj.parent and obj.parent.getRole() == pyatspi.ROLE_HEADING:
result.extend(self.__generateHeadingRole(obj.parent))
if result:
result.extend(acss)
if role == pyatspi.ROLE_LINK \
and obj.childCount and obj[0].getRole() == pyatspi.ROLE_IMAGE:
# If this is a link with a child which is an image, we
# want to indicate that.
#
acss = self.voice(speech_generator.HYPERLINK)
result.append(self.getLocalizedRoleName(obj[0]))
result.extend(acss)
return result
def _generateAncestors(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the text of the ancestors for
the object. This is typically used to present the context for
an object (e.g., the names of the window, the panels, etc.,
that the object is contained in). If the 'priorObj' attribute
of the args dictionary is set, only the differences in
ancestry between the 'priorObj' and the current obj will be
computed. The 'priorObj' is typically set by Orca to be the
previous object with focus.
"""
role = args.get('role', obj.getRole())
if role == pyatspi.ROLE_LINK:
return []
args['stopAtRoles'] = [pyatspi.ROLE_DOCUMENT_FRAME,
pyatspi.ROLE_DOCUMENT_WEB,
pyatspi.ROLE_EMBEDDED,
pyatspi.ROLE_INTERNAL_FRAME,
pyatspi.ROLE_FORM,
pyatspi.ROLE_MENU_BAR,
pyatspi.ROLE_TOOL_BAR]
args['skipRoles'] = [pyatspi.ROLE_PARAGRAPH,
pyatspi.ROLE_LIST_ITEM,
pyatspi.ROLE_TEXT]
return speech_generator.SpeechGenerator._generateAncestors(
self, obj, **args)
def _generateMnemonic(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the mnemonic for the object, or
an empty array if no mnemonic can be found.
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
if not (_settingsManager.getSetting('enableMnemonicSpeaking') \
or args.get('forceMnemonic', False)):
return []
if not self._script.utilities.isWebKitGtk(obj):
return speech_generator.SpeechGenerator._generateMnemonic(
self, obj, **args)
result = []
acss = self.voice(speech_generator.SYSTEM)
mnemonic, shortcut, accelerator = \
self._script.utilities.mnemonicShortcutAccelerator(obj)
if shortcut:
if _settingsManager.getSetting('speechVerbosityLevel') == \
settings.VERBOSITY_LEVEL_VERBOSE:
shortcut = 'Alt Shift %s' % shortcut
result = [keynames.localizeKeySequence(shortcut)]
result.extend(acss)
return result
|
h4ck3rm1k3/orca-sonar
|
src/orca/scripts/toolkits/WebKitGtk/speech_generator.py
|
Python
|
lgpl-2.1
| 7,888
|
[
"ORCA"
] |
daa19852ed2cc93ed54448dcc001651eeb1ae2179a78b9a96955aa19565d89d5
|
import unicodedata
import numpy as np
from .. import coding
from ..core.variable import Variable
# Special characters that are permitted in netCDF names except in the
# 0th position of the string
_specialchars = '_.@+- !"#$%&\\()*,:;<=>?[]^`{|}~'
# The following are reserved names in CDL and may not be used as names of
# variables, dimensions, or attributes
_reserved_names = {
"byte",
"char",
"short",
"ushort",
"int",
"uint",
"int64",
"uint64",
"float" "real",
"double",
"bool",
"string",
}
# These data-types aren't supported by netCDF3, so they are automatically
# coerced instead as indicated by the "coerce_nc3_dtype" function
_nc3_dtype_coercions = {
"int64": "int32",
"uint64": "int32",
"uint32": "int32",
"uint16": "int16",
"uint8": "int8",
"bool": "int8",
}
# encode all strings as UTF-8
STRING_ENCODING = "utf-8"
def coerce_nc3_dtype(arr):
"""Coerce an array to a data type that can be stored in a netCDF-3 file
This function performs the dtype conversions as specified by the
``_nc3_dtype_coercions`` mapping:
int64 -> int32
uint64 -> int32
uint32 -> int32
uint16 -> int16
uint8 -> int8
bool -> int8
    After casting, the data is checked for losslessness with
    ``(cast_array == original_array).all()``; a ``ValueError`` is raised if
    any value changed.
"""
dtype = str(arr.dtype)
if dtype in _nc3_dtype_coercions:
new_dtype = _nc3_dtype_coercions[dtype]
# TODO: raise a warning whenever casting the data-type instead?
cast_arr = arr.astype(new_dtype)
if not (cast_arr == arr).all():
raise ValueError(
f"could not safely cast array from dtype {dtype} to {new_dtype}"
)
arr = cast_arr
return arr
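# A quick sketch of the coercion above (illustrative only):
#
#   >>> coerce_nc3_dtype(np.array([True, False])).dtype
#   dtype('int8')
#   >>> coerce_nc3_dtype(np.array([2 ** 40]))   # value does not fit in int32
#   ValueError: could not safely cast array from dtype int64 to int32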
def encode_nc3_attr_value(value):
if isinstance(value, bytes):
pass
elif isinstance(value, str):
value = value.encode(STRING_ENCODING)
else:
value = coerce_nc3_dtype(np.atleast_1d(value))
if value.ndim > 1:
raise ValueError("netCDF attributes must be 1-dimensional")
return value
def encode_nc3_attrs(attrs):
return {k: encode_nc3_attr_value(v) for k, v in attrs.items()}
def encode_nc3_variable(var):
for coder in [
coding.strings.EncodedStringCoder(allows_unicode=False),
coding.strings.CharacterArrayCoder(),
]:
var = coder.encode(var)
data = coerce_nc3_dtype(var.data)
attrs = encode_nc3_attrs(var.attrs)
return Variable(var.dims, data, attrs, var.encoding)
def _isalnumMUTF8(c):
"""Return True if the given UTF-8 encoded character is alphanumeric
or multibyte.
Input is not checked!
"""
return c.isalnum() or (len(c.encode("utf-8")) > 1)
def is_valid_nc3_name(s):
"""Test whether an object can be validly converted to a netCDF-3
dimension, variable or attribute name
Earlier versions of the netCDF C-library reference implementation
enforced a more restricted set of characters in creating new names,
but permitted reading names containing arbitrary bytes. This
specification extends the permitted characters in names to include
multi-byte UTF-8 encoded Unicode and additional printing characters
from the US-ASCII alphabet. The first character of a name must be
alphanumeric, a multi-byte UTF-8 character, or '_' (reserved for
special names with meaning to implementations, such as the
"_FillValue" attribute). Subsequent characters may also include
printing special characters, except for '/' which is not allowed in
names. Names that have trailing space characters are also not
permitted.
"""
    if not isinstance(s, str):
        return False
num_bytes = len(s.encode("utf-8"))
return (
(unicodedata.normalize("NFC", s) == s)
and (s not in _reserved_names)
        and (num_bytes > 0)  # also rejects the empty string
and ("/" not in s)
and (s[-1] != " ")
and (_isalnumMUTF8(s[0]) or (s[0] == "_"))
and all(_isalnumMUTF8(c) or c in _specialchars for c in s)
)
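# A quick sketch of the name rules above (illustrative only):
#
#   >>> is_valid_nc3_name("temperature")
#   True
#   >>> is_valid_nc3_name("bad/name")
#   False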
|
xray/xray
|
xarray/backends/netcdf3.py
|
Python
|
apache-2.0
| 4,174
|
[
"NetCDF"
] |
5b702aebd7eed41a09ea653d1537727c9665a90971f7fb2d55c12188ba9800af
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Created on Jan 22, 2013
@author: Bharat Medasani
"""
import unittest
import os
from pymatgen.analysis.bond_valence import BVAnalyzer
from pymatgen.command_line.gulp_caller import GulpIO, GulpCaller, \
BuckinghamPotential, GulpError, get_energy_relax_structure_buckingham, get_energy_buckingham, get_energy_tersoff
from pymatgen.core.structure import Structure
from monty.os.path import which
from pymatgen.io.vasp.inputs import Poscar
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
gulp_present = which('gulp') and os.environ.get("GULP_LIB")
@unittest.skipIf(not gulp_present, "gulp not present.")
class GulpCallerTest(unittest.TestCase):
def test_run(self):
mgo_latt = [[4.212, 0, 0], [0, 4.212, 0], [0, 0, 4.212]]
mgo_specie = ["Mg"] * 4 + ["O"] * 4
mgo_frac_cord = [[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5],
[0.5, 0, 0], [0, 0.5, 0], [0, 0, 0.5], [0.5, 0.5, 0.5]]
mgo_uc = Structure(mgo_latt, mgo_specie, mgo_frac_cord, True, True)
gio = GulpIO()
gin = gio.keyword_line('optimise', 'conp')
gin += gio.structure_lines(mgo_uc, symm_flg=False)
# gin += self.gc.gulp_lib('catlow.lib')
gin += "species\nMg core 2.00000\nO core 0.86902\nO shel -2.86902\n"
gin += "buck\n"
gin += "Mg core O shel 946.627 0.31813 0.00000 0.0 10.0\n"
gin += "O shel O shel 22764.000 0.14900 27.87900 0.0 12.0\n"
        gc = GulpCaller()
        # Some inherent checks are in the run method itself.
        # They should be sufficient for raising errors.
        gout = gc.run(gin)
def test_decimal(self):
struct = Structure.from_str("""Mg2 Al4 O8
1.0
5.003532 0.000000 2.888790
1.667844 4.717375 2.888790
0.000000 0.000000 5.777581
O Mg Al
8 2 4
direct
0.736371 0.736371 0.736371 O
0.263629 0.263629 0.709114 O
0.263629 0.709114 0.263629 O
0.709114 0.263629 0.263629 O
0.736371 0.290886 0.736371 O
0.290886 0.736371 0.736371 O
0.263629 0.263629 0.263629 O
0.736371 0.736371 0.290886 O
0.125000 0.125000 0.125000 Mg
0.875000 0.875000 0.875000 Mg
0.500000 0.500000 0.000000 Al
0.500000 0.500000 0.500000 Al
0.000000 0.500000 0.500000 Al
0.500000 0.000000 0.500000 Al""", fmt='poscar')
bp = BuckinghamPotential(bush_lewis_flag="bush")
gio = GulpIO()
input = gio.buckingham_input(struct, ['relax conp'])
caller = GulpCaller()
gout = caller.run(input)
@unittest.skipIf(not gulp_present, "gulp not present.")
class GulpIOTest(unittest.TestCase):
_multiprocess_shared_ = True
def setUp(self):
p = Poscar.from_file(os.path.join(test_dir, 'POSCAR.Al12O18'),
check_for_POTCAR=False)
self.structure = p.structure
self.gio = GulpIO()
def test_keyword_line_with_correct_keywords(self):
kw = ('defect', 'property')
inp_str = self.gio.keyword_line(*kw)
for word in kw:
self.assertIn(word, inp_str)
def test_structure_lines_default_options(self):
inp_str = self.gio.structure_lines(self.structure)
self.assertIn('cell', inp_str)
self.assertIn('frac', inp_str)
self.assertIn('space', inp_str)
def test_structure_lines_no_unitcell(self):
inp_str = self.gio.structure_lines(self.structure, cell_flg=False)
self.assertNotIn('cell', inp_str)
def test_structure_lines_no_frac_coords(self):
inp_str = self.gio.structure_lines(
self.structure, cell_flg=False, frac_flg=False
)
self.assertNotIn('cell', inp_str)
self.assertIn('cart', inp_str)
@unittest.skip("Not Implemented yet")
def test_specie_potential(self):
pass
@unittest.expectedFailure
def test_library_line_explicit_path(self):
gin = self.gio.library_line(
'/Users/mbkumar/Research/Defects/GulpExe/Libraries/catlow.lib'
)
self.assertIn('lib', gin)
def test_library_line_wrong_file(self):
with self.assertRaises(GulpError):
gin = self.gio.library_line('temp_to_fail.lib')
def test_buckingham_potential(self):
mgo_latt = [[4.212, 0, 0], [0, 4.212, 0], [0, 0, 4.212]]
mgo_specie = ["Mg", 'O'] * 4
mgo_frac_cord = [[0, 0, 0], [0.5, 0, 0], [0.5, 0.5, 0], [0, 0.5, 0],
[0.5, 0, 0.5], [0, 0, 0.5], [0, 0.5, 0.5], [0.5, 0.5, 0.5]]
mgo_uc = Structure(mgo_latt, mgo_specie, mgo_frac_cord, True, True)
gin = self.gio.buckingham_potential(mgo_uc)
self.assertIn('specie', gin)
self.assertIn('buck', gin)
self.assertIn('spring', gin)
self.assertIn('Mg core', gin)
self.assertIn('O core', gin)
self.assertIn('O shel', gin)
gin = self.gio.buckingham_potential(self.structure)
self.assertIn('specie', gin)
self.assertIn('buck', gin)
self.assertIn('spring', gin)
def test_buckingham_input(self):
mgo_latt = [[4.212, 0, 0], [0, 4.212, 0], [0, 0, 4.212]]
mgo_specie = ["Mg", 'O'] * 4
mgo_frac_cord = [[0, 0, 0], [0.5, 0, 0], [0.5, 0.5, 0], [0, 0.5, 0],
[0.5, 0, 0.5], [0, 0, 0.5], [0, 0.5, 0.5], [0.5, 0.5, 0.5]]
mgo_uc = Structure(mgo_latt, mgo_specie, mgo_frac_cord, True, True)
gin = self.gio.buckingham_input(mgo_uc, keywords=('optimise', 'conp'))
self.assertIn('optimise', gin)
self.assertIn('cell', gin)
self.assertIn('specie', gin)
self.assertIn('buck', gin)
self.assertIn('spring', gin)
self.assertIn('Mg core', gin)
self.assertIn('O core', gin)
self.assertIn('O shel', gin)
# Improve the test
def test_tersoff_potential(self):
mgo_latt = [[4.212, 0, 0], [0, 4.212, 0], [0, 0, 4.212]]
mgo_specie = ["Mg", 'O'] * 4
mgo_frac_cord = [[0, 0, 0], [0.5, 0, 0], [0.5, 0.5, 0], [0, 0.5, 0],
[0.5, 0, 0.5], [0, 0, 0.5], [0, 0.5, 0.5], [0.5, 0.5, 0.5]]
mgo_uc = Structure(mgo_latt, mgo_specie, mgo_frac_cord, True, True)
gin = self.gio.tersoff_potential(mgo_uc)
self.assertIn('specie', gin)
self.assertIn('Mg core', gin)
def test_get_energy(self):
# Output string obtained from running GULP on a terminal
out_str = """ Components of energy :
--------------------------------------------------------------------------------
Interatomic potentials = 5.61135426 eV
Monopole - monopole (real) = -4.34238722 eV
Monopole - monopole (recip)= -43.45344934 eV
Monopole - monopole (total)= -47.79583656 eV
--------------------------------------------------------------------------------
Total lattice energy :
Primitive unit cell = -42.18448230 eV
Non-primitive unit cell = -168.73792920 eV
--------------------------------------------------------------------------------
Total lattice energy (in kJmol-1):
Primitive unit cell = -4070.1577 kJ/(mole unit cells)
Non-primitive unit cell = -16280.6308 kJ/(mole unit cells)
--------------------------------------------------------------------------------
Components of energy :
--------------------------------------------------------------------------------
Interatomic potentials = 6.79846039 eV
Monopole - monopole (real) = -4.45761741 eV
Monopole - monopole (recip)= -44.60653603 eV
Monopole - monopole (total)= -49.06415344 eV
--------------------------------------------------------------------------------
Total lattice energy :
Primitive unit cell = -42.26569304 eV
Non-primitive unit cell = -169.06277218 eV
--------------------------------------------------------------------------------
Total lattice energy (in kJmol-1):
Primitive unit cell = -4077.9933 kJ/(mole unit cells)
Non-primitive unit cell = -16311.9732 kJ/(mole unit cells)
--------------------------------------------------------------------------------"""
energy = self.gio.get_energy(out_str)
self.assertEqual(energy, -169.06277218)
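# A rough sketch (not pymatgen's actual implementation) of the kind of
# parsing get_energy has to perform: keep the float before the 'eV' unit on
# the last 'Non-primitive unit cell' line, e.g.
#   energy = None
#   for line in out_str.splitlines():
#       if 'Non-primitive unit cell' in line and 'eV' in line:
#           energy = float(line.split()[-2])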
def test_get_relaxed_structure(self):
# Output string obtained from running GULP on a terminal
with open(os.path.join(test_dir, 'example21.gout'), 'r') as fp:
out_str = fp.read()
struct = self.gio.get_relaxed_structure(out_str)
self.assertIsInstance(struct, Structure)
self.assertEqual(8, len(struct.sites))
self.assertEqual(4.212, struct.lattice.a)
self.assertEqual(90, struct.lattice.alpha)
@unittest.skip("Test later")
def test_tersoff_inpt(self):
gin = self.gio.tersoff_input(self.structure)
@unittest.skipIf(not gulp_present, "gulp not present.")
class GlobalFunctionsTest(unittest.TestCase):
def setUp(self):
mgo_latt = [[4.212, 0, 0], [0, 4.212, 0], [0, 0, 4.212]]
mgo_specie = ["Mg", 'O'] * 4
mgo_frac_cord = [[0, 0, 0], [0.5, 0, 0], [0.5, 0.5, 0], [0, 0.5, 0],
[0.5, 0, 0.5], [0, 0, 0.5], [0, 0.5, 0.5], [0.5, 0.5, 0.5]]
self.mgo_uc = Structure(mgo_latt, mgo_specie, mgo_frac_cord, True, True)
bv = BVAnalyzer()
val = bv.get_valences(self.mgo_uc)
el = [site.species_string for site in self.mgo_uc.sites]
self.val_dict = dict(zip(el, val))
def test_get_energy_tersoff(self):
p = Poscar.from_file(os.path.join(test_dir, 'POSCAR.Al12O18'),
check_for_POTCAR=False)
structure = p.structure
enrgy = get_energy_tersoff(structure)
self.assertIsInstance(enrgy, float)
def test_get_energy_buckingham(self):
enrgy = get_energy_buckingham(self.mgo_uc)
self.assertIsInstance(enrgy, float)
# test with vacancy structure
del self.mgo_uc[0]
energy = get_energy_buckingham(self.mgo_uc,
keywords=('qok', 'optimise', 'conp'), valence_dict=self.val_dict)
self.assertIsInstance(energy, float)
def test_get_energy_relax_structure_buckingham(self):
enrgy, struct = get_energy_relax_structure_buckingham(self.mgo_uc)
self.assertIsInstance(enrgy, float)
self.assertIsInstance(struct, Structure)
site_len = len(struct.sites)
self.assertEqual(site_len, len(self.mgo_uc.sites))
@unittest.skipIf(not gulp_present, "gulp not present.")
class BuckinghamPotentialLewisTest(unittest.TestCase):
_multiprocess_shared_ = True
def setUp(self):
self.bpl = BuckinghamPotential('lewis')
def test_existing_element(self):
self.assertIn("Sc_2+", self.bpl.pot_dict.keys())
self.assertIn("Sc_2+", self.bpl.species_dict.keys())
self.assertIn("O", self.bpl.pot_dict.keys())
self.assertIn("O_core", self.bpl.species_dict.keys())
self.assertIn("O_shel", self.bpl.species_dict.keys())
def test_non_existing_element(self):
self.assertNotIn("Li_1+", self.bpl.pot_dict.keys())
self.assertNotIn("Li_1+", self.bpl.species_dict.keys())
def test_element_different_valence(self):
self.assertNotIn("Sc_4+", self.bpl.species_dict.keys())
def test_values(self):
self.assertNotEqual('', self.bpl.species_dict['Sc_2+'])
self.assertNotEqual('', self.bpl.pot_dict['Sc_2+'])
def test_spring(self):
self.assertNotIn('Li', self.bpl.spring_dict.keys())
self.assertNotEqual('', self.bpl.spring_dict['O'])
@unittest.skipIf(not gulp_present, "gulp not present.")
class BuckinghamPotentialBushTest(unittest.TestCase):
_multiprocess_shared_ = True
def setUp(self):
self.bpb = BuckinghamPotential('bush')
def test_existing_element(self):
self.assertIn("Li", self.bpb.pot_dict.keys())
self.assertIn("Li", self.bpb.species_dict.keys())
self.assertIn("O", self.bpb.pot_dict.keys())
self.assertIn("O", self.bpb.species_dict.keys())
def test_non_existing_element(self):
self.assertNotIn("Mn", self.bpb.pot_dict.keys())
self.assertNotIn("Mn", self.bpb.species_dict.keys())
def test_element_different_valence(self):
self.assertNotEqual(2, self.bpb.species_dict["Li"]['oxi'])
def test_spring(self):
self.assertEqual('', self.bpb.spring_dict["Li"])
self.assertNotEqual('', self.bpb.spring_dict['O'])
if __name__ == '__main__':
unittest.main()
|
mbkumar/pymatgen
|
pymatgen/command_line/tests/test_gulp_caller.py
|
Python
|
mit
| 12,876
|
[
"GULP",
"VASP",
"pymatgen"
] |
da0ffb0645d084d9e857dc9a2affe01f42e73f33551107406db425e718a14f80
|
# -*- coding: utf-8 -*-
"""Factories for the OSF models, including an abstract ModularOdmFactory.
Example usage: ::
>>> from tests.factories import UserFactory
>>> user1 = UserFactory()
>>> user1.username
fred0@mail.com
>>> user2 = UserFactory()
>>> user2.username
fred1@mail.com
Factory boy docs: http://factoryboy.readthedocs.org/
"""
from django.utils import timezone
from factory import base, Sequence, SubFactory, post_generation, LazyAttribute
import mock
from mock import patch, Mock
from modularodm import Q
from modularodm.exceptions import NoResultsFound
from framework.auth import Auth
from framework.auth.utils import impute_names_model, impute_names
from framework.mongo import StoredObject
from tests.base import fake
from tests.base import get_default_metaschema
from tests import mock_addons as addons_base
from addons.wiki.models import NodeWikiPage
from addons.osfstorage.models import OsfStorageFile
from osf.models import (Subject, NotificationSubscription, NotificationDigest,
ArchiveJob, ArchiveTarget, Identifier, NodeLicense,
NodeLicenseRecord, Embargo, RegistrationApproval,
Retraction, Sanction, Comment, DraftRegistration,
MetaSchema, AbstractNode, NodeLog,
PrivateLink, Tag, AlternativeCitation, Institution,
ApiOAuth2PersonalToken, ApiOAuth2Application, ExternalAccount,
ExternalProvider, OSFUser, PreprintService,
PreprintProvider, Session, Guid)
from website.archiver import ARCHIVER_SUCCESS
from website.util import permissions
from website.exceptions import InvalidSanctionApprovalToken
# TODO: This is a hack. Check whether FactoryBoy can do this better
def save_kwargs(**kwargs):
for value in kwargs.itervalues():
if isinstance(value, StoredObject) and not value._is_loaded:
value.save()
def FakerAttribute(provider, **kwargs):
"""Attribute that lazily generates a value using the Faker library.
Example: ::
class UserFactory(ModularOdmFactory):
name = FakerAttribute('name')
"""
fake_gen = getattr(fake, provider)
if not fake_gen:
raise ValueError('{0!r} is not a valid faker provider.'.format(provider))
return LazyAttribute(lambda x: fake_gen(**kwargs))
class ModularOdmFactory(base.Factory):
"""Base factory for modular-odm objects.
"""
class Meta:
abstract = True
@classmethod
def _build(cls, target_class, *args, **kwargs):
"""Build an object without saving it."""
save_kwargs(**kwargs)
return target_class(*args, **kwargs)
@classmethod
def _create(cls, target_class, *args, **kwargs):
save_kwargs(**kwargs)
instance = target_class(*args, **kwargs)
instance.save()
return instance
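# Usage note (illustrative): calling SomeFactory() runs _create and saves the
# instance, while SomeFactory.build() runs _build and leaves it unsaved.
# NOTE: the PreprintProviderFactory immediately below is redefined later in
# this module; the later definition is the one in effect at import time.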
class PreprintProviderFactory(ModularOdmFactory):
class Meta:
model = PreprintProvider
abstract = False
def __init__(self, provider_id, provider_name):
super(PreprintProviderFactory, self).__init__()
self._id = provider_id
self.name = provider_name
self.save()
class UserFactory(ModularOdmFactory):
class Meta:
model = OSFUser
abstract = False
username = Sequence(lambda n: 'fred{0}@mail.com'.format(n))
# Don't use post generation call to set_password because
# It slows down the tests dramatically
password = 'password'
fullname = Sequence(lambda n: 'Freddie Mercury{0}'.format(n))
is_registered = True
is_claimed = True
date_confirmed = timezone.now()
merged_by = None
email_verifications = {}
verification_key = None
verification_key_v2 = {}
@post_generation
def set_names(self, create, extracted):
parsed = impute_names_model(self.fullname)
for key, value in parsed.items():
setattr(self, key, value)
if create:
self.save()
@post_generation
def set_emails(self, create, extracted):
if not self.emails.filter(address=self.username).exists():
if not self.id:
# Perform implicit save to populate M2M
self.save()
self.emails.create(address=self.username)
self.save()
class AuthUserFactory(UserFactory):
"""A user that automatically has an api key, for quick authentication.
Example: ::
user = AuthUserFactory()
res = self.app.get(url, auth=user.auth) # user is "logged in"
"""
@post_generation
def add_auth(self, create, extracted):
self.set_password('password', notify=False)
self.save()
self.auth = (self.username, 'password')
class TagFactory(ModularOdmFactory):
class Meta:
model = Tag
_id = Sequence(lambda n: 'scientastic-{}'.format(n))
class ApiOAuth2ApplicationFactory(ModularOdmFactory):
class Meta:
model = ApiOAuth2Application
owner = SubFactory(UserFactory)
name = Sequence(lambda n: 'Example OAuth2 Application #{}'.format(n))
home_url = 'ftp://ftp.ncbi.nlm.nimh.gov/'
callback_url = 'http://example.uk'
class ApiOAuth2PersonalTokenFactory(ModularOdmFactory):
class Meta:
model = ApiOAuth2PersonalToken
owner = SubFactory(UserFactory)
scopes = 'osf.full_write osf.full_read'
name = Sequence(lambda n: 'Example OAuth2 Personal Token #{}'.format(n))
class PrivateLinkFactory(ModularOdmFactory):
class Meta:
model = PrivateLink
name = "link"
key = Sequence(lambda n: 'foobar{}'.format(n))
anonymous = False
creator = SubFactory(AuthUserFactory)
class AbstractNodeFactory(ModularOdmFactory):
class Meta:
model = AbstractNode
title = 'The meaning of life'
description = 'The meaning of life is 42.'
creator = SubFactory(AuthUserFactory)
class ProjectFactory(AbstractNodeFactory):
type = 'osf.node'
category = 'project'
class CollectionFactory(ProjectFactory):
is_collection = True
class BookmarkCollectionFactory(CollectionFactory):
is_bookmark_collection = True
class NodeFactory(AbstractNodeFactory):
category = 'hypothesis'
parent = SubFactory(ProjectFactory)
class PreprintProviderFactory(ModularOdmFactory):
name = 'OSFArxiv'
description = 'Preprint service for the OSF'
class Meta:
model = PreprintProvider
@classmethod
def _create(cls, target_class, name=None, description=None, *args, **kwargs):
provider = target_class(*args, **kwargs)
provider.name = name
provider.description = description
provider.save()
return provider
class PreprintFactory(ModularOdmFactory):
creator = None
category = 'project'
doi = Sequence(lambda n: '10.12345/0{}'.format(n))
provider = SubFactory(PreprintProviderFactory)
external_url = 'http://hello.org'
class Meta:
model = PreprintService
@classmethod
def _create(cls, target_class, project=None, is_public=True, filename='preprint_file.txt', provider=None,
doi=None, external_url=None, is_published=True, subjects=None, finish=True, *args, **kwargs):
save_kwargs(**kwargs)
user = None
if project:
user = project.creator
user = kwargs.get('user') or kwargs.get('creator') or user or UserFactory()
kwargs['creator'] = user
# Original project to be converted to a preprint
project = project or AbstractNodeFactory(*args, **kwargs)
if user._id not in project.permissions:
project.add_contributor(
contributor=user,
permissions=permissions.CREATOR_PERMISSIONS,
log=False,
save=False
)
project.save()
project.reload()
file = OsfStorageFile.create(
node=project,
path='/{}'.format(filename),
name=filename,
materialized_path='/{}'.format(filename))
file.save()
preprint = target_class(node=project, provider=provider)
auth = Auth(project.creator)
if finish:
preprint.set_primary_file(file, auth=auth)
subjects = subjects or [[SubjectFactory()._id]]
preprint.set_subjects(subjects, auth=auth)
preprint.set_published(is_published, auth=auth)
if not preprint.is_published:
project._has_abandoned_preprint = True
project.preprint_article_doi = doi
project.save()
preprint.save()
return preprint
class SubjectFactory(ModularOdmFactory):
text = Sequence(lambda n: 'Example Subject #{}'.format(n))
class Meta:
model = Subject
@classmethod
def _create(cls, target_class, text=None, parents=None, *args, **kwargs):
try:
subject = Subject.find_one(Q('text', 'eq', text))
except NoResultsFound:
subject = target_class(*args, **kwargs)
subject.text = text
subject.save()
subject.parents.add(*(parents or []))
subject.save()
return subject
class RegistrationFactory(AbstractNodeFactory):
creator = None
# Default project is created if not provided
category = 'project'
@classmethod
def _build(cls, target_class, *args, **kwargs):
raise Exception('Cannot build registration without saving.')
@classmethod
def _create(cls, target_class, project=None, is_public=False,
schema=None, data=None,
archive=False, embargo=None, registration_approval=None, retraction=None,
*args, **kwargs):
save_kwargs(**kwargs)
user = None
if project:
user = project.creator
user = kwargs.get('user') or kwargs.get('creator') or user or UserFactory()
kwargs['creator'] = user
# Original project to be registered
project = project or target_class(*args, **kwargs)
if user._id not in project.permissions:
project.add_contributor(
contributor=user,
permissions=permissions.CREATOR_PERMISSIONS,
log=False,
save=False
)
project.save()
# Default registration parameters
schema = schema or get_default_metaschema()
data = data or {'some': 'data'}
auth = Auth(user=user)
register = lambda: project.register_node(
schema=schema,
auth=auth,
data=data
)
def add_approval_step(reg):
if embargo:
reg.embargo = embargo
elif registration_approval:
reg.registration_approval = registration_approval
elif retraction:
reg.retraction = retraction
else:
reg.require_approval(reg.creator)
reg.save()
reg.sanction.add_authorizer(reg.creator, reg)
reg.sanction.save()
with patch('framework.celery_tasks.handlers.enqueue_task'):
reg = register()
add_approval_step(reg)
if not archive:
with patch.object(reg.archive_job, 'archive_tree_finished', Mock(return_value=True)):
reg.archive_job.status = ARCHIVER_SUCCESS
reg.archive_job.save()
reg.sanction.state = Sanction.APPROVED
reg.sanction.save()
ArchiveJob(
src_node=project,
dst_node=reg,
initiator=user,
)
if is_public:
reg.is_public = True
reg.save()
return reg
class WithdrawnRegistrationFactory(AbstractNodeFactory):
@classmethod
def _create(cls, *args, **kwargs):
registration = kwargs.pop('registration', None)
registration.is_public = True
user = kwargs.pop('user', registration.creator)
registration.retract_registration(user)
withdrawal = registration.retraction
for token in withdrawal.approval_state.values():
try:
withdrawal.approve_retraction(user, token['approval_token'])
withdrawal.save()
return withdrawal
except InvalidSanctionApprovalToken:
continue
class ForkFactory(ModularOdmFactory):
class Meta:
model = AbstractNode
@classmethod
def _create(cls, *args, **kwargs):
project = kwargs.pop('project', None)
user = kwargs.pop('user', project.creator)
title = kwargs.pop('title', None)
fork = project.fork_node(auth=Auth(user), title=title)
fork.save()
return fork
class NodeLogFactory(ModularOdmFactory):
class Meta:
model = NodeLog
action = 'file_added'
user = SubFactory(UserFactory)
class SanctionFactory(ModularOdmFactory):
class Meta:
abstract = True
@classmethod
def _create(cls, target_class, initiated_by=None, approve=False, *args, **kwargs):
user = kwargs.get('user') or UserFactory()
kwargs['initiated_by'] = initiated_by or user
sanction = ModularOdmFactory._create(target_class, *args, **kwargs)
reg_kwargs = {
'creator': user,
'user': user,
sanction.SHORT_NAME: sanction
}
RegistrationFactory(**reg_kwargs)
if not approve:
sanction.state = Sanction.UNAPPROVED
sanction.save()
return sanction
class RetractionFactory(SanctionFactory):
class Meta:
model = Retraction
user = SubFactory(UserFactory)
class EmbargoFactory(SanctionFactory):
class Meta:
model = Embargo
user = SubFactory(UserFactory)
class RegistrationApprovalFactory(SanctionFactory):
class Meta:
model = RegistrationApproval
user = SubFactory(UserFactory)
class EmbargoTerminationApprovalFactory(ModularOdmFactory):
FACTORY_STRATEGY = base.CREATE_STRATEGY
@classmethod
def create(cls, registration=None, user=None, embargo=None, *args, **kwargs):
if registration:
if not user:
user = registration.creator
else:
user = user or AuthUserFactory()
if not embargo:
embargo = EmbargoFactory(initiated_by=user)
registration = embargo._get_registration()
else:
registration = RegistrationFactory(creator=user, user=user, embargo=embargo)
with mock.patch('website.project.sanctions.Sanction.is_approved', mock.Mock(return_value=True)):
with mock.patch('website.project.sanctions.TokenApprovableSanction.ask', mock.Mock()):
approval = registration.request_embargo_termination(Auth(user))
return approval
class NodeWikiFactory(ModularOdmFactory):
class Meta:
model = NodeWikiPage
page_name = 'home'
content = 'Some content'
version = 1
user = SubFactory(UserFactory)
node = SubFactory(NodeFactory)
@post_generation
def set_node_keys(self, create, extracted):
self.node.wiki_pages_current[self.page_name] = self._id
if self.node.wiki_pages_versions.get(self.page_name, None):
self.node.wiki_pages_versions[self.page_name].append(self._id)
else:
self.node.wiki_pages_versions[self.page_name] = [self._id]
self.node.save()
class UnregUserFactory(ModularOdmFactory):
"""Factory for an unregistered user. Uses User.create_unregistered()
to create an instance.
"""
class Meta:
model = OSFUser
abstract = False
email = Sequence(lambda n: "brian{0}@queen.com".format(n))
fullname = Sequence(lambda n: "Brian May{0}".format(n))
@classmethod
def _build(cls, target_class, *args, **kwargs):
'''Build an object without saving it.'''
return target_class.create_unregistered(*args, **kwargs)
@classmethod
def _create(cls, target_class, *args, **kwargs):
instance = target_class.create_unregistered(*args, **kwargs)
instance.save()
return instance
class UnconfirmedUserFactory(ModularOdmFactory):
"""Factory for a user that has not yet confirmed their primary email
address (username).
"""
class Meta:
model = OSFUser
username = Sequence(lambda n: 'roger{0}@queen.com'.format(n))
fullname = Sequence(lambda n: 'Roger Taylor{0}'.format(n))
password = 'killerqueen'
@classmethod
def _build(cls, target_class, username, password, fullname):
'''Build an object without saving it.'''
return target_class.create_unconfirmed(
username=username, password=password, fullname=fullname
)
@classmethod
def _create(cls, target_class, username, password, fullname):
instance = target_class.create_unconfirmed(
username=username, password=password, fullname=fullname
)
instance.save()
return instance
class AuthFactory(base.Factory):
class Meta:
model = Auth
user = SubFactory(UserFactory)
class ProjectWithAddonFactory(ProjectFactory):
"""Factory for a project that has an addon. The addon will be added to
both the Node and the creator records. ::
p = ProjectWithAddonFactory(addon='github')
p.get_addon('github') # => github node settings object
p.creator.get_addon('github') # => github user settings object
"""
# TODO: Should use mock addon objects
@classmethod
def _build(cls, target_class, addon='s3', *args, **kwargs):
'''Build an object without saving it.'''
instance = ProjectFactory._build(target_class, *args, **kwargs)
auth = Auth(user=instance.creator)
instance.add_addon(addon, auth)
instance.creator.add_addon(addon)
return instance
@classmethod
def _create(cls, target_class, addon='s3', *args, **kwargs):
instance = ProjectFactory._create(target_class, *args, **kwargs)
auth = Auth(user=instance.creator)
instance.add_addon(addon, auth)
instance.creator.add_addon(addon)
instance.save()
return instance
# Deprecated unregistered user factory, used mainly for testing migration
class DeprecatedUnregUser(object):
'''A dummy "model" for an unregistered user.'''
def __init__(self, nr_name, nr_email):
self.nr_name = nr_name
self.nr_email = nr_email
def to_dict(self):
return {"nr_name": self.nr_name, "nr_email": self.nr_email}
class DeprecatedUnregUserFactory(base.Factory):
"""Generates a dictonary represenation of an unregistered user, in the
format expected by the OSF.
::
>>> from tests.factories import UnregUserFactory
>>> UnregUserFactory()
{'nr_name': 'Tom Jones0', 'nr_email': 'tom0@example.com'}
>>> UnregUserFactory()
{'nr_name': 'Tom Jones1', 'nr_email': 'tom1@example.com'}
"""
class Meta:
model = DeprecatedUnregUser
nr_name = Sequence(lambda n: "Tom Jones{0}".format(n))
nr_email = Sequence(lambda n: "tom{0}@mail.com".format(n))
@classmethod
def _create(cls, target_class, *args, **kwargs):
return target_class(*args, **kwargs).to_dict()
_build = _create
class CommentFactory(ModularOdmFactory):
class Meta:
model = Comment
content = Sequence(lambda n: 'Comment {0}'.format(n))
is_public = True
@classmethod
def _build(cls, target_class, *args, **kwargs):
node = kwargs.pop('node', None) or NodeFactory()
user = kwargs.pop('user', None) or node.creator
target = kwargs.pop('target', None) or Guid.load(node._id)
content = kwargs.pop('content', None) or 'Test comment.'
instance = target_class(
node=node,
user=user,
target=target,
content=content,
*args, **kwargs
)
if isinstance(target.referent, target_class):
instance.root_target = target.referent.root_target
else:
instance.root_target = target
return instance
@classmethod
def _create(cls, target_class, *args, **kwargs):
node = kwargs.pop('node', None) or NodeFactory()
user = kwargs.pop('user', None) or node.creator
target = kwargs.pop('target', None) or Guid.load(node._id)
content = kwargs.pop('content', None) or 'Test comment.'
instance = target_class(
node=node,
user=user,
target=target,
content=content,
*args, **kwargs
)
if isinstance(target.referent, target_class):
instance.root_target = target.referent.root_target
else:
instance.root_target = target
instance.save()
return instance
class InstitutionFactory(ProjectFactory):
default_institution_attributes = {
'_id': fake.md5,
'name': fake.company,
'logo_name': fake.file_name,
'auth_url': fake.url,
'domains': lambda: [fake.url()],
'email_domains': lambda: [fake.domain_name()],
}
@classmethod
def _build(cls, target_class, *args, **kwargs):
inst = ProjectFactory._build(target_class)
for inst_attr, node_attr in Institution.attribute_map.items():
default = cls.default_institution_attributes.get(inst_attr)
if callable(default):
default = default()
setattr(inst, node_attr, kwargs.pop(inst_attr, default))
for key, val in kwargs.items():
setattr(inst, key, val)
return Institution(inst)
@classmethod
def _create(cls, target_class, *args, **kwargs):
inst = ProjectFactory._build(target_class)
for inst_attr, node_attr in Institution.attribute_map.items():
default = cls.default_institution_attributes.get(inst_attr)
if callable(default):
default = default()
setattr(inst, node_attr, kwargs.pop(inst_attr, default))
for key, val in kwargs.items():
setattr(inst, key, val)
inst.save()
return Institution(inst)
class NotificationSubscriptionFactory(ModularOdmFactory):
class Meta:
model = NotificationSubscription
class NotificationDigestFactory(ModularOdmFactory):
class Meta:
model = NotificationDigest
class ExternalAccountFactory(ModularOdmFactory):
class Meta:
model = ExternalAccount
provider = 'mock2'
provider_id = Sequence(lambda n: 'user-{0}'.format(n))
provider_name = 'Fake Provider'
display_name = Sequence(lambda n: 'user-{0}'.format(n))
class SessionFactory(ModularOdmFactory):
class Meta:
model = Session
@classmethod
def _build(cls, target_class, *args, **kwargs):
user = kwargs.pop('user', None)
instance = target_class(*args, **kwargs)
if user:
instance.data['auth_user_username'] = user.username
instance.data['auth_user_id'] = user._primary_key
instance.data['auth_user_fullname'] = user.fullname
return instance
@classmethod
def _create(cls, target_class, *args, **kwargs):
instance = cls._build(target_class, *args, **kwargs)
instance.save()
return instance
class MockOAuth2Provider(ExternalProvider):
name = "Mock OAuth 2.0 Provider"
short_name = "mock2"
client_id = "mock2_client_id"
client_secret = "mock2_client_secret"
auth_url_base = "https://mock2.com/auth"
callback_url = "https://mock2.com/callback"
auto_refresh_url = "https://mock2.com/callback"
refresh_time = 300
expiry_time = 9001
def handle_callback(self, response):
return {
'provider_id': 'mock_provider_id'
}
class MockAddonNodeSettings(addons_base.AddonNodeSettingsBase):
pass
class MockAddonUserSettings(addons_base.AddonUserSettingsBase):
pass
class MockAddonUserSettingsMergeable(addons_base.AddonUserSettingsBase):
def merge(self):
pass
class MockOAuthAddonUserSettings(addons_base.AddonOAuthUserSettingsBase):
oauth_provider = MockOAuth2Provider
class MockOAuthAddonNodeSettings(addons_base.AddonOAuthNodeSettingsBase):
oauth_provider = MockOAuth2Provider
folder_id = 'foo'
folder_name = 'Foo'
folder_path = '/Foo'
class ArchiveTargetFactory(ModularOdmFactory):
class Meta:
model = ArchiveTarget
class ArchiveJobFactory(ModularOdmFactory):
class Meta:
model = ArchiveJob
class AlternativeCitationFactory(ModularOdmFactory):
class Meta:
model = AlternativeCitation
@classmethod
def _create(cls, target_class, *args, **kwargs):
name = kwargs.get('name')
text = kwargs.get('text')
instance = target_class(
name=name,
text=text
)
instance.save()
return instance
class DraftRegistrationFactory(ModularOdmFactory):
class Meta:
model = DraftRegistration
@classmethod
def _create(cls, *args, **kwargs):
branched_from = kwargs.get('branched_from')
initiator = kwargs.get('initiator')
registration_schema = kwargs.get('registration_schema')
registration_metadata = kwargs.get('registration_metadata')
if not branched_from:
project_params = {}
if initiator:
project_params['creator'] = initiator
branched_from = ProjectFactory(**project_params)
initiator = branched_from.creator
registration_schema = registration_schema or MetaSchema.find()[0]
registration_metadata = registration_metadata or {}
draft = DraftRegistration.create_from_node(
branched_from,
user=initiator,
schema=registration_schema,
data=registration_metadata,
)
return draft
class NodeLicenseRecordFactory(ModularOdmFactory):
class Meta:
model = NodeLicenseRecord
@classmethod
def _create(cls, *args, **kwargs):
kwargs['node_license'] = kwargs.get(
'node_license',
NodeLicense.find_one(
Q('name', 'eq', 'No license')
)
)
return super(NodeLicenseRecordFactory, cls)._create(*args, **kwargs)
class IdentifierFactory(ModularOdmFactory):
class Meta:
model = Identifier
referent = SubFactory(RegistrationFactory)
value = Sequence(lambda n: 'carp:/2460{}'.format(n))
@classmethod
def _create(cls, *args, **kwargs):
kwargs['category'] = kwargs.get('category', 'carpid')
return super(IdentifierFactory, cls)._create(*args, **kwargs)
def render_generations_from_parent(parent, creator, num_generations):
current_gen = parent
for generation in xrange(0, num_generations):
next_gen = NodeFactory(
parent=current_gen,
creator=creator,
title=fake.sentence(),
description=fake.paragraph()
)
current_gen = next_gen
return current_gen
def render_generations_from_node_structure_list(parent, creator, node_structure_list):
new_parent = None
for node_number in node_structure_list:
if isinstance(node_number, list):
render_generations_from_node_structure_list(new_parent or parent, creator, node_number)
else:
new_parent = render_generations_from_parent(parent, creator, node_number)
return new_parent
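# Illustrative example: node_structure_list=[1, [2]] first builds a chain of
# one node under `parent`, then a chain of two nodes branching under that
# new node.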
def create_fake_user():
email = fake.email()
name = fake.name()
parsed = impute_names(name)
user = UserFactory(
username=email,
fullname=name,
is_registered=True,
is_claimed=True,
date_registered=fake.date_time(),
emails=[email],
**parsed
)
user.set_password('faker123')
user.save()
return user
def create_fake_project(creator, n_users, privacy, n_components, name, n_tags, presentation_name, is_registration):
auth = Auth(user=creator)
project_title = name if name else fake.sentence()
if not is_registration:
project = ProjectFactory(
title=project_title,
description=fake.paragraph(),
creator=creator
)
else:
project = RegistrationFactory(
title=project_title,
description=fake.paragraph(),
creator=creator
)
project.set_privacy(privacy)
for _ in range(n_users):
contrib = create_fake_user()
project.add_contributor(contrib, auth=auth)
if isinstance(n_components, int):
for _ in range(n_components):
NodeFactory(
project=project,
title=fake.sentence(),
description=fake.paragraph(),
creator=creator
)
elif isinstance(n_components, list):
render_generations_from_node_structure_list(project, creator, n_components)
for _ in range(n_tags):
project.add_tag(fake.word(), auth=auth)
if presentation_name is not None:
project.add_tag(presentation_name, auth=auth)
project.add_tag('poster', auth=auth)
project.save()
return project
|
caneruguz/osf.io
|
tests/factories.py
|
Python
|
apache-2.0
| 29,464
|
[
"Brian"
] |
0ba5b610cd2a7a8c7584fd8227fc51f90e9d1c96e08f9a757947d790a7ce04df
|
from netCDF4 import Dataset
class NetcdfCopier(object):
"""create a copy of the netcdf, can exclude certain variables and/or dimensions"""
# vbl : variables black list
# dbl : dimensions black list
# f : format
def copy(self,src,dst,dbl=[],vbl=[],f="NETCDF4"):
# dimensions that aren't in the blacklist
for dim in [d for d in src.dimensions.values() if d.name not in dbl]:
if dim.isunlimited():
dst.createDimension(dim.name,None)
else:
dst.createDimension(dim.name,dim.size)
# variables that don't run on the blacklisted dimensions
#(set.intersection checks the variable's dimensions against the blacklist)
# and are not blacklisted
for var in [v for v in src.variables.values() if not set(v.dimensions).intersection(dbl) and v.name not in vbl]:
if '_FillValue' in var.ncattrs():
newvar = dst.createVariable(var.name,var.dtype,var.dimensions, fill_value = var.getncattr('_FillValue'))
else:
newvar = dst.createVariable(var.name,var.dtype,var.dimensions)
newvar[:] = var[:]
for attr in var.ncattrs():
if attr != '_FillValue':
newvar.setncattr(attr,var.getncattr(attr))
return dst
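# Minimal usage sketch (illustrative; the file names and the excluded
# dimension/variable names below are assumptions, not part of this module):
if __name__ == '__main__':
    src = Dataset('input.nc', 'r')
    dst = Dataset('output.nc', 'w', format='NETCDF4')
    NetcdfCopier().copy(src, dst, dbl=['time'], vbl=['history'])
    src.close()
    dst.close()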
|
RDCEP/hybrid-dile-server
|
lib/utils/netcdfcopier.py
|
Python
|
apache-2.0
| 1,230
|
[
"NetCDF"
] |
01cbea2812de8ceb02625ba6026bf191b1b377aff8cb9a2d2057f12e30730258
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# oidping - Check OpenID server availability
# Copyright (C) 2003-2015 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Front end to check OpenID server availability"""
import cgi
import cgitb
cgitb.enable()
from shared.functionality.oidping import main
from shared.cgiscriptstub import run_cgi_script_possibly_with_cert
run_cgi_script_possibly_with_cert(main)
|
heromod/migrid
|
mig/cgi-bin/oidping.py
|
Python
|
gpl-2.0
| 1,170
|
[
"Brian"
] |
f2dbeedaa72dcfc3c07e1aa2b4dcce693a4b9d45f08d7ae762af945653cd1cdd
|
#!/usr/bin/python
#Audio Tools, a module and set of tools for manipulating audio data
#Copyright (C) 2007-2012 Brian Langenberger
#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import unittest
import audiotools
import ConfigParser
import tempfile
import os
import os.path
from hashlib import md5
import random
import decimal
import test_streams
import cStringIO
import subprocess
parser = ConfigParser.SafeConfigParser()
parser.read("test.cfg")
def do_nothing(self):
pass
#add a bunch of decorator metafunctions like LIB_CORE
#which can be wrapped around individual tests as needed
for section in parser.sections():
for option in parser.options(section):
if (parser.getboolean(section, option)):
vars()["%s_%s" % (section.upper(),
option.upper())] = lambda function: function
else:
vars()["%s_%s" % (section.upper(),
option.upper())] = lambda function: do_nothing
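# For example, given a test.cfg section "[lib]" with "core = yes", the loop
# above defines LIB_CORE as a pass-through decorator, so a test can be gated:
#   @LIB_CORE
#   def test_foo(self):
#       ...
# (the section/option names here are illustrative and depend on test.cfg)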
class BLANK_PCM_Reader:
def __init__(self, length,
sample_rate=44100, channels=2, bits_per_sample=16,
channel_mask=None):
self.length = length
self.sample_rate = sample_rate
self.channels = channels
if (channel_mask is None):
self.channel_mask = audiotools.ChannelMask.from_channels(channels)
else:
self.channel_mask = channel_mask
self.bits_per_sample = bits_per_sample
self.total_frames = length * sample_rate
self.original_frames = self.total_frames
self.single_pcm_frame = audiotools.pcm.from_list(
[1] * channels, channels, bits_per_sample, True)
def read(self, pcm_frames):
if (self.total_frames > 0):
frame = audiotools.pcm.from_frames(
[self.single_pcm_frame] *
min(pcm_frames, self.total_frames))
self.total_frames -= frame.frames
return frame
else:
return audiotools.pcm.FrameList(
"", self.channels, self.bits_per_sample, True, True)
def close(self):
pass
def reset(self):
self.total_frames = self.original_frames
class RANDOM_PCM_Reader(BLANK_PCM_Reader):
def read(self, pcm_frames):
if (self.total_frames > 0):
frames_to_read = min(pcm_frames, self.total_frames)
frame = audiotools.pcm.FrameList(
os.urandom(frames_to_read *
(self.bits_per_sample / 8) *
self.channels),
self.channels,
self.bits_per_sample,
True,
True)
self.total_frames -= frame.frames
return frame
else:
return audiotools.pcm.FrameList(
"", self.channels, self.bits_per_sample, True, True)
class EXACT_BLANK_PCM_Reader(BLANK_PCM_Reader):
def __init__(self, pcm_frames,
sample_rate=44100, channels=2, bits_per_sample=16,
channel_mask=None):
self.sample_rate = sample_rate
self.channels = channels
if (channel_mask is None):
self.channel_mask = audiotools.ChannelMask.from_channels(channels)
else:
self.channel_mask = channel_mask
self.bits_per_sample = bits_per_sample
self.total_frames = pcm_frames
self.original_frames = self.total_frames
self.single_pcm_frame = audiotools.pcm.from_list(
[1] * channels, channels, bits_per_sample, True)
class EXACT_SILENCE_PCM_Reader(BLANK_PCM_Reader):
def __init__(self, pcm_frames,
sample_rate=44100, channels=2, bits_per_sample=16,
channel_mask=None):
self.sample_rate = sample_rate
self.channels = channels
if (channel_mask is None):
self.channel_mask = audiotools.ChannelMask.from_channels(channels)
else:
self.channel_mask = channel_mask
self.bits_per_sample = bits_per_sample
self.total_frames = pcm_frames
self.original_frames = self.total_frames
self.single_pcm_frame = audiotools.pcm.from_list(
[0] * channels, channels, bits_per_sample, True)
class EXACT_RANDOM_PCM_Reader(RANDOM_PCM_Reader):
def __init__(self, pcm_frames,
sample_rate=44100, channels=2, bits_per_sample=16,
channel_mask=None):
self.sample_rate = sample_rate
self.channels = channels
if (channel_mask is None):
self.channel_mask = audiotools.ChannelMask.from_channels(channels)
else:
self.channel_mask = channel_mask
self.bits_per_sample = bits_per_sample
self.total_frames = pcm_frames
self.original_frames = self.total_frames
self.single_pcm_frame = audiotools.pcm.from_list(
[1] * channels, channels, bits_per_sample, True)
class MD5_Reader:
def __init__(self, pcmreader):
self.pcmreader = pcmreader
self.sample_rate = pcmreader.sample_rate
self.channels = pcmreader.channels
self.channel_mask = pcmreader.channel_mask
self.bits_per_sample = pcmreader.bits_per_sample
self.md5 = md5()
def reset(self):
if (hasattr(self.pcmreader, "reset")):
self.pcmreader.reset()
self.md5 = md5()
def __repr__(self):
return "MD5Reader(%s,%s,%s)" % (self.sample_rate,
self.channels,
self.bits_per_sample)
def read(self, pcm_frames):
framelist = self.pcmreader.read(pcm_frames)
self.md5.update(framelist.to_bytes(False, True))
return framelist
def digest(self):
return self.md5.digest()
def hexdigest(self):
return self.md5.hexdigest()
def close(self):
self.pcmreader.close()
class Variable_Reader:
def __init__(self, pcmreader):
self.pcmreader = audiotools.BufferedPCMReader(pcmreader)
self.sample_rate = pcmreader.sample_rate
self.channels = pcmreader.channels
self.channel_mask = pcmreader.channel_mask
self.bits_per_sample = pcmreader.bits_per_sample
self.md5 = md5()
self.range = range(self.channels * (self.bits_per_sample / 8),
4096)
def read(self, pcm_frames):
return self.pcmreader.read(random.choice(self.range))
def close(self):
self.pcmreader.close()
class Join_Reader:
#given a list of 1 channel PCM readers,
#combines them into a single reader
#a bit like PCMCat but across channels instead of PCM frames
def __init__(self, pcm_readers, channel_mask):
if (len(set([r.sample_rate for r in pcm_readers])) != 1):
raise ValueError("all readers must have the same sample rate")
if (len(set([r.bits_per_sample for r in pcm_readers])) != 1):
raise ValueError("all readers must have the same bits per sample")
if (set([r.channels for r in pcm_readers]) != set([1])):
raise ValueError("all readers must be 1 channel")
self.channels = len(pcm_readers)
self.channel_mask = channel_mask
self.sample_rate = pcm_readers[0].sample_rate
self.bits_per_sample = pcm_readers[0].bits_per_sample
self.readers = map(audiotools.BufferedPCMReader, pcm_readers)
def read(self, pcm_frames):
return audiotools.pcm.from_channels(
[r.read(pcm_frames) for r in self.readers])
def close(self):
for r in self.readers:
r.close()
class MiniFrameReader:
def __init__(self, channel_data, sample_rate, channel_mask,
bits_per_sample):
self.sample_rate = sample_rate
self.channels = len(channel_data)
self.channel_mask = channel_mask
self.bits_per_sample = bits_per_sample
self.pcm_frames = zip(*channel_data)
def read(self, pcm_frames):
try:
return audiotools.pcm.from_list(self.pcm_frames.pop(0),
self.channels,
self.bits_per_sample,
True)
except IndexError:
return audiotools.pcm.FrameList("",
self.channels,
self.bits_per_sample,
True, True)
def close(self):
self.pcm_frames = []
class FrameCounter:
def __init__(self, channels, bits_per_sample, sample_rate, value=0):
self.channels = channels
self.bits_per_sample = bits_per_sample
self.sample_rate = sample_rate
self.value = value
def __repr__(self):
return "FrameCounter(%d %d %d %d)" % \
(self.channels,
self.bits_per_sample,
self.sample_rate,
self.value)
def update(self, f):
self.value += len(f)
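# self.value accumulates raw PCM byte counts (via update), so int(counter)
# yields the stream duration in whole seconds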
def __int__(self):
return int(round(decimal.Decimal(self.value) /
(self.channels *
(self.bits_per_sample / 8) *
self.sample_rate)))
#probstat does this better, but I don't want to require that
#for something used only rarely
def Combinations(items, n):
if (n == 0):
yield []
else:
for i in xrange(len(items)):
for combos in Combinations(items[i + 1:], n - 1):
yield [items[i]] + combos
def Possibilities(*lists):
if (len(lists) == 0):
yield ()
else:
remainder = list(Possibilities(*lists[1:]))
for item in lists[0]:
for rem in remainder:
yield (item,) + rem
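# Illustrative behavior (not from the original source):
#   list(Combinations([1, 2, 3], 2)) -> [[1, 2], [1, 3], [2, 3]]
#   list(Possibilities([1, 2], 'ab')) -> [(1, 'a'), (1, 'b'), (2, 'a'), (2, 'b')]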
from_channels = audiotools.ChannelMask.from_channels
#these are combinations that tend to occur in nature
SHORT_PCM_COMBINATIONS = ((11025, 1, from_channels(1), 8),
(22050, 1, from_channels(1), 8),
(22050, 1, from_channels(1), 16),
(32000, 2, from_channels(2), 16),
(44100, 1, from_channels(1), 16),
(44100, 2, from_channels(2), 16),
(48000, 1, from_channels(1), 16),
(48000, 2, from_channels(2), 16),
(48000, 6, audiotools.ChannelMask.from_fields(
front_left=True, front_right=True,
front_center=True, low_frequency=True,
back_left=True, back_right=True), 16),
(192000, 2, from_channels(2), 24),
(96000, 6, audiotools.ChannelMask.from_fields(
front_left=True, front_right=True,
front_center=True, low_frequency=True,
back_left=True, back_right=True), 24))
TEST_COVER1 = \
"""eJzt1H1M0mkcAPAH0bSXZT/R6BLpxNJOz4rMXs7UP86Xq+AcQ5BCdNMLgwQ6EU0qu9tdm4plLb0p
mG62Uf7yZWpZgEpnvmTmHBmQChiSaGZUpEmKcdTt1nb3z/XPbbf1ebbnj+/3eb7Py549jkeOx2DN
/rh9cQCBQIDvnA04jGBt7HEWEwAiEQQDADzAB45R8C1wQ7q6uiLdnJ2bm9sy91Ue7k6eK1cuXwV5
enlBnhCEWotBo7zX+0DQOv916/38NmzYgELjNuKwGzHYDdj3RRDOqe7L3Fd7eKzGekPe2E/muA0g
D8QsYhaJwAEXCIGEEI4ugAEIgAQuSPCRc4euHggXpDO7aQ0CIFxdXFyQ7w/6gTPh6rYM8vJ3R3nj
8CSf7c5h3n8lP3ofhf4ZHQGrkAjn6kgIRAML7e/5zz77z/nfxDSKWK20hYHeTUNHW5qFC/jmlvoR
Ra5sei8Lvipud4Dzy89/Ws105Vr2Dvr96NLgCRotL3e7LO4O+jCVgQ+ztY6LM1UUsmWzKAqFNTWY
05cy95dstGnPWEOlcYOcK7A5juKtqpg1pzbxtovTYZaSq89WCXGRgqzguWe2FYcX6rJKSrN1Wxl3
d9La4tEFoyNGB+gb1jdRs9UnpmsycHpSFry5RpyhTjE/IZKD9Xrt1z22oQucVzdPMM4MluSdnZLK
lEnDzZpHLyUaHkGAZkpyufGCmHcaVvWL1u6+W9HoJ6k/U/vplF2CWeK63JdWrtHQFNMVo4rt9yEl
k/CQHh+ZQHo2JLlsEoYG+Z2LvKZJN7HHi6Yqj5972hBSITbXVplrYeaffvgiJyl0NHNe6c8/u1pg
vxTkbZrHh5drLOrdwzIVM4urE+OEMKuwhRtRwtA+cP/JMEk+/Yvlhth57VncDEYTdTGIf71b0djf
o2AzFa11PcTUxKHEIQbELTpNKy//bajTVuJnbGNrMSbxyLYbOVJ5bdOuEIVOm6hOVFP4FEpuWPRw
dYrygkc9umdvwL7r3Y+eXVePKs5QKMZDMkm+JWoTJaZrQBKu3fk8gYxfICeQwsDlV0tbesvsvVZq
C+fe29D1RCoX/fixkdM4viQwdLYw+hZDKcR8fNTTmuCiNHYDMzBD86BYPRW+fkAzxv+lcC7Dwj2k
qM6dgRvl13Ke3oiZC8MnJJIJ+U1+c7rFNxf//UtCVL7u4N/f7QB7H/xYz/N8MMPhNTJaGu4pO2Ql
ieqjWF7y4pHiQ/YAmF0wDSumA4UvNMW9UTQDOcMchbwQJyqdME2F8bfMZG2zveESJdmG27JYmVSR
A0snBUmEhF8HyWOnBJFuN/Osp1EmXwwxaMsITc3bYqT1K0VsvV1EZSmyOLGp2fSChfEZIlYQG5nf
kkie8GzY2mdHB5VM8ji8WjtmlfxYc2Dd0Yc60dxxG136UOWjDc8b2mEbimL0MpocoDpb0rCv2awg
RvvpJoYf2QWF6avT6cIQWQ6/QSeJQiWUMoqYYqmut1Ro8b87IbcwGiYwkwGU+ic0eaXl4NXK0YW6
AxcvpsgrfbMNjb49FXCtqFRFGOiYLrA+0yFZ4/bBs1b6nvlw+gqFluJtHrnXoyg84Ss/WcOltxPD
VaiEWxUFhQVVygIGr38MO8MXlB9XTJvfjOLwN1R8JE6/p4xAmGfD9V3Jl+eqLOSwmFwobDE+Lxdt
ijh5aaxfXp9fXZZGm8CkdbcHMi1tEjUDlhzcCb9uF7IlgreGmjS1IJZEmDf5EeKlJj61s7dTLL/V
MUm5WDdmTJ/4/o5L25GmrOKIhwPX+MnxowTb/bd06xU4QDYPtDeVQcdOYU0BlBbDqYPrykhxjOxx
gyzdC154JZq/WsMZrigsXJq+8rDTiEJB+MguB9ikaXsX0aFOmdTxjlZYPcd5rW+Hqfgdwr2Zbcn2
k1cdYPBJUpoSvlUo4b9JrgnoCYyMWNm77Sv1q+fcZrE15Iqnl7rgGg5mPifFQgmCgShpY8rC3NhL
zMtP+eKwIVLxFFz0tKgW/qa83BIY3R1xzp76+6xvJlHaeIDRVrw1ulNq4SxqjtlNcIcoKQTWV40z
o/ez5iJPo7/8tO/0s8/+jxCO4T8AO2LoJg==""".decode('base64').decode('zlib')
TEST_COVER2 = \
"""eJztV4lT00kWDrqzoEiC16JgiGcxoyCDiNFByCggIEdcWQXEcAoZbgmQRE6RS0YIogYEiYwgAcwg
gqIhCYciRs6IHEIiiVwiRwgQQoQcs41bUzvM1O4fsDuvqqv719/3+vXxvVf1SzvlaK2xVnstBALR
sLWxPA2BqMwvN7VVYMbyic0A6NZctHENh0DUNy43FUhe/hYwqRph62Cl+m6N+vpt0K96uOcgkHUY
W8tj/yByhQPBP5B9VzfMTgZhDbF3vqvOsd3wJNer1b7vzXnSoi3mpOGpdWv2VvpWwwoTrE4M5vhf
2ZJ2yuf5130lVRfI19NrvnFIL6ttKz+UX9S3NqLmUFnQ2FEElDJ28Fv5dbQbRyQdr+uInE58/2yM
0x7Z0QG33b1B5XJ8zrpUyPfvVTQJkJdwSJgqGP7af5laCYHhvyEwXAn9nr0C+gN7BfRn2P/FsJ+Z
+aj4uMYUDSSf6IPHL2AIAz19fZ9uX6Yb12LoF+8VFnp7en54c8+itrbWxMQEbSbprouVKaW/3CAe
nY7YPj0j7WMSRK9fv05FxBFFtVI+nhdsip/qY10Kt7Oz25llY36vurq6quoACoUyNAxdnBs1MDBo
ZvN4vF1Zr++3ylNSUmx2v+3vz92mewR3H/AA6WNb7uS7CpFQ6GAmToSZX7XcWYIu4D8LFcgXxcYH
DhwwNqZAqfl/sUdL34dz8kwC3yIWFVKBEw8Oh+fm5qLNFy8QCFKkIEbcZsyx3JmFRikOHmFeHHwh
m2Yaxgp8W7MHYqUDzUIfNsmqqFPvLrGwpKSERqM9ePCgtPTTi2T15n6lUqn54sEZ2kk7Ozc3t3rg
aIztOAy3NxnqiDDxeZXOYDBo7WednXNu3bqPQxkZVYLVe2jOeqngLqA75iWSPake8YpINa9flIrm
QW51ILiL4Vki7vDRo/kUioIbWLEntV65FKi2A4mUglN1rHLK9t1KpbXmGLK9K2nteDz+4bnqvdWe
N7Ky/u7qemlupHlkZpaN4LS0BAQEnIQK4mRCFovF1o3WjxXY7L6xjR8jbrfL2W+Gn3LB3aZQ4Mdd
aqMk5c/4E/qe7XCln7Ff2xYEop47VWyXs1ZdvQvxjb7+NjjcQRI1wIgUscSOOKOxAYKgvKws1yTw
LA4fETHfjhTo24gXxwpgGhrF9dwrX6nnr6JWlVo0HIwcoxAW5uftGdkikciDRQxT81qY6t+1a9f4
Yy1D93yzaHwA3b+LKhPV15eXB4OlgDRKy8sdHNpzjUsYjCg2CT7OHBsZkY9TNkr4z8mm51VhZvOn
rK3ZHz54TmQpZNIcMlkDBkvVPPuzSyeX+52RUVb+j+zh4ODgzZs3l+lVuD72U8oXVWG6QSEh7lUX
mqt8W087AQjLuYu57uft7c1nXSId6UrLhN+mvmKztQzOPYkYf7uwsJCQkPDOI95s3z5aXZ35EVk/
tgAIIEMHCaC7YNtdVAdXV1c9x3yb+OQcj7gaOp3+6NFMQ8Lq8cyCw2E7tTPMgeDMzMxiY2OZeGFL
W1sMELxSZpak+TRUML3pA+/ARYz883AmELyVlRVYivA+zNrCwmJpKmuXNTjL+mtNc3NzZx+e7+/t
PeQvDR/rsNqZJZfLwcM55AUEBrrV4Hzd3d0dHR2Bb3i4uIB/aKjjlpatfFYLAXEJ/w+5TP9bXD/J
X19yc3Jc3mlCx2GjdLSX7QGNZheMXuqJ1CTcjvvxi82JxU48sLWya0tcLrfpmhaHYvqsqMiH9zS4
pqaGTCbXy+fs1HboZtYvTdCamprANpKTk2Eo+YxUEF+gbDElTLNGs928K13OnDmDxWIPag/UxUYH
LBiGFGgMQd85g7P6+AyzLondo8aLiUfrwIOQSCSQkLuTZnrdQoXvax7X1cWBejIz2FjiSOE+8rJY
IlWw5k5iMBg0mvM0mKdL/JCQlpbWveHN7DD73UOM2+nTuInusiLrTFJGBgiKYRE7VbABs4237QnN
gRPNKD/4C0bk5Ia0lx/b71ioecRKehoavlfzEvFr0yyHSgrilhZ4oU5oPiMy0M/PL4AeswheYK77
UWWl0X3FK5GHwFyHquY8LQ8k37qVpOnXkb/1+Nf79zuGyIHbjiQX/d7u7ic/dBYCxW3etIk1+0qn
LPpQsiaDyWxtaTndODExMZ+jmORhE3230utw4eGNCEFpWpN3c8aIlaK33I0g5Ermu9AIVJx8frxL
BxliLwgLCvr5p5+2m7AGU3TeYitGF/pnMsVnbJQIEyQStfSpyO1pkK2BI5XzyrsSFIOSlJu9Xcsk
UGhhW3R07pgSQnDRMTGs4uI9SZqZbFANj6s9A9UAyDU3am6wMbVL6jBgbiqxCQ2t4GGNe1yyvbR1
dL8YAoEOhsFgHq2k0dFRkDxTE8sWNZJlvXfv3uNqZZHivLw8kAmrVaHroNC4+U7rVCj8pEDapOUB
qEBNk0KhUCQS1EYT/P3H7481oDjYFvthGdNDUR/xeVhmUCZ6m56enqQ5MTm5Me1lrjE2W991Q8YJ
LX2XGaVMFD/bpIUciHA6duwYTrDP+WF3Tw+oB3pIJEGxJElMTNyRpOVOHNQOLdAIua7h1E3e5wzq
/E3awbEOyr79+/mPsRwxByV67en6Vyrtph7648ePIf1VxRUVFUzmciK3NzdfmnmuCt/6Ek6tBE9M
pVKBaLKBkckKuZiDiJeHLemVfitxzVa5OAq9TF+9fRpy1RQyBP21/9fU0LTmbz+vmv6GCYYroD86
Q/8LeyX0e/ZK6M+w/z9h5ahFWOF6xsYTVuUy8O8BsbVytHx43PPKPwEw98Hh""".decode('base64').decode('zlib')
TEST_COVER3 = \
"""eJz7f+P/AwYBLzdPNwZGRkYGDyBk+H+bwZmBl5OLm4uDl5uLm4+Pl19YQVRYSEhYXUZOXEFP09BA\nT1NXx9jKy87YzM1cR9ch3NHNxy8oOMjILioxKiDBKzDIH2QIIx8fn7CgsJqoqJq/qa6pP8ng/wEG\nQQ6GFIYUZkZBBiZBRmZBxv9HGMTATkUGLBzsQHEJAUZGNBlmJiNHoIwImnogAIkKYoreYuBhZgRa\nxSzIYM9wpviCpICZQknDjcaLzEnsLrwdsiCuwwSfmS+4O6QFrBRyHF40bmRexHaED8R18FDz+cJ6\nBKYMSZeKsFoV0yOgsgnIuk7wdQg/ULP5wuaCTwvEoga4RUKc/baME5HdA9KVwu7CyXJ8XsMJJPdA\nLVrC0pRy3iEGyXAFMwewp5gcDZ8vMELzBZirMOPzBUkFNCdB/F75gmcCpt8VPCAemQBW1nCTEewk\nsEfk/98EALdspDk=\n""".decode('base64').decode('zlib')
TEST_COVER4 = \
"""iVBORw0KGgoAAAANSUhEUgAAAfQAAAH0AQMAAADxGE3JAAAAAXNSR0IArs4c6QAAAANQTFRFAAAA
p3o92gAAAAlwSFlzAAALEwAACxMBAJqcGAAAAAd0SU1FB9sCEA4rOb0fGtgAAAAIdEVYdENvbW1l
bnQA9syWvwAAADVJREFUeNrtwTEBAAAAwqD1T+1hDaAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAbn0AAAGDIqjNAAAAAElFTkSuQmCC""".decode('base64')
#this is a very large, plain BMP encoded as bz2
HUGE_BMP = \
"""QlpoOTFBWSZTWSpJrRQACVR+SuEoCEAAQAEBEAIIAABAAAEgAAAIoABwU0yMTExApURDRoeppjv2
2uMceMt8M40qoj5nGLjFQkcuWdsL3rW+ugRSA6SFFV4lUR1/F3JFOFCQKkmtFA==""".decode('base64')
from test_formats import *
from test_core import *
from test_metadata import *
from test_utils import *
if (__name__ == '__main__'):
unittest.main()
|
R-a-dio/python-audio-tools
|
test/test.py
|
Python
|
gpl-2.0
| 17,878
|
[
"Brian"
] |
a0e008efbb8859d044588f33f56b477cd9155e2d9a6c84ece7b01ff50d461fa9
|
import cv2
import numpy as np
#from skimage.measure import compare_ssim
'''
MOG2
virtual int getHistory() const { return history; }
virtual void setHistory(int _nframes) { history = _nframes; }
virtual int getNMixtures() const { return nmixtures; }
virtual void setNMixtures(int nmix) { nmixtures = nmix; }
virtual double getBackgroundRatio() const { return backgroundRatio; }
virtual void setBackgroundRatio(double _backgroundRatio) { backgroundRatio = (float)_backgroundRatio; }
virtual double getVarThreshold() const { return varThreshold; }
virtual void setVarThreshold(double _varThreshold) { varThreshold = _varThreshold; }
virtual double getVarThresholdGen() const { return varThresholdGen; }
virtual void setVarThresholdGen(double _varThresholdGen) { varThresholdGen = (float)_varThresholdGen; }
virtual double getVarInit() const { return fVarInit; }
virtual void setVarInit(double varInit) { fVarInit = (float)varInit; }
virtual double getVarMin() const { return fVarMin; }
virtual void setVarMin(double varMin) { fVarMin = (float)varMin; }
virtual double getVarMax() const { return fVarMax; }
virtual void setVarMax(double varMax) { fVarMax = (float)varMax; }
virtual double getComplexityReductionThreshold() const { return fCT; }
virtual void setComplexityReductionThreshold(double ct) { fCT = (float)ct; }
virtual bool getDetectShadows() const { return bShadowDetection; }
virtual void setDetectShadows(bool detectshadows)
'''
class SeperatorMOG2_OCL:
def __init__(self, hist=16, shadows=False):
# Use Gaussian mixture based subtractor
self.bgsub = cv2.createBackgroundSubtractorMOG2(history=hist, varThreshold=33, detectShadows=shadows)
# do separation with ocl (reduces computing time by 50% on odroid)
def seperate(self, img):
h,w = img.shape[:2]
#img = cv2.pyrDown(cv2.UMat(img))
img = cv2.resize(cv2.UMat(img), (0, 0), fx=0.3, fy=0.3, interpolation=cv2.INTER_AREA)
rangeRes = self.bgsub.apply(img)
#rangeRes = cv2.pyrUp(rangeRes)
rangeRes = cv2.resize(rangeRes,(w,h))
ret, rangeRes = cv2.threshold(rangeRes, 10, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
#rangeRes = cv2.dilate(rangeRes, None, iterations=2)
return True,rangeRes.get()
class SeperatorMOG2:
def __init__(self, hist=120, shadows=False):
# Use Gaussian mixture based subtractor
self.bgsub = cv2.createBackgroundSubtractorMOG2(history=hist, varThreshold=33, detectShadows=shadows)
# separation
def seperate(self, img):
h,w = img.shape[:2]
#img = cv2.pyrDown(cv2.UMat(img))
img = cv2.resize(img,(w/2,h/2))
rangeRes = self.bgsub.apply(img)
#rangeRes = cv2.pyrUp(rangeRes)
rangeRes = cv2.resize(rangeRes,(w,h))
ret, rangeRes = cv2.threshold(rangeRes, 10, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
#rangeRes = cv2.dilate(rangeRes, None, iterations=2)
return True,rangeRes
'''
GMG
createBackgroundSubtractorMOG(int history=200, int nmixtures=5,
double backgroundRatio=0.7, double noiseSigma=0);
virtual int getMaxFeatures() const { return maxFeatures; }
virtual void setMaxFeatures(int _maxFeatures) { maxFeatures = _maxFeatures; }
virtual double getDefaultLearningRate() const { return learningRate; }
virtual void setDefaultLearningRate(double lr) { learningRate = lr; }
virtual int getNumFrames() const { return numInitializationFrames; }
virtual void setNumFrames(int nframes) { numInitializationFrames = nframes; }
virtual int getQuantizationLevels() const { return quantizationLevels; }
virtual void setQuantizationLevels(int nlevels) { quantizationLevels = nlevels; }
virtual double getBackgroundPrior() const { return backgroundPrior; }
virtual void setBackgroundPrior(double bgprior) { backgroundPrior = bgprior; }
virtual int getSmoothingRadius() const { return smoothingRadius; }
virtual void setSmoothingRadius(int radius) { smoothingRadius = radius; }
virtual double getDecisionThreshold() const { return decisionThreshold; }
virtual void setDecisionThreshold(double thresh) { decisionThreshold = thresh; }
virtual bool getUpdateBackgroundModel() const { return updateBackgroundModel; }
virtual void setUpdateBackgroundModel(bool update) { updateBackgroundModel = update; }
virtual double getMinVal() const { return minVal_; }
virtual void setMinVal(double val) { minVal_ = val; }
virtual double getMaxVal() const { return maxVal_; }
virtual void setMaxVal(double val) { maxVal_ = val; }
'''
class SeperatorGMG:
def __init__(self, hist=120, shadows=True):
# Use Gaussian mixture based subtractor
self.bgsub = cv2.bgsegm.createBackgroundSubtractorGMG(initializationFrames=20, decisionThreshold=0.95)
def getVarThreshold(self):
return self.bgsub.getDecisionThreshold()
def setVarThreshold(self, value):
self.bgsub.setDecisionThreshold(value)
def seperate(self, img, learningRate=-1.0):
rangeRes = self.bgsub.apply(img)
#rangeRes = cv2.dilate(rangeRes, None, iterations=2)
return True, rangeRes
'''
KNN
BackgroundSubtractorKNNImpl(int _history, float _dist2Threshold, bool _bShadowDetection=true)
virtual int getHistory() const { return history; }
virtual void setHistory(int _nframes) { history = _nframes; }
virtual int getNSamples() const { return nN; }
virtual void setNSamples(int _nN) { nN = _nN; }//needs reinitialization!
virtual int getkNNSamples() const { return nkNN; }
virtual void setkNNSamples(int _nkNN) { nkNN = _nkNN; }
virtual double getDist2Threshold() const { return fTb; }
virtual void setDist2Threshold(double _dist2Threshold) { fTb = (float)_dist2Threshold; }
virtual bool getDetectShadows() const { return bShadowDetection; }
virtual void setDetectShadows(bool detectshadows) { bShadowDetection = detectshadows; }
virtual int getShadowValue() const { return nShadowDetection; }
virtual void setShadowValue(int value) { nShadowDetection = (uchar)value; }
virtual double getShadowThreshold() const { return fTau; }
virtual void setShadowThreshold(double value) { fTau = (float)value; }
'''
class SeperatorKNN:
def __init__(self, hist=8, shadows=True):
# Use Gaussian mixture based subtractor
#history", "dist2Threshold", "detectShadows"
self.bgsub = cv2.createBackgroundSubtractorKNN(history=hist, dist2Threshold=400.0, detectShadows=shadows)
def getVarThreshold(self):
return self.bgsub.getDist2Threshold()
def setVarThreshold(self, value):
self.bgsub.setDist2Threshold(value)
def seperate(self, img):
#gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
rangeRes = self.bgsub.apply(img)
rangeRes = cv2.dilate(rangeRes, None, iterations=3)
return True, rangeRes
class simpleBackgroundV3:
def __init__(self, delay=3, threshold=250):
self.maxlen = delay
self.threshold = threshold
##self.stack = []
##self.mean = None
self.prev_gray = None
self.dark = None
def seperate(self, img):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (3, 3), 0)
if self.prev_gray is None:
self.prev_gray = gray
self.dark = np.zeros(gray.shape, np.uint8)
# nice but it takes too much resources; imported locally so skimage is
# only required when this experimental class is actually used
from skimage.measure import compare_ssim
(score, diff) = compare_ssim(gray, self.prev_gray, full=True)
print("SSIM: {}".format(score))
diff = (diff * 255).astype("uint8")
self.prev_gray = gray
ret, thres = cv2.threshold(diff, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
print("ret: %d" % (ret))
if ret > 250:
thres = self.dark
thres = cv2.dilate(thres, None, iterations=2)
return True, thres
class simpleBackgroundV1:
def __init__(self, history=5, threshold=80):
self.maxlen = history
self.threshold = threshold
self.index = 0
self.hasImage = False
self.stack = []
self.mean = None
self.d0kernel = np.ones((3,3),np.uint8)
def seperate(self, img):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#cvXorS(input, cvScalar(255), output)
if self.hasImage is False:
for i in range(self.maxlen):
self.stack.append(np.zeros_like(gray))
self.hasImage = True
self.mean = np.zeros_like(gray)
# find eveything not mean
igray = gray ^ 255
imean = self.mean ^ 255
diff = 2 * cv2.absdiff(self.mean, gray)
idiff = 2 * cv2.absdiff(imean, igray)
diff += idiff # amplify the difference
#diff = cv2.dilate(diff, self.d0kernel, iterations=1 )
ret, thres = cv2.threshold(diff, self.threshold, 255, cv2.THRESH_BINARY)
thres = cv2.dilate(thres, self.d0kernel, iterations=1 )
# build new mean
self.stack[self.index] = gray
mean = np.zeros(gray.shape, np.int32)
for a in self.stack:
mean += a
mean /= (self.maxlen)
self.mean = mean.astype(gray.dtype)
self.index += 1; self.index %= self.maxlen
return True,thres
class simpleBackground:
def __init__(self, threshold=.033):
self.threshold = threshold
self.mean = None
#self.kernel = np.ones((3,3), np.uint8)
def seperate(self, img):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# faster with smaller image. (background separation is expensive)
# gray = cv2.cvtColor(img[0:img.shape[0]-1, int(img.shape[1]*(1-ROI)):img.shape[1]-1], cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (3, 3), 0)
if self.mean is None:
self.mean = gray.copy().astype("float32")
self.mean = cv2.accumulateWeighted(gray, self.mean, 0.033)
# (src, dst, scale=1.0, shift=0.0)
# dst = <uchar8> scale * src + shift
diff = cv2.absdiff(gray, cv2.convertScaleAbs(self.mean))
#diff = cv2.norm(gray, cv2.convertScaleAbs(self.mean), cv2.NORM_L1)
# THRESH_OTSU creates a lot of noise on slightly noisy background
# ret, thres = cv2.threshold(diff, self.threshold, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
ret, thres = cv2.threshold(diff, self.threshold, 255, cv2.THRESH_BINARY)
thres = cv2.dilate(thres, None, iterations=2)
#thres = cv2.morphologyEx(thres, cv2.MORPH_CLOSE, self.kernel, iterations = 2)
return True, thres
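# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). All of the
# separator classes above expose the same `seperate(img)` method (spelled as
# in the original) returning (ok, foreground_mask), so any of them can be
# driven frame by frame like this. Assumes cv2 is imported at the top of the
# module (it is used throughout) and that OpenCV can open camera 0; swap in
# a video file path as needed. The threshold value below is hypothetical.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    separator = simpleBackground(threshold=25)
    cap = cv2.VideoCapture(0)
    while cap.isOpened():
        ok, frame = cap.read()
        if not ok:
            break
        ok, mask = separator.seperate(frame)
        if ok:
            cv2.imshow('foreground mask', mask)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()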
|
barney-NG/pyCAMTracker
|
src/tracker/Background.py
|
Python
|
mit
| 10,695
|
[
"Gaussian"
] |
b32256dc7c09768ebdc95e9a773d5d6f9c706c4bf98976ebd19d42e100d6c28a
|
"""
## Automatically adapted for numpy.oldnumeric Jun 27, 2008 by -c
Replaced numpy.oldnumeric with numpy methods - Jan 2015, PGedeck
"""
#pylint: disable=E1101,C0111,R0904
import rdkit.Numerics.rdAlignment as rdAlg
from rdkit import Geometry
import unittest
import numpy as np
import math
import copy
def lstFeq(l1, l2, tol=1.e-4):
if len(list(l1)) != len(list(l2)):
return 0
for i in range(len(list(l1))):
if not feq(l1[i], l2[i], tol):
return 0
return 1
def feq(v1, v2, tol2=1e-4):
return abs(v1 - v2) <= tol2
def transformPoint(trans, pt):
pt2 = copy.copy(list(pt))
pt2.append(1.0)
pt2 = np.array(pt2)
res = np.dot(trans, pt2)
return res[:3]
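# Illustrative sketch (not part of the original test suite): a minimal
# alignment round trip. GetAlignmentTransform returns a result whose first
# element is the SSD and whose second is a 4x4 affine transform, which can
# be applied to each probe point with the transformPoint() helper above.
def exampleAlignment():
  refPts = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]]
  prbPts = [[2.0, 2.0, 0.0], [2.0, 3.0, 0.0]]
  res = rdAlg.GetAlignmentTransform(refPts, prbPts)
  ssd, trans = res[0], res[1]
  aligned = [transformPoint(trans, pt) for pt in prbPts]
  return ssd, aligned  # ssd should be ~0 for these congruent point sets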
class TestCase(unittest.TestCase):
def test1Basic(self):
# passing two numeric arrays
    # np.float was a deprecated alias for the builtin float (removed in
    # NumPy 1.24), so the builtin is used directly here and below.
    refPts = np.zeros((2, 3), float)
    prbPts = np.zeros((2, 3), float)
refPts[1, 0] = 1.0
prbPts[0, 0] = 2.0
prbPts[0, 1] = 2.0
prbPts[1, 0] = 2.0
prbPts[1, 1] = 3.0
res = rdAlg.GetAlignmentTransform(refPts, prbPts)
self.assertTrue(feq(res[0], 0.0))
refLst = list(refPts)
cnt = 0
for item in list(prbPts):
self.assertTrue(lstFeq(transformPoint(res[1], item), refLst[cnt]))
cnt += 1
    # repeat with lists and tuples
refPts = ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0))
prbPts = ((2.0, 2.0, 0.0), (2.0, 3.0, 0.0))
res = rdAlg.GetAlignmentTransform(refPts, prbPts)
self.assertTrue(feq(res[0], 0.0))
refPts = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]]
prbPts = [[2.0, 2.0, 0.0], [2.0, 3.0, 0.0]]
res = rdAlg.GetAlignmentTransform(refPts, prbPts)
self.assertTrue(feq(res[0], 0.0))
# mix it up
    refPts = np.zeros((2, 3), float)
refPts[1, 0] = 1.0
res = rdAlg.GetAlignmentTransform(refPts, prbPts)
self.assertTrue(feq(res[0], 0.0))
def test2Weights(self):
refPts = np.array([[-math.cos(math.pi / 6), -math.sin(math.pi / 6), 0.0],
[math.cos(math.pi / 6), -math.sin(math.pi / 6), 0.0], [0.0, 1.0, 0.0]],
                      float)
prbPts = np.array([[-2 * math.sin(math.pi / 6) + 3.0, 2 * math.cos(math.pi / 6), 4.0],
[-2 * math.sin(math.pi / 6) + 3.0, -2 * math.cos(math.pi / 6), 4.0],
                       [5.0, 0.0, 4.0]], float)
res = rdAlg.GetAlignmentTransform(refPts, prbPts)
self.assertTrue(feq(res[0], 3.0))
target = [[-1.732, -1., 0.], [1.732, -1., 0.], [0., 2., 0.]]
cnt = 0
for item in list(prbPts):
self.assertTrue(lstFeq(transformPoint(res[1], item), target[cnt]))
cnt += 1
weights = np.array([1.0, 1.0, 2.0], np.float)
res = rdAlg.GetAlignmentTransform(refPts, prbPts, weights)
self.assertTrue(feq(res[0], 3.75))
cnt = 0
target = [[-1.732, -1.25, 0.], [1.732, -1.25, 0.], [0., 1.75, 0.]]
for item in list(prbPts):
self.assertTrue(lstFeq(transformPoint(res[1], item), target[cnt]))
cnt += 1
weights = [1.0, 1.0, 2.0]
res = rdAlg.GetAlignmentTransform(refPts, prbPts, weights)
self.assertTrue(feq(res[0], 3.75))
weights = [1.0, 2.0, 2.0]
res = rdAlg.GetAlignmentTransform(refPts, prbPts, weights)
self.assertTrue(feq(res[0], 4.8))
def test3tetra(self):
refPts = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]],
                      float)
    prbPts = np.array([[2.0, 2.0, 3.0], [3.0, 2.0, 3.0], [2.0, 3.0, 3.0]], float)
self.assertRaises(ValueError, lambda: rdAlg.GetAlignmentTransform(refPts, prbPts))
prbPts = np.array([[2.0, 2.0, 3.0], [3.0, 2.0, 3.0], [2.0, 3.0, 3.0], [2.0, 2.0, 4.0]],
                      float)
res = rdAlg.GetAlignmentTransform(refPts, prbPts)
self.assertTrue(feq(res[0], 0.0))
wts = [1.0, 1.0, 1.0]
self.assertRaises(ValueError, lambda: rdAlg.GetAlignmentTransform(refPts, prbPts, wts))
wts = [1.0, 1.0, 1.0, 1.0]
res = rdAlg.GetAlignmentTransform(refPts, prbPts, wts)
self.assertTrue(feq(res[0], 0.0))
# test reflection
prbPts = np.array([[2.0, 2.0, 3.0], [3.0, 2.0, 3.0], [2.0, 2.0, 4.0], [2.0, 3.0, 3.0]],
                      float)
res = rdAlg.GetAlignmentTransform(refPts, prbPts, wts)
self.assertTrue(feq(res[0], 1.0))
res = rdAlg.GetAlignmentTransform(refPts, prbPts, wts, 1)
self.assertTrue(feq(res[0], 0.0))
cnt = 0
refLst = list(refPts)
for item in list(prbPts):
self.assertTrue(lstFeq(transformPoint(res[1], item), refLst[cnt]))
cnt += 1
def test4points(self):
refPts = (Geometry.Point3D(0.0, 0.0, 0.0),
Geometry.Point3D(1.0, 0.0, 0.0),
Geometry.Point3D(0.0, 1.0, 0.0),
Geometry.Point3D(0.0, 0.0, 1.0), )
prbPts = (Geometry.Point3D(2.0, 2.0, 3.0),
Geometry.Point3D(3.0, 2.0, 3.0),
Geometry.Point3D(2.0, 3.0, 3.0),
Geometry.Point3D(2.0, 2.0, 4.0), )
res = rdAlg.GetAlignmentTransform(refPts, prbPts)
self.assertTrue(feq(res[0], 0.0))
def test5errorHandling(self):
refPts = (Geometry.Point3D(0.0, 0.0, 0.0),
Geometry.Point3D(1.0, 0.0, 0.0),
Geometry.Point3D(0.0, 1.0, 0.0),
Geometry.Point3D(0.0, 0.0, 1.0), )
prbPts = (1,
2,
3,
4, )
self.assertRaises(ValueError, lambda: rdAlg.GetAlignmentTransform(refPts, prbPts))
prbPts = ()
self.assertRaises(ValueError, lambda: rdAlg.GetAlignmentTransform(refPts, prbPts))
prbPts = 1
self.assertRaises(ValueError, lambda: rdAlg.GetAlignmentTransform(refPts, prbPts))
prbPts = (Geometry.Point3D(2.0, 2.0, 3.0),
Geometry.Point3D(3.0, 2.0, 3.0),
Geometry.Point3D(2.0, 3.0, 3.0),
(2.0, 2.0, 5.0), )
self.assertRaises(ValueError, lambda: rdAlg.GetAlignmentTransform(refPts, prbPts))
if __name__ == '__main__':
print("Testing Alignment Wrapper code:")
unittest.main()
|
bp-kelley/rdkit
|
Code/Numerics/Alignment/Wrap/testAlignment.py
|
Python
|
bsd-3-clause
| 5,911
|
[
"RDKit"
] |
a7ec4c744156ec5ca70483f61c098713fe09bdf1d871bf7cfa5e885cbbb15981
|
# -*- coding: utf-8 -*-
"""SQLite parser plugin for Mozilla Firefox history database files."""
from dfdatetime import posix_time as dfdatetime_posix_time
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import sqlite
from plaso.parsers.sqlite_plugins import interface
class FirefoxPlacesBookmarkAnnotationEventData(events.EventData):
"""Firefox bookmark annotation event data.
Attributes:
content (str): annotation content.
offset (str): identifier of the row, from which the event data was
extracted.
query (str): SQL query that was used to obtain the event data.
title (str): title of the bookmark folder.
url (str): bookmarked URL.
"""
DATA_TYPE = 'firefox:places:bookmark_annotation'
def __init__(self):
"""Initializes event data."""
super(FirefoxPlacesBookmarkAnnotationEventData, self).__init__(
data_type=self.DATA_TYPE)
self.content = None
self.offset = None
self.query = None
self.title = None
self.url = None
class FirefoxPlacesBookmarkFolderEventData(events.EventData):
"""Firefox bookmark folder event data.
Attributes:
offset (str): identifier of the row, from which the event data was
extracted.
query (str): SQL query that was used to obtain the event data.
title (str): title of the bookmark folder.
"""
DATA_TYPE = 'firefox:places:bookmark_folder'
def __init__(self):
"""Initializes event data."""
super(FirefoxPlacesBookmarkFolderEventData, self).__init__(
data_type=self.DATA_TYPE)
self.offset = None
self.query = None
self.title = None
class FirefoxPlacesBookmarkEventData(events.EventData):
"""Firefox bookmark event data.
Attributes:
host (str): visited hostname.
offset (str): identifier of the row, from which the event data was
extracted.
places_title (str): places title.
query (str): SQL query that was used to obtain the event data.
title (str): title of the bookmark folder.
type (int): bookmark type.
url (str): bookmarked URL.
visit_count (int): visit count.
"""
DATA_TYPE = 'firefox:places:bookmark'
def __init__(self):
"""Initializes event data."""
super(FirefoxPlacesBookmarkEventData, self).__init__(
data_type=self.DATA_TYPE)
self.host = None
self.offset = None
self.places_title = None
self.query = None
self.title = None
self.type = None
self.url = None
self.visit_count = None
class FirefoxPlacesPageVisitedEventData(events.EventData):
"""Firefox page visited event data.
Attributes:
from_visit (str): URL that referred to the visited page.
hidden (str): value to indicated if the URL was hidden.
host (str): visited hostname.
offset (str): identifier of the row, from which the event data was
extracted.
query (str): SQL query that was used to obtain the event data.
title (str): title of the visited page.
typed (str): value to indicated if the URL was typed.
url (str): URL of the visited page.
visit_count (int): visit count.
visit_type (str): transition type for the event.
"""
DATA_TYPE = 'firefox:places:page_visited'
def __init__(self):
"""Initializes event data."""
super(FirefoxPlacesPageVisitedEventData, self).__init__(
data_type=self.DATA_TYPE)
self.from_visit = None
self.hidden = None
self.host = None
self.offset = None
self.query = None
self.title = None
self.typed = None
self.url = None
self.visit_count = None
self.visit_type = None
class FirefoxHistoryPlugin(interface.SQLitePlugin):
"""SQLite parser plugin for Mozilla Firefox history database files.
The Mozilla Firefox history database file is typically stored in:
places.sqlite
"""
NAME = 'firefox_history'
DATA_FORMAT = 'Mozilla Firefox history SQLite database (places.sqlite) file'
REQUIRED_STRUCTURE = {
'moz_places': frozenset([
'url', 'title', 'visit_count', 'rev_host', 'hidden', 'typed', 'id']),
'moz_historyvisits': frozenset([
'id', 'visit_date', 'from_visit', 'visit_type', 'place_id']),
'moz_bookmarks': frozenset([
'type', 'title', 'dateAdded', 'lastModified', 'id', 'fk']),
'moz_items_annos': frozenset([
'content', 'dateAdded', 'lastModified', 'id', 'item_id'])}
QUERIES = [
(('SELECT moz_historyvisits.id, moz_places.url, moz_places.title, '
'moz_places.visit_count, moz_historyvisits.visit_date, '
'moz_historyvisits.from_visit, moz_places.rev_host, '
'moz_places.hidden, moz_places.typed, moz_historyvisits.visit_type '
'FROM moz_places, moz_historyvisits '
'WHERE moz_places.id = moz_historyvisits.place_id'),
'ParsePageVisitedRow'),
(('SELECT moz_bookmarks.type, moz_bookmarks.title AS bookmark_title, '
'moz_bookmarks.dateAdded, moz_bookmarks.lastModified, '
'moz_places.url, moz_places.title AS places_title, '
'moz_places.rev_host, moz_places.visit_count, moz_bookmarks.id '
'FROM moz_places, moz_bookmarks '
'WHERE moz_bookmarks.fk = moz_places.id AND moz_bookmarks.type <> 3'),
'ParseBookmarkRow'),
(('SELECT moz_items_annos.content, moz_items_annos.dateAdded, '
'moz_items_annos.lastModified, moz_bookmarks.title, '
'moz_places.url, moz_places.rev_host, moz_items_annos.id '
'FROM moz_items_annos, moz_bookmarks, moz_places '
'WHERE moz_items_annos.item_id = moz_bookmarks.id '
'AND moz_bookmarks.fk = moz_places.id'),
'ParseBookmarkAnnotationRow'),
(('SELECT moz_bookmarks.id, moz_bookmarks.title,'
'moz_bookmarks.dateAdded, moz_bookmarks.lastModified '
'FROM moz_bookmarks WHERE moz_bookmarks.type = 2'),
'ParseBookmarkFolderRow')]
_SCHEMA_V24 = {
'moz_anno_attributes': (
'CREATE TABLE moz_anno_attributes ( id INTEGER PRIMARY KEY, name '
'VARCHAR(32) UNIQUE NOT NULL)'),
'moz_annos': (
'CREATE TABLE moz_annos ( id INTEGER PRIMARY KEY, place_id INTEGER '
'NOT NULL, anno_attribute_id INTEGER, mime_type VARCHAR(32) DEFAULT '
'NULL, content LONGVARCHAR, flags INTEGER DEFAULT 0, expiration '
'INTEGER DEFAULT 0, type INTEGER DEFAULT 0, dateAdded INTEGER '
'DEFAULT 0, lastModified INTEGER DEFAULT 0)'),
'moz_bookmarks': (
'CREATE TABLE moz_bookmarks ( id INTEGER PRIMARY KEY, type INTEGER, '
'fk INTEGER DEFAULT NULL, parent INTEGER, position INTEGER, title '
'LONGVARCHAR, keyword_id INTEGER, folder_type TEXT, dateAdded '
'INTEGER, lastModified INTEGER)'),
'moz_bookmarks_roots': (
'CREATE TABLE moz_bookmarks_roots ( root_name VARCHAR(16) UNIQUE, '
'folder_id INTEGER)'),
'moz_favicons': (
'CREATE TABLE moz_favicons ( id INTEGER PRIMARY KEY, url '
'LONGVARCHAR UNIQUE, data BLOB, mime_type VARCHAR(32), expiration '
'LONG)'),
'moz_historyvisits': (
'CREATE TABLE moz_historyvisits ( id INTEGER PRIMARY KEY, '
'from_visit INTEGER, place_id INTEGER, visit_date INTEGER, '
'visit_type INTEGER, session INTEGER)'),
'moz_inputhistory': (
'CREATE TABLE moz_inputhistory ( place_id INTEGER NOT NULL, input '
'LONGVARCHAR NOT NULL, use_count INTEGER, PRIMARY KEY (place_id, '
'input))'),
'moz_items_annos': (
'CREATE TABLE moz_items_annos ( id INTEGER PRIMARY KEY, item_id '
'INTEGER NOT NULL, anno_attribute_id INTEGER, mime_type VARCHAR(32) '
'DEFAULT NULL, content LONGVARCHAR, flags INTEGER DEFAULT 0, '
'expiration INTEGER DEFAULT 0, type INTEGER DEFAULT 0, dateAdded '
'INTEGER DEFAULT 0, lastModified INTEGER DEFAULT 0)'),
'moz_keywords': (
'CREATE TABLE moz_keywords ( id INTEGER PRIMARY KEY AUTOINCREMENT, '
'keyword TEXT UNIQUE)'),
'moz_places': (
'CREATE TABLE moz_places ( id INTEGER PRIMARY KEY, url LONGVARCHAR, '
'title LONGVARCHAR, rev_host LONGVARCHAR, visit_count INTEGER '
'DEFAULT 0, hidden INTEGER DEFAULT 0 NOT NULL, typed INTEGER '
'DEFAULT 0 NOT NULL, favicon_id INTEGER, frecency INTEGER DEFAULT '
'-1 NOT NULL, last_visit_date INTEGER )')}
_SCHEMA_V25 = {
'moz_anno_attributes': (
'CREATE TABLE moz_anno_attributes ( id INTEGER PRIMARY KEY, name '
'VARCHAR(32) UNIQUE NOT NULL)'),
'moz_annos': (
'CREATE TABLE moz_annos ( id INTEGER PRIMARY KEY, place_id INTEGER '
'NOT NULL, anno_attribute_id INTEGER, mime_type VARCHAR(32) DEFAULT '
'NULL, content LONGVARCHAR, flags INTEGER DEFAULT 0, expiration '
'INTEGER DEFAULT 0, type INTEGER DEFAULT 0, dateAdded INTEGER '
'DEFAULT 0, lastModified INTEGER DEFAULT 0)'),
'moz_bookmarks': (
'CREATE TABLE moz_bookmarks ( id INTEGER PRIMARY KEY, type INTEGER, '
'fk INTEGER DEFAULT NULL, parent INTEGER, position INTEGER, title '
'LONGVARCHAR, keyword_id INTEGER, folder_type TEXT, dateAdded '
'INTEGER, lastModified INTEGER, guid TEXT)'),
'moz_bookmarks_roots': (
'CREATE TABLE moz_bookmarks_roots ( root_name VARCHAR(16) UNIQUE, '
'folder_id INTEGER)'),
'moz_favicons': (
'CREATE TABLE moz_favicons ( id INTEGER PRIMARY KEY, url '
'LONGVARCHAR UNIQUE, data BLOB, mime_type VARCHAR(32), expiration '
'LONG, guid TEXT)'),
'moz_historyvisits': (
'CREATE TABLE moz_historyvisits ( id INTEGER PRIMARY KEY, '
'from_visit INTEGER, place_id INTEGER, visit_date INTEGER, '
'visit_type INTEGER, session INTEGER)'),
'moz_hosts': (
'CREATE TABLE moz_hosts ( id INTEGER PRIMARY KEY, host TEXT NOT '
'NULL UNIQUE, frecency INTEGER, typed INTEGER NOT NULL DEFAULT 0, '
'prefix TEXT)'),
'moz_inputhistory': (
'CREATE TABLE moz_inputhistory ( place_id INTEGER NOT NULL, input '
'LONGVARCHAR NOT NULL, use_count INTEGER, PRIMARY KEY (place_id, '
'input))'),
'moz_items_annos': (
'CREATE TABLE moz_items_annos ( id INTEGER PRIMARY KEY, item_id '
'INTEGER NOT NULL, anno_attribute_id INTEGER, mime_type VARCHAR(32) '
'DEFAULT NULL, content LONGVARCHAR, flags INTEGER DEFAULT 0, '
'expiration INTEGER DEFAULT 0, type INTEGER DEFAULT 0, dateAdded '
'INTEGER DEFAULT 0, lastModified INTEGER DEFAULT 0)'),
'moz_keywords': (
'CREATE TABLE moz_keywords ( id INTEGER PRIMARY KEY AUTOINCREMENT, '
'keyword TEXT UNIQUE)'),
'moz_places': (
'CREATE TABLE moz_places ( id INTEGER PRIMARY KEY, url LONGVARCHAR, '
'title LONGVARCHAR, rev_host LONGVARCHAR, visit_count INTEGER '
'DEFAULT 0, hidden INTEGER DEFAULT 0 NOT NULL, typed INTEGER '
'DEFAULT 0 NOT NULL, favicon_id INTEGER, frecency INTEGER DEFAULT '
'-1 NOT NULL, last_visit_date INTEGER , guid TEXT)'),
'sqlite_stat1': (
'CREATE TABLE sqlite_stat1(tbl, idx, stat)')}
SCHEMAS = [_SCHEMA_V24, _SCHEMA_V25]
# Cache queries.
URL_CACHE_QUERY = (
'SELECT h.id AS id, p.url, p.rev_host FROM moz_places p, '
'moz_historyvisits h WHERE p.id = h.place_id')
# TODO: move to formatter.
_BOOKMARK_TYPES = {
1: 'URL',
2: 'Folder',
3: 'Separator',
}
def ParseBookmarkAnnotationRow(
self, parser_mediator, query, row, **unused_kwargs):
"""Parses a bookmark annotation row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
"""
query_hash = hash(query)
event_data = FirefoxPlacesBookmarkAnnotationEventData()
event_data.content = self._GetRowValue(query_hash, row, 'content')
event_data.offset = self._GetRowValue(query_hash, row, 'id')
event_data.query = query
event_data.title = self._GetRowValue(query_hash, row, 'title')
event_data.url = self._GetRowValue(query_hash, row, 'url')
timestamp = self._GetRowValue(query_hash, row, 'dateAdded')
if timestamp:
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = self._GetRowValue(query_hash, row, 'lastModified')
if timestamp:
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
def ParseBookmarkFolderRow(
self, parser_mediator, query, row, **unused_kwargs):
"""Parses a bookmark folder row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
"""
query_hash = hash(query)
title = self._GetRowValue(query_hash, row, 'title')
event_data = FirefoxPlacesBookmarkFolderEventData()
event_data.offset = self._GetRowValue(query_hash, row, 'id')
event_data.query = query
event_data.title = title or 'N/A'
timestamp = self._GetRowValue(query_hash, row, 'dateAdded')
if timestamp:
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = self._GetRowValue(query_hash, row, 'lastModified')
if timestamp:
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
def ParseBookmarkRow(
self, parser_mediator, query, row, **unused_kwargs):
"""Parses a bookmark row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
"""
query_hash = hash(query)
rev_host = self._GetRowValue(query_hash, row, 'rev_host')
bookmark_type = self._GetRowValue(query_hash, row, 'type')
event_data = FirefoxPlacesBookmarkEventData()
event_data.host = rev_host or 'N/A'
event_data.offset = self._GetRowValue(query_hash, row, 'id')
event_data.places_title = self._GetRowValue(query_hash, row, 'places_title')
event_data.query = query
event_data.title = self._GetRowValue(query_hash, row, 'bookmark_title')
event_data.type = self._BOOKMARK_TYPES.get(bookmark_type, 'N/A')
event_data.url = self._GetRowValue(query_hash, row, 'url')
event_data.visit_count = self._GetRowValue(query_hash, row, 'visit_count')
timestamp = self._GetRowValue(query_hash, row, 'dateAdded')
if timestamp:
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = self._GetRowValue(query_hash, row, 'lastModified')
if timestamp:
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
def ParsePageVisitedRow(
self, parser_mediator, query, row, cache=None, database=None,
**unused_kwargs):
"""Parses a page visited row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
cache (Optional[SQLiteCache]): cache.
database (Optional[SQLiteDatabase]): database.
"""
query_hash = hash(query)
from_visit = self._GetRowValue(query_hash, row, 'from_visit')
if from_visit is not None:
from_visit = self._GetUrl(from_visit, cache, database)
rev_host = self._GetRowValue(query_hash, row, 'rev_host')
event_data = FirefoxPlacesPageVisitedEventData()
event_data.from_visit = from_visit
event_data.hidden = self._GetRowValue(query_hash, row, 'hidden')
event_data.host = self._ReverseHostname(rev_host)
event_data.offset = self._GetRowValue(query_hash, row, 'id')
event_data.query = query
event_data.title = self._GetRowValue(query_hash, row, 'title')
event_data.typed = self._GetRowValue(query_hash, row, 'typed')
event_data.url = self._GetRowValue(query_hash, row, 'url')
event_data.visit_count = self._GetRowValue(query_hash, row, 'visit_count')
event_data.visit_type = self._GetRowValue(query_hash, row, 'visit_type')
timestamp = self._GetRowValue(query_hash, row, 'visit_date')
if timestamp:
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
parser_mediator.ProduceEventWithEventData(event, event_data)
def _ReverseHostname(self, hostname):
"""Reverses the hostname and strips the leading dot.
The hostname entry is reversed:
moc.elgoog.www.
Should be:
www.google.com
Args:
hostname (str): reversed hostname.
Returns:
str: hostname without a leading dot.
"""
if not hostname:
return ''
if len(hostname) <= 1:
return hostname
if hostname[-1] == '.':
return hostname[::-1][1:]
return hostname[::-1][0:]
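  # Worked example (illustrative): Firefox stores 'www.google.com' in
  # rev_host as 'moc.elgoog.www.'; the trailing dot is detected, the string
  # is reversed to '.www.google.com' and the leading dot stripped, yielding
  # 'www.google.com' again.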
def _GetUrl(self, url_id, cache, database):
"""Retrieves a URL from a reference to an entry in the from_visit table.
Args:
url_id (str): identifier of the visited URL.
cache (SQLiteCache): cache.
database (SQLiteDatabase): database.
Returns:
str: URL and hostname.
"""
url_cache_results = cache.GetResults('url')
if not url_cache_results:
result_set = database.Query(self.URL_CACHE_QUERY)
cache.CacheQueryResults(
result_set, 'url', 'id', ('url', 'rev_host'))
url_cache_results = cache.GetResults('url')
url, reverse_host = url_cache_results.get(url_id, ['', ''])
if not url:
return ''
hostname = self._ReverseHostname(reverse_host)
return '{0:s} ({1:s})'.format(url, hostname)
sqlite.SQLiteParser.RegisterPlugin(FirefoxHistoryPlugin)
|
joachimmetz/plaso
|
plaso/parsers/sqlite_plugins/firefox_history.py
|
Python
|
apache-2.0
| 19,252
|
[
"VisIt"
] |
0e7619e7e4f6f4ca452be38fcb36f8194633c4893b3a3921e4e282bf0494d64c
|
########################################################################
# $Id$
########################################################################
""" The BQS TimeLeft utility interrogates the BQS batch system for the
current CPU consumed and CPU limit.
"""
from DIRAC import gLogger, gConfig, S_OK, S_ERROR
from DIRAC.Core.Utilities.TimeLeft.TimeLeft import runCommand
__RCSID__ = "$Id$"
import os
class BQSTimeLeft:
#############################################################################
def __init__( self ):
""" Standard constructor
"""
self.log = gLogger.getSubLogger( 'BQSTimeLeft' )
self.jobID = None
    if 'QSUB_REQNAME' in os.environ:
self.jobID = os.environ['QSUB_REQNAME']
self.log.verbose( 'QSUB_REQNAME=%s' % ( self.jobID ) )
self.scaleFactor = gConfig.getValue( '/LocalSite/CPUScalingFactor', 0.0 )
#############################################################################
def getResourceUsage( self ):
"""Returns a dictionary containing CPUConsumed, CPULimit, WallClockConsumed
and WallClockLimit for current slot. All values returned in seconds.
"""
if not self.jobID:
return S_ERROR( 'Could not determine batch jobID from QSUB_REQNAME env var.' )
if not self.scaleFactor:
      return S_ERROR( 'CPU scale factor is not defined' )
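    # Illustrative note (assumed output format): the parsing below expects
    # the sixth whitespace-separated token of the qjob line to carry the
    # consumed/limit CPU figures, either split across two tokens
    # ('123/ 3600') or as a single token ('123/3600'); both cases parse to
    # cpu = 123.0 and cpuLimit = 3600.0.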
cmd = 'qjob -a -nh -wide %s' % ( self.jobID )
result = runCommand( cmd )
if not result['OK']:
return result
self.log.verbose( result['Value'] )
cpu = None
cpuLimit = None
try:
cpuItems = result['Value'].split()
if cpuItems[5][-1] == '/':
cpu = float( cpuItems[5][:-1] )
cpuLimit = float( cpuItems[6] )
else:
cpuList = cpuItems[5].split( '/' )
cpu = float( cpuList[0] )
cpuLimit = float( cpuList[1] )
except Exception:
self.log.warn( 'Problem parsing "%s" for CPU usage' % ( result['Value'] ) )
    # BQS has no wall-clock limit, so we simply report the CPU figures to
    # the TimeLeft utility as the wall-clock values as well
wallClock = cpu
wallClockLimit = cpuLimit
# Divide the numbers by 5 to bring it to HS06 units from the CC UI units
# and remove HS06 normalization factor
consumed = {'CPU':cpu / 5. / self.scaleFactor,
'CPULimit':cpuLimit / 5. / self.scaleFactor,
'WallClock':wallClock / 5. / self.scaleFactor,
'WallClockLimit':wallClockLimit / 5. / self.scaleFactor}
self.log.debug( consumed )
failed = False
for key, val in consumed.items():
      if val is None:
failed = True
self.log.warn( 'Could not determine %s' % key )
if not failed:
return S_OK( consumed )
else:
msg = 'Could not determine some parameters,' \
' this is the stdout from the batch system call\n%s' % ( result['Value'] )
self.log.info( msg )
return S_ERROR( 'Could not determine some parameters' )
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
|
avedaee/DIRAC
|
Core/Utilities/TimeLeft/BQSTimeLeft.py
|
Python
|
gpl-3.0
| 3,025
|
[
"DIRAC"
] |
7a7d3954c0a81d575bdc7beab36e6bb56dc4d6d996cbd52119004da83aed5cdd
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
This module provides classes that operate on points or vectors in 3D space.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Sep 23, 2011"
import numpy as np
import re
from math import sin, cos, pi, sqrt
from monty.json import MSONable
class SymmOp(MSONable):
"""
A symmetry operation in cartesian space. Consists of a rotation plus a
translation. Implementation is as an affine transformation matrix of rank 4
for efficiency. Read: http://en.wikipedia.org/wiki/Affine_transformation.
.. attribute:: affine_matrix
A 4x4 numpy.array representing the symmetry operation.
"""
def __init__(self, affine_transformation_matrix, tol=0.01):
"""
Initializes the SymmOp from a 4x4 affine transformation matrix.
In general, this constructor should not be used unless you are
transferring rotations. Use the static constructors instead to
generate a SymmOp from proper rotations and translation.
Args:
affine_transformation_matrix (4x4 array): Representing an
affine transformation.
tol (float): Tolerance for determining if matrices are equal.
"""
affine_transformation_matrix = np.array(affine_transformation_matrix)
if affine_transformation_matrix.shape != (4, 4):
raise ValueError("Affine Matrix must be a 4x4 numpy array!")
self.affine_matrix = affine_transformation_matrix
self.tol = tol
@staticmethod
def from_rotation_and_translation(
rotation_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1)),
translation_vec=(0, 0, 0), tol=0.1):
"""
Creates a symmetry operation from a rotation matrix and a translation
vector.
Args:
rotation_matrix (3x3 array): Rotation matrix.
translation_vec (3x1 array): Translation vector.
tol (float): Tolerance to determine if rotation matrix is valid.
Returns:
SymmOp object
"""
rotation_matrix = np.array(rotation_matrix)
translation_vec = np.array(translation_vec)
if rotation_matrix.shape != (3, 3):
raise ValueError("Rotation Matrix must be a 3x3 numpy array.")
if translation_vec.shape != (3,):
raise ValueError("Translation vector must be a rank 1 numpy array "
"with 3 elements.")
affine_matrix = np.eye(4)
affine_matrix[0:3][:, 0:3] = rotation_matrix
affine_matrix[0:3][:, 3] = translation_vec
return SymmOp(affine_matrix, tol)
def __eq__(self, other):
return np.allclose(self.affine_matrix, other.affine_matrix,
atol=self.tol)
def __hash__(self):
return 7
def __repr__(self):
return self.__str__()
def __str__(self):
output = ["Rot:", str(self.affine_matrix[0:3][:, 0:3]), "tau",
str(self.affine_matrix[0:3][:, 3])]
return "\n".join(output)
def operate(self, point):
"""
Apply the operation on a point.
Args:
point: Cartesian coordinate.
Returns:
Coordinates of point after operation.
"""
affine_point = np.array([point[0], point[1], point[2], 1])
return np.dot(self.affine_matrix, affine_point)[0:3]
def operate_multi(self, points):
"""
Apply the operation on a list of points.
Args:
points: List of Cartesian coordinates
Returns:
Numpy array of coordinates after operation
"""
points = np.array(points)
affine_points = np.concatenate([points, np.ones(points.shape[:-1] + (1,))], axis=-1)
return np.inner(affine_points, self.affine_matrix)[..., :-1]
def apply_rotation_only(self, vector):
"""
Vectors should only be operated by the rotation matrix and not the
translation vector.
Args:
vector (3x1 array): A vector.
"""
return np.dot(self.rotation_matrix, vector)
def are_symmetrically_related(self, point_a, point_b, tol=0.001):
"""
Checks if two points are symmetrically related.
Args:
point_a (3x1 array): First point.
point_b (3x1 array): Second point.
tol (float): Absolute tolerance for checking distance.
Returns:
True if self.operate(point_a) == point_b or vice versa.
"""
if np.allclose(self.operate(point_a), point_b, atol=tol):
return True
if np.allclose(self.operate(point_b), point_a, atol=tol):
return True
return False
@property
def rotation_matrix(self):
"""
A 3x3 numpy.array representing the rotation matrix.
"""
return self.affine_matrix[0:3][:, 0:3]
@property
def translation_vector(self):
"""
A rank 1 numpy.array of dim 3 representing the translation vector.
"""
return self.affine_matrix[0:3][:, 3]
def __mul__(self, other):
"""
Returns a new SymmOp which is equivalent to apply the "other" SymmOp
followed by this one.
"""
new_matrix = np.dot(self.affine_matrix, other.affine_matrix)
return SymmOp(new_matrix)
@property
def inverse(self):
"""
Returns inverse of transformation.
"""
invr = np.linalg.inv(self.affine_matrix)
return SymmOp(invr)
@staticmethod
def from_axis_angle_and_translation(axis, angle, angle_in_radians=False,
translation_vec=(0, 0, 0)):
"""
Generates a SymmOp for a rotation about a given axis plus translation.
Args:
axis: The axis of rotation in cartesian space. For example,
[1, 0, 0]indicates rotation about x-axis.
angle (float): Angle of rotation.
angle_in_radians (bool): Set to True if angles are given in
radians. Or else, units of degrees are assumed.
translation_vec: A translation vector. Defaults to zero.
Returns:
SymmOp for a rotation about given axis and translation.
"""
if isinstance(axis, (tuple, list)):
axis = np.array(axis)
if isinstance(translation_vec, (tuple, list)):
vec = np.array(translation_vec)
else:
vec = translation_vec
a = angle if angle_in_radians else angle * pi / 180
cosa = cos(a)
sina = sin(a)
u = axis / np.linalg.norm(axis)
r = np.zeros((3, 3))
r[0, 0] = cosa + u[0] ** 2 * (1 - cosa)
r[0, 1] = u[0] * u[1] * (1 - cosa) - u[2] * sina
r[0, 2] = u[0] * u[2] * (1 - cosa) + u[1] * sina
r[1, 0] = u[0] * u[1] * (1 - cosa) + u[2] * sina
r[1, 1] = cosa + u[1] ** 2 * (1 - cosa)
r[1, 2] = u[1] * u[2] * (1 - cosa) - u[0] * sina
r[2, 0] = u[0] * u[2] * (1 - cosa) - u[1] * sina
r[2, 1] = u[1] * u[2] * (1 - cosa) + u[0] * sina
r[2, 2] = cosa + u[2] ** 2 * (1 - cosa)
return SymmOp.from_rotation_and_translation(r, vec)
@staticmethod
def from_origin_axis_angle(origin, axis, angle, angle_in_radians=False):
"""
Generates a SymmOp for a rotation about a given axis through an
origin.
Args:
origin (3x1 array): The origin which the axis passes through.
axis (3x1 array): The axis of rotation in cartesian space. For
example, [1, 0, 0]indicates rotation about x-axis.
angle (float): Angle of rotation.
angle_in_radians (bool): Set to True if angles are given in
radians. Or else, units of degrees are assumed.
Returns:
SymmOp.
"""
theta = angle * pi / 180 if not angle_in_radians else angle
a = origin[0]
b = origin[1]
c = origin[2]
u = axis[0]
v = axis[1]
w = axis[2]
# Set some intermediate values.
u2 = u * u
v2 = v * v
w2 = w * w
cos_t = cos(theta)
sin_t = sin(theta)
l2 = u2 + v2 + w2
l = sqrt(l2)
# Build the matrix entries element by element.
m11 = (u2 + (v2 + w2) * cos_t) / l2
m12 = (u * v * (1 - cos_t) - w * l * sin_t) / l2
m13 = (u * w * (1 - cos_t) + v * l * sin_t) / l2
m14 = (a * (v2 + w2) - u * (b * v + c * w)
+ (u * (b * v + c * w) - a * (v2 + w2)) * cos_t
+ (b * w - c * v) * l * sin_t) / l2
m21 = (u * v * (1 - cos_t) + w * l * sin_t) / l2
m22 = (v2 + (u2 + w2) * cos_t) / l2
m23 = (v * w * (1 - cos_t) - u * l * sin_t) / l2
m24 = (b * (u2 + w2) - v * (a * u + c * w)
+ (v * (a * u + c * w) - b * (u2 + w2)) * cos_t
+ (c * u - a * w) * l * sin_t) / l2
m31 = (u * w * (1 - cos_t) - v * l * sin_t) / l2
m32 = (v * w * (1 - cos_t) + u * l * sin_t) / l2
m33 = (w2 + (u2 + v2) * cos_t) / l2
m34 = (c * (u2 + v2) - w * (a * u + b * v)
+ (w * (a * u + b * v) - c * (u2 + v2)) * cos_t
+ (a * v - b * u) * l * sin_t) / l2
return SymmOp([[m11, m12, m13, m14], [m21, m22, m23, m24],
[m31, m32, m33, m34], [0, 0, 0, 1]])
@staticmethod
def reflection(normal, origin=(0, 0, 0)):
"""
Returns reflection symmetry operation.
Args:
normal (3x1 array): Vector of the normal to the plane of
reflection.
origin (3x1 array): A point in which the mirror plane passes
through.
Returns:
SymmOp for the reflection about the plane
"""
#Normalize the normal vector first.
n = np.array(normal, dtype=float) / np.linalg.norm(normal)
u, v, w = n
translation = np.eye(4)
translation[0:3, 3] = -np.array(origin)
xx = 1 - 2 * u ** 2
yy = 1 - 2 * v ** 2
zz = 1 - 2 * w ** 2
xy = -2 * u * v
xz = -2 * u * w
yz = -2 * v * w
mirror_mat = [[xx, xy, xz, 0], [xy, yy, yz, 0], [xz, yz, zz, 0],
[0, 0, 0, 1]]
if np.linalg.norm(origin) > 1e-6:
mirror_mat = np.dot(np.linalg.inv(translation),
np.dot(mirror_mat, translation))
return SymmOp(mirror_mat)
@staticmethod
def inversion(origin=(0, 0, 0)):
"""
Inversion symmetry operation about axis.
Args:
origin (3x1 array): Origin of the inversion operation. Defaults
to [0, 0, 0].
Returns:
SymmOp representing an inversion operation about the origin.
"""
mat = -np.eye(4)
mat[3, 3] = 1
mat[0:3, 3] = 2 * np.array(origin)
return SymmOp(mat)
@staticmethod
def rotoreflection(axis, angle, origin=(0, 0, 0)):
"""
Returns a roto-reflection symmetry operation
Args:
axis (3x1 array): Axis of rotation / mirror normal
angle (float): Angle in degrees
origin (3x1 array): Point left invariant by roto-reflection.
Defaults to (0, 0, 0).
Return:
Roto-reflection operation
"""
rot = SymmOp.from_origin_axis_angle(origin, axis, angle)
refl = SymmOp.reflection(axis, origin)
m = np.dot(rot.affine_matrix, refl.affine_matrix)
return SymmOp(m)
def as_dict(self):
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"matrix": self.affine_matrix.tolist(), "tolerance": self.tol}
return d
def as_xyz_string(self):
"""
Returns a string of the form 'x, y, z', '-x, -y, z',
'-y+1/2, x+1/2, z+1/2', etc. Only works for integer rotation matrices
"""
xyz = ['x', 'y', 'z']
strings = []
# test for invalid rotation matrix
if not np.all(np.isclose(self.rotation_matrix,
np.round(self.rotation_matrix))):
raise ValueError('Rotation matrix must be integer')
for r, t in zip(self.rotation_matrix, self.translation_vector):
symbols = []
for val, axis in zip(r, xyz):
val = int(round(val))
if val == 1:
if symbols:
symbols.append('+')
symbols.append(axis)
elif val == -1:
symbols.append('-' + axis)
elif val > 1:
if symbols:
symbols.append('+')
symbols.append(str(val) + axis)
elif val < -1:
symbols.append(str(val) + axis)
import fractions
f = fractions.Fraction(float(t)).limit_denominator()
if abs(f) > 1e-6:
if f > 0:
symbols.append('+')
symbols.append(str(f))
strings.append("".join(symbols))
return ', '.join(strings)
@staticmethod
def from_xyz_string(xyz_string):
"""
Args:
xyz_string: string of the form 'x, y, z', '-x, -y, z',
'-2y+1/2, 3x+1/2, z-y+1/2', etc.
Returns:
SymmOp
"""
rot_matrix = np.zeros((3, 3))
trans = np.zeros(3)
toks = xyz_string.strip().replace(" ", "").lower().split(",")
        re_rot = re.compile(r"([+-]?)([\d\.]*)/?([\d\.]*)([x-z])")
        re_trans = re.compile(r"([+-]?)([\d\.]+)/?([\d\.]*)(?![x-z])")
for i, tok in enumerate(toks):
# build the rotation matrix
for m in re_rot.finditer(tok):
factor = -1 if m.group(1) == "-" else 1
if m.group(2) != "":
factor *= float(m.group(2)) / float(m.group(3)) \
if m.group(3) != "" else float(m.group(2))
j = ord(m.group(4)) - 120
rot_matrix[i, j] = factor
# build the translation vector
for m in re_trans.finditer(tok):
factor = -1 if m.group(1) == "-" else 1
num = float(m.group(2)) / float(m.group(3)) \
if m.group(3) != "" else float(m.group(2))
trans[i] = num * factor
return SymmOp.from_rotation_and_translation(rot_matrix, trans)
@classmethod
def from_dict(cls, d):
return cls(d["matrix"], d["tolerance"])
|
migueldiascosta/pymatgen
|
pymatgen/core/operations.py
|
Python
|
mit
| 14,998
|
[
"pymatgen"
] |
d4fae7c6cb5796496f41c1cb42c208e041843802b175b45daa359835918d716a
|
from __future__ import absolute_import
import re
from collections import namedtuple, defaultdict
from copy import deepcopy
from datetime import datetime
import six
from django.utils.functional import cached_property
from parsimonious.expressions import Optional
from parsimonious.exceptions import IncompleteParseError, ParseError
from parsimonious.nodes import Node
from parsimonious.grammar import Grammar, NodeVisitor
from sentry import eventstore
from sentry.models import Project
from sentry.search.utils import (
parse_datetime_range,
parse_datetime_string,
parse_datetime_value,
InvalidQuery,
)
from sentry.utils.dates import to_timestamp
from sentry.utils.snuba import SENTRY_SNUBA_MAP, get_snuba_column_name
WILDCARD_CHARS = re.compile(r"[\*]")
def translate(pat):
"""Translate a shell PATTERN to a regular expression.
modified from: https://github.com/python/cpython/blob/2.7/Lib/fnmatch.py#L85
"""
i, n = 0, len(pat)
res = ""
while i < n:
c = pat[i]
i = i + 1
# fnmatch.translate has no way to handle escaping metacharacters.
# Applied this basic patch to handle it:
# https://bugs.python.org/file27570/issue8402.1.patch
if c == "\\":
res += re.escape(pat[i])
i += 1
elif c == "*":
res = res + ".*"
        # TODO: Everything except wildcard matching is disabled for the
        # moment. The code below is only commented out, since there's a
        # reasonable chance we'll add it back in the future.
# elif c == '?':
# res = res + '.'
# elif c == '[':
# j = i
# if j < n and pat[j] == '!':
# j = j + 1
# if j < n and pat[j] == ']':
# j = j + 1
# while j < n and pat[j] != ']':
# j = j + 1
# if j >= n:
# res = res + '\\['
# else:
# stuff = pat[i:j].replace('\\', '\\\\')
# i = j + 1
# if stuff[0] == '!':
# stuff = '^' + stuff[1:]
# elif stuff[0] == '^':
# stuff = '\\' + stuff
# res = '%s[%s]' % (res, stuff)
else:
res = res + re.escape(c)
return "^" + res + "$"
# Explanation of the quoted-string regex, courtesy of Matt
# " // literal quote
# ( // begin capture group
# (?: // begin uncaptured group
# [^"] // any character that's not quote
# | // or
# (?<=\\)["] // A quote, preceded by a \ (for escaping)
# ) // end uncaptured group
# * // repeat the uncaptured group
# ) // end captured group
# ? // allow to be empty (allow empty quotes)
# " // quote literal
event_search_grammar = Grammar(
r"""
search = (boolean_term / paren_term / search_term)*
boolean_term = (paren_term / search_term) space? (boolean_operator space? (paren_term / search_term) space?)+
paren_term = space? open_paren space? (paren_term / boolean_term)+ space? closed_paren space?
search_term = key_val_term / quoted_raw_search / raw_search
key_val_term = space? (time_filter / rel_time_filter / specific_time_filter
/ numeric_filter / has_filter / is_filter / basic_filter)
space?
raw_search = (!key_val_term ~r"\ *([^\ ^\n ()]+)\ *" )*
quoted_raw_search = spaces quoted_value spaces
# standard key:val filter
basic_filter = negation? search_key sep search_value
# filter for dates
time_filter = search_key sep? operator date_format
# filter for relative dates
rel_time_filter = search_key sep rel_date_format
# exact time filter for dates
specific_time_filter = search_key sep date_format
# Numeric comparison filter
numeric_filter = search_key sep operator? ~r"[0-9]+(?=\s|$)"
# has filter for not null type checks
has_filter = negation? "has" sep (search_key / search_value)
is_filter = negation? "is" sep search_value
search_key = key / quoted_key
search_value = quoted_value / value
value = ~r"[^()\s]*"
quoted_value = ~r"\"((?:[^\"]|(?<=\\)[\"])*)?\""s
key = ~r"[a-zA-Z0-9_\.-]+"
# only allow colons in quoted keys
quoted_key = ~r"\"([a-zA-Z0-9_\.:-]+)\""
date_format = ~r"\d{4}-\d{2}-\d{2}(T\d{2}:\d{2}:\d{2}(\.\d{1,6})?)?Z?(?=\s|$)"
rel_date_format = ~r"[\+\-][0-9]+[wdhm](?=\s|$)"
# NOTE: the order in which these operators are listed matters
# because for example, if < comes before <= it will match that
# even if the operator is <=
boolean_operator = "OR" / "AND"
operator = ">=" / "<=" / ">" / "<" / "=" / "!="
open_paren = "("
closed_paren = ")"
sep = ":"
space = " "
negation = "!"
spaces = ~r"\ *"
"""
)
# add valid snuba `raw_query` args
SEARCH_MAP = dict(
{
"start": "start",
"end": "end",
"project_id": "project_id",
"first_seen": "first_seen",
"last_seen": "last_seen",
"times_seen": "times_seen",
# TODO(mark) figure out how to safelist aggregate functions/field aliases
# so they can be used in conditions
},
**SENTRY_SNUBA_MAP
)
no_conversion = set(["project_id", "start", "end"])
PROJECT_KEY = "project.name"
class InvalidSearchQuery(Exception):
pass
class SearchBoolean(namedtuple("SearchBoolean", "left_term operator right_term")):
BOOLEAN_AND = "AND"
BOOLEAN_OR = "OR"
class SearchFilter(namedtuple("SearchFilter", "key operator value")):
def __str__(self):
return "".join(map(six.text_type, (self.key.name, self.operator, self.value.raw_value)))
@cached_property
def is_negation(self):
# Negations are mostly just using != operators. But we also have
# negations on has: filters, which translate to = '', so handle that
# case as well.
return (
self.operator == "!="
and self.value.raw_value != ""
or self.operator == "="
and self.value.raw_value == ""
)
class SearchKey(namedtuple("SearchKey", "name")):
@property
def snuba_name(self):
snuba_name = SEARCH_MAP.get(self.name)
if snuba_name:
return snuba_name
# assume custom tag if not listed
return "tags[%s]" % (self.name,)
@cached_property
def is_tag(self):
return self.name not in SEARCH_MAP
class SearchValue(namedtuple("SearchValue", "raw_value")):
@property
def value(self):
if self.is_wildcard():
return translate(self.raw_value)
return self.raw_value
def is_wildcard(self):
if not isinstance(self.raw_value, six.string_types):
return False
return bool(WILDCARD_CHARS.search(self.raw_value))
class SearchVisitor(NodeVisitor):
# A list of mappers that map source keys to a target name. Format is
# <target_name>: [<list of source names>],
key_mappings = {}
numeric_keys = set(
[
"device.battery_level",
"device.charging",
"device.online",
"device.simulator",
"error.handled",
"issue.id",
"stack.colno",
"stack.in_app",
"stack.lineno",
"stack.stack_level",
# TODO(mark) figure out how to safelist aggregate functions/field aliases
# so they can be used in conditions
]
)
date_keys = set(["start", "end", "first_seen", "last_seen", "time", "timestamp"])
unwrapped_exceptions = (InvalidSearchQuery,)
@cached_property
def key_mappings_lookup(self):
lookup = {}
for target_field, source_fields in self.key_mappings.items():
for source_field in source_fields:
lookup[source_field] = target_field
return lookup
def flatten(self, children):
def _flatten(seq):
# there is a list from search_term and one from raw_search, so flatten them.
# Flatten each group in the list, since nodes can return multiple items
for item in seq:
if isinstance(item, list):
for sub in _flatten(item):
yield sub
else:
yield item
if not (children and isinstance(children, list) and isinstance(children[0], list)):
return children
children = [child for group in children for child in _flatten(group)]
children = filter(None, _flatten(children))
return children
def remove_optional_nodes(self, children):
def is_not_optional(child):
return not (isinstance(child, Node) and isinstance(child.expr, Optional))
return filter(is_not_optional, children)
def remove_space(self, children):
def is_not_space(child):
return not (isinstance(child, Node) and child.text == " ")
return filter(is_not_space, children)
def visit_search(self, node, children):
return self.flatten(children)
def visit_key_val_term(self, node, children):
_, key_val_term, _ = children
# key_val_term is a list because of group
return key_val_term[0]
def visit_raw_search(self, node, children):
value = node.text.strip(" ")
if not value:
return None
return SearchFilter(SearchKey("message"), "=", SearchValue(value))
def visit_quoted_raw_search(self, node, children):
value = children[1]
if not value:
return None
return SearchFilter(SearchKey("message"), "=", SearchValue(value))
def visit_boolean_term(self, node, children):
def find_next_operator(children, start, end, operator):
for index in range(start, end):
if children[index] == operator:
return index
return None
def build_boolean_tree_branch(children, start, end, operator):
index = find_next_operator(children, start, end, operator)
if index is None:
return None
left = build_boolean_tree(children, start, index)
right = build_boolean_tree(children, index + 1, end)
return SearchBoolean(left, children[index], right)
def build_boolean_tree(children, start, end):
if end - start == 1:
return children[start]
result = build_boolean_tree_branch(children, start, end, SearchBoolean.BOOLEAN_OR)
if result is None:
result = build_boolean_tree_branch(children, start, end, SearchBoolean.BOOLEAN_AND)
return result
children = self.flatten(children)
children = self.remove_optional_nodes(children)
children = self.remove_space(children)
return [build_boolean_tree(children, 0, len(children))]
def visit_paren_term(self, node, children):
children = self.flatten(children)
children = self.remove_optional_nodes(children)
children = self.remove_space(children)
return self.flatten(children[1])
def visit_numeric_filter(self, node, children):
(search_key, _, operator, search_value) = children
operator = operator[0] if not isinstance(operator, Node) else "="
if search_key.name in self.numeric_keys:
try:
search_value = SearchValue(int(search_value.text))
except ValueError:
raise InvalidSearchQuery("Invalid numeric query: %s" % (search_key,))
return SearchFilter(search_key, operator, search_value)
else:
search_value = SearchValue(
operator + search_value.text if operator != "=" else search_value.text
)
return self._handle_basic_filter(search_key, "=", search_value)
def visit_time_filter(self, node, children):
(search_key, _, operator, search_value) = children
if search_key.name in self.date_keys:
try:
search_value = parse_datetime_string(search_value)
except InvalidQuery as exc:
raise InvalidSearchQuery(six.text_type(exc))
return SearchFilter(search_key, operator, SearchValue(search_value))
else:
search_value = operator + search_value if operator != "=" else search_value
return self._handle_basic_filter(search_key, "=", SearchValue(search_value))
def visit_rel_time_filter(self, node, children):
(search_key, _, value) = children
if search_key.name in self.date_keys:
try:
from_val, to_val = parse_datetime_range(value.text)
except InvalidQuery as exc:
raise InvalidSearchQuery(six.text_type(exc))
# TODO: Handle negations
if from_val is not None:
operator = ">="
search_value = from_val[0]
else:
operator = "<="
search_value = to_val[0]
return SearchFilter(search_key, operator, SearchValue(search_value))
else:
return self._handle_basic_filter(search_key, "=", SearchValue(value.text))
def visit_specific_time_filter(self, node, children):
# If we specify a specific date, it means any event on that day, and if
# we specify a specific datetime then it means a few minutes interval
# on either side of that datetime
(search_key, _, date_value) = children
if search_key.name not in self.date_keys:
return self._handle_basic_filter(search_key, "=", SearchValue(date_value))
try:
from_val, to_val = parse_datetime_value(date_value)
except InvalidQuery as exc:
raise InvalidSearchQuery(six.text_type(exc))
# TODO: Handle negations here. This is tricky because these will be
# separate filters, and to negate this range we need (< val or >= val).
# We currently AND all filters together, so we'll need extra logic to
# handle. Maybe not necessary to allow negations for this.
return [
SearchFilter(search_key, ">=", SearchValue(from_val[0])),
SearchFilter(search_key, "<", SearchValue(to_val[0])),
]
def visit_operator(self, node, children):
return node.text
def visit_date_format(self, node, children):
return node.text
def is_negated(self, node):
# Because negations are always optional, parsimonious returns a list of nodes
# containing one node when a negation exists, and a single node when it doesn't.
if isinstance(node, list):
node = node[0]
return node.text == "!"
def visit_basic_filter(self, node, children):
(negation, search_key, _, search_value) = children
operator = "!=" if self.is_negated(negation) else "="
return self._handle_basic_filter(search_key, operator, search_value)
def _handle_basic_filter(self, search_key, operator, search_value):
# If a date or numeric key gets down to the basic filter, then it means
# that the value wasn't in a valid format, so raise here.
if search_key.name in self.date_keys:
raise InvalidSearchQuery("Invalid format for date search")
if search_key.name in self.numeric_keys:
raise InvalidSearchQuery("Invalid format for numeric search")
return SearchFilter(search_key, operator, search_value)
def visit_has_filter(self, node, children):
# the key is has here, which we don't need
negation, _, _, (search_key,) = children
# if it matched search value instead, it's not a valid key
if isinstance(search_key, SearchValue):
raise InvalidSearchQuery(
'Invalid format for "has" search: %s' % (search_key.raw_value,)
)
operator = "=" if self.is_negated(negation) else "!="
return SearchFilter(search_key, operator, SearchValue(""))
def visit_is_filter(self, node, children):
raise InvalidSearchQuery('"is" queries are not supported on this search')
def visit_search_key(self, node, children):
key = children[0]
return SearchKey(self.key_mappings_lookup.get(key, key))
def visit_search_value(self, node, children):
return SearchValue(children[0])
def visit_closed_paren(self, node, children):
return node.text
def visit_open_paren(self, node, children):
return node.text
def visit_boolean_operator(self, node, children):
return node.text
def visit_value(self, node, children):
return node.text
def visit_key(self, node, children):
return node.text
def visit_quoted_value(self, node, children):
return node.match.groups()[0].replace('\\"', '"')
def visit_quoted_key(self, node, children):
return node.match.groups()[0]
def generic_visit(self, node, children):
return children or node
def parse_search_query(query):
try:
tree = event_search_grammar.parse(query)
except IncompleteParseError as e:
raise InvalidSearchQuery(
"%s %s"
% (
u"Parse error: %r (column %d)." % (e.expr.name, e.column()),
"This is commonly caused by unmatched-parentheses. Enclose any text in double quotes.",
)
)
return SearchVisitor().visit(tree)
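# Worked example (illustrative):
#   parse_search_query('user.email:foo@example.com')
# is expected to yield a single filter,
#   [SearchFilter(key=SearchKey('user.email'), operator='=',
#                 value=SearchValue('foo@example.com'))]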
def convert_search_boolean_to_snuba_query(search_boolean):
def convert_term(term):
if isinstance(term, SearchFilter):
return convert_search_filter_to_snuba_query(term)
elif isinstance(term, SearchBoolean):
return convert_search_boolean_to_snuba_query(term)
else:
raise InvalidSearchQuery(
"Attempted to convert term of unrecognized type %s into a snuba expression"
% term.__class__.__name__
)
if not search_boolean:
return search_boolean
left = convert_term(search_boolean.left_term)
right = convert_term(search_boolean.right_term)
operator = search_boolean.operator.lower()
return [operator, [left, right]]
def convert_endpoint_params(params):
return [SearchFilter(SearchKey(key), "=", SearchValue(params[key])) for key in params]
def convert_search_filter_to_snuba_query(search_filter):
snuba_name = search_filter.key.snuba_name
value = search_filter.value.value
if snuba_name in no_conversion:
return
elif snuba_name == "tags[environment]":
env_conditions = []
_envs = set(value if isinstance(value, (list, tuple)) else [value])
# the "no environment" environment is null in snuba
if "" in _envs:
_envs.remove("")
operator = "IS NULL" if search_filter.operator == "=" else "IS NOT NULL"
env_conditions.append(["tags[environment]", operator, None])
if _envs:
env_conditions.append(["tags[environment]", "IN", list(_envs)])
return env_conditions
elif snuba_name == "message":
if search_filter.value.is_wildcard():
# XXX: We don't want the '^$' values at the beginning and end of
# the regex since we want to find the pattern anywhere in the
# message. Strip off here
value = search_filter.value.value[1:-1]
return [["match", ["message", "'(?i)%s'" % (value,)]], search_filter.operator, 1]
else:
# https://clickhouse.yandex/docs/en/query_language/functions/string_search_functions/#position-haystack-needle
# positionCaseInsensitive returns 0 if not found and an index of 1 or more if found
# so we should flip the operator here
operator = "=" if search_filter.operator == "!=" else "!="
# make message search case insensitive
return [["positionCaseInsensitive", ["message", "'%s'" % (value,)]], operator, 0]
else:
value = (
int(to_timestamp(value)) * 1000
if isinstance(value, datetime) and snuba_name != "timestamp"
else value
)
# Tags are never null, but promoted tags are columns and so can be null.
# To handle both cases, use `ifNull` to convert to an empty string and
# compare so we need to check for empty values.
if search_filter.key.is_tag:
snuba_name = ["ifNull", [snuba_name, "''"]]
# Handle checks for existence
if search_filter.operator in ("=", "!=") and search_filter.value.value == "":
if search_filter.key.is_tag:
return [snuba_name, search_filter.operator, value]
else:
# If not a tag, we can just check that the column is null.
return [["isNull", [snuba_name]], search_filter.operator, 1]
is_null_condition = None
if search_filter.operator == "!=" and not search_filter.key.is_tag:
# Handle null columns on inequality comparisons. Any comparison
# between a value and a null will result to null, so we need to
# explicitly check for whether the condition is null, and OR it
# together with the inequality check.
# We don't need to apply this for tags, since if they don't exist
# they'll always be an empty string.
is_null_condition = [["isNull", [snuba_name]], "=", 1]
if search_filter.value.is_wildcard():
condition = [["match", [snuba_name, "'(?i)%s'" % (value,)]], search_filter.operator, 1]
else:
condition = [snuba_name, search_filter.operator, value]
# We only want to return as a list if we have the check for null
# present. Returning as a list causes these conditions to be ORed
# together. Otherwise just return the raw condition, so that it can be
# used correctly in aggregates.
if is_null_condition:
return [is_null_condition, condition]
else:
return condition
def get_snuba_query_args(query=None, params=None):
# NOTE: this function assumes project permissions check already happened
parsed_terms = []
if query is not None:
try:
parsed_terms = parse_search_query(query)
except ParseError as e:
raise InvalidSearchQuery(u"Parse error: %r (column %d)" % (e.expr.name, e.column()))
# Keys included as url params take precedent if same key is included in search
if params is not None:
parsed_terms.extend(convert_endpoint_params(params))
kwargs = {"conditions": [], "filter_keys": defaultdict(list)}
projects = {}
has_project_term = any(
isinstance(term, SearchFilter) and term.key.name == PROJECT_KEY for term in parsed_terms
)
if has_project_term:
projects = {
p["slug"]: p["id"]
for p in Project.objects.filter(id__in=params["project_id"]).values("id", "slug")
}
for term in parsed_terms:
if isinstance(term, SearchFilter):
snuba_name = term.key.snuba_name
if term.key.name == PROJECT_KEY:
condition = ["project_id", "=", projects.get(term.value.value)]
kwargs["conditions"].append(condition)
elif snuba_name in ("start", "end"):
kwargs[snuba_name] = term.value.value
elif snuba_name in ("project_id", "issue"):
value = term.value.value
if isinstance(value, int):
value = [value]
kwargs["filter_keys"][snuba_name].extend(value)
else:
converted_filter = convert_search_filter_to_snuba_query(term)
kwargs["conditions"].append(converted_filter)
else: # SearchBoolean
# TODO(lb): remove when boolean terms fully functional
kwargs["has_boolean_terms"] = True
kwargs["conditions"].append(convert_search_boolean_to_snuba_query(term))
return kwargs
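# Illustrative sketch (not part of the original module): combining a
# free-text query with endpoint params. The project id below is made up.
def _example_snuba_args():
    kwargs = get_snuba_query_args(query="message:hello",
                                  params={"project_id": [1]})
    # kwargs["conditions"] holds the case-insensitive message condition and
    # kwargs["filter_keys"]["project_id"] == [1]
    return kwargs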
FIELD_ALIASES = {
"last_seen": {"aggregations": [["max", "timestamp", "last_seen"]]},
"latest_event": {
"aggregations": [
# TODO(mark) This is a hack to work around jsonschema limitations
# in snuba.
["argMax(event_id, timestamp)", "", "latest_event"]
]
},
"project": {"fields": ["project.id"]},
"user": {"fields": ["user.id", "user.name", "user.username", "user.email", "user.ip"]}
# TODO(mark) Add rpm alias.
}
VALID_AGGREGATES = {
"count_unique": {"snuba_name": "uniq", "fields": "*"},
"count": {"snuba_name": "count", "fields": "*"},
"min": {"snuba_name": "min", "fields": ["timestamp", "duration"]},
"max": {"snuba_name": "max", "fields": ["timestamp", "duration"]},
"sum": {"snuba_name": "sum", "fields": ["duration"]},
# These don't entirely work yet but are intended to be illustrative
"avg": {"snuba_name": "avg", "fields": ["duration"]},
"p75": {"snuba_name": "quantileTiming(0.75)", "fields": ["duration"]},
}
AGGREGATE_PATTERN = re.compile(r"^(?P<function>[^\(]+)\((?P<column>[a-z\._]*)\)$")
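# For example (editor's note): AGGREGATE_PATTERN.search("count_unique(user.email)")
# matches with group "function" == "count_unique" and group "column" == "user.email".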
def validate_aggregate(field, match):
function_name = match.group("function")
if function_name not in VALID_AGGREGATES:
raise InvalidSearchQuery("Unknown aggregate function '%s'" % field)
function_data = VALID_AGGREGATES[function_name]
column = match.group("column")
if column not in function_data["fields"] and function_data["fields"] != "*":
raise InvalidSearchQuery(
"Invalid column '%s' in aggregate function '%s'" % (column, function_name)
)
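# Editor's sketch: for a field like "sum(timestamp)" the validator raises
# InvalidSearchQuery, since "timestamp" is not in the allowed columns for
# "sum" (["duration"]), while "sum(duration)" passes.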
def resolve_orderby(orderby, fields, aggregations):
"""
We accept column names, aggregate functions, and aliases as order by
    values. Aggregates and field aliases need to be resolved/validated.
"""
orderby = orderby if isinstance(orderby, (list, tuple)) else [orderby]
validated = []
for column in orderby:
bare_column = column.lstrip("-")
if bare_column in fields:
validated.append(column)
continue
match = AGGREGATE_PATTERN.search(bare_column)
if match:
bare_column = get_aggregate_alias(match)
found = [agg[2] for agg in aggregations if agg[2] == bare_column]
if found:
prefix = "-" if column.startswith("-") else ""
validated.append(prefix + bare_column)
if len(validated) == len(orderby):
return validated
raise InvalidSearchQuery("Cannot order by an field that is not selected.")
def get_aggregate_alias(match):
column = match.group("column").replace(".", "_")
return u"{}_{}".format(match.group("function"), column).rstrip("_")
def resolve_field_list(fields, snuba_args):
"""
Expand a list of fields based on aliases and aggregate functions.
    Returns a dict of aggregations, selected_columns, and
groupby that can be merged into the result of get_snuba_query_args()
to build a more complete snuba query based on event search conventions.
"""
# If project.name is requested, get the project.id from Snuba so we
# can use this to look up the name in Sentry
if "project.name" in fields:
fields.remove("project.name")
if "project.id" not in fields:
fields.append("project.id")
aggregations = []
groupby = []
columns = []
for field in fields:
if not isinstance(field, six.string_types):
raise InvalidSearchQuery("Field names must be strings")
if field in FIELD_ALIASES:
special_field = deepcopy(FIELD_ALIASES[field])
columns.extend(special_field.get("fields", []))
aggregations.extend(special_field.get("aggregations", []))
continue
# Basic fields don't require additional validation. They could be tag
# names which we have no way of validating at this point.
match = AGGREGATE_PATTERN.search(field)
if not match:
columns.append(field)
continue
validate_aggregate(field, match)
aggregations.append(
[
VALID_AGGREGATES[match.group("function")]["snuba_name"],
match.group("column"),
get_aggregate_alias(match),
]
)
rollup = snuba_args.get("rollup")
if not rollup:
# Ensure fields we require to build a functioning interface
# are present. We don't add fields when using a rollup as the additional fields
# would be aggregated away. When there are aggregations
# we use argMax to get the latest event/projectid so we can create links.
        # The `projectid` output name is not a typo; using `project_id` here
        # generates invalid queries.
if not aggregations and "id" not in columns:
columns.append("id")
columns.append("project.id")
if aggregations and "latest_event" not in fields:
aggregations.extend(deepcopy(FIELD_ALIASES["latest_event"]["aggregations"]))
if aggregations and "project.id" not in columns:
aggregations.append(["argMax(project_id, timestamp)", "", "projectid"])
if rollup and columns and not aggregations:
raise InvalidSearchQuery("You cannot use rollup without an aggregate field.")
orderby = snuba_args.get("orderby")
if orderby:
orderby = resolve_orderby(orderby, columns, aggregations)
# If aggregations are present all columns
# need to be added to the group by so that the query is valid.
if aggregations:
groupby.extend(columns)
return {
"selected_columns": columns,
"aggregations": aggregations,
"groupby": groupby,
"orderby": orderby,
}
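# Editor's sketch of the resulting shape (hypothetical input, no rollup):
#     resolve_field_list(["title", "count()"], {})
# would yield roughly
#     {"selected_columns": ["title"],
#      "aggregations": [["count", "", "count"],
#                       ["argMax(event_id, timestamp)", "", "latest_event"],
#                       ["argMax(project_id, timestamp)", "", "projectid"]],
#      "groupby": ["title"], "orderby": None}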
def find_reference_event(snuba_args, reference_event_slug, fields):
try:
project_slug, event_id = reference_event_slug.split(":")
except ValueError:
raise InvalidSearchQuery("Invalid reference event")
try:
project = Project.objects.get(
slug=project_slug, id__in=snuba_args["filter_keys"]["project_id"]
)
except Project.DoesNotExist:
raise InvalidSearchQuery("Invalid reference event")
reference_event = eventstore.get_event_by_id(project.id, event_id, fields)
if not reference_event:
raise InvalidSearchQuery("Invalid reference event")
return reference_event.snuba_data
TAG_KEY_RE = re.compile(r"^tags\[(.*)\]$")
def get_reference_event_conditions(snuba_args, event_slug):
"""
Returns a list of additional conditions/filter_keys to
    scope a query by the groupby fields using values from the reference event.
This is a key part of pagination in the event details modal and
summary graph navigation.
"""
field_names = [get_snuba_column_name(field) for field in snuba_args.get("groupby", [])]
# translate the field names into enum columns
columns = []
has_tags = False
for field in field_names:
if field.startswith("tags["):
has_tags = True
else:
columns.append(eventstore.Columns(field))
if has_tags:
columns.extend([eventstore.Columns.TAGS_KEY, eventstore.Columns.TAGS_VALUE])
# Fetch the reference event ensuring the fields in the groupby
# clause are present.
event_data = find_reference_event(snuba_args, event_slug, columns)
conditions = []
tags = {}
if "tags.key" in event_data and "tags.value" in event_data:
tags = dict(zip(event_data["tags.key"], event_data["tags.value"]))
for field in field_names:
match = TAG_KEY_RE.match(field)
if match:
value = tags.get(match.group(1), None)
else:
value = event_data.get(field, None)
if value:
conditions.append([field, "=", value])
return conditions
|
mvaled/sentry
|
src/sentry/api/event_search.py
|
Python
|
bsd-3-clause
| 32,277
|
[
"VisIt"
] |
ec618d8443d799d6d387145cb36989e0cf20359898205afc82dcb0fbfd3fc558
|
# Copyright (c) 2018, Henrique Miranda
# All rights reserved.
#
# This file is part of the yambopy project
#
from yambopy import *
from netCDF4 import Dataset
from math import sqrt
import numpy as np
from cmath import exp
from yambopy.units import ha2ev, ev2cm1, I
class YamboElectronPhononDB():
"""
Python class to read the electron-phonon matrix elements from yambo
"""
def __init__(self,lattice,filename='ndb.elph_gkkp',folder_gkkp='SAVE',save='SAVE',only_freqs=False):
self.lattice = lattice
self.save = save
self.filename = "%s/%s"%(folder_gkkp,filename)
        self.ph_eigenvalues = None
        self.gkkp = None  # populated by readDB()
self.car_kpoints = lattice.car_kpoints
self.red_kpoints = lattice.red_kpoints
self.rlat = lattice.rlat
#read dimensions of electron phonon parameters
try:
database = Dataset(self.filename)
except:
raise FileNotFoundError("error opening %s in YamboElectronPhononDB"%self.filename)
self.qpoints = database.variables['PH_Q'][:].T
self.car_qpoints = np.array([ q/self.lattice.alat for q in self.qpoints ])
self.nmodes, self.nqpoints, self.nkpoints, self.nbands = database.variables['PARS'][:4].astype(int)
        self.natoms = self.nmodes // 3
database.close()
        # don't open any GKKP DB file during initialization
#self.readDB_n_np(ib1=2,ib2=3,ik1=3)
#self.readDB()
def get_elphon(self,dir=0):
        if self.gkkp is None:
            self.readDB()
kpts, nks, nss = self.expand_kpts()
gkkp = self.gkkp
return gkkp, kpts
def readDB(self,only_freqs=False):
"""
Load all the gkkp databases to memory
"""
self.ph_eigenvalues = np.zeros([self.nqpoints,self.nmodes])
        self.ph_eigenvectors = np.zeros([self.nqpoints,self.nmodes,self.nmodes//3,3],dtype=np.complex64)
if not only_freqs:
self.gkkp = np.zeros([self.nqpoints,self.nkpoints,self.nmodes,self.nbands,self.nbands],dtype=np.complex64)
for nq in range(self.nqpoints):
filename = '%s_fragment_%d'%(self.filename,nq+1)
database = Dataset(filename)
self.ph_eigenvalues[nq] = np.sqrt(database.variables['PH_FREQS%d'%(nq+1)][:])
p_re = database.variables['POLARIZATION_VECTORS_REAL'][:].T
p_im = database.variables['POLARIZATION_VECTORS_IMAG'][:].T
self.ph_eigenvectors[nq] = p_re + p_im*I
if not only_freqs:
gkkp = database.variables['ELPH_GKKP_Q%d'%(nq+1)][:]
self.gkkp[nq] = (gkkp[:,0,:,:] + I*gkkp[:,1,:,:]).reshape([self.nkpoints,self.nmodes,self.nbands,self.nbands])
database.close()
if not only_freqs:
return self.gkkp
def readDB_n_np(self,ib1=1,ib2=1,ik1=1):
# Read gkkps for a given n,n' and k
# The structure of the gkkps in Yambo is
# GKKP(q)[k,complex,nmodes,nbands*nbands]
iband = (ib1-1)*self.nbands + (ib2-1)
if iband < 0: raise ValueError("error in iband. ib1 and ib2 cannot be zero")
self.gkkp_n_np_kn = np.zeros([self.nqpoints,self.nmodes],dtype=np.complex64)
print('The transition from band n = %d to band n\'= %d has yambo index %d' % (ib1, ib2, iband) )
        for nq in range(self.nqpoints):
filename = '%s_fragment_%d'%(self.filename,nq+1)
database = Dataset(filename)
#self.ph_eigenvalues[nq] = np.sqrt(database.variables['PH_FREQS%d'%(nq+1)][:])
#p_re = database.variables['POLARIZATION_VECTORS_REAL'][:].T
#p_im = database.variables['POLARIZATION_VECTORS_IMAG'][:].T
#self.ph_eigenvectors[nq] = p_re + p_im*I
#if not only_freqs:
self.gkkp_n_np_kn[nq] = database.variables['ELPH_GKKP_Q%d'%(nq+1)][ik1-1,0,:,iband] + I* database.variables['ELPH_GKKP_Q%d'%(nq+1)][ik1-1,1,:,iband]
#self.gkkp_n_np_kn[nq] = (gkkp[:,0,:,:] + I*gkkp[:,1,:,:]).reshape([self.nkpoints,self.nmodes,self.nbands,self.nbands])
database.close()
return self.gkkp_n_np_kn
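    # Editor's sketch of the band index packing: with nbands = 10, the pair
    # (ib1=2, ib2=3) maps to iband = (2-1)*10 + (3-1) = 12, i.e. row-major
    # packing of the nbands*nbands block read from each fragment.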
def plot_map(self,fig,ib1=1,ib2=1,ik1=1,all_phonons=True,cmap='viridis',size=60,lim=0.15):
"""
Alejandro Molina-Sanchez
Plot the gkkp in a scatter plot (1st version developed by A. Molina-Sanchez)
Options:
cmap : colormap. Default viridis
log_scale : Logarithmic scale for the intensity (True or False) Do we put that?
set_maximum : All plots are normalized
Further development: Option for the colorbar
"""
import matplotlib.pyplot as plt
import matplotlib.colors as colors
#size=20,marker='H',set_origin=0.0,lim=0.2,cmap='viridis',log_scale=False,set_maximum=1.0
# GKKP(k+q)[n,n',k]
data=self.readDB_n_np(ib1,ib2,ik1)
color_map = plt.get_cmap(cmap)
kx_aux, ky_aux = self.car_qpoints[:,0], self.car_qpoints[:,1]
        kx = np.concatenate([kx_aux,kx_aux+self.rlat[0,0],kx_aux-self.rlat[0,0],kx_aux+self.rlat[1,0],kx_aux-self.rlat[1,0],kx_aux+self.rlat[0,0]-self.rlat[1,0],kx_aux-self.rlat[0,0]+self.rlat[1,0]])
        ky = np.concatenate([ky_aux,ky_aux+self.rlat[0,1],ky_aux-self.rlat[0,1],ky_aux+self.rlat[1,1],ky_aux-self.rlat[1,1],ky_aux+self.rlat[0,1]-self.rlat[1,1],ky_aux-self.rlat[0,1]+self.rlat[1,1]])
"""
all_phonons options
True: Sum over all phonon modes
False: Plot all gkkp from each phonon mode
"""
if all_phonons:
            gkkp_aux = np.zeros([self.nqpoints])
ax = fig.add_subplot(111)
ax.set_aspect('equal')
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
ax.set_xlim(-lim,lim)
ax.set_ylim(-lim,lim)
for ip in range(self.nmodes):
gkkp_aux += abs(data[:,ip])
max_gkkp = max(gkkp_aux)
            gkkp = np.concatenate(7*[gkkp_aux/max_gkkp])
ax.scatter( kx,ky,s=size,marker='H',c=gkkp,cmap=color_map)
else:
for ip in range(self.nmodes):
square_size = 0.25
x = 0.05 + (square_size+0.05)*(ip-ip/3*3)
y = 0.75 - (square_size+0.05)*(ip/3)
ax = fig.add_axes( [ x, y, square_size, square_size ])
ax.set_aspect('equal')
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
ax.set_xlim(-lim,lim)
ax.set_ylim(-lim,lim)
ax.set_facecolor(color_map(0.0))
gkkp_aux = abs(data[:,ip])
max_gkkp = max(gkkp_aux)
                gkkp = np.concatenate(7*[gkkp_aux/max_gkkp])
ax.scatter( kx,ky,s=size,marker='H',c=gkkp,cmap=color_map)
def plot_modulus(self,ib1=1,ib2=1,ik1=1,all_phonons=True):
# GKKP(k+q)[n,n',k]
data=self.readDB_n_np(ib1,ib2,ik1)
        q_modulus = np.zeros(self.nqpoints)
# Modulus of q-point
for iq in range(self.nqpoints):
q_modulus[iq] = sqrt(np.dot(self.car_qpoints[iq],self.car_qpoints[iq]))
"""
all_phonons options
True: Sum over all phonon modes
False: Plot all gkkp from each phonon mode
"""
if all_phonons:
            gkkp = np.zeros([self.nqpoints])
            gkkp_aux = np.zeros([self.nqpoints])
for ip in range(self.nmodes):
gkkp_aux[:] += abs(data[:,ip])
gkkp[:] = gkkp_aux[:]#/max(gkkp_aux)
else:
            gkkp = np.zeros([self.nqpoints,self.nmodes])
for ip in range(self.nmodes):
gkkp[:,ip] = abs(data[:,ip])
# q_modulus : array dimension: nqpoints
# gkkp : matrix dimension: (nqpoints x nphonons) or (nqpoints)
return q_modulus,gkkp
def __str__(self):
        if self.ph_eigenvalues is None:
            self.readDB()
s = 'nqpoints: %d\n'%self.nqpoints
s+= 'nkpoints: %d\n'%self.nkpoints
s+= 'nmodes: %d\n'%self.nmodes
s+= 'natoms: %d\n'%self.natoms
s+= 'nbands: %d\n'%self.nbands
for nq in range(self.nqpoints):
s+= 'nqpoint %d\n'%nq
for n,mode in enumerate(self.ph_eigenvectors[nq]):
s+= 'mode %d freq: %lf cm-1\n'%(n,self.ph_eigenvalues[nq][n]*ha2ev*ev2cm1)
for a in range(self.natoms):
s += ("%12.8lf "*3+'\n')%tuple(mode[a].real)
return s
if __name__ == '__main__':
    # minimal smoke test; a lattice object is required (construction with
    # default arguments from the SAVE folder is an assumption)
    lattice = YamboLatticeDB()
    elph = YamboElectronPhononDB(lattice)
    print(elph)
    elph.readDB()
|
henriquemiranda/yambo-py
|
yambopy/dbs/elphondb.py
|
Python
|
bsd-3-clause
| 8,789
|
[
"Yambo"
] |
f32bd5c08c918a542354a780f017c942a382162303070d7f2acdfb1bf27394fe
|
#
# Copyright (C) 2007, Mark Lee
#
#http://rl-glue-ext.googlecode.com/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# $Revision: 473 $
# $Date: 2009-01-29 20:50:12 -0700 (Thu, 29 Jan 2009) $
# $Author: brian@tannerpages.com $
# $HeadURL: http://rl-glue-ext.googlecode.com/svn/trunk/projects/codecs/Python/src/rlglue/agent/AgentLoader.py $
import sys
import os
import rlglue.network.Network as Network
from ClientAgent import ClientAgent
from rlglue.versions import get_svn_codec_version
from rlglue.versions import get_codec_version
def loadAgent(theAgent):
theSVNVersion=get_svn_codec_version()
theCodecVersion=get_codec_version()
client = ClientAgent(theAgent)
host = Network.kLocalHost
port = Network.kDefaultPort
hostString = os.getenv("RLGLUE_HOST")
portString = os.getenv("RLGLUE_PORT")
if (hostString != None):
host = hostString
    try:
        port = int(portString)
    except (TypeError, ValueError):
        port = Network.kDefaultPort
print "RL-Glue Python Agent Codec Version: "+theCodecVersion+" (Build "+theSVNVersion+")"
print "\tConnecting to " + host + " on port " + str(port) + "..."
sys.stdout.flush()
client.connect(host, port, Network.kRetryTimeout)
print "\t Agent Codec Connected"
client.runAgentEventLoop()
client.close()
def loadAgentLikeScript():
    agentModule = __import__(sys.argv[1])
    agentClass = getattr(agentModule,sys.argv[1])
    agent = agentClass()
    loadAgent(agent)
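# Editor's sketch of the expected invocation (hypothetical agent module):
#   $ RLGLUE_HOST=localhost RLGLUE_PORT=4096 python AgentLoader.py MyAgent
# loadAgentLikeScript() imports the module named by sys.argv[1], instantiates
# the class of the same name from it, and hands the instance to loadAgent(),
# which wraps it in a ClientAgent and runs the network event loop.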
|
mguzdial3/MineCode
|
python-codec/src/rlglue/agent/AgentLoader.py
|
Python
|
apache-2.0
| 1,942
|
[
"Brian"
] |
d931cc42493ae6af05ac4d8e4eab1cdbaa6318c64a8bd90b175f4313c65372f7
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# resedit - Front end to resource editor
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
import cgi
import cgitb
cgitb.enable()
from shared.functionality.resedit import main
from shared.cgiscriptstub import run_cgi_script
run_cgi_script(main)
|
heromod/migrid
|
mig/cgi-bin/resedit.py
|
Python
|
gpl-2.0
| 1,075
|
[
"Brian"
] |
60f61735b52293a86c8632a4f5f6baddec1af026180f10d5ba5d02e6041c5104
|
#!/usr/local/bin/python
# -*- coding: utf8
"""
this is a Library for SOM.
CopyLight Masato MASUDA. 2017.23.AUG.
"""
import SOM.Map as Map
import SOM.Coefficients as coef
import SOM.logger as logger
import SOM.SOM_config as SOM_config
from SOM.Topology import hexagonal_tbl
import numpy as np
from numpy.random import *
import time
import sys
import inspect
#try :
# import pycuda.autoinit
# import pycuda.driver as cuda
# import pycuda
# print "Using GPU"
#except ImportError:
# print "Using CPU"
class SOM:
"""
This is a class contain the status data of SOM.
Set by giving arguments.
# Example
'''python
som = SOM(10, 10, 16, 16)
# The arguments are "map size x", "map size y",
"number of input data sets" and
"feature size of input data" in order.
som = SOM (map_size_x=30, map_size_y=30
, input_num=32, input_size=16)
# It is possible to pass values by specifying
arguments.
"""
def __init__ (self, epochs=10
, map_size_x=10, map_size_y=10
, input_num=16, input_size=16
, class_names=None, feature_names=None
, input_file=None
, weight=None
, alpha_max=0.3, alpha_min=0.01
, sigma_max=10, sigma_min=1
, unit_shape="Square"
, log_file = None
, log_fp = None
, config_dic = None
):
self.config = {}
self.map_size_x = map_size_x
self.map_size_y = map_size_y
self.input_num = input_num
self.input_size = input_size
self.alpha_max = alpha_max
self.alpha_min = alpha_min
self.sigma_max = sigma_max
self.sigma_min = sigma_min
self.alpha_tbl = None
self.sigma_tbl = None
self.epochs = epochs
self.log_fp = log_fp
if log_fp == None and log_file:
self.log_fp=open(log_file, "w")
if log_fp == None and log_file==None:
self.log_fp=None
if config_dic:
SOM_config.set_som_params(self, config_dic)
if unit_shape in {"Square", "SQUARE", "square", "SQ", "squ", "SQU"}:
pass
elif unit_shape in {"Hexagon", "HEXAGON", "hexagon"
, "HX", "hex", "HEX"
, "Circle", "CIRCLE", "circle"
, "CR", "cir", "CIR"}:
self.ary = hexagonal_tbl(self)
self.som_class = np.zeros ((map_size_y, map_size_x), dtype=int)
self.mapping = np.zeros ((map_size_y, map_size_x))
if class_names is None:
self.class_names = ["%d" % i for i in range(input_num)]
else :
self.class_names = class_names
if feature_names is None:
self.feature_names = ["%d" % i for i in range(input_size)]
else :
self.feature_names = feature_names
if input_file is None:
self.input_x = np.zeros ((input_num, input_size))
else :
import pandas
try :
data = pandas.read_csv (input_file, index_col="label")
except IOError:
print "Failed read input-file : ", input_file
return
tmp = data.as_matrix()
self.input_x = tmp[:,:]
self.input_num = len(self.input_x)
self.input_size = len(self.input_x[0])
self.feature_names = list(data.columns)
self.class_names = list(data.index)
if weight is None:
# self.weights = np.zeros ((map_size_y, map_size_x, input_size))
self.weights = rand (self.map_size_y, self.map_size_x, self.input_size)
else :
if isinstance(weight, str):
self.weight_load_csv(weight)
if isinstance(weight, unicode):
self.weight_load_csv(weight)
elif isinstance(weight, tuple):
self.weights = weight
self.min_nodes_x = np.zeros ((self.input_num), dtype=int)
self.min_nodes_y = np.zeros ((self.input_num), dtype=int)
SOM_config.conf2dict(self)
def set_config (self, params_dict=None):
"""
Usage: som.set_config ( parameters_dictionary )
        'som' is an instance of SOM.Model.SOM.
        The accepted contents of the parameters dictionary are documented in
        SOM_config.set_som_params.__doc__ (import SOM.SOM_config as SOM_config).
"""
SOM_config.set_som_params (self, params_dict)
self.recreate()
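    # Editor's sketch (hypothetical keys; the real set is documented in
    # SOM_config.set_som_params.__doc__):
    #   som.set_config({"map_size_x": 30, "map_size_y": 30, "epochs": 50})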
def recreate (self):
self.som_class = np.zeros ((self.map_size_y, self.map_size_x), dtype=int)
self.mapping = np.zeros ((self.map_size_y, self.map_size_x))
self.min_nodes_x = np.zeros ((self.input_num), dtype=int)
self.min_nodes_y = np.zeros ((self.input_num), dtype=int)
self.weights = rand (self.map_size_y, self.map_size_x, self.input_size)
i, j = self.input_x.nonzero()
if len(i)==0 and len(j)==0:
self.input_x = np.zeros ((self.input_num, self.input_size))
def load_input_csv (self, csv):
"""
This function uses Pandas module.
Loading a csv file for SOM input data.
The label of input data and the name of feature quantity
to be entered are required in the CSV file.
"""
import pandas
try :
data = pandas.read_csv (csv, index_col='label')
except IOError:
print "Failed read input-file", csv
return
tmp = data.as_matrix()
self.input_x = tmp[:,:]
shape = self.input_x.shape
        if self.input_num != shape[0]:
            print "The number of input data sets does not match the initial state."
            exit()
        if self.input_size != shape[1]:
            print "The feature size of input data does not match the initial state."
exit()
self.class_names = list(data.index)
self.feature_names = list(data.columns)
def load_verify_csv (self, csv):
import pandas
data = pandas.read_csv (csv, index_col='label')
print "== load verify data =="
print data
tmp = data.as_matrix()
self.input_x = tmp[:,:]
shape = self.input_x.shape
        if self.input_num != shape[0]:
            print "Warning: The verify data does not match the initial state. verify data shape:" + str(shape)
            self.input_num = shape[0]
        if self.input_size != shape[1]:
            print "Error: The feature size of verify data does not match the initial state. verify data shape:" + str(shape)
exit()
self.class_names = list(data.index)
self.feature_names = list(data.columns)
self.recreate()
print "======================"
def weight_output_csv (self, csv):
"""
Output the weights by CSV format.
"""
import pandas
        # append the extension unless the name already ends with ".csv"
        if not csv.endswith('.csv'):
            csv += '.csv'
new_array = np.reshape(self.weights, (self.map_size_x*self.map_size_y, self.input_size))
df = pandas.DataFrame(new_array)
df.columns = self.feature_names
df.to_csv(csv)
def weight_load_csv (self, csv):
"""
Load Weights file by CSV format.
"""
import pandas
try :
data = pandas.read_csv(csv, index_col=0)
except IOError:
print "Failed read weight-file : ", csv
return
tmp = data.as_matrix()
self.weights = np.reshape(tmp, (self.map_size_y, self.map_size_x, self.input_size))
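    # Round-trip sketch (editor's note): weight_output_csv("w") writes "w.csv"
    # holding map_size_x*map_size_y rows of input_size weights each, and
    # weight_load_csv("w.csv") reshapes them back to
    # (map_size_y, map_size_x, input_size).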
def to_json (self, filename):
import json
fp=open(filename, "w")
json.dump (self.config, fp)
fp.close()
def from_json (self, filename):
import json
fp=open(filename, "r")
self.config = json.load(fp)
fp.close()
SOM_config.dict2conf(self)
self.recreate()
def fit (self, trainX, epochs=10, verbose=0, topology="gaussian"
, callbacks=None, shuffle=True
, alpha_func=coef.alpha_func
, sigma_func=coef.sigma_func
, unrelated_columns = None
):
if shuffle :
_index = np.random.permutation(self.input_num)
self.input_x = trainX[_index]
_names = np.array(self.class_names)
self.class_names = list(_names[_index])
else :
self.input_x = trainX
if callbacks is None:
cb_fg = 0
elif callbacks == self.write_BMU:
cb_fg = 1
else :
cb_fg = 0
tmp_inputx = self.input_x
colid=[]
if unrelated_columns is not None:
for uncols in unrelated_columns:
if type(uncols) is int:
colid.append(uncols)
else :
colid.append(self.feature_names.index(uncols))
tmp_inputx = np.delete (tmp_inputx, colid, 1)
self.alpha_tbl = alpha_func(alpha_max=self.alpha_max, alpha_min=self.alpha_min, epochs=epochs)
self.sigma_tbl = sigma_func(sigma_max=self.sigma_max, sigma_min=self.sigma_min, epochs=epochs)
for epoch in range(epochs):
tmp_weights = self.weights
tmp_weights = np.delete(tmp_weights, colid, 2)
self.search_BMU(tmp_inputx, tmp_weights)
            if cb_fg == 1:
self.write_BMU()
if self.log_fp:
self.fwrite_BMU()
self.modify(epoch=epoch, epochs=epochs, verbose=verbose, topology=topology, log_fp=self.log_fp)
self.som_mapping(epoch=epoch, inputx=self.input_x, weights=self.weights)
if self.log_fp:
self.log_fp.write("BMU of learned model\n")
self.fwrite_BMU()
self.log_fp.close()
@logger.train_counter
def modify (self, epoch, epochs=10, verbose=0
, topology="gaussian"
, alpha=None
, sigma=None
, neighborhood=coef.neighborhood_func
, log_fp = None
):
if alpha is None:
alpha=self.alpha_tbl
if sigma is None:
sigma=self.sigma_tbl
for n in range(self.input_num):
if n ==0:
start = time.time()
if verbose :
logger.train_input_counter(self, n, verbose)
for i in range(self.map_size_y):
for j in range(self.map_size_x):
# self.weights[i][j] += alpha(epoch, alpha_max=self.alpha_max, alpha_min=self.alpha_min, epochs=epochs) * neighborhood(self, epoch, n, i, j, epochs=epochs) * (self.input_x[n]-self.weights[i][j])
self.weights[i][j] += alpha[epoch] * neighborhood(self, epoch, n, i, j, epochs=epochs, sigma_tbl=sigma) * (self.input_x[n]-self.weights[i][j])
if verbose :
elapsed = time.time() - start
sys.stdout.write(" -%2.2fs" % elapsed)
if n+1 == self.input_num:
sys.stdout.write("\n")
if self.log_fp:
elapsed = time.time() - start
self.log_fp.write(" - %2.2fs\n" % elapsed)
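    # Editor's note: the update applied above is the classic Kohonen rule,
    #   w_ij <- w_ij + alpha(t) * h_ij(n, t) * (x_n - w_ij),
    # with alpha(t) taken from alpha_tbl and the neighborhood h from
    # coef.neighborhood_func (a gaussian, per the topology="gaussian" default).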
def som_mapping (self, epoch, inputx, weights):
for i in range(self.map_size_y):
for j in range(self.map_size_x):
min = self.map_size_x*self.map_size_y
for k in range(self.input_num):
dist = self.measur_distance (inputx[k], weights[i][j])
if k==0 or min>dist :
min = dist
self.som_class[i][j] = k
self.mapping[i][j] = min
if epoch+1==self.epochs:
self.regularization_mapping ( )
def som_mapping_fire (self, inputx, weights, grad):
"""
inputx is 1D vector. ([input_size])
weights is 2D vecotor. ([map_size_y, map_size_x])
"""
for i in range(self.map_size_y):
for j in range(self.map_size_x):
dist = self.measur_distance (inputx, weights[i][j])
self.mapping[i][j] = dist
self.regularization_mapping()
min_value = np.min(self.mapping)
max_value = np.max(self.mapping)
self.som_class = (self.mapping-min_value)/(max_value-min_value) * (grad-1)
self.som_class = self.som_class.astype(np.int)
def som_mapping_columns (self, epoch, column, grad):
gradation_range = np.arange(0, 1+1./(grad-1), 1./(grad-1))
for i in range(self.map_size_y):
for j in range(self.map_size_x):
min = self.map_size_x*self.map_size_y
for k in range(self.input_num):
dist = abs(self.input_x[k][column] - self.weights[i][j][column])
if k==0 or min>dist:
min = dist
for l in range(grad-1):
if gradation_range[l]<=self.input_x[k][column] and self.input_x[k][column]<gradation_range[l+1]:
self.som_class[i][j] = l
break
else:
self.som_class[i][j] = grad-1
self.mapping[i][j] = min
if epoch+1 == self.epochs:
self.regularization_mapping()
def som_mapping_values (self, epoch, column, grad):
min = np.min(self.weights[:,:,column])
max = np.max(self.weights[:,:,column])
if min != max:
mapping = (self.weights[:,:,column]-min)/(max-min)
else :
mapping = self.weights[:,:,column]
gradation_range = np.arange(0, 1+1./(grad-1), 1./(grad-1))
for i in range(self.map_size_y):
for j in range(self.map_size_x):
for l in range(grad-1):
if gradation_range[l]<=mapping[i][j] and mapping[i][j]<gradation_range[l+1]:
self.som_class[i][j] = l
break
else:
self.som_class[i][j] = grad-1
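    # Editor's sketch: with grad=5 the gradation_range built above is
    # [0.0, 0.25, 0.5, 0.75, 1.0], so each normalized weight is binned into
    # one of five colour classes (index 0..4).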
def som_mapping_differences (self, epoch, column, grad):
difference = self.mapping
min = np.min(difference)
max = np.max(difference)
if min != max:
mapping = (difference-min)/(max-min)
else :
mapping = difference
gradation_range = np.arange(0, 1+1./(grad-1), 1./(grad-1))
for i in range(self.map_size_y):
for j in range(self.map_size_x):
for l in range(grad-1):
if gradation_range[l]<=mapping[i][j] and mapping[i][j]<gradation_range[l+1]:
self.som_class[i][j] = l
break
else :
self.som_class[i][j] = grad-1
def som_mapping_index (self, epoch, inputx, weights):
for i in range(self.map_size_y):
for j in range(self.map_size_x):
min = self.map_size_x*self.map_size_y
for k in range(self.input_num):
dist = self.measur_distance (inputx[k], weights[i][j])
if min>dist:
min = dist
self.som_class[i][j] = k
def measur_distance (self, inputx, weights):
dist = (inputx-weights) * (inputx-weights)
return np.sqrt(np.sum(dist))
def regularization_mapping (self):
min=np.min(self.mapping)
max=np.max(self.mapping)
if max > min:
self.mapping = 1.0 - (self.mapping-min)/(max-min)
else :
print "Map regularization is failed: max value = min value"
pass
def search_BMU (self, inputx, weights):
min = 0.0
for k in range(inputx.shape[0]):
for i in range(self.map_size_y):
for j in range(self.map_size_x):
dist = self.measur_distance(inputx[k], weights[i][j])
if dist<min or (i==0 and j==0):
min = dist
self.min_nodes_y[k] = i
self.min_nodes_x[k] = j
def write_BMU (self):
for k in range(self.input_num):
sys.stdout.write("BMU[%d] %d %d\n" % (k, self.min_nodes_y[k], self.min_nodes_x[k]))
def fwrite_BMU (self):
for k in range(self.input_num):
self.log_fp.write("BMU[%03d] %4d %4d \t%s\n" % (k, self.min_nodes_y[k], self.min_nodes_x[k], self.class_names[k]))
def print_config(self):
SOM_config.print_config(self)
def map_output2wrl_squ (self, grad, func=Map.thick_func_weights, drawable="weights", filename="test", index=""):
Map.output_rgb_wrl_squ(self, grad, func=func, drawable=drawable, filename=filename, index=index)
def map_output2wrl_gray_squ (self, filename):
Map.output_gray_wrl_squ(self, filename=filename)
def map_output2wrl_gray_hex (self, filename):
Map.output_gray_wrl_hex (self, filename=filename)
@logger.time_rap
def test (epochs=10, map_size_x=10, map_size_y=10, verbose=1):
som = SOM(epochs=epochs, map_size_x=map_size_x, map_size_y=map_size_y, input_file="animal.csv")
callbacks = som.write_BMU
drawable = "weights"
func = Map.thick_func_weights
grad = som.input_num
som.fit(som.input_x, epochs=epochs, verbose=verbose, callbacks=callbacks, shuffle=False)
# som.fit(som.input_x, epochs=epochs, verbose=verbose)
Map.output_rgb_wrl_squ(som, grad=grad, func=func, drawable=drawable)
Map.output_gray_wrl_squ(som, "test")
@logger.time_rap
def run ( input_file=None, input_x=None
, epochs=100, map_size_x=20, map_size_y=20
, input_num=16, input_size=16, class_names=None
, drawable="weights", func=Map.thick_func_weights
, grad=100, verbose=1
, alpha_max=0.3, alpha_min=0.01
, sigma_max=5, sigma_min=1
, topology="gaussian"
, callbacks=None, shuffle=True
, output_file="output_map"
):
if input_x is None and input_file is None:
print "Input vector is not set."
quit()
else :
som = SOM ( input_file=input_file, epochs=epochs
, map_size_x=map_size_x, map_size_y=map_size_y
, input_num=input_num, input_size=input_size
, class_names=class_names
, alpha_min=alpha_min, alpha_max=alpha_max
, sigma_min=sigma_min, sigma_max=sigma_max)
if input_x :
som.input_x = input_x
if drawable in {"indexes", "Indexes", "INDEXES", "index", "Index"}:
grad = som.input_num
som.fit (som.input_x, epochs=epochs, verbose=verbose, topology=topology, callbacks=callbacks, shuffle=shuffle)
Map.output_rgb_wrl_squ(som=som, grad=grad, func=func, drawable=drawable, filename=output_file)
Map.output_rgb_wrl_squ(som=som, grad=grad, func=Map.thick_func_None, drawable="indexes", filename=output_file+"2")
Map.output_gray_wrl_squ(som=som, filename=output_file)
@logger.time_rap
def classifier(som, verbose=0, topology="gaussian", callbacks=None, shuffle=False):
"""
This module classifies the input of the set SOM.
"""
    som.fit(som.input_x, epochs=som.epochs, verbose=verbose, topology=topology, callbacks=callbacks, shuffle=shuffle)
|
kinect110/RPSOM
|
src/SOM/Model.py
|
Python
|
mit
| 16,178
|
[
"Gaussian"
] |
5702a7d1a78df01a16c0364b6313cb631b65b782d82b671f45955438e1e50e63
|
from galaxy.util import xml_text
DEFAULT_REQUIREMENT_TYPE = "package"
DEFAULT_REQUIREMENT_VERSION = None
class ToolRequirement( object ):
"""
Represents an external requirement that must be available for the tool to
run (for example, a program, package, or library). Requirements can
optionally assert a specific version.
"""
def __init__( self, name=None, type=None, version=None ):
self.name = name
self.type = type
self.version = version
def to_dict( self ):
return dict(name=self.name, type=self.type, version=self.version)
@staticmethod
def from_dict( dict ):
version = dict.get( "version", None )
name = dict.get("name", None)
type = dict.get("type", None)
return ToolRequirement( name=name, type=type, version=version )
DEFAULT_CONTAINER_TYPE = "docker"
class ContainerDescription( object ):
def __init__( self, identifier=None, type="docker" ):
self.identifier = identifier
self.type = type
def to_dict( self ):
return dict(identifier=self.identifier, type=self.type)
@staticmethod
def from_dict( dict ):
identifier = dict["identifier"]
type = dict.get("type", DEFAULT_CONTAINER_TYPE)
return ContainerDescription( identifier=identifier, type=type )
def parse_requirements_from_xml( xml_root ):
"""
>>> from xml.etree import ElementTree
>>> def load_requirements( contents ):
... contents_document = '''<tool><requirements>%s</requirements></tool>'''
... root = ElementTree.fromstring( contents_document % contents )
... return parse_requirements_from_xml( root )
>>> reqs, containers = load_requirements('''<requirement>bwa</requirement>''')
>>> reqs[0].name
'bwa'
>>> reqs[0].version is None
True
>>> reqs[0].type
'package'
>>> reqs, containers = load_requirements('''<requirement type="binary" version="1.3.3">cufflinks</requirement>''')
>>> reqs[0].name
'cufflinks'
>>> reqs[0].version
'1.3.3'
>>> reqs[0].type
'binary'
"""
requirements_elem = xml_root.find( "requirements" )
requirement_elems = []
if requirements_elem is not None:
requirement_elems = requirements_elem.findall( 'requirement' )
requirements = []
for requirement_elem in requirement_elems:
name = xml_text( requirement_elem )
type = requirement_elem.get( "type", DEFAULT_REQUIREMENT_TYPE )
version = requirement_elem.get( "version", DEFAULT_REQUIREMENT_VERSION )
requirement = ToolRequirement( name=name, type=type, version=version )
requirements.append( requirement )
container_elems = []
if requirements_elem is not None:
container_elems = requirements_elem.findall( 'container' )
containers = []
for container_elem in container_elems:
identifier = xml_text( container_elem )
type = container_elem.get( "type", DEFAULT_CONTAINER_TYPE )
container = ContainerDescription( identifier=identifier, type=type )
containers.append( container )
return requirements, containers
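# Editor's sketch (hypothetical XML): a container element such as
#   <requirements><container type="docker">busybox</container></requirements>
# would be returned as ContainerDescription(identifier="busybox", type="docker")
# in the second element of the tuple above.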
|
jmchilton/lwr
|
galaxy/tools/deps/requirements.py
|
Python
|
apache-2.0
| 3,157
|
[
"BWA",
"Galaxy"
] |
ada523eaaeedc2026d629d9cc44a9335d42982251c630de7cb564c447d15184c
|
#!/usr/bin/python3
#Author: Brian Raaen
#Original code: https://www.brianraaen.com/2016/11/04/superputty-to-pac-manager/
#
#This script can convert a SuperPutty Sessions.xml file to an Asbru-cm yaml file. The
#resulting yaml file can then be imported into Asbru-cm.
#This script does require tweaking for your personal setup in the template section.
#For example, add the location of your personal ssh keys to "public key: /home/user".
#Similarly if you want to use a jump off box you can add it to the options section.
#options: ' -X -o "proxycommand=ssh -W %h:%p myhostname.com"'
#To create the Asbru-cm yaml file place your SuperPutty Sessions.xml file in the same
# directory as this script and run asbru_from_superputty.py >importfile.yml.
import uuid
import xml.etree.ElementTree as ET
def branchListImport(devices):
temp = []
branches = {}
for y in devices:
x = y['SessionId'].split('/')[:-1]
if "/".join(y['SessionId'].split('/')[:-1]) not in branches:
if x[0] not in branches:
branches.update({str(x[0]) : {'name' : str(x[0]), 'description' : str(x[0]), 'uuidNumber' : uuid.uuid4(), 'parent' : "__PAC__EXPORTED__"}})
if len(x) > 1:
for y in range(1,len(x)):
if "/".join(x[:(y+1)]) not in branches:
branches.update({"/".join(x[:(y+1)]) : {'name': "/".join(x[:(y+1)]), 'description' : str(x[y]), 'uuidNumber' : uuid.uuid4(), 'parent' : str(branches["/".join(x[:y])]['uuidNumber'])}})
for x in sorted(branches.items()):
temp.append(branchPoint(**x[1]))
for x in temp:
for y in temp:
if str(x.uuid) == str(y.parent):
x.addChild(y.uuid)
return temp
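# Editor's sketch (hypothetical SessionId): an entry "dc1/rack1/switch2" makes
# branchListImport() create the branch points "dc1" and "dc1/rack1", each with
# a fresh uuid and its parent chained, before deviceListImport() attaches the
# device "switch2" to the "dc1/rack1" branch.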
def deviceListImport(devices, branchList):
temp = []
for x in devices:
temp.append(device(description=x['SessionName'], parentName="/".join(x['SessionId'].split('/')[:-1]), ip=x['Host'], port=x['Port'], method=x['Proto'], username=x['Username'] ))
for x in temp:
for y in branchList:
if x.parentName == y.name:
x.parentUuid = str(y.uuid)
y.addChild(str(x.uuid))
break
return temp
class device(object):
def __init__(self, description="", parentName="Unknown", parentUuid=False, uuidNumber=False, ip="", port="", method="", username="", password=False):
self.description = description
self.parentName = parentName
self.parentUuid = parentUuid
if uuidNumber == False:
self.uuid = uuid.uuid4()
else:
if isinstance(uuidNumber, uuid.UUID):
self.uuid = uuidNumber
elif isinstance(uuidNumber, str):
self.uuid = uuid.UUID(uuidNumber)
self.ip = ip
self.port = port
if method.upper() == "SSH":
self.method = "SSH"
elif method.upper() == "TELNET":
self.method = "Telnet"
else:
self.method = method.upper()
self.username = username
self.password = password
def __hash__(self):
        return hash((self.description, self.parentName, self.parentUuid, self.uuid, self.ip, self.port, self.method, self.username, self.password))
def __str__(self):
return str(self.uuid)
def __repr__(self):
return 'asbru_template.device(description="{}", parentName={}, parentUuid="{}", uuidNumber="{}", ip="{}", port="{}", method="{}", username="{}", password="{}"'.format(self.description, self.parentName, self.parentUuid, self.uuid, self.ip, self.port, self.method, self.username, self.password)
@property
def ymlString(self):
if self.password == False:
password = "<<ASK_PASS>>"
else:
password = self.password
return elementTemplate.format(uuid=self.uuid, ip=self.ip, desc=self.description, parent=self.parentUuid, port=self.port, method=self.method, username=self.username, password=password)
class branchPoint(object):
def __init__(self, description="", name="", parent="__PAC__EXPORTED__", children=False,uuidNumber=False):
self.description = description
self.name = name
self.parent = parent
self.children = children
if uuidNumber == False:
self.uuid = uuid.uuid4()
else:
if isinstance(uuidNumber, uuid.UUID):
self.uuid = uuidNumber
elif isinstance(uuidNumber, str):
self.uuid = uuid.UUID(uuidNumber)
def __hash__(self):
        return hash((self.uuid, self.name, self.description, self.parent, tuple(self.children or [])))
def __str__(self):
return str(self.uuid)
def __repr__(self):
return 'asbru_template.branchPoint(description="{}", name={}, parent="{}", children={}, uuidNumber="{}"'.format(self.description, self.name, self.parent, self.children, self.uuid)
def addChild(self, child):
if self.children == False:
self.children = []
self.children.append(str(child))
@property
def ymlString(self):
temp = "{}:\n _is_group: 1\n _protected: 0\n children:\n".format(str(self.uuid))
if self.children != False:
for x in self.children:
temp += " {}: 1\n".format(x)
temp += " cluster: []\n description: Connection group '{0}'\n name: {0}\n parent: {1}\n screenshots: ~\n variables: []".format(self.description, self.parent)
return temp
elementTemplate = """{uuid}:
KPX title regexp: '.*{desc}.*'
_is_group: 0
_protected: 0
auth fallback: 1
auth type: userpass
autoreconnect: ''
autossh: ''
children: {{}}
cluster: []
description: "Connection with '{desc}'"
embed: 0
expect: []
favourite: 0
infer from KPX where: 3
infer user pass from KPX: ''
ip: {ip}
local after: []
local before: []
local connected: []
mac: ''
macros: []
method: {method}
name: '{desc}'
options: ''
parent: {parent}
pass: '{password}'
passphrase: ''
passphrase user: ''
port: {port}
prepend command: ''
proxy ip: ''
proxy pass: ''
proxy port: 8080
proxy user: ''
public key: /home/user
quote command: ''
remove control chars: ''
save session logs: ''
screenshots: ~
search pass on KPX: 0
send slow: 0
send string active: ''
send string every: 60
send string intro: 1
send string txt: ''
session log pattern: <UUID>_<NAME>_<DATE_Y><DATE_M><DATE_D>_<TIME_H><TIME_M><TIME_S>.txt
session logs amount: 10
session logs folder: ~/.config/pac/session_logs
startup launch: ''
startup script: ''
startup script name: sample1.pl
terminal options:
audible bell: ''
back color: '#000000000000'
bold color: '#cc62cc62cc62'
bold color like text: 1
command prompt: '[#%\$>]|\:\/\s*$'
cursor shape: block
disable ALT key bindings: ''
disable CTRL key bindings: ''
disable SHIFT key bindings: ''
open in tab: 1
password prompt: "([pP]ass|[pP]ass[wW]or[dt]|ontrase.a|Enter passphrase for key \'.+\'):\\s*$"
tab back color: '#000000000000'
terminal backspace: auto
terminal character encoding: UTF-8
terminal emulation: xterm
terminal font: Monospace 9
terminal scrollback lines: 5000
terminal select words: '-.:_/'
terminal transparency: 0
terminal window hsize: 800
terminal window vsize: 600
text color: '#cc62cc62cc62'
timeout command: 40
timeout connect: 40
use personal settings: ''
use tab back color: ''
username prompt: '([l|L]ogin|[u|u]suario|[u|U]ser-?[n|N]ame|[u|U]ser):\s*$'
visible bell: ''
title: '{desc}'
use prepend command: ''
use proxy: 0
use sudo: ''
user: {username}
variables: []"""
def main():
temp = []
tree = ET.parse('Sessions.xml')
root = tree.getroot()
devices = []
for child in root:
if child.tag == 'SessionData':
devices.append(child.attrib)
branchList = branchListImport(devices)
deviceList = deviceListImport(devices, branchList)
temp.append("---\n__PAC__EXPORTED__:\n children:")
temp += [" {}: 1".format(str(x.uuid)) for x in branchList if '__PAC__EXPORTED__' == x.parent]
temp += [x.ymlString for x in branchList]
temp += [x.ymlString for x in deviceList]
print("\n".join(temp))
if __name__ == "__main__":
main()
|
asbru-cm/asbru-cm
|
utils/asbru_from_superputty.py
|
Python
|
gpl-3.0
| 8,343
|
[
"Brian"
] |
452087d2c4c04af9bc4d67a9d97c5dd216913d155e00b81b54417060355ac020
|
# -*- coding: utf-8 -*-
# <nbformat>2</nbformat>
# <codecell>
# Demo python notebook for sklearn elm and random_hidden_layer modules
#
# Author: David C. Lambert [dcl -at- panix -dot- com]
# Copyright(c) 2013
# License: Simple BSD
# <codecell>
from time import time

# pylab supplies the plotting helpers (plot, scatter, hist) and numpy names
# (sqrt, mean, std, ...) that the original notebook picked up from its
# interactive %pylab environment
from pylab import *
import numpy as np

from sklearn.cluster import k_means
from dl_elm import ELMClassifier, ELMRegressor, GenELMClassifier, GenELMRegressor
from random_layer import RandomLayer, MLPRandomLayer, RBFRandomLayer, GRBFRandomLayer
# <codecell>
def make_toy():
x = np.arange(0.25,20,0.1)
y = x*np.cos(x)+0.5*sqrt(x)*np.random.randn(x.shape[0])
x = x.reshape(-1,1)
y = y.reshape(-1,1)
return x, y
# <codecell>
def res_dist(x, y, e, n_runs=100, random_state=None):
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=random_state)
test_res = []
train_res = []
start_time = time()
for i in xrange(n_runs):
e.fit(x_train, y_train)
train_res.append(e.score(x_train, y_train))
test_res.append(e.score(x_test, y_test))
if (i%(n_runs/10) == 0): print "%d"%i,
print "\nTime: %.3f secs" % (time() - start_time)
print "Test Min: %.3f Mean: %.3f Max: %.3f SD: %.3f" % (min(test_res), mean(test_res), max(test_res), std(test_res))
print "Train Min: %.3f Mean: %.3f Max: %.3f SD: %.3f" % (min(train_res), mean(train_res), max(train_res), std(train_res))
print
return (train_res, test_res)
# <codecell>
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_digits, load_diabetes, make_regression
stdsc = StandardScaler()
iris = load_iris()
irx, iry = stdsc.fit_transform(iris.data), iris.target
irx_train, irx_test, iry_train, iry_test = train_test_split(irx, iry, test_size=0.2)
digits = load_digits()
dgx, dgy = stdsc.fit_transform(digits.data/16.0), digits.target
dgx_train, dgx_test, dgy_train, dgy_test = train_test_split(dgx, dgy, test_size=0.2)
diabetes = load_diabetes()
dbx, dby = stdsc.fit_transform(diabetes.data), diabetes.target
dbx_train, dbx_test, dby_train, dby_test = train_test_split(dbx, dby, test_size=0.2)
mrx, mry = make_regression(n_samples=2000, n_targets=4)
mrx_train, mrx_test, mry_train, mry_test = train_test_split(mrx, mry, test_size=0.2)
xtoy, ytoy = make_toy()
xtoy, ytoy = stdsc.fit_transform(xtoy), stdsc.fit_transform(ytoy)
xtoy_train, xtoy_test, ytoy_train, ytoy_test = train_test_split(xtoy, ytoy, test_size=0.2)
plot(xtoy, ytoy)
# <codecell>
# RBFRandomLayer tests
for af in RandomLayer.activation_func_names():
print af,
elmc = ELMClassifier(activation_func=af)
tr,ts = res_dist(irx, iry, elmc, n_runs=200, random_state=0)
# <codecell>
elmc.classes_
# <codecell>
for af in RandomLayer.activation_func_names():
print af
elmc = ELMClassifier(activation_func=af, random_state=0)
tr,ts = res_dist(dgx, dgy, elmc, n_runs=100, random_state=0)
# <codecell>
elmc = ELMClassifier(n_hidden=500, activation_func='multiquadric')
tr,ts = res_dist(dgx, dgy, elmc, n_runs=100, random_state=0)
scatter(tr, ts, alpha=0.1, marker='D', c='r')
# <codecell>
elmr = ELMRegressor(random_state=0, activation_func='gaussian', alpha=0.0)
elmr.fit(xtoy_train, ytoy_train)
print elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test)
plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))
# <codecell>
from sklearn import pipeline
from sklearn.linear_model import LinearRegression
elmr = pipeline.Pipeline([('rhl', RandomLayer(random_state=0, activation_func='multiquadric')),
('lr', LinearRegression(fit_intercept=False))])
elmr.fit(xtoy_train, ytoy_train)
print elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test)
plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))
# <codecell>
rhl = RandomLayer(n_hidden=200, alpha=1.0)
elmr = GenELMRegressor(hidden_layer=rhl)
tr, ts = res_dist(mrx, mry, elmr, n_runs=200, random_state=0)
scatter(tr, ts, alpha=0.1, marker='D', c='r')
# <codecell>
rhl = RBFRandomLayer(n_hidden=15, rbf_width=0.8)
elmr = GenELMRegressor(hidden_layer=rhl)
elmr.fit(xtoy_train, ytoy_train)
print elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test)
plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))
# <codecell>
nh = 15
(ctrs, _, _) = k_means(xtoy_train, nh)
unit_rs = np.ones(nh)
#rhl = RBFRandomLayer(n_hidden=nh, activation_func='inv_multiquadric')
#rhl = RBFRandomLayer(n_hidden=nh, centers=ctrs, radii=unit_rs)
rhl = GRBFRandomLayer(n_hidden=nh, grbf_lambda=.0001, centers=ctrs)
elmr = GenELMRegressor(hidden_layer=rhl)
elmr.fit(xtoy_train, ytoy_train)
print elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test)
plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))
# <codecell>
rbf_rhl = RBFRandomLayer(n_hidden=100, random_state=0, rbf_width=0.01)
elmc_rbf = GenELMClassifier(hidden_layer=rbf_rhl)
elmc_rbf.fit(dgx_train, dgy_train)
print elmc_rbf.score(dgx_train, dgy_train), elmc_rbf.score(dgx_test, dgy_test)
def powtanh_xfer(activations, power=1.0):
return pow(np.tanh(activations), power)
tanh_rhl = MLPRandomLayer(n_hidden=100, activation_func=powtanh_xfer, activation_args={'power':3.0})
elmc_tanh = GenELMClassifier(hidden_layer=tanh_rhl)
elmc_tanh.fit(dgx_train, dgy_train)
print elmc_tanh.score(dgx_train, dgy_train), elmc_tanh.score(dgx_test, dgy_test)
# <codecell>
rbf_rhl = RBFRandomLayer(n_hidden=100, rbf_width=0.01)
tr, ts = res_dist(dgx, dgy, GenELMClassifier(hidden_layer=rbf_rhl), n_runs=100, random_state=0)
# <codecell>
hist(ts), hist(tr)
print
# <codecell>
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
tr, ts = res_dist(dbx, dby, RandomForestRegressor(n_estimators=15), n_runs=100, random_state=0)
hist(tr), hist(ts)
print
rhl = RBFRandomLayer(n_hidden=15, rbf_width=0.1)
tr,ts = res_dist(dbx, dby, GenELMRegressor(rhl), n_runs=100, random_state=0)
hist(tr), hist(ts)
print
# <codecell>
elmc = ELMClassifier(n_hidden=1000, activation_func='gaussian', alpha=0.0, random_state=0)
elmc.fit(dgx_train, dgy_train)
print elmc.score(dgx_train, dgy_train), elmc.score(dgx_test, dgy_test)
# <codecell>
elmc = ELMClassifier(n_hidden=500, activation_func='hardlim', alpha=1.0, random_state=0)
elmc.fit(dgx_train, dgy_train)
print elmc.score(dgx_train, dgy_train), elmc.score(dgx_test, dgy_test)
# <codecell>
elmr = ELMRegressor(random_state=0)
elmr.fit(xtoy_train, ytoy_train)
print elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test)
plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))
# <codecell>
elmr = ELMRegressor(activation_func='inv_tribas', random_state=0)
elmr.fit(xtoy_train, ytoy_train)
print elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test)
plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))
|
alexsavio/elm-test
|
elm_notebook.py
|
Python
|
bsd-3-clause
| 6,787
|
[
"Gaussian"
] |
e96b11a719ac0af15e099deee73d924434d52640b7387d705d64e8ff50f21b23
|
import mdtraj as md
from simtk.openmm import app
import simtk.openmm as mm
from simtk import unit as u
from fah_parameters import *
deviceid = 2
ff_name = "amber99sbildn"
water_name = 'tip3p'
which_forcefield = "%s.xml" % ff_name
which_water = '%s.xml' % water_name
pdb_filename = "./OLDRUN0/system.pdb"
out_pdb_filename = "./equil_npt/equil_npt.pdb"
final_step_pdb_filename = "./equil_npt/equil_npt_final_step.pdb"
dcd_filename = "./equil_npt/equil_npt.dcd"
log_filename = "./equil_npt/equil_npt.log"
platform = mm.Platform.getPlatformByName("OpenCL")
#platform = mm.Platform.getPlatformByName("CUDA")
#platform.setPropertyDefaultValue('CudaDeviceIndex', '%d' % deviceid) # select Cuda device index
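# (Editor's note, an assumption about device selection: with the OpenCL
#  platform the analogous property would be
#  platform.setPropertyDefaultValue('OpenCLDeviceIndex', '%d' % deviceid);
#  it is left unset here, so the default OpenCL device is used and `deviceid`
#  above is effectively unused.)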
ff = app.ForceField(which_forcefield, which_water)
pdb = app.PDBFile(pdb_filename)
topology = pdb.topology
positions = pdb.positions
system = ff.createSystem(topology, nonbondedMethod=app.PME, nonbondedCutoff=cutoff, constraints=app.HBonds)
integrator = mm.LangevinIntegrator(temperature, equil_friction, equil_timestep)
system.addForce(mm.MonteCarloBarostat(pressure, temperature, barostat_frequency))
simulation = app.Simulation(topology, system, integrator, platform=platform)
simulation.context.setPositions(positions)
print('Minimizing...')
simulation.minimizeEnergy()
simulation.context.setVelocitiesToTemperature(temperature)
print('Equilibrating...')
simulation.step(discard_steps) # Don't even save the first XXX ps
simulation.reporters.append(app.DCDReporter(dcd_filename, output_frequency))
simulation.reporters.append(app.PDBReporter(out_pdb_filename, n_steps - 1))
simulation.reporters.append(app.StateDataReporter(open(log_filename, 'w'), output_frequency, step=True, time=True, speed=True))
simulation.step(n_steps)
del simulation
del system
t = md.load(dcd_filename, top=out_pdb_filename)
t0 = t[-1]
t0.unitcell_lengths = t.unitcell_lengths.mean(0)
t0.save(out_pdb_filename)
del t
del t0
t = md.load(dcd_filename, top=out_pdb_filename)[-1]
t.save(final_step_pdb_filename)
|
kyleabeauchamp/fah-projects
|
code/equilibrate_npt.py
|
Python
|
gpl-2.0
| 2,006
|
[
"MDTraj",
"OpenMM"
] |
4fbe4dd7d716b8090352f091d30a4aaf46f9df0a404aa389bbd233d4445afed9
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
"""
This example shows how to use pytim classes online during
a simulation performed with openmm (http://openmm.org/)
(see also the mdtraj interoperability example)
"""
# openmm imports
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
# pytim
import pytim
from pytim.datafiles import WATER_PDB
# usual openmm setup, we load one of pytim's example files
pdb = PDBFile(WATER_PDB)
forcefield = ForceField('amber99sb.xml', 'spce.xml')
system = forcefield.createSystem(pdb.topology, nonbondedMethod=PME,
nonbondedCutoff=1 * nanometer)
integrator = LangevinIntegrator(
300 * kelvin, 1 / picosecond, 0.002 * picoseconds)
simulation = Simulation(pdb.topology, system, integrator)
simulation.context.setPositions(pdb.positions)
# just pass the openmm Simulation object to pytim
inter = pytim.ITIM(simulation)
print(repr(inter.atoms))
# the new interfacial atoms will be computed at the end
# of the integration cycle
simulation.step(10)
print(repr(inter.atoms))
|
balazsfabian/pytim
|
pytim/examples/example_openmm.py
|
Python
|
gpl-3.0
| 1,160
|
[
"MDTraj",
"OpenMM"
] |
d26e92f628c78d66a3290775bfbc637cd15a83463ecb85e119bade83d2d08854
|
import unittest
import fun_bd
class TestStringMethods(unittest.TestCase):
    def test_devuelve_clasificacion(self):
        clasi = fun_bd.selectClasificacion()
        self.assertEqual(clasi, fun_bd.selectClasificacion())
    def test_devuelve_marcador(self):
        marc = fun_bd.selectMarcador()
        self.assertEqual(marc, fun_bd.selectMarcador())
    def test_devuelve_local(self):
        local = fun_bd.selectResultados()
        self.assertEqual(local, fun_bd.selectResultados())
    def test_devuelve_visit(self):
        visit = fun_bd.selectVisitante()
        self.assertEqual(visit, fun_bd.selectVisitante())
if __name__ == '__main__':
unittest.main()
|
manuelalonsobraojos/proyectoIV
|
bot_LaLiga/test.py
|
Python
|
gpl-3.0
| 616
|
[
"VisIt"
] |
6141b218e6066a943723e8fc9b7990593e0ec196df161ac30179b1e01c23f235
|
# Copyright 2009 by Osvaldo Zagordi. All rights reserved.
# Revisions copyright 2010 by Peter Cock.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Command line wrapper for the short read aligner Novoalign by Novocraft."""
import types
from Bio.Application import _Option, AbstractCommandline
class NovoalignCommandline(AbstractCommandline):
"""Command line wrapper for novoalign by Novocraft.
See www.novocraft.com - novoalign is a short read alignment program.
Example:
>>> from Bio.Sequencing.Applications import NovoalignCommandline
>>> novoalign_cline = NovoalignCommandline(database='some_db',
... readfile='some_seq.txt')
>>> print novoalign_cline
novoalign -d some_db -f some_seq.txt
    As with all the Biopython application wrappers, you can also add or
change options after creating the object:
>>> novoalign_cline.format = 'PRBnSEQ'
>>> novoalign_cline.r_method='0.99' # limited valid values
>>> novoalign_cline.fragment = '250 20' # must be given as a string
>>> novoalign_cline.miRNA = 100
>>> print novoalign_cline
novoalign -d some_db -f some_seq.txt -F PRBnSEQ -r 0.99 -i 250 20 -m 100
You would typically run the command line with novoalign_cline() or via
the Python subprocess module, as described in the Biopython tutorial.
Last checked against version: 2.05.04
"""
def __init__(self, cmd="novoalign", **kwargs):
READ_FORMAT = ['FA', 'SLXFQ', 'STDFQ', 'ILMFQ', 'PRB', 'PRBnSEQ']
REPORT_FORMAT = ['Native', 'Pairwise', 'SAM']
REPEAT_METHOD = ['None', 'Random', 'All', 'Exhaustive', '0.99']
self.parameters = \
[
_Option(["-d", "database"],
"database filename",
filename=True,
equate=False),
_Option(["-f", "readfile"],
"read file",
filename=True,
equate=False),
_Option(["-F", "format"],
"Format of read files.\n\nAllowed values: %s" \
% ", ".join(READ_FORMAT),
checker_function=lambda x: x in READ_FORMAT,
equate=False),
# Alignment scoring options
_Option(["-t", "threshold"],
"Threshold for alignment score",
checker_function=lambda x: isinstance(x, types.IntType),
equate=False),
_Option(["-g", "gap_open"],
"Gap opening penalty [default: 40]",
checker_function=lambda x: isinstance(x, types.IntType),
equate=False),
_Option(["-x", "gap_extend"],
"Gap extend penalty [default: 15]",
checker_function=lambda x: isinstance(x, types.IntType),
equate=False),
_Option(["-u", "unconverted"],
"Experimental: unconverted cytosines penalty in bisulfite mode\n\n"
"Default: no penalty",
checker_function=lambda x: isinstance(x, types.IntType),
equate=False),
# Quality control and read filtering
_Option(["-l", "good_bases"],
"Minimum number of good quality bases [default: log(N_g, 4) + 5]",
checker_function=lambda x: isinstance(x, types.IntType),
equate=False),
_Option(["-h", "homopolymer"],
"Homopolymer read filter [default: 20; disable: negative value]",
checker_function=lambda x: isinstance(x, types.IntType),
equate=False),
# Read preprocessing options
_Option(["-a", "adapter3"],
"Strips a 3' adapter sequence prior to alignment.\n\n"
"With paired ends two adapters can be specified",
checker_function=lambda x: isinstance(x, types.StringType),
equate=False),
_Option(["-n", "truncate"],
"Truncate to specific length before alignment",
checker_function=lambda x: isinstance(x, types.IntType),
equate=False),
_Option(["-s", "trimming"],
"If fail to align, trim by s bases until they map or become shorter than l.\n\n"
"Ddefault: 2",
checker_function=lambda x: isinstance(x, types.IntType),
equate=False),
_Option(["-5", "adapter5"],
"Strips a 5' adapter sequence.\n\n"
"Similar to -a (adaptor3), but on the 5' end.",
checker_function=lambda x: isinstance(x, types.StringType),
equate=False),
# Reporting options
_Option(["-o", "report"],
"Specifies the report format.\n\nAllowed values: %s\nDefault: Native" \
% ", ".join(REPORT_FORMAT),
checker_function=lambda x: x in REPORT_FORMAT,
equate=False),
_Option(["-Q", "quality"],
"Lower threshold for an alignment to be reported [default: 0]",
checker_function=lambda x: isinstance(x, types.IntType),
equate=False),
_Option(["-R", "repeats"],
"If score difference is higher, report repeats.\n\n"
"Otherwise -r read method applies [default: 5]",
checker_function=lambda x: isinstance(x, types.IntType),
equate=False),
_Option(["-r", "r_method"],
"Methods to report reads with multiple matches.\n\n"
"Allowed values: %s\n"
"'All' and 'Exhaustive' accept limits." \
% ", ".join(REPEAT_METHOD),
checker_function=lambda x: x.split()[0] in REPEAT_METHOD,
equate=False),
_Option(["-e", "recorded"],
"Alignments recorded with score equal to the best.\n\n"
"Default: 1000 in default read method, otherwise no limit.",
checker_function=lambda x: isinstance(x, types.IntType),
equate=False),
_Option(["-q", "qual_digits"],
"Decimal digits for quality scores [default: 0]",
checker_function=lambda x: isinstance(x, types.IntType),
equate=False),
# Paired end options
_Option(["-i", "fragment"],
"Fragment length (2 reads + insert) and standard deviation [default: 250 30]",
checker_function=lambda x: len(x.split()) == 2,
equate=False),
_Option(["-v", "variation"],
"Structural variation penalty [default: 70]",
checker_function=lambda x: isinstance(x, types.IntType),
equate=False),
# miRNA mode
_Option(["-m", "miRNA"],
"Sets miRNA mode and optionally sets a value for the region scanned [default: off]",
checker_function=lambda x: isinstance(x, types.IntType),
equate=False),
# Multithreading
_Option(["-c", "cores"],
"Number of threads, disabled on free versions [default: number of cores]",
checker_function=lambda x: isinstance(x, types.IntType),
equate=False),
# Quality calibrations
_Option(["-k", "read_cal"],
"Read quality calibration from file (mismatch counts)",
checker_function=lambda x: isinstance(x, types.StringType),
equate=False),
_Option(["-K", "write_cal"],
"Accumulate mismatch counts and write to file",
checker_function=lambda x: isinstance(x, types.StringType),
equate=False),
]
AbstractCommandline.__init__(self, cmd, **kwargs)
def _test():
"""Run the module's doctests (PRIVATE)."""
print "Runing Novoalign doctests..."
import doctest
doctest.testmod()
print "Done"
if __name__ == "__main__":
_test()
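# A minimal usage sketch (not part of the original module): 'some_db' and
# 'some_seq.txt' are placeholder names, and the novoalign binary must be on
# the PATH for the call to succeed.
#
# from Bio.Sequencing.Applications import NovoalignCommandline
# cline = NovoalignCommandline(database="some_db", readfile="some_seq.txt")
# stdout, stderr = cline() # executes novoalign via the subprocess module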
|
bryback/quickseq
|
genescript/Bio/Sequencing/Applications/_Novoalign.py
|
Python
|
mit
| 8,639
|
[
"Biopython"
] |
c79d2cc707a593da49767e6d6fff02a9d05c62a8506589ff044338b85f3a47e5
|
"""
EMBL format (:mod:`skbio.io.format.embl`)
=========================================
.. currentmodule:: skbio.io.format.embl
EMBL format stores a sequence and its annotation together. The start of the
annotation section is marked by a line beginning with the word "ID". The start
of the sequence section is marked by a line beginning with the word "SQ".
The "//" (terminator) line contains no data or comments and designates
the end of an entry. More information on the EMBL file format can be found
here [1]_.
An EMBL file may have a .embl or .txt extension. An example of an EMBL file
can be seen here [2]_.
Feature Level Products
^^^^^^^^^^^^^^^^^^^^^^
As described in [3]_ *"Feature-level products contain nucleotide sequence
and related annotations derived from submitted ENA assembled and annotated
sequences. Data are distributed in flatfile format, similar to that of parent
ENA records, with each flatfile representing a single feature"*.
While only the sequence of the feature is included in such entries, the
features themselves are derived from the parent entry and cannot be applied
as interval metadata. For this reason, interval metadata are ignored for
Feature-level products, just as they would be ignored when subsetting a
generic Sequence object.
Format Support
--------------
**Has Sniffer: Yes**
**NOTE: No protein support at the moment**
Current protein support development is tracked in issue-1499 [4]_
+------+------+---------------------------------------------------------------+
|Reader|Writer| Object Class |
+======+======+===============================================================+
|Yes |Yes |:mod:`skbio.sequence.Sequence` |
+------+------+---------------------------------------------------------------+
|Yes |Yes |:mod:`skbio.sequence.DNA` |
+------+------+---------------------------------------------------------------+
|Yes |Yes |:mod:`skbio.sequence.RNA` |
+------+------+---------------------------------------------------------------+
|No |No |:mod:`skbio.sequence.Protein` |
+------+------+---------------------------------------------------------------+
|Yes |Yes | generator of :mod:`skbio.sequence.Sequence` objects |
+------+------+---------------------------------------------------------------+
Format Specification
--------------------
**State: Experimental as of 0.5.1-dev.**
Sections before ``FH (Feature Header)``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
All the sections before ``FH (Feature Header)`` will be read into the
``metadata`` attribute. The header and content of each section are stored as
key-value pairs in ``metadata``. For the ``RN (Reference Number)``
section, its value is stored as a list, as there are often multiple
reference sections in one EMBL record.
``FT`` section
^^^^^^^^^^^^^^
See :ref:`Genbank FEATURES section<genbank_feature_section>`
``SQ`` section
^^^^^^^^^^^^^^
The sequence in the ``SQ`` section is always in lowercase for
the EMBL files downloaded from ENA. For RNA molecules, ``t``
(thymine) is used instead of ``u`` (uracil) in the sequence. All
EMBL writers follow these conventions while writing EMBL files.
Examples
--------
Reading EMBL Files
^^^^^^^^^^^^^^^^^^
Suppose we have the following EMBL file example:
>>> embl_str = '''
... ID X56734; SV 1; linear; mRNA; STD; PLN; 1859 BP.
... XX
... AC X56734; S46826;
... XX
... DT 12-SEP-1991 (Rel. 29, Created)
... DT 25-NOV-2005 (Rel. 85, Last updated, Version 11)
... XX
... DE Trifolium repens mRNA for non-cyanogenic beta-glucosidase
... XX
... KW beta-glucosidase.
... XX
... OS Trifolium repens (white clover)
... OC Eukaryota; Viridiplantae; Streptophyta; Embryophyta; Tracheophyta;
... OC Spermatophyta; Magnoliophyta; eudicotyledons; Gunneridae;
... OC Pentapetalae; rosids; fabids; Fabales; Fabaceae; Papilionoideae;
... OC Trifolieae; Trifolium.
... XX
... RN [5]
... RP 1-1859
... RX DOI; 10.1007/BF00039495.
... RX PUBMED; 1907511.
... RA Oxtoby E., Dunn M.A., Pancoro A., Hughes M.A.;
... RT "Nucleotide and derived amino acid sequence of the cyanogenic
... RT beta-glucosidase (linamarase) from white clover
... RT (Trifolium repens L.)";
... RL Plant Mol. Biol. 17(2):209-219(1991).
... XX
... RN [6]
... RP 1-1859
... RA Hughes M.A.;
... RT ;
... RL Submitted (19-NOV-1990) to the INSDC.
... RL Hughes M.A., University of Newcastle Upon Tyne, Medical School,
... RL Newcastle
... RL Upon Tyne, NE2 4HH, UK
... XX
... DR MD5; 1e51ca3a5450c43524b9185c236cc5cc.
... XX
... FH Key Location/Qualifiers
... FH
... FT source 1..1859
... FT /organism="Trifolium repens"
... FT /mol_type="mRNA"
... FT /clone_lib="lambda gt10"
... FT /clone="TRE361"
... FT /tissue_type="leaves"
... FT /db_xref="taxon:3899"
... FT mRNA 1..1859
... FT /experiment="experimental evidence, no additional
... FT details recorded"
... FT CDS 14..1495
... FT /product="beta-glucosidase"
... FT /EC_number="3.2.1.21"
... FT /note="non-cyanogenic"
... FT /db_xref="GOA:P26204"
... FT /db_xref="InterPro:IPR001360"
... FT /db_xref="InterPro:IPR013781"
... FT /db_xref="InterPro:IPR017853"
... FT /db_xref="InterPro:IPR033132"
... FT /db_xref="UniProtKB/Swiss-Prot:P26204"
... FT /protein_id="CAA40058.1"
... FT /translation="MDFIVAIFALFVISSFTITSTNAVEASTLLDIGNLSRS
... FT SFPRGFIFGAGSSAYQFEGAVNEGGRGPSIWDTFTHKYPEKIRDGSNADITV
... FT DQYHRYKEDVGIMKDQNMDSYRFSISWPRILPKGKLSGGINHEGIKYYNNLI
... FT NELLANGIQPFVTLFHWDLPQVLEDEYGGFLNSGVINDFRDYTDLCFKEFGD
... FT RVRYWSTLNEPWVFSNSGYALGTNAPGRCSASNVAKPGDSGTGPYIVTHNQI
... FT LAHAEAVHVYKTKYQAYQKGKIGITLVSNWLMPLDDNSIPDIKAAERSLDFQ
... FT FGLFMEQLTTGDYSKSMRRIVKNRLPKFSKFESSLVNGSFDFIGINYYSSSY
... FT ISNAPSHGNAKPSYSTNPMTNISFEKHGIPLGPRAASIWIYVYPYMFIQEDF
... FT EIFCYILKINITILQFSITENGMNEFNDATLPVEEALLNTYRIDYYYRHLYY
... FT IRSAIRAGSNVKGFYAWSFLDCNEWFAGFTVRFGLNFVD"
... XX
... SQ Sequence 1859 BP; 609 A; 314 C; 355 G; 581 T; 0 other;
... aaacaaacca aatatggatt ttattgtagc catatttgct ctgtttgtta ttagctcatt
... cacaattact tccacaaatg cagttgaagc ttctactctt cttgacatag gtaacctgag
... tcggagcagt tttcctcgtg gcttcatctt tggtgctgga tcttcagcat accaatttga
... aggtgcagta aacgaaggcg gtagaggacc aagtatttgg gataccttca cccataaata
... tccagaaaaa ataagggatg gaagcaatgc agacatcacg gttgaccaat atcaccgcta
... caaggaagat gttgggatta tgaaggatca aaatatggat tcgtatagat tctcaatctc
... ttggccaaga atactcccaa agggaaagtt gagcggaggc ataaatcacg aaggaatcaa
... atattacaac aaccttatca acgaactatt ggctaacggt atacaaccat ttgtaactct
... ttttcattgg gatcttcccc aagtcttaga agatgagtat ggtggtttct taaactccgg
... tgtaataaat gattttcgag actatacgga tctttgcttc aaggaatttg gagatagagt
... gaggtattgg agtactctaa atgagccatg ggtgtttagc aattctggat atgcactagg
... aacaaatgca ccaggtcgat gttcggcctc caacgtggcc aagcctggtg attctggaac
... aggaccttat atagttacac acaatcaaat tcttgctcat gcagaagctg tacatgtgta
... taagactaaa taccaggcat atcaaaaggg aaagataggc ataacgttgg tatctaactg
... gttaatgcca cttgatgata atagcatacc agatataaag gctgccgaga gatcacttga
... cttccaattt ggattgttta tggaacaatt aacaacagga gattattcta agagcatgcg
... gcgtatagtt aaaaaccgat tacctaagtt ctcaaaattc gaatcaagcc tagtgaatgg
... ttcatttgat tttattggta taaactatta ctcttctagt tatattagca atgccccttc
... acatggcaat gccaaaccca gttactcaac aaatcctatg accaatattt catttgaaaa
... acatgggata cccttaggtc caagggctgc ttcaatttgg atatatgttt atccatatat
... gtttatccaa gaggacttcg agatcttttg ttacatatta aaaataaata taacaatcct
... gcaattttca atcactgaaa atggtatgaa tgaattcaac gatgcaacac ttccagtaga
... agaagctctt ttgaatactt acagaattga ttactattac cgtcacttat actacattcg
... ttctgcaatc agggctggct caaatgtgaa gggtttttac gcatggtcat ttttggactg
... taatgaatgg tttgcaggct ttactgttcg ttttggatta aactttgtag attagaaaga
... tggattaaaa aggtacccta agctttctgc ccaatggtac aagaactttc tcaaaagaaa
... ctagctagta ttattaaaag aactttgtag tagattacag tacatcgttt gaagttgagt
... tggtgcacct aattaaataa aagaggttac tcttaacata tttttaggcc attcgttgtg
... aagttgttag gctgttattt ctattatact atgttgtagt aataagtgca ttgttgtacc
... agaagctatg atcataacta taggttgatc cttcatgtat cagtttgatg ttgagaatac
... tttgaattaa aagtcttttt ttattttttt aaaaaaaaaa aaaaaaaaaa aaaaaaaaa
... //
... '''
Now we can read it as ``DNA`` object:
>>> import io
>>> from skbio import DNA, RNA, Sequence
>>> embl = io.StringIO(embl_str)
>>> dna_seq = DNA.read(embl)
>>> dna_seq
DNA
----------------------------------------------------------------------
Metadata:
'ACCESSION': 'X56734; S46826;'
'CROSS_REFERENCE': <class 'list'>
'DATE': <class 'list'>
'DBSOURCE': 'MD5; 1e51ca3a5450c43524b9185c236cc5cc.'
'DEFINITION': 'Trifolium repens mRNA for non-cyanogenic beta-
glucosidase'
'KEYWORDS': 'beta-glucosidase.'
'LOCUS': <class 'dict'>
'REFERENCE': <class 'list'>
'SOURCE': <class 'dict'>
'VERSION': 'X56734.1'
Interval metadata:
3 interval features
Stats:
length: 1859
has gaps: False
has degenerates: False
has definites: True
GC-content: 35.99%
----------------------------------------------------------------------
0 AAACAAACCA AATATGGATT TTATTGTAGC CATATTTGCT CTGTTTGTTA TTAGCTCATT
60 CACAATTACT TCCACAAATG CAGTTGAAGC TTCTACTCTT CTTGACATAG GTAACCTGAG
...
1740 AGAAGCTATG ATCATAACTA TAGGTTGATC CTTCATGTAT CAGTTTGATG TTGAGAATAC
1800 TTTGAATTAA AAGTCTTTTT TTATTTTTTT AAAAAAAAAA AAAAAAAAAA AAAAAAAAA
Since this is an mRNA molecule, we may want to read it as ``RNA``.
As EMBL files usually have ``t`` instead of ``u`` in
the sequence, we can read it as ``RNA`` by converting ``t`` to ``u``:
>>> embl = io.StringIO(embl_str)
>>> rna_seq = RNA.read(embl)
>>> rna_seq
RNA
----------------------------------------------------------------------
Metadata:
'ACCESSION': 'X56734; S46826;'
'CROSS_REFERENCE': <class 'list'>
'DATE': <class 'list'>
'DBSOURCE': 'MD5; 1e51ca3a5450c43524b9185c236cc5cc.'
'DEFINITION': 'Trifolium repens mRNA for non-cyanogenic beta-
glucosidase'
'KEYWORDS': 'beta-glucosidase.'
'LOCUS': <class 'dict'>
'REFERENCE': <class 'list'>
'SOURCE': <class 'dict'>
'VERSION': 'X56734.1'
Interval metadata:
3 interval features
Stats:
length: 1859
has gaps: False
has degenerates: False
has definites: True
GC-content: 35.99%
----------------------------------------------------------------------
0 AAACAAACCA AAUAUGGAUU UUAUUGUAGC CAUAUUUGCU CUGUUUGUUA UUAGCUCAUU
60 CACAAUUACU UCCACAAAUG CAGUUGAAGC UUCUACUCUU CUUGACAUAG GUAACCUGAG
...
1740 AGAAGCUAUG AUCAUAACUA UAGGUUGAUC CUUCAUGUAU CAGUUUGAUG UUGAGAAUAC
1800 UUUGAAUUAA AAGUCUUUUU UUAUUUUUUU AAAAAAAAAA AAAAAAAAAA AAAAAAAAA
We can also ``transcribe`` a sequence and verify that the result is an
``RNA`` sequence
>>> rna_seq == dna_seq.transcribe()
True
Reading EMBL Files using generators
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Suppose we have an EMBL file with multiple records: we can instantiate a
generator object to deal with the records one at a time
>>> import skbio
>>> embl = io.StringIO(embl_str)
>>> embl_gen = skbio.io.read(embl, format="embl")
>>> dna_seq = next(embl_gen)
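As a further illustration (a sketch using the single-record ``embl_str``
from above; with a multi-record file one length would be printed per
record), the generator can also be consumed in a loop:
>>> embl = io.StringIO(embl_str)
>>> for seq in skbio.io.read(embl, format="embl"):
...     print(len(seq))
1859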
For more information, see :mod:`skbio.io`
References
----------
.. [1] ftp://ftp.ebi.ac.uk/pub/databases/embl/release/doc/usrman.txt
.. [2] http://www.ebi.ac.uk/ena/data/view/X56734&display=text
.. [3] http://www.ebi.ac.uk/ena/browse/feature-level-products
.. [4] https://github.com/biocore/scikit-bio/issues/1499
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
# std modules
import re
import copy
import textwrap
from functools import partial
# skbio modules
from skbio.io import create_format, EMBLFormatError
from skbio.io.format._base import (_line_generator, _get_nth_sequence)
from skbio.io.format._sequence_feature_vocabulary import (
_yield_section, _parse_single_feature, _serialize_section_default,
_serialize_single_feature)
from skbio.metadata import IntervalMetadata
from skbio.sequence import Sequence, DNA, RNA, Protein
from skbio.util._misc import chunk_str
# look at skbio.io.registry to get an idea of how to define this class
embl = create_format('embl')
# This ordered list is used to read and write embl files. By processing these
# values one by one, embl sections are written in the same order
_HEADERS = [
'LOCUS',
'ACCESSION',
'PARENT_ACCESSION',
'PROJECT_IDENTIFIER',
'DATE',
'DEFINITION',
'GENE_NAME',
'KEYWORDS',
'SOURCE',
'REFERENCE',
'DBSOURCE',
'COMMENT',
'FEATURES'
]
# embl has a series of keys different from genbank; moreover, keys are not so
# easy to understand (eg. RA for AUTHORS). I want to use the same keys used by
# genbank, both to convert between formats and to use the same methods to get
# info from Sequence and its derived objects. Here is a dictionary for key
# conversion (EMBL->GB). All unspecified keys will remain in embl format
KEYS_TRANSLATOR = {
# identification
'ID': 'LOCUS',
'AC': 'ACCESSION',
# PA means PARENT ACCESSION (?) and applies to
# feature-level-products entries
'PA': 'PARENT_ACCESSION',
'PR': 'PROJECT_IDENTIFIER',
'DT': 'DATE',
'DE': 'DEFINITION',
'GN': 'GENE_NAME', # uniprot specific
'KW': 'KEYWORDS',
# Source (taxonomy and classification)
'OS': 'ORGANISM',
'OC': 'taxonomy',
'OG': 'organelle',
# reference keys
'RA': 'AUTHORS',
'RP': 'REFERENCE',
'RC': 'REFERENCE_COMMENT',
'RX': 'CROSS_REFERENCE',
'RG': 'GROUP',
'RT': 'TITLE',
'RL': 'JOURNAL',
# Cross references
'DR': 'DBSOURCE',
'CC': 'COMMENT',
# features
'FH': 'FEATURES',
'FT': 'FEATURES',
'SQ': 'ORIGIN',
}
# the inverse of KEYS_TRANSLATOR, for simplicity
REV_KEYS_TRANSLATOR = {v: k for k, v in KEYS_TRANSLATOR.items()}
# the original genbank _yield_section divides entries into sections relying on
# spaces (the same section has the same level of indentation). EMBL entries
# have a key for each line, so to divide a record into sections I need to
# define a correspondence from each key to a section; the record is then
# divided into sections using these section names.
KEYS_2_SECTIONS = {
# identification
'ID': 'LOCUS',
'AC': 'ACCESSION',
# PA means PARENT ACCESSION (?) and applies to
# feature-level-products entries
'PA': 'PARENT_ACCESSION',
'PR': 'PROJECT_IDENTIFIER',
'DT': 'DATE',
'DE': 'DEFINITION',
'GN': 'GENE_NAME', # uniprot specific
'KW': 'KEYWORDS',
# Source (taxonomy and classification)
'OS': 'SOURCE',
'OC': 'SOURCE',
'OG': 'SOURCE',
# reference keys
'RA': 'REFERENCE',
'RP': 'REFERENCE',
'RC': 'REFERENCE',
'RX': 'REFERENCE',
'RG': 'REFERENCE',
'RT': 'REFERENCE',
'RL': 'REFERENCE',
# This should be Reference Number. However, to split
# between references with _embl_yield_section I need to
# change section after reading one reference. So a single
# reference is completed when a new RN is found. The
# reference number information will be the reference's
# position in the final REFERENCE list metadata
'RN': 'SPACER',
# Cross references
'DR': 'DBSOURCE',
'CC': 'COMMENT',
'AH': 'ASSEMBLY',
'AS': 'ASSEMBLY',
'FH': 'FEATURES',
'FT': 'FEATURES',
# sequence
'SQ': 'ORIGIN',
' ': 'ORIGIN',
'CO': 'CONSTRUCTED',
# spacer (discarded)
'XX': 'SPACER'
}
# for convenience: I think such functions are more readable when accessing
# values in lambda functions
def _get_embl_key(line):
"""Return first part of a string as a embl key (ie 'AC M14399;' -> 'AC')"""
# embl keys have a fixed size of 2 chars
return line[:2]
def _get_embl_section(line):
"""Return the embl section from uniprot key(ie 'RA' -> 'REFERENCE')"""
# get embl key
key = _get_embl_key(line)
# get embl section from key
section = KEYS_2_SECTIONS[key]
return section
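# For example (an illustrative line borrowed from the module docstring):
# _get_embl_key('RA   Oxtoby E., Dunn M.A.;') returns 'RA', and
# _get_embl_section('RA   Oxtoby E., Dunn M.A.;') returns 'REFERENCE'.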
def _translate_key(key):
"""A method to translate a single key from EMBL to genbank. Returns key
itself if no traslation is defined"""
return KEYS_TRANSLATOR.get(key, key)
# a method to translate keys from embl to genbank for a dict object. All keys
# without a defined translation will remain the same
def _translate_keys(data):
"""Translate a dictionary of uniprot key->value in a genbank like
dictionary of key values. Keep old keys if no translation is defined"""
# traslate keys and get a new_data object
new_data = {_translate_key(k): v for k, v in data.items()}
return new_data
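# For example, a parsed block like {'RA': 'Hughes M.A.;'} becomes
# {'AUTHORS': 'Hughes M.A.;'}, while keys without an entry in
# KEYS_TRANSLATOR are kept unchanged.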
# define a default textwrap.Wrapper for embl
def _get_embl_wrapper(embl_key, indent=5, subsequent_indent=None, width=80):
"""Returns a textwrap.TextWrapper for embl records (eg, write
<key> <string> by providing embl key and a string. Wrap text to
80 column"""
# define the string to prepen (eg "OC ")
prepend = '{key:<{indent}}'.format(key=embl_key, indent=indent)
# deal with 2° strings and more
if subsequent_indent is None:
subsequent_prepend = prepend
else:
subsequent_prepend = '{key:<{indent}}'.format(
key=embl_key, indent=subsequent_indent)
# define a text wrapper object
wrapper = textwrap.TextWrapper(
initial_indent=prepend,
subsequent_indent=subsequent_prepend,
width=width
)
return wrapper
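# As an illustration (the values are hypothetical), wrapping a long taxonomy
# string with _get_embl_wrapper('OC', indent=5) yields lines such as
# 'OC   Eukaryota; Viridiplantae; Streptophyta; ...', each at most 80
# characters wide and each prefixed with the padded 'OC' key.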
def _serialize_list(embl_wrapper, data, sep="\n"):
"""Serialize a list of obj using a textwrap.TextWrapper instance. Returns
one string of wrapped embl objects"""
# the output array
output = []
for line in data:
output += embl_wrapper.wrap(line)
# merge the lines into one string. Add a final newline
output = sep.join(output) + "\n"
# return the computed string
return output
# Method to determine whether a file is in EMBL format or not. The uniprot
# flavour of the embl format can't be parsed by this module (at the moment)
@embl.sniffer()
def _embl_sniffer(fh):
try:
line = next(_line_generator(fh, skip_blanks=True, strip=False))
except StopIteration:
return False, {}
try:
_parse_id([line])
except EMBLFormatError:
return False, {}
return True, {}
@embl.reader(None)
def _embl_to_generator(fh, constructor=None, **kwargs):
for record in _parse_embls(fh):
yield _construct(record, constructor, **kwargs)
# Method to read EMBL data as skbio.sequence.Sequence
@embl.reader(Sequence)
def _embl_to_sequence(fh, seq_num=1, **kwargs):
record = _get_nth_sequence(_parse_embls(fh), seq_num)
return _construct(record, Sequence, **kwargs)
# Method to read EMBL data as skbio.sequence.DNA
@embl.reader(DNA)
def _embl_to_dna(fh, seq_num=1, **kwargs):
record = _get_nth_sequence(_parse_embls(fh), seq_num)
return _construct(record, DNA, **kwargs)
# Method to read EMBL data as skbio.sequence.RNA
@embl.reader(RNA)
def _embl_to_rna(fh, seq_num=1, **kwargs):
record = _get_nth_sequence(_parse_embls(fh), seq_num)
return _construct(record, RNA, **kwargs)
# No protein support at the moment
@embl.reader(Protein)
def _embl_to_protein(fh, seq_num=1, **kwargs):
# no protein support, at the moment
raise EMBLFormatError("There's no protein support for EMBL record. "
"Current status of EMBL protein support is "
"described in issue-1499 (https://github.com/"
"biocore/scikit-bio/issues/1499)")
# Writer methods
@embl.writer(None)
def _generator_to_embl(obj, fh):
for obj_i in obj:
_serialize_single_embl(obj_i, fh)
@embl.writer(Sequence)
def _sequence_to_embl(obj, fh):
_serialize_single_embl(obj, fh)
@embl.writer(DNA)
def _dna_to_embl(obj, fh):
_serialize_single_embl(obj, fh)
@embl.writer(RNA)
def _rna_to_embl(obj, fh):
_serialize_single_embl(obj, fh)
@embl.writer(Protein)
def _protein_to_embl(obj, fh):
# no protein support, at the moment
raise EMBLFormatError("There's no protein support for EMBL record. "
"Current status of EMBL protein support is "
"described in issue-1499 (https://github.com/"
"biocore/scikit-bio/issues/1499)")
def _construct(record, constructor=None, **kwargs):
'''Construct the object of Sequence, DNA, RNA, or Protein.'''
# sequence, metadata and interval metadata
seq, md, imd = record
if 'lowercase' not in kwargs:
kwargs['lowercase'] = True
if constructor is None:
unit = md['LOCUS']['unit']
if unit == 'bp':
# RNA mol type has T instead of U for genbank files from NCBI
constructor = DNA
elif unit == 'aa':
# no protein support, at the moment
# constructor = Protein
raise EMBLFormatError("There's no protein support for EMBL record")
if constructor == RNA:
return DNA(
seq, metadata=md, interval_metadata=imd, **kwargs).transcribe()
else:
return constructor(
seq, metadata=md, interval_metadata=imd, **kwargs)
# looks like the genbank _parse_genbank
def _parse_embls(fh):
"""Chunck multiple EMBL records by '//', and returns a generator"""
data_chunks = []
for line in _line_generator(fh, skip_blanks=True, strip=False):
if line.startswith('//'):
yield _parse_single_embl(data_chunks)
data_chunks = []
else:
data_chunks.append(line)
def _parse_single_embl(chunks):
metadata = {}
interval_metadata = None
sequence = ''
# define a section splitter with the _embl_yield_section function defined in
# this module (it returns the embl section for each embl key). The splitter
# returns a generator yielding one block per line type
section_splitter = _embl_yield_section(
lambda line: _get_embl_section(line),
skip_blanks=True,
strip=False)
# process each section, like genbank does.
for section, section_name in section_splitter(chunks):
# section is a list of records with the same section type (eg RA, RP
# for a single reference). section_name is the name of the section
# (eg REFERENCE for the section of the previous example)
# search for a specific method in _PARSER_TABLE using section_name, or
# fall back to _embl_parse_section_default
parser = _PARSER_TABLE.get(
section_name, _embl_parse_section_default)
if section_name == 'FEATURES':
# This requires the 'ID' line to be parsed before 'FEATURES', which
# should be true and is implicitly checked by the sniffer, since
# the first section is parsed by the last else condition
if "PARENT_ACCESSION" in metadata:
# this is a feature-level-products entry and features are
# relative to parent accession; in the same way a subset of a
# Sequence object has no interval metadata, I will refuse to
# process interval metadata here
continue
# partials add arguments to previously defined functions, in this
# case the length of the Sequence object
parser = partial(
parser, length=metadata["LOCUS"]["size"])
elif section_name == "COMMENT":
# maintain newlines in comments
# partials add arguments to previously defined functions
parser = partial(
parser, join_delimiter="\n")
# call function on section
parsed = parser(section)
# reference can appear multiple times
if section_name == 'REFERENCE':
# genbank data has no CROSS_REFERENCE section. To have a similar
# metadata object, I chose to remove CROSS_REFERENCE from
# each single reference and put them in metadata. Since there could
# be more references, I need a list of CROSS_REFERENCE, with
# None values when CROSS_REFERENCE is not defined: there are cases
# in which some references have a CROSS_REFERENCE and others don't.
# So each reference will have its cross reference at the same
# index position, defined or not
cross_reference = parsed.pop("CROSS_REFERENCE", None)
# fix REFERENCE metadata. Check whether this is the first reference or
# not: I need a reference number like genbank, which can be derived
# from the number of references seen so far
if section_name in metadata:
RN = len(metadata[section_name]) + 1
else:
RN = 1
# fix reference fields. Get RN->REFERENCE value from dict
positions = parsed.pop("REFERENCE", None)
parsed["REFERENCE"] = str(RN)
# append position to RN (eg "1 (bases 1 to 63)")
if positions:
parsed["REFERENCE"] += " %s" % (positions)
# cross_reference will be a list of cross references; also
# metadata[REFERENCE] is a list of references
if section_name in metadata:
# I've already seen a reference, append new one
metadata[section_name].append(parsed)
metadata["CROSS_REFERENCE"].append(cross_reference)
else:
# define a list for this first reference and its RX
metadata[section_name] = [parsed]
metadata["CROSS_REFERENCE"] = [cross_reference]
elif section_name == 'ORIGIN':
sequence = parsed
elif section_name == 'FEATURES':
interval_metadata = parsed
elif section_name == 'DATE':
# read data (list)
metadata[section_name] = parsed
# fix locus metadata using last date. Take only last date
date = metadata[section_name][-1].split()[0]
metadata["LOCUS"]["date"] = date
# parse all the other sections (SOURCE, ...)
else:
metadata[section_name] = parsed
# after metadata are read, add a VERSION section like genbank.
# Evaluate whether the entry is a feature level product or not
if "ACCESSION" in metadata:
metadata["VERSION"] = "{accession}.{version}".format(
accession=metadata["ACCESSION"].split(";")[0],
version=metadata["LOCUS"]["version"])
elif "PARENT_ACCESSION" in metadata:
# locus name is in the format
# <accession>.<version>:<feature location>:<feature name>[:ordinal]
# and the ordinal may or may not be present, depending on how many
# features are found at that location. Such entries can't be found in
# other databases like NCBI (at the moment), so we take the version
# from the parent accession (hoping that an update in the parent
# accession will generate an update in all feature level products)
metadata["VERSION"] = metadata["PARENT_ACCESSION"]
# return a string, metadata as a dictionary and an IntervalMetadata object
return sequence, metadata, interval_metadata
def _write_serializer(fh, serializer, embl_key, data):
"""A simple method to write serializer to a file. Append 'XX'"""
# call the serializer function
out = serializer(embl_key, data)
# test if 'out' is an iterator.
# cf. Effective Python Item 17
if iter(out) is iter(out):
for s in out:
fh.write(s)
else:
fh.write(out)
# add spacer between sections
fh.write("XX\n")
# main function for writer methods
def _serialize_single_embl(obj, fh):
'''Write an EMBL record.
Always write it in the ENA canonical way:
1. sequence in lowercase (uniprot sequences are uppercase)
2. 'u' as 't' even in RNA molecules.
Parameters
----------
obj : Sequence or its child class
'''
# shortcut to deal with metadata
md = obj.metadata
# embl uses a different magic number (indent) than genbank
serialize_default = partial(
_serialize_section_default, indent=5)
# Now cycle over the GB-like headers (sections) in _HEADERS.
for header in _HEADERS:
# Get appropriate serializer method or default one
serializer = _SERIALIZER_TABLE.get(
header, serialize_default)
# headers need to be converted into embl keys, or maintained as they are
# if no conversion is defined.
embl_key = REV_KEYS_TRANSLATOR.get(header, header)
# this is true also for locus line
if header in md:
# deal with special source case, add cross references if needed
if header == "REFERENCE":
serializer = partial(
serializer, cross_references=md.get("CROSS_REFERENCE"))
elif header == "LOCUS":
# pass also metadata (in case of entries from genbank)
serializer = partial(
serializer, metadata=md)
# call the serializer function
_write_serializer(fh, serializer, embl_key, md[header])
else:
# header not in metadata. Could it be a date read from GB?
if header == "DATE":
# Is there a date in the locus metadata?
if md["LOCUS"]["date"]:
# call serializer on date. Date is a list of values
_write_serializer(
fh, serializer, embl_key, [md["LOCUS"]["date"]])
if header == 'FEATURES':
if obj.has_interval_metadata():
# magic number 21: the amount of indentation before
# feature table starts as defined by INSDC
indent = 21
feature_key = "FH Key"
fh.write('{header:<{indent}}Location/Qualifiers\n'.format(
header=feature_key, indent=indent))
# add FH spacer
fh.write("FH\n")
for s in serializer(obj.interval_metadata._intervals, indent):
fh.write(s)
# add spacer between sections
fh.write("XX\n")
# write out the sequence
# always write RNA seq as DNA
if isinstance(obj, RNA):
obj = obj.reverse_transcribe()
# serialize sequence from a Sequence object
for s in _serialize_sequence(obj):
fh.write(s)
# terminate an embl record with '//'
fh.write('//\n')
def _parse_id(lines):
"""
From EMBL user manual (Release 130, November 2016)
(ftp://ftp.ebi.ac.uk/pub/databases/embl/release/doc/usrman.txt)
The ID (IDentification) line is always the first line of an entry. The
format of the ID line is:
ID <1>; SV <2>; <3>; <4>; <5>; <6>; <7> BP.
The tokens represent:
1. Primary accession number
2. Sequence version number
3. Topology: 'circular' or 'linear'
4. Molecule type (see note 1 below)
5. Data class (see section 3.1 of EMBL user manual)
6. Taxonomic division (see section 3.2 of EMBL user manual)
7. Sequence length (see note 2 below)
Note 1 - Molecule type: this represents the type of molecule as stored and
can be any value from the list of current values for the mandatory mol_type
source qualifier. This item should be the same as the value in the mol_type
qualifier(s) in a given entry.
Note 2 - Sequence length: The last item on the ID line is the length of the
sequence (the total number of bases in the sequence). This number includes
base positions reported as present but undetermined (coded as "N").
An example of a complete identification line is shown below:
ID CD789012; SV 4; linear; genomic DNA; HTG; MAM; 500 BP.
"""
# get only the first line of EMBL record
line = lines[0]
# define a specific pattern for EMBL
pattern = re.compile(r'ID'
r' +([^\s]+);' # ie: CD789012
r' +SV ([0-9]*);' # 4
r' +(\w+);' # linear
r' +([^;]+);' # genomic DNA
r' +(\w*);' # HTG
r' +(\w+);' # MAM
r' +(\d+)' # 500
r' +(\w+)\.$') # BP
# search it
matches = re.match(pattern, line)
try:
res = dict(zip(
['locus_name', 'version', 'shape', 'mol_type',
'class', 'division', 'size', 'unit'],
matches.groups()))
except AttributeError:
raise EMBLFormatError(
"Could not parse the ID line:\n%s" % line)
# check for CON entries:
if res['class'] == "CON":
# entries like http://www.ebi.ac.uk/ena/data/view/LT357133
# don't have a sequence, so they can't be read by skbio.sequence
raise EMBLFormatError(
"There's no support for embl CON record: for more information "
"see issue-1506 (https://github.com/biocore/scikit-bio/issues/"
"1506)")
# size is an integer
res['size'] = int(res['size'])
# version could be an integer
if res['version']:
res['version'] = int(res['version'])
# unit is lowercase in other modules
res['unit'] = res['unit'].lower()
# initialize a date record (for gb compatibility)
res['date'] = None
# returning parsed attributes
return res
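# As an illustration, the example ID line shown in the docstring above
# ('ID   CD789012; SV 4; linear; genomic DNA; HTG; MAM; 500 BP.') parses to:
# {'locus_name': 'CD789012', 'version': 4, 'shape': 'linear',
#  'mol_type': 'genomic DNA', 'class': 'HTG', 'division': 'MAM',
#  'size': 500, 'unit': 'bp', 'date': None}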
def _serialize_id(header, obj, metadata={}, indent=5):
'''Serialize ID line.
Parameters
----------
obj : dict
'''
# get key->value pairs, or key->'' if the value is None
kwargs = {k: '' if v is None else v for k, v in obj.items()}
# the unit is in upper case in the ID line
kwargs["unit"] = kwargs["unit"].upper()
# check for missing keys (eg from gb data). Keys in md are in uppercase
for key in ["version", "class"]:
if key not in kwargs:
if key.upper() in metadata:
kwargs[key] = metadata[key.upper()]
else:
kwargs[key] = ""
# version from genbank could be "M14399.1 GI:145229". I need an integer
version = kwargs["version"]
# version could be empty, an integer or text
if version != '':
try:
int(kwargs["version"])
# could be a text like M14399.1
except ValueError:
match = re.search(r"^\w+\.([0-9]+)", version)
if match:
kwargs["version"] = match.groups()[0]
# return first line
return ('{header:<{indent}}{locus_name}; SV {version}; {shape}; '
'{mol_type}; {class}; {division}; {size} {unit}.\n').format(
header=header, indent=indent, **kwargs)
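# As an illustration, serializing the dict shown after _parse_id above
# (with header='ID') reproduces the original line:
# 'ID   CD789012; SV 4; linear; genomic DNA; HTG; MAM; 500 BP.\n'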
# similar to skbio.io.format._sequence_feature_vocabulary.__yield_section
# but applies to embl file format
def _embl_yield_section(get_line_key, **kwargs):
'''Returns function that returns successive sections from file.
Parameters
----------
get_line_key : callable
It takes a string as input and returns a key indicating the section
(either the embl key or its section name from KEYS_2_SECTIONS)
kwargs : dict, optional
Keyword arguments will be passed to `_line_generator`.
Returns
-------
function
A function that accepts a list of lines as input and returns
a generator yielding sections one by one.
'''
def parser(lines):
curr = []
curr_type = None
for line in _line_generator(lines, **kwargs):
# if we find a line of a different type, return the previous section
line_type = get_line_key(line)
# changed line type
if line_type != curr_type:
if curr:
# returning block
yield curr, curr_type
# reset curr after yield
curr = []
# reset curr_type in any case
curr_type = line_type
# don't append record if line type is a spacer
if 'SPACER' not in line_type:
curr.append(line)
# don't forget to return the last section in the file
if curr:
yield curr, curr_type
return parser
# replace skbio.io.format._sequence_feature_vocabulary._parse_section_default
def _embl_parse_section_default(
lines, label_delimiter=None, join_delimiter=' ', return_label=False):
'''Parse sections in default way.
Do 2 things:
1. split first line with label_delimiter for label
2. join all the lines into one str with join_delimiter.
'''
data = []
label = None
line = lines[0]
# take the first line, divide the key from the text
items = line.split(label_delimiter, 1)
if len(items) == 2:
label, section = items
else:
label = items[0]
section = ""
# append the text of the first line to an empty array
data.append(section)
# Then process all the lines with the same embl key: remove the key
# and append all the text to the data array
data.extend(line.split(label_delimiter, 1)[-1] for line in lines[1:])
# Now concatenate the text using join_delimiter. All content with the same
# key will be placed in the same string. Strip the final "\n"
data = join_delimiter.join(i.strip() for i in data)
# finally return the merged text content, and the key if needed
if return_label:
return label, data
else:
return data
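# As an illustration (borrowing the 'OC' block from the module docstring),
# the default whitespace label_delimiter splits each line into the label
# 'OC' and its taxonomy text, and the texts are then joined into a single
# space-separated string; the label is returned only when return_label=True.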
# parse an embl reference record.
def _parse_reference(lines):
'''Parse single REFERENCE field.
'''
# parsed reference will be placed here
res = {}
# define a section splitter with _embl_yield_section function defined in
# this module
section_splitter = _embl_yield_section(lambda line: _get_embl_key(line),
skip_blanks=True, strip=False)
# now iterate along sections (lines of the same type)
for section, section_name in section_splitter(lines):
# this function appends all data under the same keyword. It takes a list
# of lines as input (see skbio.io.format._sequence_feature_vocabulary)
label, data = _embl_parse_section_default(
section, join_delimiter=' ', return_label=True)
res[label] = data
# now RX (CROSS_REFERENCE) is a joined string of multiple values. To get
# back to a list of values you can use: re.compile("([^;\s]*); ([^\s]*)")
# search for pubmed record, and add the PUBMED key
if "RX" in res:
match = re.search(r"PUBMED; (\d+)\.", res["RX"])
if match:
# add pubmed notation
res["PUBMED"] = match.groups()[0]
# fix the RP field like genbank (if it exists), ie: (bases 1 to 63)
if "RP" in res:
match = re.search(r"(\d+)-(\d+)", res["RP"])
if match:
# fix rp fields
res["RP"] = "(bases {start} to {stop})".format(
start=match.groups()[0], stop=match.groups()[1])
# return translated keys (EMBL->GB)
return _translate_keys(res)
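# As an illustration, for the first reference in the docstring example an
# 'RP   1-1859' line ends up as res['REFERENCE'] == '(bases 1 to 1859)'
# after the fix above, and 'RX   PUBMED; 1907511.' adds
# res['PUBMED'] == '1907511'.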
def _serialize_reference(header, obj, cross_references, indent=5):
"""Serialize a list of references"""
reference = []
sort_order = ["RC", "RP", "RX", "RG", "RA", "RT", "RL"]
# deal with RX pattern and RP pattern
RX = re.compile(r"([^;\s]*); ([^\s]*)")
RP = re.compile(r"bases (\d+) to (\d+)")
# create a copy of obj that can be changed. I need to delete values or
# add new ones
obj = copy.deepcopy(obj)
# obj is a list of references: a copy of metadata['REFERENCE']
for i, data in enumerate(obj):
# get the reference number (as the iteration number)
embl_key = "RN"
# get cross_references
if cross_references:
cross_reference = cross_references[i]
# append cross reference [i] to data (obj[i]) (if it exists)
if cross_reference:
data["CROSS_REFERENCE"] = cross_reference
# delete PUBMED key (already present in CROSS_REFERENCE)
if "PUBMED" in data:
del(data["PUBMED"])
else:
# no cross reference, do I have PUBMED in data?
if "PUBMED" in data:
# add a fake CROSS_REFERENCE
data["CROSS_REFERENCE"] = 'PUBMED; %s.' % data["PUBMED"]
# get an embl wrapper
wrapper = _get_embl_wrapper(embl_key, indent)
# define wrapped string and add RN to embl data
reference += wrapper.wrap("[{RN}]".format(RN=i+1))
# now process each record for references
for embl_key in sort_order:
# get internal key (genbank like key)
key = _translate_key(embl_key)
# do I have this key in my reference data?
if key not in data:
continue
# if yes, define wrapper
wrapper = _get_embl_wrapper(embl_key, indent)
# data could have newlines
records = data[key].split("\n")
for record in records:
# strip whitespace after splitting on newlines
record = record.strip()
# define wrapped string. beware RX
if embl_key == "RX":
for match in re.finditer(RX, record):
source, link = match.groups()
# join text
cross_reference = "; ".join([source, link])
reference += wrapper.wrap(cross_reference)
# RP case
elif embl_key == "RP":
match = re.search(RP, record)
# if I have position, re-define RP key
if match:
record = "%s-%s" % match.groups()
reference += wrapper.wrap(record)
# if not, ignore RP key
else:
continue
# all the other cases, go in wrapper as they are
else:
reference += wrapper.wrap(record)
# add a spacer between references (but not after the final reference)
# because the caller will add a spacer
if (i+1) < len(obj):
reference += ["XX"]
# now define a string and add a final "\n"
s = "\n".join(reference) + "\n"
# and return it
return s
# parse an embl source record.
def _parse_source(lines):
'''Parse single SOURCE field.
'''
# the parsed source will be placed here
res = {}
# define a section splitter with _embl_yield_section function defined in
# this module
section_splitter = _embl_yield_section(lambda line: _get_embl_key(line),
skip_blanks=True, strip=False)
# now iterate along sections (lines of the same type)
for section, section_name in section_splitter(lines):
# this function appends all data under the same keyword. It takes a list
# of lines as input (see skbio.io.format._sequence_feature_vocabulary)
label, data = _embl_parse_section_default(
section, join_delimiter=' ', return_label=True)
res[label] = data
# return translated keys
return _translate_keys(res)
def _serialize_source(header, obj, indent=5):
'''Serialize SOURCE.
Parameters
----------
header: section header
obj : dict
indent : indent length
'''
source = []
# treat taxonomy and all the other keys
for key in ["ORGANISM", "taxonomy", "organelle"]:
# get data to serialize
data = obj.get(key)
# if the key is not defined (eg. organelle), continue
if data is None:
continue
# get embl key for my key (eg, taxonomy -> OC)
embl_key = REV_KEYS_TRANSLATOR.get(key, key)
# get an embl wrapper
wrapper = _get_embl_wrapper(embl_key, indent)
# define wrapped string
source += wrapper.wrap(data)
# now define a string and add a final "\n"
s = "\n".join(source) + "\n"
# and return it
return s
def _parse_sequence(lines):
'''Parse the sequence section for sequence.'''
# result array
sequence = []
for line in lines:
# ignore records like:
# SQ Sequence 275 BP; 64 A; 73 C; 88 G; 50 T; 0 other;
if line.startswith('SQ'):
continue
# remove the numbers inside the string; remove spaces around items
items = [i for i in line.split() if not i.isdigit()]
# append each sequence item to the sequence list
sequence += items
return ''.join(sequence)
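# As an illustration, a sequence line such as
# 'aaacaaacca aatatggatt ttattgtagc        60' contributes
# 'aaacaaaccaaatatggattttattgtagc': whitespace and the trailing position
# number (when present) are dropped.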
def _serialize_sequence(obj, indent=5):
'''Serialize seq to SQ.
Parameters
----------
obj : DNA, RNA, Sequence Obj
'''
# a flag to determine if I wrote the header or not
flag_header = False
# magic numbers: there will be 60 letters (AA, bp) on each line
chunk_size = 60
# letters (AA, bp) will be grouped by 10: each group is divided by
# one space from each other
frag_size = 10
# the sequence will have indent spaces on the left, chunk_size/frag_size
# groups of frag_size letters separated by single spaces,
# then the sequence length aligned on the right to get a string of
# line_size. Setting left and right padding for simplicity
pad_right = 65 # there are also 5 columns for indentation
pad_left = 10 # sequence number will be in the last 10 columns
# get sequence as a string with lower letters (uniprot will be upper!)
seq = str(obj).lower()
# count bases in the sequence. frequencies() returns a dictionary of
# occurrences of A,C,G,T. Sequences are always stored in capital letters
freq = obj.frequencies()
# get values instead of popping them: I can't assume that the letter T,
# for example, is always present
n_a = freq.get('A', 0)
n_c = freq.get('C', 0)
n_g = freq.get('G', 0)
n_t = freq.get('T', 0)
# this will be the count of all others letters (more than ACGT)
n_others = len(obj) - (n_a + n_c + n_g + n_t)
# define SQ like this:
# SQ Sequence 275 BP; 63 A; 72 C; 88 G; 52 T; 0 other;
SQ = "SQ Sequence {size} {unit}; {n_a} A; {n_c} C; {n_g} G; " +\
"{n_t} T; {n_others} other;\n"
# TODO: deal with protein SQ: they have a sequence header like:
# SQ SEQUENCE 256 AA; 29735 MW; B4840739BF7D4121 CRC64;
# apply format
SQ = SQ.format(size=len(obj), unit=obj.metadata["LOCUS"]["unit"].upper(),
n_a=n_a, n_c=n_c, n_g=n_g, n_t=n_t, n_others=n_others)
for i in range(0, len(seq), chunk_size):
line = seq[i:i+chunk_size]
# pad string left and right
s = '{indent}{s:<{pad_right}}{pos:>{pad_left}}\n'.format(
indent=" "*indent,
s=chunk_str(line, frag_size, ' '),
pad_left=pad_left,
pos=i+len(line),
pad_right=pad_right)
if not flag_header:
# First time here. Add SQ header to sequence
s = SQ + s
# Once I have added the header, I need to turn off this flag
flag_header = True
yield s
def _embl_parse_feature_table(lines, length):
"""Parse embl feature tables"""
# define interval metadata
imd = IntervalMetadata(length)
# get only FT records, and remove key from line
lines = [line[2:] for line in lines if line.startswith('FT')]
# magic number 19: after key removal, the lines of each feature
# are indented with 19 spaces.
feature_indent = ' ' * 19
section_splitter = _yield_section(
lambda x: not x.startswith(feature_indent),
skip_blanks=True, strip=False)
for section in section_splitter(lines):
_parse_single_feature(section, imd)
return imd
def _serialize_feature_table(intervals, indent=21):
'''
Parameters
----------
intervals : list of ``Interval``
'''
# define an embl wrapper object. I need to replace only the first two
# characters from _serialize_single_feature output
wrapper = _get_embl_wrapper("FT", indent=2, subsequent_indent=21)
for intvl in intervals:
tmp = _serialize_single_feature(intvl, indent)
output = []
# I need to remove two spaces, because I will add a FT key
for line in tmp.split("\n"):
output += wrapper.wrap(line[2:])
# re-add newlines between elements, and a final "\n"
yield "\n".join(output) + "\n"
def _parse_date(lines, label_delimiter=None, return_label=False):
"""Parse embl date records"""
# take the first line, and derive a label
label = lines[0].split(label_delimiter, 1)[0]
# read all the other dates and append them to the data array
data = [line.split(label_delimiter, 1)[-1] for line in lines]
# strip returned data
data = [i.strip() for i in data]
# finally return data array, and the key if needed
if return_label:
return label, data
else:
return data
def _serialize_date(embl_key, date_list, indent=5):
'''Serialize date line.
Parameters
----------
header : embl key id
date_list : a list of dates
'''
# get an embl wrapper
wrapper = _get_embl_wrapper(embl_key, indent)
# serialize dates and return them as a string
return _serialize_list(wrapper, date_list)
def _serialize_comment(embl_key, obj, indent=5):
"""Serialize comment (like Assembly)"""
# obj is a string; split it by newlines
data = obj.split("\n")
# get an embl wrapper
wrapper = _get_embl_wrapper(embl_key, indent)
# serialize data and return it
return _serialize_list(wrapper, data)
def _serialize_dbsource(embl_key, obj, indent=5):
"""Serialize DBSOURCE"""
# data are stored like 'SILVA-LSU; LK021130. SILVA-SSU; LK021130. ...'.
# I need to split the string after each sentence-final period (but not
# inside an accession like AAT09660.1)
# deal with re pattern. A pattern to find a period as end of sentence
DR = re.compile(r"\.\s")
# splitting by this pattern, I will have
# ["SILVA-LSU; LK021130", "SILVA-SSU; LK021130", ...]
# I need that each of them will be in a DR record.
# get an embl wrapper
wrapper = _get_embl_wrapper(embl_key, indent)
# serialize data and return it. Split dbsource using re. Add a
# final period between elements since I removed it by splitting
return _serialize_list(wrapper, re.split(DR, obj), sep=".\n")
def _parse_assembly(lines):
"""Parse embl assembly records"""
output = []
# first line is header, skip it
for line in lines[1:]:
data = line.split()
# data could have comp feature or not. First element in data is 'AS'
if len(data) == 5:
res = dict(zip(
['local_span', 'primary_identifier', 'primary_span', 'comp'],
data[1:]))
elif len(data) == 4:
res = dict(zip(
['local_span', 'primary_identifier', 'primary_span', 'comp'],
data[1:]+['']))
else:
raise EMBLFormatError("Can't parse assembly line %s"
% line)
# append res to output
output += [res]
return output
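# As an illustration (the values are hypothetical), an assembly line such as
# 'AS   1-426   AC004528.1   18665-19090   c' parses to
# {'local_span': '1-426', 'primary_identifier': 'AC004528.1',
#  'primary_span': '18665-19090', 'comp': 'c'}; without the trailing 'c',
# the 'comp' value is the empty string.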
# Map a function to each section of the entry
_PARSER_TABLE = {
'LOCUS': _parse_id,
'SOURCE': _parse_source,
'DATE': _parse_date,
'REFERENCE': _parse_reference,
'FEATURES': _embl_parse_feature_table,
'ORIGIN': _parse_sequence,
'ASSEMBLY': _parse_assembly,
}
# for writer functions
_SERIALIZER_TABLE = {
'LOCUS': _serialize_id,
'SOURCE': _serialize_source,
'DATE': _serialize_date,
'REFERENCE': _serialize_reference,
'FEATURES': _serialize_feature_table,
'COMMENT': _serialize_comment,
'DBSOURCE': _serialize_dbsource,
}
|
gregcaporaso/scikit-bio
|
skbio/io/format/embl.py
|
Python
|
bsd-3-clause
| 53,962
|
[
"scikit-bio"
] |
ec9f5ec4bc56603f6e3c49800c86aa58cd8f3217c14d9ee3284c4d64206146f1
|
# coding: utf-8
from __future__ import unicode_literals
from textwrap import dedent
from kinko.i18n import Extractor, Translator
from kinko.nodes import Tuple, Symbol, String, List
from kinko.compat import PY3
from kinko.parser import parser
from kinko.tokenizer import tokenize
from .base import TestCase, NODE_EQ_PATCHER
class RawParseMixin(object):
def parse(self, src):
src = dedent(src).strip() + '\n'
return parser().parse(list(tokenize(src)))
class TestExtractor(RawParseMixin, TestCase):
def testString(self):
node = self.parse(
"""
div "Some text"
"""
)
messages = Extractor.extract(node)
self.assertEqual(messages, ["Some text"])
def testWithInterpolation(self):
node = self.parse(
"""
div "Some {var} text"
"""
)
messages = Extractor.extract(node)
self.assertEqual(messages, ["Some {var} text"])
class Translations(object):
messages = {
'Some {var} text': 'Какой-то {var} текст',
}
if PY3:
def gettext(self, message):
return self.messages.get(message, message)
else:
def ugettext(self, message):
return self.messages.get(message, message)
class TestTranslator(RawParseMixin, TestCase):
ctx = [NODE_EQ_PATCHER]
def setUp(self):
self.translations = Translations()
def test(self):
node = self.parse(
"""
div "Some {var} text"
"""
)
self.assertEqual(
Translator(self.translations).visit(node),
List([Tuple([Symbol('div'), String('Какой-то {var} текст')])]),
)
|
vmagamedov/kinko
|
tests/test_i18n.py
|
Python
|
bsd-3-clause
| 1,747
|
[
"VisIt"
] |
2c44107cc145b39721ad1d4bcbb72b879f1acf1fa070cbb3893a619cd2f1ad72
|
import json
import logging
import requests
from urllib import urlencode
from django.conf import settings
from django.contrib.auth.views import logout as auth_logout
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseRedirect, Http404
from django.shortcuts import get_object_or_404, render
from django.utils import timezone
from django.utils.crypto import get_random_string
from django.utils.encoding import force_bytes, smart_bytes
from django.utils.safestring import mark_safe
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from django.views.generic import View
from django.utils.translation import ugettext as _
from haystack.generic_views import SearchView
from haystack.query import EmptySearchQuerySet
from josepy.jwk import JWK
from josepy.jws import JWS
from mozilla_django_oidc.views import OIDCAuthenticationRequestView, get_next_url
from mozilla_django_oidc.utils import absolutify, import_from_settings
from raven.contrib.django.models import client
from waffle import flag_is_active
from waffle.decorators import waffle_flag
from mozillians.api.models import APIv2App
from mozillians.common.decorators import allow_public, allow_unvouched
from mozillians.common.middleware import LOGIN_MESSAGE, GET_VOUCHED_MESSAGE
from mozillians.common.templatetags.helpers import (get_object_or_none, nonprefixed_url, redirect,
urlparams)
from mozillians.common.urlresolvers import reverse
from mozillians.groups.models import Group
import mozillians.phonebook.forms as forms
from mozillians.phonebook.models import Invite
from mozillians.phonebook.utils import redeem_invite
from mozillians.users.managers import EMPLOYEES, MOZILLIANS, PUBLIC, PRIVATE
from mozillians.users.models import AbuseReport, ExternalAccount, IdpProfile, UserProfile
from mozillians.users.tasks import (check_spam_account, send_userprofile_to_cis,
update_email_in_basket)
ORIGINAL_CONNECTION_USER_ID = 'https://sso.mozilla.com/claim/original_connection_user_id'
@allow_unvouched
def login(request):
if request.user.userprofile.is_complete:
return redirect('phonebook:home')
return redirect('phonebook:profile_edit')
@never_cache
@allow_public
def home(request):
show_start = False
if request.GET.get('source', ''):
show_start = True
deprecation_message = ('This website will be decommissioned soon. '
'Please be aware that all changes '
'that you will make <b>after</b> July 27th will '
'not be saved. Please visit <a href="https://people.mozilla.org">'
'Mozilla People Directory</a> to perform the '
'changes directly over there.')
messages.warning(request, mark_safe(deprecation_message))
return render(request, 'phonebook/home.html', {'show_start': show_start})
@allow_unvouched
@never_cache
def vouch(request, username):
"""Automatically vouch username.
This must be behind a waffle flag and activated only for testing
purposes.
"""
if flag_is_active(request, 'testing-autovouch-views'):
profile = get_object_or_404(UserProfile, user__username=username)
now = timezone.now()
description = 'Automatically vouched for testing purposes on {0}'.format(now)
vouch = profile.vouch(None, description=description, autovouch=True)
if vouch:
messages.success(request, _('Successfully vouched user.'))
else:
msg = _('User not vouched. Maybe there are {0} vouches already?')
msg = msg.format(settings.VOUCH_COUNT_LIMIT)
messages.error(request, msg)
return redirect('phonebook:profile_view', profile.user.username)
raise Http404
@allow_unvouched
@never_cache
def unvouch(request, username):
"""Automatically remove all vouches from username.
This must be behind a waffle flag and activated only for testing
purposes.
"""
if flag_is_active(request, 'testing-autovouch-views'):
profile = get_object_or_404(UserProfile, user__username=username)
profile.vouches_received.all().delete()
messages.success(request, _('Successfully unvouched user.'))
return redirect('phonebook:profile_view', profile.user.username)
raise Http404
@allow_public
@never_cache
def view_profile(request, username):
"""View a profile by username."""
data = {}
privacy_mappings = {'anonymous': PUBLIC, 'mozillian': MOZILLIANS, 'employee': EMPLOYEES,
'private': PRIVATE, 'myself': None}
privacy_level = None
abuse_form = None
if (request.user.is_authenticated() and request.user.username == username):
# own profile
view_as = request.GET.get('view_as', 'myself')
privacy_level = privacy_mappings.get(view_as, None)
profile = UserProfile.objects.privacy_level(privacy_level).get(user__username=username)
data['privacy_mode'] = view_as
else:
userprofile_query = UserProfile.objects.filter(user__username=username)
public_profile_exists = userprofile_query.public().exists()
profile_exists = userprofile_query.exists()
profile_complete = userprofile_query.exclude(full_name='').exists()
if not public_profile_exists:
if not request.user.is_authenticated():
# you have to be authenticated to continue
messages.warning(request, LOGIN_MESSAGE)
return (login_required(view_profile, login_url=reverse('phonebook:home'))
(request, username))
if not request.user.userprofile.is_vouched:
# you have to be vouched to continue
messages.error(request, GET_VOUCHED_MESSAGE)
return redirect('phonebook:home')
if not profile_exists or not profile_complete:
raise Http404
profile = UserProfile.objects.get(user__username=username)
profile.set_instance_privacy_level(PUBLIC)
if request.user.is_authenticated():
profile.set_instance_privacy_level(
request.user.userprofile.privacy_level)
if (request.user.is_authenticated() and request.user.userprofile.is_vouched
and not profile.is_vouched):
abuse_report = get_object_or_none(AbuseReport, reporter=request.user.userprofile,
profile=profile)
if not abuse_report:
abuse_report = AbuseReport(reporter=request.user.userprofile, profile=profile)
abuse_form = forms.AbuseReportForm(request.POST or None, instance=abuse_report)
if abuse_form.is_valid():
abuse_form.save()
msg = _(u'Thanks for helping us improve mozillians.org!')
messages.info(request, msg)
return redirect('phonebook:profile_view', profile.user.username)
if (request.user.is_authenticated() and profile.is_vouchable(request.user.userprofile)):
vouch_form = forms.VouchForm(request.POST or None)
data['vouch_form'] = vouch_form
if vouch_form.is_valid():
# We need to re-fetch profile from database.
profile = UserProfile.objects.get(user__username=username)
profile.vouch(request.user.userprofile, vouch_form.cleaned_data['description'])
# Notify the current user that they vouched successfully.
msg = _(u'Thanks for vouching for a fellow Mozillian! This user is now vouched!')
messages.info(request, msg)
return redirect('phonebook:profile_view', profile.user.username)
data['shown_user'] = profile.user
data['profile'] = profile
data['access_groups'] = profile.get_annotated_access_groups()
data['tags'] = profile.get_annotated_tags()
data['abuse_form'] = abuse_form
data['primary_identity'] = profile.identity_profiles.filter(primary_contact_identity=True)
data['alternate_identities'] = profile.identity_profiles.filter(primary_contact_identity=False)
# Only show pending groups if user is looking at their own profile,
# or current user is a superuser
if not (request.user.is_authenticated()
and (request.user.username == username or request.user.is_superuser)):
data['access_groups'] = [grp for grp in data['access_groups']
if not (grp.pending or grp.pending_terms)]
data['tags'] = [tag for tag in data['tags'] if not (tag.pending or tag.pending_terms)]
return render(request, 'phonebook/profile.html', data)
@allow_unvouched
@never_cache
def edit_profile(request):
"""Edit user profile view."""
# Don't use request.user directly; re-fetch the user to get a fresh object
user = User.objects.get(pk=request.user.id)
profile = user.userprofile
user_groups = profile.groups.all().order_by('name')
idp_profiles = IdpProfile.objects.filter(profile=profile)
idp_primary_profile = get_object_or_none(IdpProfile, profile=profile, primary=True)
# The accounts that a user can select as the primary login identity
accounts_qs = ExternalAccount.objects.exclude(type=ExternalAccount.TYPE_EMAIL)
sections = {
'registration_section': ['user_form', 'registration_form'],
'basic_section': ['user_form', 'basic_information_form'],
'groups_section': ['groups_privacy_form'],
'skills_section': ['skills_form'],
'idp_section': ['idp_profile_formset'],
'languages_section': ['language_privacy_form', 'language_formset'],
'accounts_section': ['accounts_formset'],
'location_section': ['location_form'],
'irc_section': ['irc_form'],
'contribution_section': ['contribution_form'],
'tshirt_section': ['tshirt_form'],
}
curr_sect = next((s for s in sections.keys() if s in request.POST), None)
def get_request_data(form):
if curr_sect and form in sections[curr_sect]:
return request.POST
return None
ctx = {}
ctx['user_form'] = forms.UserForm(get_request_data('user_form'), instance=user)
ctx['registration_form'] = forms.RegisterForm(get_request_data('registration_form'),
request.FILES or None,
instance=profile)
basic_information_data = get_request_data('basic_information_form')
ctx['basic_information_form'] = forms.BasicInformationForm(basic_information_data,
request.FILES or None,
instance=profile)
ctx['accounts_formset'] = forms.AccountsFormset(get_request_data('accounts_formset'),
instance=profile,
queryset=accounts_qs)
ctx['location_form'] = forms.LocationForm(get_request_data('location_form'), instance=profile)
ctx['language_formset'] = forms.LanguagesFormset(get_request_data('language_formset'),
instance=profile,
locale=request.locale)
language_privacy_data = get_request_data('language_privacy_form')
ctx['language_privacy_form'] = forms.LanguagesPrivacyForm(language_privacy_data,
instance=profile)
ctx['skills_form'] = forms.SkillsForm(get_request_data('skills_form'), instance=profile)
ctx['contribution_form'] = forms.ContributionForm(get_request_data('contribution_form'),
instance=profile)
ctx['tshirt_form'] = forms.TshirtForm(get_request_data('tshirt_form'), instance=profile)
ctx['groups_privacy_form'] = forms.GroupsPrivacyForm(get_request_data('groups_privacy_form'),
instance=profile)
ctx['irc_form'] = forms.IRCForm(get_request_data('irc_form'), instance=profile)
ctx['idp_profile_formset'] = forms.IdpProfileFormset(get_request_data('idp_profile_formset'),
instance=profile,
queryset=idp_profiles)
ctx['idp_primary_profile'] = idp_primary_profile
ctx['autocomplete_form_media'] = ctx['registration_form'].media + ctx['skills_form'].media
forms_valid = True
if request.POST:
if not curr_sect:
raise Http404
curr_forms = map(lambda x: ctx[x], sections[curr_sect])
forms_valid = all(map(lambda x: x.is_valid(), curr_forms))
if forms_valid:
old_username = request.user.username
for f in curr_forms:
f.save()
# Spawn task to check for spam
if not profile.is_vouched:
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
user_ip = x_forwarded_for.split(',')[0]
else:
user_ip = request.META.get('REMOTE_ADDR')
params = {
'instance_id': profile.id,
'user_ip': user_ip,
'user_agent': request.META.get('HTTP_USER_AGENT'),
'referrer': request.META.get('HTTP_REFERER'),
'comment_author': profile.full_name,
'comment_author_email': profile.email,
'comment_content': profile.bio
}
check_spam_account.delay(**params)
next_section = request.GET.get('next')
next_url = urlparams(reverse('phonebook:profile_edit'), next_section)
if curr_sect == 'registration_section':
settings_url = reverse('phonebook:profile_edit')
settings_link = '<a href="{0}">settings</a>'.format(settings_url)
msg = _(u'Your registration is complete. '
u'Feel free to visit the {0} page to add '
u'additional information to your profile.'.format(settings_link))
messages.info(request, mark_safe(msg))
redeem_invite(profile, request.session.get('invite-code'))
next_url = reverse('phonebook:profile_view', args=[user.username])
elif user.username != old_username:
msg = _(u'You changed your username; '
u'please note your profile URL has also changed.')
                messages.info(request, msg)
return HttpResponseRedirect(next_url)
ctx.update({
'user_groups': user_groups,
'profile': request.user.userprofile,
'vouch_threshold': settings.CAN_VOUCH_THRESHOLD,
'appsv2': profile.apps.filter(enabled=True),
'forms_valid': forms_valid
})
return render(request, 'phonebook/edit_profile.html', ctx)
@allow_unvouched
@never_cache
def delete_identity(request, identity_pk):
"""Delete alternate email address."""
user = User.objects.get(pk=request.user.id)
profile = user.userprofile
# Only email owner can delete emails
idp_query = IdpProfile.objects.filter(profile=profile, pk=identity_pk)
if not idp_query.exists():
raise Http404()
idp_query = idp_query.filter(primary=False, primary_contact_identity=False)
if idp_query.exists():
idp_type = idp_query[0].get_type_display()
idp_query.delete()
send_userprofile_to_cis.delay(profile.pk)
msg = _(u'Identity {0} successfully deleted.'.format(idp_type))
messages.success(request, msg)
return redirect('phonebook:profile_edit')
# We are trying to delete the primary identity, politely ignore the request
    msg = _(u'Sorry, the requested Identity cannot be deleted.')
messages.error(request, msg)
return redirect('phonebook:profile_edit')
@allow_unvouched
@never_cache
def change_primary_contact_identity(request, identity_pk):
"""Change primary email address."""
user = User.objects.get(pk=request.user.id)
profile = user.userprofile
alternate_identities = IdpProfile.objects.filter(profile=profile)
# Only email owner can change primary email
if not alternate_identities.filter(pk=identity_pk).exists():
raise Http404()
if alternate_identities.filter(primary_contact_identity=True).exists():
alternate_identities.filter(pk=identity_pk).update(primary_contact_identity=True)
alternate_identities.exclude(pk=identity_pk).update(primary_contact_identity=False)
msg = _(u'Primary Contact Identity successfully updated.')
messages.success(request, msg)
return redirect('phonebook:profile_edit')
@allow_unvouched
@never_cache
def confirm_delete(request):
"""Display a confirmation page asking the user if they want to
leave.
"""
return render(request, 'phonebook/confirm_delete.html')
@allow_unvouched
@never_cache
@require_POST
def delete(request):
request.user.delete()
messages.info(request, _('Your account has been deleted. Thanks for being a Mozillian!'))
return logout(request)
def invite(request):
profile = request.user.userprofile
invite_form = None
vouch_form = None
if profile.can_vouch:
invite_form = forms.InviteForm(request.POST or None,
instance=Invite(inviter=profile))
vouch_form = forms.VouchForm(request.POST or None)
if invite_form and vouch_form and invite_form.is_valid() and vouch_form.is_valid():
invite_form.instance.reason = vouch_form.cleaned_data['description']
invite = invite_form.save()
invite.send(sender=profile, personal_message=invite_form.cleaned_data['message'])
msg = _(u"%s has been invited to Mozillians. They'll receive an email "
u"with instructions on how to join. You can "
u"invite another Mozillian if you like.") % invite.recipient
messages.success(request, msg)
return redirect('phonebook:invite')
return render(request, 'phonebook/invite.html',
{
'invite_form': invite_form,
'vouch_form': vouch_form,
'invites': profile.invites.all(),
'vouch_threshold': settings.CAN_VOUCH_THRESHOLD,
})
@require_POST
def delete_invite(request, invite_pk):
profile = request.user.userprofile
deleted_invite = get_object_or_404(Invite, pk=invite_pk, inviter=profile, redeemed=None)
deleted_invite.delete()
msg = (_(u"%s's invitation to Mozillians has been revoked. "
u"You can invite %s again if you like.") %
(deleted_invite.recipient, deleted_invite.recipient))
messages.success(request, msg)
return redirect('phonebook:invite')
def apikeys(request):
profile = request.user.userprofile
apikey_request_form = forms.APIKeyRequestForm(
request.POST or None,
instance=APIv2App(enabled=True, owner=profile)
)
if apikey_request_form.is_valid():
apikey_request_form.save()
msg = _(u'API Key generated successfully.')
messages.success(request, msg)
return redirect('phonebook:apikeys')
data = {
'appsv2': profile.apps.filter(enabled=True),
'apikey_request_form': apikey_request_form,
}
return render(request, 'phonebook/apikeys.html', data)
def delete_apikey(request, api_pk):
api_key = get_object_or_404(APIv2App, pk=api_pk, owner=request.user.userprofile)
api_key.delete()
messages.success(request, _('API key successfully deleted.'))
return redirect('phonebook:apikeys')
@allow_unvouched
def logout(request):
"""View that logs out the user and redirects to home page."""
auth_logout(request)
return redirect('phonebook:home')
@allow_public
def register(request):
"""Registers Users.
Pulls out an invite code if it exists and auto validates the user
if so. Single-purpose view.
"""
# TODO already vouched users can be re-vouched?
if 'code' in request.GET:
request.session['invite-code'] = request.GET['code']
if request.user.is_authenticated():
if not request.user.userprofile.is_vouched:
redeem_invite(request.user.userprofile, request.session['invite-code'])
else:
messages.info(request, _("You've been invited to join Mozillians.org! "
"Sign in and then you can create a profile."))
return redirect('phonebook:home')
@require_POST
@csrf_exempt
@allow_public
def capture_csp_violation(request):
data = client.get_data_from_request(request)
data.update({
'level': logging.INFO,
'logger': 'CSP',
})
try:
csp_data = json.loads(request.body)
except ValueError:
# Cannot decode CSP violation data, ignore
return HttpResponseBadRequest('Invalid CSP Report')
try:
blocked_uri = csp_data['csp-report']['blocked-uri']
except KeyError:
# Incomplete CSP report
return HttpResponseBadRequest('Incomplete CSP Report')
client.captureMessage(
message='CSP Violation: {}'.format(blocked_uri),
data=data)
return HttpResponse('Captured CSP violation, thanks for reporting.')
# Django haystack
@allow_public
class PhonebookSearchView(SearchView):
form_class = forms.PhonebookSearchForm
template_name = 'phonebook/search.html'
def form_invalid(self, form):
context = self.get_context_data(**{
self.form_name: form,
'object_list': EmptySearchQuerySet()
})
return self.render_to_response(context)
def get_form_kwargs(self):
"""Pass the request.user to the form's kwargs."""
kwargs = {'initial': self.get_initial()}
if self.request.method == 'GET':
kwargs.update({
'data': self.request.GET
})
kwargs.update({'searchqueryset': self.get_queryset()})
kwargs['request'] = self.request
# pass the parameters from the url
kwargs.update(self.kwargs)
return kwargs
def get_context_data(self, **kwargs):
"""Override method to pass more context data in the template."""
context_data = super(PhonebookSearchView, self).get_context_data(**kwargs)
context_data['functional_areas'] = Group.get_functional_areas()
context_data['show_pagination'] = context_data['is_paginated']
context_data['search_form'] = context_data['form']
context_data['country'] = self.kwargs.get('country')
context_data['region'] = self.kwargs.get('region')
context_data['city'] = self.kwargs.get('city')
return context_data
# Verify additional identities
class VerifyIdentityView(OIDCAuthenticationRequestView):
def __init__(self, *args, **kwargs):
"""Override the init method to dynamically pass a different client_id."""
self.OIDC_RP_VERIFICATION_CLIENT_ID = (
import_from_settings('OIDC_RP_VERIFICATION_CLIENT_ID')
)
super(VerifyIdentityView, self).__init__(*args, **kwargs)
def get(self, request):
"""OIDC client authentication initialization HTTP endpoint.
This is based on the mozilla-django-oidc library
"""
state = get_random_string(import_from_settings('OIDC_STATE_SIZE', 32))
redirect_field_name = import_from_settings('OIDC_REDIRECT_FIELD_NAME', 'next')
params = {
'response_type': 'code',
'scope': import_from_settings('OIDC_RP_SCOPES', 'openid email profile'),
'client_id': self.OIDC_RP_VERIFICATION_CLIENT_ID,
'redirect_uri': absolutify(
request,
nonprefixed_url('phonebook:verify_identity_callback')
),
'state': state
}
if import_from_settings('OIDC_USE_NONCE', True):
nonce = get_random_string(import_from_settings('OIDC_NONCE_SIZE', 32))
params.update({
'nonce': nonce
})
request.session['oidc_verify_nonce'] = nonce
# Add parameter to disable silent authentication and the LDAP check for AUTO_VOUCH_DOMAINS
# This will allow users to verify AUTO_VOUCH_DOMAINS as contact identities
params['account_verification'] = settings.OIDC_ACCOUNT_LINKING
request.session['oidc_verify_state'] = state
request.session['oidc_login_next'] = get_next_url(request, redirect_field_name)
query = urlencode(params)
redirect_url = '{url}?{query}'.format(url=self.OIDC_OP_AUTH_ENDPOINT, query=query)
return HttpResponseRedirect(redirect_url)
class VerifyIdentityCallbackView(View):
def __init__(self, *args, **kwargs):
"""Initialize settings."""
self.OIDC_OP_TOKEN_ENDPOINT = import_from_settings('OIDC_OP_TOKEN_ENDPOINT')
self.OIDC_OP_USER_ENDPOINT = import_from_settings('OIDC_OP_USER_ENDPOINT')
self.OIDC_RP_VERIFICATION_CLIENT_ID = (
import_from_settings('OIDC_RP_VERIFICATION_CLIENT_ID')
)
self.OIDC_RP_VERIFICATION_CLIENT_SECRET = (
import_from_settings('OIDC_RP_VERIFICATION_CLIENT_SECRET')
)
def get(self, request):
"""Callback handler for OIDC authorization code flow.
This is based on the mozilla-django-oidc library.
This callback is used to verify the identity added by the user.
Users are already logged in and we do not care about authentication.
The JWT token is used to prove the identity of the user.
"""
profile = request.user.userprofile
        # This is a different nonce than the one used to log in!
nonce = request.session.get('oidc_verify_nonce')
if nonce:
# Make sure that nonce is not used twice
del request.session['oidc_verify_nonce']
# Check for all possible errors and display a message to the user.
errors = [
'code' not in request.GET,
'state' not in request.GET,
'oidc_verify_state' not in request.session,
not request.GET.get('state')
or request.GET['state'] != request.session['oidc_verify_state']
]
if any(errors):
msg = 'Something went wrong, account verification failed.'
messages.error(request, msg)
return redirect('phonebook:profile_edit')
token_payload = {
'client_id': self.OIDC_RP_VERIFICATION_CLIENT_ID,
'client_secret': self.OIDC_RP_VERIFICATION_CLIENT_SECRET,
'grant_type': 'authorization_code',
'code': request.GET['code'],
'redirect_uri': absolutify(
self.request,
nonprefixed_url('phonebook:verify_identity_callback')
),
}
response = requests.post(self.OIDC_OP_TOKEN_ENDPOINT,
data=token_payload,
verify=import_from_settings('OIDC_VERIFY_SSL', True))
response.raise_for_status()
token_response = response.json()
id_token = token_response.get('id_token')
# Verify JWT
jws = JWS.from_compact(force_bytes(id_token))
jwk = JWK.load(smart_bytes(self.OIDC_RP_VERIFICATION_CLIENT_SECRET))
verified_token = None
if jws.verify(jwk):
verified_token = jws.payload
# Create the new Identity Profile.
if verified_token:
user_info = json.loads(verified_token)
email = user_info['email']
verification_user_id = user_info.get(ORIGINAL_CONNECTION_USER_ID)
msg = ''
if not user_info.get('email_verified'):
msg = 'Account verification failed: Email is not verified.'
if not verification_user_id:
msg = 'Account verification failed: Could not get original user id'
if msg:
messages.error(request, msg)
return redirect('phonebook:profile_edit')
user_q = {
'auth0_user_id': verification_user_id,
'email': email
}
# If we are linking GitHub we need to save
# the username too.
if 'github|' in verification_user_id:
user_q['username'] = user_info['nickname']
# Check that the identity doesn't exist in another Identity profile
# or in another mozillians profile
error_msg = ''
if IdpProfile.objects.filter(**user_q).exists():
error_msg = 'Account verification failed: Identity already exists.'
elif User.objects.filter(email__iexact=email).exclude(pk=profile.user.pk).exists():
error_msg = 'The email in this identity is used by another user.'
if error_msg:
messages.error(request, error_msg)
next_url = self.request.session.get('oidc_login_next', None)
return HttpResponseRedirect(next_url or reverse('phonebook:profile_edit'))
# Save the new identity to the IdpProfile
user_q['profile'] = profile
idp, created = IdpProfile.objects.get_or_create(**user_q)
current_idp = get_object_or_none(IdpProfile, profile=profile, primary=True)
# The new identity is stronger than the one currently used. Let's swap
append_msg = ''
# We need to check for equality too in the case a user updates the primary email in
# the same identity (matching auth0_user_id). If there is an addition of the same type
# we are not swapping login identities
if ((current_idp and current_idp.type < idp.type)
or (current_idp and current_idp.auth0_user_id == idp.auth0_user_id)
or (not current_idp and created and idp.type >= IdpProfile.PROVIDER_GITHUB)):
IdpProfile.objects.filter(profile=profile).exclude(pk=idp.pk).update(primary=False)
idp.primary = True
idp.save()
# Also update the primary email of the user
update_email_in_basket(profile.user.email, idp.email)
User.objects.filter(pk=profile.user.id).update(email=idp.email)
append_msg = ' You need to use this identity the next time you will login.'
send_userprofile_to_cis.delay(profile.pk)
if created:
msg = 'Account successfully verified.'
if append_msg:
msg += append_msg
messages.success(request, msg)
else:
msg = 'Account verification failed: Identity already exists.'
messages.error(request, msg)
next_url = self.request.session.get('oidc_login_next', None)
return HttpResponseRedirect(next_url or reverse('phonebook:profile_edit'))
@waffle_flag('delete-idp-profiles-qa')
@allow_unvouched
@never_cache
def delete_idp_profiles(request):
"""QA helper: Delete IDP profiles for request.user"""
request.user.userprofile.idp_profiles.all().delete()
messages.warning(request, 'Identities deleted.')
return redirect('phonebook:profile_edit')
|
mozilla/mozillians
|
mozillians/phonebook/views.py
|
Python
|
bsd-3-clause
| 31,909
|
[
"VisIt"
] |
b324afb710dec866cb038ff865b87cdfa6dc7a127203a9b57b89415fb3314f26
|
import py, re, os, signal, time, commands
from subprocess import Popen, PIPE
mod_re = (r"\bmodule\s+(", r")\s*\(\s*")
func_re = (r"\bfunction\s+(", r")\s*\(")
def extract_definitions(fpath, name_re=r"\w+", def_re=""):
regex = name_re.join(def_re)
matcher = re.compile(regex)
return (m.group(1) for m in matcher.finditer(fpath.read()))
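# Illustrative note (added comment): name_re.join(def_re) splices the name pattern
# between the two halves of the regex tuple, so with name_re=r"test\w*" and
# def_re=mod_re the composed pattern is r"\bmodule\s+(test\w*)\s*\(\s*".
# E.g. a file containing "module testCube() { cube(1); }" yields ["testCube"].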
def extract_mod_names(fpath, name_re=r"\w+"):
return extract_definitions(fpath, name_re=name_re, def_re=mod_re)
def extract_func_names(fpath, name_re=r"\w+"):
return extract_definitions(fpath, name_re=name_re, def_re=func_re)
def collect_test_modules(dirpath=None):
dirpath = dirpath or py.path.local("./")
print "Collecting openscad test module names"
test_files = {}
for fpath in dirpath.visit('*.scad'):
#print fpath
modules = extract_mod_names(fpath, r"test\w*")
#functions = extract_func_names(fpath, r"test\w*")
test_files[fpath] = modules
return test_files
class Timeout(Exception): pass
def call_openscad(path, stlpath, timeout=5):
command = ['openscad', '-s', str(stlpath), str(path)]
print command
    if timeout:
        proc = None
        try:
            proc = Popen(command,
                    stdout=PIPE, stderr=PIPE, close_fds=True)
            calltime = time.time()
            time.sleep(0.05)
            #print calltime
            while True:
                if proc.poll() is not None:
                    break
                time.sleep(0.5)
                #print time.time()
                if time.time() > calltime + timeout:
                    raise Timeout()
        finally:
            # If Popen itself failed, proc is still None and there is
            # nothing to clean up.
            try:
                if proc is not None:
                    proc.terminate()
                    proc.kill()
            except OSError:
                pass
        return (proc.returncode,) + proc.communicate()
else:
        output = commands.getstatusoutput(" ".join(command))
        # getstatusoutput returns (status, output); pad with a single empty
        # string so both branches return a (returncode, stdout, stderr) triple.
        return output + ('',)
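# Rough usage sketch (illustrative paths, not part of the original module):
#   rc, out, err = call_openscad(py.path.local('shapes.scad'),
#                                py.path.local('shapes.stl'), timeout=10)
# A Timeout exception means openscad ran past `timeout` seconds and was killed;
# otherwise the triple carries the exit status and the captured output.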
def parse_output(text):
pass
|
1up3d/Stallman
|
src/MCAD/openscad_utils.py
|
Python
|
gpl-3.0
| 1,933
|
[
"VisIt"
] |
174f02033620f96196f05363845c1b71a971f6fc36b44484b6c3c76c0019bc2f
|
###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
import ast
from xml.etree import cElementTree as ET
class SpecList(object):
""" A class with module specifications and custom code
This describes how the wrapped methods/classes will
    map to modules in vistrails
"""
def __init__(self, module_specs=None):
if module_specs is None:
module_specs = []
self.module_specs = module_specs
def write_to_xml(self, fname):
root = ET.Element("specs")
for spec in self.module_specs:
root.append(spec.to_xml())
tree = ET.ElementTree(root)
def indent(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
indent(tree.getroot())
tree.write(fname)
@staticmethod
def read_from_xml(fname, klass=None):
if klass is None:
klass = ModuleSpec
module_specs = []
tree = ET.parse(fname)
for elt in tree.getroot():
if elt.tag == klass.xml_name:
module_specs.append(klass.from_xml(elt))
retval = SpecList(module_specs)
# for spec in retval.module_specs:
# print "==", spec.name, "=="
# for ps in spec.port_specs:
# print " ", ps.arg, ps.name
return retval
######### BASE MODULE SPEC ###########
class PortSpec(object):
""" Represents specification of a port
"""
xml_name = "portSpec"
# attrs tuple means (default value, [is subelement, [run eval]])
    # Subelement: serialize the value as a child element instead of an XML attribute
    # eval: serialize as string and use ast.literal_eval to get the value back
# FIXME: subelement/eval not needed if using json
attrs = {"name": "", # port name
"port_type": None, # type signature in vistrails
"docstring": ("", True), # documentation
"min_conns": (0, False, True), # set min_conns (1=required)
"max_conns": (-1, False, True), # Set max_conns (default -1)
"show_port": (False, False, True), # Set not optional (use connection)
"sort_key": (-1, False, True), # sort_key
"shape": (None, False, True), # physical shape
"depth": (0, False, True)} # expected list depth
def __init__(self, **kwargs):
self.set_defaults(**kwargs)
self.port_types = []
def set_defaults(self, **kwargs):
for attr, props in self.attrs.iteritems():
if isinstance(props, tuple):
default_val = props[0]
else:
default_val = props
if attr in kwargs:
setattr(self, attr, kwargs[attr])
else:
setattr(self, attr, default_val)
def to_xml(self, elt=None):
if elt is None:
elt = ET.Element(self.xml_name)
for attr, props in self.attrs.iteritems():
attr_val = getattr(self, attr)
is_subelt = False
if isinstance(props, tuple):
default_val = props[0]
if len(props) > 1:
is_subelt = props[1]
else:
default_val = props
if default_val != attr_val:
if is_subelt:
subelt = ET.Element(attr)
subelt.text = unicode(getattr(self, attr))
elt.append(subelt)
else:
elt.set(attr, unicode(attr_val))
return elt
@classmethod
def internal_from_xml(cls, elt, obj=None):
if obj is None:
obj = cls()
child_elts = {}
for child in elt.getchildren():
# if child.tag not in obj.attrs:
# raise RuntimeError('Cannot deal with tag "%s"' % child.tag)
if child.tag not in child_elts:
child_elts[child.tag] = []
child_elts[child.tag].append(child)
kwargs = {}
for attr, props in obj.attrs.iteritems():
is_subelt = False
run_eval = False
if isinstance(props, tuple):
if len(props) > 1:
is_subelt = props[1]
if len(props) > 2:
run_eval = props[2]
attr_vals = []
if is_subelt:
if attr in child_elts:
attr_vals = [c.text for c in child_elts[attr]
if c.text is not None]
else:
attr_val = elt.get(attr)
if attr_val is not None:
attr_vals = [attr_val]
if len(attr_vals) > 1:
raise ValueError('Should have only one value for '
'attribute "%s"' % attr)
if len(attr_vals) > 0:
attr_val = attr_vals[0]
if run_eval:
try:
kwargs[attr] = ast.literal_eval(attr_val)
except (NameError, SyntaxError, ValueError):
kwargs[attr] = attr_val
else:
kwargs[attr] = attr_val
obj.set_defaults(**kwargs)
return obj, child_elts
@classmethod
def from_xml(cls, elt, obj=None):
obj, child_elts = cls.internal_from_xml(elt, obj)
return obj
@classmethod
def create_from_xml(cls, elt):
if elt.tag == cls.InputSpecType.xml_name:
return cls.InputSpecType.from_xml(elt)
elif elt.tag == cls.OutputSpecType.xml_name:
return cls.OutputSpecType.from_xml(elt)
raise TypeError('Cannot create spec from element of type "%s"' %
elt.tag)
def get_port_type(self):
if self.port_type is None:
return "basic:Null"
try:
port_types = ast.literal_eval(self.port_type)
def flatten(t):
if not isinstance(t, list):
raise Exception("Expected a list")
flat = []
for elt in t:
if isinstance(elt, list):
flat.extend(flatten(elt))
else:
flat.append(elt)
return flat
return ','.join(flatten(port_types))
except (SyntaxError, ValueError):
pass
return self.port_type
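    # Worked example (illustrative values): a port_type of
    # "[['basic:String'], 'basic:Integer']" literal_evals to a nested list that
    # flatten() joins into "basic:String,basic:Integer"; a plain "basic:String"
    # fails literal_eval and is returned unchanged.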
def get_prepend_params(self):
if self.prepend_params is None:
return []
return self.prepend_params
class InputPortSpec(PortSpec):
xml_name = "inputPortSpec"
attrs = {"entry_types": (None, True, True),# custom entry type (like enum)
"values": (None, True, True), # values for enums
"labels": (None, True, True), # custom labels on enum values
"defaults": (None, True, True), # default value list
}
attrs.update(PortSpec.attrs)
def get_port_attrs(self):
""" Port attribute dict that will be used to create the port
"""
attrs = {}
if self.sort_key != -1:
attrs["sort_key"] = self.sort_key
if self.shape:
attrs["shape"] = self.shape
if self.depth:
attrs["depth"] = self.depth
if self.values:
attrs["values"] = unicode(self.values)
if self.labels:
attrs["labels"] = unicode(self.labels)
if self.entry_types:
attrs["entry_types"] = unicode(self.entry_types)
if self.defaults:
attrs["defaults"] = unicode(self.defaults)
if self.docstring:
attrs["docstring"] = self.docstring
if self.min_conns:
attrs["min_conns"] = self.min_conns
if self.max_conns != -1:
attrs["max_conns"] = self.max_conns
if not self.show_port:
attrs["optional"] = True
return attrs
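# Quick round-trip sketch (hypothetical values, added for illustration):
#   ps = InputPortSpec(name='scale', port_type='basic:Float', min_conns=1)
#   elt = ps.to_xml()                    # only non-default attrs are written
#   ps2 = InputPortSpec.from_xml(elt)    # min_conns comes back as int 1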
class OutputPortSpec(PortSpec):
xml_name = "outputPortSpec"
attrs = {}
attrs.update(PortSpec.attrs)
def get_port_attrs(self):
""" Port attribute dict that will be used to create the port
"""
attrs = {}
if self.sort_key != -1:
attrs["sort_key"] = self.sort_key
if self.shape:
attrs["shape"] = self.shape
if self.depth:
attrs["depth"] = self.depth
if self.docstring:
attrs["docstring"] = self.docstring
if self.min_conns:
attrs["min_conns"] = self.min_conns
if self.max_conns != -1:
attrs["max_conns"] = self.max_conns
if not self.show_port:
attrs["optional"] = True
return attrs
class ModuleSpec(object):
""" Represents specification of a module
This mirrors how the module will look in the vistrails registry
"""
xml_name = 'moduleSpec'
InputSpecType = InputPortSpec
OutputSpecType = OutputPortSpec
# From Modulesettings. See core.modules.config._documentation
ms_attrs = ['name',
'configure_widget',
'constant_widget',
'constant_widgets',
'signature',
'constant_signature',
'color',
'fringe',
'left_fringe',
'right_fringe',
'abstract',
'namespace',
'package_version',
'hide_descriptor']
attrs = [
# basic attributes
'module_name', # Name of module (can be overridden by modulesettings)
'superklass', # class to inherit from
'code_ref', # reference to wrapped class/method
'docstring', # module __doc__
'cacheable', # should this module be cached
# special attributes
'callback', # name of attribute for progress callback
'tempfile'] # attribute name for temporary file creation method
attrs.extend(ms_attrs)
def __init__(self, module_name='', superklass='', code_ref='',
docstring='', callback=None, tempfile=None, cacheable=True,
input_port_specs=None, output_port_specs=None, **kwargs):
if input_port_specs is None:
input_port_specs = []
if output_port_specs is None:
output_port_specs = []
self.module_name = module_name
self.superklass = superklass
self.code_ref = code_ref
self.docstring = docstring
self.callback = callback
self.tempfile = tempfile
self.cacheable = cacheable
self.input_port_specs = input_port_specs
self.output_port_specs = output_port_specs
for attr in self.ms_attrs:
setattr(self, attr, kwargs.get(attr, None))
def to_xml(self, elt=None):
if elt is None:
elt = ET.Element(self.xml_name)
elt.set("module_name", self.module_name)
elt.set("superklass", self.superklass)
elt.set("code_ref", self.code_ref)
subelt = ET.Element("docstring")
subelt.text = unicode(self.docstring)
elt.append(subelt)
if self.callback is not None:
elt.set("callback", self.callback)
if self.tempfile is not None:
elt.set("tempfile", self.tempfile)
if self.cacheable is False:
elt.set("cacheable", 'False')
for attr in self.ms_attrs:
value = getattr(self, attr)
if value is not None:
elt.set(attr, repr(value))
for port_spec in self.input_port_specs:
subelt = port_spec.to_xml()
elt.append(subelt)
for port_spec in self.output_port_specs:
subelt = port_spec.to_xml()
elt.append(subelt)
return elt
@staticmethod
def from_xml(elt, klass=None):
if klass is None:
klass = ModuleSpec
module_name = elt.get("module_name", '')
superklass = elt.get("superklass", '')
code_ref = elt.get("code_ref", '')
callback = elt.get("callback", None)
tempfile = elt.get("tempfile", None)
cacheable = ast.literal_eval(elt.get("cacheable", "True"))
kwargs = {}
for attr in klass.ms_attrs:
value = elt.get(attr, None)
if value is not None:
kwargs[attr] = ast.literal_eval(value)
docstring = ""
input_port_specs = []
output_port_specs = []
for child in elt.getchildren():
if child.tag == klass.InputSpecType.xml_name:
input_port_specs.append(klass.InputSpecType.from_xml(child))
elif child.tag == klass.OutputSpecType.xml_name:
output_port_specs.append(klass.OutputSpecType.from_xml(child))
elif child.tag == "docstring":
if child.text:
docstring = child.text
return klass(module_name=module_name, superklass=superklass,
code_ref=code_ref, docstring=docstring,
callback=callback, tempfile=tempfile, cacheable=cacheable,
input_port_specs=input_port_specs,
output_port_specs=output_port_specs, **kwargs)
def get_output_port_spec(self, compute_name):
for ps in self.output_port_specs:
if ps.compute_name == compute_name:
return ps
return None
def get_module_settings(self):
""" Returns modulesettings dict
"""
attrs = {}
for attr in self.ms_attrs:
value = getattr(self, attr)
if value is not None:
attrs[attr] = value
return attrs
######### PYTHON FUNCTION SPEC ###########
class FunctionInputPortSpec(InputPortSpec):
xml_name = "functionInputPortSpec"
attrs = {"arg": ""} # attribute name
attrs.update(InputPortSpec.attrs)
class FunctionOutputPortSpec(OutputPortSpec):
xml_name = "functionOutputPortSpec"
class FunctionSpec(ModuleSpec):
""" Specification for wrapping a python function
"""
xml_name = 'functionSpec'
InputSpecType = FunctionInputPortSpec
OutputSpecType = FunctionOutputPortSpec
attrs = ['output_type'] # None(=single), list(ordered), or dict(attr=value)
attrs.extend(ModuleSpec.attrs)
def __init__(self, module_name, superklass='', code_ref='', docstring="",
output_type=None, callback=None, tempfile=None,
cacheable=True, input_port_specs=None, output_port_specs=None,
**kwargs):
ModuleSpec.__init__(self, module_name, superklass, code_ref,
docstring, callback, tempfile, cacheable,
input_port_specs, output_port_specs, **kwargs)
self.output_type = output_type
def to_xml(self, elt=None):
if elt is None:
elt = ET.Element(self.xml_name)
elt = ModuleSpec.to_xml(self, elt)
if self.output_type is not None:
elt.set("output_type", self.output_type)
return elt
@staticmethod
def from_xml(elt):
inst = ModuleSpec.from_xml(elt, FunctionSpec)
inst.output_type = elt.get("output_type", None)
return inst
######### PYTHON CLASS SPEC ###########
class ClassInputPortSpec(InputPortSpec):
xml_name = "classInputPortSpec"
attrs = {"method_name": "", # method name
"method_type": "", # Type like nullary, OnOff or SetXToY
"prepend_params": (None, True, True)} # prepended params like index
attrs.update(InputPortSpec.attrs)
def __init__(self, **kwargs):
InputPortSpec.__init__(self, **kwargs)
if not self.method_name:
self.method_name = self.name
class ClassOutputPortSpec(OutputPortSpec):
xml_name = "classOutputPortSpec"
attrs = {"method_name": "", # method/attribute name
"prepend_params": (None, True, True)} # prepended params used with indexed methods
attrs.update(OutputPortSpec.attrs)
def __init__(self, **kwargs):
OutputPortSpec.__init__(self, **kwargs)
if not self.method_name:
self.method_name = self.name
class ClassSpec(ModuleSpec):
""" Specification for wrapping a python class
"""
xml_name = 'classSpec'
InputSpecType = ClassInputPortSpec
OutputSpecType = ClassOutputPortSpec
attrs = ['methods_last', # If True will compute methods before connections
'compute', # Function to call after input methods
'cleanup'] # Function to call after output methods
attrs.extend(ModuleSpec.attrs)
def __init__(self, module_name, superklass='', code_ref='', docstring="",
callback=None, tempfile=None,
cacheable=True, input_port_specs=None, output_port_specs=None,
compute=None, cleanup=None, methods_last=False, **kwargs):
ModuleSpec.__init__(self, module_name, superklass, code_ref,
docstring, callback, tempfile, cacheable,
input_port_specs, output_port_specs, **kwargs)
self.methods_last = methods_last
self.compute = compute
self.cleanup = cleanup
def to_xml(self, elt=None):
if elt is None:
elt = ET.Element(self.xml_name)
if self.methods_last is not False:
elt.set("methods_last", unicode(self.methods_last))
if self.compute is not None:
elt.set("compute", self.compute)
if self.cleanup is not None:
elt.set("cleanup", self.cleanup)
elt = ModuleSpec.to_xml(self, elt)
return elt
@staticmethod
def from_xml(elt):
inst = ModuleSpec.from_xml(elt, ClassSpec)
inst.methods_last = ast.literal_eval(elt.get("methods_last", 'False'))
inst.compute = elt.get("compute", None)
inst.cleanup = elt.get("cleanup", None)
return inst
###############################################################################
import unittest
class TestModuleSpec(unittest.TestCase):
@classmethod
def setUpClass(cls):
try:
import vtk
except ImportError:
raise unittest.SkipTest("vtk is not installed")
from vistrails.tests.utils import enable_package
from ..identifiers import identifier
enable_package(identifier)
def test_module_spec(self):
input_spec = InputPortSpec(name='myportname',
port_type='basic:String',
docstring='my port doc',
min_conns=1,
max_conns=3,
show_port=True,
sort_key=5,
depth=1,
                                   entry_types='enum')
in_attrs = input_spec.get_port_attrs()
output_spec = OutputPortSpec(name='myportname',
port_type='basic:String',
docstring='my port doc',
min_conns=1,
max_conns=3,
show_port=False,
sort_key=5,
depth=1)
out_attrs = output_spec.get_port_attrs()
ms = ModuleSpec(module_name='myclassname',
superklass='mysuperclassname',
code_ref='theclassname',
docstring='my documentation',
callback=None,
tempfile=None,
cacheable=False,
input_port_specs=[input_spec],
output_port_specs=[output_spec])
as_string = ET.tostring(ms.to_xml())
from_string = ET.fromstring(as_string)
ms2 = ModuleSpec.from_xml(from_string)
in_attrs2 = ms2.input_port_specs[0].get_port_attrs()
out_attrs2 = ms2.output_port_specs[0].get_port_attrs()
self.assertEqual(in_attrs, in_attrs2)
self.assertEqual(out_attrs, out_attrs2)
def test_function_spec(self):
input_spec = FunctionInputPortSpec(name='myportname',
port_type='basic:String',
docstring='my port doc',
min_conns=1,
max_conns=3,
show_port=False,
sort_key=5,
depth=1,
arg='myargname',
)
in_attrs = input_spec.get_port_attrs()
output_spec = FunctionOutputPortSpec(name='myportname',
port_type='basic:String',
docstring='my port doc',
min_conns=1,
max_conns=3,
show_port=False,
sort_key=5,
depth=1)
out_attrs = output_spec.get_port_attrs()
ms = FunctionSpec(module_name='myclassname',
superklass='mysuperclassname',
code_ref='theclassname',
docstring='my documentation',
callback=None,
tempfile=None,
cacheable=False,
input_port_specs=[input_spec],
output_port_specs=[output_spec],
output_type='list')
as_string = ET.tostring(ms.to_xml())
from_string = ET.fromstring(as_string)
ms2 = FunctionSpec.from_xml(from_string)
in_attrs2 = ms2.input_port_specs[0].get_port_attrs()
out_attrs2 = ms2.output_port_specs[0].get_port_attrs()
self.assertEqual(in_attrs, in_attrs2)
self.assertEqual(out_attrs, out_attrs2)
def test_class_spec(self):
input_spec = ClassInputPortSpec(name='myportname',
port_type='basic:String',
docstring='my port doc',
min_conns=1,
max_conns=3,
show_port=False,
sort_key=5,
depth=1,
method_name='MyClassMethodName',
method_type='SetXToY',
prepend_params=[1])
in_attrs = input_spec.get_port_attrs()
output_spec = ClassOutputPortSpec(name='myportname',
port_type='basic:String',
docstring='my port doc',
min_conns=1,
max_conns=3,
show_port=False,
sort_key=5,
depth=1,
method_name='MyClassMethodName',
prepend_params=[1])
out_attrs = output_spec.get_port_attrs()
ms = ClassSpec(module_name='myclassname',
superklass='mysuperclassname',
code_ref='theclassname',
docstring='my documentation',
callback=None,
tempfile=None,
cacheable=False,
input_port_specs=[input_spec],
output_port_specs=[output_spec],
methods_last=True,
compute='myCompute',
cleanup='myCleanup')
as_string = ET.tostring(ms.to_xml())
from_string = ET.fromstring(as_string)
ms2 = ClassSpec.from_xml(from_string)
in_attrs2 = ms2.input_port_specs[0].get_port_attrs()
out_attrs2 = ms2.output_port_specs[0].get_port_attrs()
self.assertEqual(in_attrs, in_attrs2)
self.assertEqual(out_attrs, out_attrs2)
#def run():
# specs = SpecList.read_from_xml("mpl_plots_raw.xml")
# specs.write_to_xml("mpl_plots_raw_out.xml")
#if __name__ == '__main__':
# run()
|
hjanime/VisTrails
|
vistrails/packages/vtk/vtk_wrapper/specs.py
|
Python
|
bsd-3-clause
| 26,973
|
[
"VTK"
] |
9de40f7126379a77b03e5bcd8376cf9702d42ef7b085b6e430a91a6797431fbf
|
from django.conf import settings
import urllib, subprocess, os, sys, datetime, mwclient
'''
Handles linking PDB structure images to ProteinBox templates.
'''
description_skeleton = '''== {{{{int:filedesc}}}} ==
{{{{Information
| Description={{{{en | 1=Structure of protein {symbol}.
Based on [[w:PyMol | PyMol]] rendering of PDB {{{{PDB2|{pdb}}}}}.}}}}
| Source = {{{{own}}}}
| Author = [[User:{username}|{username}]]
| Date = {date}
| Permission =
| other_versions =
}}}}
{{{{PD-self}}}}
{{{{Category:Protein_structures}}}}'''
caption_skeleton = 'Rendering based on [[Protein_Data_Bank | PDB]] {{{{PDB2|{pdb}}}}}.'
rcsb_skeleton = 'http://www.rcsb.org/pdb/files/{}.pdb'
# A dev server at EBI that provides 'chosen' PDB structures and images based
# on homologene ID
ebiserver = 'http://wwwdev.ebi.ac.uk/pdbe-apps/jsonizer/homologene/{}/'
title_skeleton = 'File:Protein_{hugo_sym}_PDB_{pdb_id}.png'
def get_image(proteinbox, use_experimental=True):
'''Attempts to find a suitable image given a gene. Returns the image filename as
it exists on Wikipedia commons, along with a suitable caption.
Arguments:
- `proteinbox`: a ProteinBox object representing known information about a gene
- `use_experimental`: use a dev server at EBI to provide chosen PDB structures and
images.'''
fields = proteinbox.fieldsdict
pdbid = ''
if use_experimental and fields['Homologene']:
import json
homologene = fields['Homologene']
        # Open outside the try block so the finally clause never touches an
        # undefined `req` if urlopen itself fails.
        req = urllib.urlopen(ebiserver.format(homologene))
        try:
            rawj = req.read()
            rawj = rawj.replace(' ', '').replace('\n', '')
            info = json.loads(rawj)
            if 'best_structure' in info:
                pdbid = info['best_structure']['pdbid']
                pdb = PDB(pdbid, fields['Symbol'])
                # They often have images, but we're going to render them again anyway.
                imagefile, caption = pdb.render()
                pdb.uploadToCommons(png_file=imagefile)
                return imagefile, caption
        finally:
            req.close()
else:
# Try to find one on Commons
commons = mwclient.Site('commons.wikimedia.org')
title1 = title_skeleton.format(hugo_sym=fields['Symbol'], pdb_id=fields['PDB'][0].upper())
title2 = title_skeleton.format(hugo_sym=fields['Symbol'], pdb_id=fields['PDB'][0].lower())
page1 = commons.Pages[title1]
page2 = commons.Pages[title2]
if page1.exists:
return page1.name, caption_skeleton.format(pdb=fields['PDB'][0])
elif page2.exists:
return page2.name, caption_skeleton.format(pdb=fields['PDB'][0])
elif len(fields['PDB']) > 1:
titles = [title_skeleton.format(hugo_sym=fields['Symbol'], pdb_id=pdb) for pdb in fields['PDB']]
for title in titles:
page = commons.Pages[title]
if page.exists:
import re
                    # re.search returns a match object; take group(1) so the
                    # caption gets the PDB id itself, and escape the dot.
                    pdb = re.search(r'PDB[ _]([\w\d]*)\.png', title).group(1)
                    return page.name, caption_skeleton.format(pdb=pdb)
# otherwise render a new one
else:
pdb = PDB(fields['PDB'][0], fields['Symbol'])
image, caption = pdb.render()
return image, caption
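# Sketch of intended use (names are illustrative; `box` is assumed to be a
# populated ProteinBox):
#   filename, caption = get_image(box)
# `filename` is a Commons page name such as 'File:Protein_TP53_PDB_1tup.png'
# and `caption` is wiki markup built from caption_skeleton.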
class PDB(object):
def __init__(self, pdbid, hugosym, pdbpath=None, pymolpath=None, commons=None):
self.pdbid = pdbid
self.hugosym = hugosym
self.pdbpath = pdbpath if pdbpath else settings.pbbhome + 'pdb/'
if not self.pdbpath.endswith(os.sep):
self.pdbpath = self.pdbpath + os.sep
# ensure that pdb path exists
if not os.path.exists(self.pdbpath):
try:
os.makedirs(self.pdbpath)
except OSError:
raise ValueError('Could not access pdb path at {} nor create it.'.format(self.pdbpath))
# ensure that we can write in it
try:
open(self.pdbpath + 'test', 'w').close()
os.remove(self.pdbpath + 'test')
except IOError:
raise ValueError(
'Could not write in pdb path {}: permission denied.'.format(
self.pdbpath))
self.pymolpath = pymolpath if pymolpath else settings.pymol
self.pdbfile = None
self.pngfile = None
self.commons = commons
self.__closed = False
def download(self, pdb_id=None):
'''
Downloads a PDB file from rcsb.org and returns the filename if
successful (or None if failed).
'''
if not pdb_id:
pdb_id = self.pdbid
try:
remote = urllib.urlopen(rcsb_skeleton.format(pdb_id))
except IOError:
sys.stderr.write('pdb: error downloading pdb file from {}\n'
.format(rcsb_skeleton.format(pdb_id)))
return None
filename = self.pdbpath + pdb_id + '.pdb'
with open(filename, 'wb') as local:
local.write(remote.read())
remote.close()
self.pdbfile = filename
return filename
def render(self, pdb_id=None, hugo_sym=None, pdb_file=None):
'''
Render and return the filename of the image given a pdb id
and hugo symbol.
If the pdb file is already present, it can be passed as a parameter;
otherwise it will be downloaded from rcsb.org.
'''
if not pdb_id:
pdb_id = self.pdbid
if not hugo_sym:
hugo_sym = self.hugosym
# Attempt to download the pdb file if not explicitly passed
if not pdb_file:
pdb_file = self.pdbfile
if not pdb_file:
pdb_file = self.download(pdb_id)
if not pdb_file:
return None
# Set up the future location of the image
if pdb_id is None:
return None
png_file = '{pdbpath}Protein_{hugo_sym}_PDB_{pdb_id}.png'.format(pdbpath=self.pdbpath, hugo_sym=hugo_sym, pdb_id=pdb_id)
# Launch pymol as a subprocess and wait for return
rendercmd = "cmd.png('{png_file}', 1200, 1000)".format(png_file=png_file)
pymolcmd = [self.pymolpath, '-c', pdb_file, settings.PROJECT_PATH.format('commands.pml'), '-d', rendercmd]
print ' '.join(pymolcmd)
try:
subprocess.check_call(pymolcmd)
except CalledProcessError:
print 'pdb: error rendering pdb file for id {}'.format(pdb_id)
return None
self.pngfile = png_file
return png_file, caption_skeleton.format(pdb=pdb_id)
def uploadToCommons(self, description=None, png_file=None, commons=None):
        if not png_file:
            png_file = self.pngfile
        if not png_file:
            raise ValueError('No .png file specified.')
        # Reuse a caller-supplied site object; only create a new one when
        # none was given.
        self.commons = commons or mwclient.Site('commons.wikimedia.org')
if hasattr(settings, 'commons_user'):
cuser = settings.commons_user
cpass = settings.commons_pass
else:
cuser = settings.wiki_user
cpass = settings.wiki_pass
self.commons.login(cuser, cpass)
if not description:
description = description_skeleton.format(symbol=self.hugosym,
pdb=self.pdbid, username=cuser,
date=str(datetime.datetime.now()))
try:
self.commons.upload(open(png_file),
png_file.split(os.sep).pop(),
description)
except mwclient.errors.LoginError:
self.commons.login()
|
SuLab/genewiki
|
genewiki/bio/images.py
|
Python
|
mit
| 7,651
|
[
"PyMOL"
] |
6087a3593423be3baef6d05618244b2f3aeeff42ad51e68156b8ea817e161bc8
|
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
"""Tests the basic functionality of the GEO parsers.
"""
from __future__ import print_function
import os
import sys
import Bio.Geo
testfiles = ['GSE16.txt', 'GSM645.txt', 'GSM691.txt', 'GSM700.txt', 'GSM804.txt']
# Five additional files from the NCBI to document the GEO SOFT file format
# changes made in 2005. Note the new table_begin and table_end lines.
testfiles.extend(['soft_ex_affy.txt',
'soft_ex_affy_chp.txt',
'soft_ex_dual.txt',
'soft_ex_family.txt',
'soft_ex_platform.txt',
'soft_ex_series.txt'])
for file in testfiles:
if sys.version_info[0] >= 3:
# Python 3 problem: Can't use utf8 on Tests/Geo/soft_ex_*.txt
# due to micro (\xb5) and degrees (\xb0) symbols
fh = open(os.path.join("Geo", file), encoding="latin")
else:
fh = open(os.path.join("Geo", file))
print("Testing Bio.Geo on " + file + "\n\n")
records = Bio.Geo.parse(fh)
for record in records:
print(record)
print("\n")
fh.close()
|
updownlife/multipleK
|
dependencies/biopython-1.65/Tests/test_geo.py
|
Python
|
gpl-2.0
| 1,239
|
[
"Biopython"
] |
635f9e48c74a0d4095ef2ab17712cc212409e765d2c99417611d61d8bbfc9a34
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Presto documentation build configuration file
#
# This file is execfile()d with the current directory set to its containing dir.
#
import os
import sys
import xml.dom.minidom
try:
sys.dont_write_bytecode = True
except Exception:
pass
sys.path.insert(0, os.path.abspath('ext'))
def child_node(node, name):
for i in node.childNodes:
if (i.nodeType == i.ELEMENT_NODE) and (i.tagName == name):
return i
return None
def node_text(node):
return node.childNodes[0].data
def maven_version(pom):
dom = xml.dom.minidom.parse(pom)
project = dom.childNodes[0]
version = child_node(project, 'version')
if version:
return node_text(version)
parent = child_node(project, 'parent')
version = child_node(parent, 'version')
return node_text(version)
def get_version():
version = os.environ.get('PRESTO_VERSION', '').strip()
return version or maven_version('../../../pom.xml')
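# For reference, maven_version() expects a pom.xml shaped roughly like
# (illustrative, not the actual Presto pom):
#   <project>
#     <parent><version>0.248</version></parent>
#   </project>
# and falls back to the <parent> version when the project itself declares none.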
# -- General configuration -----------------------------------------------------
needs_sphinx = '3.3.0'
extensions = [
'sphinx_copybutton', 'download', 'issue', 'pr'
]
copyright = 'The Presto Foundation. All rights reserved. Presto is a registered trademark of LF Projects, LLC'
templates_path = ['templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'Presto'
version = get_version()
release = version
exclude_patterns = ['_build']
highlight_language = 'sql'
rst_epilog = """
.. |presto_server_release| replace:: ``presto-server-{release}``
""".replace('{release}', release)
# -- Options for HTML output ---------------------------------------------------
html_theme = 'sphinx_material'
# Set link name generated in the top bar.
html_title = '%s %s Documentation' % (project, release)
html_logo = 'images/logo.png'
html_favicon = 'images/favicon.ico'
# doesn't seem to do anything
# html_baseurl = 'overview.html'
html_static_path = ['static']
html_css_files = [
'presto.css',
]
templates_path = ['_templates']
html_add_permalinks = '#'
html_show_copyright = True
html_show_sphinx = False
html_sidebars = {
"**": ['logo-text.html', 'globaltoc.html', 'localtoc.html', 'searchbox.html']
}
html_show_sourcelink = False
# Material theme options (see theme.conf for more information)
html_theme_options = {
# Set the name to appear in the left sidebar/header. If not provided, uses
# html_short_title if defined, or html_title
# 'nav_title': 'Project Name',
# Set your GA account ID to enable tracking
'google_analytics_account': 'UA-82811140-44',
# Specify a base_url used to generate sitemap.xml. If not
# specified, then no sitemap will be built.
'base_url': '/',
# Colors
# The theme color for mobile browsers. Hex color.
'theme_color': '374665',
# Primary colors:
# red, pink, purple, deep-purple, indigo, blue, light-blue, cyan,
# teal, green, light-green, lime, yellow, amber, orange, deep-orange,
# brown, grey, blue-grey, white. default is blue.
'color_primary': 'grey',
# Accent colors:
# red, pink, purple, deep-purple, indigo, blue, light-blue, cyan,
# teal, green, light-green, lime, yellow, amber, orange, deep-orange
'color_accent': 'blue',
# Repository integration
# Set the repo url for the link to appear
'repo_url': 'https://github.com/prestodb/presto',
'repo_name': 'Presto',
'repo_type': 'github',
# TOC Tree generation
# The maximum depth of the global TOC; set it to -1 to allow unlimited depth
'globaltoc_depth': 2,
# If true, TOC entries that are not ancestors of the current page are collapsed
'globaltoc_collapse': True,
# If true, the global TOC tree will also contain hidden entries
'globaltoc_includehidden': False,
# Include the master document at the top of the page in the breadcrumb bar.
# You must also set this to true if you want to override the rootrellink block, in which
# case the content of the overridden block will appear
# master_doc = True
# A list of dictionaries where each has three keys:
# href: The URL or pagename (str)
# title: The title to appear (str)
# internal: Flag indicating to use pathto (bool)
# nav_links =
# Text to appear at the top of the home page in a "hero" div. Must be a
# dict[str, str] of the form pagename: hero text, e.g., {'index': 'text on index'}
# heroes =
# Enable the version dropdown feature. See the demo site for the structure
# of the json file required.
# 'version_dropdown': 'True',
# Text to use in the dropdown menu
# version_dropdown_text = Versions
# Optional dictionary that to use when populating the version dropdown.
# The key is the text that appears, and the value is the absolute path
# of the alternative versions
# version_info =
# Relative path to json file. The default is "versions.json" which assumes the
# file hosted in the root of the site. You can use other file names or locations, e.g.,
# "_static/old_versions.json"
# 'version_json': 'static/versions.json',
# Table classes to _not_ strip. Must be a list. Classes on this list are *not*
# removed from tables. All other classes are removed, and only tables with outclasses
# are styled by default.
# table_classes =
}
|
zzhao0/presto
|
presto-docs/src/main/sphinx/conf.py
|
Python
|
apache-2.0
| 5,695
|
[
"Amber"
] |
dd0ef9f1d2fa45e7141998878fbb89da0d9576d18e4f31fe68206b57531177a7
|
from __future__ import (absolute_import, division, print_function)
import re
from six import string_types
parNamePattern = re.compile(r'([a-zA-Z][\w.]+)')
class FunctionParameters(object):
"""
A helper class that simplifies access to parameters of nested composite fitting functions.
"""
def __init__(self, function, prefix=''):
self.function = function
self.prefix = prefix
def __getitem__(self, name):
return self.function.getParameterValue(self.prefix + name)
def __setitem__(self, name, value):
self.function.setParameter(self.prefix + name, value)
def update(self, function):
self.function = function
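# Illustrative usage (assumes `fun` is a composite mantid IFunction with a
# member function f0 that has a Sigma parameter):
#   params = FunctionParameters(fun, prefix='f0.')
#   params['Sigma'] = 0.5   # sets parameter 'f0.Sigma' on the underlying function
#   width = params['Sigma']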
class FunctionAttributes(object):
"""
A helper class that simplifies access to attributes of nested composite fitting functions.
"""
def __init__(self, function, prefix=''):
self.function = function
self.prefix = prefix
def __getitem__(self, name):
return self.function.getAttributeValue(self.prefix + name)
def __setitem__(self, name, value):
self.function.setAttributeValue(self.prefix + name, value)
def update(self, function):
self.function = function
class Function(object):
"""A helper object that simplifies getting and setting parameters of a simple named function."""
def __init__(self, name_or_function, **kwargs):
"""
Initialise new instance.
        @param name_or_function: A valid name registered with the FunctionFactory, or an IFunction object.
@param kwargs: Parameters (but not attributes) of this function. To set attributes use `attr` property.
Example:
f = Function('TabulatedFunction', Scaling=2.0)
f.attr['Workspace'] = 'workspace_with_data'
"""
from mantid.simpleapi import FunctionFactory
        if isinstance(name_or_function, string_types):
self.function = FunctionFactory.createFunction(name_or_function)
else:
self.function = name_or_function
if 'prefix' in kwargs:
self.prefix = kwargs['prefix']
del kwargs['prefix']
else:
self.prefix = ''
# Function attributes.
self._attrib = FunctionAttributes(self.function, self.prefix)
# Function parameters.
self._params = FunctionParameters(self.function, self.prefix)
# The rest of kw arguments are treated as function parameters
for param in kwargs:
self._params[param] = kwargs[param]
@property
def attr(self):
return self._attrib
@property
def param(self):
return self._params
def ties(self, **kwargs):
"""Set ties on the parameters.
@param kwargs: Ties as name=value pairs: name is a parameter name,
the value is a tie string or a number. For example:
tie(A0 = 0.1, A1 = '2*A0')
"""
for param in kwargs:
self.function.tie(self.prefix + param, str(kwargs[param]))
def constraints(self, *args):
"""
Set constraints for the parameters.
@param args: A list of constraints. For example:
constraints('A0 > 0', '0.1 < A1 < 0.9')
"""
for arg in args:
constraint = re.sub(parNamePattern, '%s\\1' % self.prefix, arg)
self.function.addConstraints(constraint)
def toString(self):
"""Create function initialisation string"""
if self.prefix != '':
raise RuntimeError('Cannot convert to string a part of function')
return str(self.function)
def update(self, function):
"""
Update values of the fitting parameters.
@param func: A IFunction object containing new parameter values.
"""
self._attrib.update(function)
self._params.update(function)
class CompositeProperties(object):
"""
A helper class that simplifies access of attributes and parameters of a composite function.
"""
def __init__(self, function, prefix, kind, first_index):
"""
Constructor.
Args:
function: a function that this object provides access to
            prefix: a prefix that is prepended to property names. This makes it easier to access parameters
                of a nested composite function.
            kind: the kind of properties accessed: 'attributes' or 'parameters'
            first_index: shifts the index of a member function
"""
self.function = function
self.prefix = prefix
self.PropertyType = FunctionAttributes if kind == 'attributes' else FunctionParameters
self.first_index = first_index
def __getitem__(self, i):
"""
        Get a FunctionParameters or FunctionAttributes object that gives access to properties of the i-th
        member function (shifted by self.first_index).
For example:
function = FunctionFactory.createInitialized('name=Gaussian,Sigma=1;name=Gaussian,Sigma=2')
params = CompositeProperties(function, '', 'parameters', 0)
assert params[0]['Sigma'] == 1
assert params[1]['Sigma'] == 2
params[1]['Sigma'] = 3
assert params[1]['Sigma'] == 3
Args:
i: index of a member function to get/set parameters
Returns:
FunctionParameters or FunctionAttributes object.
"""
return self.PropertyType(self.function, self.prefix + 'f%s.' % (i + self.first_index))
def update(self, function):
self.function = function
def ties(self, ties_dict):
"""Set ties on the parameters.
:param ties_dict: Ties as name=value pairs: name is a parameter name,
            the value is a tie string. For example:
            ties({'A0': '0.1', 'A1': '2*A0'})
"""
for param, tie in ties_dict.items():
tie = re.sub(parNamePattern, '%s\\1' % self.prefix, tie)
self.function.tie(self.prefix + param, tie)
def constraints(self, *args):
"""
Set constraints for the parameters.
@param args: A list of constraints. For example:
constraints('A0 > 0', '0.1 < A1 < 0.9')
"""
for arg in args:
constraint = re.sub(parNamePattern, '%s\\1' % self.prefix, arg)
self.function.addConstraints(constraint)
class PeaksFunction(object):
"""A helper object that simplifies getting and setting parameters of a composite function
containing multiple peaks of the same spectrum.
"""
def __init__(self, function, prefix, first_index):
"""
Constructor.
        :param function: A CrystalField function whose peaks we want to access.
:param prefix: a prefix of the parameters of the spectrum we want to access.
:param first_index: Index of the first peak
"""
# Collection of all attributes
self._attrib = CompositeProperties(function, prefix, 'attributes', first_index)
# Collection of all parameters
self._params = CompositeProperties(function, prefix, 'parameters', first_index)
@property
def attr(self):
"""Get or set the function attributes.
Returns a FunctionAttributes object that accesses the peaks' attributes.
"""
return self._attrib
@property
def param(self):
"""Get or set the function parameters.
Returns a FunctionParameters object that accesses the peaks' parameters.
"""
return self._params
def ties(self, ties_dict):
"""Set ties on the peak parameters.
:param ties_dict: Ties as name=value pairs: name is a parameter name,
            the value is a tie string. For example:
ties({'f1.Sigma': '0.1', 'f2.Sigma': '2*f0.Sigma'})
"""
self._params.ties(ties_dict)
def constraints(self, *constraints):
"""
Set constraints for the peak parameters.
@param constraints: A list of constraints. For example:
constraints('f0.Sigma > 0', '0.1 < f1.Sigma < 0.9')
"""
self._params.constraints(*constraints)
def tieAll(self, tie, iFirstN, iLast=-1):
"""
Tie parameters with the same name for all peaks.
@param tie: A tie as a string. For example:
            tieAll('Sigma=0.1', 3) is equivalent to the call
            ties({'f0.Sigma': '0.1', 'f1.Sigma': '0.1', 'f2.Sigma': '0.1'})
@param iFirstN: If iLast is given then it's the index of the first peak to tie.
Otherwise it's a number of peaks to tie.
@param iLast: An index of the last peak to tie (inclusive).
"""
if iLast >= 0:
start = iFirstN
end = iLast + 1
else:
start = self._params.first_index
end = iFirstN + self._params.first_index
name, expr = tuple(tie.split('='))
name = 'f%s.' + name.strip()
expr = expr.strip()
ties = {(name % i): expr for i in range(start, end)}
self.ties(ties)
def constrainAll(self, constraint, iFirstN, iLast=-1):
"""
Constrain parameters with the same name for all peaks.
@param constraint: A constraint as a string. For example:
            constrainAll('0 < Sigma <= 0.1', 3) is equivalent to the call
            constraints('0 < f0.Sigma <= 0.1', '0 < f1.Sigma <= 0.1', '0 < f2.Sigma <= 0.1')
@param iFirstN: If iLast is given then it's the index of the first peak to constrain.
Otherwise it's a number of peaks to constrain.
        @param iLast: An index of the last peak to constrain (inclusive).
"""
if iLast >= 0:
start = iFirstN
end = iLast + 1
else:
start = self._params.first_index
end = iFirstN + self._params.first_index
pattern = re.sub(parNamePattern, 'f%s.\\1', constraint)
self.constraints(*[pattern % i for i in range(start, end)])
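# Usage sketch showing how tieAll/constrainAll expand. Assumes 'peaks' is a
# PeaksFunction wrapping three peaks with first_index=0:
#
#     peaks.tieAll('Sigma=0.1', 3)
#     # expands to peaks.ties({'f0.Sigma': '0.1', 'f1.Sigma': '0.1', 'f2.Sigma': '0.1'})
#     peaks.constrainAll('0 < Sigma < 1', 1, 2)
#     # expands to peaks.constraints('0 < f1.Sigma < 1', '0 < f2.Sigma < 1')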
class Background(object):
"""Object representing spectrum background: a sum of a central peak and a
background.
"""
def __init__(self, peak=None, background=None):
"""
Initialise new instance.
        @param peak: An instance of the Function class intended to be the elastic peak.
@param background: An instance of Function class serving as the background.
"""
self.peak = peak
self.background = background
def clone(self):
"""Make a copy of self."""
aCopy = Background()
if self.peak is not None:
aCopy.peak = self.peak.clone()
if self.background is not None:
aCopy.background = self.background.clone()
return aCopy
def toString(self):
if self.peak is None and self.background is None:
return ''
if self.peak is None:
return self.background.toString()
if self.background is None:
return self.peak.toString()
return '(%s;%s)' % (self.peak.toString(), self.background.toString())
def update(self, func1, func2=None):
"""
Update values of the fitting parameters. If both arguments are given
the first one updates the peak and the other updates the background.
@param func1: First IFunction object containing new parameter values.
@param func2: Second IFunction object containing new parameter values.
"""
if func2 is not None:
if self.peak is None or self.background is None:
raise RuntimeError('Background has peak or background undefined.')
self.peak.update(func1)
self.background.update(func2)
elif self.peak is None:
self.background.update(func1)
else:
self.peak.update(func1)
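# Usage sketch: an elastic Gaussian peak plus a flat background, both wrapped
# in the Function helper above ('Gaussian' and 'FlatBackground' are standard
# Mantid fit functions):
#
#     bg = Background(peak=Function('Gaussian', Sigma=0.3),
#                     background=Function('FlatBackground', A0=1.0))
#     print(bg.toString())   # '(name=Gaussian,...;name=FlatBackground,A0=1)'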
class ResolutionModel:
"""
Encapsulates a resolution model.
"""
default_accuracy = 1e-4
max_model_size = 100
def __init__(self, model, xstart=None, xend=None, accuracy=None):
"""
Initialize the model.
:param model: Either a prepared model or a single python function or a list
of functions. If it's functions they must have signatures:
func(x: ndarray) -> ndarray
A prepared model is a tuple of exactly two arrays of floats of
equal sizes or a list of such tuples. The first array in the tuple
is the x-values and the second array is the y-values of the resolution
model.
        :param xstart: The start of the x-interval on which the model functions
                    are tabulated. Required when the model argument contains functions.
        :param xend: The end of that x-interval. Required when the model argument contains functions.
:param accuracy: (Optional) If given and model argument contains functions it's used
to tabulate the functions such that linear interpolation between the
tabulated points has this accuracy. If not given a default value is used.
"""
self.multi = False
if hasattr(model, '__call__'):
self.model = self._makeModel(model, xstart, xend, accuracy)
return
elif hasattr(model, '__len__'):
if len(model) == 0:
raise RuntimeError('Resolution model cannot be initialised with an empty iterable %s' %
str(model))
if hasattr(model[0], '__call__'):
self.model = [self._makeModel(m, xstart, xend, accuracy) for m in model]
self.multi = True
return
elif hasattr(model[0], 'model'):
self.model = [m.model for m in model]
self.multi = True
return
elif isinstance(model[0], tuple):
for m in model:
self._checkModel(m)
self.model = model
self.multi = True
return
self._checkModel(model)
self.model = model
@property
def NumberOfSpectra(self):
if not self.multi:
return 1
else:
return len(self.model)
def _checkModel(self, model):
if not isinstance(model, tuple):
raise RuntimeError('Resolution model must be a tuple of two arrays of floats.\n'
'Found instead:\n\n%s' % str(model))
if len(model) != 2:
raise RuntimeError('Resolution model tuple must have exactly two elements.\n'
'Found instead %d' % len(model))
self._checkArray(model[0])
self._checkArray(model[1])
if len(model[0]) != len(model[1]):
raise RuntimeError('Resolution model expects two arrays of equal sizes.\n'
'Found sizes %d and %d' % (len(model[0]), len(model[1])))
def _checkArray(self, array):
if not hasattr(array, '__len__'):
raise RuntimeError('Expected an array of floats, found %s' % str(array))
if len(array) == 0:
raise RuntimeError('Expected a non-empty array of floats.')
if not isinstance(array[0], float) and not isinstance(array[0], int):
raise RuntimeError('Expected an array of floats, found %s' % str(array[0]))
    def _mergeArrays(self, a, b):
        """Interleave the len(a) - 1 values of b between consecutive elements of a."""
        import numpy as np
        c = np.empty(2 * len(a) - 1)
        c[::2] = a
        c[1::2] = b
        return c
def _makeModel(self, model, xstart, xend, accuracy):
if xstart is None or xend is None:
raise RuntimeError('The x-range must be provided to ResolutionModel via '
'xstart and xend parameters.')
import numpy as np
if accuracy is None:
accuracy = self.default_accuracy
n = 5
acc = accuracy * 2
x = []
y = []
while n < self.max_model_size:
x = np.linspace(xstart, xend, n)
y = model(x)
dx = (x[1] - x[0]) / 2
xx = np.linspace(xstart + dx, xend - dx, n - 1)
yi = np.interp(xx, x, y)
yy = model(xx)
acc = np.max(np.abs(yy - yi))
if acc <= accuracy:
break
x = self._mergeArrays(x, xx)
y = self._mergeArrays(y, yy)
n = len(x)
return list(x), list(y)
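# Usage sketch: tabulating a Gaussian-shaped resolution function on [-10, 10].
# The model argument may equally be a prepared (x, y) tuple or a list of
# callables/tuples for multi-spectrum fits:
#
#     import numpy as np
#     rm = ResolutionModel(lambda x: np.exp(-x**2), xstart=-10, xend=10,
#                          accuracy=1e-4)
#     x, y = rm.model   # lists of tabulated x- and y-values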
class PhysicalProperties(object):
"""
Contains information about measurement conditions of physical properties
"""
HEATCAPACITY = 1
SUSCEPTIBILITY = 2
MAGNETISATION = 3
MAGNETICMOMENT = 4
    def _str2id(self, typeid):
        """Convert a physical-property type name to its integer type id (0 if not recognised)."""
        mappings = [['cp', 'cv', 'heatcap'], ['chi', 'susc'], ['mag', 'm(h)'], ['mom', 'm(t)']]
        for id in range(4):
            if any([typeid.lower() in elem for elem in mappings[id]]):
                return id + 1
        return 0
def __init__(self, typeid, *args, **kwargs):
"""
Initialize physical properties environment.
:param typeid: a flag or string (case insensitive) indicating the type of physical properties.
"Cp" or "Cv" or "HeatCap*" or 1: Data is heat capacity in J/mol/K
"chi" or "susc*" or 2: Data is magnetic susceptibility
"mag*" or "M(H)" or 3: Data is magnetisation vs field
"mom*" or "M(T)" or 4: Data is magnetic moment vs temperature
        :param hdir: the direction of the applied magnetic field for susceptibility or M(T) measurements
        :param hmag: the magnitude in Tesla of the magnetic field for M(T)
        :param temperature: the temperature in Kelvin of measurements of M(H)
        :param inverse: a boolean indicating whether the data is chi or 1/chi (respectively M(T) or 1/M(T))
:param unit: the unit the data was measured in. Either: 'bohr', 'SI' or 'cgs'.
:param lambda: (susceptibility only) the value of the exchange constant in inverse susc units
:param chi0: (susceptibility only) the value of the residual (background) susceptibility
typeid is required in all cases, and all other parameters may be specified as keyword arguments.
otherwise the syntax is:
PhysicalProperties('Cp') # No further parameters required for heat capacity
PhysicalProperties('chi', hdir, inverse, unit, lambda, chi0)
PhysicalProperties('chi', unit)
PhysicalProperties('mag', hdir, temp, unit)
PhysicalProperties('mag', unit)
PhysicalProperties('M(T)', hmag, hdir, inverse, unit)
PhysicalProperties('M(T)', unit)
Defaults are: hdir=[0, 0, 1]; hmag=1; temp=1; inverse=False; unit='cgs'; lambda=chi0=0.
"""
self._physpropUnit = 'cgs'
self._suscInverseFlag = False
self._hdir = [0., 0., 1.]
self._hmag = 1.
self._physpropTemperature = 1.
self._lambda = 0. # Exchange parameter (for susceptibility only)
self._chi0 = 0. # Residual/background susceptibility (for susceptibility only)
self._typeid = self._str2id(typeid) if isinstance(typeid, string_types) else int(typeid)
try:
initialiser = getattr(self, 'init' + str(self._typeid))
except AttributeError:
raise ValueError('Physical property type %s not recognised' % (str(typeid)))
initialiser(*args, **kwargs)
def _checkmagunits(self, unit, default=None):
""" Checks that unit string is valid and converts to correct case. """
if 'cgs' in unit.lower():
return 'cgs'
elif 'bohr' in unit.lower():
return 'bohr'
elif 'SI' in unit.upper():
return 'SI'
elif default is not None:
return default
else:
raise ValueError('Unit %s not recognised' % (unit))
def _checkhdir(self, hdir):
import numpy as np
try:
if isinstance(hdir, string_types):
if 'powder' in hdir.lower():
return 'powder'
else:
raise TypeError()
else:
hdir = np.array(hdir)
if len(hdir) != 3:
raise TypeError()
hdir * hdir # Catches most cases where elements not numeric...
except TypeError:
            raise ValueError('Magnetic field direction %s not recognised' % (str(hdir)))
return hdir
@property
def TypeID(self):
return self._typeid
@property
def Unit(self):
return self._physpropUnit
@Unit.setter
def Unit(self, value):
self._physpropUnit = self._checkmagunits(value)
@property
def Inverse(self):
return self._suscInverseFlag if (self._typeid == self.SUSCEPTIBILITY or self._typeid == self.MAGNETICMOMENT) else None
@Inverse.setter
def Inverse(self, value):
if (self._typeid == self.SUSCEPTIBILITY or self._typeid == self.MAGNETICMOMENT):
if isinstance(value, string_types):
self._suscInverseFlag = value.lower() in ['true', 't', '1', 'yes', 'y']
else:
                self._suscInverseFlag = bool(value)  # NB: bool() of any non-empty string is True
else:
            raise NameError('This physical property does not support the Inverse attribute')
@property
def Hdir(self):
return self._hdir if (self._typeid != self.HEATCAPACITY) else None
@Hdir.setter
def Hdir(self, value):
if (self._typeid != self.HEATCAPACITY):
self._hdir = self._checkhdir(value)
@property
def Hmag(self):
return self._hmag if (self._typeid == self.MAGNETICMOMENT) else None
@Hmag.setter
def Hmag(self, value):
if (self._typeid == self.MAGNETICMOMENT):
self._hmag = float(value)
@property
def Temperature(self):
return self._physpropTemperature if (self._typeid == self.MAGNETISATION) else None
@Temperature.setter
def Temperature(self, value):
if (self._typeid == self.MAGNETISATION):
self._physpropTemperature = float(value)
@property
def Lambda(self):
return self._lambda if (self._typeid == self.SUSCEPTIBILITY) else None
@Lambda.setter
def Lambda(self, value):
if (self._typeid == self.SUSCEPTIBILITY):
self._lambda = float(value)
@property
def Chi0(self):
return self._chi0 if (self._typeid == self.SUSCEPTIBILITY) else None
@Chi0.setter
def Chi0(self, value):
if (self._typeid == self.SUSCEPTIBILITY):
self._chi0 = float(value)
def init1(self, *args, **kwargs):
""" Initialises environment for heat capacity data """
if len(args) > 0:
raise ValueError('No environment arguments should be specified for heat capacity')
def _parseargs(self, mapping, *args, **kwargs):
args = [_f for _f in list(args) if _f]
# Handles special case of first argument being a unit type
if len(args) > 0:
try:
                if self._checkmagunits(args[0], 'bad') != 'bad':
kwargs['Unit'] = args.pop(0)
except AttributeError:
pass
for i in range(len(mapping)):
if len(args) > i:
setattr(self, mapping[i], args[i])
elif mapping[i] in kwargs.keys():
setattr(self, mapping[i], kwargs[mapping[i]])
def init2(self, *args, **kwargs):
""" Initialises environment for susceptibility data """
mapping = ['Hdir', 'Inverse', 'Unit', 'Lambda', 'Chi0']
self._parseargs(mapping, *args, **kwargs)
def init3(self, *args, **kwargs):
""" Initialises environment for M(H) data """
mapping = ['Hdir', 'Temperature', 'Unit']
self._parseargs(mapping, *args, **kwargs)
def init4(self, *args, **kwargs):
""" Initialises environment for M(T) data """
mapping = ['Hmag', 'Hdir', 'Inverse', 'Unit']
self._parseargs(mapping, *args, **kwargs)
def toString(self):
"""Create function initialisation string"""
types = ['CrystalFieldHeatCapacity', 'CrystalFieldSusceptibility',
'CrystalFieldMagnetisation', 'CrystalFieldMoment']
out = 'name=%s' % (types[self._typeid - 1])
if self._typeid != self.HEATCAPACITY:
out += ',Unit=%s' % (self._physpropUnit)
if 'powder' in self._hdir:
out += ',powder=1'
else:
out += ',Hdir=(%s)' % (','.join([str(hh) for hh in self._hdir]))
if self._typeid == self.MAGNETISATION:
out += ',Temperature=%s' % (self._physpropTemperature)
else: # either susceptibility or M(T)
out += ',inverse=%s' % (1 if self._suscInverseFlag else 0)
            out += (',Hmag=%s' % (self._hmag)) if self._typeid == self.MAGNETICMOMENT else ''
if self._typeid == self.SUSCEPTIBILITY and self._lambda != 0:
out += ',Lambda=%s' % (self._lambda)
if self._typeid == self.SUSCEPTIBILITY and self._chi0 != 0:
out += ',Chi0=%s' % (self._chi0)
return out
def getAttributes(self, dataset=None):
"""Returns a dictionary of PhysicalProperties attributes for use with IFunction"""
dataset = '' if dataset is None else str(dataset)
out = {}
if self._typeid != self.HEATCAPACITY:
out['Unit%s' % (dataset)] = self._physpropUnit
if 'powder' in self._hdir:
out['powder%s' % (dataset)] = 1
else:
out['Hdir%s' % (dataset)] = [float(hh) for hh in self._hdir] # needs to be list
if self._typeid != self.MAGNETISATION: # either susceptibility or M(T)
out['inverse%s' % (dataset)] = 1 if self._suscInverseFlag else 0
if self._typeid == self.MAGNETICMOMENT:
out['Hmag%s' % (dataset)] = self._hmag
if self._typeid == self.SUSCEPTIBILITY:
out['Lambda%s' % (dataset)] = self._lambda
out['Chi0%s' % (dataset)] = self._chi0
return out
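# Usage sketch: describing an inverse-susceptibility measurement along [1, 1, 0]
# in SI units (argument names follow the init2 mapping above):
#
#     pp = PhysicalProperties('susc', [1, 1, 0], Inverse=True, Unit='SI')
#     pp.toString()
#     # -> 'name=CrystalFieldSusceptibility,Unit=SI,Hdir=(1,1,0),inverse=1'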
|
ScreamingUdder/mantid
|
scripts/Inelastic/CrystalField/function.py
|
Python
|
gpl-3.0
| 26,175
|
[
"Gaussian"
] |
59038c91957f71233609e6747c4a318b63c656a86c6d4a9ca73342bd8406625e
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Effective core potential (ECP)
This module exposes some ecp integration functions from the C implementation.
Reference for ecp integral computation
* Analytical integration
J. Chem. Phys. 65, 3826
J. Chem. Phys. 111, 8778
J. Comput. Phys. 44, 289
* Numerical integration
J. Comput. Chem. 27, 1009
Chem. Phys. Lett. 296, 445
'''
import ctypes
import numpy
from pyscf import lib
from pyscf.gto import moleintor
libecp = moleintor.libcgto
def type1_by_shell(mol, shls, cart=False):
li = mol.bas_angular(shls[0])
lj = mol.bas_angular(shls[1])
if cart:
fn = libecp.ECPtype1_cart
di = (li+1)*(li+2)//2 * mol.bas_nctr(shls[0])
dj = (lj+1)*(lj+2)//2 * mol.bas_nctr(shls[1])
else:
fn = libecp.ECPtype1_sph
di = (li*2+1) * mol.bas_nctr(shls[0])
dj = (lj*2+1) * mol.bas_nctr(shls[1])
buf = numpy.empty((di,dj), order='F')
cache = numpy.empty(buf.size*5)
fn(buf.ctypes.data_as(ctypes.c_void_p),
(ctypes.c_int*2)(*shls),
mol._ecpbas.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(len(mol._ecpbas)),
mol._atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.natm),
mol._bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.nbas),
mol._env.ctypes.data_as(ctypes.c_void_p), lib.c_null_ptr(),
cache.ctypes.data_as(ctypes.c_void_p))
return buf
def type2_by_shell(mol, shls, cart=False):
li = mol.bas_angular(shls[0])
lj = mol.bas_angular(shls[1])
if cart:
fn = libecp.ECPtype2_cart
di = (li+1)*(li+2)//2 * mol.bas_nctr(shls[0])
dj = (lj+1)*(lj+2)//2 * mol.bas_nctr(shls[1])
else:
fn = libecp.ECPtype2_sph
di = (li*2+1) * mol.bas_nctr(shls[0])
dj = (lj*2+1) * mol.bas_nctr(shls[1])
buf = numpy.empty((di,dj), order='F')
cache = numpy.empty(buf.size*5)
fn(buf.ctypes.data_as(ctypes.c_void_p),
(ctypes.c_int*2)(*shls),
mol._ecpbas.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(len(mol._ecpbas)),
mol._atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.natm),
mol._bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.nbas),
mol._env.ctypes.data_as(ctypes.c_void_p), lib.c_null_ptr(),
cache.ctypes.data_as(ctypes.c_void_p))
return buf
AS_ECPBAS_OFFSET= 18
AS_NECPBAS = 19
def so_by_shell(mol, shls):
'''Spin-orbit coupling ECP in spinor basis
i/2 <Pauli_matrix dot l U(r)>
'''
li = mol.bas_angular(shls[0])
lj = mol.bas_angular(shls[1])
di = (li*4+2) * mol.bas_nctr(shls[0])
dj = (lj*4+2) * mol.bas_nctr(shls[1])
bas = numpy.vstack((mol._bas, mol._ecpbas))
mol._env[AS_ECPBAS_OFFSET] = len(mol._bas)
mol._env[AS_NECPBAS] = len(mol._ecpbas)
buf = numpy.empty((di,dj), order='F', dtype=numpy.complex128)
cache = numpy.empty(buf.size*48)
fn = libecp.ECPso_spinor
fn(buf.ctypes.data_as(ctypes.c_void_p),
(ctypes.c_int*2)(di, dj),
(ctypes.c_int*2)(*shls),
mol._atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.natm),
bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(mol.nbas),
mol._env.ctypes.data_as(ctypes.c_void_p), lib.c_null_ptr(),
cache.ctypes.data_as(ctypes.c_void_p))
return buf
def core_configuration(nelec_core):
conf_dic = {
0 : '0s0p0d0f',
2 : '1s0p0d0f',
10: '2s1p0d0f',
18: '3s2p0d0f',
28: '3s2p1d0f',
36: '4s3p1d0f',
46: '4s3p2d0f',
54: '5s4p2d0f',
60: '4s3p2d1f',
68: '5s4p2d1f',
78: '5s4p3d1f',
92: '5s4p3d2f',
}
if nelec_core not in conf_dic:
        raise RuntimeError('Core configuration for %d core electrons is not available.'
                           % nelec_core)
coreshell = [int(x) for x in conf_dic[nelec_core][::2]]
return coreshell
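# Usage sketch: core_configuration maps a core electron count to the number of
# core s/p/d/f shells, e.g. a Ne-like 10-electron core:
#
#     core_configuration(10)   # -> [2, 1, 0, 0]
#
# type1_by_shell/type2_by_shell return one shell-pair block of the ECP matrix;
# for a Mole object 'mol' built with an ECP basis (hypothetical example):
#
#     block = type2_by_shell(mol, (0, 1))             # spherical GTOs
#     block = type2_by_shell(mol, (0, 1), cart=True)  # Cartesian GTOs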
if __name__ == '__main__':
from pyscf import gto, scf
mol = gto.M(atom='''
Cu 0. 0. 0.
H 0. 0. -1.56
H 0. 0. 1.56
''',
basis={'Cu':'lanl2dz', 'H':'sto3g'},
ecp = {'cu':'lanl2dz'},
#basis={'Cu':'crenbs', 'H':'sto3g'},
#ecp = {'cu':'crenbs'},
charge=-1,
verbose=4)
mf = scf.RHF(mol)
print(mf.kernel(), -196.09477546034623)
mol = gto.M(atom='''
Na 0. 0. 0.
H 0. 0. 1.
''',
basis={'Na':'lanl2dz', 'H':'sto3g'},
ecp = {'Na':'lanl2dz'},
verbose=0)
mf = scf.RHF(mol)
print(mf.kernel(), -0.45002315562861461)
|
gkc1000/pyscf
|
pyscf/gto/ecp.py
|
Python
|
apache-2.0
| 5,175
|
[
"PySCF"
] |
2ceb3de52c64c535ec43c7c37993dba10a5b9dd0cd105d61bff849ea4c375a92
|
# Copyright 2008 by Bartek Wilczynski.
# Adapted from Bio.MEME.Parser by Jason A. Hackney. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
from Bio.Alphabet import IUPAC
from Bio.Motif.Parsers.MEME import MEMEMotif
class Record:
"""The class for holding the results from a MAST run.
A MAST.Record holds data about matches between motifs and sequences.
The motifs held by the Record are objects of the class MEMEMotif.
Methods:
get_motif_by_name (motif_name): returns a MEMEMotif with the given
name.
"""
def __init__ (self):
self.sequences = []
self.version = ""
self.database = ""
self.diagrams = {}
self.alphabet = None
self.motifs = []
def get_motif_by_name (self, name):
for m in self.motifs:
if m.name == name:
return m
def read(handle):
"""read(handle)"""
record = Record()
__read_version(record, handle)
__read_database_and_motifs(record, handle)
__read_section_i(record, handle)
__read_section_ii(record, handle)
__read_section_iii(record, handle)
return record
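# Usage sketch (Python 2, matching the handle.next() calls below); 'mast.out'
# is a hypothetical MAST text-output file:
#
#     handle = open('mast.out')
#     record = read(handle)
#     handle.close()
#     print record.version, record.database
#     motif = record.get_motif_by_name('1')   # MAST numbers motifs '1', '2', ...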
# Everything below is private
def __read_version(record, handle):
for line in handle:
if "MAST version" in line:
break
else:
raise ValueError("Improper input file. Does not begin with a line with 'MAST version'")
record.version = line.strip().split()[2]
def __read_database_and_motifs(record, handle):
for line in handle:
if line.startswith('DATABASE AND MOTIFS'):
break
line = handle.next()
if not line.startswith('****'):
raise ValueError("Line does not start with '****':\n%s" % line)
line = handle.next()
if not 'DATABASE' in line:
raise ValueError("Line does not contain 'DATABASE':\n%s" % line)
words = line.strip().split()
record.database = words[1]
if words[2] == '(nucleotide)':
record.alphabet = IUPAC.unambiguous_dna
elif words[2] == '(peptide)':
record.alphabet = IUPAC.protein
for line in handle:
if 'MOTIF WIDTH' in line:
break
line = handle.next()
if not '----' in line:
raise ValueError("Line does not contain '----':\n%s" % line)
for line in handle:
if not line.strip():
break
words = line.strip().split()
motif = MEMEMotif()
motif.alphabet = record.alphabet
motif.name = words[0]
motif.length = int(words[1])
# motif.add_instance(words[2])
record.motifs.append(motif)
def __read_section_i(record, handle):
for line in handle:
if line.startswith('SECTION I:'):
break
for line in handle:
if line.startswith('SEQUENCE NAME'):
break
line = handle.next()
if not line.startswith('---'):
raise ValueError("Line does not start with '---':\n%s" % line)
for line in handle:
if not line.strip():
break
else:
sequence, description_evalue_length = line.split(None, 1)
record.sequences.append(sequence)
line = handle.next()
if not line.startswith('****'):
raise ValueError("Line does not start with '****':\n%s" % line)
def __read_section_ii(record, handle):
for line in handle:
if line.startswith('SECTION II:'):
break
for line in handle:
if line.startswith('SEQUENCE NAME'):
break
line = handle.next()
if not line.startswith('---'):
raise ValueError("Line does not start with '---':\n%s" % line)
for line in handle:
if not line.strip():
break
elif line.startswith(" "):
diagram = line.strip()
record.diagrams[sequence] += diagram
else:
sequence, pvalue, diagram = line.split()
record.diagrams[sequence] = diagram
line = handle.next()
if not line.startswith('****'):
raise ValueError("Line does not start with '****':\n%s" % line)
def __read_section_iii(record, handle):
for line in handle:
if line.startswith('SECTION III:'):
break
for line in handle:
if line.startswith('****'):
break
for line in handle:
if line.startswith('*****'):
break
for line in handle:
if line.strip():
break
|
BlogomaticProject/Blogomatic
|
opt/blog-o-matic/usr/lib/python/Bio/Motif/Parsers/MAST.py
|
Python
|
gpl-2.0
| 4,534
|
[
"Biopython"
] |
bd281be86b480dc59635564a11bda0e399b5c958510f9b2a9289a328b8069446
|
"""
cclib (http://cclib.sf.net) is (c) 2006, the cclib development team
and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html).
"""
__revision__ = "$Revision: 620 $"
import sys
if 'qt' in sys.modules.keys():
from qtprogress import QtProgress
if 'PyQt4' in sys.modules.keys():
from qt4progress import Qt4Progress
from textprogress import TextProgress
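# Usage sketch: the progress classes share the small initialize/update
# interface used by the cclib parsers; with TextProgress, for example:
#
#     progress = TextProgress()
#     progress.initialize(100)            # total number of steps
#     progress.update(50, "Parsing")      # current step plus a status label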
|
connie/RMG-Java
|
source/cclib/progress/__init__.py
|
Python
|
mit
| 376
|
[
"cclib"
] |
49ae66bd37e50f67a00423fadcb7d2e3286003c79b749c416eed5791898c2813
|
#!/usr/bin/env python
"""
wxBlender
=========
wxPython in Blender Addon
wxPython toolkit widgets running in Blender.
Menus, Frames, Dialogs, Custom Buttons, Thumbnails, etc...

Requirements
------------
* Blender 2.65+(built with Python33+)
http://www.blender.org/
* wxPython Project Phoenix(works with Python3)
http://wxpython.org/Phoenix/snapshot-builds/
* wxBlender
https://github.com/Metallicow/wxBlender
Installation
------------
1. The phoenix download might come as a python .egg.
Python .egg files are simply a renamed .zip file,
so in other words you can open/extract the egg
with an compression application
such as 7-zip, WinRAR, etc.
Place wx directory from the phoenix download
in Blender's `#.##/python/lib/site-packages` directory.
2. Place the wx_blender directory in your Blender User `scripts/addons` directory.
Depending on your type of Blender build, this location may vary.
Ex: if Blender is portable.
Running wxBlender
-----------------
1. Start up Blender.
2. Go to File>User Preferences... in the MenuBar.
3. Under the "Addons" tab of the Blender User Preferences Dialog...
...Check the "3D View: wxBlender" addon checkbox.
4. Click the "Save User Settings" button in the Blender User Preferences Dialog
so the plugin starts at startup.
5. Close the Blender User Preferences Dialog.
In the properties window of the 3D View there should now be a Panel
named "wxBlender".
[Blender logo usage guidelines](http://www.blender.org/about/logo/)
-------------------------------------------------------------------


The Blender logo is a copyrighted property of NaN Holding B.V, and has been licensed in 2002 to the Blender Foundation. The logo and the brand name "Blender" are not part of the GNU GPL, and can only be used commercially by the Blender Foundation on products, websites and publications.
Under the following conditions, third parties may use the Blender logo as well:
1. The logo can only be used to point to the product Blender. When used with a link on a web page, it should point to the url [blender.org](http://www.blender.org/).
2. You will visualize and promote your own branding more prominent than you use the Blender logo. The Blender logo only can be used as a secondary brand, which means it has to be clear for an average viewer that this is not an official Blender or Blender Foundation website, publication or product.
3. You can use the Blender logo on promotion products, such as T-shirts or caps or trade show booths, provided it is a secondary brand as described in point 2.
4. The logo is used unaltered, without fancy enhancements, in original colors, original typography, and always complete (logo + text blender).
5. In case you use the logo on products you sell commercially, you always have to [contact us](http://www.blender.org/foundation/) with a picture of how it will be used, and ask for explicit permission.
If you have further questions or doubts, do not hesitate to [contact us](http://www.blender.org/foundation/).
Usage in artwork and community websites
[Usage in artwork and community websites](http://www.blender.org/about/logo/)
-----------------------------------------------------------------------------
Blender's logo has been used in hundreds of ways. This was - and still is - considered to be an honest tribute to Blender, and the guidelines are not meant to make these uses "illegal" or "officially disapproved". This page is only meant to clarify the Blender Foundation guidelines so that people know their minimum rights and where they can use the logo.
Modifying the Blender logo is really part of your own artistic freedom, and the Blender Foundation will never act against such tributes. Just don't expect us to "officially approve" of it, that's all.
Thanks,
Ton Roosendaal, Chairman of the Blender Foundation
Amsterdam, March 2009
License
-------
Anything **NOT** involving "Blender" Logos and Graphics/Images/blender16 PyEmbeddedImage is licensed
GNU GPL v2.0
http://www.gnu.org/licenses/old-licenses/gpl-2.0.html
Tested On
---------
| Operating System | Blender Versions | Phoenix |
|:-------------------------:|:---------------------------:|:-----------------:|
| Windows XP SP3 32bit | 2.65 - 2.69 | 3.0.1.dev75563 |
| Other OS | TODO | |
"""
#-Imports.----------------------------------------------------------------------
#--Python Imports.
import os
import sys
## print('sys.executable', sys.executable)
# returns C:\Program Files\Blender Foundation\Blender #.##\blender.exe
## print('sys.prefix', sys.prefix)
# returns C:\Program Files\Blender Foundation\Blender #.##\#.##\python
# Add site-packages to the system path.
# Older Blenders might not have it on the sys.path.
# We need to find wx in site-packages.
sitePackPath = os.path.join(sys.prefix, 'lib', 'site-packages')
print(os.path.exists(sitePackPath))
if os.path.exists(sitePackPath) and sitePackPath in sys.path:
pass # Ok. Good.
elif os.path.exists(sitePackPath) and sitePackPath not in sys.path:
sys.path.insert(0, sitePackPath)
else:
raise Exception("Can't locate %s" % sitePackPath)
#--Blender Imports.
import bpy
#--wxPython Imports.
import wx
from wx.lib.embeddedimage import PyEmbeddedImage
#--Local Plugin Imports.
from .HackRefresh import HackRefresh
#-Globals.----------------------------------------------------------------------
# Define a translation function.
_ = wx.GetTranslation
# Get version infos.
wxPythonVersion = wx.version()
major, minor, micro, release = sys.version_info[0:-1]
pythonVersion = u'%d.%d.%d-%s' %(major, minor, micro, release)
gFileDir = os.path.dirname(os.path.abspath(__file__))
gImgDir = gFileDir + os.sep + 'images'
gThumbDir = gFileDir + os.sep + 'thumbnails'
ID_SHOW_FRAME = wx.NewId()
ID_BLENDER_ORG = wx.NewId()
ID_WXBLENDER_GITHUB = wx.NewId()
ID_OPEN_WXBLENDER_PLUGINDIR = wx.NewId()
ID_HELLOBLENDER = wx.NewId()
blender16 = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACPUlEQVR4Ac2Ra0hTYQCGD0gE"
"bLq7bjtbBAEEBRBSgGhJJVG4hktUNKhEsMJCKSoJ2sWd4zItqYwQlYmEQkAZXZUgQAwLuhFB"
"kaaamFGmSrbpPE+bSgSCRBD0wPvzeb/3+z7hnzFZbl6BJPaMlFve383TpP5VyetDBmdIFj+H"
"JXH2Zk78QWE58IurFI81a+6MxaG4LQ481szwaXPau5JE96RXVKi2012kb2516XRL5G/llriw"
"T+yixg5V0VTaFiOi+PSK4tEq+LQQ0DNwwvz4mku3fklJy+74VG+aao+crt71psTUFpPxG6G9"
"EAbvwMcH0FEKgSTCFZYv0lb1xiUlT4uN6k+nzFJEEifwG6BLpncSDgdfkXuxh0d9P+BFPQRM"
"9B03P6zPTFj3S0YSV0fnP4/dFTkJriSjhMdJOXkbIVlG2BTAmN3E0HgI2pzMeA08KTb4F+QK"
"MQvZNhaTJ9yWkYjPOEtwO1OhEPb8IEJKFcKWGuLSL/BseAbuHQGvFiRbS+xkDwHb9OIrN3QW"
"aJxzkgWq18DEWxq7htHnNbHSeZWjzS/h+wjUbSDiT2LgmPmSEKm0jYcrbfSXJXqEKDey1aaQ"
"XxxCNkLTjqgwyNgMjE4rKOFRaHWBpOer29rb4FDvFIIO9bbaDNVm4TcaM1X75r9TNi0saXPB"
"9RyoXct88Tk75zNUBcJy3C9IODDltvZz1gqyAaRoAhZCPutgx15NkfAndOZrtO258YV9pcbL"
"H8pMdbdyEwq79+u0wn/FT0hkVS62vfMaAAAAAElFTkSuQmCC")
class AddMeshesPanel(bpy.types.Panel):
"""Creates a Panel in the 3D View properties window"""
bl_label = "Add Mesh"
bl_idname = "OBJECT_3DVIEW_AddMeshesPanel"
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_context = "objectmode"
def draw_header(self, context):
layout = self.layout
# Always place the PLUGIN Icon in the header, so you know it is a plugin.
layout.label(icon='PLUGIN')
# Optionally you can add more than one icon to further visually describe your plugin's nature.
# In this case the same icon as a community addon.
layout.label(icon='POSE_DATA')
def draw(self, context):
layout = self.layout
obj = context.object
row = layout.row()
row.label(text="About This Plugin", icon='HELP')
row = layout.row()
row.label(text="Blender Meshes", icon='BLENDER')
col = layout.column(align=True)
row = col.row(align=True)
row.operator("mesh.primitive_plane_add", text='Plane', icon='MESH_PLANE')
row.operator("mesh.primitive_grid_add", text='Grid', icon='MESH_GRID')
row = col.row(align=True)
row.operator("mesh.primitive_cube_add", text='Cube', icon='MESH_CUBE')
row.operator("mesh.primitive_circle_add", text='Circle', icon='MESH_CIRCLE')
row = col.row(align=True)
row.operator("mesh.primitive_uv_sphere_add", text='UV Sphere', icon='MESH_UVSPHERE')
row.operator("mesh.primitive_ico_sphere_add", text='Ico Sphere', icon='MESH_ICOSPHERE')
row = col.row(align=True)
row.operator("mesh.primitive_cylinder_add", text='Cylinder', icon='MESH_CYLINDER')
row.operator("mesh.primitive_cone_add", text='Cone', icon='MESH_CONE')
row = col.row(align=True)
row.operator("mesh.primitive_torus_add", text='Torus', icon='MESH_TORUS')
row.operator("mesh.primitive_monkey_add", text='Monkey', icon='MESH_MONKEY')
row = layout.row()
row.label(icon='POSE_DATA')
row.label(text="Extra Objects Plugin Meshes")
row.label(icon='PLUGIN') # Add Extra Icon To Other Side of Label
box = layout.box()
col = box.column(align=True)
row = col.row(align=True)
row.operator("mesh.bolt_add", text='Bolt', icon='MOD_SCREW')
row.operator("mesh.landscape_add", text='Landscape', icon='RNDCURVE')
row = col.row(align=True)
row.operator("mesh.primitive_diamond_add", text='Diamond', icon='PMARKER')
row.operator("mesh.primitive_gem_add", text='Gem', icon='SPACE3')
row = col.row(align=True)
row.operator("mesh.primitive_gear", text='Gear', icon='SCRIPTWIN')
row.operator("mesh.primitive_worm_gear", text='Worm Gear', icon='SCRIPTWIN')
row = col.row(align=True)
row.operator("mesh.primitive_steppyramid_add", text='Step Pyramid', icon='SORTSIZE')
row.operator("mesh.honeycomb_add", text='Honeycomb', icon='PLUGIN')
row = col.row(align=True)
row.operator("mesh.primitive_teapot_add", text='Teapot+', icon='PLUGIN')
# Start wxPython Frame App -----------------------------------------------------
class wxGradientBannerPanel(wx.Panel):
def __init__(self, parent, id=wx.ID_ANY,
pos=wx.DefaultPosition, size=(-1, 48),
style=wx.BORDER_NONE, name='panel'):
wx.Panel.__init__(self, parent, id, pos, size, style, name)
self.parent = parent
self.dcBmp = wx.Bitmap(gImgDir + os.sep + 'logo.png', wx.BITMAP_TYPE_PNG)
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
self.Bind(wx.EVT_MOTION, self.OnMotion)
def DrawGradientBanner(self, dc):
dc.GradientFillLinear(self.GetClientRect(), '#FF8000', '#FFFFFF', wx.NORTH)
dc.DrawBitmap(self.dcBmp, 10, (self.GetSize()[1] - self.dcBmp.GetHeight()) // 2)
dc.SetFont(wx.Font(12, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD))
dc.SetTextForeground('#005385')
dc.DrawText('wx', 44, 4)
def OnPaint(self, event):
"""
Handle the wx.EVT_PAINT event for :class: wxGradientBannerPanel.
"""
# wx.EVT_PAINT event handlers should always use only
# those types of DC's specific for wx.EVT_PAINT events.
## dc = wx.BufferedPaintDC(self)
dc = wx.PaintDC(self)
self.DrawGradientBanner(dc)
event.Skip() # Very important to let any higher level handlers be called.
def OnLeftDown(self, event):
"""
Handle the wx.EVT_LEFT_DOWN event for :class: wxGradientBannerPanel.
"""
self.CaptureMouse()
x, y = self.parent.ClientToScreen(event.GetPosition())
originx, originy = self.parent.GetPosition()
dx = x - originx
dy = y - originy
self.parent.delta = ((dx, dy))
def OnLeftUp(self, event):
"""
Handle the wx.EVT_LEFT_UP event for :class: wxGradientBannerPanel.
"""
if self.HasCapture():
self.ReleaseMouse()
def OnMotion(self, event):
"""
Handle the wx.EVT_MOTION event for :class: wxGradientBannerPanel.
"""
if event.Dragging() and event.LeftIsDown():
x, y = self.parent.ClientToScreen(event.GetPosition())
self.parent.Move((x - self.parent.delta[0], y - self.parent.delta[1]))
class wxAddMeshesPanel(wx.Panel):
def __init__(self, parent, id=wx.ID_ANY,
pos=wx.DefaultPosition, size=wx.DefaultSize,
style=wx.BORDER_SUNKEN, name='panel'):
wx.Panel.__init__(self, parent, id, pos, size, style, name)
self.parent = parent
btn1 = wx.Button(self, -1, 'Add Meshes Panel to 3D View Properties')
btn1.Bind(wx.EVT_BUTTON, self.OnRegisterAddMeshesPanel)
gSizer = wx.GridSizer(rows=2, cols=2, vgap=5, hgap=5)
dirBpyOpsMesh = dir(bpy.ops.mesh)
btn2 = wx.BitmapButton(self, -1, wx.Bitmap(gThumbDir + os.sep + 'cube.png', wx.BITMAP_TYPE_PNG))
btn2.SetToolTip(wx.ToolTip('bpy.ops.mesh.primitive_cube_add()'))
btn2.Bind(wx.EVT_BUTTON, self.OnAddMeshFromToolTip)
btn3 = wx.BitmapButton(self, -1, wx.Bitmap(gThumbDir + os.sep + 'monkey.png', wx.BITMAP_TYPE_PNG))
btn3.SetToolTip(wx.ToolTip('bpy.ops.mesh.primitive_monkey_add()'))
btn3.Bind(wx.EVT_BUTTON, self.OnAddMeshFromToolTip)
btn4 = wx.BitmapButton(self, -1, wx.Bitmap(gThumbDir + os.sep + 'pyramid.png', wx.BITMAP_TYPE_PNG))
btn4.SetToolTip(wx.ToolTip('bpy.ops.mesh.primitive_steppyramid_add()'))
btn4.Bind(wx.EVT_BUTTON, self.OnAddMeshFromToolTip)
if not 'primitive_steppyramid_add' in dirBpyOpsMesh:
btn4.Enable(False)
btn5 = wx.BitmapButton(self, -1, wx.Bitmap(gThumbDir + os.sep + 'teapot.png', wx.BITMAP_TYPE_PNG))
btn5.SetToolTip(wx.ToolTip('bpy.ops.mesh.primitive_teapot_add()'))
btn5.Bind(wx.EVT_BUTTON, self.OnAddMeshFromToolTip)
if not 'primitive_teapot_add' in dirBpyOpsMesh:
btn5.Enable(False)
gSizer.AddMany([(btn2, 0, wx.ALIGN_TOP | wx.ALIGN_LEFT),
(btn3, 0, wx.ALIGN_TOP | wx.ALIGN_RIGHT),
(btn4, 0, wx.ALIGN_BOTTOM | wx.ALIGN_LEFT),
(btn5, 0, wx.ALIGN_BOTTOM | wx.ALIGN_RIGHT),
])
vbSizer = wx.BoxSizer(wx.VERTICAL)
vbSizer.Add(btn1, 0, wx.EXPAND | wx.ALL, 5)
vbSizer.Add(wx.StaticLine(self), 0, wx.EXPAND | wx.ALL, 0)
vbSizer.Add(gSizer, 1, wx.ALIGN_CENTRE | wx.SHAPED, 5)
self.SetSizer(vbSizer)
def OnAddMeshFromToolTip(self, event):
evtObj = event.GetEventObject()
tt = evtObj.GetToolTip()
ttStr = tt.GetTip()
try:
eval(ttStr)
except AttributeError as exc:
import traceback
tb = traceback.format_exc()
wx.MessageBox('%s' % tb, 'AttributeError', wx.ICON_ERROR)
## wx.CallAfter(HackRefresh)
self.parent.Close() # Optional: Lets destroy after clicking.
def OnRegisterAddMeshesPanel(self, event):
bpy.utils.register_class(AddMeshesPanel)
## wx.CallAfter(HackRefresh)
self.parent.Close() # Optional: Lets destroy after clicking.
class wxBlenderAddMeshesFrame(wx.Frame):
def __init__(self, parent, id=wx.ID_ANY, title=wx.EmptyString,
pos=wx.DefaultPosition, size=wx.DefaultSize,
# Use these styleBits for a standard frame...
## style=wx.DEFAULT_FRAME_STYLE |
## wx.STAY_ON_TOP
# ...Or use these styleBits for a borderless/taskbarless frame.(WinXP)
style=wx.STAY_ON_TOP |
wx.FRAME_NO_TASKBAR |
wx.RESIZE_BORDER
, name='frame'):
wx.Frame.__init__(self, parent, id, title, pos, size, style, name)
self.delta = (0, 0)
## self.SetDoubleBuffered(True)
self.CreateStatusBar()
self.SetStatusText('wxPython %s running on Python %s' %(wxPythonVersion, pythonVersion))
banner_panel = wxGradientBannerPanel(self)
panel = wxAddMeshesPanel(self)
self.SetBackgroundColour(panel.GetBackgroundColour())
vbSizer = wx.BoxSizer(wx.VERTICAL)
vbSizer.Add(banner_panel, 0, wx.EXPAND | wx.ALL)
vbSizer.Add(panel, 1, wx.EXPAND | wx.ALL)
self.SetSizer(vbSizer)
self.Fit()
self.SetMinSize(self.GetSize())
self.sizeAttr = self.GetSize()
#### self.SetTransparent(200) # Not working out so well with the HackRefresh...
# We want the Escape key to Close the frame, so bind the whole family tree also.
for child in self.GetChildren():
child.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
for grandchild in child.GetChildren():
grandchild.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
self.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
self.SetIcon(wx.Icon(wx.Bitmap(gImgDir + os.sep + 'favicon.ico', wx.BITMAP_TYPE_ICO)))
self.Bind(wx.EVT_CLOSE, self.OnDestroy)
self.Bind(wx.EVT_SIZE, self.OnSize)
self.Bind(wx.EVT_MOVE, self.OnMove)
self.Centre()
self.SetFocus()
# Lose focus destroy workaround.
wx.CallAfter(self.Bind, wx.EVT_ACTIVATE, self.OnActivate)
def OnKeyUp(self, event):
keyCode = event.GetKeyCode()
if keyCode == wx.WXK_ESCAPE:
self.Close()
def OnMove(self, event):
HackRefresh()
def SetSelfSizeAttr(self):
self.sizeAttr = self.GetSize()
def OnSize(self, event):
self.Layout()
# Smooth sizing(what it should be normally) when sizing the frame bigger.
if event.GetSize()[0] < self.sizeAttr[0] or event.GetSize()[1] < self.sizeAttr[1]: # The user is decreasing the frame size.
HackRefresh()
wx.CallAfter(self.SetSelfSizeAttr)
def OnActivate(self, event):
"""
Destroy when frame loses focus to workaround Blender GUI lockup
issue when wxapp is active.
Ex: User has clicked back to the Blender GUI or another application.
"""
try:
self.Close()
except RuntimeError:
###############
## Traceback (most recent call last):
## File "....Blender\2.69\scripts\addons\wx_blender\wxblender.py", line ###, in OnActivate
## self.Close()
## RuntimeError: wrapped C/C++ object of type wxBlenderAddMeshesFrame has been deleted
###############
pass
except Exception as exc:
wx.Bell()
import traceback
tb = traceback.format_exc()
f = open(gFileDir + os.sep + 'traceback.log', 'w')
f.write('%s' %tb)
f.close()
def OnDestroy(self, event):
self.Destroy()
class wxBlenderApp(wx.App):
def OnInit(self):
self.SetClassName('wxBlenderApp')
self.SetAppName('wxBlenderApp')
gMainWin = wxBlenderAddMeshesFrame(None)
gMainWin.SetTitle('wxBlenderApp')
self.SetTopWindow(gMainWin)
gMainWin.Show()
return True
# End wxPython Frame App -------------------------------------------------------
def OnHelloBlender(event):
print('Hello Blender!')
def OnShowwxFrame(event):
gMainWin = wxBlenderAddMeshesFrame(None)
gMainWin.SetTitle('wxBlenderApp')
gMainWin.Show()
#### app = wxBlenderApp(False)
#### app.MainLoop()
def OnVisitWebsite(event):
"""
Open a website in users default webbrowser.
"""
import webbrowser
evtId = event.GetId()
if evtId == ID_BLENDER_ORG:
webbrowser.open('http://www.blender.org/')
elif evtId == ID_WXBLENDER_GITHUB:
webbrowser.open('https://github.com/Metallicow/wxBlender')
def OnAddMeshFromMenuItemHelpString(event):
"""
Add mesh to scene.
"""
evtObj = event.GetEventObject()
evtId = event.GetId()
eval(evtObj.GetHelpString(evtId))
def OnOpenwxBlenderPluginDir(event):
"""
Open the local plugin dir in explorer, etc...
"""
import webbrowser
webbrowser.open(gFileDir)
class wxPythonFrameInBlender(bpy.types.Operator):
"""Standard wx.Frame App."""
bl_idname = "mcow.wxblender_wxframe"
bl_label = "wxPython Frame in Blender"
def execute(self, context):
gApp = wxBlenderApp(redirect=False,
filename=None,
useBestVisual=False,
clearSigInt=True)
gApp.MainLoop()
return {'FINISHED'}
class wxPythonMenuInBlender(bpy.types.Operator):
"""Simple wx.Menu PopupMenu App."""
bl_idname = "mcow.wxblender_wxmenu"
bl_label = "wxPython Menu in Blender"
def execute(self, context):
gApp = wx.App(redirect=False,
filename=None,
useBestVisual=False,
clearSigInt=True)
# Even though the frame will not be shown,
        # we need to make a frame so the menu will have something to pop up on.
frame = wx.Frame(None)
menu = wx.Menu()
menu.Append(ID_SHOW_FRAME, 'Show wxBlender Add Meshes Frame')
mi = wx.MenuItem(menu, ID_BLENDER_ORG, 'Visit www.blender.org')
mi.SetBitmap(wx.Bitmap(gImgDir + os.sep + 'tango_internet_web_browser16.png', wx.BITMAP_TYPE_PNG))
menu.Append(mi)
mi = wx.MenuItem(menu, ID_WXBLENDER_GITHUB, 'Visit wxBlender on GitHub')
mi.SetBitmap(wx.Bitmap(gImgDir + os.sep + 'tango_internet_web_browser16.png', wx.BITMAP_TYPE_PNG))
menu.Append(mi)
mi = wx.MenuItem(menu, ID_HELLOBLENDER, 'print "Hello Blender!"')
mi.SetBackgroundColour('#393939')
mi.SetTextColour('#FF8000')
mi.SetBitmap(blender16.GetBitmap()) # Show how to use a PyEmbeddedImage.
menu.Append(mi)
mi = wx.MenuItem(menu, ID_OPEN_WXBLENDER_PLUGINDIR, 'Open wxBlender Plugin Dir')
mi.SetBitmap(wx.Bitmap(gImgDir + os.sep + 'tango_folder_open_blue_24.png', wx.BITMAP_TYPE_PNG))
menu.Append(mi)
subMenu = wx.Menu()
for name in ['Plane', 'Cube', 'Circle', 'UV Sphere', 'Icosphere',
'Cylinder', 'Cone', 'Grid', 'Monkey', 'Torus']:
newid = wx.NewId()
if name == 'Icosphere':
subMenu.Append(newid, '%s' % name, 'bpy.ops.mesh.primitive_ico_sphere_add()')
else:
subMenu.Append(newid, '%s' % name, 'bpy.ops.mesh.primitive_%s_add()' % name.lower().replace(' ', '_'))
menu.Bind(wx.EVT_MENU, OnAddMeshFromMenuItemHelpString, id=newid)
menu.AppendSubMenu(submenu=subMenu, text='Add Mesh')
menu.Bind(wx.EVT_MENU, OnShowwxFrame, id=ID_SHOW_FRAME)
menu.Bind(wx.EVT_MENU, OnVisitWebsite, id=ID_BLENDER_ORG)
menu.Bind(wx.EVT_MENU, OnVisitWebsite, id=ID_WXBLENDER_GITHUB)
menu.Bind(wx.EVT_MENU, OnHelloBlender, id=ID_HELLOBLENDER)
menu.Bind(wx.EVT_MENU, OnOpenwxBlenderPluginDir, id=ID_OPEN_WXBLENDER_PLUGINDIR)
frame.PopupMenu(menu)
menu.Destroy()
frame.Close()
gApp.MainLoop()
return {'FINISHED'}
class wxBlenderPanel(bpy.types.Panel):
"""Creates a Panel in the 3D View properties window"""
bl_label = "wxBlender"
bl_idname = "WXBLENDER"
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_context = "objectmode"
def draw_header(self, context):
layout = self.layout
# Always place the PLUGIN Icon in the header, so you know it is a plugin.
layout.label(icon='PLUG')
layout.label(icon='PLUGIN')
# Optionally you can add more than one icon to further visually describe your plugin's nature.
## layout.label(icon='FILE_BLEND')
def draw(self, context):
layout = self.layout
obj = context.object
row = layout.row()
row.label(text="About This Plugin", icon='HELP')
row = layout.row()
row.operator("mcow.wxblender_wxmenu", text='wxPython Menu', icon='LAMP_DATA')
row = layout.row()
row.operator("mcow.wxblender_wxframe", text='wxPython Frame', icon='LAMP_DATA')
|
Metallicow/wxBlender
|
wx_blender/wxblender.py
|
Python
|
gpl-2.0
| 24,689
|
[
"VisIt"
] |
7d703eb5e5a6e842a01381f4dad0af3f16a55def58075be93d9ea7f3dbc7d1bc
|