# MIT License
#
# Copyright (c) 2017 Matt Boyer
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import logging
import os.path
import shutil
import sqlite3
from . import PROJECT_DESCRIPTION, PROJECT_NAME
from . import _LOGGER
from .db import SQLite_DB
from .heuristics import HeuristicsRegistry
from .pages import Page
def gen_output_dir(db_path):
db_abspath = os.path.abspath(db_path)
db_dir, db_name = os.path.split(db_abspath)
munged_name = db_name.replace('.', '_')
out_dir = os.path.join(db_dir, munged_name)
if not os.path.exists(out_dir):
return out_dir
suffix = 1
while suffix <= 10:
out_dir = os.path.join(db_dir, "{}_{}".format(munged_name, suffix))
if not os.path.exists(out_dir):
return out_dir
suffix += 1
raise SystemError(
"Unreasonable number of output directories for {}".format(db_path)
)
def _load_db(sqlite_path):
_LOGGER.info("Processing %s", sqlite_path)
registry = HeuristicsRegistry()
registry.load_heuristics()
db = SQLite_DB(sqlite_path, registry)
_LOGGER.info("Database: %r", db)
db.populate_freelist_pages()
db.populate_ptrmap_pages()
db.populate_overflow_pages()
# Should we aim to instantiate specialised b-tree objects here, or is the
# use of generic btree page objects acceptable?
db.populate_btree_pages()
db.map_tables()
# We need a first pass to process table leaf pages that are disconnected
# from their table's root page
db.reparent_orphaned_table_leaf_pages()
# All pages should now be represented by specialised objects
assert all(isinstance(p, Page) for p in db.pages.values())
assert not any(type(p) is Page for p in db.pages.values())
return db
def dump_to_csv(args):
out_dir = args.output_dir or gen_output_dir(args.sqlite_path)
db = _load_db(args.sqlite_path)
if os.path.exists(out_dir):
raise ValueError("Output directory {} exists!".format(out_dir))
os.mkdir(out_dir)
for table_name in sorted(db.tables):
table = db.tables[table_name]
_LOGGER.info("Table \"%s\"", table)
table.recover_records(args.database_name)
table.csv_dump(out_dir)
def undelete(args):
db_abspath = os.path.abspath(args.sqlite_path)
db = _load_db(db_abspath)
output_path = os.path.abspath(args.output_path)
if os.path.exists(output_path):
raise ValueError("Output file {} exists!".format(output_path))
shutil.copyfile(db_abspath, output_path)
with sqlite3.connect(output_path) as output_db_connection:
cursor = output_db_connection.cursor()
for table_name in sorted(db.tables):
table = db.tables[table_name]
_LOGGER.info("Table \"%s\"", table)
table.recover_records(args.database_name)
failed_inserts = 0
constraint_violations = 0
successful_inserts = 0
for leaf_page in table.leaves:
if not leaf_page.recovered_records:
continue
for record in leaf_page.recovered_records:
insert_statement, values = table.build_insert_SQL(record)
try:
cursor.execute(insert_statement, values)
except sqlite3.IntegrityError:
# We gotta soldier on, there's not much we can do if a
# constraint is violated by this insert
constraint_violations += 1
except (
sqlite3.ProgrammingError,
sqlite3.OperationalError,
sqlite3.InterfaceError
) as insert_ex:
_LOGGER.warning(
(
"Caught %r while executing INSERT statement "
"in \"%s\""
),
insert_ex,
table
)
failed_inserts += 1
# pdb.set_trace()
else:
successful_inserts += 1
if failed_inserts > 0:
_LOGGER.warning(
"%d failed INSERT statements in \"%s\"",
failed_inserts, table
)
if constraint_violations > 0:
_LOGGER.warning(
"%d INSERT statements with constraint violations in \"%s\"",
constraint_violations, table
)
_LOGGER.info(
"%d successful INSERT statements in \"%s\"",
successful_inserts, table
)
def find_in_db(args):
db = _load_db(args.sqlite_path)
db.grep(args.needle)
def list_supported(args): # pylint:disable=W0613
registry = HeuristicsRegistry()
registry.load_heuristics()
for db in registry.groupings:
print(db)
subcmd_actions = {
'csv': dump_to_csv,
'grep': find_in_db,
'undelete': undelete,
'list': list_supported,
}
def subcmd_dispatcher(arg_ns):
return subcmd_actions[arg_ns.subcmd](arg_ns)
def main():
verbose_parser = argparse.ArgumentParser(add_help=False)
verbose_parser.add_argument(
'-v', '--verbose',
action='count',
help='Give *A LOT* more output.',
)
cli_parser = argparse.ArgumentParser(
description=PROJECT_DESCRIPTION,
parents=[verbose_parser],
)
subcmd_parsers = cli_parser.add_subparsers(
title='Subcommands',
description='%(prog)s implements the following subcommands:',
dest='subcmd',
)
csv_parser = subcmd_parsers.add_parser(
'csv',
parents=[verbose_parser],
help='Dumps visible and recovered records to CSV files',
description=(
'Recovers as many records as possible from the database passed as '
'argument and outputs all visible and recovered records to CSV '
'files in output_dir'
),
)
csv_parser.add_argument(
'sqlite_path',
help='sqlite3 file path'
)
csv_parser.add_argument(
'output_dir',
nargs='?',
default=None,
help='Output directory'
)
csv_parser.add_argument(
'-d', '--database-name',
nargs='?',
default=None,
help='Database name'
)
list_parser = subcmd_parsers.add_parser( # pylint:disable=W0612
'list',
parents=[verbose_parser],
help='Displays supported DB types',
description=(
'Displays the names of all database types with table heuristics '
'known to {}'.format(PROJECT_NAME)
),
)
grep_parser = subcmd_parsers.add_parser(
'grep',
parents=[verbose_parser],
help='Matches a string in one or more pages of the database',
description='Matches a string in one or more pages of the database',
)
grep_parser.add_argument(
'sqlite_path',
help='sqlite3 file path'
)
grep_parser.add_argument(
'needle',
help='String to match in the database'
)
undelete_parser = subcmd_parsers.add_parser(
'undelete',
parents=[verbose_parser],
help='Inserts recovered records into a copy of the database',
description=(
'Recovers as many records as possible from the database passed as '
'argument and inserts all recovered records into a copy of '
'the database.'
),
)
undelete_parser.add_argument(
'sqlite_path',
help='sqlite3 file path'
)
undelete_parser.add_argument(
'output_path',
help='Output database path'
)
undelete_parser.add_argument(
'-d', '--database-name',
nargs='?',
default=None,
help='Database name'
)
cli_args = cli_parser.parse_args()
if cli_args.verbose:
_LOGGER.setLevel(logging.DEBUG)
if cli_args.subcmd:
subcmd_dispatcher(cli_args)
else:
# No subcommand specified, print the usage and bail
cli_parser.print_help()
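# Example invocations (a sketch only; "undelete_cli" below stands in for whatever
# console entry point PROJECT_NAME is wired to, and the database paths are hypothetical):
#
#   undelete_cli list
#   undelete_cli csv damaged.sqlite csv_out
#   undelete_cli grep damaged.sqlite "needle"
#   undelete_cli undelete damaged.sqlite recovered.sqlite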
##
# Copyright 2013-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing PSI, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
@author: Ward Poelmans (Ghent University)
"""
from distutils.version import LooseVersion
import glob
import os
import shutil
import tempfile
import easybuild.tools.environment as env
from easybuild.easyblocks.generic.cmakemake import CMakeMake
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.framework.easyconfig import BUILD
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import get_software_root
from easybuild.tools.run import run_cmd
class EB_PSI(CMakeMake):
"""
Support for building and installing PSI
"""
def __init__(self, *args, **kwargs):
"""Initialize class variables custom to PSI."""
super(EB_PSI, self).__init__(*args, **kwargs)
self.psi_srcdir = None
self.install_psi_objdir = None
self.install_psi_srcdir = None
@staticmethod
def extra_options():
"""Extra easyconfig parameters specific to PSI."""
extra_vars = CMakeMake.extra_options()
extra_vars.update({
# always include running PSI unit tests (takes about 2h or less)
'runtest': ["tests TESTFLAGS='-u -q'", "Run tests included with PSI, without interruption.", BUILD],
})
# Doesn't work with out-of-source build
extra_vars['separate_build_dir'][0] = False
return extra_vars
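# A hedged illustration of how the PSI-specific easyconfig parameters handled above
# might appear in an easyconfig (the values shown are the defaults enforced by
# extra_options(), not taken from a real easyconfig):
#
#   runtest = "tests TESTFLAGS='-u -q'"
#   separate_build_dir = False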
def configure_step(self):
"""
Configure build outside of source directory.
"""
try:
objdir = os.path.join(self.builddir, 'obj')
os.makedirs(objdir)
os.chdir(objdir)
except OSError as err:
raise EasyBuildError("Failed to prepare for configuration of PSI build: %s", err)
env.setvar('F77FLAGS', os.getenv('F90FLAGS'))
# In order to create new plugins with PSI, it needs to know the location of the source
# and the obj dir after install. These env vars give that information to the configure script.
self.psi_srcdir = os.path.basename(self.cfg['start_dir'].rstrip(os.sep))
self.install_psi_objdir = os.path.join(self.installdir, 'obj')
self.install_psi_srcdir = os.path.join(self.installdir, self.psi_srcdir)
env.setvar('PSI_OBJ_INSTALL_DIR', self.install_psi_objdir)
env.setvar('PSI_SRC_INSTALL_DIR', self.install_psi_srcdir)
# explicitly specify Python binary to use
pythonroot = get_software_root('Python')
if not pythonroot:
raise EasyBuildError("Python module not loaded.")
# up to and including 4.0b5 the build used autotools; newer versions use CMake
if LooseVersion(self.version) <= LooseVersion("4.0b5") and self.name == "PSI":
# Use EB Boost
boostroot = get_software_root('Boost')
if not boostroot:
raise EasyBuildError("Boost module not loaded.")
self.log.info("Using configure based build")
env.setvar('PYTHON', os.path.join(pythonroot, 'bin', 'python'))
env.setvar('USE_SYSTEM_BOOST', 'TRUE')
if self.toolchain.options.get('usempi', None):
# PSI doesn't require a Fortran compiler itself, but may require it to link to BLAS/LAPACK correctly
# we should always specify the sequential Fortran compiler,
# to avoid problems with -lmpi vs -lmpi_mt during linking
fcompvar = 'F77_SEQ'
else:
fcompvar = 'F77'
# update configure options
# using multi-threaded BLAS/LAPACK is important for performance,
# cfr. http://sirius.chem.vt.edu/psi4manual/latest/installfile.html#sec-install-iii
opt_vars = [
('cc', 'CC'),
('cxx', 'CXX'),
('fc', fcompvar),
('libdirs', 'LDFLAGS'),
('blas', 'LIBBLAS_MT'),
('lapack', 'LIBLAPACK_MT'),
]
for (opt, var) in opt_vars:
self.cfg.update('configopts', "--with-%s='%s'" % (opt, os.getenv(var)))
# -DMPICH_IGNORE_CXX_SEEK works around a problem with the order of the stdio.h and mpi.h headers:
# both define SEEK_SET, and this flag makes the MPI definition be ignored
self.cfg.update('configopts', "--with-opt='%s -DMPICH_IGNORE_CXX_SEEK'" % os.getenv('CFLAGS'))
# specify location of Boost
self.cfg.update('configopts', "--with-boost=%s" % boostroot)
# enable support for plugins
self.cfg.update('configopts', "--with-plugins")
ConfigureMake.configure_step(self, cmd_prefix=self.cfg['start_dir'])
else:
self.log.info("Using CMake based build")
self.cfg.update('configopts', ' -DPYTHON_EXECUTABLE=%s' % os.path.join(pythonroot, 'bin', 'python'))
if self.name == 'PSI4' and LooseVersion(self.version) >= LooseVersion("1.2"):
self.log.info("Remove the CMAKE_BUILD_TYPE test in PSI4 source and the downloaded dependencies!")
self.log.info("Use PATCH_COMMAND in the corresponding CMakeLists.txt")
self.cfg['build_type'] = 'EasyBuildRelease'
if self.toolchain.options.get('usempi', None):
self.cfg.update('configopts', " -DENABLE_MPI=ON")
if get_software_root('imkl'):
self.cfg.update('configopts', " -DENABLE_CSR=ON -DBLAS_TYPE=MKL")
if self.name == 'PSI4':
pcmsolverroot = get_software_root('PCMSolver')
if pcmsolverroot:
if LooseVersion(self.version) >= LooseVersion("1.1"):
pcmsolver = 'PCMSolver'
else:
pcmsolver = 'PCMSOLVER'
self.cfg.update('configopts', " -DENABLE_%s=ON" % pcmsolver)
if LooseVersion(self.version) < LooseVersion("1.2"):
self.cfg.update('configopts', " -DPCMSOLVER_ROOT=%s" % pcmsolverroot)
else:
self.cfg.update('configopts', " -DCMAKE_INSIST_FIND_PACKAGE_PCMSolver=ON "
"-DPCMSolver_DIR=%s/share/cmake/PCMSolver" % pcmsolverroot)
chempsroot = get_software_root('CheMPS2')
if chempsroot:
if LooseVersion(self.version) >= LooseVersion("1.1"):
chemps2 = 'CheMPS2'
else:
chemps2 = 'CHEMPS2'
self.cfg.update('configopts', " -DENABLE_%s=ON" % chemps2)
if LooseVersion(self.version) < LooseVersion("1.2"):
self.cfg.update('configopts', " -DCHEMPS2_ROOT=%s" % chempsroot)
else:
self.cfg.update('configopts', " -DCMAKE_INSIST_FIND_PACKAGE_CheMPS2=ON "
"-DCheMPS2_DIR=%s/share/cmake/CheMPS2" % chempsroot)
# Be aware: PSI4 wants exact versions of the following dependencies, built with CMake!
# If you want to use versions that were not built with CMake, then you have to provide
# the corresponding Find<library-name>.cmake scripts
# In PSI4 version 1.2.1, you can check the corresponding CMakeLists.txt file
# in external/upstream/<library-name>/
if LooseVersion(self.version) >= LooseVersion("1.2"):
for dep in ['libxc', 'Libint', 'pybind11', 'gau2grid']:
deproot = get_software_root(dep)
if deproot:
self.cfg.update('configopts', " -DCMAKE_INSIST_FIND_PACKAGE_%s=ON" % dep)
dep_dir = os.path.join(deproot, 'share', 'cmake', dep)
self.cfg.update('configopts', " -D%s_DIR=%s " % (dep, dep_dir))
CMakeMake.configure_step(self, srcdir=self.cfg['start_dir'])
def install_step(self):
"""Custom install procedure for PSI."""
super(EB_PSI, self).install_step()
# the obj and unpacked sources must remain available for working with plugins
try:
for subdir in ['obj', self.psi_srcdir]:
# copy symlinks as symlinks to work around broken symlinks
shutil.copytree(os.path.join(self.builddir, subdir), os.path.join(self.installdir, subdir),
symlinks=True)
except OSError as err:
raise EasyBuildError("Failed to copy obj and unpacked sources to install dir: %s", err)
def test_step(self):
"""
Run the testsuite of PSI4
"""
testdir = tempfile.mkdtemp()
env.setvar('PSI_SCRATCH', testdir)
if self.name == 'PSI4' and LooseVersion(self.version) >= LooseVersion("1.2"):
if self.cfg['runtest']:
paracmd = ''
# Run ctest in parallel, but limit to a maximum of 4 jobs (in case of slow disks)
if self.cfg['parallel']:
if self.cfg['parallel'] > 4:
paracmd = '-j 4'
else:
paracmd = "-j %s" % self.cfg['parallel']
cmd = "ctest %s %s" % (paracmd, self.cfg['runtest'])
run_cmd(cmd, log_all=True, simple=False)
else:
super(EB_PSI, self).test_step()
try:
shutil.rmtree(testdir)
except OSError as err:
raise EasyBuildError("Failed to remove test directory %s: %s", testdir, err)
def sanity_check_step(self):
"""Custom sanity check for PSI."""
custom_paths = {
'files': ['bin/psi4'],
'dirs': ['include', ('share/psi', 'share/psi4')],
}
super(EB_PSI, self).sanity_check_step(custom_paths=custom_paths)
def make_module_extra(self):
"""Custom variables for PSI module."""
txt = super(EB_PSI, self).make_module_extra()
share_dir = os.path.join(self.installdir, 'share')
if os.path.exists(share_dir):
psi4datadir = glob.glob(os.path.join(share_dir, 'psi*'))
if len(psi4datadir) == 1:
txt += self.module_generator.set_environment('PSI4DATADIR', psi4datadir[0])
else:
raise EasyBuildError("Failed to find exactly one PSI4 data dir: %s", psi4datadir)
return txt
"""MODFLOW support utilities"""
import os
from datetime import datetime
import shutil
import warnings
import numpy as np
import pandas as pd
import re
pd.options.display.max_colwidth = 100
from pyemu.pst.pst_utils import (
SFMT,
IFMT,
FFMT,
pst_config,
parse_tpl_file,
try_process_output_file,
)
from pyemu.utils.os_utils import run
from pyemu.utils.helpers import _write_df_tpl
from ..pyemu_warnings import PyemuWarning
PP_FMT = {
"name": SFMT,
"x": FFMT,
"y": FFMT,
"zone": IFMT,
"tpl": SFMT,
"parval1": FFMT,
}
PP_NAMES = ["name", "x", "y", "zone", "parval1"]
def modflow_pval_to_template_file(pval_file, tpl_file=None):
"""write a template file for a modflow parameter value file.
Args:
pval_file (`str`): the path and name of the existing modflow pval file
tpl_file (`str`, optional): template file to write. If None, use
`pval_file` +".tpl". Default is None
Note:
Uses names in the first column in the pval file as par names.
Returns:
**pandas.DataFrame**: a dataFrame with control file parameter information
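Example:
    a minimal usage sketch; the pval file name below is hypothetical and must
    exist when this is run::

        import pyemu
        df = pyemu.gw_utils.modflow_pval_to_template_file("my_model.pval")
        # writes "my_model.pval.tpl" and returns a dataframe of parameter info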
"""
if tpl_file is None:
tpl_file = pval_file + ".tpl"
pval_df = pd.read_csv(
pval_file,
delim_whitespace=True,
header=None,
skiprows=2,
names=["parnme", "parval1"],
)
pval_df.index = pval_df.parnme
pval_df.loc[:, "tpl"] = pval_df.parnme.apply(lambda x: " ~ {0:15s} ~".format(x))
with open(tpl_file, "w") as f:
f.write("ptf ~\n#pval template file from pyemu\n")
f.write("{0:10d} #NP\n".format(pval_df.shape[0]))
f.write(
pval_df.loc[:, ["parnme", "tpl"]].to_string(
col_space=0,
formatters=[SFMT, SFMT],
index=False,
header=False,
justify="left",
)
)
return pval_df
def modflow_hob_to_instruction_file(hob_file, ins_file=None):
"""write an instruction file for a modflow head observation file
Args:
hob_file (`str`): the path and name of the existing modflow hob file
ins_file (`str`, optional): the name of the instruction file to write.
If `None`, `hob_file` +".ins" is used. Default is `None`.
Returns:
**pandas.DataFrame**: a dataFrame with control file observation information
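Example:
    a minimal usage sketch; the hob output file name below is hypothetical::

        import pyemu
        hob_df = pyemu.gw_utils.modflow_hob_to_instruction_file("my_model.hob.out")
        # writes "my_model.hob.out.ins" for use in a PEST interface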
"""
hob_df = pd.read_csv(
hob_file,
delim_whitespace=True,
skiprows=1,
header=None,
names=["simval", "obsval", "obsnme"],
)
hob_df.loc[:, "obsnme"] = hob_df.obsnme.apply(str.lower)
hob_df.loc[:, "ins_line"] = hob_df.obsnme.apply(lambda x: "l1 !{0:s}!".format(x))
hob_df.loc[0, "ins_line"] = hob_df.loc[0, "ins_line"].replace("l1", "l2")
if ins_file is None:
ins_file = hob_file + ".ins"
f_ins = open(ins_file, "w")
f_ins.write("pif ~\n")
f_ins.write(
hob_df.loc[:, ["ins_line"]].to_string(
col_space=0,
columns=["ins_line"],
header=False,
index=False,
formatters=[SFMT],
)
+ "\n"
)
hob_df.loc[:, "weight"] = 1.0
hob_df.loc[:, "obgnme"] = "obgnme"
f_ins.close()
return hob_df
def modflow_hydmod_to_instruction_file(hydmod_file, ins_file=None):
"""write an instruction file for a modflow hydmod file
Args:
hydmod_file (`str`): the path and name of the existing modflow hydmod file
ins_file (`str`, optional): the name of the instruction file to write.
If `None`, `hydmod_file` +".ins" is used. Default is `None`.
Returns:
**pandas.DataFrame**: a dataFrame with control file observation information
Note:
calls `pyemu.gw_utils.modflow_read_hydmod_file()`
"""
hydmod_df, hydmod_outfile = modflow_read_hydmod_file(hydmod_file)
hydmod_df.loc[:, "ins_line"] = hydmod_df.obsnme.apply(
lambda x: "l1 w !{0:s}!".format(x)
)
if ins_file is None:
ins_file = hydmod_outfile + ".ins"
with open(ins_file, "w") as f_ins:
f_ins.write("pif ~\nl1\n")
f_ins.write(
hydmod_df.loc[:, ["ins_line"]].to_string(
col_space=0,
columns=["ins_line"],
header=False,
index=False,
formatters=[SFMT],
)
+ "\n"
)
hydmod_df.loc[:, "weight"] = 1.0
hydmod_df.loc[:, "obgnme"] = "obgnme"
df = try_process_output_file(hydmod_outfile + ".ins")
if df is not None:
df.loc[:, "obsnme"] = df.index.values
df.loc[:, "obgnme"] = df.obsnme.apply(lambda x: x[:-9])
df.to_csv("_setup_" + os.path.split(hydmod_outfile)[-1] + ".csv", index=False)
return df
return hydmod_df
def modflow_read_hydmod_file(hydmod_file, hydmod_outfile=None):
"""read a binary hydmod file and return a dataframe of the results
Args:
hydmod_file (`str`): The path and name of the existing modflow hydmod binary file
hydmod_outfile (`str`, optional): output file to write. If `None`, use `hydmod_file` +".dat".
Default is `None`.
Returns:
**pandas.DataFrame**: a dataframe with `hydmod_file` values
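Example:
    a minimal usage sketch; the hydmod binary file name below is hypothetical::

        import pyemu
        df, outfile = pyemu.gw_utils.modflow_read_hydmod_file("my_model.hyd.bin")
        # df has "obsnme" and "obsval" columns; "my_model.hyd.bin.dat" is also written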
"""
try:
import flopy.utils as fu
except Exception as e:
print("flopy is not installed - cannot read {0}\n{1}".format(hydmod_file, e))
return
obs = fu.HydmodObs(hydmod_file)
hyd_df = obs.get_dataframe()
hyd_df.columns = [i[2:] if i.lower() != "totim" else i for i in hyd_df.columns]
# hyd_df.loc[:,"datetime"] = hyd_df.index
hyd_df["totim"] = hyd_df.index.map(lambda x: x.strftime("%Y%m%d"))
hyd_df.rename(columns={"totim": "datestamp"}, inplace=True)
# reshape into a single column
hyd_df = pd.melt(hyd_df, id_vars="datestamp")
hyd_df.rename(columns={"value": "obsval"}, inplace=True)
hyd_df["obsnme"] = [
i.lower() + "_" + j.lower() for i, j in zip(hyd_df.variable, hyd_df.datestamp)
]
vc = hyd_df.obsnme.value_counts().sort_values()
vc = list(vc.loc[vc > 1].index.values)
if len(vc) > 0:
hyd_df.to_csv("hyd_df.duplicates.csv")
obs.get_dataframe().to_csv("hyd_org.duplicates.csv")
raise Exception("duplicates in obsnme:{0}".format(vc))
# assert hyd_df.obsnme.value_counts().max() == 1,"duplicates in obsnme"
if not hydmod_outfile:
hydmod_outfile = hydmod_file + ".dat"
hyd_df.to_csv(hydmod_outfile, columns=["obsnme", "obsval"], sep=" ", index=False)
# hyd_df = hyd_df[['obsnme','obsval']]
return hyd_df[["obsnme", "obsval"]], hydmod_outfile
def setup_mtlist_budget_obs(
list_filename,
gw_filename="mtlist_gw.dat",
sw_filename="mtlist_sw.dat",
start_datetime="1-1-1970",
gw_prefix="gw",
sw_prefix="sw",
save_setup_file=False,
):
"""setup observations of gw (and optionally sw) mass budgets from mt3dusgs list file.
Args:
list_filename (`str`): path and name of existing modflow list file
gw_filename (`str`, optional): output filename that will contain the gw budget
observations. Default is "mtlist_gw.dat"
sw_filename (`str`, optional): output filename that will contain the sw budget
observations. Default is "mtlist_sw.dat"
start_datetime (`str`, optional): an str that can be parsed into a `pandas.TimeStamp`.
used to give budget observations meaningful names. Default is "1-1-1970".
gw_prefix (`str`, optional): a prefix to add to the GW budget observations.
Useful if processing more than one list file as part of the forward run process.
Default is 'gw'.
sw_prefix (`str`, optional): a prefix to add to the SW budget observations. Useful
if processing more than one list file as part of the forward run process.
Default is 'sw'.
save_setup_file (`bool`, optional): a flag to save "_setup_"+ `list_filename` +".csv" file
that contains useful control file information. Default is `False`.
Returns:
tuple containing
- **str**: the command to add to the forward run script
- **str**: the names of the instruction files that were created
- **pandas.DataFrame**: a dataframe with information for constructing a control file
Note:
writes an instruction file and also a _setup_.csv to use when constructing a pest
control file
The instruction files are named `out_filename` +".ins"
It is recommended to use the default value for `gw_filename` or `sw_filename`.
This is the companion function of `gw_utils.apply_mtlist_budget_obs()`.
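Example:
    a minimal usage sketch; the list file name below is hypothetical::

        import pyemu
        frun_line, ins_files, df = pyemu.gw_utils.setup_mtlist_budget_obs(
            "mt3d.list", start_datetime="1-1-1970")
        # frun_line is added to the forward run script; df feeds the control file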
"""
gw, sw = apply_mtlist_budget_obs(
list_filename, gw_filename, sw_filename, start_datetime
)
gw_ins = gw_filename + ".ins"
_write_mtlist_ins(gw_ins, gw, gw_prefix)
ins_files = [gw_ins]
df_gw = try_process_output_file(gw_ins, gw_filename)
if df_gw is None:
raise Exception("error processing groundwater instruction file")
if sw is not None:
sw_ins = sw_filename + ".ins"
_write_mtlist_ins(sw_ins, sw, sw_prefix)
ins_files.append(sw_ins)
df_sw = try_process_output_file(sw_ins, sw_filename)
if df_sw is None:
raise Exception("error processing surface water instruction file")
df_gw = df_gw.append(df_sw)
df_gw.loc[:, "obsnme"] = df_gw.index.values
if save_setup_file:
df_gw.to_csv("_setup_" + os.path.split(list_filename)[-1] + ".csv", index=False)
frun_line = "pyemu.gw_utils.apply_mtlist_budget_obs('{0}')".format(list_filename)
return frun_line, ins_files, df_gw
def _write_mtlist_ins(ins_filename, df, prefix):
"""write an instruction file for a MT3D-USGS list file"""
try:
dt_str = df.index.map(lambda x: x.strftime("%Y%m%d"))
except:
dt_str = df.index.map(lambda x: "{0:08.1f}".format(x).strip())
with open(ins_filename, "w") as f:
f.write("pif ~\nl1\n")
for dt in dt_str:
f.write("l1 ")
for col in df.columns.str.translate(
{ord(s): None for s in ["(", ")", "/", "="]}
):
if prefix == "":
obsnme = "{0}_{1}".format(col, dt)
else:
obsnme = "{0}_{1}_{2}".format(prefix, col, dt)
f.write(" w !{0}!".format(obsnme))
f.write("\n")
def apply_mtlist_budget_obs(
list_filename,
gw_filename="mtlist_gw.dat",
sw_filename="mtlist_sw.dat",
start_datetime="1-1-1970",
):
"""process an MT3D-USGS list file to extract mass budget entries.
Args:
list_filename (`str`): the path and name of an existing MT3D-USGS list file
gw_filename (`str`, optional): the name of the output file with gw mass
budget information. Default is "mtlist_gw.dat"
sw_filename (`str`): the name of the output file with sw mass budget information.
Default is "mtlist_sw.dat"
start_datetime (`str`): an str that can be cast to a pandas.TimeStamp. Used to give
observations a meaningful name
Returns:
2-element tuple containing
- **pandas.DataFrame**: the gw mass budget dataframe
- **pandas.DataFrame**: (optional) the sw mass budget dataframe.
If the SFT process is not active, this returned value is `None`.
Note:
This is the companion function of `gw_utils.setup_mtlist_budget_obs()`.
"""
try:
import flopy
except Exception as e:
raise Exception("error importing flopy: {0}".format(str(e)))
mt = flopy.utils.MtListBudget(list_filename)
gw, sw = mt.parse(start_datetime=start_datetime, diff=True)
gw = gw.drop(
[
col
for col in gw.columns
for drop_col in ["kper", "kstp", "tkstp"]
if (col.lower().startswith(drop_col))
],
axis=1,
)
gw.to_csv(gw_filename, sep=" ", index_label="datetime", date_format="%Y%m%d")
if sw is not None:
sw = sw.drop(
[
col
for col in sw.columns
for drop_col in ["kper", "kstp", "tkstp"]
if (col.lower().startswith(drop_col))
],
axis=1,
)
sw.to_csv(sw_filename, sep=" ", index_label="datetime", date_format="%Y%m%d")
return gw, sw
def setup_mflist_budget_obs(
list_filename,
flx_filename="flux.dat",
vol_filename="vol.dat",
start_datetime="1-1-1970",
prefix="",
save_setup_file=False,
specify_times=None,
):
"""setup observations of budget volume and flux from modflow list file.
Args:
list_filename (`str`): path and name of the existing modflow list file
flx_filename (`str`, optional): output filename that will contain the budget flux
observations. Default is "flux.dat"
vol_filename (`str`, optional): output filename that will contain the budget volume
observations. Default is "vol.dat"
start_datetime (`str`, optional): a string that can be parsed into a pandas.TimeStamp.
This is used to give budget observations meaningful names. Default is "1-1-1970".
prefix (`str`, optional): a prefix to add to the water budget observations. Useful if
processing more than one list file as part of the forward run process. Default is ''.
save_setup_file (`bool`): a flag to save "_setup_"+ `list_filename` +".csv" file that contains useful
control file information
specify_times (`np.ndarray`-like, optional): An array of times to
extract from the budget dataframes returned by the flopy
MfListBudget(list_filename).get_dataframe() method. This can be
useful to ensure consistent observation times for PEST.
The array needs to be alignable with the index of the dataframe
returned by the flopy method; care should be taken to ensure that
this is the case. If passed, it will be written to the
"budget_times.config" file as strings to be read by the companion
`apply_mflist_budget_obs()` method at run time.
Returns:
**pandas.DataFrame**: a dataframe with information for constructing a control file.
Note:
This method writes instruction files and also a _setup_.csv to use when constructing a pest
control file. The instruction files are named <flux_file>.ins and <vol_file>.ins, respectively
It is recommended to use the default values for flux_file and vol_file.
This is the companion function of `gw_utils.apply_mflist_budget_obs()`.
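Example:
    a minimal usage sketch; the list file name below is hypothetical::

        import pyemu
        df = pyemu.gw_utils.setup_mflist_budget_obs(
            "my_model.list", start_datetime="1-1-1970", save_setup_file=True)
        # writes flux.dat.ins and vol.dat.ins and returns observation info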
"""
flx, vol = apply_mflist_budget_obs(
list_filename, flx_filename, vol_filename, start_datetime, times=specify_times
)
_write_mflist_ins(flx_filename + ".ins", flx, prefix + "flx")
_write_mflist_ins(vol_filename + ".ins", vol, prefix + "vol")
df = try_process_output_file(flx_filename + ".ins")
if df is None:
raise Exception("error processing flux instruction file")
df2 = try_process_output_file(vol_filename + ".ins")
if df2 is None:
raise Exception("error processing volume instruction file")
df = df.append(df2)
df.loc[:, "obsnme"] = df.index.values
if save_setup_file:
df.to_csv("_setup_" + os.path.split(list_filename)[-1] + ".csv", index=False)
if specify_times is not None:
np.savetxt(
os.path.join(os.path.dirname(flx_filename), "budget_times.config"),
specify_times,
fmt="%s",
)
return df
def apply_mflist_budget_obs(
list_filename,
flx_filename="flux.dat",
vol_filename="vol.dat",
start_datetime="1-1-1970",
times=None,
):
"""process a MODFLOW list file to extract flux and volume water budget
entries.
Args:
list_filename (`str`): path and name of the existing modflow list file
flx_filename (`str`, optional): output filename that will contain the
budget flux observations. Default is "flux.dat"
vol_filename (`str`, optional): output filename that will contain the
budget volume observations. Default is "vol.dat"
start_datetime (`str`, optional): a string that can be parsed into a
pandas.TimeStamp. This is used to give budget observations
meaningful names. Default is "1-1-1970".
times (`np.ndarray`-like or `str`, optional): An array of times to
extract from the budget dataframes returned by the flopy
MfListBudget(list_filename).get_dataframe() method. This can be
useful to ensure consistent observation times for PEST.
If type `str`, will assume `times=filename` and attempt to read
single vector (no header or index) from file, parsing datetime
using pandas. The array needs to be alignable with the index of the dataframe
returned by the flopy method; care should be taken to ensure that
this is the case. If set up with `setup_mflist_budget_obs()`
specifying the `specify_times` argument, `times` should be set to
"budget_times.config".
Note:
This is the companion function of `gw_utils.setup_mflist_budget_obs()`.
Returns:
tuple containing
- **pandas.DataFrame**: a dataframe with flux budget information
- **pandas.DataFrame**: a dataframe with cumulative budget information
"""
try:
import flopy
except Exception as e:
raise Exception("error importing flopy: {0}".format(str(e)))
mlf = flopy.utils.MfListBudget(list_filename)
flx, vol = mlf.get_dataframes(start_datetime=start_datetime, diff=True)
if times is not None:
if isinstance(times, str):
if vol.index.tzinfo:
parse_date = {"t": [0]}
names = [None]
else:
parse_date = False
names = ["t"]
times = pd.read_csv(
times, header=None, names=names, parse_dates=parse_date
)["t"].values
flx = flx.loc[times]
vol = vol.loc[times]
flx.to_csv(flx_filename, sep=" ", index_label="datetime", date_format="%Y%m%d")
vol.to_csv(vol_filename, sep=" ", index_label="datetime", date_format="%Y%m%d")
return flx, vol
def _write_mflist_ins(ins_filename, df, prefix):
"""write an instruction file for a MODFLOW list file"""
dt_str = df.index.map(lambda x: x.strftime("%Y%m%d"))
with open(ins_filename, "w") as f:
f.write("pif ~\nl1\n")
for dt in dt_str:
f.write("l1 ")
for col in df.columns:
obsnme = "{0}_{1}_{2}".format(prefix, col, dt)
f.write(" w !{0}!".format(obsnme))
f.write("\n")
def setup_hds_timeseries(
bin_file,
kij_dict,
prefix=None,
include_path=False,
model=None,
postprocess_inact=None,
text=None,
fill=None,
precision="single",
):
"""a function to set up a forward process to extract time-series style values
from a modflow binary file (or equivalent format - hds, ucn, sub, cbb, etc.).
Args:
bin_file (`str`): path and name of existing modflow binary file - headsave, cell budget and MT3D UCN supported.
kij_dict (`dict`): dictionary of site_name: [k,i,j] pairs. For example: `{"wel1":[0,1,1]}`.
prefix (`str`, optional): string to prepend to site_name when forming observation names. Default is None
include_path (`bool`, optional): flag to set up the binary file processing in the directory where the hds_file
is located (if different from where python is running). This is useful for setting up
the process in a separate directory from where python is running.
model (`flopy.mbase`, optional): a `flopy.basemodel` instance. If passed, the observation names will
have the datetime of the observation appended to them (using the flopy `start_datetime` attribute).
If None, the observation names will have the zero-based stress period appended to them. Default is None.
postprocess_inact (`float`, optional): Inactive value in heads/ucn file e.g. mt.btn.cinit. If `None`, no
inactive value processing happens. Default is `None`.
text (`str`): the text record entry in the binary file (e.g. "constant_head").
Used to indicate that the binary file is a MODFLOW cell-by-cell budget file.
If None, headsave or MT3D unformatted concentration file
is assumed. Default is None
fill (`float`): fill value for NaNs in the extracted timeseries dataframe. If
`None`, no filling is done, which may yield model run failures as the resulting
processed timeseries CSV file (produced at runtime) may have missing values and
can't be processed with the corresponding instruction file. Default is `None`.
precision (`str`): the precision of the binary file. Can be "single" or "double".
Default is "single".
Returns:
tuple containing
- **str**: the forward run command to execute the binary file process during model runs.
- **pandas.DataFrame**: a dataframe of observation information for use in the pest control file
Note:
This function writes hds_timeseries.config that must be in the same
dir where `apply_hds_timeseries()` is called during the forward run
Assumes model time units are days
This is the companion function of `gw_utils.apply_hds_timeseries()`.
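Example:
    a minimal usage sketch; the binary file name and site location below are
    hypothetical::

        import pyemu
        frun_line, df = pyemu.gw_utils.setup_hds_timeseries(
            "my_model.hds", kij_dict={"wel1": [0, 1, 1]}, prefix="hds")
        # frun_line is added to the forward run script; df holds observation info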
"""
try:
import flopy
except Exception as e:
print("error importing flopy, returning {0}".format(str(e)))
return
assert os.path.exists(bin_file), "binary file not found"
iscbc = False
if text is not None:
text = text.upper()
try:
# hack: if model is passed and it's None, it trips up CellBudgetFile...
if model is not None:
bf = flopy.utils.CellBudgetFile(
bin_file, precision=precision, model=model
)
iscbc = True
else:
bf = flopy.utils.CellBudgetFile(bin_file, precision=precision)
iscbc = True
except Exception as e:
try:
if model is not None:
bf = flopy.utils.HeadFile(
bin_file, precision=precision, model=model, text=text
)
else:
bf = flopy.utils.HeadFile(bin_file, precision=precision, text=text)
except Exception as e1:
raise Exception(
"error instantiating binary file as either CellBudgetFile:{0} or as HeadFile with text arg: {1}".format(
str(e), str(e1)
)
)
if iscbc:
tl = [t.decode().strip() for t in bf.textlist]
if text not in tl:
raise Exception(
"'text' {0} not found in CellBudgetFile.textlist:{1}".format(
text, tl
)
)
elif bin_file.lower().endswith(".ucn"):
try:
bf = flopy.utils.UcnFile(bin_file, precision=precision)
except Exception as e:
raise Exception("error instantiating UcnFile:{0}".format(str(e)))
else:
try:
bf = flopy.utils.HeadFile(bin_file, precision=precision)
except Exception as e:
raise Exception("error instantiating HeadFile:{0}".format(str(e)))
if text is None:
text = "none"
nlay, nrow, ncol = bf.nlay, bf.nrow, bf.ncol
# if include_path:
# pth = os.path.join(*[p for p in os.path.split(hds_file)[:-1]])
# config_file = os.path.join(pth,"{0}_timeseries.config".format(hds_file))
# else:
config_file = "{0}_timeseries.config".format(bin_file)
print("writing config file to {0}".format(config_file))
if fill is None:
fill = "none"
f_config = open(config_file, "w")
if model is not None:
if model.dis.itmuni != 4:
warnings.warn(
"setup_hds_timeseries only supports 'days' time units...", PyemuWarning
)
f_config.write(
"{0},{1},d,{2},{3},{4},{5}\n".format(
os.path.split(bin_file)[-1],
model.start_datetime,
text,
fill,
precision,
iscbc,
)
)
start = pd.to_datetime(model.start_datetime)
else:
f_config.write(
"{0},none,none,{1},{2},{3},{4}\n".format(
os.path.split(bin_file)[-1], text, fill, precision, iscbc
)
)
f_config.write("site,k,i,j\n")
dfs = []
for site, (k, i, j) in kij_dict.items():
assert k >= 0 and k < nlay, k
assert i >= 0 and i < nrow, i
assert j >= 0 and j < ncol, j
site = site.lower().replace(" ", "")
if iscbc:
ts = bf.get_ts((k, i, j), text=text)
# print(ts)
df = pd.DataFrame(data=ts, columns=["totim", site])
else:
df = pd.DataFrame(data=bf.get_ts((k, i, j)), columns=["totim", site])
if model is not None:
dts = start + pd.to_timedelta(df.totim, unit="d")
df.loc[:, "totim"] = dts
# print(df)
f_config.write("{0},{1},{2},{3}\n".format(site, k, i, j))
df.index = df.pop("totim")
dfs.append(df)
f_config.close()
df = pd.concat(dfs, axis=1).T
df.to_csv(bin_file + "_timeseries.processed", sep=" ")
if model is not None:
t_str = df.columns.map(lambda x: x.strftime("%Y%m%d"))
else:
t_str = df.columns.map(lambda x: "{0:08.2f}".format(x))
ins_file = bin_file + "_timeseries.processed.ins"
print("writing instruction file to {0}".format(ins_file))
with open(ins_file, "w") as f:
f.write("pif ~\n")
f.write("l1 \n")
for site in df.index:
# for t in t_str:
f.write("l1 w ")
# for site in df.columns:
for t in t_str:
if prefix is not None:
obsnme = "{0}_{1}_{2}".format(prefix, site, t)
else:
obsnme = "{0}_{1}".format(site, t)
f.write(" !{0}!".format(obsnme))
f.write("\n")
if postprocess_inact is not None:
_setup_postprocess_hds_timeseries(
bin_file, df, config_file, prefix=prefix, model=model
)
bd = "."
if include_path:
bd = os.getcwd()
pth = os.path.join(*[p for p in os.path.split(bin_file)[:-1]])
os.chdir(pth)
config_file = os.path.split(config_file)[-1]
try:
df = apply_hds_timeseries(config_file, postprocess_inact=postprocess_inact)
except Exception as e:
os.chdir(bd)
raise Exception("error in apply_hds_timeseries(): {0}".format(str(e)))
os.chdir(bd)
df = try_process_output_file(ins_file)
if df is None:
raise Exception("error processing {0} instruction file".format(ins_file))
df.loc[:, "weight"] = 0.0
if prefix is not None:
df.loc[:, "obgnme"] = df.index.map(lambda x: "_".join(x.split("_")[:2]))
else:
df.loc[:, "obgnme"] = df.index.map(lambda x: x.split("_")[0])
frun_line = "pyemu.gw_utils.apply_hds_timeseries('{0}',{1})\n".format(
config_file, postprocess_inact
)
return frun_line, df
def apply_hds_timeseries(config_file=None, postprocess_inact=None):
"""process a modflow binary file using a previously written
configuration file
Args:
config_file (`str`, optional): configuration file written by `pyemu.gw_utils.setup_hds_timeseries`.
If `None`, looks for `hds_timeseries.config`
postprocess_inact (`float`, optional): Inactive value in heads/ucn file e.g. mt.btn.cinit. If `None`, no
inactive value processing happens. Default is `None`.
Note:
This is the companion function of `gw_utils.setup_hds_timeseries()`.
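Example:
    a minimal runtime sketch; usually called from the forward run script, in the
    directory that holds the config file written by `setup_hds_timeseries()` (the
    file name below is hypothetical)::

        import pyemu
        df = pyemu.gw_utils.apply_hds_timeseries("my_model.hds_timeseries.config")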
"""
import flopy
if config_file is None:
config_file = "hds_timeseries.config"
assert os.path.exists(config_file), config_file
with open(config_file, "r") as f:
line = f.readline()
if len(line.strip().split(",")) == 6:
(
bf_file,
start_datetime,
time_units,
text,
fill,
precision,
) = line.strip().split(",")
_iscbc = "false"
else:
(
bf_file,
start_datetime,
time_units,
text,
fill,
precision,
_iscbc,
) = line.strip().split(",")
site_df = pd.read_csv(f)
text = text.upper()
if _iscbc.lower().strip() == "false":
iscbc = False
elif _iscbc.lower().strip() == "true":
iscbc = True
else:
raise Exception(
"apply_hds_timeseries() error: unrecognized 'iscbc' string in config file: {0}".format(
_iscbc
)
)
assert os.path.exists(bf_file), "head save file not found"
if iscbc:
try:
bf = flopy.utils.CellBudgetFile(bf_file, precision=precision)
except Exception as e:
raise Exception("error instantiating CellBudgetFile:{0}".format(str(e)))
elif bf_file.lower().endswith(".ucn"):
try:
bf = flopy.utils.UcnFile(bf_file, precision=precision)
except Exception as e:
raise Exception("error instantiating UcnFile:{0}".format(str(e)))
else:
try:
if text != "NONE":
bf = flopy.utils.HeadFile(bf_file, text=text, precision=precision)
else:
bf = flopy.utils.HeadFile(bf_file, precision=precision)
except Exception as e:
raise Exception("error instantiating HeadFile:{0}".format(str(e)))
nlay, nrow, ncol = bf.nlay, bf.nrow, bf.ncol
dfs = []
for site, k, i, j in zip(site_df.site, site_df.k, site_df.i, site_df.j):
assert k >= 0 and k < nlay
assert i >= 0 and i < nrow
assert j >= 0 and j < ncol
if iscbc:
df = pd.DataFrame(
data=bf.get_ts((k, i, j), text=text), columns=["totim", site]
)
else:
df = pd.DataFrame(data=bf.get_ts((k, i, j)), columns=["totim", site])
df.index = df.pop("totim")
dfs.append(df)
df = pd.concat(dfs, axis=1).T
if df.shape != df.dropna().shape:
warnings.warn("NANs in processed timeseries file", PyemuWarning)
if fill.upper() != "NONE":
fill = float(fill)
df.fillna(fill, inplace=True)
# print(df)
df.to_csv(bf_file + "_timeseries.processed", sep=" ")
if postprocess_inact is not None:
_apply_postprocess_hds_timeseries(config_file, postprocess_inact)
return df
def _setup_postprocess_hds_timeseries(
hds_file, df, config_file, prefix=None, model=None
):
"""Dirty function to setup post processing concentrations in inactive/dry cells"""
warnings.warn(
"Setting up post processing of hds or ucn timeseries obs. "
"Prepending 'pp' to obs name may cause length to exceed 20 chars",
PyemuWarning,
)
if model is not None:
t_str = df.columns.map(lambda x: x.strftime("%Y%m%d"))
else:
t_str = df.columns.map(lambda x: "{0:08.2f}".format(x))
if prefix is not None:
prefix = "pp{0}".format(prefix)
else:
prefix = "pp"
ins_file = hds_file + "_timeseries.post_processed.ins"
print("writing instruction file to {0}".format(ins_file))
with open(ins_file, "w") as f:
f.write("pif ~\n")
f.write("l1 \n")
for site in df.index:
f.write("l1 w ")
# for site in df.columns:
for t in t_str:
obsnme = "{0}{1}_{2}".format(prefix, site, t)
f.write(" !{0}!".format(obsnme))
f.write("\n")
frun_line = "pyemu.gw_utils._apply_postprocess_hds_timeseries('{0}')\n".format(
config_file
)
return frun_line
def _apply_postprocess_hds_timeseries(config_file=None, cinact=1e30):
"""private function to post-process binary files"""
import flopy
if config_file is None:
config_file = "hds_timeseries.config"
assert os.path.exists(config_file), config_file
with open(config_file, "r") as f:
line = f.readline()
if len(line.strip().split(",")) == 6:
(
hds_file,
start_datetime,
time_units,
text,
fill,
precision,
) = line.strip().split(",")
_iscbc = "false"
else:
(
hds_file,
start_datetime,
time_units,
text,
fill,
precision,
_iscbc,
) = line.strip().split(",")
site_df = pd.read_csv(f)
# print(site_df)
text = text.upper()
assert os.path.exists(hds_file), "head save file not found"
if hds_file.lower().endswith(".ucn"):
try:
hds = flopy.utils.UcnFile(hds_file, precision=precision)
except Exception as e:
raise Exception("error instantiating UcnFile:{0}".format(str(e)))
else:
try:
if text != "NONE":
hds = flopy.utils.HeadFile(hds_file, text=text, precision=precision)
else:
hds = flopy.utils.HeadFile(hds_file, precision=precision)
except Exception as e:
raise Exception("error instantiating HeadFile:{0}".format(str(e)))
nlay, nrow, ncol = hds.nlay, hds.nrow, hds.ncol
dfs = []
for site, k, i, j in zip(site_df.site, site_df.k, site_df.i, site_df.j):
assert k >= 0 and k < nlay
assert i >= 0 and i < nrow
assert j >= 0 and j < ncol
if text.upper() != "NONE":
df = pd.DataFrame(data=hds.get_ts((k, i, j)), columns=["totim", site])
else:
df = pd.DataFrame(data=hds.get_ts((k, i, j)), columns=["totim", site])
df.index = df.pop("totim")
inact_obs = df[site].apply(lambda x: np.isclose(x, cinact))
if inact_obs.sum() > 0:
assert k + 1 < nlay, "Inactive observation in lowest layer"
df_lower = pd.DataFrame(
data=hds.get_ts((k + 1, i, j)), columns=["totim", site]
)
df_lower.index = df_lower.pop("totim")
df.loc[inact_obs] = df_lower.loc[inact_obs]
print(
"{0} observation(s) post-processed for site {1} at kij ({2},{3},{4})".format(
inact_obs.sum(), site, k, i, j
)
)
dfs.append(df)
df = pd.concat(dfs, axis=1).T
# print(df)
df.to_csv(hds_file + "_timeseries.post_processed", sep=" ")
return df
def setup_hds_obs(
hds_file,
kperk_pairs=None,
skip=None,
prefix="hds",
text="head",
precision="single",
include_path=False,
):
"""a function to set up observations using all values from a layer-stress period
pair.
Args:
hds_file (`str`): path and name of an existing MODFLOW head-save file.
If the hds_file endswith 'ucn', then the file is treated as a UcnFile type.
kperk_pairs ([(int,int)]): a list of len two tuples which are pairs of kper
(zero-based stress period index) and k (zero-based layer index) to
setup observations for. If None, then all layers and stress period records
found in the file will be used. Caution: a very large number of observations may be produced!
skip (variable, optional): a value or function used to determine which values
to skip when setting up observations. If np.isscalar(skip)
is True, then values equal to skip will not be used.
skip can also be an np.ndarray with dimensions equal to the model;
observations are then set up only for cells with non-zero values in the array.
If skip is neither a scalar nor an np.ndarray, it is treated as a function that
returns np.NaN if the value should be skipped.
prefix (`str`): the prefix to use for the observation names. default is "hds".
text (`str`): the text tag for the flopy HeadFile instance. Default is "head"
precision (`str`): the precision string for the flopy HeadFile instance. Default is "single"
include_path (`bool`, optional): flag to set up the binary file processing in the directory where the hds_file
is located (if different from where python is running). This is useful for setting up
the process in a separate directory from where python is running.
Returns:
tuple containing
- **str**: the forward run script line needed to execute the headsave file observation
operation
- **pandas.DataFrame**: a dataframe of pest control file information
Note:
Writes an instruction file and a _setup_ csv used to construct a control file.
This is the companion function to `gw_utils.apply_hds_obs()`.
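Example:
    a minimal usage sketch; the head save file name and skip value below are
    hypothetical::

        import pyemu
        frun_line, df = pyemu.gw_utils.setup_hds_obs(
            "my_model.hds", kperk_pairs=[(0, 0)], skip=-999.0, prefix="hds")
        # frun_line is added to the forward run script; df holds observation info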
"""
try:
import flopy
except Exception as e:
print("error importing flopy, returning {0}".format(str(e)))
return
assert os.path.exists(hds_file), "head save file not found"
if hds_file.lower().endswith(".ucn"):
try:
hds = flopy.utils.UcnFile(hds_file)
except Exception as e:
raise Exception("error instantiating UcnFile:{0}".format(str(e)))
elif text.lower() == "headu":
try:
hds = flopy.utils.HeadUFile(hds_file, text=text, precision=precision)
except Exception as e:
raise Exception("error instantiating HeadFile:{0}".format(str(e)))
else:
try:
hds = flopy.utils.HeadFile(hds_file, text=text, precision=precision)
except Exception as e:
raise Exception("error instantiating HeadFile:{0}".format(str(e)))
if kperk_pairs is None:
kperk_pairs = []
for kstp, kper in hds.kstpkper:
kperk_pairs.extend([(kper - 1, k) for k in range(hds.nlay)])
if len(kperk_pairs) == 2:
try:
if len(kperk_pairs[0]) == 2:
pass
except:
kperk_pairs = [kperk_pairs]
# if start_datetime is not None:
# start_datetime = pd.to_datetime(start_datetime)
# dts = start_datetime + pd.to_timedelta(hds.times,unit='d')
data = {}
kpers = [kper - 1 for kstp, kper in hds.kstpkper]
for kperk_pair in kperk_pairs:
kper, k = kperk_pair
assert kper in kpers, "kper not in hds:{0}".format(kper)
assert k in range(hds.nlay), "k not in hds:{0}".format(k)
kstp = last_kstp_from_kper(hds, kper)
d = hds.get_data(kstpkper=(kstp, kper))[k]
data["{0}_{1}".format(kper, k)] = d.flatten()
# data[(kper,k)] = d.flatten()
idx, iidx, jidx = [], [], []
for _ in range(len(data)):
for i in range(hds.nrow):
iidx.extend([i for _ in range(hds.ncol)])
jidx.extend([j for j in range(hds.ncol)])
idx.extend(["i{0:04d}_j{1:04d}".format(i, j) for j in range(hds.ncol)])
idx = idx[: hds.nrow * hds.ncol]
df = pd.DataFrame(data, index=idx)
data_cols = list(df.columns)
data_cols.sort()
# df.loc[:,"iidx"] = iidx
# df.loc[:,"jidx"] = jidx
if skip is not None:
for col in data_cols:
if np.isscalar(skip):
df.loc[df.loc[:, col] == skip, col] = np.NaN
elif isinstance(skip, np.ndarray):
assert (
skip.ndim >= 2
), "skip passed as {}D array, At least 2D (<= 4D) array required".format(
skip.ndim
)
assert skip.shape[-2:] == (
hds.nrow,
hds.ncol,
), "Array dimensions of arg. skip needs to match model dimensions ({0},{1}). ({2},{3}) passed".format(
hds.nrow, hds.ncol, skip.shape[-2], skip.shape[-1]
)
if skip.ndim == 2:
print(
"2D array passed for skip, assuming constant for all layers and kper"
)
skip = np.tile(skip, (len(kpers), hds.nlay, 1, 1))
if skip.ndim == 3:
print("3D array passed for skip, assuming constant for all kper")
skip = np.tile(skip, (len(kpers), 1, 1, 1))
kper, k = [int(c) for c in col.split("_")]
df.loc[
df.index.map(
lambda x: skip[
kper,
k,
int(x.split("_")[0].strip("i")),
int(x.split("_")[1].strip("j")),
]
== 0
),
col,
] = np.NaN
else:
df.loc[:, col] = df.loc[:, col].apply(skip)
# melt to long form
df = df.melt(var_name="kperk", value_name="obsval")
# set row and col identifies
df.loc[:, "iidx"] = iidx
df.loc[:, "jidx"] = jidx
# drop nans from skip
df = df.dropna()
# set some additional identifiers
df.loc[:, "kper"] = df.kperk.apply(lambda x: int(x.split("_")[0]))
df.loc[:, "kidx"] = df.pop("kperk").apply(lambda x: int(x.split("_")[1]))
# form obs names
# def get_kper_str(kper):
# if start_datetime is not None:
# return dts[int(kper)].strftime("%Y%m%d")
# else:
# return "kper{0:04.0f}".format(kper)
fmt = prefix + "_{0:02.0f}_{1:03.0f}_{2:03.0f}_{3:03.0f}"
# df.loc[:,"obsnme"] = df.apply(lambda x: fmt.format(x.kidx,x.iidx,x.jidx,
# get_kper_str(x.kper)),axis=1)
df.loc[:, "obsnme"] = df.apply(
lambda x: fmt.format(x.kidx, x.iidx, x.jidx, x.kper), axis=1
)
df.loc[:, "ins_str"] = df.obsnme.apply(lambda x: "l1 w !{0}!".format(x))
df.loc[:, "obgnme"] = prefix
# write the instruction file
with open(hds_file + ".dat.ins", "w") as f:
f.write("pif ~\nl1\n")
df.ins_str.to_string(f, index=False, header=False)
# write the corresponding output file
df.loc[:, ["obsnme", "obsval"]].to_csv(hds_file + ".dat", sep=" ", index=False)
hds_path = os.path.dirname(hds_file)
setup_file = os.path.join(
hds_path, "_setup_{0}.csv".format(os.path.split(hds_file)[-1])
)
df.to_csv(setup_file)
if not include_path:
hds_file = os.path.split(hds_file)[-1]
fwd_run_line = (
"pyemu.gw_utils.apply_hds_obs('{0}',precision='{1}',text='{2}')\n".format(
hds_file, precision, text
)
)
df.index = df.obsnme
return fwd_run_line, df
def last_kstp_from_kper(hds, kper):
"""function to find the last time step (kstp) for a
given stress period (kper) in a modflow head save file.
Args:
hds (`flopy.utils.HeadFile`): head save file
kper (`int`): the zero-index stress period number
Returns:
**int**: the zero-based last time step during stress period
kper in the head save file
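Example:
    a minimal usage sketch; the head save file name below is hypothetical::

        import flopy
        import pyemu
        hds = flopy.utils.HeadFile("my_model.hds")
        kstp = pyemu.gw_utils.last_kstp_from_kper(hds, kper=0)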
"""
# find the last kstp with this kper
kstp = -1
for kkstp, kkper in hds.kstpkper:
if kkper == kper + 1 and kkstp > kstp:
kstp = kkstp
if kstp == -1:
raise Exception("kstp not found for kper {0}".format(kper))
kstp -= 1
return kstp
def apply_hds_obs(hds_file, inact_abs_val=1.0e20, precision="single", text="head"):
"""process a modflow head save file. A companion function to
`gw_utils.setup_hds_obs()` that is called during the forward run process
Args:
hds_file (`str`): a modflow head save filename. if hds_file ends with 'ucn',
then the file is treated as a UcnFile type.
inact_abs_val (`float`, optional): the value that marks the minimum and maximum
active value. Values in the headsave file greater than `inact_abs_val` or less
than -`inact_abs_val` are reset to `inact_abs_val`
Returns:
**pandas.DataFrame**: a dataframe with extracted simulated values.
Note:
This is the companion function to `gw_utils.setup_hds_obs()`.
"""
try:
import flopy
except Exception as e:
raise Exception("apply_hds_obs(): error importing flopy: {0}".format(str(e)))
from .. import pst_utils
assert os.path.exists(hds_file)
out_file = hds_file + ".dat"
ins_file = out_file + ".ins"
assert os.path.exists(ins_file)
df = pd.DataFrame({"obsnme": pst_utils.parse_ins_file(ins_file)})
df.index = df.obsnme
# populate metadata
items = ["k", "i", "j", "kper"]
for i, item in enumerate(items):
df.loc[:, item] = df.obsnme.apply(lambda x: int(x.split("_")[i + 1]))
if hds_file.lower().endswith("ucn"):
hds = flopy.utils.UcnFile(hds_file)
elif text.lower() == "headu":
hds = flopy.utils.HeadUFile(hds_file)
else:
hds = flopy.utils.HeadFile(hds_file, precision=precision, text=text)
kpers = df.kper.unique()
df.loc[:, "obsval"] = np.NaN
for kper in kpers:
kstp = last_kstp_from_kper(hds, kper)
data = hds.get_data(kstpkper=(kstp, kper))
# jwhite 15jan2018 fix for really large values that are getting some
# trash added to them...
if text.lower() != "headu":
data[np.isnan(data)] = 0.0
data[data > np.abs(inact_abs_val)] = np.abs(inact_abs_val)
data[data < -np.abs(inact_abs_val)] = -np.abs(inact_abs_val)
df_kper = df.loc[df.kper == kper, :]
df.loc[df_kper.index, "obsval"] = data[df_kper.k, df_kper.i, df_kper.j]
else:
df_kper = df.loc[df.kper == kper, :]
for k, d in enumerate(data):
d[np.isnan(d)] = 0.0
d[d > np.abs(inact_abs_val)] = np.abs(inact_abs_val)
d[d < -np.abs(inact_abs_val)] = -np.abs(inact_abs_val)
df_kperk = df_kper.loc[df_kper.k == k, :]
df.loc[df_kperk.index, "obsval"] = d[df_kperk.i]
assert df.dropna().shape[0] == df.shape[0]
df.loc[:, ["obsnme", "obsval"]].to_csv(out_file, index=False, sep=" ")
return df
def setup_sft_obs(sft_file, ins_file=None, start_datetime=None, times=None, ncomp=1):
"""writes a post-processor and instruction file for a mt3d-usgs sft output file
Args:
sft_file (`str`): path and name of an existing sft output file (ASCII)
ins_file (`str`, optional): the name of the instruction file to create.
If None, the name is `sft_file`+".ins". Default is `None`.
start_datetime (`str`): a pandas.to_datetime() compatible str. If not None,
then the resulting observation names have the datetime
suffix. If None, the suffix is the output totim. Default
is `None`.
times ([`float`]): a list of times to make observations for. If None, all times
found in the file are used. Default is None.
ncomp (`int`): number of components in transport model. Default is 1.
Returns:
**pandas.DataFrame**: a dataframe with observation names and values for the sft simulated
concentrations.
Note:
This is the companion function to `gw_utils.apply_sft_obs()`.
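Example:
    a minimal usage sketch; the sft output file name below is hypothetical and must
    exist when this is run::

        import pyemu
        df = pyemu.gw_utils.setup_sft_obs("my_model.sft.out", start_datetime="1-1-1970")
        # writes "my_model.sft.out.processed.ins" and "sft_obs.config"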
"""
df = pd.read_csv(sft_file, skiprows=1, delim_whitespace=True)
df.columns = [c.lower().replace("-", "_") for c in df.columns]
if times is None:
times = df.time.unique()
missing = []
utimes = df.time.unique()
for t in times:
if t not in utimes:
missing.append(str(t))
if len(missing) > 0:
print(df.time)
raise Exception("the following times are missing:{0}".format(",".join(missing)))
with open("sft_obs.config", "w") as f:
f.write(sft_file + "\n")
[f.write("{0:15.6E}\n".format(t)) for t in times]
df = apply_sft_obs()
utimes = df.time.unique()
for t in times:
assert t in utimes, "time {0} missing in processed dataframe".format(t)
idx = df.time.apply(lambda x: x in times)
if start_datetime is not None:
start_datetime = pd.to_datetime(start_datetime)
df.loc[:, "time_str"] = pd.to_timedelta(df.time, unit="d") + start_datetime
df.loc[:, "time_str"] = df.time_str.apply(
lambda x: datetime.strftime(x, "%Y%m%d")
)
else:
df.loc[:, "time_str"] = df.time.apply(lambda x: "{0:08.2f}".format(x))
df.loc[:, "ins_str"] = "l1\n"
# check for multiple components
df_times = df.loc[idx, :]
df.loc[:, "icomp"] = 1
icomp_idx = list(df.columns).index("icomp")
for t in times:
df_time = df.loc[df.time == t, :].copy()
vc = df_time.sfr_node.value_counts()
ncomp = vc.max()
assert np.all(vc.values == ncomp)
nstrm = df_time.shape[0] / ncomp
for icomp in range(ncomp):
s = int(nstrm * (icomp))
e = int(nstrm * (icomp + 1))
idxs = df_time.iloc[s:e, :].index
# df_time.iloc[nstrm*(icomp):nstrm*(icomp+1),icomp_idx.loc["icomp"] = int(icomp+1)
df_time.loc[idxs, "icomp"] = int(icomp + 1)
# df.loc[df_time.index,"ins_str"] = df_time.apply(lambda x: "l1 w w !sfrc{0}_{1}_{2}! !swgw{0}_{1}_{2}! !gwcn{0}_{1}_{2}!\n".\
# format(x.sfr_node,x.icomp,x.time_str),axis=1)
df.loc[df_time.index, "ins_str"] = df_time.apply(
lambda x: "l1 w w !sfrc{0}_{1}_{2}!\n".format(
x.sfr_node, x.icomp, x.time_str
),
axis=1,
)
df.index = np.arange(df.shape[0])
if ins_file is None:
ins_file = sft_file + ".processed.ins"
with open(ins_file, "w") as f:
f.write("pif ~\nl1\n")
[f.write(i) for i in df.ins_str]
# df = try_process_ins_file(ins_file,sft_file+".processed")
df = try_process_output_file(ins_file, sft_file + ".processed")
return df
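# Usage sketch for setup_sft_obs() (illustrative only - "trans.sft" and the
# start date are hypothetical placeholders, not files or values shipped with
# this module):
# df = setup_sft_obs("trans.sft", start_datetime="1-1-1970", times=None, ncomp=2)
# print(df.head())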
def apply_sft_obs():
"""process an mt3d-usgs sft ASCII output file using a previous-written
config file
Returns:
**pandas.DataFrame**: a dataframe of extracted simulated outputs
Note:
This is the companion function to `gw_utils.setup_sft_obs()`.
"""
# this is for dealing with the missing 'e' problem
def try_cast(x):
try:
return float(x)
except Exception:
return 0.0
times = []
with open("sft_obs.config") as f:
sft_file = f.readline().strip()
for line in f:
times.append(float(line.strip()))
df = pd.read_csv(sft_file, skiprows=1, delim_whitespace=True) # ,nrows=10000000)
df.columns = [c.lower().replace("-", "_") for c in df.columns]
df = df.loc[df.time.apply(lambda x: x in times), :]
# print(df.dtypes)
# normalize
for c in df.columns:
# print(c)
if not "node" in c:
df.loc[:, c] = df.loc[:, c].apply(try_cast)
# print(df.loc[df.loc[:,c].apply(lambda x : type(x) == str),:])
if df.dtypes[c] == float:
df.loc[df.loc[:, c] < 1e-30, c] = 0.0
df.loc[df.loc[:, c] > 1e30, c] = 1.0e30
df.loc[:, "sfr_node"] = df.sfr_node.apply(np.int)
df.to_csv(sft_file + ".processed", sep=" ", index=False)
return df
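# Forward-run usage sketch (assumption, not from the original source): during
# the forward run, apply_sft_obs() is called with no arguments from the
# directory containing the "sft_obs.config" written by setup_sft_obs():
# df = apply_sft_obs()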
def setup_sfr_seg_parameters(
nam_file, model_ws=".", par_cols=None, tie_hcond=True, include_temporal_pars=None
):
"""Setup multiplier parameters for SFR segment data.
Args:
nam_file (`str`): MODFLOW name file. DIS, BAS, and SFR must be
available as pathed in the nam_file. Optionally, `nam_file` can be
an existing `flopy.modflow.Modflow`.
model_ws (`str`): model workspace for flopy to load the MODFLOW model from
par_cols ([`str`]): a list of segment data entries to parameterize
tie_hcond (`bool`): flag to use same mult par for hcond1 and hcond2 for a
given segment. Default is `True`.
include_temporal_pars ([`str`]): list of spatially-global multipliers to set up for
each stress period. Default is None
Returns:
**pandas.DataFrame**: a dataframe with useful parameter setup information
Note:
This function handles the standard input case, not all the cryptic SFR options. Loads the
dis, bas, and sfr files with flopy using model_ws.
This is the companion function to `gw_utils.apply_sfr_seg_parameters()` .
The number (and numbering) of segment data entries must be consistent across
all stress periods.
Writes `nam_file` +"_backup_.sfr" as the backup of the original sfr file
Skips values = 0.0 since multipliers don't work for these
"""
try:
import flopy
except Exception as e:
return
if par_cols is None:
par_cols = ["flow", "runoff", "hcond1", "pptsw"]
if tie_hcond:
if "hcond1" not in par_cols or "hcond2" not in par_cols:
tie_hcond = False
if isinstance(nam_file, flopy.modflow.mf.Modflow) and nam_file.sfr is not None:
m = nam_file
nam_file = m.namefile
model_ws = m.model_ws
else:
# load MODFLOW model # is this needed? could we just pass the model if it has already been read in?
m = flopy.modflow.Modflow.load(
nam_file, load_only=["sfr"], model_ws=model_ws, check=False, forgive=False
)
if include_temporal_pars:
if include_temporal_pars is True:
tmp_par_cols = {col: range(m.dis.nper) for col in par_cols}
elif isinstance(include_temporal_pars, str):
tmp_par_cols = {include_temporal_pars: range(m.dis.nper)}
elif isinstance(include_temporal_pars, list):
tmp_par_cols = {col: range(m.dis.nper) for col in include_temporal_pars}
elif isinstance(include_temporal_pars, dict):
tmp_par_cols = include_temporal_pars
include_temporal_pars = True
else:
tmp_par_cols = {}
include_temporal_pars = False
# make backup copy of sfr file
shutil.copy(
os.path.join(model_ws, m.sfr.file_name[0]),
os.path.join(model_ws, nam_file + "_backup_.sfr"),
)
# get the segment data (dict)
segment_data = m.sfr.segment_data
shape = segment_data[list(segment_data.keys())[0]].shape
# check
for kper, seg_data in m.sfr.segment_data.items():
assert (
seg_data.shape == shape
), "cannot use: seg data must have the same number of entires for all kpers"
seg_data_col_order = list(seg_data.dtype.names)
# convert segment_data dictionary to multi index df - this could get ugly
reform = {
(k, c): segment_data[k][c]
for k in segment_data.keys()
for c in segment_data[k].dtype.names
}
seg_data_all_kper = pd.DataFrame.from_dict(reform)
seg_data_all_kper.columns.names = ["kper", "col"]
# extract the first seg data kper to a dataframe
seg_data = seg_data_all_kper[0].copy() # pd.DataFrame.from_records(seg_data)
# make sure all par cols are found and search for any data in kpers
missing = []
cols = par_cols.copy()
for par_col in set(par_cols + list(tmp_par_cols.keys())):
if par_col not in seg_data.columns:
if par_col in cols:
missing.append(cols.pop(cols.index(par_col)))
if par_col in tmp_par_cols.keys():
_ = tmp_par_cols.pop(par_col)
# look across all kper in multiindex df to check for values entry - fill with absmax should capture entries
else:
seg_data.loc[:, par_col] = (
seg_data_all_kper.loc[:, (slice(None), par_col)]
.abs()
.max(level=1, axis=1)
)
if len(missing) > 0:
warnings.warn(
"the following par_cols were not found in segment data: {0}".format(
",".join(missing)
),
PyemuWarning,
)
if len(missing) >= len(par_cols):
warnings.warn(
"None of the passed par_cols ({0}) were found in segment data.".format(
",".join(par_cols)
),
PyemuWarning,
)
seg_data = seg_data[seg_data_col_order] # reset column orders to initial
seg_data_org = seg_data.copy()
seg_data.to_csv(os.path.join(model_ws, "sfr_seg_pars.dat"), sep=",")
# the data cols not to parameterize
# better than a column indexer as pandas can change column orders
idx_cols = ["nseg", "icalc", "outseg", "iupseg", "iprior", "nstrpts"]
notpar_cols = [c for c in seg_data.columns if c not in cols + idx_cols]
# process par cols
tpl_str, pvals = [], []
if include_temporal_pars:
tmp_pnames, tmp_tpl_str = [], []
tmp_df = pd.DataFrame(
data={c: 1.0 for c in tmp_par_cols.keys()},
index=list(m.sfr.segment_data.keys()),
)
tmp_df.sort_index(inplace=True)
tmp_df.to_csv(os.path.join(model_ws, "sfr_seg_temporal_pars.dat"))
for par_col in set(cols + list(tmp_par_cols.keys())):
print(par_col)
prefix = par_col
if tie_hcond and par_col == "hcond2":
prefix = "hcond1"
if seg_data.loc[:, par_col].sum() == 0.0:
print("all zeros for {0}...skipping...".format(par_col))
# seg_data.loc[:,par_col] = 1
# all zero so no need to set up
if par_col in cols:
# - add to notpar
notpar_cols.append(cols.pop(cols.index(par_col)))
if par_col in tmp_par_cols.keys():
_ = tmp_par_cols.pop(par_col)
if par_col in cols:
seg_data.loc[:, par_col] = seg_data.apply(
lambda x: "~ {0}_{1:04d} ~".format(prefix, int(x.nseg))
if float(x[par_col]) != 0.0
else "1.0",
axis=1,
)
org_vals = seg_data_org.loc[seg_data_org.loc[:, par_col] != 0.0, par_col]
pnames = seg_data.loc[org_vals.index, par_col]
pvals.extend(list(org_vals.values))
tpl_str.extend(list(pnames.values))
if par_col in tmp_par_cols.keys():
parnme = tmp_df.index.map(
lambda x: "{0}_{1:04d}_tmp".format(par_col, int(x))
if x in tmp_par_cols[par_col]
else 1.0
)
sel = parnme != 1.0
tmp_df.loc[sel, par_col] = parnme[sel].map(lambda x: "~ {0} ~".format(x))
tmp_tpl_str.extend(list(tmp_df.loc[sel, par_col].values))
tmp_pnames.extend(list(parnme[sel].values))
pnames = [t.replace("~", "").strip() for t in tpl_str]
df = pd.DataFrame(
{"parnme": pnames, "org_value": pvals, "tpl_str": tpl_str}, index=pnames
)
df.drop_duplicates(inplace=True)
if df.empty:
warnings.warn(
"No spatial sfr segment parameters have been set up, "
"either none of {0} were found or all were zero.".format(
",".join(par_cols)
),
PyemuWarning,
)
# return df
# set not par cols to 1.0
seg_data.loc[:, notpar_cols] = "1.0"
# write the template file
_write_df_tpl(os.path.join(model_ws, "sfr_seg_pars.dat.tpl"), seg_data, sep=",")
# make sure the tpl file exists and has the same num of pars
parnme = parse_tpl_file(os.path.join(model_ws, "sfr_seg_pars.dat.tpl"))
assert len(parnme) == df.shape[0]
# set some useful par info
df["pargp"] = df.parnme.apply(lambda x: x.split("_")[0])
if include_temporal_pars:
_write_df_tpl(
filename=os.path.join(model_ws, "sfr_seg_temporal_pars.dat.tpl"), df=tmp_df
)
pargp = [pname.split("_")[0] + "_tmp" for pname in tmp_pnames]
tmp_df = pd.DataFrame(
data={"parnme": tmp_pnames, "pargp": pargp}, index=tmp_pnames
)
if not tmp_df.empty:
tmp_df.loc[:, "org_value"] = 1.0
tmp_df.loc[:, "tpl_str"] = tmp_tpl_str
df = pd.concat([df, tmp_df[df.columns]])
if df.empty:
warnings.warn(
"No sfr segment parameters have been set up, "
"either none of {0} were found or all were zero.".format(
",".join(set(par_cols + list(tmp_par_cols.keys())))
),
PyemuWarning,
)
return df
# write the config file used by apply_sfr_pars()
with open(os.path.join(model_ws, "sfr_seg_pars.config"), "w") as f:
f.write("nam_file {0}\n".format(nam_file))
f.write("model_ws {0}\n".format(model_ws))
f.write("mult_file sfr_seg_pars.dat\n")
f.write("sfr_filename {0}\n".format(m.sfr.file_name[0]))
if include_temporal_pars:
f.write("time_mult_file sfr_seg_temporal_pars.dat\n")
# set some useful par info
df.loc[:, "parubnd"] = 1.25
df.loc[:, "parlbnd"] = 0.75
hpars = df.loc[df.pargp.apply(lambda x: x.startswith("hcond")), "parnme"]
df.loc[hpars, "parubnd"] = 100.0
df.loc[hpars, "parlbnd"] = 0.01
return df
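# Usage sketch (illustrative only - "freyberg.nam" and the "template" workspace
# are hypothetical names):
# par_df = setup_sfr_seg_parameters(
#     "freyberg.nam", model_ws="template",
#     par_cols=["flow", "hcond1"], include_temporal_pars=True
# )
# print(par_df[["parnme", "org_value", "parubnd", "parlbnd"]].head())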
def setup_sfr_reach_parameters(nam_file, model_ws=".", par_cols=["strhc1"]):
"""Setup multiplier paramters for reach data, when reachinput option is specififed in sfr.
Args:
nam_file (`str`): MODFLOW name file. DIS, BAS, and SFR must be
available as pathed in the nam_file. Optionally, `nam_file` can be
an existing `flopy.modflow.Modflow`.
model_ws (`str`): model workspace for flopy to load the MODFLOW model from
par_cols ([`str`]): a list of reach data entries to parameterize. Default is
["strhc1"].
Returns:
**pandas.DataFrame**: a dataframe with useful parameter setup information
Note:
Similar to `gw_utils.setup_sfr_seg_parameters()`, method will apply params to sfr reachdata
Can load the dis, bas, and sfr files with flopy using model_ws. Or can pass a model object
(SFR loading can be slow)
This is the companion function of `gw_utils.apply_sfr_reach_parameters()`
Skips values = 0.0 since multipliers don't work for these
"""
try:
import flopy
except Exception as e:
return
if par_cols is None:
par_cols = ["strhc1"]
if isinstance(nam_file, flopy.modflow.mf.Modflow) and nam_file.sfr is not None:
# flopy MODFLOW model has been passed and has SFR loaded
m = nam_file
nam_file = m.namefile
model_ws = m.model_ws
else:
# if model has not been passed or SFR not loaded # load MODFLOW model
m = flopy.modflow.Modflow.load(
nam_file, load_only=["sfr"], model_ws=model_ws, check=False, forgive=False
)
# get reachdata as dataframe
reach_data = pd.DataFrame.from_records(m.sfr.reach_data)
# write initial reach_data as csv
reach_data_orig = reach_data.copy()
reach_data.to_csv(os.path.join(m.model_ws, "sfr_reach_pars.dat"), sep=",")
# generate template file with pars in par_cols
# process par cols
tpl_str, pvals = [], []
# par_cols=["strhc1"]
idx_cols = ["node", "k", "i", "j", "iseg", "ireach", "reachID", "outreach"]
# the data cols not to parameterize
notpar_cols = [c for c in reach_data.columns if c not in par_cols + idx_cols]
# make sure all par cols are found in the reach data
missing = []
cols = par_cols.copy()
for par_col in par_cols:
if par_col not in reach_data.columns:
missing.append(par_col)
cols.remove(par_col)
if len(missing) > 0:
warnings.warn(
"the following par_cols were not found in reach data: {0}".format(
",".join(missing)
),
PyemuWarning,
)
if len(missing) >= len(par_cols):
warnings.warn(
"None of the passed par_cols ({0}) were found in reach data.".format(
",".join(par_cols)
),
PyemuWarning,
)
for par_col in cols:
if par_col == "strhc1":
prefix = "strk" # shorten par
else:
prefix = par_col
reach_data.loc[:, par_col] = reach_data.apply(
lambda x: "~ {0}_{1:04d} ~".format(prefix, int(x.reachID))
if float(x[par_col]) != 0.0
else "1.0",
axis=1,
)
org_vals = reach_data_orig.loc[reach_data_orig.loc[:, par_col] != 0.0, par_col]
pnames = reach_data.loc[org_vals.index, par_col]
pvals.extend(list(org_vals.values))
tpl_str.extend(list(pnames.values))
pnames = [t.replace("~", "").strip() for t in tpl_str]
df = pd.DataFrame(
{"parnme": pnames, "org_value": pvals, "tpl_str": tpl_str}, index=pnames
)
df.drop_duplicates(inplace=True)
if df.empty:
warnings.warn(
"No sfr reach parameters have been set up, either none of {0} were found or all were zero.".format(
",".join(par_cols)
),
PyemuWarning,
)
else:
# set not par cols to 1.0
reach_data.loc[:, notpar_cols] = "1.0"
# write the template file
_write_df_tpl(
os.path.join(model_ws, "sfr_reach_pars.dat.tpl"), reach_data, sep=","
)
# write the config file used by apply_sfr_pars()
with open(os.path.join(model_ws, "sfr_reach_pars.config"), "w") as f:
f.write("nam_file {0}\n".format(nam_file))
f.write("model_ws {0}\n".format(model_ws))
f.write("mult_file sfr_reach_pars.dat\n")
f.write("sfr_filename {0}".format(m.sfr.file_name[0]))
# make sure the tpl file exists and has the same num of pars
parnme = parse_tpl_file(os.path.join(model_ws, "sfr_reach_pars.dat.tpl"))
assert len(parnme) == df.shape[0]
# set some useful par info
df.loc[:, "pargp"] = df.parnme.apply(lambda x: x.split("_")[0])
df.loc[:, "parubnd"] = 1.25
df.loc[:, "parlbnd"] = 0.75
hpars = df.loc[df.pargp.apply(lambda x: x.startswith("strk")), "parnme"]
df.loc[hpars, "parubnd"] = 100.0
df.loc[hpars, "parlbnd"] = 0.01
return df
def apply_sfr_seg_parameters(seg_pars=True, reach_pars=False):
"""apply the SFR segement multiplier parameters.
Args:
seg_pars (`bool`, optional): flag to apply segment-based parameters.
Default is True
reach_pars (`bool`, optional): flag to apply reach-based parameters.
Default is False
Returns:
**flopy.modflow.ModflowSfr**: the modified SFR package instance
Note:
Expects "sfr_seg_pars.config" to exist
Expects `nam_file` +"_backup_.sfr" to exist
"""
if not seg_pars and not reach_pars:
raise Exception(
"gw_utils.apply_sfr_pars() error: both seg_pars and reach_pars are False"
)
# if seg_pars and reach_pars:
# raise Exception("gw_utils.apply_sfr_pars() error: both seg_pars and reach_pars are True")
import flopy
bak_sfr_file, pars = None, None
if seg_pars:
assert os.path.exists("sfr_seg_pars.config")
with open("sfr_seg_pars.config", "r") as f:
pars = {}
for line in f:
line = line.strip().split()
pars[line[0]] = line[1]
bak_sfr_file = pars["nam_file"] + "_backup_.sfr"
# m = flopy.modflow.Modflow.load(pars["nam_file"],model_ws=pars["model_ws"],load_only=["sfr"],check=False)
m = flopy.modflow.Modflow.load(pars["nam_file"], load_only=[], check=False)
sfr = flopy.modflow.ModflowSfr2.load(os.path.join(bak_sfr_file), m)
sfrfile = pars["sfr_filename"]
mlt_df = pd.read_csv(pars["mult_file"], delim_whitespace=False, index_col=0)
# time_mlt_df = None
# if "time_mult_file" in pars:
# time_mult_file = pars["time_mult_file"]
# time_mlt_df = pd.read_csv(pars["time_mult_file"], delim_whitespace=False,index_col=0)
idx_cols = ["nseg", "icalc", "outseg", "iupseg", "iprior", "nstrpts"]
present_cols = [c for c in idx_cols if c in mlt_df.columns]
mlt_cols = mlt_df.columns.drop(present_cols)
for key, val in m.sfr.segment_data.items():
df = pd.DataFrame.from_records(val)
df.loc[:, mlt_cols] *= mlt_df.loc[:, mlt_cols]
val = df.to_records(index=False)
sfr.segment_data[key] = val
if reach_pars:
assert os.path.exists("sfr_reach_pars.config")
with open("sfr_reach_pars.config", "r") as f:
r_pars = {}
for line in f:
line = line.strip().split()
r_pars[line[0]] = line[1]
if bak_sfr_file is None: # will be the case if seg_pars is False
bak_sfr_file = r_pars["nam_file"] + "_backup_.sfr"
# m = flopy.modflow.Modflow.load(pars["nam_file"],model_ws=pars["model_ws"],load_only=["sfr"],check=False)
m = flopy.modflow.Modflow.load(
r_pars["nam_file"], load_only=[], check=False
)
sfr = flopy.modflow.ModflowSfr2.load(os.path.join(bak_sfr_file), m)
sfrfile = r_pars["sfr_filename"]
r_mlt_df = pd.read_csv(r_pars["mult_file"], sep=",", index_col=0)
r_idx_cols = ["node", "k", "i", "j", "iseg", "ireach", "reachID", "outreach"]
r_mlt_cols = r_mlt_df.columns.drop(r_idx_cols)
r_df = pd.DataFrame.from_records(m.sfr.reach_data)
r_df.loc[:, r_mlt_cols] *= r_mlt_df.loc[:, r_mlt_cols]
sfr.reach_data = r_df.to_records(index=False)
# m.remove_package("sfr")
if pars is not None and "time_mult_file" in pars:
time_mult_file = pars["time_mult_file"]
time_mlt_df = pd.read_csv(time_mult_file, delim_whitespace=False, index_col=0)
for kper, sdata in m.sfr.segment_data.items():
assert kper in time_mlt_df.index, (
"gw_utils.apply_sfr_seg_parameters() error: kper "
+ "{0} not in time_mlt_df index".format(kper)
)
for col in time_mlt_df.columns:
sdata[col] *= time_mlt_df.loc[kper, col]
sfr.write_file(filename=sfrfile)
return sfr
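# Forward-run usage sketch (assumption): called from the model run directory
# where "sfr_seg_pars.config" (and, if reach parameters were set up,
# "sfr_reach_pars.config") and the "_backup_.sfr" file live:
# sfr = apply_sfr_seg_parameters(seg_pars=True, reach_pars=False)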
def apply_sfr_parameters(seg_pars=True, reach_pars=False):
"""thin wrapper around `gw_utils.apply_sfr_seg_parameters()`
Args:
seg_pars (`bool`, optional): flag to apply segment-based parameters.
Default is True
reach_pars (`bool`, optional): flag to apply reach-based parameters.
Default is False
Returns:
**flopy.modflow.ModflowSfr**: the modified SFR package instance
Note:
Expects "sfr_seg_pars.config" to exist
Expects `nam_file` +"_backup_.sfr" to exist
"""
sfr = apply_sfr_seg_parameters(seg_pars=seg_pars, reach_pars=reach_pars)
return sfr
def setup_sfr_obs(
sfr_out_file, seg_group_dict=None, ins_file=None, model=None, include_path=False
):
"""setup observations using the sfr ASCII output file. Setups
the ability to aggregate flows for groups of segments. Applies
only flow to aquier and flow out.
Args:
sfr_out_file (`str`): the name and path to an existing SFR output file
seg_group_dict (`dict`): a dictionary of SFR segments to aggregate together for a single obs.
the key value in the dict is the base observation name. If None, all segments
are used as individual observations. Default is None
model (`flopy.mbase`): a flopy model. If passed, the observation names will have
the datetime of the observation appended to them. If None, the observation names
will have the stress period appended to them. Default is None.
include_path (`bool`): flag to prepend sfr_out_file path to sfr_obs.config. Useful for setting up
the process in a separate directory from where python is running.
Returns:
**pandas.DataFrame**: dataframe of observation name, simulated value and group.
Note:
This is the companion function of `gw_utils.apply_sfr_obs()`.
This function writes "sfr_obs.config" which must be kept in the dir where
"gw_utils.apply_sfr_obs()" is being called during the forward run
"""
sfr_dict = load_sfr_out(sfr_out_file)
kpers = list(sfr_dict.keys())
kpers.sort()
if seg_group_dict is None:
seg_group_dict = {"seg{0:04d}".format(s): s for s in sfr_dict[kpers[0]].segment}
else:
warnings.warn(
"Flow out (flout) of grouped segments will be aggregated... ", PyemuWarning
)
sfr_segs = set(sfr_dict[list(sfr_dict.keys())[0]].segment)
keys = ["sfr_out_file"]
if include_path:
values = [os.path.split(sfr_out_file)[-1]]
else:
values = [sfr_out_file]
for oname, segs in seg_group_dict.items():
if np.isscalar(segs):
segs_set = {segs}
segs = [segs]
else:
segs_set = set(segs)
diff = segs_set.difference(sfr_segs)
if len(diff) > 0:
raise Exception(
"the following segs listed with oname {0} where not found: {1}".format(
oname, ",".join([str(s) for s in diff])
)
)
for seg in segs:
keys.append(oname)
values.append(seg)
df_key = pd.DataFrame({"obs_base": keys, "segment": values})
if include_path:
pth = os.path.join(*[p for p in os.path.split(sfr_out_file)[:-1]])
config_file = os.path.join(pth, "sfr_obs.config")
else:
config_file = "sfr_obs.config"
print("writing 'sfr_obs.config' to {0}".format(config_file))
df_key.to_csv(config_file)
bd = "."
if include_path:
bd = os.getcwd()
os.chdir(pth)
try:
df = apply_sfr_obs()
except Exception as e:
os.chdir(bd)
raise Exception("error in apply_sfr_obs(): {0}".format(str(e)))
os.chdir(bd)
if model is not None:
dts = (
pd.to_datetime(model.start_datetime)
+ pd.to_timedelta(np.cumsum(model.dis.perlen.array), unit="d")
).date
df.loc[:, "datetime"] = df.kper.apply(lambda x: dts[x])
df.loc[:, "time_str"] = df.datetime.apply(lambda x: x.strftime("%Y%m%d"))
else:
df.loc[:, "time_str"] = df.kper.apply(lambda x: "{0:04d}".format(x))
df.loc[:, "flaqx_obsnme"] = df.apply(
lambda x: "{0}_{1}_{2}".format("fa", x.obs_base, x.time_str), axis=1
)
df.loc[:, "flout_obsnme"] = df.apply(
lambda x: "{0}_{1}_{2}".format("fo", x.obs_base, x.time_str), axis=1
)
if ins_file is None:
ins_file = sfr_out_file + ".processed.ins"
with open(ins_file, "w") as f:
f.write("pif ~\nl1\n")
for fla, flo in zip(df.flaqx_obsnme, df.flout_obsnme):
f.write("l1 w w !{0}! !{1}!\n".format(fla, flo))
df = None
pth = os.path.split(ins_file)[:-1]
pth = os.path.join(*pth)
if pth == "":
pth = "."
bd = os.getcwd()
os.chdir(pth)
df = try_process_output_file(
os.path.split(ins_file)[-1], os.path.split(sfr_out_file + ".processed")[-1]
)
os.chdir(bd)
if df is not None:
df.loc[:, "obsnme"] = df.index.values
df.loc[:, "obgnme"] = df.obsnme.apply(
lambda x: "flaqx" if x.startswith("fa") else "flout"
)
return df
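# Usage sketch (illustrative only - "freyberg.sfr.out" is a hypothetical SFR
# ASCII output file and the segment grouping is arbitrary):
# obs_df = setup_sfr_obs(
#     "freyberg.sfr.out", seg_group_dict={"lower_reach": [8, 9, 10]}
# )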
def apply_sfr_obs():
"""apply the sfr observation process
Args:
None
Returns:
**pandas.DataFrame**: a dataframe of aggregated sfr segment aquifer and outflow
Note:
This is the companion function of `gw_utils.setup_sfr_obs()`.
Requires `sfr_obs.config`.
Writes `sfr_out_file`+".processed", where `sfr_out_file` is defined in "sfr_obs.config"
"""
assert os.path.exists("sfr_obs.config")
df_key = pd.read_csv("sfr_obs.config", index_col=0)
assert df_key.iloc[0, 0] == "sfr_out_file", df_key.iloc[0, :]
sfr_out_file = df_key.iloc[0, 1]
df_key = df_key.iloc[1:, :]
df_key.loc[:, "segment"] = df_key.segment.apply(np.int)
df_key.index = df_key.segment
seg_group_dict = df_key.groupby(df_key.obs_base).groups
sfr_kper = load_sfr_out(sfr_out_file)
kpers = list(sfr_kper.keys())
kpers.sort()
# results = {o:[] for o in seg_group_dict.keys()}
results = []
for kper in kpers:
df = sfr_kper[kper]
for obs_base, segs in seg_group_dict.items():
agg = df.loc[
segs.values, :
].sum() # still agg flout where seg groups are passed!
# print(obs_base,agg)
results.append([kper, obs_base, agg["flaqx"], agg["flout"]])
df = pd.DataFrame(data=results, columns=["kper", "obs_base", "flaqx", "flout"])
df.sort_values(by=["kper", "obs_base"], inplace=True)
df.to_csv(sfr_out_file + ".processed", sep=" ", index=False)
return df
def load_sfr_out(sfr_out_file, selection=None):
"""load an ASCII SFR output file into a dictionary of kper: dataframes.
Args:
sfr_out_file (`str`): SFR ASCII output file
selection (`pandas.DataFrame`): a dataframe of `reach` and `segment` pairs to
load. If `None`, all reach-segment pairs are loaded. Default is `None`.
Returns:
**dict**: dictionary of {kper:`pandas.DataFrame`} of SFR output.
Note:
Aggregates flow to aquifer for segments and returns flow out at the
downstream end of each segment.
"""
assert os.path.exists(sfr_out_file), "couldn't find sfr out file {0}".format(
sfr_out_file
)
tag = " stream listing"
lcount = 0
sfr_dict = {}
if selection is None:
pass
elif isinstance(selection, str):
assert (
selection == "all"
), "If string passed as selection only 'all' allowed: " "{}".format(selection)
else:
assert isinstance(
selection, pd.DataFrame
), "'selection needs to be pandas Dataframe. " "Type {} passed.".format(
type(selection)
)
assert np.all(
[sr in selection.columns for sr in ["segment", "reach"]]
), "Either 'segment' or 'reach' not in selection columns"
with open(sfr_out_file) as f:
while True:
line = f.readline().lower()
lcount += 1
if line == "":
break
if line.startswith(tag):
raw = line.strip().split()
kper = int(raw[3]) - 1
kstp = int(raw[5]) - 1
[f.readline() for _ in range(4)] # skip to where the data starts
lcount += 4
dlines = []
while True:
dline = f.readline()
lcount += 1
if dline.strip() == "":
break
draw = dline.strip().split()
dlines.append(draw)
df = pd.DataFrame(data=np.array(dlines)).iloc[:, [3, 4, 6, 7]]
df.columns = ["segment", "reach", "flaqx", "flout"]
df["segment"] = df.segment.astype(np.int)
df["reach"] = df.reach.astype(np.int)
df["flaqx"] = df.flaqx.astype(np.float)
df["flout"] = df.flout.astype(np.float)
df.index = [
"{0:03d}_{1:03d}".format(s, r)
for s, r in np.array([df.segment.values, df.reach.values]).T
]
# df.index = df.apply(
# lambda x: "{0:03d}_{1:03d}".format(
# int(x.segment), int(x.reach)), axis=1)
if selection is None: # setup for all segs, aggregate
gp = df.groupby(df.segment)
bot_reaches = (
gp[["reach"]]
.max()
.apply(
lambda x: "{0:03d}_{1:03d}".format(
int(x.name), int(x.reach)
),
axis=1,
)
)
# only sum distributed output # take flow out of seg
df2 = pd.DataFrame(
{
"flaqx": gp.flaqx.sum(),
"flout": df.loc[bot_reaches, "flout"].values,
},
index=gp.groups.keys(),
)
# df = df.groupby(df.segment).sum()
df2["segment"] = df2.index
elif isinstance(selection, str) and selection == "all":
df2 = df
else:
seg_reach_id = selection.apply(
lambda x: "{0:03d}_{1:03d}".format(
int(x.segment), int(x.reach)
),
axis=1,
).values
for sr in seg_reach_id:
if sr not in df.index:
s, r = [x.lstrip("0") for x in sr.split("_")]
warnings.warn(
"Requested segment reach pair ({0},{1}) "
"is not in sfr output. Dropping...".format(
int(s), int(r)
),
PyemuWarning,
)
seg_reach_id = np.delete(
seg_reach_id, np.where(seg_reach_id == sr), axis=0
)
df2 = df.loc[seg_reach_id].copy()
if kper in sfr_dict.keys():
print(
"multiple entries found for kper {0}, "
"replacing...".format(kper)
)
sfr_dict[kper] = df2
return sfr_dict
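# Usage sketch (illustrative only - the file name and segment/reach pairs are
# hypothetical): load aggregated per-segment output, or only selected pairs:
# sfr_dict = load_sfr_out("freyberg.sfr.out")
# sel = pd.DataFrame({"segment": [1, 2], "reach": [1, 5]})
# sfr_dict_sel = load_sfr_out("freyberg.sfr.out", selection=sel)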
def setup_sfr_reach_obs(
sfr_out_file, seg_reach=None, ins_file=None, model=None, include_path=False
):
"""setup observations using the sfr ASCII output file. Setups
sfr point observations using segment and reach numbers.
Args:
sfr_out_file (`str`): the path and name of an existing SFR output file
seg_reach (varies): a dict, or list of SFR [segment,reach] pairs identifying
locations of interest. If `dict`, the key value in the dict is the base
observation name. If None, all reaches are used as individual observations.
Default is None - THIS MAY SET UP A LOT OF OBS!
model (`flopy.mbase`): a flopy model. If passed, the observation names will
have the datetime of the observation appended to them. If None, the
observation names will have the stress period appended to them. Default is None.
include_path (`bool`): a flag to prepend sfr_out_file path to sfr_obs.config. Useful
for setting up the process in a separate directory from where python is running.
Returns:
`pd.DataFrame`: a dataframe of observation names, values, and groups
Note:
This is the companion function of `gw_utils.apply_sfr_reach_obs()`.
This function writes "sfr_reach_obs.config" which must be kept in the dir where
"apply_sfr_reach_obs()" is being called during the forward run
"""
if seg_reach is None:
warnings.warn("Obs will be set up for every reach", PyemuWarning)
seg_reach = "all"
elif isinstance(seg_reach, list) or isinstance(seg_reach, np.ndarray):
if np.ndim(seg_reach) == 1:
seg_reach = [seg_reach]
assert (
np.shape(seg_reach)[1] == 2
), "varible seg_reach expected shape (n,2), received {0}".format(
np.shape(seg_reach)
)
seg_reach = pd.DataFrame(seg_reach, columns=["segment", "reach"])
seg_reach.index = seg_reach.apply(
lambda x: "s{0:03d}r{1:03d}".format(int(x.segment), int(x.reach)), axis=1
)
elif isinstance(seg_reach, dict):
seg_reach = pd.DataFrame.from_dict(
seg_reach, orient="index", columns=["segment", "reach"]
)
else:
assert isinstance(
seg_reach, pd.DataFrame
), "'selection needs to be pandas Dataframe. Type {} passed.".format(
type(seg_reach)
)
assert np.all(
[sr in seg_reach.columns for sr in ["segment", "reach"]]
), "Either 'segment' or 'reach' not in selection columns"
sfr_dict = load_sfr_out(sfr_out_file, selection=seg_reach)
kpers = list(sfr_dict.keys())
kpers.sort()
if isinstance(seg_reach, str) and seg_reach == "all":
seg_reach = sfr_dict[kpers[0]][["segment", "reach"]]
seg_reach.index = seg_reach.apply(
lambda x: "s{0:03d}r{1:03d}".format(int(x.segment), int(x.reach)), axis=1
)
keys = ["sfr_out_file"]
if include_path:
values = [os.path.split(sfr_out_file)[-1]]
else:
values = [sfr_out_file]
diff = seg_reach.loc[
seg_reach.apply(
lambda x: "{0:03d}_{1:03d}".format(int(x.segment), int(x.reach))
not in sfr_dict[list(sfr_dict.keys())[0]].index,
axis=1,
)
]
if len(diff) > 0:
for ob in diff.itertuples():
warnings.warn(
"segs,reach pair listed with onames {0} was not found: {1}".format(
ob.Index, "({},{})".format(ob.segment, ob.reach)
),
PyemuWarning,
)
seg_reach = seg_reach.drop(diff.index)
seg_reach["obs_base"] = seg_reach.index
df_key = pd.DataFrame({"obs_base": keys, "segment": 0, "reach": values})
df_key = pd.concat([df_key, seg_reach], sort=True).reset_index(drop=True)
if include_path:
pth = os.path.join(*[p for p in os.path.split(sfr_out_file)[:-1]])
config_file = os.path.join(pth, "sfr_reach_obs.config")
else:
config_file = "sfr_reach_obs.config"
print("writing 'sfr_reach_obs.config' to {0}".format(config_file))
df_key.to_csv(config_file)
bd = "."
if include_path:
bd = os.getcwd()
os.chdir(pth)
try:
df = apply_sfr_reach_obs()
except Exception as e:
os.chdir(bd)
raise Exception("error in apply_sfr_reach_obs(): {0}".format(str(e)))
os.chdir(bd)
if model is not None:
dts = (
pd.to_datetime(model.start_datetime)
+ pd.to_timedelta(np.cumsum(model.dis.perlen.array), unit="d")
).date
df.loc[:, "datetime"] = df.kper.apply(lambda x: dts[x])
df.loc[:, "time_str"] = df.datetime.apply(lambda x: x.strftime("%Y%m%d"))
else:
df.loc[:, "time_str"] = df.kper.apply(lambda x: "{0:04d}".format(x))
df.loc[:, "flaqx_obsnme"] = df.apply(
lambda x: "{0}_{1}_{2}".format("fa", x.obs_base, x.time_str), axis=1
)
df.loc[:, "flout_obsnme"] = df.apply(
lambda x: "{0}_{1}_{2}".format("fo", x.obs_base, x.time_str), axis=1
)
if ins_file is None:
ins_file = sfr_out_file + ".reach_processed.ins"
with open(ins_file, "w") as f:
f.write("pif ~\nl1\n")
for fla, flo in zip(df.flaqx_obsnme, df.flout_obsnme):
f.write("l1 w w !{0}! !{1}!\n".format(fla, flo))
df = None
pth = os.path.split(ins_file)[:-1]
pth = os.path.join(*pth)
if pth == "":
pth = "."
bd = os.getcwd()
os.chdir(pth)
try:
df = try_process_output_file(
os.path.split(ins_file)[-1], os.path.split(sfr_out_file + ".processed")[-1]
)
except Exception as e:
pass
os.chdir(bd)
if df is not None:
df.loc[:, "obsnme"] = df.index.values
df.loc[:, "obgnme"] = df.obsnme.apply(
lambda x: "flaqx" if x.startswith("fa") else "flout"
)
return df
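# Usage sketch (illustrative only - the (segment, reach) pairs are
# hypothetical; passing seg_reach=None would set up one obs per reach):
# reach_obs_df = setup_sfr_reach_obs("freyberg.sfr.out", seg_reach=[[3, 1], [5, 2]])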
def apply_sfr_reach_obs():
"""apply the sfr reach observation process.
Returns:
`pd.DataFrame`: a dataframe of sfr aquifer and outflow at segment,reach locations
Note:
This is the companion function of `gw_utils.setup_sfr_reach_obs()`.
Requires sfr_reach_obs.config.
Writes <sfr_out_file>.processed, where <sfr_out_file> is defined in
"sfr_reach_obs.config"
"""
assert os.path.exists("sfr_reach_obs.config")
df_key = pd.read_csv("sfr_reach_obs.config", index_col=0)
assert df_key.iloc[0, 0] == "sfr_out_file", df_key.iloc[0, :]
sfr_out_file = df_key.iloc[0].reach
df_key = df_key.iloc[1:, :].copy()
df_key.loc[:, "segment"] = df_key.segment.apply(np.int)
df_key.loc[:, "reach"] = df_key.reach.apply(np.int)
df_key = df_key.set_index("obs_base")
sfr_kper = load_sfr_out(sfr_out_file, df_key)
kpers = list(sfr_kper.keys())
kpers.sort()
results = []
for kper in kpers:
df = sfr_kper[kper]
for sr in df_key.itertuples():
ob = df.loc["{0:03d}_{1:03d}".format(sr.segment, sr.reach), :]
results.append([kper, sr.Index, ob["flaqx"], ob["flout"]])
df = pd.DataFrame(data=results, columns=["kper", "obs_base", "flaqx", "flout"])
df.sort_values(by=["kper", "obs_base"], inplace=True)
df.to_csv(sfr_out_file + ".reach_processed", sep=" ", index=False)
return df
def modflow_sfr_gag_to_instruction_file(
gage_output_file, ins_file=None, parse_filename=False
):
"""writes an instruction file for an SFR gage output file to read Flow only at all times
Args:
gage_output_file (`str`): the gage output filename (ASCII).
ins_file (`str`, optional): the name of the instruction file to
create. If None, the name is `gage_output_file` +".ins".
Default is None
parse_filename (`bool`): if True, get the gage_num parameter by
parsing the gage output file filename if False, get the gage
number from the file itself
Returns:
tuple containing
- **pandas.DataFrame**: a dataframe with obsnme and obsval for the sfr simulated flows.
- **str**: file name of instructions file relating to gage output.
- **str**: file name of processed gage output for all times
Note:
Sets up observations for gage outputs only for the Flow column.
If `parse_filename` is true, only text up to first '.' is used as the gage_num
"""
if ins_file is None:
ins_file = gage_output_file + ".ins"
# navigate the file to be sure the header makes sense
indat = [line.strip() for line in open(gage_output_file, "r").readlines()]
header = [i for i in indat if i.startswith('"')]
# yank out the gage number to identify the observation names
if parse_filename:
gage_num = os.path.basename(gage_output_file).split(".")[0]
else:
gage_num = re.sub(
"[^0-9]", "", indat[0].lower().split("gage no.")[-1].strip().split()[0]
)
# get the column names
cols = (
[i.lower() for i in header if "data" in i.lower()][0]
.lower()
.replace('"', "")
.replace("data:", "")
.split()
)
# make sure "Flow" is included in the columns
if "flow" not in cols:
raise Exception('Requested field "Flow" not in gage output columns')
# find which column is for "Flow"
flowidx = np.where(np.array(cols) == "flow")[0][0]
# write out the instruction file lines
inslines = [
"l1 " + (flowidx + 1) * "w " + "!g{0}_{1:d}!".format(gage_num, j)
for j in range(len(indat) - len(header))
]
inslines[0] = inslines[0].replace("l1", "l{0:d}".format(len(header) + 1))
# write the instruction file
with open(ins_file, "w") as ofp:
ofp.write("pif ~\n")
[ofp.write("{0}\n".format(line)) for line in inslines]
df = try_process_output_file(ins_file, gage_output_file)
return df, ins_file, gage_output_file
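# Usage sketch (illustrative only - "gage_1.go" is a hypothetical gage output
# file name):
# df, ins_name, out_name = modflow_sfr_gag_to_instruction_file("gage_1.go")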
def setup_gage_obs(gage_file, ins_file=None, start_datetime=None, times=None):
"""setup a forward run post processor routine for the modflow gage file
Args:
gage_file (`str`): the gage output file (ASCII)
ins_file (`str`, optional): the name of the instruction file to create. If None, the name
is `gage_file`+".processed.ins". Default is `None`
start_datetime (`str`): a `pandas.to_datetime()` compatible `str`. If not `None`,
then the resulting observation names have the datetime suffix. If `None`,
the suffix is the output totim. Default is `None`.
times ([`float`]): a container of times to make observations for. If None,
all times are used. Default is None.
Returns:
tuple containing
- **pandas.DataFrame**: a dataframe with observation name and simulated values for the
values in the gage file.
- **str**: file name of instructions file that was created relating to gage output.
- **str**: file name of processed gage output (processed according to times passed above.)
Note:
Sets up observations for gage outputs (all columns).
This is the companion function of `gw_utils.apply_gage_obs()`
"""
with open(gage_file, "r") as f:
line1 = f.readline()
gage_num = int(
re.sub("[^0-9]", "", line1.split("GAGE No.")[-1].strip().split()[0])
)
gage_type = line1.split("GAGE No.")[-1].strip().split()[1].lower()
obj_num = int(line1.replace('"', "").strip().split()[-1])
line2 = f.readline()
df = pd.read_csv(
f, delim_whitespace=True, names=line2.replace('"', "").split()[1:]
)
df.columns = [
c.lower().replace("-", "_").replace(".", "_").strip("_") for c in df.columns
]
# get unique observation ids
obs_ids = {
col: "" for col in df.columns[1:]
} # empty dictionary for observation ids
for col in df.columns[1:]: # exclude column 1 (TIME)
colspl = col.split("_")
if len(colspl) > 1:
# obs name built from "g" (for gage), "s" or "l" (for gage type), and 2 chars from the column name - date added later
obs_ids[col] = "g{0}{1}{2}".format(
gage_type[0], colspl[0][0], colspl[-1][0]
)
else:
obs_ids[col] = "g{0}{1}".format(gage_type[0], col[0:2])
with open(
"_gage_obs_ids.csv", "w"
) as f: # write file relating obs names to meaningful keys!
[f.write("{0},{1}\n".format(key, obs)) for key, obs in obs_ids.items()]
# find passed times in df
if times is None:
times = df.time.unique()
missing = []
utimes = df.time.unique()
for t in times:
if not np.isclose(t, utimes).any():
missing.append(str(t))
if len(missing) > 0:
print(df.time)
raise Exception("the following times are missing:{0}".format(",".join(missing)))
# write output times to config file
with open("gage_obs.config", "w") as f:
f.write(gage_file + "\n")
[f.write("{0:15.10E}\n".format(t)) for t in times]
# extract data for times: returns dataframe and saves a processed df - read by pest
df, obs_file = apply_gage_obs(return_obs_file=True)
utimes = df.time.unique()
for t in times:
assert np.isclose(
t, utimes
).any(), "time {0} missing in processed dataframe".format(t)
idx = df.time.apply(
lambda x: np.isclose(x, times).any()
) # boolean selector of desired times in df
if start_datetime is not None:
# convert times to usable observation times
start_datetime = pd.to_datetime(start_datetime)
df.loc[:, "time_str"] = pd.to_timedelta(df.time, unit="d") + start_datetime
df.loc[:, "time_str"] = df.time_str.apply(
lambda x: datetime.strftime(x, "%Y%m%d")
)
else:
df.loc[:, "time_str"] = df.time.apply(lambda x: "{0:08.2f}".format(x))
# set up instructions (line feed for lines without obs, i.e. times not requested)
df.loc[:, "ins_str"] = "l1\n"
df_times = df.loc[idx, :] # Slice by desired times
# TODO include GAGE No. in obs name (if permissible)
df.loc[df_times.index, "ins_str"] = df_times.apply(
lambda x: "l1 w {}\n".format(
" w ".join(
["!{0}{1}!".format(obs, x.time_str) for key, obs in obs_ids.items()]
)
),
axis=1,
)
df.index = np.arange(df.shape[0])
if ins_file is None:
ins_file = gage_file + ".processed.ins"
with open(ins_file, "w") as f:
f.write("pif ~\nl1\n")
[f.write(i) for i in df.ins_str]
df = try_process_output_file(ins_file, gage_file + ".processed")
return df, ins_file, obs_file
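# Usage sketch (illustrative only - the file name and start date are
# hypothetical). Passing start_datetime gives datetime-suffixed obs names:
# gdf, gins, gout = setup_gage_obs("gage_1.go", start_datetime="1-1-1970")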
def apply_gage_obs(return_obs_file=False):
"""apply the modflow gage obs post-processor
Args:
return_obs_file (`bool`): flag to return the processed
observation file. Default is `False`.
Note:
This is the companion function of `gw_utils.setup_gage_obs()`
"""
times = []
with open("gage_obs.config") as f:
gage_file = f.readline().strip()
for line in f:
times.append(float(line.strip()))
obs_file = gage_file + ".processed"
with open(gage_file, "r") as f:
line1 = f.readline()
gage_num = int(
re.sub("[^0-9]", "", line1.split("GAGE No.")[-1].strip().split()[0])
)
gage_type = line1.split("GAGE No.")[-1].strip().split()[1].lower()
obj_num = int(line1.replace('"', "").strip().split()[-1])
line2 = f.readline()
df = pd.read_csv(
f, delim_whitespace=True, names=line2.replace('"', "").split()[1:]
)
df.columns = [c.lower().replace("-", "_").replace(".", "_") for c in df.columns]
df = df.loc[df.time.apply(lambda x: np.isclose(x, times).any()), :]
df.to_csv(obs_file, sep=" ", index=False)
if return_obs_file:
return df, obs_file
else:
return df
def apply_hfb_pars(par_file="hfb6_pars.csv"):
"""a function to apply HFB multiplier parameters.
Args:
par_file (`str`): the HFB parameter info file.
Default is `hfb6_pars.csv`
Note:
This is the companion function to
`gw_utils.write_hfb_zone_multipliers_template()`
This is to account for the horrible HFB6 format that differs from other
BCs making this a special case
Requires "hfb_pars.csv"
Should be added to the forward_run.py script
"""
hfb_pars = pd.read_csv(par_file)
hfb_mults_contents = open(hfb_pars.mlt_file.values[0], "r").readlines()
skiprows = (
sum([1 if i.strip().startswith("#") else 0 for i in hfb_mults_contents]) + 1
)
header = hfb_mults_contents[:skiprows]
# read in the multipliers
names = ["lay", "irow1", "icol1", "irow2", "icol2", "hydchr"]
hfb_mults = pd.read_csv(
hfb_pars.mlt_file.values[0],
skiprows=skiprows,
delim_whitespace=True,
names=names,
).dropna()
# read in the original file
hfb_org = pd.read_csv(
hfb_pars.org_file.values[0],
skiprows=skiprows,
delim_whitespace=True,
names=names,
).dropna()
# multiply it out
hfb_org.hydchr *= hfb_mults.hydchr
for cn in names[:-1]:
hfb_mults[cn] = hfb_mults[cn].astype(int)
hfb_org[cn] = hfb_org[cn].astype(int)
# write the results
with open(hfb_pars.model_file.values[0], "w", newline="") as ofp:
[ofp.write("{0}\n".format(line.strip())) for line in header]
ofp.flush()
hfb_org[["lay", "irow1", "icol1", "irow2", "icol2", "hydchr"]].to_csv(
ofp, sep=" ", header=None, index=None
)
def write_hfb_zone_multipliers_template(m):
"""write a template file for an hfb using multipliers per zone (double yuck!)
Args:
m (`flopy.modflow.Modflow`): a model instance with an HFB package
Returns:
tuple containing
- **dict**: a dictionary with original unique HFB conductivity values and their
corresponding parameter names
- **str**: the template filename that was created
"""
if m.hfb6 is None:
raise Exception("no HFB package found")
# find the model file
hfb_file = os.path.join(m.model_ws, m.hfb6.file_name[0])
# this will use multipliers, so need to copy down the original
if not os.path.exists(os.path.join(m.model_ws, "hfb6_org")):
os.mkdir(os.path.join(m.model_ws, "hfb6_org"))
# copy down the original file
shutil.copy2(
os.path.join(m.model_ws, m.hfb6.file_name[0]),
os.path.join(m.model_ws, "hfb6_org", m.hfb6.file_name[0]),
)
if not os.path.exists(os.path.join(m.model_ws, "hfb6_mlt")):
os.mkdir(os.path.join(m.model_ws, "hfb6_mlt"))
# read in the model file
hfb_file_contents = open(hfb_file, "r").readlines()
# navigate the header
skiprows = (
sum([1 if i.strip().startswith("#") else 0 for i in hfb_file_contents]) + 1
)
header = hfb_file_contents[:skiprows]
# read in the data
names = ["lay", "irow1", "icol1", "irow2", "icol2", "hydchr"]
hfb_in = pd.read_csv(
hfb_file, skiprows=skiprows, delim_whitespace=True, names=names
).dropna()
for cn in names[:-1]:
hfb_in[cn] = hfb_in[cn].astype(int)
# set up a multiplier for each unique conductivity value
unique_cond = hfb_in.hydchr.unique()
hfb_mults = dict(
zip(unique_cond, ["hbz_{0:04d}".format(i) for i in range(len(unique_cond))])
)
# set up the TPL line for each parameter and assign
hfb_in["tpl"] = "blank"
for cn, cg in hfb_in.groupby("hydchr"):
hfb_in.loc[hfb_in.hydchr == cn, "tpl"] = "~{0:^10s}~".format(hfb_mults[cn])
assert "blank" not in hfb_in.tpl
# write out the TPL file
tpl_file = os.path.join(m.model_ws, "hfb6.mlt.tpl")
with open(tpl_file, "w", newline="") as ofp:
ofp.write("ptf ~\n")
[ofp.write("{0}\n".format(line.strip())) for line in header]
ofp.flush()
hfb_in[["lay", "irow1", "icol1", "irow2", "icol2", "tpl"]].to_csv(
ofp, sep=" ", quotechar=" ", header=None, index=None, mode="a"
)
# make a lookup for lining up the necessary files to
# perform multiplication with the helpers.apply_hfb_pars() function
# which must be added to the forward run script
with open(os.path.join(m.model_ws, "hfb6_pars.csv"), "w") as ofp:
ofp.write("org_file,mlt_file,model_file\n")
ofp.write(
"{0},{1},{2}\n".format(
os.path.join(m.model_ws, "hfb6_org", m.hfb6.file_name[0]),
os.path.join(
m.model_ws,
"hfb6_mlt",
os.path.basename(tpl_file).replace(".tpl", ""),
),
hfb_file,
)
)
return hfb_mults, tpl_file
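# Usage sketch (assumption): "m" must be a flopy.modflow.Modflow instance with
# an HFB6 package; the name file and workspace below are hypothetical:
# m = flopy.modflow.Modflow.load("freyberg.nam", model_ws="template", check=False)
# hfb_mults, hfb_tpl = write_hfb_zone_multipliers_template(m)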
def write_hfb_template(m):
"""write a template file for an hfb (yuck!)
Args:
m (`flopy.modflow.Modflow`): a model instance with an HFB package
Returns:
tuple containing
- **str**: name of the template file that was created
- **pandas.DataFrame**: a dataframe with useful control file info for the
HFB parameters
"""
assert m.hfb6 is not None
hfb_file = os.path.join(m.model_ws, m.hfb6.file_name[0])
assert os.path.exists(hfb_file), "couldn't find hfb_file {0}".format(hfb_file)
f_in = open(hfb_file, "r")
tpl_file = hfb_file + ".tpl"
f_tpl = open(tpl_file, "w")
f_tpl.write("ptf ~\n")
parnme, parval1, xs, ys = [], [], [], []
iis, jjs, kks = [], [], []
xc = m.sr.xcentergrid
yc = m.sr.ycentergrid
while True:
line = f_in.readline()
if line == "":
break
f_tpl.write(line)
if not line.startswith("#"):
raw = line.strip().split()
nphfb = int(raw[0])
mxfb = int(raw[1])
nhfbnp = int(raw[2])
if nphfb > 0 or mxfb > 0:
raise Exception("not supporting terrible HFB pars")
for _ in range(nhfbnp):
line = f_in.readline()
if line == "":
raise Exception("EOF")
raw = line.strip().split()
k = int(raw[0]) - 1
i = int(raw[1]) - 1
j = int(raw[2]) - 1
pn = "hb{0:02}{1:04d}{2:04}".format(k, i, j)
pv = float(raw[5])
raw[5] = "~ {0} ~".format(pn)
line = " ".join(raw) + "\n"
f_tpl.write(line)
parnme.append(pn)
parval1.append(pv)
xs.append(xc[i, j])
ys.append(yc[i, j])
iis.append(i)
jjs.append(j)
kks.append(k)
break
f_tpl.close()
f_in.close()
df = pd.DataFrame(
{
"parnme": parnme,
"parval1": parval1,
"x": xs,
"y": ys,
"i": iis,
"j": jjs,
"k": kks,
},
index=parnme,
)
df.loc[:, "pargp"] = "hfb_hydfac"
df.loc[:, "parubnd"] = df.parval1.max() * 10.0
df.loc[:, "parlbnd"] = df.parval1.min() * 0.1
return tpl_file, df
class GsfReader:
"""
a helper class to read a standard modflow-usg gsf file
Args:
gsffilename (`str`): filename
"""
def __init__(self, gsffilename):
with open(gsffilename, "r") as f:
self.read_data = f.readlines()
self.nnode, self.nlay, self.iz, self.ic = [
int(n) for n in self.read_data[1].split()
]
self.nvertex = int(self.read_data[2])
def get_vertex_coordinates(self):
"""
Returns:
Dictionary containing list of x, y and z coordinates for each vertex
"""
# vdata = self.read_data[3:self.nvertex+3]
vertex_coords = {}
for vert in range(self.nvertex):
x, y, z = self.read_data[3 + vert].split()
vertex_coords[vert + 1] = [float(x), float(y), float(z)]
return vertex_coords
def get_node_data(self):
"""
Returns:
nodedf: a pd.DataFrame containing Node information; Node, X, Y, Z, layer, numverts, vertidx
"""
node_data = []
for node in range(self.nnode):
nid, x, y, z, lay, numverts = self.read_data[
self.nvertex + 3 + node
].split()[:6]
# vertidx = {'ivertex': [int(n) for n in self.read_data[self.nvertex+3 + node].split()[6:]]}
vertidx = [
int(n) for n in self.read_data[self.nvertex + 3 + node].split()[6:]
]
node_data.append(
[
int(nid),
float(x),
float(y),
float(z),
int(lay),
int(numverts),
vertidx,
]
)
nodedf = pd.DataFrame(
node_data, columns=["node", "x", "y", "z", "layer", "numverts", "vertidx"]
)
return nodedf
def get_node_coordinates(self, zcoord=False, zero_based=False):
"""
Args:
zcoord (`bool`): flag to add z coord to coordinates. Default is False
zero_based (`bool`): flag to subtract one from the node numbers in the returned
node_coords dict. This is needed to support PstFrom. Default is False
Returns:
node_coords: Dictionary containing x and y coordinates for each node
"""
node_coords = {}
for node in range(self.nnode):
nid, x, y, z, lay, numverts = self.read_data[
self.nvertex + 3 + node
].split()[:6]
nid = int(nid)
if zero_based:
nid -= 1
node_coords[nid] = [float(x), float(y)]
if zcoord:
node_coords[nid] += [float(z)]
return node_coords
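# Usage sketch (illustrative only - "model.gsf" is a hypothetical file name):
# gsf = GsfReader("model.gsf")
# node_df = gsf.get_node_data()
# coords = gsf.get_node_coordinates(zcoord=True, zero_based=True)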
|
#
# ------------------------------------------------------------
# Copyright (c) All rights reserved
# SiLab, Institute of Physics, University of Bonn
# ------------------------------------------------------------
#
# Initial version by Chris Higgs <chris.higgs@potentialventures.com>
#
# pylint: disable=pointless-statement, expression-not-assigned
from cocotb.binary import BinaryValue
from cocotb.triggers import RisingEdge, Timer
from cocotb_bus.drivers import BusDriver
class BasilBusDriver(BusDriver):
"""Abastract away interactions with the control bus."""
_signals = ["BUS_CLK", "BUS_RST", "BUS_DATA", "BUS_ADD", "BUS_RD", "BUS_WR"]
_optional_signals = ["BUS_BYTE_ACCESS"]
def __init__(self, entity):
BusDriver.__init__(self, entity, "", entity.BUS_CLK)
# Create an appropriately sized high-impedance value
self._high_impedence = BinaryValue(n_bits=len(self.bus.BUS_DATA))
self._high_impedence.binstr = "Z" * len(self.bus.BUS_DATA)
# Create an appropriately sized unknown (X) value for the address bus
self._x = BinaryValue(n_bits=len(self.bus.BUS_ADD))
self._x.binstr = "x" * len(self.bus.BUS_ADD)
self._has_byte_acces = False
async def init(self):
# Defaults
self.bus.BUS_RST <= 1
self.bus.BUS_RD <= 0
self.bus.BUS_WR <= 0
self.bus.BUS_ADD <= self._x
self.bus.BUS_DATA <= self._high_impedence
for _ in range(8):
await RisingEdge(self.clock)
self.bus.BUS_RST <= 0
for _ in range(2):
await RisingEdge(self.clock)
# why doesn't this work? hasattr(self.bus, 'BUS_BYTE_ACCESS')
try:
getattr(self.bus, "BUS_BYTE_ACCESS")
except Exception:
self._has_byte_acces = False
else:
self._has_byte_acces = True
async def read(self, address, size):
result = []
self.bus.BUS_DATA <= self._high_impedence
self.bus.BUS_ADD <= self._x
self.bus.BUS_RD <= 0
await RisingEdge(self.clock)
byte = 0
while byte <= size:
if byte == size:
self.bus.BUS_RD <= 0
else:
self.bus.BUS_RD <= 1
self.bus.BUS_ADD <= address + byte
await RisingEdge(self.clock)
if byte != 0:
if self._has_byte_acces and self.bus.BUS_BYTE_ACCESS.value.integer == 0:
result.append(self.bus.BUS_DATA.value.integer & 0x000000FF)
result.append((self.bus.BUS_DATA.value.integer & 0x0000FF00) >> 8)
result.append((self.bus.BUS_DATA.value.integer & 0x00FF0000) >> 16)
result.append((self.bus.BUS_DATA.value.integer & 0xFF000000) >> 24)
else:
# result.append(self.bus.BUS_DATA.value[24:31].integer & 0xff)
if len(self.bus.BUS_DATA.value) == 8:
result.append(self.bus.BUS_DATA.value.integer & 0xFF)
else:
result.append(self.bus.BUS_DATA.value[24:31].integer & 0xFF)
if self._has_byte_acces and self.bus.BUS_BYTE_ACCESS.value.integer == 0:
byte += 4
else:
byte += 1
self.bus.BUS_ADD <= self._x
self.bus.BUS_DATA <= self._high_impedence
await RisingEdge(self.clock)
return result
async def write(self, address, data):
self.bus.BUS_ADD <= self._x
self.bus.BUS_DATA <= self._high_impedence
self.bus.BUS_WR <= 0
await RisingEdge(self.clock)
for index, byte in enumerate(data):
self.bus.BUS_DATA <= byte
self.bus.BUS_WR <= 1
self.bus.BUS_ADD <= address + index
await Timer(1) # This is a hack for iverilog
self.bus.BUS_DATA <= byte
self.bus.BUS_WR <= 1
self.bus.BUS_ADD <= address + index
await RisingEdge(self.clock)
if self._has_byte_acces and self.bus.BUS_BYTE_ACCESS.value.integer == 0:
raise NotImplementedError("BUS_BYTE_ACCESS for write to be implemented.")
self.bus.BUS_DATA <= self._high_impedence
self.bus.BUS_ADD <= self._x
self.bus.BUS_WR <= 0
await RisingEdge(self.clock)
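# Usage sketch (assumption, not part of this driver): inside a cocotb test the
# driver is constructed from the DUT handle and initialized before any access:
# @cocotb.test()
# async def run_test(dut):
#     bus = BasilBusDriver(dut)
#     await bus.init()
#     await bus.write(0x0000, [0x01, 0x02])
#     data = await bus.read(0x0000, 2)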
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Ole Krause-Sparmann
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy
import scipy
from nearpy.distances.distance import Distance
class CosineDistance(Distance):
""" Uses 1-cos(angle(x,y)) as distance measure. """
def distance(self, x, y):
"""
Computes distance measure between vectors x and y. Returns float.
"""
if scipy.sparse.issparse(x):
x = x.toarray().ravel()
y = y.toarray().ravel()
return 1.0 - numpy.dot(x, y) / (numpy.linalg.norm(x) *
numpy.linalg.norm(y))
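# Usage sketch (values are illustrative): orthogonal dense vectors give a
# distance of 1.0, identical vectors give 0.0:
# d = CosineDistance().distance(numpy.array([1.0, 0.0]), numpy.array([0.0, 1.0]))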
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.plant import PlantEquipmentOperationUncontrolled
log = logging.getLogger(__name__)
class TestPlantEquipmentOperationUncontrolled(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_plantequipmentoperationuncontrolled(self):
pyidf.validation_level = ValidationLevel.error
obj = PlantEquipmentOperationUncontrolled()
# alpha
var_name = "Name"
obj.name = var_name
# object-list
var_equipment_list_name = "object-list|Equipment List Name"
obj.equipment_list_name = var_equipment_list_name
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.plantequipmentoperationuncontrolleds[0].name, var_name)
self.assertEqual(idf2.plantequipmentoperationuncontrolleds[0].equipment_list_name, var_equipment_list_name)
|
'''
In this code we'll have a bunch of examples you can use at your own discretion.
Simply remove the three ' marks above and below the code you want in order to run it, while
leaving the text within a new set of three ' marks.
Once that's done, go to your Terminal, navigate to where this code and the twitter_follow_bot
code is (they have to be in the same folder), and just type in "python sample_twitter_codes.py" (without quotes)
WARNING: Following too many people, favoriting too many things, CAN and WILL get you banned.
Be smart. And have fun :).
Justin and Nat
'''
'''
#1 Here you can automatically follow people who tweet about a certain phrase. Just replace the phrase
with something relevant to you! Also you can set the count to whatever makes you most comfortable.
'''
#from twitter_follow_bot import auto_follow
#auto_follow("sharing economy 2.0", count=100)
'''
#2 In this code, change "jwmares" to the twitter handle whose followers you want to follow,
and set the count to how many people should be followed. Default is 100.
'''
#from twitter_follow_bot import auto_follow_followers_for_user
#auto_follow_followers_for_user("@InnoCentive", count=100)
'''
#3 This code will let you favourite things that are relevant to you. Just replace "phrase" with the phrase
you want to favorite for, and set the count to how many things you want to favorite.
'''
#from twitter_follow_bot import auto_fav
#auto_fav("#openinnovation", count=100)
'''
#4 This code will automatically un-follow everyone who hasn't followed you back.
from twitter_follow_bot import auto_unfollow_nonfollowers
auto_unfollow_nonfollowers()
'''
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__title__ = "echobot"
__version__ = "0.0.11"
__author__ = "Anton Batenev"
__license__ = "BSD"
import sys
import os
import time
import re
from pytoxcore import ToxCore
# PEP-8
try:
import configparser as EchoBotConfigParser
except ImportError:
import ConfigParser as EchoBotConfigParser
# PEP-469
try:
dict.iteritems
except AttributeError:
def itervalues(d):
return iter(d.values())
def iteritems(d):
return iter(d.items())
def listvalues(d):
return list(d.values())
def listitems(d):
return list(d.items())
else:
def itervalues(d):
return d.itervalues()
def iteritems(d):
return d.iteritems()
def listvalues(d):
return d.values()
def listitems(d):
return d.items()
class EchoBotOptions(object):
"""
Application options
"""
def __init__(self, options):
"""
Arguments:
options (dict) -- application options
"""
self.debug = self._bool(options["debug"])
self.verbose = self._bool(options["verbose"]) or self.debug
self.name = str(options["name"])
self.status_message = str(options["status_message"])
self.avatar = str(options["avatar"])
self.save_file = str(options["save_file"])
self.save_tmp_file = str(options["save_tmp_file"])
self.save_interval = int(options["save_interval"])
self.bootstrap_host = str(options["bootstrap_host"])
self.bootstrap_port = int(options["bootstrap_port"])
self.bootstrap_key = str(options["bootstrap_key"])
self.ipv6_enabled = self._bool(options["ipv6_enabled"])
self.udp_enabled = self._bool(options["udp_enabled"])
proxy_type = options["proxy_type"].lower()
if len(proxy_type) == 0:
self.proxy_type = ToxCore.TOX_PROXY_TYPE_NONE
elif proxy_type == "http":
self.proxy_type = ToxCore.TOX_PROXY_TYPE_HTTP
elif proxy_type == "socks":
self.proxy_type = ToxCore.TOX_PROXY_TYPE_SOCKS5
else:
raise ValueError("Unknown proxy type: {0}".format(options["proxy_type"]))
self.proxy_host = str(options["proxy_host"])
self.proxy_port = int(options["proxy_port"])
self.start_port = int(options["start_port"])
self.end_port = int(options["end_port"])
self.tcp_port = int(options["tcp_port"])
self.accept_avatars = self._bool(options["accept_avatars"])
self.max_avatar_size = int(options["max_avatar_size"])
self.avatars_path = str(options["avatars_path"])
self.accept_files = self._bool(options["accept_files"])
self.max_file_size = int(options["max_file_size"])
self.files_path = str(options["files_path"])
def __repr__(self):
return "{0!s}({1!r})".format(self.__class__, self.__dict__)
@staticmethod
def _bool(value):
"""
Преобразование строкового значения к булевому
Аргументы:
value (str|bool) -- Строковое представление булева значения
Результат (bool):
Результат преобразования строкового значения к булеву - [true|yes|t|y|1] => True, иначе False
"""
if type(value) is bool:
return value
value = value.lower().strip()
if value == "true" or value == "yes" or value == "t" or value == "y" or value == "1":
return True
return False
@staticmethod
def defaultOptions():
"""
Опции по умолчанию
Результат (dict):
Словарь опций по умолчанию
"""
tox_opts = ToxCore.tox_options_default()
options = {
"debug" : "yes",
"verbose" : "yes",
"name" : "EchoBot",
"status_message" : "Think Safety",
"avatar" : "echobot.png",
"save_file" : "echobot.data",
"save_tmp_file" : "echobot.data.tmp",
"save_interval" : "300",
"bootstrap_host" : "178.62.250.138", # https://wiki.tox.chat/users/nodes
"bootstrap_port" : "33445",
"bootstrap_key" : "788236D34978D1D5BD822F0A5BEBD2C53C64CC31CD3149350EE27D4D9A2F9B6B",
"ipv6_enabled" : "yes" if tox_opts["ipv6_enabled"] else "no",
"udp_enabled" : "yes" if tox_opts["udp_enabled"] else "no",
"proxy_type" : "",
"proxy_host" : "" if tox_opts["proxy_host"] is None else tox_opts["proxy_host"],
"proxy_port" : str(tox_opts["proxy_port"]),
"start_port" : str(tox_opts["start_port"]),
"end_port" : str(tox_opts["end_port"]),
"tcp_port" : str(tox_opts["tcp_port"]),
"accept_avatars" : "no",
"max_avatar_size" : "0",
"avatars_path" : "",
"accept_files" : "no",
"max_file_size" : "0",
"files_path" : "",
}
if tox_opts["proxy_type"] == ToxCore.TOX_PROXY_TYPE_SOCKS5:
options["proxy_type"] = "socks"
elif tox_opts["proxy_type"] == ToxCore.TOX_PROXY_TYPE_HTTP:
options["proxy_type"] = "http"
elif tox_opts["proxy_type"] != ToxCore.TOX_PROXY_TYPE_NONE:
raise NotImplementedError("Unknown proxy_type: {0}".format(tox_opts["proxy_type"]))
return options
@staticmethod
def loadOptions(filename, options = None):
"""
Чтение секции echobot INI файла echobot.cfg
Аргументы:
filename (str) -- Имя INI файла
options (dict) -- Базовая конфигурация
Результат (dict):
Конфигурация приложения на основе файла конфигурации
"""
if options is None:
options = EchoBotOptions.defaultOptions()
options = options.copy()
parser = EchoBotConfigParser.ConfigParser()
parser.read(filename)
for section in parser.sections():
name = section.lower()
if name == "echobot":
for option in parser.options(section):
options[option.lower()] = parser.get(section, option).strip()
return options
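# Illustrative sample configuration (a hedged sketch, not part of the original bot):
# EchoBotOptions.loadOptions() above reads the [echobot] section of an INI file whose
# keys mirror EchoBotOptions.defaultOptions(); the values below are placeholders.
#
#   [echobot]
#   name = EchoBot
#   status_message = Think Safety
#   bootstrap_host = 178.62.250.138
#   bootstrap_port = 33445
#   accept_files = yes
#   max_file_size = 1048576
#   files_path = /tmp/echobot-files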
class EchoBotFile(object):
"""
Описатель файла
"""
def __init__(self):
self.fd = None
self.write = False
self.read = False
self.size = 0
self.position = 0
self.path = ""
self.name = ""
self.id = ""
self.kind = ToxCore.TOX_FILE_KIND_DATA
class EchoBot(ToxCore):
"""
Бот
"""
def __init__(self, options):
"""
Аргументы:
options (EchoBotOptions) -- Опции приложения
"""
self.options = options
tox_opts = {
"ipv6_enabled" : self.options.ipv6_enabled,
"udp_enabled" : self.options.udp_enabled,
"proxy_type" : self.options.proxy_type,
"proxy_host" : self.options.proxy_host,
"proxy_port" : self.options.proxy_port,
"start_port" : self.options.start_port,
"end_port" : self.options.end_port,
"tcp_port" : self.options.tcp_port
}
if os.path.isfile(self.options.save_file):
self.debug("Load data from file: {0}".format(self.options.save_file))
with open(self.options.save_file, "rb") as f:
tox_opts["savedata_data"] = f.read()
super(EchoBot, self).__init__(tox_opts)
self.debug("Set self name: {0}".format(self.options.name))
self.tox_self_set_name(self.options.name)
self.debug("Set self status: {0}".format(self.options.status_message))
self.tox_self_set_status_message(self.options.status_message)
self.debug("Get self ToxID: {0}".format(self.tox_self_get_address()))
        # active file operations { friend_id: { file_id: EchoBotFile } }
self.files = {}
def debug(self, message):
"""
Вывод отладочной информации
Аргументы:
message (str) -- Сообщение для вывода
"""
if self.options.debug:
sys.stderr.write("[{0}] {1}\n".format(time.strftime("%Y-%m-%d %H:%M:%S"), message))
def verbose(self, message):
"""
Вывод расширенной информации
Аргументы:
message (str) -- Сообщение для вывода
"""
if self.options.verbose:
sys.stderr.write("[{0}] {1}\n".format(time.strftime("%Y-%m-%d %H:%M:%S"), message))
def run(self):
"""
Рабочий цикл
"""
self.debug("Connecting to: {0} {1} {2}".format(self.options.bootstrap_host, self.options.bootstrap_port, self.options.bootstrap_key))
self.tox_bootstrap(self.options.bootstrap_host, self.options.bootstrap_port, self.options.bootstrap_key)
self.debug("Connected to: {0} {1} {2}".format(self.options.bootstrap_host, self.options.bootstrap_port, self.options.bootstrap_key))
checked = False
savetime = 0
save_interval = self.options.save_interval * 1000
while True:
status = self.tox_self_get_connection_status()
if not checked and status != ToxCore.TOX_CONNECTION_NONE:
checked = True
if checked and status == ToxCore.TOX_CONNECTION_NONE:
self.debug("Connecting to: {0} {1} {2}".format(self.options.bootstrap_host, self.options.bootstrap_port, self.options.bootstrap_key))
self.tox_bootstrap(self.options.bootstrap_host, self.options.bootstrap_port, self.options.bootstrap_key)
self.debug("Connected to: {0} {1} {2}".format(self.options.bootstrap_host, self.options.bootstrap_port, self.options.bootstrap_key))
checked = False
self.tox_iterate()
interval = self.tox_iteration_interval()
time.sleep(interval / 1000.0)
savetime += interval
if savetime > save_interval:
self.save_file()
savetime = 0
def save_file(self):
"""
Сохранение данных
"""
self.debug("Save data to file: {0}".format(self.options.save_tmp_file))
with open(self.options.save_tmp_file, "wb") as f:
            f.write(self.tox_get_savedata())
self.debug("Move data to file: {0}".format(self.options.save_file))
os.rename(self.options.save_tmp_file, self.options.save_file)
def send_avatar(self, friend_number):
"""
Отправка аватара
Агрументы:
friend_number (int) -- Номер друга
"""
if len(self.options.avatar) == 0 or not os.path.isfile(self.options.avatar):
return
friend_name = self.tox_friend_get_name(friend_number)
self.verbose("Send avatar to {0}/{1}".format(friend_name, friend_number))
f = EchoBotFile()
f.kind = ToxCore.TOX_FILE_KIND_AVATAR
f.size = os.path.getsize(self.options.avatar)
f.read = True
f.path = self.options.avatar
f.fd = open(f.path, "rb")
data = f.fd.read()
f.fd.seek(0, 0)
f.id = ToxCore.tox_hash(data)
f.name = f.id
file_number = self.tox_file_send(friend_number, ToxCore.TOX_FILE_KIND_AVATAR, f.size, f.id, f.name)
if friend_number not in self.files:
self.files[friend_number] = {}
self.files[friend_number][file_number] = f
def send_file(self, friend_number, path, name = None):
"""
Отправка файла
Аргументы:
friend_number (int) -- Номер друга
path (str) -- Путь к файлу
name (str) -- Имя файла (опционально, если не совпадает с именем из path)
"""
if not os.path.isfile(path):
return
friend_name = self.tox_friend_get_name(friend_number)
if name is not None:
self.verbose("Send file {0} as {1} to {2}/{3}".format(path, name, friend_name, friend_number))
else:
self.verbose("Send file {0} to {1}/{2}".format(path, friend_name, friend_number))
f = EchoBotFile()
f.kind = ToxCore.TOX_FILE_KIND_DATA
f.size = os.path.getsize(path)
f.read = True
f.path = path
f.fd = open(f.path, "rb")
f.name = name
if f.name is None:
f.name = os.path.basename(f.path)
file_number = self.tox_file_send(friend_number, ToxCore.TOX_FILE_KIND_DATA, f.size, None, f.name)
f.id = self.tox_file_get_file_id(friend_number, file_number)
if friend_number not in self.files:
self.files[friend_number] = {}
self.files[friend_number][file_number] = f
def tox_self_connection_status_cb(self, connection_status):
"""
Изменение состояния соединения
Аргументы:
connection_status (int) -- Статус
"""
if connection_status == ToxCore.TOX_CONNECTION_NONE:
self.debug("Disconnected from DHT")
elif connection_status == ToxCore.TOX_CONNECTION_TCP:
self.debug("Connected to DHT via TCP")
elif connection_status == ToxCore.TOX_CONNECTION_UDP:
self.debug("Connected to DHT via UDP")
else:
raise NotImplementedError("Unknown connection_status: {0}".format(connection_status))
def tox_friend_request_cb(self, public_key, message):
"""
Запрос на добавление в друзья
Аргументы:
public_key (str) -- Публичный ключ друга
message (str) -- Сообщение запроса для добавления в друзья
"""
self.verbose("Friend request from {0}: {1}".format(public_key, message))
self.tox_friend_add_norequest(public_key)
self.verbose("Friend request from {0}: accepted".format(public_key))
def tox_friend_connection_status_cb(self, friend_number, connection_status):
"""
Изменение состояния соединения друга
Агрументы:
friend_number (int) -- Номер друга
connection_status (int) -- Статус соединения друга (см. enum TOX_CONNECTION)
"""
friend_name = self.tox_friend_get_name(friend_number)
if connection_status == ToxCore.TOX_CONNECTION_NONE:
self.verbose("Friend {0}/{1} is offline".format(friend_name, friend_number))
if friend_number in self.files:
for f in itervalues(self.files[friend_number]):
f.fd.close()
del self.files[friend_number]
elif connection_status == ToxCore.TOX_CONNECTION_TCP:
self.verbose("Friend {0}/{1} connected via TCP".format(friend_name, friend_number))
elif connection_status == ToxCore.TOX_CONNECTION_UDP:
self.verbose("Friend {0}/{1} connected via UDP".format(friend_name, friend_number))
else:
raise NotImplementedError("Unknown connection_status: {0}".format(connection_status))
if connection_status == ToxCore.TOX_CONNECTION_TCP or connection_status == ToxCore.TOX_CONNECTION_UDP:
self.send_avatar(friend_number)
def tox_friend_name_cb(self, friend_number, name):
"""
Смена имени друга
Аргументы:
friend_number (int) -- Номер друга
name (str) -- Новое имя
"""
self.verbose("Friend name change {0}/{1}".format(name, friend_number))
def tox_friend_status_message_cb(self, friend_number, message):
"""
Смена сообщения статуса
Аргументы:
friend_number (int) -- Номер друга
message (str) -- Сообщение статуса
"""
friend_name = self.tox_friend_get_name(friend_number)
self.verbose("Friend status message change {0}/{1}: {2}".format(friend_name, friend_number, message))
def tox_friend_status_cb(self, friend_number, status):
"""
Смена статуса
Аргументы:
friend_number (int) -- Номер друга
status (int) -- Статус (см. TOX_USER_STATUS)
"""
friend_name = self.tox_friend_get_name(friend_number)
        if status == ToxCore.TOX_USER_STATUS_NONE:
self.verbose("Friend {0}/{1} is online now".format(friend_name, friend_number))
elif status == ToxCore.TOX_USER_STATUS_AWAY:
self.verbose("Friend {0}/{1} is away now".format(friend_name, friend_number))
elif status == ToxCore.TOX_USER_STATUS_BUSY:
self.verbose("Friend {0}/{1} is busy now".format(friend_name, friend_number))
else:
raise NotImplementedError("Unknown status: {0}".format(status))
def tox_friend_message_cb(self, friend_number, message):
"""
Сообщение от друга
Аргументы:
friend_number (int) -- Номер друга
message (str) -- Сообщение
"""
friend_name = self.tox_friend_get_name(friend_number)
self.verbose("Message from {0}/{1}: {2}".format(friend_name, friend_number, message))
message_id = self.tox_friend_send_message(friend_number, ToxCore.TOX_MESSAGE_TYPE_NORMAL, message)
self.verbose("Message {0} to {1}/{2}: {3}".format(message_id, friend_name, friend_number, message))
def tox_friend_read_receipt_cb(self, friend_number, message_id):
"""
Квитанция о доставке сообщения
Аргументы:
friend_number (int) -- Номер друга
message_id (int) -- ID сообщения
"""
friend_name = self.tox_friend_get_name(friend_number)
self.verbose("Message receipt {0} from {1}/{2}".format(message_id, friend_name, friend_number))
def can_accept_file(self, friend_number, file_number, kind, file_size, filename):
"""
Проверка, что файл можно принимать
Аргументы:
friend_number (int) -- Номер друга
file_number (int) -- Номер файла (случайный номер в рамках передачи)
kind (int) -- Значение файла (см. TOX_FILE_KIND)
file_size (int) -- Размер файла
filename (str) -- Имя файла
Результат (bool):
Флаг разрешения на принятие файла
"""
        # a stream?
if file_size <= 0:
return False
        # limit the number of simultaneous file transfers to 10 in each direction
if friend_number in self.files and len(self.files[friend_number]) >= 20:
return False
if kind == ToxCore.TOX_FILE_KIND_DATA:
return (
self.options.accept_files and
(self.options.max_file_size == 0 or file_size <= self.options.max_file_size) and
(os.path.isdir(self.options.files_path)))
elif kind == ToxCore.TOX_FILE_KIND_AVATAR:
return (
self.options.accept_avatars and
                (self.options.max_avatar_size == 0 or file_size <= self.options.max_avatar_size) and
(os.path.isdir(self.options.avatars_path)))
raise NotImplementedError("Unknown kind: {0}".format(kind))
def tox_file_recv_cb(self, friend_number, file_number, kind, file_size, filename):
"""
Получение файла
(см. tox_file_recv_cb)
Аргументы:
friend_number (int) -- Номер друга
file_number (int) -- Номер файла (случайный номер в рамках передачи)
kind (int) -- Значение файла (см. TOX_FILE_KIND)
file_size (int) -- Размер файла
filename (str) -- Имя файла
"""
friend_name = self.tox_friend_get_name(friend_number)
if kind == ToxCore.TOX_FILE_KIND_DATA:
file_id = self.tox_file_get_file_id(friend_number, file_number)
self.verbose("File from {0}/{1}: number = {2}, size = {3}, id = {4}, name = {5}".format(friend_name, friend_number, file_number, file_size, file_id, filename))
elif kind == ToxCore.TOX_FILE_KIND_AVATAR:
if file_size != 0:
file_id = self.tox_file_get_file_id(friend_number, file_number)
self.verbose("Avatar from {0}/{1}: number = {2}, size = {3}, id = {4}".format(friend_name, friend_number, file_number, file_size, file_id))
else:
self.verbose("No Avatar from {0}/{1}: number = {2}".format(friend_name, friend_number, file_number))
else:
raise NotImplementedError("Unknown kind: {0}".format(kind))
if self.can_accept_file(friend_number, file_number, kind, file_size, filename):
f = EchoBotFile()
f.kind = kind
f.size = file_size
f.write = True
f.name = filename
f.id = file_id
if f.kind == ToxCore.TOX_FILE_KIND_DATA:
f.path = self.options.files_path + "/" + f.id
elif f.kind == ToxCore.TOX_FILE_KIND_AVATAR:
f.path = self.options.avatars_path + "/" + f.id
f.fd = open(f.path, "wb")
if friend_number not in self.files:
self.files[friend_number] = {}
self.files[friend_number][file_number] = f
self.tox_file_control(friend_number, file_number, ToxCore.TOX_FILE_CONTROL_RESUME)
else:
self.tox_file_control(friend_number, file_number, ToxCore.TOX_FILE_CONTROL_CANCEL)
def tox_file_recv_control_cb(self, friend_number, file_number, control):
"""
Контроль получения файла
(см. tox_file_recv_control_cb)
Аргументы:
friend_number (int) -- Номер друга
file_number (int) -- Номер файла (случайный номер в рамках передачи)
control (int) -- Полученная команда контроля (см. TOX_FILE_CONTROL)
"""
friend_name = self.tox_friend_get_name(friend_number)
if control == ToxCore.TOX_FILE_CONTROL_RESUME:
self.verbose("File resumed from {0}/{1}: number = {2}".format(friend_name, friend_number, file_number))
elif control == ToxCore.TOX_FILE_CONTROL_PAUSE:
self.verbose("File paused from {0}/{1}: number = {2}".format(friend_name, friend_number, file_number))
elif control == ToxCore.TOX_FILE_CONTROL_CANCEL:
self.verbose("File canceled from {0}/{1}: number = {2}".format(friend_name, friend_number, file_number))
if friend_number in self.files and file_number in self.files[friend_number]:
self.files[friend_number][file_number].fd.close()
del self.files[friend_number][file_number]
else:
raise NotImplementedError("Unknown control: {0}".format(control))
def tox_file_recv_chunk_cb(self, friend_number, file_number, position, data):
"""
Получение чанка данных при приеме
(см. tox_file_recv_chunk_cb)
Аргументы:
friend_number (int) -- Номер друга
file_number (int) -- Номер файла (случайный номер в рамках передачи)
position (int) -- Номер позиции
data (str) -- Данные
"""
if friend_number not in self.files:
return
if file_number not in self.files[friend_number]:
return
f = self.files[friend_number][file_number]
        if not f.write:
return
if f.position != position:
f.fd.seek(position, 0)
f.position = position
if data is not None:
f.fd.write(data)
length = len(data)
f.position += length
else:
length = 0
if length == 0 or f.position > f.size:
f.fd.close()
del self.files[friend_number][file_number]
if f.kind == ToxCore.TOX_FILE_KIND_DATA:
self.send_file(friend_number, f.path, f.name)
else:
self.files[friend_number][file_number] = f
def tox_file_chunk_request_cb(self, friend_number, file_number, position, length):
"""
Запрос чанка данных для передачи
(см. tox_file_chunk_request_cb)
Аргументы:
friend_number (int) -- Номер друга
file_number (int) -- Номер файла (случайный номер в рамках передачи)
position (int) -- Номер позиции
length (str) -- Требуемая длина чанка
"""
if friend_number not in self.files:
return
if file_number not in self.files[friend_number]:
return
f = self.files[friend_number][file_number]
        if not f.read:
return
if length == 0:
f.fd.close()
del self.files[friend_number][file_number]
return
if f.position != position:
f.fd.seek(position, 0)
f.position = position
data = f.fd.read(length)
f.position += len(data)
self.files[friend_number][file_number] = f
self.tox_file_send_chunk(friend_number, file_number, position, data)
if __name__ == "__main__":
regexp = re.compile("--config=(.*)")
cfgfile = [match.group(1) for arg in sys.argv for match in [regexp.search(arg)] if match]
if len(cfgfile) == 0:
cfgfile = "echobot.cfg"
else:
cfgfile = cfgfile[0]
options = EchoBotOptions(EchoBotOptions.loadOptions(cfgfile))
bot = EchoBot(options)
try:
bot.run()
except KeyboardInterrupt:
bot.save_file()
|
#!/usr/bin/env python
import os
import sys
import django
import random
randomUser = lambda: get_user_model().objects.all()[random.randrange(0, get_user_model().objects.count())]
rr = lambda x, y: random.randrange(x, y)
def endall():
    # Delete Recipes.
for obj in Recipe.objects.all():
obj.delete()
# Delete Users.
for obj in get_user_model().objects.all():
obj.delete()
def makeUser(n, e, p):
try:
usr = get_user_model().objects.get(username = n)
return usr
except get_user_model().DoesNotExist:
return get_user_model().objects.create_user(username = n, email = e, password = p)
def makeRecipe(author = None, title = None, slug = None, ingredients = None, method = None, notes = None, tags = None):
y = rr(1, 10)
p = rr(1, 15)
c = rr(15, 50)
s = Recipe.objects.create(author = author, title = title, ingredients = ingredients, method = method, yieldAmt = y, prepTime = p, cookTime = c, notes = notes)
for t in tags:
s.tags.add(t)
s.save()
## title, slug, ingredients, method, yieldAmt, prepTime, cookTime, notes, author
def main():
# Make Users.
Emily = makeUser("emily", "emily@gmail.com", "emily")
Ramin = makeUser("ramin", "ramin@gmail.com", "ramin")
Tyler = makeUser("tyler", "tyler@gmail.com", "tyler")
Bella = makeUser("bella", "bella@gmail.com", "bella")
Jessica = makeUser("jessica", "jessica@gmail.com", "jessica")
Freddy = makeUser("freddy", "freddy@gmail.com", "freddy")
Drogo = get_user_model().objects.create_superuser(username = "drogo", email = "drogo@gmail.com", password = "drogo")
Harry = get_user_model().objects.create_superuser(username = "harry", email = "harry@gmail.com", password = "harry")
###################################
In_r1 = {
"1":"8 cups finely diced cabbage",
"2":"1/4 cup diced carrots",
"3":"2 tablespoons minced onions",
"4":"1/3 cup granulated sugar",
"5":"1/2 teaspoon salt",
"6":"1/8 teaspoon pepper",
"7":"1/4 cup milk",
"8":"1/2 cup mayonnaise",
"9":"1/4 cup buttermilk",
"10":"1 1/2 tablespoons white vinegar",
"11":"2 1/2 tablespoons lemon juice"
}
Md_r1 = {
"1":"Cabbage and carrots must be finely diced. (I use fine shredder disc on food processor).",
"2":"Pour cabbage and carrot mixture into large bowl and stir in minced onions.",
"3":"Using regular blade on food processor process remaining ingredients until smooth.",
"4":"Pour over vegetable mixture and mix thoroughly."
}
N_r1 = "Cover bowl and refrigerate several hours or overnight before serving."
makeRecipe(
title = "KFC Coleslaw",
ingredients = In_r1,
method = Md_r1,
notes = N_r1,
author = randomUser(),
tags = ["coleslaw", "salad", "dinner", "lettuce"]
)
#--------------------------------------------------------
In_r1 = {
"1":"1 cup old fashioned rolled oats",
"2":"2 cups water",
"3":"sea salt to taste",
"4":"1/2 tsp cinnamon",
"5":"1/4 cup raisins",
"6":"1/4 cup sliced almonds",
"7":"1 cup skim milk",
"8":"1 TBS blackstrap molasses",
}
Md_r1 = {
"1":"Bring the water and salt to a boil in a saucepan, then turn the heat to low and add the oats.",
"2":"Cook for about 5 minutes, stirring regularly so that the oatmeal will not clump together.",
"3":"Add cinnamon, raisins and almonds, stir, cover the pan and turn off heat.",
"4":"Let sit for 5 minutes.",
}
N_r1 = "Serve with milk and sweetener."
makeRecipe(
title = "Five Minute Energizing Oatmeal",
ingredients = In_r1,
method = Md_r1,
notes = N_r1,
author = randomUser(),
tags = ["oatmeal", "energy", "food", "yes"]
)
#--------------------------------------------------------
In_r1 = {
"1":"2 omega-3-rich eggs",
"2":"1/2 can black beans, drained and mashed",
"3":"1 TBS extra virgin olive oil",
"4":"1 tsp lemon juice",
"5":"sea salt and pepper, to taste",
"6":"1/4 avocado, sliced",
"7":"salsa from a jar, to taste",
"8":"3 TBS grated low-fat cheddar cheese",
"9":"chopped cilantro, to taste",
}
Md_r1 = {
"1":"Poach eggs.",
"2":"Heat beans in a skillet while eggs are cooking.",
"3":"Remove beans from heat and mix in olive oil, lemon juice, salt and pepper Add a pinch of cayenne for spicy beans.",
"4":"Place beans on plate, top with poached eggs, avocado, salsa, cheese and cilantro",
}
N_r1 = "Serve with milk and sweetener."
makeRecipe(
title = "Two Minute Huevos Rancheros",
ingredients = In_r1,
method = Md_r1,
notes = N_r1,
author = randomUser(),
tags = ["huevos", "ranchero", "mexican", "eggs"]
)
#--------------------------------------------------------
In_r1 = {
"1":"3 eggs warmed in hot water for 5 minutes",
"2":"pinch salt",
"3":"1 teaspoon room temperature butter, plus 1/2 teaspoon for finishing omelet",
"4":"1/2 teaspoon fresh chopped chives",
}
Md_r1 = {
"1":"Crack the warm eggs into a bowl, add salt and blend with a fork. Heat a 10-inch nonstick aluminum pan over medium-high heat. Once the pan is hot, add the butter and brush around the surface of the pan. Pour the eggs into the center of the pan and stir vigorously with a rubber spatula for 5 seconds.",
"2":"As soon as a semi-solid mass begins to form, lift the pan and move it around until the excess liquid pours off into the pan. Using your spatula, move it around the edge of the egg mixture to help shape into a round and loosen the edge. Let the omelet sit in the pan for 10 seconds without touching",
"3":"Shake the pan to loosen from the pan. Lift up the far edge of the pan and snap it back toward you. Using your spatula, fold over one-third of the omelet. Slide the omelet onto a plate and fold over so that the omelet is a tri-fold.",
"4":"Coat with the remaining butter and sprinkle with the chives. Serve immediately.",
}
N_r1 = "Add scallions"
makeRecipe(
title = "Omelet",
ingredients = In_r1,
method = Md_r1,
notes = N_r1,
author = randomUser(),
tags = ["omelet", "tasty", "peppers", "eggs"]
)
#--------------------------------------------------------
In_r1 = {
"1":"11 cup high fiber cereal",
"2":"1 cup blueberries",
"3":"2 tsp blackstrap molasses",
"4":"1/2 cup skim milk or dairy-free milk alternative",
}
Md_r1 = {
"1":"Combine all ingredients and enjoy!",
}
N_r1 = ""
makeRecipe(
title = "High Fiber Cereal",
ingredients = In_r1,
method = Md_r1,
notes = N_r1,
author = randomUser(),
tags = ["cereal", "oats", "fiber", "health"]
)
#--------------------------------------------------------
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Proj.settings")
django.setup()
from myrecipe.models import Recipe
from django.contrib.auth import get_user_model
if 'x' in sys.argv[1:]:
print "Clearing database...\n"
endall()
main()
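# Illustrative usage (a hedged sketch; the script file name below is a placeholder, not
# from the original project). Passing 'x' as an argument clears existing recipes and
# users via endall() before main() reseeds the database:
#
#   python seed_recipes.py x
#   python seed_recipes.py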
|
# coding=utf-8
"""
OBJETO
Maneja objetos del mundo, como pistas y decoraciones, dibuja a los elementos
en pantalla.
Autor: PABLO PIZARRO @ppizarror
Fecha: ABRIL 2015
"""
if __name__ == '__main__':
# noinspection PyUnresolvedReferences
from path import *
class Gameobject(object):
"""Objetos del juego"""
def __init__(self, texture, _type, pos):
"""
Función constructora.
:param texture: Textura del objeto
:param _type: Tipo del objeto
:param pos: Posición (x,y) del objeto
:return: void
"""
self.texture = texture
        self.type = _type  # 1: track, 2: decoration, 3: obstacle
self.pos = pos
self.rect = self.texture.get_rect()
self.width, self.height = self.texture.get_size()
def draw(self, surface, window, camera_pos):
"""
Dibujar el objeto en pantalla.
:param surface: Superficie de dibujo
:param window: Ventana de la aplicación
:param camera_pos: Posición de la camara
:return: void
"""
drawx = camera_pos[0] - self.pos[0] + (window.get_window_width() - 1000) / 2
drawy = camera_pos[1] - self.pos[1] + (window.get_window_height() - 600) / 2
if (-self.width <= drawx <= window.get_window_width()) and (
-self.height <= drawy <= window.get_window_height()):
surface.blit(self.texture, (drawx, drawy))
def get_dimension(self):
"""
Retorna las dimensiones del objeto.
:return: void
"""
return self.width, self.height
def get_pos(self):
"""
Retorna la posición.
:return: void
"""
return self.pos
def get_rect(self):
"""
Retorna el rectangulo de la imágen.
:return: void
"""
return self.rect
def get_type(self):
"""
Retorna el tipo de objeto.
:return: void
"""
return self.type
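# Illustrative usage sketch (a hedged addition, not part of the original module; the
# asset name, 'screen' surface and 'window' object are assumptions): a Gameobject wraps
# a pygame surface, a type code and a world position, and is drawn relative to the
# camera each frame, e.g.:
#
#   texture = pygame.image.load("track.png").convert_alpha()
#   obj = Gameobject(texture, 1, (500, 300))      # type 1: track piece
#   obj.draw(screen, window, camera_pos=(0, 0))   # blits only if on screen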
|
"""
The :mod:`sklearn.lda` module implements Linear Discriminant Analysis (LDA).
"""
# Authors: Matthieu Perrot
# Mathieu Blondel
import warnings
import numpy as np
from scipy import linalg, ndimage
from .base import BaseEstimator, ClassifierMixin, TransformerMixin
from .utils.extmath import logsumexp
class LDA(BaseEstimator, ClassifierMixin, TransformerMixin):
"""
Linear Discriminant Analysis (LDA)
A classifier with a linear decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that
all classes share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality
of the input, by projecting it to the most discriminative
directions.
Parameters
----------
n_components: int
Number of components (< n_classes - 1) for dimensionality reduction
priors : array, optional, shape = [n_classes]
Priors on classes
Attributes
----------
`means_` : array-like, shape = [n_classes, n_features]
Class means
`xbar_` : float, shape = [n_features]
Over all mean
`priors_` : array-like, shape = [n_classes]
Class priors (sum to 1)
`covariance_` : array-like, shape = [n_features, n_features]
Covariance matrix (shared by all classes)
Examples
--------
>>> import numpy as np
>>> from sklearn.lda import LDA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LDA()
>>> clf.fit(X, y)
LDA(n_components=None, priors=None)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.qda.QDA: Quadratic discriminant analysis
"""
def __init__(self, n_components=None, priors=None):
self.n_components = n_components
self.priors = np.asarray(priors) if priors is not None else None
if self.priors is not None:
if (self.priors < 0).any():
raise ValueError('priors must be non-negative')
if self.priors.sum() != 1:
                print('warning: the priors do not sum to 1. Renormalizing')
self.priors = self.priors / self.priors.sum()
def fit(self, X, y, store_covariance=False, tol=1.0e-4):
"""
Fit the LDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
store_covariance : boolean
If True the covariance matrix (shared by all classes) is computed
and stored in `self.covariance_` attribute.
"""
X = np.asarray(X)
y = np.asarray(y)
if y.dtype.char.lower() not in ('b', 'h', 'i'):
# We need integer values to be able to use
# ndimage.measurements and np.bincount on numpy >= 2.0.
# We currently support (u)int8, (u)int16 and (u)int32.
# Note that versions of scipy >= 0.8 can also accept
# (u)int64. We however don't support it for backwards
# compatibility.
y = y.astype(np.int32)
if X.ndim != 2:
raise ValueError('X must be a 2D array')
if X.shape[0] != y.shape[0]:
raise ValueError(
'Incompatible shapes: X has %s samples, while y '
'has %s' % (X.shape[0], y.shape[0]))
n_samples = X.shape[0]
n_features = X.shape[1]
classes = np.unique(y)
n_classes = classes.size
if n_classes < 2:
raise ValueError('y has less than 2 classes')
classes_indices = [(y == c).ravel() for c in classes]
if self.priors is None:
counts = np.array(ndimage.measurements.sum(
np.ones(n_samples, dtype=y.dtype), y, index=classes))
self.priors_ = counts / float(n_samples)
else:
self.priors_ = self.priors
# Group means n_classes*n_features matrix
means = []
Xc = []
cov = None
if store_covariance:
cov = np.zeros((n_features, n_features))
for group_indices in classes_indices:
Xg = X[group_indices, :]
meang = Xg.mean(0)
means.append(meang)
# centered group data
Xgc = Xg - meang
Xc.append(Xgc)
if store_covariance:
cov += np.dot(Xgc.T, Xgc)
if store_covariance:
cov /= (n_samples - n_classes)
self.covariance_ = cov
self.means_ = np.asarray(means)
Xc = np.concatenate(Xc, 0)
# ----------------------------
        # 1) within-class (univariate) scaling by the within-class std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = float(1) / (n_samples - n_classes)
# ----------------------------
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within)scaled data
U, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear")
# Scaling of within covariance is: V' 1/S
scaling = (V[:rank] / std).T / S[:rank]
## ----------------------------
## 3) Between variance scaling
# Overall mean
xbar = np.dot(self.priors_, self.means_)
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(means - xbar).T).T, scaling)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use svd to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > tol * S[0])
# compose the scalings
self.scaling = np.dot(scaling, V.T[:, :rank])
self.xbar_ = xbar
# weight vectors / centroids
self.coef_ = np.dot(self.means_ - self.xbar_, self.scaling)
self.intercept_ = -0.5 * np.sum(self.coef_ ** 2, axis=1) + \
np.log(self.priors_)
self.classes = classes
return self
def decision_function(self, X):
"""
        This function returns the decision function values related to each
        class for an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples, n_classes]
"""
X = np.asarray(X)
# center and scale data
X = np.dot(X - self.xbar_, self.scaling)
return np.dot(X, self.coef_.T) + self.intercept_
def transform(self, X):
"""
Project the data so as to maximize class separation (large separation
between projected class means and small variance within each class).
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
"""
X = np.asarray(X)
# center and scale data
X = np.dot(X - self.xbar_, self.scaling)
n_comp = X.shape[1] if self.n_components is None else self.n_components
return np.dot(X, self.coef_[:n_comp].T)
def predict(self, X):
"""
This function does classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self.decision_function(X)
y_pred = self.classes[d.argmax(1)]
return y_pred
def predict_proba(self, X):
"""
        This function returns the posterior probabilities of classification
        for each class, for an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples, n_classes]
"""
values = self.decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""
        This function returns the posterior log-probabilities of classification
        for each class, for an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples, n_classes]
"""
values = self.decision_function(X)
loglikelihood = (values - values.max(axis=1)[:, np.newaxis])
normalization = logsumexp(loglikelihood, axis=1)
return loglikelihood - normalization[:, np.newaxis]
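# Illustrative continuation of the docstring example above (a hedged addition, not part
# of the original module); after clf.fit(X, y), the probability and projection methods
# can be called the same way as predict():
#
#   probs = clf.predict_proba([[-0.8, -1]])   # posterior probability per class
#   X_new = clf.transform(X)                  # project onto the discriminant axes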
|
from sys import stdout
from os import makedirs
from os.path import exists, abspath
from requests import Session
from datetime import datetime, timedelta
from getpass import getpass
periods_path = abspath(__file__ + "/../periods.txt")
site_url = "http://meterdata.submetersolutions.com"
login_url = "/login.php"
file_url = "/consumption_csv.php"
terminal = stdout.isatty() # not all functions work on PyCharm
def get_data(site_id, site_name, period=None):
"""
Access the online submeter database to download and save
data for a given (or asked) period.
Requires authentication.
:param str site_id: the looked-up "SiteID" param in the data query string
:param str site_name: the "SiteName" param in the data query string
:param str|List period: the month(s) to get data for (or formatted periods)
:return:
"""
# Get period to process (if not given)
if not period or not isinstance(period, list):
period = period or input("Enter a period to get data for: ")
periods = []
months = 0
try:
if len(period) == 7: # one month
start = datetime.strptime(period, "%b%Y")
end = last_day_of_month(start)
periods.append((start, end))
months += 1
else: # a period
first = datetime.strptime(period[:7], "%b%Y")
last = datetime.strptime(period[-7:], "%b%Y")
months += (last.year - first.year)*12 + \
(last.month - first.month + 1)
start = first
for _ in range(months):
end = last_day_of_month(start)
periods.append((start, end))
start = next_month(start)
except ValueError as e:
raise Exception("Incorrect period format. Accepted formats:\n"
"\tJan2016 (single month)\n"
"\tJan2016-Feb2017 (range of months)") from e
else: # properly formatted list
periods = period
months = len(periods)
# print(*periods, sep="\n")
if not exists("Data"):
makedirs("Data")
username = input("Username: ")
password = getpass() if terminal else input("Password: ")
# (Thanks to tigerFinch @ http://stackoverflow.com/a/17633072)
# Fill in your details here to be posted to the login form.
login_payload = {"txtUserName": username,
"txtPassword": password,
"btnLogin": "Login"}
query_string = {"SiteID": site_id,
"SiteName": site_name}
# print(query_string)
# Use 'with' to ensure the session context is closed after use.
with Session() as session:
response = session.post(site_url + login_url, data=login_payload)
assert response.status_code == 200, "Error from data server"
# print("url: {}".format(response.url))
assert response.url == site_url + "/propertylist.php", \
"Incorrect username/password"
update_progress_bar(0) # start progress bar
for idx, (start, end) in enumerate(periods):
if end - start > timedelta(days=55): # more than 1 month
x = start + timedelta(days=3) # actual month
y = end - timedelta(days=3) # actual month
period = "{}-{}_data.csv".format(x.strftime("%b%Y"),
y.strftime("%b%Y"))
else:
period = midpoint_day(start, end).strftime("Data/%b%Y_data.csv")
# Submeter Solutions uses inclusive dates, but City doesn't, so exclude "ToDate":
end = end - timedelta(days=1)
query_string["FromDate"] = start.strftime("%m/%d/%Y")
query_string["ToDate"] = end.strftime("%m/%d/%Y")
# print(period, ':',
# query_string["FromDate"], '-', query_string["ToDate"])
# An authorised request.
response = session.get(site_url + file_url, params=query_string)
assert response.status_code == 200, "Error from data server"
with open(period, 'xb') as f:
f.write(response.content)
update_progress_bar((idx+1) / months)
print("Data download complete. See 'Data' folder for files.")
def next_month(date):
month_after = date.replace(day=28) + timedelta(days=4) # never fails
return month_after.replace(day=1)
def last_day_of_month(date):
"""
Return the last day of the given month (leap year-sensitive),
with date unchanged.
Thanks to Augusto Men: http://stackoverflow.com/a/13565185
:param datetime date: the first day of the given month
:return: datetime
>>> d = datetime(2012, 2, 1)
>>> last_day_of_month(d)
datetime.datetime(2012, 2, 29, 0, 0)
>>> d.day == 1
True
"""
month_after = next_month(date)
return month_after - timedelta(days=month_after.day)
def midpoint_day(date1, date2):
"""
Finds the midpoint between two dates. (Rounds down.)
:type date1: datetime
:type date2: datetime
:return: datetime
>>> d1 = datetime(2016, 1, 1)
>>> d2 = datetime(2016, 1, 6)
>>> midpoint_day(d1, d2)
datetime.datetime(2016, 1, 3, 0, 0)
"""
if date1 > date2:
date1, date2 = date2, date1
return (date1 + (date2 - date1) / 2).replace(hour=0)
def update_progress_bar(percent: float):
if not terminal: # because PyCharm doesn't treat '\r' well
print("[{}{}]".format('#' * int(percent * 20),
' ' * (20 - int(percent * 20))))
elif percent == 1:
print("Progress: {:3.1%}".format(percent))
else:
print("Progress: {:3.1%}\r".format(percent), end="")
if __name__ == "__main__":
if not terminal:
print("WARNING: This is not a TTY/terminal. "
"Passwords will not be hidden.")
if periods_path and exists(periods_path):
p = []
with open(periods_path, 'r') as pf:
for line in pf:
if line[0] != '#': # skip comment lines
top, pot = line.split()[:2] # ignore inline comments
top = datetime.strptime(top, "%Y-%m-%d")
pot = datetime.strptime(pot, "%Y-%m-%d")
assert top < pot, "Improper period range (start !< end)"
p.append((top, pot))
get_data("128", "Brimley Plaza", p)
else:
get_data("128", "Brimley Plaza")
|
# -*- coding: utf-8 -*-
#
# This file is based upon the file generated by sphinx-quickstart. However,
# where sphinx-quickstart hardcodes values in this file that you input, this
# file has been changed to pull from your module's metadata module.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
# Import project metadata
from lsf_ibutils import metadata
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx',
'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
# show todos
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = metadata.project
copyright = metadata.copyright
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = metadata.version
# The full version, including alpha/beta/rc tags.
release = metadata.version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = metadata.project_no_spaces + 'doc'
# -- Options for LaTeX output -------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author,
# documentclass [howto/manual]).
latex_documents = [
('index', metadata.project_no_spaces + '.tex',
metadata.project + ' Documentation', metadata.authors_string,
'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', metadata.package, metadata.project + ' Documentation',
metadata.authors_string, 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', metadata.project_no_spaces,
metadata.project + ' Documentation', metadata.authors_string,
metadata.project_no_spaces, metadata.description, 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('http://docs.python.org/', None),
}
# Extra local configuration. This is useful for placing the class description
# in the class docstring and the __init__ parameter documentation in the
# __init__ docstring. See
# <http://sphinx-doc.org/ext/autodoc.html#confval-autoclass_content> for more
# information.
autoclass_content = 'both'
|
# -*- encoding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import datetime
import errno
import json
import os
import shutil
import tempfile
import uuid
import numpy
import six
from gnocchi import incoming
from gnocchi import utils
class FileStorage(incoming.IncomingDriver):
def __init__(self, conf):
super(FileStorage, self).__init__(conf)
self.basepath = conf.file_basepath
self.basepath_tmp = os.path.join(self.basepath, 'tmp')
def __str__(self):
return "%s: %s" % (self.__class__.__name__, str(self.basepath))
def upgrade(self, num_sacks):
super(FileStorage, self).upgrade(num_sacks)
utils.ensure_paths([self.basepath_tmp])
def get_storage_sacks(self):
try:
with open(os.path.join(self.basepath_tmp, self.CFG_PREFIX),
'r') as f:
return json.load(f)[self.CFG_SACKS]
except IOError as e:
if e.errno == errno.ENOENT:
return
raise
def set_storage_settings(self, num_sacks):
data = {self.CFG_SACKS: num_sacks}
with open(os.path.join(self.basepath_tmp, self.CFG_PREFIX), 'w') as f:
json.dump(data, f)
utils.ensure_paths([self._sack_path(i)
for i in six.moves.range(self.NUM_SACKS)])
def remove_sack_group(self, num_sacks):
prefix = self.get_sack_prefix(num_sacks)
for i in six.moves.xrange(num_sacks):
shutil.rmtree(os.path.join(self.basepath, prefix % i))
def _sack_path(self, sack):
return os.path.join(self.basepath, self.get_sack_name(sack))
def _measure_path(self, sack, metric_id):
return os.path.join(self._sack_path(sack), six.text_type(metric_id))
def _build_measure_path(self, metric_id, random_id=None):
sack = self.sack_for_metric(metric_id)
path = self._measure_path(sack, metric_id)
if random_id:
if random_id is True:
now = datetime.datetime.utcnow().strftime("_%Y%m%d_%H:%M:%S")
random_id = six.text_type(uuid.uuid4()) + now
return os.path.join(path, random_id)
return path
def _store_new_measures(self, metric, data):
tmpfile = tempfile.NamedTemporaryFile(
prefix='gnocchi', dir=self.basepath_tmp,
delete=False)
tmpfile.write(data)
tmpfile.close()
path = self._build_measure_path(metric.id, True)
while True:
try:
os.rename(tmpfile.name, path)
break
except OSError as e:
if e.errno != errno.ENOENT:
raise
try:
os.mkdir(self._build_measure_path(metric.id))
except OSError as e:
# NOTE(jd) It's possible that another process created the
# path just before us! In this case, good for us, let's do
# nothing then! (see bug #1475684)
if e.errno != errno.EEXIST:
raise
def _build_report(self, details):
report_vars = {'metrics': 0, 'measures': 0, 'metric_details': {}}
if details:
def build_metric_report(metric, sack):
report_vars['metric_details'][metric] = len(
self._list_measures_container_for_metric_id_str(sack,
metric))
else:
def build_metric_report(metric, sack):
report_vars['metrics'] += 1
report_vars['measures'] += len(
self._list_measures_container_for_metric_id_str(sack,
metric))
for i in six.moves.range(self.NUM_SACKS):
for metric in self.list_metric_with_measures_to_process(i):
build_metric_report(metric, i)
return (report_vars['metrics'] or
len(report_vars['metric_details'].keys()),
report_vars['measures'] or
sum(report_vars['metric_details'].values()),
report_vars['metric_details'] if details else None)
def list_metric_with_measures_to_process(self, sack):
return set(self._list_target(self._sack_path(sack)))
def _list_measures_container_for_metric_id_str(self, sack, metric_id):
return self._list_target(self._measure_path(sack, metric_id))
def _list_measures_container_for_metric_id(self, metric_id):
return self._list_target(self._build_measure_path(metric_id))
@staticmethod
def _list_target(target):
try:
return os.listdir(target)
except OSError as e:
# Some other process treated this one, then do nothing
if e.errno == errno.ENOENT:
return []
raise
def _delete_measures_files_for_metric_id(self, metric_id, files):
for f in files:
try:
os.unlink(self._build_measure_path(metric_id, f))
except OSError as e:
# Another process deleted it in the meantime, no prob'
if e.errno != errno.ENOENT:
raise
try:
os.rmdir(self._build_measure_path(metric_id))
except OSError as e:
# ENOENT: ok, it has been removed at almost the same time
# by another process
# ENOTEMPTY: ok, someone pushed measure in the meantime,
# we'll delete the measures and directory later
# EEXIST: some systems use this instead of ENOTEMPTY
if e.errno not in (errno.ENOENT, errno.ENOTEMPTY, errno.EEXIST):
raise
def delete_unprocessed_measures_for_metric_id(self, metric_id):
files = self._list_measures_container_for_metric_id(metric_id)
self._delete_measures_files_for_metric_id(metric_id, files)
def has_unprocessed(self, metric):
return os.path.isdir(self._build_measure_path(metric.id))
@contextlib.contextmanager
def process_measure_for_metric(self, metric):
files = self._list_measures_container_for_metric_id(metric.id)
measures = self._make_measures_array()
for f in files:
abspath = self._build_measure_path(metric.id, f)
with open(abspath, "rb") as e:
measures = numpy.append(
measures, self._unserialize_measures(f, e.read()))
yield measures
self._delete_measures_files_for_metric_id(metric.id, files)
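# Illustrative usage sketch (a hedged addition, not part of the original driver; the
# 'storage' and 'metric' objects and 'aggregate' are assumptions): process_measure_for_metric()
# is a context manager that yields the pending measures for a metric and deletes the
# corresponding measure files once the block exits without error:
#
#   with storage.process_measure_for_metric(metric) as measures:
#       aggregate(measures)   # placeholder for the caller's processing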
|
#! /usr/bin/python
"""
Software-in-the-loop simulation script for the multi quadcopter flocking control.
This is the main script for the multi quadcopter flocking control (SITL).
The script runs under the dronekit-sitl environment.
A high-level XBee module should be connected for the inter communication between
the drones and the ground control station if specified the hardware ports.
Otherwise, a ZeroMQ publisher-subscriber network is set up to simulate the
communication.
The XBee module runs in API2, escaped character mode. At the time of writing, an
XBee Pro S1 module is used (with the DigiMesh firmware). See the official site
of Digi and the datasheets for more details. Simulated XBee modules use the
same interface as the real ones.
The dronekit API package supports Python 2.7 for now. Ubuntu is the preferred
choice of onboard Linux OS as it uses `apt` to get distributed
packages, which is easy to set up and very convenient.
See reference [1] for more details about the algorithm.
Reference:
    Digi XBee: https://docs.digi.com/display/WirelessConnectivityKit/XBee+API+mode
python-xbee: https://github.com/nioinnovation/python-xbee
DKPY-API Reference: http://python.dronekit.io/automodule.html
Dronekit-SITL: http://python.dronekit.io/develop/sitl_setup.html?highlight=sitl
[1] Q. Yuan, J. Zhan and X. Li, Outdoor flocking of quadcopter drones with
decentralized model predictive control, ISA Transactions, 2017.
Environment:
Computer and OS: Raspberry Model 3B with Ubuntu MATE 16.04LTS.
    Wireless module: XBee Pro S1 with DigiMesh firmware.
Python packages: dronekit, dronekit-sitl, xbee, numpy
Attributes:
start_loc(dict): starting location coordinates related to agent_id.
comm_port_list(dict): SITL TCP ports related to agent_id.
Copyright:
Copyright 2017 Quan Yuan, Adaptive Networks and Control Lab,
Research Center of Smart Networks and Systems,
School of Information Science and Engineering,
Fudan University.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import sys
import time
import math
import serial
import logging
import argparse
import threading
from dronekit_sitl import SITL
from src import nav
from src import mas
from src import comm
from src import util
from src import shared
def _add_listeners(vehicle):
"""
Add listeners to monitor vehicle status.
Args:
vehicle(dronekit.Vehicle): the copter to be controlled.
"""
@vehicle.on_attribute('mode')
    def mode_listener(self, name, msg):
util.log_info("Mode switched to %s" % msg.name)
if msg.name != shared.status['manual_mode']: # manual override
if msg.name == 'RTL' or msg.name == 'LAND':
util.log_warning("External %s detected. Abort." % msg.name)
shared.status['abort'] = True
@vehicle.on_attribute('gps_0')
    def gps_listener(self, name, msg):  # monitor satellites
        if not shared.status['thread_flag'] & shared.NSATS_TOO_LOW:
            if msg.satellites_visible < 6:
                util.log_warning("Satellites dropped below 6!")
shared.status['thread_flag'] |= shared.NSATS_TOO_LOW
elif msg.satellites_visible >= 10:
util.log_info("Satellites recovered to %d." % msg.satellites_visible)
shared.status['thread_flag'] &= ~shared.NSATS_TOO_LOW
@vehicle.on_message('SYSTEM_TIME')
    def time_listener(self, name, msg):  # log timestamp
format = '%Y-%m-%d %H:%M:%S'
val = time.localtime(msg.time_unix_usec/1000000)
shared.timestamp = time.strftime(format, val)
def _parse_arguments():
"""
Parse the arguments to the main script and validate the inputs.
Returns:
argparse.ArgumentParser: the argument structure.
"""
parser = argparse.ArgumentParser(
fromfile_prefix_chars='@',
formatter_class = argparse.ArgumentDefaultsHelpFormatter,
description = 'Arguments for the SITL simulation.'
)
parser.add_argument('-id', type=str, default='FF', metavar='AgentID', required=True,
help="AGENT_ID, must be a 2-digit integer.")
parser.add_argument('-alt', type=float, default=15.0, metavar='',
help='Takeoff altitude, within [10.0, 100.0] (m).')
parser.add_argument('-xbee', type=str, default=None, metavar='',
help="XBee module's device path. If not provided, use ZeroMQ.")
parser.add_argument('-pix', type=str, default='fw/ac3.5.2_port5760', metavar='',
help="Pixhawk's device path. Can be SITL firmware.")
parser.add_argument('-algorithm', '-a', type=str, default='MPC', metavar='',
choices=['Vicsek','MPC'],
help="Algorithm used for main script.")
parser.add_argument('-character', '-c', type=str, default='follower', metavar='',
choices=['square','passive','follower'],
help="Whether this agent is leader or follower?")
parser.add_argument('-n', type=int, default=5, metavar='',
help="Total agent count.")
parser.add_argument('-level', '-l', type=str, default='info', metavar='',
choices=['warning','debug','info'],
help="Logging level: ['warning','debug','info']")
args = parser.parse_args()
# get correct parameters
if args.alt < 10.0 or args.alt > 100.0:
        raise Exception('-alt should be within [10.0, 100.0]')
if not args.id.isdigit() or len(args.id) != 2:
        raise Exception('-id should be a 2-digit integer')
return args
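# Illustrative invocation (the script file name below is a placeholder; the
# options correspond to the parser defined above):
#
#     python sitl_flocking.py -id 01 -alt 15.0 -a MPC -c follower -n 5 -l info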
def _choose_algorithm(vehicle, xbee, neighbors):
"""
Choose which algorithm thread to be instantiated.
Args:
vehicle(dronekit.Vehicle): the copter to be controlled.
xbee(xbee.Zigbee): the XBee communication interface.
neighbors(dict): the dictionary containing neighbors data.
Returns:
mas.Object: different thread instance based on the parameters.
"""
if shared.AGENT_CHARACTER == 'square':
return mas.SquareRoute(vehicle, xbee)
elif shared.AGENT_CHARACTER == 'passive':
return mas.PassiveLeader(vehicle, xbee)
elif shared.CURRENT_ALGORITHM == 'Vicsek':
return mas.Vicsek(vehicle, xbee, neighbors)
elif shared.CURRENT_ALGORITHM == 'MPC':
return mas.Decentralized(vehicle, xbee, neighbors)
# Starting location GNSS coordinates. Modify accordingly.
# Format: latitude,longitude,MSL altitude,heading
# 'FFF' is reserved for agents without an assigned ID. Not available when using pyzmq.
start_loc = {
'A01': '31.2991103,121.4953190,9,340',
'A02': '31.2989222,121.4954363,9,340',
'A03': '31.2988302,121.4953633,9,340',
'A04': '31.2988857,121.4954170,9,340',
'A05': '31.2989833,121.4955480,9,340',
'FFF': '31.3012010,121.4981920,9,340'
}
# port list for SITL communications
comm_port_list = {
'A01': 5789,
'A02': 6789,
'A03': 7789,
'A04': 8789,
'A05': 9789,
'GCS': 1789
}
def main():
"""
The Main function of this script.
"""
args = _parse_arguments()
util.log_init("sitl_A%s_%s.txt" % (args.id, util.get_latest_log("latest_sitl.txt")), util.log_level[args.level])
shared.AGENT_ID = 'A%s' % args.id
shared.AGENT_COUNT = args.n
shared.CURRENT_ALGORITHM = args.algorithm
shared.AGENT_CHARACTER = args.character
shared.des_alt = args.alt
util.log_info("AGENT_ID = %s" % shared.AGENT_ID)
util.log_info("Algorithm: %s" % shared.CURRENT_ALGORITHM)
util.log_info("Agent type: %s" % shared.AGENT_CHARACTER)
print "Start simulator (SITL)"
sitl = SITL(args.pix) # initialize SITL with firmware path
if shared.AGENT_ID in start_loc:
sitl_args = ['--home=%s' % start_loc[shared.AGENT_ID]]
else:
sitl_args = ['--home=%s' % start_loc['FFF']]
# Pre-recorded coordinates.
#sitl_args = ['-I0', '--model', 'quad', '--home=31.301201,121.498192,9,353']
sitl.launch(sitl_args, await_ready=True, restart=True)
    # Connect to the vehicle (spawn a dronekit Vehicle instance named "copter").
    # The connection port is encoded in the firmware file name, e.g. "ac3.4.5_port5760";
    # use a regular expression to search the string and extract the port number.
port = re.search(r'port\d{4}', args.pix)
port = re.search(r'\d{4}', port.group()).group()
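    # For example, with the default firmware path 'fw/ac3.5.2_port5760' the
    # first search returns 'port5760' and the second extracts '5760'.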
print "Connecting to copter on: TCP: 127.0.0.1:%s" % port
copter = nav.connect('tcp:127.0.0.1:%s' % port, wait_ready=True, rate=20)
util.log_info("Copter connected. Firmware: %s" % copter.version)
if not args.xbee: # simulate XBee using ZeroMQ
[pub, sub] = comm.zmq_init(comm_port_list[shared.AGENT_ID], comm_port_list)
subscriber_thread = comm.Subscriber(shared.AGENT_ID, sub)
subscriber_thread.start()
xbee = pub # make xbee the publisher
util.log_info("ZeroMQ initialzied.")
else: # use actual xbee ports
ser = serial.Serial(args.xbee, 57600)
xbee = comm.xbee_init(ser)
util.log_info("Xbee initialzed.")
info = "IFO,%s connected with firmware %s" % (shared.AGENT_ID, copter.version)
comm.xbee_broadcast(xbee, info)
_add_listeners(copter)
takeoff_thread = nav.Takeoff(copter, xbee, shared.des_alt, 3)
purge_thread = comm.Purge(shared.neighbors)
broadcast_thread = comm.Broadcast(shared.AGENT_ID, copter, xbee)
flocking_thread = _choose_algorithm(copter, xbee, shared.neighbors)
takeoff_thread.start()
takeoff_thread.join() # wait until takeoff procedure completed
if shared.status['airborne']: # only execute the threads when airborne
util.log_info("Copter is airborne, starting threads.")
broadcast_thread.start()
purge_thread.start()
flocking_thread.start()
# main loop
while True:
try: time.sleep(.2)
except KeyboardInterrupt: break
if shared.status['airborne']:
# echo exiting status
if shared.status['exiting']:
info = "IFO,%s %s-ing." % (shared.AGENT_ID,shared.status['command'])
comm.xbee_broadcast(xbee, info)
util.log_info(info)
            # If an RTL or LAND command is received, kill flocking and set the `exiting` flag.
elif shared.status['command'] == 'RTL' or shared.status['command'] == 'LAND':
shared.status['thread_flag'] |= shared.FLOCKING_FLAG
nav.set_mode(copter, shared.status['command'])
shared.status['exiting'] = True
if not flocking_thread.is_alive(): # break the loop if finished
break
nav.wait_for_disarm(copter) # wait for disarm
comm.xbee_broadcast(xbee, 'IFO,%s terminated.' % shared.AGENT_ID)
# clean up
purge_thread.stop()
while purge_thread.is_alive():
util.log_info('Waiting for purge to shutdown')
purge_thread.join(3)
util.log_info('Purge killed.')
broadcast_thread.stop()
while broadcast_thread.is_alive():
util.log_info('Waiting for broadcast to shutdown')
broadcast_thread.join(3)
util.log_info('Broadcast killed.')
copter.close()
util.log_info("Copter shutdown.")
if args.xbee:
xbee.halt()
ser.close()
util.log_info("Xbee and serial closed.")
else:
subscriber_thread.stop()
while subscriber_thread.is_alive():
util.log_info('Waiting for Subscriber to shutdown')
subscriber_thread.join(3)
util.log_info('Subscriber killed.')
sitl.stop()
util.log_info("SITL shutdown.")
if __name__ == '__main__':
main()
|
import numpy as np
from rllab.core.serializable import Serializable
from rllab.core.parameterized import Parameterized
from rllab.misc import logger
# the regressor will be chosen to be from the same distribution as the latents
from rllab.regressors.gaussian_mlp_regressor import GaussianMLPRegressor
from rllab.regressors.categorical_mlp_regressor import CategoricalMLPRegressor # could be Categorical_oneAxis
from sandbox.snn4hrl.regressors.categorical_recurrent_regressor import CategoricalRecurrentRegressor
from sandbox.snn4hrl.regressors.bernoulli_mlp_regressor import BernoulliMLPRegressor
from sandbox.snn4hrl.regressors.bernoulli_recurrent_regressor import BernoulliRecurrentRegressor
from rllab.optimizers.first_order_optimizer import FirstOrderOptimizer
class Latent_regressor(Parameterized, Serializable):
def __init__(
self,
env_spec,
policy,
recurrent=False,
predict_all=True,
obs_regressed='all',
act_regressed='all',
use_only_sign=False,
noisify_traj_coef=0,
optimizer=None, # this defaults to LBFGS
regressor_args=None, # here goes all args straight to the regressor: hidden_sizes, TR, step_size....
):
"""
        :param predict_all: only used in the recurrent case, to use all hidden states as predictions
        :param obs_regressed: list of indices of the obs variables used to fit the regressor. Defaults to the string 'all'
        :param act_regressed: list of indices of the act variables used to fit the regressor. Defaults to the string 'all'
        :param regressor_args: dict of kwargs passed straight to the regressor (hidden_sizes, step_size, ...)
"""
self.env_spec = env_spec
self.policy = policy
self.latent_dim = policy.latent_dim
self.recurrent = recurrent
self.predict_all = predict_all
self.use_only_sign = use_only_sign
self.noisify_traj_coef = noisify_traj_coef
self.regressor_args = regressor_args
# decide what obs variables will be regressed upon
if obs_regressed == 'all':
self.obs_regressed = list(range(env_spec.observation_space.flat_dim))
else:
self.obs_regressed = obs_regressed
# decide what action variables will be regressed upon
if act_regressed == 'all':
self.act_regressed = list(range(env_spec.action_space.flat_dim))
else:
self.act_regressed = act_regressed
# shape the input dimension of the NN for the above decisions.
self.obs_act_dim = len(self.obs_regressed) + len(self.act_regressed)
        Serializable.quick_init(self, locals())  # record constructor args so the object can be re-created on deserialization
if regressor_args is None:
regressor_args = dict()
if optimizer == 'first_order':
self.optimizer = FirstOrderOptimizer(
max_epochs=10, # both of these are to match Rocky's 10
batch_size=128,
)
elif optimizer is None:
self.optimizer = None
else:
raise NotImplementedError
if policy.latent_name == 'bernoulli':
if self.recurrent:
self._regressor = BernoulliRecurrentRegressor(
input_shape=(self.obs_act_dim,),
output_dim=policy.latent_dim,
optimizer=self.optimizer,
predict_all=self.predict_all,
**regressor_args
)
else:
self._regressor = BernoulliMLPRegressor(
input_shape=(self.obs_act_dim,),
output_dim=policy.latent_dim,
optimizer=self.optimizer,
**regressor_args
)
elif policy.latent_name == 'categorical':
if self.recurrent:
self._regressor = CategoricalRecurrentRegressor( # not implemented
input_shape=(self.obs_act_dim,),
output_dim=policy.latent_dim,
optimizer=self.optimizer,
# predict_all=self.predict_all,
**regressor_args
)
else:
self._regressor = CategoricalMLPRegressor(
input_shape=(self.obs_act_dim,),
output_dim=policy.latent_dim,
optimizer=self.optimizer,
**regressor_args
)
elif policy.latent_name == 'normal':
self._regressor = GaussianMLPRegressor(
input_shape=(self.obs_act_dim,),
output_dim=policy.latent_dim,
optimizer=self.optimizer,
**regressor_args
)
else:
raise NotImplementedError
def fit(self, paths):
logger.log('fitting the regressor...')
if self.recurrent:
observations = np.array([p["observations"][:, self.obs_regressed] for p in paths])
actions = np.array([p["actions"][:, self.act_regressed] for p in paths])
obs_actions = np.concatenate([observations, actions], axis=2)
if self.noisify_traj_coef:
obs_actions += np.random.normal(loc=0.0,
scale=float(np.mean(np.abs(obs_actions))) * self.noisify_traj_coef,
size=np.shape(obs_actions))
latents = np.array([p['agent_infos']['latents'] for p in paths])
self._regressor.fit(obs_actions, latents) # the input shapes are (traj, time, dim)
else:
observations = np.concatenate([p["observations"][:, self.obs_regressed] for p in paths])
actions = np.concatenate([p["actions"][:, self.act_regressed] for p in paths])
obs_actions = np.concatenate([observations, actions], axis=1)
latents = np.concatenate([p['agent_infos']["latents"] for p in paths])
if self.noisify_traj_coef:
obs_actions += np.random.normal(loc=0.0,
scale=float(np.mean(np.abs(obs_actions))) * self.noisify_traj_coef,
size=np.shape(obs_actions))
self._regressor.fit(obs_actions, latents.reshape((-1, self.latent_dim))) # why reshape??
logger.log('done fitting the regressor')
def predict(self, path):
if self.recurrent:
obs_actions = [np.concatenate([path["observations"][:, self.obs_regressed],
path["actions"][:, self.act_regressed]],
axis=1)] # is this the same??
else:
obs_actions = np.concatenate([path["observations"][:, self.obs_regressed],
path["actions"][:, self.act_regressed]], axis=1)
if self.noisify_traj_coef:
obs_actions += np.random.normal(loc=0.0, scale=float(np.mean(np.abs(obs_actions))) * self.noisify_traj_coef,
size=np.shape(obs_actions))
if self.use_only_sign:
obs_actions = np.sign(obs_actions)
return self._regressor.predict(obs_actions).flatten()
def get_output_p(self, path): # this gives the p_dist for every step: the latent posterior wrt obs_act
if self.recurrent:
obs_actions = [np.concatenate([path["observations"][:, self.obs_regressed],
path["actions"][:, self.act_regressed]],
axis=1)] # is this the same??
else:
obs_actions = np.concatenate([path["observations"][:, self.obs_regressed],
path["actions"][:, self.act_regressed]], axis=1)
if self.noisify_traj_coef:
obs_actions += np.random.normal(loc=0.0, scale=float(np.mean(np.abs(obs_actions))) * self.noisify_traj_coef,
size=np.shape(obs_actions))
if self.use_only_sign:
obs_actions = np.sign(obs_actions)
if self.policy.latent_name == 'bernoulli':
return self._regressor._f_p(obs_actions).flatten()
elif self.policy.latent_name == 'normal':
return self._regressor._f_pdists(obs_actions).flatten()
def get_param_values(self, **tags):
return self._regressor.get_param_values(**tags)
def set_param_values(self, flattened_params, **tags):
self._regressor.set_param_values(flattened_params, **tags)
def predict_log_likelihood(self, paths, latents):
if self.recurrent:
observations = np.array([p["observations"][:, self.obs_regressed] for p in paths])
actions = np.array([p["actions"][:, self.act_regressed] for p in paths])
obs_actions = np.concatenate([observations, actions], axis=2) # latents must match first 2dim: (batch,time)
else:
observations = np.concatenate([p["observations"][:, self.obs_regressed] for p in paths])
actions = np.concatenate([p["actions"][:, self.act_regressed] for p in paths])
obs_actions = np.concatenate([observations, actions], axis=1)
latents = np.concatenate(latents, axis=0)
if self.noisify_traj_coef:
noise = np.random.multivariate_normal(mean=np.zeros_like(np.mean(obs_actions, axis=0)),
cov=np.diag(np.mean(np.abs(obs_actions),
axis=0) * self.noisify_traj_coef),
size=np.shape(obs_actions)[0])
obs_actions += noise
if self.use_only_sign:
obs_actions = np.sign(obs_actions)
return self._regressor.predict_log_likelihood(obs_actions, latents) # see difference with fit above...
def lowb_mutual(self, paths, times=(0, None)):
if self.recurrent:
observations = np.array([p["observations"][times[0]:times[1], self.obs_regressed] for p in paths])
actions = np.array([p["actions"][times[0]:times[1], self.act_regressed] for p in paths])
obs_actions = np.concatenate([observations, actions], axis=2)
latents = np.array([p['agent_infos']['latents'][times[0]:times[1]] for p in paths])
else:
observations = np.concatenate([p["observations"][times[0]:times[1], self.obs_regressed] for p in paths])
actions = np.concatenate([p["actions"][times[0]:times[1], self.act_regressed] for p in paths])
obs_actions = np.concatenate([observations, actions], axis=1)
latents = np.concatenate([p['agent_infos']["latents"][times[0]:times[1]] for p in paths])
if self.noisify_traj_coef:
obs_actions += np.random.multivariate_normal(mean=np.zeros_like(np.mean(obs_actions,axis=0)),
cov=np.diag(np.mean(np.abs(obs_actions),
axis=0) * self.noisify_traj_coef),
size=np.shape(obs_actions)[0])
if self.use_only_sign:
obs_actions = np.sign(obs_actions)
        H_latent = self.policy.latent_dist.entropy(self.policy.latent_dist_info)  # sum of the entropies of the latents
return H_latent + np.mean(self._regressor.predict_log_likelihood(obs_actions, latents))
def log_diagnostics(self, paths):
logger.record_tabular(self._regressor._name + 'LowerB_MI', self.lowb_mutual(paths))
logger.record_tabular(self._regressor._name + 'LowerB_MI_5first', self.lowb_mutual(paths, times=(0, 5)))
logger.record_tabular(self._regressor._name + 'LowerB_MI_5last', self.lowb_mutual(paths, times=(-5, None)))
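# Illustrative construction and use of the regressor above (hypothetical
# variable names; `env`, `policy` and `paths` come from the training script
# that owns this class, and the regressor_args shown are only an example):
#
#     latent_reg = Latent_regressor(env_spec=env.spec, policy=policy,
#                                   recurrent=False,
#                                   regressor_args=dict(hidden_sizes=(32, 32)))
#     latent_reg.fit(paths)
#     mi_lower_bound = latent_reg.lowb_mutual(paths)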
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Test tools package alone which don't fit into other tests."""
#
# (C) Pywikibot team, 2016
#
# Distributed under the terms of the MIT license.
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
import collections
import decimal
import inspect
import os.path
import subprocess
import tempfile
import warnings
from pywikibot import tools
from tests import join_xml_data_path
from tests.aspects import (
unittest, require_modules, DeprecationTestCase, TestCase, MetaTestCaseClass
)
from tests.utils import expected_failure_if, add_metaclass
class ContextManagerWrapperTestCase(TestCase):
"""Test that ContextManagerWrapper is working correctly."""
class DummyClass(object):
"""A dummy class which has some values and a close method."""
class_var = 42
def __init__(self):
"""Create instance with dummy values."""
self.instance_var = 1337
self.closed = False
def close(self):
"""Just store that it has been closed."""
self.closed = True
net = False
def test_wrapper(self):
"""Create a test instance and verify the wrapper redirects."""
obj = self.DummyClass()
wrapped = tools.ContextManagerWrapper(obj)
self.assertIs(wrapped.class_var, obj.class_var)
self.assertIs(wrapped.instance_var, obj.instance_var)
self.assertIs(wrapped._wrapped, obj)
self.assertFalse(obj.closed)
with wrapped as unwrapped:
self.assertFalse(obj.closed)
self.assertIs(unwrapped, obj)
unwrapped.class_var = 47
self.assertTrue(obj.closed)
self.assertEqual(wrapped.class_var, 47)
def test_exec_wrapper(self):
"""Check that the wrapper permits exceptions."""
wrapper = tools.ContextManagerWrapper(self.DummyClass())
self.assertFalse(wrapper.closed)
with self.assertRaises(ZeroDivisionError):
with wrapper:
1 / 0
self.assertTrue(wrapper.closed)
class OpenArchiveTestCase(TestCase):
"""
Unit test class for tools.
    The tests for open_archive require that the article-pyrus.xml* files all
    contain the same content after extraction. The content itself is not important.
The file article-pyrus.xml_invalid.7z is not a valid 7z file and
open_archive will fail extracting it using 7za.
"""
net = False
@classmethod
def setUpClass(cls):
"""Define base_file and original_content."""
super(OpenArchiveTestCase, cls).setUpClass()
cls.base_file = join_xml_data_path('article-pyrus.xml')
with open(cls.base_file, 'rb') as f:
cls.original_content = f.read()
def _get_content(self, *args, **kwargs):
"""Use open_archive and return content using a with-statement."""
with tools.open_archive(*args, **kwargs) as f:
return f.read()
def test_open_archive_normal(self):
"""Test open_archive with no compression in the standard library."""
self.assertEqual(self._get_content(self.base_file), self.original_content)
def test_open_archive_bz2(self):
"""Test open_archive with bz2 compressor in the standard library."""
self.assertEqual(self._get_content(self.base_file + '.bz2'), self.original_content)
self.assertEqual(self._get_content(self.base_file + '.bz2', use_extension=False),
self.original_content)
@require_modules('bz2file')
def test_open_archive_with_bz2file(self):
"""Test open_archive when bz2file library."""
old_bz2 = tools.bz2
try:
tools.bz2 = __import__('bz2file')
self.assertEqual(self._get_content(self.base_file + '.bz2'),
self.original_content)
self.assertEqual(self._get_content(self.base_file + '.bz2',
use_extension=False),
self.original_content)
finally:
tools.bz2 = old_bz2
def test_open_archive_without_bz2(self):
"""Test open_archive when bz2 and bz2file are not available."""
old_bz2 = tools.bz2
try:
tools.bz2 = ImportError()
self.assertRaises(ImportError, self._get_content, self.base_file + '.bz2')
finally:
tools.bz2 = old_bz2
def test_open_archive_gz(self):
"""Test open_archive with gz compressor in the standard library."""
self.assertEqual(self._get_content(self.base_file + '.gz'), self.original_content)
def test_open_archive_7z(self):
"""Test open_archive with 7za if installed."""
try:
subprocess.Popen(['7za'], stdout=subprocess.PIPE).stdout.close()
except OSError:
raise unittest.SkipTest('7za not installed')
self.assertEqual(self._get_content(self.base_file + '.7z'), self.original_content)
self.assertRaises(OSError, self._get_content, self.base_file + '_invalid.7z',
use_extension=True)
class OpenCompressedTestCase(OpenArchiveTestCase, DeprecationTestCase):
"""Test opening files with the deprecated open_compressed."""
net = False
def _get_content(self, *args, **kwargs):
"""Use open_compressed and return content using a with-statement."""
# open_archive default is True, so if it's False it's not the default
# so use the non-default of open_compressed (which is True)
if kwargs.get('use_extension') is False:
kwargs['use_extension'] = True
with tools.open_compressed(*args, **kwargs) as f:
content = f.read()
self.assertOneDeprecation(self.INSTEAD)
return content
class OpenArchiveWriteTestCase(TestCase):
"""Test writing with open_archive."""
net = False
@classmethod
def setUpClass(cls):
"""Define base_file and original_content."""
super(OpenArchiveWriteTestCase, cls).setUpClass()
cls.base_file = join_xml_data_path('article-pyrus.xml')
with open(cls.base_file, 'rb') as f:
cls.original_content = f.read()
def _write_content(self, suffix):
try:
fh, fn = tempfile.mkstemp(suffix)
with tools.open_archive(fn, 'wb') as f:
f.write(self.original_content)
with tools.open_archive(fn, 'rb') as f:
self.assertEqual(f.read(), self.original_content)
with open(fn, 'rb') as f:
return f.read()
finally:
os.close(fh)
os.remove(fn)
def test_invalid_modes(self):
"""Test various invalid mode configurations."""
self.assertRaises(ValueError, tools.open_archive,
'/dev/null', 'ra') # two modes besides
self.assertRaises(ValueError, tools.open_archive,
'/dev/null', 'rt') # text mode
self.assertRaises(ValueError, tools.open_archive,
'/dev/null', 'br') # binary at front
self.assertRaises(ValueError, tools.open_archive,
'/dev/null', 'wb', False) # writing without extension
def test_binary_mode(self):
"""Test that it uses binary mode."""
with tools.open_archive(self.base_file, 'r') as f:
self.assertEqual(f.mode, 'rb')
self.assertIsInstance(f.read(), bytes)
def test_write_archive_bz2(self):
"""Test writing a bz2 archive."""
content = self._write_content('.bz2')
with open(self.base_file + '.bz2', 'rb') as f:
self.assertEqual(content, f.read())
def test_write_archive_gz(self):
"""Test writing a gz archive."""
content = self._write_content('.gz')
self.assertEqual(content[:3], b'\x1F\x8B\x08')
def test_write_archive_7z(self):
"""Test writing an archive as a 7z archive."""
self.assertRaises(NotImplementedError, tools.open_archive,
'/dev/null.7z', mode='wb')
class MergeUniqueDicts(TestCase):
"""Test merge_unique_dicts."""
net = False
dct1 = {'foo': 'bar', '42': 'answer'}
dct2 = {47: 'Star', 74: 'Trek'}
dct_both = dct1.copy()
dct_both.update(dct2)
def test_single(self):
"""Test that it returns the dict itself when there is only one."""
self.assertEqual(tools.merge_unique_dicts(self.dct1), self.dct1)
self.assertEqual(tools.merge_unique_dicts(**self.dct1), self.dct1)
def test_multiple(self):
"""Test that it actually merges dicts."""
self.assertEqual(tools.merge_unique_dicts(self.dct1, self.dct2),
self.dct_both)
self.assertEqual(tools.merge_unique_dicts(self.dct2, **self.dct1),
self.dct_both)
def test_different_type(self):
"""Test that the keys can be different types."""
self.assertEqual(tools.merge_unique_dicts({'1': 'str'}, {1: 'int'}),
{'1': 'str', 1: 'int'})
def test_conflict(self):
"""Test that it detects conflicts."""
self.assertRaisesRegex(
ValueError, '42', tools.merge_unique_dicts, self.dct1, **{'42': 'bad'})
self.assertRaisesRegex(
ValueError, '42', tools.merge_unique_dicts, self.dct1, self.dct1)
self.assertRaisesRegex(
ValueError, '42', tools.merge_unique_dicts, self.dct1, **self.dct1)
def passthrough(x):
"""Return x."""
return x
class SkipList(set):
"""Container that ignores items."""
skip_list = [1, 3]
def __contains__(self, item):
"""Override to not process some items."""
if item in self.skip_list:
return True
else:
return super(SkipList, self).__contains__(item)
class ProcessAgainList(set):
"""Container that keeps processing certain items."""
process_again_list = [1, 3]
def add(self, item):
"""Override to not add some items."""
if item in self.process_again_list:
return
else:
return super(ProcessAgainList, self).add(item)
class ContainsStopList(set):
"""Container that stops when encountering items."""
stop_list = []
def __contains__(self, item):
"""Override to stop on encountering items."""
if item in self.stop_list:
raise StopIteration
else:
return super(ContainsStopList, self).__contains__(item)
class AddStopList(set):
"""Container that stops when encountering items."""
stop_list = []
def add(self, item):
"""Override to not continue on encountering items."""
if item in self.stop_list:
raise StopIteration
else:
super(AddStopList, self).add(item)
class TestFilterUnique(TestCase):
"""Test filter_unique."""
net = False
ints = [1, 3, 2, 1, 2, 1, 2, 4, 2]
strs = [str(i) for i in ints]
decs = [decimal.Decimal(i) for i in ints]
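    # For orientation (not itself a test): running filter_unique over `ints`
    # above yields each value on first occurrence, i.e. 1, 3, 2, 4, which is
    # exactly what the helper assertions below walk through step by step.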
def _test_dedup_int(self, deduped, deduper, key=None):
"""Test filter_unique results for int."""
if not key:
key = passthrough
self.assertEqual(len(deduped), 0)
self.assertEqual(next(deduper), 1)
self.assertEqual(next(deduper), 3)
if key in (hash, passthrough):
if isinstance(deduped, tools.OrderedDict):
self.assertEqual(list(deduped.keys()), [1, 3])
elif isinstance(deduped, collections.Mapping):
self.assertCountEqual(list(deduped.keys()), [1, 3])
else:
self.assertEqual(deduped, set([1, 3]))
self.assertEqual(next(deduper), 2)
self.assertEqual(next(deduper), 4)
if key in (hash, passthrough):
if isinstance(deduped, tools.OrderedDict):
self.assertEqual(list(deduped.keys()), [1, 3, 2, 4])
elif isinstance(deduped, collections.Mapping):
self.assertCountEqual(list(deduped.keys()), [1, 2, 3, 4])
else:
self.assertEqual(deduped, set([1, 2, 3, 4]))
self.assertRaises(StopIteration, next, deduper)
def _test_dedup_str(self, deduped, deduper, key=None):
"""Test filter_unique results for str."""
if not key:
key = passthrough
self.assertEqual(len(deduped), 0)
self.assertEqual(next(deduper), '1')
self.assertEqual(next(deduper), '3')
if key in (hash, passthrough):
if isinstance(deduped, collections.Mapping):
self.assertEqual(deduped.keys(), [key('1'), key('3')])
else:
self.assertEqual(deduped, set([key('1'), key('3')]))
self.assertEqual(next(deduper), '2')
self.assertEqual(next(deduper), '4')
if key in (hash, passthrough):
if isinstance(deduped, collections.Mapping):
self.assertEqual(deduped.keys(), [key(i) for i in self.strs])
else:
self.assertEqual(deduped, set(key(i) for i in self.strs))
self.assertRaises(StopIteration, next, deduper)
def test_set(self):
"""Test filter_unique with a set."""
deduped = set()
deduper = tools.filter_unique(self.ints, container=deduped)
self._test_dedup_int(deduped, deduper)
def test_dict(self):
"""Test filter_unique with a dict."""
deduped = dict()
deduper = tools.filter_unique(self.ints, container=deduped)
self._test_dedup_int(deduped, deduper)
def test_OrderedDict(self):
"""Test filter_unique with a OrderedDict."""
deduped = tools.OrderedDict()
deduper = tools.filter_unique(self.ints, container=deduped)
self._test_dedup_int(deduped, deduper)
def test_int_hash(self):
"""Test filter_unique with ints using hash as key."""
deduped = set()
deduper = tools.filter_unique(self.ints, container=deduped, key=hash)
self._test_dedup_int(deduped, deduper, hash)
def test_int_id(self):
"""Test filter_unique with ints using id as key."""
deduped = set()
deduper = tools.filter_unique(self.ints, container=deduped, key=id)
self._test_dedup_int(deduped, deduper, id)
def test_obj(self):
"""Test filter_unique with objects."""
deduped = set()
deduper = tools.filter_unique(self.decs, container=deduped)
self._test_dedup_int(deduped, deduper)
def test_obj_hash(self):
"""Test filter_unique with objects using hash as key."""
deduped = set()
deduper = tools.filter_unique(self.decs, container=deduped, key=hash)
self._test_dedup_int(deduped, deduper, hash)
def test_obj_id(self):
"""Test filter_unique with objects using id as key, which fails."""
        # Two objects which may be equal do not necessarily have the same id.
deduped = set()
deduper = tools.filter_unique(self.decs, container=deduped, key=id)
self.assertEqual(len(deduped), 0)
for _ in self.decs:
self.assertEqual(id(next(deduper)), deduped.pop())
self.assertRaises(StopIteration, next, deduper)
# No. of Decimal with distinct ids != no. of Decimal with distinct value.
deduper_ids = list(tools.filter_unique(self.decs, key=id))
self.assertNotEqual(len(deduper_ids), len(set(deduper_ids)))
def test_str(self):
"""Test filter_unique with str."""
deduped = set()
deduper = tools.filter_unique(self.strs, container=deduped)
self._test_dedup_str(deduped, deduper)
def test_str_hash(self):
"""Test filter_unique with str using hash as key."""
deduped = set()
deduper = tools.filter_unique(self.strs, container=deduped, key=hash)
self._test_dedup_str(deduped, deduper, hash)
@expected_failure_if(not tools.PY2)
def test_str_id(self):
"""Test str using id as key fails on Python 3."""
# str in Python 3 behave like objects.
deduped = set()
deduper = tools.filter_unique(self.strs, container=deduped, key=id)
self._test_dedup_str(deduped, deduper, id)
def test_for_resumable(self):
"""Test filter_unique is resumable after a for loop."""
gen2 = tools.filter_unique(self.ints)
deduped = []
for item in gen2:
deduped.append(item)
if len(deduped) == 3:
break
self.assertEqual(deduped, [1, 3, 2])
last = next(gen2)
self.assertEqual(last, 4)
self.assertRaises(StopIteration, next, gen2)
def test_skip(self):
"""Test filter_unique with a container that skips items."""
deduped = SkipList()
deduper = tools.filter_unique(self.ints, container=deduped)
deduped_out = list(deduper)
self.assertCountEqual(deduped, deduped_out)
self.assertEqual(deduped, set([2, 4]))
def test_process_again(self):
"""Test filter_unique with an ignoring container."""
deduped = ProcessAgainList()
deduper = tools.filter_unique(self.ints, container=deduped)
deduped_out = list(deduper)
self.assertEqual(deduped_out, [1, 3, 2, 1, 1, 4])
self.assertEqual(deduped, set([2, 4]))
def test_stop(self):
"""Test filter_unique with an ignoring container."""
deduped = ContainsStopList()
deduped.stop_list = [2]
deduper = tools.filter_unique(self.ints, container=deduped)
deduped_out = list(deduper)
self.assertCountEqual(deduped, deduped_out)
self.assertEqual(deduped, set([1, 3]))
# And it should not resume
self.assertRaises(StopIteration, next, deduper)
deduped = AddStopList()
deduped.stop_list = [4]
deduper = tools.filter_unique(self.ints, container=deduped)
deduped_out = list(deduper)
self.assertCountEqual(deduped, deduped_out)
self.assertEqual(deduped, set([1, 2, 3]))
# And it should not resume
self.assertRaises(StopIteration, next, deduper)
class MetaTestArgSpec(MetaTestCaseClass):
"""Metaclass to create dynamically the tests. Set the net flag to false."""
def __new__(cls, name, bases, dct):
"""Create a new test case class."""
def create_test(method):
def test_method(self):
"""Test getargspec."""
# all expect at least self and param
expected = method(1, 2)
returned = self.getargspec(method)
self.assertEqual(returned, expected)
self.assertIsInstance(returned, self.expected_class)
self.assertNoDeprecation()
return test_method
for attr, tested_method in list(dct.items()):
if attr.startswith('_method_test_'):
suffix = attr[len('_method_test_'):]
cls.add_method(dct, 'test_method_' + suffix,
create_test(tested_method),
doc_suffix='on {0}'.format(suffix))
dct['net'] = False
return super(MetaTestArgSpec, cls).__new__(cls, name, bases, dct)
@add_metaclass
class TestArgSpec(DeprecationTestCase):
"""Test getargspec and ArgSpec from tools."""
__metaclass__ = MetaTestArgSpec
expected_class = tools.ArgSpec
def _method_test_args(self, param):
"""Test method with two positional arguments."""
return (['self', 'param'], None, None, None)
def _method_test_kwargs(self, param=42):
"""Test method with one positional and one keyword argument."""
return (['self', 'param'], None, None, (42,))
def _method_test_varargs(self, param, *var):
"""Test method with two positional arguments and var args."""
return (['self', 'param'], 'var', None, None)
def _method_test_varkwargs(self, param, **var):
"""Test method with two positional arguments and var kwargs."""
return (['self', 'param'], None, 'var', None)
def _method_test_vars(self, param, *args, **kwargs):
"""Test method with two positional arguments and both var args."""
return (['self', 'param'], 'args', 'kwargs', None)
def getargspec(self, method):
"""Call tested getargspec function."""
return tools.getargspec(method)
@unittest.skipIf(tools.PYTHON_VERSION >= (3, 6), 'removed in Python 3.6')
class TestPythonArgSpec(TestArgSpec):
"""Test the same tests using Python's implementation."""
expected_class = inspect.ArgSpec
def getargspec(self, method):
"""Call inspect's getargspec function."""
with warnings.catch_warnings():
if tools.PYTHON_VERSION >= (3, 5):
warnings.simplefilter('ignore', DeprecationWarning)
return inspect.getargspec(method)
if __name__ == '__main__':
try:
unittest.main()
except SystemExit:
pass
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
import sys
import tarfile
from .Extractor import ArchiveError, CRCError, Extractor
from .misc import encode, fsjoin
class UnTar(Extractor):
__name__ = "UnTar"
__type__ = "extractor"
__version__ = "0.05"
__status__ = "stable"
__description__ = """TAR extractor plugin"""
__license__ = "GPLv3"
__authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
VERSION = "%s.%s.%s" % (sys.version_info[0],
sys.version_info[1],
sys.version_info[2])
@classmethod
def isarchive(cls, filename):
try:
return tarfile.is_tarfile(encode(filename))
        except Exception:
return False
@classmethod
def find(cls):
return sys.version_info[:2] >= (2, 5)
def list(self, password=None):
with tarfile.open(self.filename) as t:
self.files = [fsjoin(self.dest, _f) for _f in t.getnames()]
return self.files
def verify(self, password=None):
try:
t = tarfile.open(self.filename, errorlevel=1)
except tarfile.CompressionError, e:
raise CRCError(e)
except (OSError, tarfile.TarError), e:
raise ArchiveError(e)
else:
t.close()
def extract(self, password=None):
self.verify(password)
try:
with tarfile.open(self.filename, errorlevel=2) as t:
t.extractall(self.dest)
self.files = t.getnames()
return self.files
except tarfile.ExtractError, e:
self.log_warning(e)
except tarfile.CompressionError, e:
raise CRCError(e)
except (OSError, tarfile.TarError), e:
raise ArchiveError(e)
|
"""
Verify traffic_dump functionality.
"""
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
Test.Summary = '''
Verify traffic_dump functionality.
'''
Test.SkipUnless(
Condition.PluginExists('traffic_dump.so'),
)
# Configure the origin server.
replay_file = "replay/traffic_dump.yaml"
server = Test.MakeVerifierServerProcess(
"server", replay_file,
ssl_cert="ssl/server_combined.pem", ca_cert="ssl/signer.pem")
# Define ATS and configure it.
ts = Test.MakeATSProcess("ts", enable_tls=True)
replay_dir = os.path.join(ts.RunDirectory, "ts", "log")
ts.addSSLfile("ssl/server.pem")
ts.addSSLfile("ssl/server.key")
ts.addSSLfile("ssl/signer.pem")
ts.Setup.Copy("ssl/signed-foo.pem")
ts.Setup.Copy("ssl/signed-foo.key")
ts.Disk.records_config.update({
'proxy.config.diags.debug.enabled': 1,
'proxy.config.diags.debug.tags': 'traffic_dump',
'proxy.config.http.insert_age_in_response': 0,
'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.url_remap.pristine_host_hdr': 1,
'proxy.config.ssl.CA.cert.filename': '{0}/signer.pem'.format(ts.Variables.SSLDir),
'proxy.config.exec_thread.autoconfig.scale': 1.0,
'proxy.config.http.host_sni_policy': 2,
'proxy.config.ssl.TLSv1_3': 0,
'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE',
})
ts.Disk.ssl_multicert_config.AddLine(
'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key'
)
ts.Disk.remap_config.AddLine(
'map https://www.client_only_tls.com/ http://127.0.0.1:{0}'.format(server.Variables.http_port)
)
ts.Disk.remap_config.AddLine(
'map https://www.tls.com/ https://127.0.0.1:{0}'.format(server.Variables.https_port)
)
ts.Disk.remap_config.AddLine(
'map / http://127.0.0.1:{0}'.format(server.Variables.http_port)
)
# Configure traffic_dump.
ts.Disk.plugin_config.AddLine(
'traffic_dump.so --logdir {0} --sample 1 --limit 1000000000 '
'--sensitive-fields "cookie,set-cookie,x-request-1,x-request-2"'.format(replay_dir)
)
# Configure logging of transactions. This is helpful for the cache test below.
ts.Disk.logging_yaml.AddLines(
'''
logging:
formats:
- name: basic
format: "%<cluc>: Read result: %<crc>:%<crsc>:%<chm>, Write result: %<cwr>"
logs:
- filename: transactions
format: basic
'''.split('\n'))
# Set up trafficserver expectations.
ts.Disk.diags_log.Content = Testers.ContainsExpression(
"loading plugin.*traffic_dump.so",
"Verify the traffic_dump plugin got loaded.")
ts.Streams.stderr = Testers.ContainsExpression(
"Initialized with log directory: {0}".format(replay_dir),
"Verify traffic_dump initialized with the configured directory.")
ts.Streams.stderr += Testers.ContainsExpression(
"Initialized with sample pool size 1 bytes and disk limit 1000000000 bytes",
"Verify traffic_dump initialized with the configured disk limit.")
ts.Streams.stderr += Testers.ContainsExpression(
"Finish a session with log file of.*bytes",
"Verify traffic_dump sees the end of sessions and accounts for it.")
# Set up the json replay file expectations.
replay_file_session_1 = os.path.join(replay_dir, "127", "0000000000000000")
ts.Disk.File(replay_file_session_1, exists=True)
replay_file_session_2 = os.path.join(replay_dir, "127", "0000000000000001")
ts.Disk.File(replay_file_session_2, exists=True)
replay_file_session_3 = os.path.join(replay_dir, "127", "0000000000000002")
ts.Disk.File(replay_file_session_3, exists=True)
replay_file_session_4 = os.path.join(replay_dir, "127", "0000000000000003")
ts.Disk.File(replay_file_session_4, exists=True)
replay_file_session_5 = os.path.join(replay_dir, "127", "0000000000000004")
ts.Disk.File(replay_file_session_5, exists=True)
replay_file_session_6 = os.path.join(replay_dir, "127", "0000000000000005")
ts.Disk.File(replay_file_session_6, exists=True)
replay_file_session_7 = os.path.join(replay_dir, "127", "0000000000000006")
ts.Disk.File(replay_file_session_7, exists=True)
replay_file_session_8 = os.path.join(replay_dir, "127", "0000000000000007")
ts.Disk.File(replay_file_session_8, exists=True)
replay_file_session_9 = os.path.join(replay_dir, "127", "0000000000000008")
ts.Disk.File(replay_file_session_9, exists=True)
replay_file_session_10 = os.path.join(replay_dir, "127", "0000000000000009")
ts.Disk.File(replay_file_session_10, exists=True)
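# Note: traffic_dump writes one replay file per session, numbered in creation
# order, under a subdirectory named after the client address (here "127" for
# the 127.0.0.1 loopback client). This is an inference from the layout
# asserted above rather than documented behavior.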
# Execute the first transaction. We limit the threads to 1 so that the sessions
# are run in serial.
tr = Test.AddTestRun("Run the test traffic.")
tr.AddVerifierClientProcess(
"client", replay_file, http_ports=[ts.Variables.port],
https_ports=[ts.Variables.ssl_port],
ssl_cert="ssl/server_combined.pem", ca_cert="ssl/signer.pem",
other_args='--thread-limit 1')
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(ts)
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
#
# Test 1: Verify the correct behavior of two transactions across two sessions.
#
# Verify the properties of the replay file for the first transaction.
tr = Test.AddTestRun("Verify the json content of the first session")
http_protocols = "tcp,ip"
verify_replay = "verify_replay.py"
sensitive_fields_arg = (
"--sensitive-fields cookie "
"--sensitive-fields set-cookie "
"--sensitive-fields x-request-1 "
"--sensitive-fields x-request-2 ")
tr.Setup.CopyAs(verify_replay, Test.RunDirectory)
tr.Processes.Default.Command = \
('python3 {0} {1} {2} {3} --client-http-version "1.1" '
'--client-protocols "{4}"'.format(
verify_replay,
os.path.join(Test.Variables.AtsTestToolsDir, 'lib', 'replay_schema.json'),
replay_file_session_1,
sensitive_fields_arg,
http_protocols))
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
# Verify the properties of the replay file for the second transaction.
tr = Test.AddTestRun("Verify the json content of the second session")
tr.Setup.CopyAs(verify_replay, Test.RunDirectory)
tr.Processes.Default.Command = \
('python3 {0} {1} {2} {3} --client-http-version "1.1" '
'--request-target "/two"'.format(
verify_replay,
os.path.join(Test.Variables.AtsTestToolsDir, 'lib', 'replay_schema.json'),
replay_file_session_2,
sensitive_fields_arg))
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
#
# Test 2: Verify the correct behavior of an explicit path in the request line.
#
# Verify recording of a request target with the host specified.
tr = Test.AddTestRun("Verify the replay file has the explicit target.")
tr.Setup.CopyAs(verify_replay, Test.RunDirectory)
tr.Processes.Default.Command = "python3 {0} {1} {2} {3} --request-target '{4}'".format(
verify_replay,
os.path.join(Test.Variables.AtsTestToolsDir, 'lib', 'replay_schema.json'),
replay_file_session_3,
sensitive_fields_arg,
"http://www.some.host.com/candy")
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
#
# Test 3: Verify correct handling of a POST with body data.
#
tr = Test.AddTestRun("Verify the client-request size node for a request with a body.")
tr.Setup.CopyAs(verify_replay, Test.RunDirectory)
size_of_verify_replay_file = os.path.getsize(os.path.join(Test.TestDirectory, verify_replay))
expected_body_size = 12345
tr.Processes.Default.Command = \
"python3 {0} {1} {2} {3} --client-request-size {4}".format(
verify_replay,
os.path.join(Test.Variables.AtsTestToolsDir, 'lib', 'replay_schema.json'),
replay_file_session_4,
sensitive_fields_arg,
expected_body_size)
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
#
# Test 4: Verify correct handling of a response produced out of the cache.
#
tr = Test.AddTestRun("Verify that the cached response's replay file looks appropriate.")
tr.Setup.CopyAs(verify_replay, Test.RunDirectory)
tr.Processes.Default.Command = 'python3 {0} {1} {2} --client-protocols "{3}"'.format(
verify_replay,
os.path.join(Test.Variables.AtsTestToolsDir, 'lib', 'replay_schema.json'),
replay_file_session_6,
http_protocols)
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
#
# Test 5: Verify correct handling of two transactions in a session.
#
tr = Test.AddTestRun("Verify the dump file of two transactions in a session.")
tr.Setup.CopyAs(verify_replay, Test.RunDirectory)
tr.Processes.Default.Command = 'python3 {0} {1} {2} --client-protocols "{3}"'.format(
verify_replay,
os.path.join(Test.Variables.AtsTestToolsDir, 'lib', 'replay_schema.json'),
replay_file_session_7,
http_protocols)
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
#
# Test 6: Verify correct protocol dumping of a TLS connection.
#
tr = Test.AddTestRun("Verify the client protocol stack of a TLS session.")
https_protocols = "tls,tcp,ip"
client_tls_features = "sni:www.tls.com,proxy-verify-mode:0,proxy-provided-cert:true"
tr.Setup.CopyAs(verify_replay, Test.RunDirectory)
tr.Processes.Default.Command = 'python3 {0} {1} {2} --client-protocols "{3}" --client-tls-features "{4}"'.format(
verify_replay,
os.path.join(Test.Variables.AtsTestToolsDir, 'lib', 'replay_schema.json'),
replay_file_session_8,
https_protocols,
client_tls_features)
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
tr = Test.AddTestRun("Verify the server TLS protocol stack.")
https_server_stack = "http,tls,tcp,ip"
tr.Setup.CopyAs(verify_replay, Test.RunDirectory)
server_tls_features = 'proxy-provided-cert:false,sni:www.tls.com,proxy-verify-mode:1'
tr.Processes.Default.Command = 'python3 {0} {1} {2} --server-protocols "{3}" --server-tls-features "{4}"'.format(
verify_replay,
os.path.join(Test.Variables.AtsTestToolsDir, 'lib', 'replay_schema.json'),
replay_file_session_8,
https_server_stack,
server_tls_features)
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
#
# Test 7: Verify correct protocol dumping of TLS and HTTP/2 connections.
#
tr = Test.AddTestRun("Verify the client HTTP/2 protocol stack.")
h2_protocols = "http,tls,tcp,ip"
tr.Setup.CopyAs(verify_replay, Test.RunDirectory)
tr.Processes.Default.Command = \
('python3 {0} {1} {2} --client-http-version "2" '
'--client-protocols "{3}" --client-tls-features "{4}"'.format(
verify_replay,
os.path.join(Test.Variables.AtsTestToolsDir, 'lib', 'replay_schema.json'),
replay_file_session_9,
h2_protocols,
client_tls_features))
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
tr = Test.AddTestRun("Verify the server HTTP/2 protocol stack.")
tr.Setup.CopyAs(verify_replay, Test.RunDirectory)
tr.Processes.Default.Command = 'python3 {0} {1} {2} --server-protocols "{3}" --server-tls-features "{4}"'.format(
verify_replay,
os.path.join(Test.Variables.AtsTestToolsDir, 'lib', 'replay_schema.json'),
replay_file_session_9,
https_server_stack,
server_tls_features)
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
#
# Test 8: Verify correct protocol dumping of client-side TLS and server-side HTTP.
#
tr = Test.AddTestRun("Verify the client TLS protocol stack.")
tr.Setup.CopyAs(verify_replay, Test.RunDirectory)
tr.Processes.Default.Command = 'python3 {0} {1} {2} --client-http-version "1.1" --client-protocols "{3}"'.format(
verify_replay,
os.path.join(Test.Variables.AtsTestToolsDir, 'lib', 'replay_schema.json'),
replay_file_session_10,
https_protocols)
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
tr = Test.AddTestRun("Verify the server HTTP protocol stack.")
tr.Setup.CopyAs(verify_replay, Test.RunDirectory)
http_server_stack = "http,tcp,ip"
tr.Processes.Default.Command = 'python3 {0} {1} {2} --server-protocols "{3}"'.format(
verify_replay,
os.path.join(Test.Variables.AtsTestToolsDir, 'lib', 'replay_schema.json'),
replay_file_session_10,
http_server_stack)
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pifpaf import drivers
class ZooKeeperDriver(drivers.Driver):
DEFAULT_PORT = 2181
PATH = ["/usr/share/zookeeper/bin",
"/usr/local/opt/zookeeper/libexec/bin"]
def __init__(self, port=DEFAULT_PORT, **kwargs):
"""Create a new ZooKeeper server."""
super(ZooKeeperDriver, self).__init__(**kwargs)
self.port = port
@classmethod
def get_options(cls):
return [
{"param_decls": ["--port"],
"type": int,
"default": cls.DEFAULT_PORT,
"help": "port to use for ZooKeeper"},
]
def _setUp(self):
super(ZooKeeperDriver, self)._setUp()
cfgfile = os.path.join(self.tempdir, "zoo.cfg")
with open(cfgfile, "w") as f:
f.write("""dataDir=%s
clientPort=%s""" % (self.tempdir, self.port))
logdir = os.path.join(self.tempdir, "log")
os.mkdir(logdir)
self.putenv("ZOOCFGDIR", self.tempdir, True)
self.putenv("ZOOCFG", cfgfile, True)
self.putenv("ZOO_LOG_DIR", logdir, True)
c, _ = self._exec(
["zkServer.sh", "start", cfgfile],
wait_for_line="STARTED",
path=self.PATH)
self.addCleanup(self._exec,
["zkServer.sh", "stop", cfgfile],
path=self.PATH)
self.putenv("ZOOKEEPER_PORT", str(self.port))
self.putenv("URL", "zookeeper://localhost:%d" % self.port)
|
# Copyright (C) 2008 One Laptop Per Child
# Copyright (C) 2014 Ignacio Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gettext import gettext as _
from gettext import ngettext
import logging
import os
from gi.repository import GObject
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import Gio
from gi.repository import SugarExt
from sugar3.graphics import style
from sugar3.graphics.palette import Palette
from sugar3.graphics.menuitem import MenuItem
from sugar3.graphics.icon import Icon
from sugar3.graphics.xocolor import XoColor
from sugar3.graphics.alert import Alert
from sugar3 import mime
from sugar3 import profile
from jarabe.model import friends
from jarabe.model import filetransfer
from jarabe.model import mimeregistry
from jarabe.journal import misc
from jarabe.journal import model
from jarabe.journal import journalwindow
from jarabe.webservice import accountsmanager
from jarabe.journal.misc import get_mount_color
PROJECT_BUNDLE_ID = 'org.sugarlabs.Project'
class ObjectPalette(Palette):
__gtype_name__ = 'ObjectPalette'
__gsignals__ = {
'detail-clicked': (GObject.SignalFlags.RUN_FIRST, None,
([str])),
'volume-error': (GObject.SignalFlags.RUN_FIRST, None,
([str, str])),
'choose-project': (GObject.SignalFlags.RUN_FIRST, None,
([object])),
}
def __init__(self, journalactivity, metadata, detail=False):
self._journalactivity = journalactivity
self._metadata = metadata
activity_icon = Icon(pixel_size=style.STANDARD_ICON_SIZE)
activity_icon.props.file = misc.get_icon_name(metadata)
color = misc.get_icon_color(metadata)
activity_icon.props.xo_color = color
if 'title' in metadata:
title = metadata['title']
else:
title = _('Untitled')
Palette.__init__(self, primary_text=title,
icon=activity_icon)
description = metadata.get('description', '')
if description:
self.set_secondary_text(description)
if misc.can_resume(metadata):
if metadata.get('activity_id', ''):
resume_label = _('Resume')
resume_with_label = _('Resume with')
else:
resume_label = _('Start')
resume_with_label = _('Start with')
menu_item = MenuItem(resume_label, 'activity-start')
menu_item.connect('activate', self.__start_activate_cb)
self.menu.append(menu_item)
menu_item.show()
menu_item = MenuItem(resume_with_label, 'activity-start')
self.menu.append(menu_item)
menu_item.show()
start_with_menu = StartWithMenu(self._metadata)
menu_item.set_submenu(start_with_menu)
elif metadata.get('activity', None) == PROJECT_BUNDLE_ID:
open_label = _('Open')
menu_item = MenuItem(open_label, 'project-box')
menu_item.connect('activate', self.__open_project_activate_cb)
self.menu.append(menu_item)
menu_item.show()
else:
menu_item = MenuItem(_('No activity to start entry'))
menu_item.set_sensitive(False)
self.menu.append(menu_item)
menu_item.show()
menu_item = MenuItem(_('Copy to'))
icon = Icon(icon_name='edit-copy', xo_color=color,
pixel_size=style.SMALL_ICON_SIZE)
menu_item.set_image(icon)
self.menu.append(menu_item)
menu_item.show()
copy_menu = CopyMenu(self._journalactivity, self.__get_uid_list_cb)
copy_menu.connect('volume-error', self.__volume_error_cb)
menu_item.set_submenu(copy_menu)
if not metadata.get('activity', None) == PROJECT_BUNDLE_ID:
menu_item = MenuItem(_('Send to project...'), 'project-box')
menu_item.connect('activate', self.__copy_to_project_activated_cb)
self.menu.append(menu_item)
menu_item.show()
if self._metadata['mountpoint'] == '/':
menu_item = MenuItem(_('Duplicate'))
icon = Icon(icon_name='edit-duplicate', xo_color=color,
pixel_size=style.SMALL_ICON_SIZE)
menu_item.set_image(icon)
menu_item.connect('activate', self.__duplicate_activate_cb)
self.menu.append(menu_item)
menu_item.show()
menu_item = MenuItem(_('Send to'), 'document-send')
self.menu.append(menu_item)
menu_item.show()
friends_menu = FriendsMenu()
friends_menu.connect('friend-selected', self.__friend_selected_cb)
menu_item.set_submenu(friends_menu)
if detail is True:
menu_item = MenuItem(_('View Details'), 'go-right')
menu_item.connect('activate', self.__detail_activate_cb)
self.menu.append(menu_item)
menu_item.show()
menu_item = MenuItem(_('Erase'), 'list-remove')
menu_item.connect('activate', self.__erase_activate_cb)
self.menu.append(menu_item)
menu_item.show()
def __get_uid_list_cb(self):
return [self._metadata['uid']]
def __copy_to_project_activated_cb(self, menu_item):
self.emit('choose-project', self._metadata)
self.destroy()
def __open_project_activate_cb(self, menu_item):
self._journalactivity.project_view_activated_cb(
list_view=None,
metadata=self._metadata)
def __start_activate_cb(self, menu_item):
misc.resume(self._metadata,
alert_window=journalwindow.get_journal_window())
def __duplicate_activate_cb(self, menu_item):
try:
model.copy(self._metadata, '/')
except IOError as e:
logging.exception('Error while copying the entry. %s', e.strerror)
self.emit('volume-error',
_('Error while copying the entry. %s') % e.strerror,
_('Error'))
def __erase_activate_cb(self, menu_item):
alert = Alert()
erase_string = _('Erase')
alert.props.title = erase_string
alert.props.msg = _('Do you want to permanently erase \"%s\"?') \
% self._metadata['title']
icon = Icon(icon_name='dialog-cancel')
alert.add_button(Gtk.ResponseType.CANCEL, _('Cancel'), icon)
icon.show()
ok_icon = Icon(icon_name='dialog-ok')
alert.add_button(Gtk.ResponseType.OK, erase_string, ok_icon)
ok_icon.show()
alert.connect('response', self.__erase_alert_response_cb)
journalwindow.get_journal_window().add_alert(alert)
alert.show()
def __erase_alert_response_cb(self, alert, response_id):
journalwindow.get_journal_window().remove_alert(alert)
if response_id is Gtk.ResponseType.OK:
model.delete(self._metadata['uid'])
def __detail_activate_cb(self, menu_item):
self.emit('detail-clicked', self._metadata['uid'])
def __volume_error_cb(self, menu_item, message, severity):
self.emit('volume-error', message, severity)
def __friend_selected_cb(self, menu_item, buddy):
logging.debug('__friend_selected_cb')
file_name = model.get_file(self._metadata['uid'])
if not file_name or not os.path.exists(file_name):
            logging.warning('Entries without a file cannot be sent.')
self.emit('volume-error',
_('Entries without a file cannot be sent.'),
_('Warning'))
return
title = str(self._metadata['title'])
description = str(self._metadata.get('description', ''))
mime_type = str(self._metadata['mime_type'])
if not mime_type:
mime_type = mime.get_for_file(file_name)
filetransfer.start_transfer(buddy, file_name, title, description,
mime_type)
def popup(self, immediate=False, state=None):
if self._journalactivity.get_list_view().is_dragging():
return
Palette.popup(self, immediate)
class CopyMenu(Gtk.Menu):
__gtype_name__ = 'JournalCopyMenu'
__gsignals__ = {
'volume-error': (GObject.SignalFlags.RUN_FIRST, None,
([str, str])),
}
def __init__(self, journalactivity, get_uid_list_cb):
Gtk.Menu.__init__(self)
CopyMenuBuilder(journalactivity, get_uid_list_cb,
self.__volume_error_cb, self)
def __volume_error_cb(self, menu_item, message, severity):
self.emit('volume-error', message, severity)
class CopyMenuBuilder():
def __init__(self, journalactivity, get_uid_list_cb, __volume_error_cb,
menu, add_clipboard_menu=True, add_webservices_menu=True):
self._journalactivity = journalactivity
self._get_uid_list_cb = get_uid_list_cb
self.__volume_error_cb = __volume_error_cb
self._menu = menu
self._add_clipboard_menu = add_clipboard_menu
self._add_webservices_menu = add_webservices_menu
self._mount_added_hid = None
self._mount_removed_hid = None
self._create_menu_items()
def _create_menu_items(self):
if self._add_clipboard_menu:
clipboard_menu = ClipboardMenu(self._get_uid_list_cb)
clipboard_menu.set_image(Icon(icon_name='toolbar-edit',
pixel_size=style.SMALL_ICON_SIZE))
clipboard_menu.connect('volume-error', self.__volume_error_cb)
self._menu.append(clipboard_menu)
clipboard_menu.show()
if self._journalactivity.get_mount_point() != '/':
color = profile.get_color()
journal_menu = VolumeMenu(self._journalactivity,
self._get_uid_list_cb, _('Journal'), '/')
journal_menu.set_image(Icon(icon_name='activity-journal',
xo_color=color,
pixel_size=style.SMALL_ICON_SIZE))
journal_menu.connect('volume-error', self.__volume_error_cb)
self._menu.append(journal_menu)
journal_menu.show()
documents_path = model.get_documents_path()
if documents_path is not None and \
self._journalactivity.get_mount_point() != documents_path:
documents_menu = VolumeMenu(self._journalactivity,
self._get_uid_list_cb, _('Documents'),
documents_path)
documents_menu.set_image(Icon(icon_name='user-documents',
pixel_size=style.SMALL_ICON_SIZE))
documents_menu.connect('volume-error', self.__volume_error_cb)
self._menu.append(documents_menu)
documents_menu.show()
volume_monitor = Gio.VolumeMonitor.get()
self._volumes = {}
for mount in volume_monitor.get_mounts():
self._add_mount(mount)
self._mount_added_hid = volume_monitor.connect('mount-added',
self.__mount_added_cb)
self._mount_removed_hid = volume_monitor.connect(
'mount-removed',
self.__mount_removed_cb)
if self._add_webservices_menu:
for account in accountsmanager.get_configured_accounts():
if hasattr(account, 'get_shared_journal_entry'):
entry = account.get_shared_journal_entry()
if hasattr(entry, 'get_share_menu'):
self._menu.append(entry.get_share_menu(
self._get_uid_list_cb))
def update_mount_point(self):
for menu_item in self._menu.get_children():
if isinstance(menu_item, MenuItem):
self._menu.remove(menu_item)
self._create_menu_items()
def __mount_added_cb(self, volume_monitor, mount):
self._add_mount(mount)
def _add_mount(self, mount):
mount_path = mount.get_root().get_path()
if mount_path in self._volumes:
return
if self._journalactivity.get_mount_point() == mount_path:
return
volume_menu = VolumeMenu(self._journalactivity,
self._get_uid_list_cb, mount.get_name(),
mount.get_root().get_path())
icon_name = misc.get_mount_icon_name(mount, Gtk.IconSize.MENU)
icon = Icon(pixel_size=style.SMALL_ICON_SIZE,
icon_name=icon_name,
xo_color=get_mount_color(mount))
volume_menu.set_image(icon)
volume_menu.connect('volume-error', self.__volume_error_cb)
self._menu.append(volume_menu)
self._volumes[mount.get_root().get_path()] = volume_menu
volume_menu.show()
def __mount_removed_cb(self, volume_monitor, mount):
volume_menu = self._volumes[mount.get_root().get_path()]
self._menu.remove(volume_menu)
del self._volumes[mount.get_root().get_path()]
def __destroy_cb(self, widget):
volume_monitor = Gio.VolumeMonitor.get()
volume_monitor.disconnect(self._mount_added_hid)
volume_monitor.disconnect(self._mount_removed_hid)
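# Minimal usage sketch for CopyMenuBuilder (illustrative only; the
# `journalactivity` object, uid and error callback below are placeholders,
# not taken from this file):
#   menu = Gtk.Menu()
#   CopyMenuBuilder(journalactivity, lambda: ['some-object-uid'],
#                   volume_error_cb, menu)
# This populates `menu` with Clipboard, Journal, Documents and
# mounted-volume entries and keeps the volume entries in sync as mounts
# are added or removed.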
class VolumeMenu(MenuItem):
__gtype_name__ = 'JournalVolumeMenu'
__gsignals__ = {
'volume-error': (GObject.SignalFlags.RUN_FIRST, None,
([str, str])),
}
def __init__(self, journalactivity, get_uid_list_cb, label, mount_point):
MenuItem.__init__(self, label)
self._get_uid_list_cb = get_uid_list_cb
self._journalactivity = journalactivity
self._mount_point = mount_point
self.connect('activate', self.__copy_to_volume_cb)
def __copy_to_volume_cb(self, menu_item):
uid_list = self._get_uid_list_cb()
if len(uid_list) == 1:
uid = uid_list[0]
file_path = model.get_file(uid)
if not file_path or not os.path.exists(file_path):
logging.warn('Entries without a file cannot be copied.')
self.emit('volume-error',
_('Entries without a file cannot be copied.'),
_('Warning'))
return
try:
metadata = model.get(uid)
model.copy(metadata, self._mount_point)
except IOError as e:
logging.exception('Error while copying the entry. %s',
e.strerror)
self.emit('volume-error',
_('Error while copying the entry. %s') % e.strerror,
_('Error'))
else:
BatchOperator(
self._journalactivity, uid_list, _('Copy'),
self._get_confirmation_alert_message(len(uid_list)),
self._perform_copy)
def _get_confirmation_alert_message(self, entries_len):
return ngettext('Do you want to copy %d entry?',
'Do you want to copy %d entries?',
entries_len) % (entries_len)
def _perform_copy(self, metadata):
file_path = model.get_file(metadata['uid'])
if not file_path or not os.path.exists(file_path):
logging.warn('Entries without a file cannot be copied.')
return
try:
model.copy(metadata, self._mount_point)
except IOError as e:
logging.exception('Error while copying the entry. %s',
e.strerror)
class ClipboardMenu(MenuItem):
__gtype_name__ = 'JournalClipboardMenu'
__gsignals__ = {
'volume-error': (GObject.SignalFlags.RUN_FIRST, None,
([str, str])),
}
def __init__(self, get_uid_list_cb):
MenuItem.__init__(self, _('Clipboard'))
self._temp_file_path = None
self._get_uid_list_cb = get_uid_list_cb
self.connect('activate', self.__copy_to_clipboard_cb)
def __copy_to_clipboard_cb(self, menu_item):
clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
uid_list = self._get_uid_list_cb()
if len(uid_list) == 1:
uid = uid_list[0]
file_path = model.get_file(uid)
if not file_path or not os.path.exists(file_path):
logging.warn('Entries without a file cannot be copied.')
self.emit('volume-error',
_('Entries without a file cannot be copied.'),
_('Warning'))
return
# XXX SL#4307 - until set_with_data bindings are fixed upstream
if hasattr(clipboard, 'set_with_data'):
clipboard.set_with_data(
[Gtk.TargetEntry.new('text/uri-list', 0, 0)],
self.__clipboard_get_func_cb,
self.__clipboard_clear_func_cb,
None)
else:
SugarExt.clipboard_set_with_data(
clipboard,
[Gtk.TargetEntry.new('text/uri-list', 0, 0)],
self.__clipboard_get_func_cb,
self.__clipboard_clear_func_cb,
None)
def __clipboard_get_func_cb(self, clipboard, selection_data, info, data):
# Get hold of a reference so the temp file doesn't get deleted
for uid in self._get_uid_list_cb():
self._temp_file_path = model.get_file(uid)
logging.debug('__clipboard_get_func_cb %r', self._temp_file_path)
selection_data.set_uris(['file://' + self._temp_file_path])
def __clipboard_clear_func_cb(self, clipboard, data):
# Release and delete the temp file
self._temp_file_path = None
class FriendsMenu(Gtk.Menu):
__gtype_name__ = 'JournalFriendsMenu'
__gsignals__ = {
'friend-selected': (GObject.SignalFlags.RUN_FIRST, None,
([object])),
}
def __init__(self):
Gtk.Menu.__init__(self)
if filetransfer.file_transfer_available():
friends_model = friends.get_model()
for friend in friends_model:
if friend.is_present():
menu_item = MenuItem(text_label=friend.get_nick(),
icon_name='computer-xo',
xo_color=friend.get_color())
menu_item.connect('activate', self.__item_activate_cb,
friend)
self.append(menu_item)
menu_item.show()
if not self.get_children():
menu_item = MenuItem(_('No friends present'))
menu_item.set_sensitive(False)
self.append(menu_item)
menu_item.show()
else:
menu_item = MenuItem(_('No valid connection found'))
menu_item.set_sensitive(False)
self.append(menu_item)
menu_item.show()
def __item_activate_cb(self, menu_item, friend):
self.emit('friend-selected', friend)
class StartWithMenu(Gtk.Menu):
__gtype_name__ = 'JournalStartWithMenu'
def __init__(self, metadata):
Gtk.Menu.__init__(self)
self._metadata = metadata
for activity_info in misc.get_activities(metadata):
menu_item = MenuItem(activity_info.get_name())
menu_item.set_image(Icon(file=activity_info.get_icon(),
pixel_size=style.SMALL_ICON_SIZE))
menu_item.connect('activate', self.__item_activate_cb,
activity_info.get_bundle_id())
self.append(menu_item)
menu_item.show()
if not self.get_children():
if metadata.get('activity_id', ''):
resume_label = _('No activity to resume entry')
else:
resume_label = _('No activity to start entry')
menu_item = MenuItem(resume_label)
menu_item.set_sensitive(False)
self.append(menu_item)
menu_item.show()
def __item_activate_cb(self, menu_item, service_name):
mime_type = self._metadata.get('mime_type', '')
if mime_type:
mime_registry = mimeregistry.get_registry()
mime_registry.set_default_activity(mime_type, service_name)
misc.resume(self._metadata, bundle_id=service_name,
alert_window=journalwindow.get_journal_window())
class BuddyPalette(Palette):
def __init__(self, buddy):
self._buddy = buddy
nick, colors = buddy
buddy_icon = Icon(icon_name='computer-xo',
pixel_size=style.STANDARD_ICON_SIZE,
xo_color=XoColor(colors))
Palette.__init__(self, primary_text=nick, icon=buddy_icon)
# TODO: Support actions on buddies, like make friend, invite, etc.
class BatchOperator(GObject.GObject):
"""
    This class implements the course of action that happens when clicking
    on a batch operation button (e.g. Batch-Copy-Toolbar-button,
    Batch-Copy-To-Journal-button,
    Batch-Copy-To-Documents-button,
    Batch-Copy-To-Mounted-Drive-button,
    Batch-Copy-To-Clipboard-button,
    Batch-Erase-Button).
    """
def __init__(self, journalactivity,
uid_list,
alert_title, alert_message,
operation_cb):
GObject.GObject.__init__(self)
self._journalactivity = journalactivity
self._uid_list = uid_list[:]
self._alert_title = alert_title
self._alert_message = alert_message
self._operation_cb = operation_cb
self._show_confirmation_alert()
def _show_confirmation_alert(self):
self._journalactivity.freeze_ui()
GObject.idle_add(self.__show_confirmation_alert_internal)
def __show_confirmation_alert_internal(self):
        # Show an alert requesting confirmation before running the batch operation
self._confirmation_alert = Alert()
self._confirmation_alert.props.title = self._alert_title
self._confirmation_alert.props.msg = self._alert_message
stop_icon = Icon(icon_name='dialog-cancel')
self._confirmation_alert.add_button(Gtk.ResponseType.CANCEL,
_('Stop'), stop_icon)
stop_icon.show()
ok_icon = Icon(icon_name='dialog-ok')
self._confirmation_alert.add_button(Gtk.ResponseType.OK,
_('Continue'), ok_icon)
ok_icon.show()
self._journalactivity.add_alert(self._confirmation_alert)
self._confirmation_alert.connect('response',
self.__confirmation_response_cb)
self._confirmation_alert.show()
def __confirmation_response_cb(self, alert, response):
if response == Gtk.ResponseType.CANCEL:
self._journalactivity.unfreeze_ui()
self._journalactivity.remove_alert(alert)
            # this is only needed in case the operation has already started
            # and the user wants to stop it.
self._stop_batch_execution()
        elif not hasattr(self, '_object_index'):
self._object_index = 0
GObject.idle_add(self._operate_by_uid_internal)
def _operate_by_uid_internal(self):
# If there is still some uid left, proceed with the operation.
# Else, proceed to post-operations.
if self._object_index < len(self._uid_list):
uid = self._uid_list[self._object_index]
metadata = model.get(uid)
title = None
if 'title' in metadata:
title = metadata['title']
if title is None or title == '':
title = _('Untitled')
alert_message = _('%(index)d of %(total)d : %(object_title)s') % {
'index': self._object_index + 1,
'total': len(self._uid_list),
'object_title': title}
self._confirmation_alert.props.msg = alert_message
GObject.idle_add(self._operate_per_metadata, metadata)
else:
self._finish_batch_execution()
def _operate_per_metadata(self, metadata):
self._operation_cb(metadata)
# process the next
self._object_index = self._object_index + 1
GObject.idle_add(self._operate_by_uid_internal)
def _stop_batch_execution(self):
self._object_index = len(self._uid_list)
def _finish_batch_execution(self):
del self._object_index
self._journalactivity.unfreeze_ui()
self._journalactivity.remove_alert(self._confirmation_alert)
self._journalactivity.update_selected_items_ui()
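# Illustrative sketch of how BatchOperator is driven (the arguments are
# placeholders, not taken from this file): constructing
#   BatchOperator(journalactivity, uid_list, _('Copy'),
#                 alert_message, operation_cb)
# freezes the journal UI, shows the confirmation alert, and, if the user
# continues, calls operation_cb(metadata) once per uid before unfreezing
# the UI again.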
|
"""
Objects for dealing with Chebyshev series.
This module provides a number of objects (mostly functions) useful for
dealing with Chebyshev series, including a `Chebyshev` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `chebdomain` -- Chebyshev series default domain, [-1,1].
- `chebzero` -- (Coefficients of the) Chebyshev series that evaluates
identically to 0.
- `chebone` -- (Coefficients of the) Chebyshev series that evaluates
identically to 1.
- `chebx` -- (Coefficients of the) Chebyshev series for the identity map,
``f(x) = x``.
Arithmetic
----------
- `chebadd` -- add two Chebyshev series.
- `chebsub` -- subtract one Chebyshev series from another.
- `chebmul` -- multiply two Chebyshev series.
- `chebdiv` -- divide one Chebyshev series by another.
- `chebval` -- evaluate a Chebyshev series at given points.
Calculus
--------
- `chebder` -- differentiate a Chebyshev series.
- `chebint` -- integrate a Chebyshev series.
Misc Functions
--------------
- `chebfromroots` -- create a Chebyshev series with specified roots.
- `chebroots` -- find the roots of a Chebyshev series.
- `chebvander` -- Vandermonde-like matrix for Chebyshev polynomials.
- `chebfit` -- least-squares fit returning a Chebyshev series.
- `chebtrim` -- trim leading coefficients from a Chebyshev series.
- `chebline` -- Chebyshev series of given straight line.
- `cheb2poly` -- convert a Chebyshev series to a polynomial.
- `poly2cheb` -- convert a polynomial to a Chebyshev series.
Classes
-------
- `Chebyshev` -- A Chebyshev series class.
See also
--------
`numpy.polynomial`
Notes
-----
The implementations of multiplication, division, integration, and
differentiation use the algebraic identities [1]_:
.. math ::
T_n(x) = \\frac{z^n + z^{-n}}{2} \\\\
z\\frac{dx}{dz} = \\frac{z - z^{-1}}{2}.
where
.. math :: x = \\frac{z + z^{-1}}{2}.
These identities allow a Chebyshev series to be expressed as a finite,
symmetric Laurent series. In this module, this sort of Laurent series
is referred to as a "z-series."
References
----------
.. [1] A. T. Benjamin, et al., "Combinatorial Trigonometry with Chebyshev
Polynomials," *Journal of Statistical Planning and Inference 14*, 2008
(preprint: http://www.math.hmc.edu/~benjamin/papers/CombTrig.pdf, pg. 4)
"""
from __future__ import division
__all__ = ['chebzero', 'chebone', 'chebx', 'chebdomain', 'chebline',
'chebadd', 'chebsub', 'chebmul', 'chebdiv', 'chebval', 'chebder',
'chebint', 'cheb2poly', 'poly2cheb', 'chebfromroots', 'chebvander',
'chebfit', 'chebtrim', 'chebroots', 'Chebyshev']
import numpy as np
import numpy.linalg as la
import polyutils as pu
import warnings
from polytemplate import polytemplate
chebtrim = pu.trimcoef
#
# A collection of functions for manipulating z-series. These are private
# functions and do minimal error checking.
#
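# Illustrative note (not part of the original comments): with
# x = (z + 1/z)/2, one has for example T_2(x) = 2*x**2 - 1 = (z**2 + z**-2)/2,
# so T_2 is stored below as the symmetric z-series [0.5, 0, 0, 0, 0.5].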
def _cseries_to_zseries(cs) :
"""Covert Chebyshev series to z-series.
Covert a Chebyshev series to the equivalent z-series. The result is
never an empty array. The dtype of the return is the same as that of
the input. No checks are run on the arguments as this routine is for
internal use.
Parameters
----------
cs : 1-d ndarray
Chebyshev coefficients, ordered from low to high
Returns
-------
zs : 1-d ndarray
Odd length symmetric z-series, ordered from low to high.
"""
n = cs.size
zs = np.zeros(2*n-1, dtype=cs.dtype)
zs[n-1:] = cs/2
return zs + zs[::-1]
def _zseries_to_cseries(zs) :
"""Covert z-series to a Chebyshev series.
Covert a z series to the equivalent Chebyshev series. The result is
never an empty array. The dtype of the return is the same as that of
the input. No checks are run on the arguments as this routine is for
internal use.
Parameters
----------
zs : 1-d ndarray
Odd length symmetric z-series, ordered from low to high.
Returns
-------
cs : 1-d ndarray
Chebyshev coefficients, ordered from low to high.
"""
n = (zs.size + 1)//2
cs = zs[n-1:].copy()
cs[1:n] *= 2
return cs
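# Informal round-trip sketch for the two helpers above (not part of the
# original module): for cs = [c0, c1, c2],
#   _cseries_to_zseries(cs)  -> [c2/2, c1/2, c0, c1/2, c2/2]
# and _zseries_to_cseries applied to that array returns [c0, c1, c2], so
# the two conversions are mutual inverses for 1-d coefficient arrays.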
def _zseries_mul(z1, z2) :
"""Multiply two z-series.
Multiply two z-series to produce a z-series.
Parameters
----------
z1, z2 : 1-d ndarray
The arrays must be 1-d but this is not checked.
Returns
-------
product : 1-d ndarray
The product z-series.
Notes
-----
    This is simply convolution. If symmetric/anti-symmetric z-series are
denoted by S/A then the following rules apply:
S*S, A*A -> S
S*A, A*S -> A
"""
return np.convolve(z1, z2)
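# Worked example (a sketch, not in the original source): the z-series of
# T_1 is [0.5, 0, 0.5], and
#   _zseries_mul([0.5, 0, 0.5], [0.5, 0, 0.5]) -> [0.25, 0, 0.5, 0, 0.25]
# which converts back to the Chebyshev coefficients [0.5, 0, 0.5], i.e.
# T_1 * T_1 = (T_0 + T_2)/2, as expected.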
def _zseries_div(z1, z2) :
"""Divide the first z-series by the second.
Divide `z1` by `z2` and return the quotient and remainder as z-series.
Warning: this implementation only applies when both z1 and z2 have the
same symmetry, which is sufficient for present purposes.
Parameters
----------
z1, z2 : 1-d ndarray
The arrays must be 1-d and have the same symmetry, but this is not
checked.
Returns
-------
(quotient, remainder) : 1-d ndarrays
Quotient and remainder as z-series.
Notes
-----
This is not the same as polynomial division on account of the desired form
    of the remainder. If symmetric/anti-symmetric z-series are denoted by S/A
then the following rules apply:
S/S -> S,S
A/A -> S,A
The restriction to types of the same symmetry could be fixed but seems like
    unneeded generality. There is no natural form for the remainder in the case
where there is no symmetry.
"""
z1 = z1.copy()
z2 = z2.copy()
len1 = len(z1)
len2 = len(z2)
if len2 == 1 :
z1 /= z2
return z1, z1[:1]*0
elif len1 < len2 :
return z1[:1]*0, z1
else :
dlen = len1 - len2
scl = z2[0]
z2 /= scl
quo = np.empty(dlen + 1, dtype=z1.dtype)
i = 0
j = dlen
while i < j :
r = z1[i]
quo[i] = z1[i]
quo[dlen - i] = r
tmp = r*z2
z1[i:i+len2] -= tmp
z1[j:j+len2] -= tmp
i += 1
j -= 1
r = z1[i]
quo[i] = r
tmp = r*z2
z1[i:i+len2] -= tmp
quo /= scl
rem = z1[i+1:i-1+len2].copy()
return quo, rem
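# Worked division sketch (illustrative): dividing the z-series of T_2,
# [0.5, 0, 0, 0, 0.5], by the z-series of T_1, [0.5, 0, 0.5], yields
#   quotient  [1, 0, 1]   (the z-series of 2*T_1)
#   remainder [-1]        (the z-series of -T_0)
# consistent with the identity T_2 = 2*x*T_1 - T_0.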
def _zseries_der(zs) :
"""Differentiate a z-series.
The derivative is with respect to x, not z. This is achieved using the
chain rule and the value of dx/dz given in the module notes.
Parameters
----------
zs : z-series
The z-series to differentiate.
Returns
-------
derivative : z-series
The derivative
Notes
-----
The zseries for x (ns) has been multiplied by two in order to avoid
using floats that are incompatible with Decimal and likely other
specialized scalar types. This scaling has been compensated by
multiplying the value of zs by two also so that the two cancels in the
division.
"""
n = len(zs)//2
ns = np.array([-1, 0, 1], dtype=zs.dtype)
zs *= np.arange(-n, n+1)*2
d, r = _zseries_div(zs, ns)
return d
def _zseries_int(zs) :
"""Integrate a z-series.
The integral is with respect to x, not z. This is achieved by a change
of variable using dx/dz given in the module notes.
Parameters
----------
zs : z-series
The z-series to integrate
Returns
-------
integral : z-series
The indefinite integral
Notes
-----
The zseries for x (ns) has been multiplied by two in order to avoid
using floats that are incompatible with Decimal and likely other
specialized scalar types. This scaling has been compensated by
dividing the resulting zs by two.
"""
n = 1 + len(zs)//2
ns = np.array([-1, 0, 1], dtype=zs.dtype)
zs = _zseries_mul(zs, ns)
div = np.arange(-n, n+1)*2
zs[:n] /= div[:n]
zs[n+1:] /= div[n+1:]
zs[n] = 0
return zs
#
# Chebyshev series functions
#
def poly2cheb(pol) :
"""
poly2cheb(pol)
Convert a polynomial to a Chebyshev series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Chebyshev series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-d array containing the polynomial coefficients
Returns
-------
cs : ndarray
1-d array containing the coefficients of the equivalent Chebyshev
series.
See Also
--------
cheb2poly
Notes
-----
    Since the input must be array_like and the output is an ndarray, if one
    is going to use this function to convert a Polynomial instance, P, to a
    Chebyshev instance, T, the usage is
    ``T = Chebyshev(poly2cheb(P.coef))``; see Examples below.
Examples
--------
>>> from numpy import polynomial as P
>>> p = P.Polynomial(np.arange(4))
>>> p
Polynomial([ 0., 1., 2., 3.], [-1., 1.])
>>> c = P.Chebyshev(P.poly2cheb(p.coef))
>>> c
Chebyshev([ 1. , 3.25, 1. , 0.75], [-1., 1.])
"""
[pol] = pu.as_series([pol])
pol = pol[::-1]
zs = pol[:1].copy()
x = np.array([.5, 0, .5], dtype=pol.dtype)
for i in range(1, len(pol)) :
zs = _zseries_mul(zs, x)
zs[i] += pol[i]
return _zseries_to_cseries(zs)
def cheb2poly(cs) :
"""
cheb2poly(cs)
Convert a Chebyshev series to a polynomial.
Convert an array representing the coefficients of a Chebyshev series,
ordered from lowest degree to highest, to an array of the coefficients
of the equivalent polynomial (relative to the "standard" basis) ordered
from lowest to highest degree.
Parameters
----------
cs : array_like
1-d array containing the Chebyshev series coefficients, ordered
from lowest order term to highest.
Returns
-------
pol : ndarray
1-d array containing the coefficients of the equivalent polynomial
(relative to the "standard" basis) ordered from lowest order term
to highest.
See Also
--------
poly2cheb
Notes
-----
    Since the input must be array_like and the output is an ndarray, if one
    is going to use this function to convert a Chebyshev instance, T, to a
    Polynomial instance, P, the usage is
    ``P = Polynomial(cheb2poly(T.coef))``; see Examples below.
Examples
--------
>>> from numpy import polynomial as P
>>> c = P.Chebyshev(np.arange(4))
>>> c
Chebyshev([ 0., 1., 2., 3.], [-1., 1.])
>>> p = P.Polynomial(P.cheb2poly(c.coef))
>>> p
Polynomial([ -2., -8., 4., 12.], [-1., 1.])
"""
[cs] = pu.as_series([cs])
pol = np.zeros(len(cs), dtype=cs.dtype)
quo = _cseries_to_zseries(cs)
x = np.array([.5, 0, .5], dtype=pol.dtype)
for i in range(0, len(cs) - 1) :
quo, rem = _zseries_div(quo, x)
pol[i] = rem[0]
pol[-1] = quo[0]
return pol
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Chebyshev default domain.
chebdomain = np.array([-1,1])
# Chebyshev coefficients representing zero.
chebzero = np.array([0])
# Chebyshev coefficients representing one.
chebone = np.array([1])
# Chebyshev coefficients representing the identity x.
chebx = np.array([0,1])
def chebline(off, scl) :
"""
Chebyshev series whose graph is a straight line.
Parameters
----------
off, scl : scalars
The specified line is given by ``off + scl*x``.
Returns
-------
y : ndarray
This module's representation of the Chebyshev series for
``off + scl*x``.
See Also
--------
polyline
Examples
--------
>>> import numpy.polynomial.chebyshev as C
>>> C.chebline(3,2)
array([3, 2])
>>> C.chebval(-3, C.chebline(3,2)) # should be -3
-3.0
"""
if scl != 0 :
return np.array([off,scl])
else :
return np.array([off])
def chebfromroots(roots) :
"""
Generate a Chebyshev series with the given roots.
Return the array of coefficients for the C-series whose roots (a.k.a.
"zeros") are given by *roots*. The returned array of coefficients is
ordered from lowest order "term" to highest, and zeros of multiplicity
greater than one must be included in *roots* a number of times equal
to their multiplicity (e.g., if `2` is a root of multiplicity three,
then [2,2,2] must be in *roots*).
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-d array of the C-series' coefficients, ordered from low to
high. If all roots are real, ``out.dtype`` is a float type;
otherwise, ``out.dtype`` is a complex type, even if all the
coefficients in the result are real (see Examples below).
See Also
--------
polyfromroots
Notes
-----
What is returned are the :math:`c_i` such that:
.. math::
\\sum_{i=0}^{n} c_i*T_i(x) = \\prod_{i=0}^{n} (x - roots[i])
where ``n == len(roots)`` and :math:`T_i(x)` is the `i`-th Chebyshev
(basis) polynomial over the domain `[-1,1]`. Note that, unlike
`polyfromroots`, due to the nature of the C-series basis set, the
above identity *does not* imply :math:`c_n = 1` identically (see
Examples).
Examples
--------
>>> import numpy.polynomial.chebyshev as C
>>> C.chebfromroots((-1,0,1)) # x^3 - x relative to the standard basis
array([ 0. , -0.25, 0. , 0.25])
>>> j = complex(0,1)
>>> C.chebfromroots((-j,j)) # x^2 + 1 relative to the standard basis
array([ 1.5+0.j, 0.0+0.j, 0.5+0.j])
"""
if len(roots) == 0 :
return np.ones(1)
else :
[roots] = pu.as_series([roots], trim=False)
prd = np.array([1], dtype=roots.dtype)
for r in roots :
fac = np.array([.5, -r, .5], dtype=roots.dtype)
prd = _zseries_mul(fac, prd)
return _zseries_to_cseries(prd)
def chebadd(c1, c2):
"""
Add one Chebyshev series to another.
Returns the sum of two Chebyshev series `c1` + `c2`. The arguments
are sequences of coefficients ordered from lowest order term to
highest, i.e., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-d arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the Chebyshev series of their sum.
See Also
--------
chebsub, chebmul, chebdiv, chebpow
Notes
-----
Unlike multiplication, division, etc., the sum of two Chebyshev series
is a Chebyshev series (without having to "reproject" the result onto
the basis set) so addition, just like that of "standard" polynomials,
is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebadd(c1,c2)
array([ 4., 4., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2) :
c1[:c2.size] += c2
ret = c1
else :
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def chebsub(c1, c2):
"""
Subtract one Chebyshev series from another.
Returns the difference of two Chebyshev series `c1` - `c2`. The
sequences of coefficients are from lowest order term to highest, i.e.,
[1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-d arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Chebyshev series coefficients representing their difference.
See Also
--------
chebadd, chebmul, chebdiv, chebpow
Notes
-----
Unlike multiplication, division, etc., the difference of two Chebyshev
series is a Chebyshev series (without having to "reproject" the result
onto the basis set) so subtraction, just like that of "standard"
polynomials, is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebsub(c1,c2)
array([-2., 0., 2.])
>>> C.chebsub(c2,c1) # -C.chebsub(c1,c2)
array([ 2., 0., -2.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2) :
c1[:c2.size] -= c2
ret = c1
else :
c2 = -c2
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def chebmul(c1, c2):
"""
Multiply one Chebyshev series by another.
Returns the product of two Chebyshev series `c1` * `c2`. The arguments
are sequences of coefficients, from lowest order "term" to highest,
e.g., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-d arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Chebyshev series coefficients representing their product.
See Also
--------
chebadd, chebsub, chebdiv, chebpow
Notes
-----
In general, the (polynomial) product of two C-series results in terms
that are not in the Chebyshev polynomial basis set. Thus, to express
the product as a C-series, it is typically necessary to "re-project"
the product onto said basis set, which typically produces
"un-intuitive" (but correct) results; see Examples section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebmul(c1,c2) # multiplication requires "reprojection"
array([ 6.5, 12. , 12. , 4. , 1.5])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
z1 = _cseries_to_zseries(c1)
z2 = _cseries_to_zseries(c2)
prd = _zseries_mul(z1, z2)
ret = _zseries_to_cseries(prd)
return pu.trimseq(ret)
def chebdiv(c1, c2):
"""
Divide one Chebyshev series by another.
Returns the quotient-with-remainder of two Chebyshev series
`c1` / `c2`. The arguments are sequences of coefficients from lowest
order "term" to highest, e.g., [1,2,3] represents the series
``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-d arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
[quo, rem] : ndarrays
Of Chebyshev series coefficients representing the quotient and
remainder.
See Also
--------
chebadd, chebsub, chebmul, chebpow
Notes
-----
In general, the (polynomial) division of one C-series by another
results in quotient and remainder terms that are not in the Chebyshev
polynomial basis set. Thus, to express these results as C-series, it
is typically necessary to "re-project" the results onto said basis
set, which typically produces "un-intuitive" (but correct) results;
see Examples section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebdiv(c1,c2) # quotient "intuitive," remainder not
(array([ 3.]), array([-8., -4.]))
>>> c2 = (0,1,2,3)
>>> C.chebdiv(c2,c1) # neither "intuitive"
(array([ 0., 2.]), array([-2., -4.]))
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0 :
raise ZeroDivisionError()
lc1 = len(c1)
lc2 = len(c2)
if lc1 < lc2 :
return c1[:1]*0, c1
elif lc2 == 1 :
return c1/c2[-1], c1[:1]*0
else :
z1 = _cseries_to_zseries(c1)
z2 = _cseries_to_zseries(c2)
quo, rem = _zseries_div(z1, z2)
quo = pu.trimseq(_zseries_to_cseries(quo))
rem = pu.trimseq(_zseries_to_cseries(rem))
return quo, rem
def chebpow(cs, pow, maxpower=16) :
"""Raise a Chebyshev series to a power.
Returns the Chebyshev series `cs` raised to the power `pow`. The
    argument `cs` is a sequence of coefficients ordered from low to high,
    i.e., [1,2,3] is the series ``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
cs : array_like
1d array of chebyshev series coefficients ordered from low to
high.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
        to unmanageable size. Default is 16.
Returns
-------
coef : ndarray
Chebyshev series of power.
See Also
--------
chebadd, chebsub, chebmul, chebdiv
Examples
--------
"""
# cs is a trimmed copy
[cs] = pu.as_series([cs])
power = int(pow)
if power != pow or power < 0 :
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower :
raise ValueError("Power is too large")
elif power == 0 :
return np.array([1], dtype=cs.dtype)
elif power == 1 :
return cs
else :
# This can be made more efficient by using powers of two
# in the usual way.
zs = _cseries_to_zseries(cs)
prd = zs
for i in range(2, power + 1) :
prd = np.convolve(prd, zs)
return _zseries_to_cseries(prd)
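# Illustrative example for chebpow (not from the docstring above):
#   chebpow([1, 2], 2) -> array([ 3.,  4.,  2.])
# i.e. squaring T_0 + 2*T_1 gives 3*T_0 + 4*T_1 + 2*T_2, since
# (1 + 2*x)**2 = 1 + 4*x + 4*x**2 and x**2 = (T_0 + T_2)/2.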
def chebder(cs, m=1, scl=1) :
"""
Differentiate a Chebyshev series.
Returns the series `cs` differentiated `m` times. At each iteration the
result is multiplied by `scl` (the scaling factor is for use in a linear
change of variable). The argument `cs` is the sequence of coefficients
from lowest order "term" to highest, e.g., [1,2,3] represents the series
``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
cs: array_like
1-d array of Chebyshev series coefficients ordered from low to high.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change of
variable. (Default: 1)
Returns
-------
der : ndarray
Chebyshev series of the derivative.
See Also
--------
chebint
Notes
-----
In general, the result of differentiating a C-series needs to be
"re-projected" onto the C-series basis set. Thus, typically, the
result of this function is "un-intuitive," albeit correct; see Examples
section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> cs = (1,2,3,4)
>>> C.chebder(cs)
array([ 14., 12., 24.])
>>> C.chebder(cs,3)
array([ 96.])
>>> C.chebder(cs,scl=-1)
array([-14., -12., -24.])
>>> C.chebder(cs,2,-1)
array([ 12., 96.])
"""
cnt = int(m)
    if cnt != m:
        raise ValueError("The order of derivation must be integer")
    if cnt < 0 :
        raise ValueError("The order of derivation must be non-negative")
    if not np.isscalar(scl) :
        raise ValueError("The scl parameter must be a scalar")
# cs is a trimmed copy
[cs] = pu.as_series([cs])
if cnt == 0:
return cs
elif cnt >= len(cs):
return cs[:1]*0
else :
zs = _cseries_to_zseries(cs)
for i in range(cnt):
zs = _zseries_der(zs)*scl
return _zseries_to_cseries(zs)
def chebint(cs, m=1, k=[], lbnd=0, scl=1):
"""
Integrate a Chebyshev series.
Returns, as a C-series, the input C-series `cs`, integrated `m` times
from `lbnd` to `x`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `cs` is a sequence of
coefficients, from lowest order C-series "term" to highest, e.g.,
[1,2,3] represents the series :math:`T_0(x) + 2T_1(x) + 3T_2(x)`.
Parameters
----------
cs : array_like
1-d array of C-series coefficients, ordered from low to high.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at zero
is the first value in the list, the value of the second integral
at zero is the second value, etc. If ``k == []`` (the default),
all constants are set to zero. If ``m == 1``, a single scalar can
be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
Returns
-------
S : ndarray
C-series coefficients of the integral.
Raises
------
ValueError
If ``m < 1``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
``np.isscalar(scl) == False``.
See Also
--------
chebder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
:math:`dx = du/a`, so one will need to set `scl` equal to :math:`1/a`
- perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
to be "re-projected" onto the C-series basis set. Thus, typically,
the result of this function is "un-intuitive," albeit correct; see
Examples section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> cs = (1,2,3)
>>> C.chebint(cs)
array([ 0.5, -0.5, 0.5, 0.5])
>>> C.chebint(cs,3)
array([ 0.03125 , -0.1875 , 0.04166667, -0.05208333, 0.01041667,
0.00625 ])
>>> C.chebint(cs, k=3)
array([ 3.5, -0.5, 0.5, 0.5])
>>> C.chebint(cs,lbnd=-2)
array([ 8.5, -0.5, 0.5, 0.5])
>>> C.chebint(cs,scl=-2)
array([-1., 1., -1., -1.])
"""
cnt = int(m)
if np.isscalar(k) :
k = [k]
    if cnt != m:
        raise ValueError("The order of integration must be integer")
    if cnt < 0 :
        raise ValueError("The order of integration must be non-negative")
    if len(k) > cnt :
        raise ValueError("Too many integration constants")
    if not np.isscalar(lbnd) :
        raise ValueError("The lbnd parameter must be a scalar")
    if not np.isscalar(scl) :
        raise ValueError("The scl parameter must be a scalar")
# cs is a trimmed copy
[cs] = pu.as_series([cs])
if cnt == 0:
return cs
else:
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt) :
zs = _cseries_to_zseries(cs)*scl
zs = _zseries_int(zs)
cs = _zseries_to_cseries(zs)
cs[0] += k[i] - chebval(lbnd, cs)
return cs
def chebval(x, cs):
"""Evaluate a Chebyshev series.
If `cs` is of length `n`, this function returns :
``p(x) = cs[0]*T_0(x) + cs[1]*T_1(x) + ... + cs[n-1]*T_{n-1}(x)``
If x is a sequence or array then p(x) will have the same shape as x.
    If `x` is a ring_like object that supports multiplication and addition
    by the values in `cs`, then an object of the same type is returned.
Parameters
----------
x : array_like, ring_like
Array of numbers or objects that support multiplication and
addition with themselves and with the elements of `cs`.
cs : array_like
1-d array of Chebyshev coefficients ordered from low to high.
Returns
-------
values : ndarray, ring_like
If the return is an ndarray then it has the same shape as `x`.
See Also
--------
chebfit
Notes
-----
The evaluation uses Clenshaw recursion, aka synthetic division.
Examples
--------
"""
# cs is a trimmed copy
[cs] = pu.as_series([cs])
if isinstance(x, tuple) or isinstance(x, list) :
x = np.asarray(x)
if len(cs) == 1 :
c0 = cs[0]
c1 = 0
elif len(cs) == 2 :
c0 = cs[0]
c1 = cs[1]
else :
x2 = 2*x
c0 = cs[-2]
c1 = cs[-1]
for i in range(3, len(cs) + 1) :
tmp = c0
c0 = cs[-i] - c1
c1 = tmp + c1*x2
return c0 + c1*x
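# Quick check of the Clenshaw evaluation above (an illustrative sketch):
#   chebval(0.5, [1, 2, 3])
# computes 1*T_0(0.5) + 2*T_1(0.5) + 3*T_2(0.5)
#        = 1 + 2*0.5 + 3*(2*0.5**2 - 1) = 0.5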
def chebvander(x, deg) :
"""Vandermonde matrix of given degree.
Returns the Vandermonde matrix of degree `deg` and sample points `x`.
This isn't a true Vandermonde matrix because `x` can be an arbitrary
ndarray and the Chebyshev polynomials aren't powers. If ``V`` is the
returned matrix and `x` is a 2d array, then the elements of ``V`` are
``V[i,j,k] = T_k(x[i,j])``, where ``T_k`` is the Chebyshev polynomial
of degree ``k``.
Parameters
----------
x : array_like
Array of points. The values are converted to double or complex
doubles.
deg : integer
Degree of the resulting matrix.
Returns
-------
vander : Vandermonde matrix.
The shape of the returned matrix is ``x.shape + (deg+1,)``. The last
index is the degree.
"""
x = np.asarray(x) + 0.0
order = int(deg) + 1
v = np.ones((order,) + x.shape, dtype=x.dtype)
if order > 1 :
x2 = 2*x
v[1] = x
for i in range(2, order) :
v[i] = v[i-1]*x2 - v[i-2]
return np.rollaxis(v, 0, v.ndim)
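# Shape/content sketch for chebvander (illustrative, not from the source):
#   chebvander([0.5], 2) -> array([[ 1. ,  0.5, -0.5]])
# one row per sample point, with columns T_0(x), T_1(x), T_2(x).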
def chebfit(x, y, deg, rcond=None, full=False, w=None):
"""
Least squares fit of Chebyshev series to data.
Fit a Chebyshev series ``p(x) = p[0] * T_{0}(x) + ... + p[deg] *
T_{deg}(x)`` of degree `deg` to points `(x, y)`. Returns a vector of
coefficients `p` that minimises the squared error.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
.. versionadded:: 1.5.0
Returns
-------
    coef : ndarray, shape (deg + 1,) or (deg + 1, K)
Chebyshev coefficients ordered from low to high. If `y` was 2-D,
the coefficients for the data in column k of `y` are in column
`k`.
[residuals, rank, singular_values, rcond] : present when `full` = True
Residuals of the least-squares fit, the effective rank of the
scaled Vandermonde matrix and its singular values, and the
specified value of `rcond`. For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False. The
warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', RankWarning)
See Also
--------
chebval : Evaluates a Chebyshev series.
chebvander : Vandermonde matrix of Chebyshev series.
polyfit : least squares fit using polynomials.
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
    The solution is the coefficients ``c[i]`` of the Chebyshev series
    ``T(x)`` that minimizes the squared error
    ``E = \\sum_j |y_j - T(x_j)|^2``.
    This problem is solved by setting up the overdetermined matrix
equation
``V(x)*c = y``,
where ``V`` is the Vandermonde matrix of `x`, the elements of ``c`` are
the coefficients to be solved for, and the elements of `y` are the
observed values. This equation is then solved using the singular value
decomposition of ``V``.
If some of the singular values of ``V`` are so small that they are
neglected, then a `RankWarning` will be issued. This means that the
    coefficient values may be poorly determined. Using a lower order fit
will usually get rid of the warning. The `rcond` parameter can also be
set to a value smaller than its default, but the resulting fit may be
spurious and have large contributions from roundoff error.
Fits using Chebyshev series are usually better conditioned than fits
using power series, but much can depend on the distribution of the
sample points and the smoothness of the data. If the quality of the fit
is inadequate splines may be a good alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
"""
order = int(deg) + 1
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
# check arguments.
    if deg < 0 :
        raise ValueError("expected deg >= 0")
    if x.ndim != 1:
        raise TypeError("expected 1D vector for x")
    if x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2 :
        raise TypeError("expected 1D or 2D array for y")
    if len(x) != len(y):
        raise TypeError("expected x and y to have same length")
# set up the least squares matrices
lhs = chebvander(x, deg)
rhs = y
if w is not None:
w = np.asarray(w) + 0.0
        if w.ndim != 1:
            raise TypeError("expected 1D vector for w")
        if len(x) != len(w):
            raise TypeError("expected x and w to have same length")
# apply weights
if rhs.ndim == 2:
lhs *= w[:, np.newaxis]
rhs *= w[:, np.newaxis]
else:
lhs *= w[:, np.newaxis]
rhs *= w
# set rcond
if rcond is None :
rcond = len(x)*np.finfo(x.dtype).eps
# scale the design matrix and solve the least squares equation
scl = np.sqrt((lhs*lhs).sum(0))
c, resids, rank, s = la.lstsq(lhs/scl, rhs, rcond)
c = (c.T/scl).T
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, pu.RankWarning)
if full :
return c, [resids, rank, s, rcond]
else :
return c
def chebroots(cs):
"""
Compute the roots of a Chebyshev series.
Return the roots (a.k.a "zeros") of the C-series represented by `cs`,
which is the sequence of the C-series' coefficients from lowest order
"term" to highest, e.g., [1,2,3] represents the C-series
``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
cs : array_like
1-d array of C-series coefficients ordered from low to high.
Returns
-------
out : ndarray
Array of the roots. If all the roots are real, then so is the
dtype of ``out``; otherwise, ``out``'s dtype is complex.
See Also
--------
polyroots
Notes
-----
Algorithm(s) used:
Remember: because the C-series basis set is different from the
"standard" basis set, the results of this function *may* not be what
one is expecting.
Examples
--------
>>> import numpy.polynomial as P
>>> import numpy.polynomial.chebyshev as C
>>> P.polyroots((-1,1,-1,1)) # x^3 - x^2 + x - 1 has two complex roots
array([ -4.99600361e-16-1.j, -4.99600361e-16+1.j, 1.00000e+00+0.j])
>>> C.chebroots((-1,1,-1,1)) # T3 - T2 + T1 - T0 has only real roots
array([ -5.00000000e-01, 2.60860684e-17, 1.00000000e+00])
"""
# cs is a trimmed copy
[cs] = pu.as_series([cs])
if len(cs) <= 1 :
return np.array([], dtype=cs.dtype)
if len(cs) == 2 :
return np.array([-cs[0]/cs[1]])
n = len(cs) - 1
cmat = np.zeros((n,n), dtype=cs.dtype)
cmat.flat[1::n+1] = .5
cmat.flat[n::n+1] = .5
cmat[1, 0] = 1
cmat[:,-1] -= cs[:-1]*(.5/cs[-1])
roots = la.eigvals(cmat)
roots.sort()
return roots
#
# Chebyshev series class
#
exec(polytemplate.substitute(name='Chebyshev', nick='cheb', domain='[-1,1]'))
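# The substituted template defines the `Chebyshev` convenience class.
# Hypothetical usage sketch (attribute names follow the shared polytemplate,
# which is not shown in this file):
#   c = Chebyshev([1, 2, 3])   # T_0 + 2*T_1 + 3*T_2 on the domain [-1, 1]
#   (c + c).coef               # -> array([ 2.,  4.,  6.])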
|
# Copyright 2019-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.tests.distance.test_distance_kuhns_xii.
This module contains unit tests for abydos.distance.KuhnsXII
"""
import unittest
from abydos.distance import KuhnsXII
class KuhnsXIITestCases(unittest.TestCase):
"""Test KuhnsXII functions.
abydos.distance.KuhnsXII
"""
cmp = KuhnsXII()
cmp_no_d = KuhnsXII(alphabet=0)
def test_kuhns_xii_sim(self):
"""Test abydos.distance.KuhnsXII.sim."""
# Base cases
self.assertEqual(self.cmp.sim('', ''), 0.0)
self.assertEqual(self.cmp.sim('a', ''), 0.0)
self.assertEqual(self.cmp.sim('', 'a'), 0.0)
self.assertEqual(self.cmp.sim('abc', ''), 0.0)
self.assertEqual(self.cmp.sim('', 'abc'), 0.0)
self.assertEqual(self.cmp.sim('abc', 'abc'), 1.0)
self.assertEqual(self.cmp.sim('abcd', 'efgh'), 0.0)
self.assertAlmostEqual(self.cmp.sim('Nigel', 'Niall'), 0.2490322581)
self.assertAlmostEqual(self.cmp.sim('Niall', 'Nigel'), 0.2490322581)
self.assertAlmostEqual(self.cmp.sim('Colin', 'Coiln'), 0.2490322581)
self.assertAlmostEqual(self.cmp.sim('Coiln', 'Colin'), 0.2490322581)
self.assertAlmostEqual(
self.cmp.sim('ATCAACGAGT', 'AACGATTAG'), 0.4444628099
)
# Tests with alphabet=0 (no d factor)
self.assertEqual(self.cmp_no_d.sim('', ''), 0.0)
self.assertEqual(self.cmp_no_d.sim('a', ''), 0.0)
self.assertEqual(self.cmp_no_d.sim('', 'a'), 0.0)
self.assertEqual(self.cmp_no_d.sim('abc', ''), 0.0)
self.assertEqual(self.cmp_no_d.sim('', 'abc'), 0.0)
self.assertEqual(self.cmp_no_d.sim('abc', 'abc'), 0.0)
self.assertEqual(self.cmp_no_d.sim('abcd', 'efgh'), 0.0)
self.assertAlmostEqual(self.cmp_no_d.sim('Nigel', 'Niall'), 0.375)
self.assertAlmostEqual(self.cmp_no_d.sim('Niall', 'Nigel'), 0.375)
self.assertAlmostEqual(self.cmp_no_d.sim('Colin', 'Coiln'), 0.375)
self.assertAlmostEqual(self.cmp_no_d.sim('Coiln', 'Colin'), 0.375)
self.assertAlmostEqual(
self.cmp_no_d.sim('ATCAACGAGT', 'AACGATTAG'), 0.4454545455
)
def test_kuhns_xii_dist(self):
"""Test abydos.distance.KuhnsXII.dist."""
# Base cases
self.assertEqual(self.cmp.dist('', ''), 1.0)
self.assertEqual(self.cmp.dist('a', ''), 1.0)
self.assertEqual(self.cmp.dist('', 'a'), 1.0)
self.assertEqual(self.cmp.dist('abc', ''), 1.0)
self.assertEqual(self.cmp.dist('', 'abc'), 1.0)
self.assertEqual(self.cmp.dist('abc', 'abc'), 0.0)
self.assertEqual(self.cmp.dist('abcd', 'efgh'), 1.0)
self.assertAlmostEqual(self.cmp.dist('Nigel', 'Niall'), 0.7509677419)
self.assertAlmostEqual(self.cmp.dist('Niall', 'Nigel'), 0.7509677419)
self.assertAlmostEqual(self.cmp.dist('Colin', 'Coiln'), 0.7509677419)
self.assertAlmostEqual(self.cmp.dist('Coiln', 'Colin'), 0.7509677419)
self.assertAlmostEqual(
self.cmp.dist('ATCAACGAGT', 'AACGATTAG'), 0.5555371901
)
# Tests with alphabet=0 (no d factor)
self.assertEqual(self.cmp_no_d.dist('', ''), 1.0)
self.assertEqual(self.cmp_no_d.dist('a', ''), 1.0)
self.assertEqual(self.cmp_no_d.dist('', 'a'), 1.0)
self.assertEqual(self.cmp_no_d.dist('abc', ''), 1.0)
self.assertEqual(self.cmp_no_d.dist('', 'abc'), 1.0)
self.assertEqual(self.cmp_no_d.dist('abc', 'abc'), 1.0)
self.assertEqual(self.cmp_no_d.dist('abcd', 'efgh'), 1.0)
self.assertAlmostEqual(self.cmp_no_d.dist('Nigel', 'Niall'), 0.625)
self.assertAlmostEqual(self.cmp_no_d.dist('Niall', 'Nigel'), 0.625)
self.assertAlmostEqual(self.cmp_no_d.dist('Colin', 'Coiln'), 0.625)
self.assertAlmostEqual(self.cmp_no_d.dist('Coiln', 'Colin'), 0.625)
self.assertAlmostEqual(
self.cmp_no_d.dist('ATCAACGAGT', 'AACGATTAG'), 0.5545454545
)
def test_kuhns_xii_sim_score(self):
"""Test abydos.distance.KuhnsXII.sim_score."""
# Base cases
self.assertEqual(self.cmp.sim_score('', ''), 0.0)
self.assertEqual(self.cmp.sim_score('a', ''), 0.0)
self.assertEqual(self.cmp.sim_score('', 'a'), 0.0)
self.assertEqual(self.cmp.sim_score('abc', ''), 0.0)
self.assertEqual(self.cmp.sim_score('', 'abc'), 0.0)
self.assertEqual(self.cmp.sim_score('abc', 'abc'), 195.0)
self.assertEqual(self.cmp.sim_score('abcd', 'efgh'), -1.0)
self.assertAlmostEqual(
self.cmp.sim_score('Nigel', 'Niall'), 64.3333333333
)
self.assertAlmostEqual(
self.cmp.sim_score('Niall', 'Nigel'), 64.3333333333
)
self.assertAlmostEqual(
self.cmp.sim_score('Colin', 'Coiln'), 64.3333333333
)
self.assertAlmostEqual(
self.cmp.sim_score('Coiln', 'Colin'), 64.3333333333
)
self.assertAlmostEqual(
self.cmp.sim_score('ATCAACGAGT', 'AACGATTAG'), 48.8909090909
)
# Tests with alphabet=0 (no d factor)
self.assertEqual(self.cmp_no_d.sim_score('', ''), 0.0)
self.assertEqual(self.cmp_no_d.sim_score('a', ''), 0.0)
self.assertEqual(self.cmp_no_d.sim_score('', 'a'), 0.0)
self.assertEqual(self.cmp_no_d.sim_score('abc', ''), 0.0)
self.assertEqual(self.cmp_no_d.sim_score('', 'abc'), 0.0)
self.assertEqual(self.cmp_no_d.sim_score('abc', 'abc'), 0.0)
self.assertEqual(self.cmp_no_d.sim_score('abcd', 'efgh'), -1.0)
self.assertAlmostEqual(
self.cmp_no_d.sim_score('Nigel', 'Niall'), -0.25
)
self.assertAlmostEqual(
self.cmp_no_d.sim_score('Niall', 'Nigel'), -0.25
)
self.assertAlmostEqual(
self.cmp_no_d.sim_score('Colin', 'Coiln'), -0.25
)
self.assertAlmostEqual(
self.cmp_no_d.sim_score('Coiln', 'Colin'), -0.25
)
self.assertAlmostEqual(
self.cmp_no_d.sim_score('ATCAACGAGT', 'AACGATTAG'), -0.1090909091
)
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
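# This is a South schema migration; assuming the app label is `website`
# (as the table names in this migration suggest), it would typically be
# applied with:
#   python manage.py migrate website
# and rolled back by migrating to the previous migration number.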
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'ActionTutorial'
db.delete_table('website_actiontutorial')
# Deleting model 'Person'
db.delete_table('website_person')
# Deleting model 'UserReward'
db.delete_table('website_userreward')
# Deleting model 'ApplicationHistory'
db.delete_table('website_applicationhistory')
# Deleting model 'Document'
db.delete_table('website_document')
# Deleting model 'UserTutorialHistory'
db.delete_table('website_usertutorialhistory')
# Deleting model 'Tutorial'
db.delete_table('website_tutorial')
# Deleting model 'DocumentCategory'
db.delete_table('website_documentcategory')
# Deleting model 'QuestionDependency'
db.delete_table('website_questiondependency')
# Deleting model 'ApplicationAnswer'
db.delete_table('website_applicationanswer')
# Deleting model 'PersonAddress'
db.delete_table('website_personaddress')
# Deleting model 'Application'
db.delete_table('website_application')
# Deleting model 'Region'
db.delete_table('website_region')
# Deleting model 'UserTutorialPageHistory'
db.delete_table('website_usertutorialpagehistory')
# Deleting model 'TutorialPage'
db.delete_table('website_tutorialpage')
# Deleting field 'Jurisdiction.region'
db.delete_column('website_jurisdiction', 'region_id')
# Deleting field 'OrganizationAddress.address_type'
db.delete_column('website_organizationaddress', 'address_type')
# Deleting field 'OrganizationMember.person'
db.delete_column('website_organizationmember', 'person_id')
def backwards(self, orm):
# Adding model 'ActionTutorial'
db.create_table('website_actiontutorial', (
('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
('action_identifier', self.gf('django.db.models.fields.CharField')(blank=True, max_length=64, null=True, db_index=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('tutorial', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Tutorial'], null=True, blank=True)),
))
db.send_create_signal('website', ['ActionTutorial'])
# Adding model 'Person'
db.create_table('website_person', (
('phone_primary', self.gf('django.contrib.localflavor.us.models.PhoneNumberField')(max_length=20, null=True, blank=True)),
('last_name', self.gf('django.db.models.fields.CharField')(blank=True, max_length=64, null=True, db_index=True)),
('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('first_name', self.gf('django.db.models.fields.CharField')(blank=True, max_length=64, null=True, db_index=True)),
('phone_secondary', self.gf('django.contrib.localflavor.us.models.PhoneNumberField')(max_length=20, null=True, blank=True)),
('phone_mobile', self.gf('django.contrib.localflavor.us.models.PhoneNumberField')(max_length=20, null=True, blank=True)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True)),
))
db.send_create_signal('website', ['Person'])
# Adding model 'UserReward'
db.create_table('website_userreward', (
('reward_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
('reward', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.RewardCategory'], null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('website', ['UserReward'])
# Adding model 'ApplicationHistory'
db.create_table('website_applicationhistory', (
('status', self.gf('django.db.models.fields.CharField')(blank=True, max_length=8, null=True, db_index=True)),
('application', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Application'])),
('status_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('status_by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
))
db.send_create_signal('website', ['ApplicationHistory'])
# Adding model 'Document'
db.create_table('website_document', (
('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('file_name', self.gf('django.db.models.fields.CharField')(blank=True, max_length=64, null=True, db_index=True)),
('jurisdiction', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Jurisdiction'], null=True, blank=True)),
('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
('reviewed', self.gf('django.db.models.fields.BooleanField')(default=False, db_index=True)),
('accepted', self.gf('django.db.models.fields.BooleanField')(default=False, db_index=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(blank=True, max_length=128, null=True, db_index=True)),
('region', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Region'], null=True, blank=True)),
('file_path', self.gf('django.db.models.fields.files.FileField')(max_length=100)),
))
db.send_create_signal('website', ['Document'])
# Adding model 'UserTutorialHistory'
db.create_table('website_usertutorialhistory', (
('view_datetime', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
('user_email', self.gf('django.db.models.fields.EmailField')(blank=True, max_length=75, null=True, db_index=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('tutorial', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Tutorial'], null=True, blank=True)),
))
db.send_create_signal('website', ['UserTutorialHistory'])
# Adding model 'Tutorial'
db.create_table('website_tutorial', (
('start_datetime', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('identifier', self.gf('django.db.models.fields.CharField')(blank=True, max_length=64, null=True, db_index=True)),
('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
('name', self.gf('django.db.models.fields.CharField')(blank=True, max_length=128, null=True, db_index=True)),
('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('end_datetime', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
))
db.send_create_signal('website', ['Tutorial'])
# Adding model 'DocumentCategory'
db.create_table('website_documentcategory', (
('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=64, null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('website', ['DocumentCategory'])
# Adding model 'QuestionDependency'
db.create_table('website_questiondependency', (
('required', self.gf('django.db.models.fields.BooleanField')(default=False)),
('answer_text', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('question2', self.gf('django.db.models.fields.related.ForeignKey')(related_name='_questionDependency_question2', to=orm['website.Question'])),
('question1', self.gf('django.db.models.fields.related.ForeignKey')(related_name='_questionDependency_question1', to=orm['website.Question'])),
('strength', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True, blank=True)),
('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
))
db.send_create_signal('website', ['QuestionDependency'])
# Adding model 'ApplicationAnswer'
db.create_table('website_applicationanswer', (
('application', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Application'])),
('template', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Template'])),
('file_upload', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True, blank=True)),
('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('question', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Question'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('value', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
))
db.send_create_signal('website', ['ApplicationAnswer'])
# Adding model 'PersonAddress'
db.create_table('website_personaddress', (
('person', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Person'], null=True, blank=True)),
('address', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Address'], null=True, blank=True)),
('display_order', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True, blank=True)),
('address_type', self.gf('django.db.models.fields.CharField')(blank=True, max_length=8, null=True, db_index=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
))
db.send_create_signal('website', ['PersonAddress'])
# Adding model 'Application'
db.create_table('website_application', (
('jurisdiction', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Jurisdiction'], null=True, blank=True)),
('template', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Template'], null=True, blank=True)),
('address', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Address'], null=True, blank=True)),
('applicant', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('current_status', self.gf('django.db.models.fields.CharField')(blank=True, max_length=8, null=True, db_index=True)),
('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
))
db.send_create_signal('website', ['Application'])
# Adding model 'Region'
db.create_table('website_region', (
('state', self.gf('django.db.models.fields.CharField')(blank=True, max_length=8, null=True, db_index=True)),
('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('name', self.gf('django.db.models.fields.CharField')(blank=True, max_length=64, null=True, db_index=True)),
('latitude', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=10, decimal_places=7, blank=True)),
('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('longitude', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=10, decimal_places=7, blank=True)),
('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
))
db.send_create_signal('website', ['Region'])
# Adding model 'UserTutorialPageHistory'
db.create_table('website_usertutorialpagehistory', (
('checked', self.gf('django.db.models.fields.BooleanField')(default=False)),
('tutorial', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Tutorial'], null=True, blank=True)),
('user_email', self.gf('django.db.models.fields.EmailField')(blank=True, max_length=75, null=True, db_index=True)),
('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('page', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.TutorialPage'], null=True, blank=True)),
('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
))
db.send_create_signal('website', ['UserTutorialPageHistory'])
# Adding model 'TutorialPage'
db.create_table('website_tutorialpage', (
('selector', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
('display_order', self.gf('django.db.models.fields.SmallIntegerField')(default=0)),
('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('tip', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('tutorial', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Tutorial'], null=True, blank=True)),
('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
))
db.send_create_signal('website', ['TutorialPage'])
# Adding field 'Jurisdiction.region'
db.add_column('website_jurisdiction', 'region',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Region'], null=True, blank=True),
keep_default=False)
# Adding field 'OrganizationAddress.address_type'
db.add_column('website_organizationaddress', 'address_type',
self.gf('django.db.models.fields.CharField')(blank=True, max_length=8, null=True, db_index=True),
keep_default=False)
# Adding field 'OrganizationMember.person'
db.add_column('website_organizationmember', 'person',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Person'], null=True, blank=True),
keep_default=False)
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'website.action': {
'Meta': {'object_name': 'Action'},
'action_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.ActionCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.actioncategory': {
'Meta': {'object_name': 'ActionCategory'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'})
},
'website.address': {
'Meta': {'object_name': 'Address'},
'address1': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'website.answerchoice': {
'Meta': {'object_name': 'AnswerChoice'},
'answer_choice_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.AnswerChoiceGroup']"}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'website.answerchoicegroup': {
'Meta': {'object_name': 'AnswerChoiceGroup'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'website.answerreference': {
'Meta': {'object_name': 'AnswerReference'},
'approval_status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'file_upload': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_callout': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'migrated_answer_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.applicability': {
'Meta': {'object_name': 'Applicability'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'website.comment': {
'Meta': {'object_name': 'Comment'},
'approval_status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'comment_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_reference'", 'null': 'True', 'to': "orm['website.Comment']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.entityview': {
'Meta': {'object_name': 'EntityView'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latest_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'session_key': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.entityviewcount': {
'Meta': {'object_name': 'EntityViewCount'},
'count_30_days': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'total_count': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'website.jurisdiction': {
'Meta': {'object_name': 'Jurisdiction'},
'city': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'last_contributed': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'last_contributed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'last_contributed_by_org': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_org_contributor'", 'null': 'True', 'to': "orm['website.Organization']"}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_org_jurisdiction'", 'null': 'True', 'to': "orm['website.Organization']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_jurisdiction'", 'null': 'True', 'to': "orm['website.Jurisdiction']"}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.jurisdictioncontributor': {
'Meta': {'object_name': 'JurisdictionContributor'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.migrationhistory': {
'Meta': {'object_name': 'MigrationHistory'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'notes2': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'source_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'source_table': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'target_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'target_table': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.organization': {
'Meta': {'object_name': 'Organization'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.OrganizationCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'fax': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'logo_scaled': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'parent_org': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'phone': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '8', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'website.organizationaddress': {
'Meta': {'object_name': 'OrganizationAddress'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'})
},
'website.organizationcategory': {
'Meta': {'object_name': 'OrganizationCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.organizationmember': {
'Meta': {'object_name': 'OrganizationMember'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invitation_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'invitor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_member_invitor'", 'null': 'True', 'to': "orm['auth.User']"}),
'join_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'requested_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RoleType']", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_member_user'", 'null': 'True', 'to': "orm['auth.User']"})
},
'website.organizationrating': {
'Meta': {'object_name': 'OrganizationRating'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'website.question': {
'Meta': {'object_name': 'Question'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'answer_choice_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.AnswerChoiceGroup']", 'null': 'True', 'blank': 'True'}),
'applicability': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Applicability']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'default_value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'display_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'field_attributes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'field_suffix': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'form_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'has_multivalues': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instruction': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'js': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'migration_type': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'qtemplate': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']", 'null': 'True'}),
'question': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'state_exclusive': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'terminology': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'validation_class': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.questioncategory': {
'Meta': {'object_name': 'QuestionCategory'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'website.ratingcategory': {
'Meta': {'object_name': 'RatingCategory'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'rating_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.ratinglevel': {
'Meta': {'object_name': 'RatingLevel'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'rank': ('django.db.models.fields.PositiveSmallIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'website.reaction': {
'Meta': {'object_name': 'Reaction'},
'action': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Action']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.ReactionCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'reaction_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.reactioncategory': {
'Meta': {'object_name': 'ReactionCategory'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'})
},
'website.rewardcategory': {
'Meta': {'object_name': 'RewardCategory'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.roletype': {
'Meta': {'object_name': 'RoleType'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.servervariable': {
'Meta': {'object_name': 'ServerVariable'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.template': {
'Meta': {'object_name': 'Template'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'})
},
'website.templatequestion': {
'Meta': {'object_name': 'TemplateQuestion'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']"})
},
'website.usercommentview': {
'Meta': {'object_name': 'UserCommentView'},
'comments_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'last_comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Comment']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'view_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'})
},
'website.userdetail': {
'Meta': {'object_name': 'UserDetail'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'display_preference': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'migrated_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'notification_preference': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'old_password': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'reset_password_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '124', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userfavorite': {
'Meta': {'object_name': 'UserFavorite'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userpageview': {
'Meta': {'object_name': 'UserPageView'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_page_view_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userrating': {
'Meta': {'object_name': 'UserRating'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.usersearch': {
'Meta': {'object_name': 'UserSearch'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'search_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.view': {
'Meta': {'object_name': 'View'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'view_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'})
},
'website.vieworgs': {
'Meta': {'object_name': 'ViewOrgs'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']"}),
'view': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.View']"})
},
'website.viewquestions': {
'Meta': {'object_name': 'ViewQuestions'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'view': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.View']"})
},
'website.zipcode': {
'Meta': {'object_name': 'Zipcode'},
'city': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '2', 'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'db_index': 'True'})
}
}
complete_apps = ['website']
|
# WARNING: This recipe currently breaks display of the representation of
# strings containing other string escape sequences such as '\n'. I can't find
# a way to get ASPN to hide the recipe from public view until I can figure out
# a way to fix it though :(
# NOTE: This recipe is written to work with Python 3.0
# It would likely require changes to work on Python 2.6, and won't work at all
# on earlier 2.x versions
import sys, io
# With the new IO module, it's easy to create a variant of an
# existing IO class
class ParseUnicodeEscapes(io.TextIOWrapper):
    def write(self, text):
        # Round-trip through latin-1 so that literal '\uXXXX' escape sequences
        # in the written text are interpreted as the characters they name.
        super().write(text.encode('latin-1').decode('unicode_escape'))
# To replace sys.stdout/stderr, we first collect the necessary
# constructor arguments from the current streams
stdout_args = (sys.stdout.buffer, sys.stdout.encoding, sys.stdout.errors,
None, sys.stdout.line_buffering)
stderr_args = (sys.stderr.buffer, sys.stderr.encoding, sys.stderr.errors,
None, sys.stderr.line_buffering)
# Once we replace the streams, any '\uXXXX' sequences written to
# sys.stdout or sys.stderr will be replaced with the corresponding
# Unicode characters
sys.stdout = ParseUnicodeEscapes(*stdout_args)
sys.stderr = ParseUnicodeEscapes(*stderr_args)
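# Illustrative usage (not part of the original recipe): with sys.stdout
# replaced above, literal '\uXXXX' escape sequences in ordinary strings are
# rendered as the characters they name.
print("Caf\\u00e9")              # displays: Café
# The caveat from the WARNING above: a repr() containing '\n' is converted
# too, so the output below spans two lines instead of showing a literal '\n'.
print(repr("line1\nline2"))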
|
##
# Copyright (c) 2005-2015 Apple Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# DRI: Wilfredo Sanchez, wsanchez@apple.com
##
from txweb2.iweb import IResponse
import txweb2.dav.test.util
from txweb2.test.test_server import SimpleRequest
class OPTIONS(txweb2.dav.test.util.TestCase):
"""
OPTIONS request
"""
def test_DAV1(self):
"""
DAV level 1
"""
return self._test_level("1")
def test_DAV2(self):
"""
DAV level 2
"""
return self._test_level("2")
test_DAV2.todo = "DAV level 2 unimplemented"
def test_ACL(self):
"""
DAV ACL
"""
return self._test_level("access-control")
def _test_level(self, level):
def doTest(response):
response = IResponse(response)
dav = response.headers.getHeader("dav")
if not dav:
self.fail("no DAV header: %s" % (response.headers,))
self.assertIn(level, dav, "no DAV level %s header" % (level,))
return response
return self.send(SimpleRequest(self.site, "OPTIONS", "/"), doTest)
|
# -*- coding: utf-8 -*-
"""A plugin to generate a list of unique hashes and paths."""
from plaso.analysis import interface
from plaso.analysis import manager
from plaso.containers import reports
class FileHashesPlugin(interface.AnalysisPlugin):
"""A plugin for generating a list of file paths and corresponding hashes."""
NAME = u'file_hashes'
# Indicate that we can run this plugin during regular extraction.
ENABLE_IN_EXTRACTION = True
def __init__(self):
"""Initializes the unique hashes plugin."""
super(FileHashesPlugin, self).__init__()
self._paths_with_hashes = {}
def ExamineEvent(self, mediator, event):
"""Analyzes an event and creates extracts hashes as required.
Args:
mediator (AnalysisMediator): mediates interactions between
analysis plugins and other components, such as storage and dfvfs.
event (EventObject): event to examine.
"""
pathspec = getattr(event, u'pathspec', None)
if pathspec is None:
return
if self._paths_with_hashes.get(pathspec, None):
# We've already processed an event with this pathspec and extracted the
# hashes from it.
return
hash_attributes = {}
for attribute_name, attribute_value in event.GetAttributes():
if attribute_name.endswith(u'_hash'):
hash_attributes[attribute_name] = attribute_value
self._paths_with_hashes[pathspec] = hash_attributes
def _GeneratePathString(self, mediator, pathspec, hashes):
"""Generates a string containing a pathspec and its hashes.
Args:
mediator (AnalysisMediator): mediates interactions between analysis
plugins and other components, such as storage and dfvfs.
      pathspec (dfvfs.Pathspec): the path specification to generate a string
        for.
hashes (dict[str, str]): mapping of hash attribute names to the value of
that hash for the path specification being processed.
Returns:
str: string of the form "display_name: hash_type=hash_value". For example,
"OS:/path/spec: test_hash=4 other_hash=5".
"""
display_name = mediator.GetDisplayName(pathspec)
path_string = u'{0:s}:'.format(display_name)
for hash_name, hash_value in sorted(hashes.items()):
path_string = u'{0:s} {1:s}={2:s}'.format(
path_string, hash_name, hash_value)
return path_string
def CompileReport(self, mediator):
"""Compiles an analysis report.
Args:
mediator (AnalysisMediator): mediates interactions between analysis
plugins and other components, such as storage and dfvfs.
Returns:
AnalysisReport: report.
"""
lines_of_text = [u'Listing file paths and hashes']
for pathspec, hashes in sorted(
self._paths_with_hashes.items(),
        key=lambda path_hash: path_hash[0].comparable):
path_string = self._GeneratePathString(mediator, pathspec, hashes)
lines_of_text.append(path_string)
lines_of_text.append(u'')
report_text = u'\n'.join(lines_of_text)
return reports.AnalysisReport(plugin_name=self.NAME, text=report_text)
manager.AnalysisPluginManager.RegisterPlugin(FileHashesPlugin)
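# Hedged illustration (not part of plaso): a standalone sketch of the report
# format produced by CompileReport() above, i.e. one
# "display_name: hash_name=hash_value" line per path, with hashes sorted by
# attribute name. The helper name and the toy data below are made up purely
# to show the formatting.
def _demo_report_lines(paths_with_hashes):
  """Builds report lines from a dict of display name -> hash dict."""
  lines = [u'Listing file paths and hashes']
  for display_name, hashes in sorted(paths_with_hashes.items()):
    line = u'{0:s}:'.format(display_name)
    for hash_name, hash_value in sorted(hashes.items()):
      line = u'{0:s} {1:s}={2:s}'.format(line, hash_name, hash_value)
    lines.append(line)
  return lines
# Example: yields ['Listing file paths and hashes', 'OS:/tmp/a: md5_hash=d41d8cd9'].
_demo_lines = _demo_report_lines({u'OS:/tmp/a': {u'md5_hash': u'd41d8cd9'}})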
|
""" This module provides a direct client interface for managing an IRC
connection. If you are trying to build a bot, :class:`ircutils.bot.SimpleBot`
inherits from :class:`SimpleClient` so it has the methods listed below.
"""
from __future__ import absolute_import
import collections
import pprint
from . import connection
from . import ctcp
from . import events
from . import format
from . import protocol
class SimpleClient(object):
""" SimpleClient is designed to provide a high level of abstraction
    of the IRC protocol. Its methods are structured in a way that allows
you to often bypass the need to send raw IRC commands. By default,
``auto_handle`` is set to ``True`` and allows the client to handle the
following:
* Client nickname changes
* Client channel tracking
* CTCP version requests
"""
software = "http://dev.guardedcode.com/projects/ircutils/"
version = (0,1,3)
custom_listeners = {}
def __init__(self, nick, real_name="A Python IRC Bot by Johz", mode="+B", auto_handle=True):
self.nickname = nick
self.user = nick
self.real_name = real_name
self.filter_formatting = True
self.channels = collections.defaultdict(protocol.Channel)
self.events = events.EventDispatcher()
self._prev_nickname = None
self._mode = mode
self._register_default_listeners()
if auto_handle:
self._add_built_in_handlers()
def __getitem__(self, name):
return self.events[name]
def __setitem__(self, name, value):
self.register_listener(name, value)
def _register_default_listeners(self):
""" Registers the default listeners to the names listed in events. """
# Connection events
for name in events.connection:
self.events.register_listener(name, events.connection[name]())
# Standard events
for name in events.standard:
self.events.register_listener(name, events.standard[name]())
# Message events
for name in events.messages:
self.events.register_listener(name, events.messages[name]())
# CTCP events
for name in events.ctcp:
self.events.register_listener(name, events.ctcp[name]())
# RPL_ events
for name in events.replies:
self.events.register_listener(name, events.replies[name]())
# Custom listeners
for name in self.custom_listeners:
self.events.register_listener(name, self.custom_listeners[name])
def _add_built_in_handlers(self):
""" Adds basic client handlers.
        These handlers are bound to events that affect the data that the
        client handles. They are required in order to keep track of things
        like client nick changes, joined channels, and channel user lists.
"""
self.events["any"].add_handler(_update_client_info)
self.events["name_reply"].add_handler(_set_channel_names)
self.events["ctcp_version"].add_handler(_reply_to_ctcp_version)
self.events["part"].add_handler(_remove_channel_user_on_part)
self.events["quit"].add_handler(_remove_channel_user_on_quit)
self.events["join"].add_handler(_add_channel_user)
def _dispatch_event(self, prefix, command, params):
""" Given the parameters, dispatch an event.
After first building an event, this method sends the event(s) to the
primary event dispatcher.
This replaces :func:`connection.Connection.handle_line`
"""
try:
self._pending_events
except AttributeError:
self._pending_events = []
# TODO: Event parsing doesn't belong here.
if command in ["PRIVMSG", "NOTICE"]:
event = events.MessageEvent(prefix, command, params)
message_data = event.params[-1]
message_data = ctcp.low_level_dequote(message_data)
            message_data, ctcp_requests = ctcp.extract(message_data)
if self.filter_formatting:
message_data = format.filter(message_data)
if message_data.strip() != "":
event.message = message_data
self._pending_events.append(event)
for command, params in ctcp_requests:
ctcp_event = events.CTCPEvent()
ctcp_event.command = "CTCP_%s" % command
ctcp_event.params = params
ctcp_event.source = event.source
ctcp_event.target = event.target
self._pending_events.append(ctcp_event)
else:
self._pending_events.append(events.StandardEvent(prefix, command, params))
while self._pending_events:
event = self._pending_events.pop(0)
self.events.dispatch(self, event)
def connect(self, host, port=None, channel=None, use_ssl=False,
password=None):
""" Connect to an IRC server. """
self.conn = connection.Connection()
self.conn.handle_line = self._dispatch_event
self.conn.connect(host, port, use_ssl, password)
self.conn.execute("USER", self.user, self._mode, "*",
trailing=self.real_name)
self.conn.execute("NICK", self.nickname)
self.conn.handle_connect = self._handle_connect
self.conn.handle_close = self._handle_disconnect
if channel is not None:
# Builds a handler on-the-fly for joining init channels
if isinstance(channel, basestring):
channels = [channel]
else:
channels = channel
def _auto_joiner(client, event):
for channel in channels:
client.join_channel(channel)
self.events["welcome"].add_handler(_auto_joiner)
def is_connected(self):
return self.conn.connected
def _handle_connect(self):
connection.Connection.handle_connect(self.conn)
event = events.ConnectionEvent("CONN_CONNECT")
self.events.dispatch(self, event)
def _handle_disconnect(self):
connection.Connection.handle_close(self.conn)
event = events.ConnectionEvent("CONN_DISCONNECT")
self.events.dispatch(self, event)
def register_listener(self, event_name, listener):
""" Registers an event listener for a given event name.
In essence, this binds the event name to the listener and simply
provides an easier way to reference the listener.
::
client.register_listener("event_name", MyListener())
"""
self.events.register_listener(event_name, listener)
def identify(self, ns_password):
""" Identify yourself with the NickServ service on IRC.
This assumes that NickServ is present on the server.
"""
self.send_message("NickServ", "IDENTIFY {0}".format(ns_password))
def join_channel(self, channel, key=None):
""" Join the specified channel. Optionally, provide a key to the channel
if it requires one.
::
client.join_channel("#channel_name")
client.join_channel("#channel_name", "channelkeyhere")
"""
if channel == "0":
self.channels = []
self.execute("JOIN", "0")
else:
if key is not None:
params = [channel, key]
else:
params = [channel]
self.execute("JOIN", *params)
def part_channel(self, channel, message=None):
""" Leave the specified channel.
You may provide a message that shows up during departure.
"""
self.execute("PART", channel, trailing=message)
def send_message(self, target, message, to_service=False):
""" Sends a message to the specified target.
If it is a service, it uses SQUERY instead.
"""
message = ctcp.low_level_quote(message)
if to_service:
self.execute("SQUERY", target, message)
else:
self.execute("PRIVMSG", target, trailing=message)
def send_notice(self, target, message):
""" Sends a NOTICE to the specified target.
"""
message = ctcp.low_level_quote(message)
self.execute("NOTICE", target, trailing=message)
def send_ctcp(self, target, command, params=None):
""" Sends a CTCP (Client-to-Client-Protocol) message to the target.
"""
if params is not None:
params.insert(0, command)
self.send_message(target, ctcp.tag(" ".join(params)))
else:
self.send_message(target, ctcp.tag(command))
def send_ctcp_reply(self, target, command, params=None):
""" Sends a CTCP reply message to the target.
This differs from send_ctcp() because it uses NOTICE instead, as
specified by the CTCP documentation.
"""
if params is not None:
params.insert(0, command)
self.send_notice(target, ctcp.tag(" ".join(params)))
else:
self.send_notice(target, ctcp.tag(command))
def send_action(self, target, action_message):
""" Perform an "action". This is the same as when a person uses the
``/me is jumping up and down!`` command in their IRC client.
"""
self.send_ctcp(target, "ACTION", [action_message])
def set_nickname(self, nickname):
""" Attempts to set the nickname for the client. """
self._prev_nickname = self.nickname
self.execute("NICK", nickname)
def disconnect(self, message=None):
""" Disconnects from the IRC server.
If `message` is set, it is provided as a departing message.
Example::
client.disconnect("Goodbye cruel world!")
"""
self.execute("QUIT", trailing=message)
self.channels = []
self.conn.close_when_done()
def start(self):
""" Begin the client.
If you wish to run multiple clients at the same time, be sure to
use ``ircutils.start_all()`` instead.
"""
self.conn.start()
def execute(self, command, *args, **kwargs):
""" Execute an IRC command on the server.
Example::
self.execute("PRIVMSG", channel, trailing="Hello, world!")
"""
command, params = self.conn.execute(command, *args, **kwargs)
# Some less verbose aliases
join = join_channel
part = part_channel
notice = send_notice
action = send_action
quit = disconnect
# TODO: UPDATE EVERYTHING HERE.
def _reply_to_ctcp_version(client, event):
version_info = "IRCUtils:%s:Python" % ".".join(map(str, client.version))
client.send_ctcp_reply(event.source, "VERSION", [version_info])
def _update_client_info(client, event):
command = event.command
params = event.params
if command == "RPL_WELCOME":
if client.nickname != event.target:
client.nickname = event.target
if command == "ERR_ERRONEUSNICKNAME":
client.set_nickname(protocol.filter_nick(client.nickname))
elif command == "ERR_NICKNAMEINUSE":
client.set_nickname(client.nickname + "_")
elif command == "ERR_UNAVAILRESOURCE":
if not protocol.is_channel(event.params[0]):
client.nickname = client._prev_nickname
elif command == "NICK" and event.source == client.nickname:
client.nickname = event.target
if command in ["ERR_INVITEONLYCHAN", "ERR_CHANNELISFULL", "ERR_BANNEDFROMCHAN",
"ERR_BADCHANNELKEY", "ERR_TOOMANYCHANNELS", "ERR_NOSUCHCHANNEL"
"ERR_BADCHANMASK"]:
channel_name = params[0].lower()
if channel_name in client.channels:
del client.channels[channel_name]
elif command == "ERR_UNAVAILRESOURCE":
channel_name = params[0].lower()
if protocol.is_channel(channel_name) and channel_name in client.channels:
del client.channels[channel_name]
def _set_channel_names(client, name_event):
channel_name = name_event.channel.lower()
client.channels[channel_name].name = channel_name
client.channels[channel_name].user_list = name_event.name_list
def _remove_channel_user_on_part(client, event):
channel = event.target.lower()
if event.source == client.nickname:
del client.channels[channel]
elif event.source in client.channels[channel].user_list:
client.channels[channel].user_list.remove(event.source)
def _remove_channel_user_on_quit(client, event):
# TODO: This solution is slow. There might be a better one.
for channel in client.channels:
if event.source in client.channels[channel].user_list:
client.channels[channel].user_list.remove(event.source)
def _add_channel_user(client, event):
channel = event.target.lower()
client.channels[channel].user_list.append(event.source)
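# Minimal usage sketch (illustrative, not part of the module): build a client,
# join a channel once the server sends the welcome reply, and start the event
# loop. The nickname, server name and channel below are placeholders, and the
# ``__main__`` guard assumes the module is run in a context where the ircutils
# package is importable.
if __name__ == "__main__":
    example = SimpleClient(nick="ExampleNick")
    def _join_on_welcome(client, event):
        # ``join`` is the alias for join_channel() defined above.
        client.join("#example")
    example["welcome"].add_handler(_join_on_welcome)
    example.connect("irc.example.org", port=6667)
    example.start()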
|
import asyncio
import logging
from aiohttp import web
from .endpoint import cancel
from .endpoint import endpoint
from ..adjust import adjust
from .. import clone
from .. import pull
from .. import repo
from .endpoint import validation
from ..auth import auth
from ..config import config
logger = logging.getLogger(__name__)
#
# Setup
#
shutdown_callbacks = []
@asyncio.coroutine
def init(loop, bind, repo_provider, adjust_provider):
logger.debug("Running init")
c = yield from config.get_configuration()
auth_provider = c.get('auth', {}).get('provider', None)
logger.info("Using auth provider '" + str(auth_provider) + "'.")
    app = web.Application(loop=loop, middlewares=[auth.providers[auth_provider]] if auth_provider else [])
logger.debug("Adding application resources")
app["repo_provider"] = repo.provider_types[repo_provider["type"]](**repo_provider["params"])
if repo_provider["type"] == "modeb":
logger.warn("Mode B selected, guarantees rescinded")
pull_source = endpoint.validated_json_endpoint(shutdown_callbacks, validation.pull_modeb, pull.pull)
adjust_source = endpoint.validated_json_endpoint(shutdown_callbacks, validation.adjust_modeb, adjust.adjust)
else:
pull_source = endpoint.validated_json_endpoint(shutdown_callbacks, validation.pull, pull.pull)
adjust_source = endpoint.validated_json_endpoint(shutdown_callbacks, validation.adjust, adjust.adjust)
logger.debug("Setting up handlers")
app.router.add_route("POST", "/pull", pull_source)
app.router.add_route("POST", "/adjust", adjust_source)
app.router.add_route("POST", "/clone", endpoint.validated_json_endpoint(shutdown_callbacks, validation.clone, clone.clone))
app.router.add_route("POST", "/cancel", cancel.handle_cancel)
logger.debug("Creating asyncio server")
srv = yield from loop.create_server(app.make_handler(), bind["address"], bind["port"])
for socket in srv.sockets:
logger.info("Server started on socket: {}".format(socket.getsockname()))
def start_server(bind, repo_provider, adjust_provider):
logger.debug("Starting server")
loop = asyncio.get_event_loop()
# Monkey patch for Python 3.4.1
if not hasattr(loop, "create_task"):
loop.create_task = lambda c: asyncio.async(c, loop=loop)
loop.run_until_complete(init(
loop=loop,
bind=bind,
repo_provider=repo_provider,
adjust_provider=adjust_provider,
))
try:
loop.run_forever()
except KeyboardInterrupt:
logger.debug("KeyboardInterrupt")
finally:
logger.info("Stopping tasks")
tasks = asyncio.Task.all_tasks()
for task in tasks:
task.cancel()
results = loop.run_until_complete(asyncio.gather(*tasks, loop=loop, return_exceptions=True))
for shutdown_callback in shutdown_callbacks:
shutdown_callback()
exception_results = [r for r in results if
isinstance(r, Exception) and not isinstance(r, asyncio.CancelledError)]
if len(exception_results) > 1:
raise Exception(exception_results)
elif len(exception_results) == 1:
raise exception_results[0]
loop.close()
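# Hedged example invocation (illustrative only): the available repo provider
# types and their parameters depend on deployment configuration, so the values
# below are placeholders; "modeb" appears here simply because it is one of the
# types handled in init() above.
#
#     start_server(
#         bind={"address": "0.0.0.0", "port": 8081},
#         repo_provider={"type": "modeb", "params": {}},
#         adjust_provider={},
#     )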
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# partner_billing
# (C) 2015 Mikołaj Dziurzyński, Grzegorz Grzelak, Thorsten Vocks (big-consulting GmbH)
# All Rights reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp import fields, models
import logging
_logger = logging.getLogger(__name__)
class sale_advance_payment_inv(osv.osv_memory):
_inherit = "sale.advance.payment.inv"
def _prepare_advance_invoice_vals(self, cr, uid, ids, context=None):
res = super(sale_advance_payment_inv,self)._prepare_advance_invoice_vals(cr, uid, ids, context=context)
sale_order_obj = self.pool.get('sale.order')
for pair in res:
for sale in sale_order_obj.browse(cr, uid, [pair[0]]):
pair[1]['associated_partner'] = sale.associated_partner and sale.associated_partner.id or False
return res
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Post'
db.create_table(u'post_post', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('author', self.gf('django.db.models.fields.IntegerField')()),
('ts', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2015, 8, 30, 0, 0))),
('content', self.gf('django.db.models.fields.TextField')()),
('title', self.gf('django.db.models.fields.CharField')(default='New Post', max_length=100)),
('excerpt', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
('status', self.gf('django.db.models.fields.IntegerField')(default=0)),
('comment_status', self.gf('django.db.models.fields.IntegerField')(default=0)),
('pwd', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
('lastts', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('like_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
('share_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
('comment_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
('url', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
('album', self.gf('django.db.models.fields.IntegerField')(default=0)),
('cover', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
))
db.send_create_signal(u'post', ['Post'])
# Adding M2M table for field tags on 'Post'
m2m_table_name = db.shorten_name(u'post_post_tags')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('post', models.ForeignKey(orm[u'post.post'], null=False)),
('tag', models.ForeignKey(orm[u'tag.tag'], null=False))
))
db.create_unique(m2m_table_name, ['post_id', 'tag_id'])
def backwards(self, orm):
# Deleting model 'Post'
db.delete_table(u'post_post')
# Removing M2M table for field tags on 'Post'
db.delete_table(db.shorten_name(u'post_post_tags'))
models = {
u'post.post': {
'Meta': {'object_name': 'Post'},
'album': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'author': ('django.db.models.fields.IntegerField', [], {}),
'comment_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'comment_status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'content': ('django.db.models.fields.TextField', [], {}),
'cover': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastts': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'like_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'pwd': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'share_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['tag.Tag']", 'symmetrical': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'default': "'New Post'", 'max_length': '100'}),
'ts': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 8, 30, 0, 0)'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
u'tag.tag': {
'Meta': {'object_name': 'Tag'},
'add_ts': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 8, 30, 0, 0)'}),
'cover': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
}
}
complete_apps = ['post']
|
# -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
from library import gibbs
from absolute_salinity_sstar_ct import CT_from_t
from gsw.utilities import match_args_return, strip_mask
from conversions import pt_from_CT, pt_from_t, pt0_from_t
from constants import Kelvin, db2Pascal, P0, SSO, cp0, R, sfac, M_S
__all__ = [
'rho_t_exact',
'pot_rho_t_exact',
'sigma0_pt0_exact',
'alpha_wrt_CT_t_exact',
'alpha_wrt_pt_t_exact',
'alpha_wrt_t_exact',
'beta_const_CT_t_exact',
'beta_const_pt_t_exact',
'beta_const_t_exact',
'specvol_t_exact',
'specvol_anom_t_exact',
'sound_speed_t_exact',
'kappa_t_exact',
'kappa_const_t_exact',
'internal_energy_t_exact',
'enthalpy_t_exact',
'dynamic_enthalpy_t_exact',
'SA_from_rho_t_exact',
#'t_from_rho_exact',
't_maxdensity_exact',
'entropy_t_exact',
'cp_t_exact',
'isochoric_heat_cap_t_exact',
'chem_potential_relative_t_exact',
'chem_potential_water_t_exact',
'chem_potential_salt_t_exact',
'Helmholtz_energy_t_exact',
'adiabatic_lapse_rate_t_exact',
'osmotic_coefficient_t_exact',
'osmotic_pressure_t_exact'
]
n0, n1, n2 = 0, 1, 2
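# The (n0, n1, n2) values passed to gibbs() below select the order of the
# partial derivative of the Gibbs function with respect to Absolute Salinity,
# in situ temperature and pressure, respectively; e.g. gibbs(n0, n0, n1, ...)
# is dg/dP, the specific volume, as used in specvol_t_exact below.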
@match_args_return
def Helmholtz_energy_t_exact(SA, t, p):
r"""Calculates the Helmholtz energy of seawater.
The specific Helmholtz energy of seawater :math:`f` is given by:
.. math::
f(SA, t, p) = g - (p + P_0) \nu =
g - (p + P_0) \frac{\partial g}{\partial P}\Big|_{SA,T}
Parameters
----------
SA : array_like
Absolute salinity [g kg :sup:`-1`]
t : array_like
in situ temperature [:math:`^\circ` C (ITS-90)]
p : array_like
pressure [dbar]
Returns
-------
Helmholtz_energy : array_like
Helmholtz energy [J kg :sup:`-1`]
See Also
--------
TODO
Notes
-----
TODO
Examples
--------
>>> import gsw
>>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]
>>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]
>>> p = [10, 50, 125, 250, 600, 1000]
>>> gsw.Helmholtz_energy_t_exact(SA, t, p)
array([-5985.58288209, -5830.81845224, -3806.96617841, -877.66369421,
-462.17033905, -245.50407205])
References
----------
.. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
of seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp. See section 2.13.
Modifications:
2011-03-29. Trevor McDougall
"""
return (gibbs(n0, n0, n0, SA, t, p) -
(db2Pascal * p + P0) * gibbs(n0, n0, n1, SA, t, p))
@match_args_return
def rho_t_exact(SA, t, p):
r"""Calculates in situ density of seawater from Absolute Salinity and in
situ temperature.
Parameters
----------
SA : array_like
Absolute salinity [g kg :sup:`-1`]
t : array_like
in situ temperature [:math:`^\circ` C (ITS-90)]
p : array_like
pressure [dbar]
Returns
-------
rho_t_exact : array_like
in situ density [kg m :sup:`-3`]
See Also
--------
TODO
Notes
-----
TODO
Examples
--------
>>> import gsw
>>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]
>>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]
>>> p = [10, 50, 125, 250, 600, 1000]
>>> gsw.rho(SA, t, p)
array([ 1021.84017319, 1022.26268993, 1024.42771594, 1027.79020181,
1029.83771473, 1032.00240412])
References
----------
.. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
of seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp. See section 2.8.
Modifications:
    2011-03-29. Paul Barker, David Jackett and Trevor McDougall
"""
return 1. / gibbs(n0, n0, n1, SA, t, p)
@match_args_return
def sigma0_pt0_exact(SA, pt0):
r"""Calculates potential density anomaly with reference sea pressure of
zero (0) dbar. The temperature input to this function is potential
temperature referenced to zero dbar.
Parameters
----------
SA : array_like
Absolute salinity [g kg :sup:`-1`]
pt0 : array_like
potential temperature [:math:`^\circ` C (ITS-90)]
with respect to a reference sea pressure of 0 dbar
Returns
-------
sigma0_pt0_exact : array_like
potential density anomaly [kg m :sup:`-3`]
        with respect to a reference pressure of 0 dbar
See Also
--------
TODO
Notes
-----
TODO
Examples
--------
>>> import gsw
>>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]
>>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]
>>> p = [10, 50, 125, 250, 600, 1000]
>>> gsw.rho(SA, t, p)
array([ 1021.84017319, 1022.26268993, 1024.42771594, 1027.79020181,
1029.83771473, 1032.00240412])
References
----------
.. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
of seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp. See Eqn. (3.6.1).
Modifications:
    2011-03-29. Trevor McDougall and Paul Barker.
"""
SA = np.maximum(SA, 0) # Ensure that SA is non-negative.
x2 = sfac * SA
x = np.sqrt(x2)
y = pt0 * 0.025
g03 = (100015.695367145 +
y * (-270.983805184062 +
y * (1455.0364540468 +
y * (-672.50778314507 +
y * (397.968445406972 +
y * (-194.618310617595 +
y * (63.5113936641785 -
y * 9.63108119393062)))))))
g08 = x2 * (-3310.49154044839 +
x * (199.459603073901 +
x * (-54.7919133532887 +
x * 36.0284195611086 -
y * 22.6683558512829) +
y * (-175.292041186547 +
y * (383.058066002476 +
y * (-460.319931801257 +
y * 234.565187611355)))) +
y * (729.116529735046 +
y * (-860.764303783977 +
y * (694.244814133268 +
y * (-297.728741987187)))))
"""The above code is exactly the same as the following two lines of code.
sigma0_pt_exact = rho_t_exact(SA, pt0, 0.) - 1000
"""
return 100000000. / (g03 + g08) - 1000.0
@match_args_return
def enthalpy_t_exact(SA, t, p):
r"""Calculates the specific enthalpy of seawater.
The specific enthalpy of seawater :math:`h` is given by:
.. math::
h(SA, t, p) = g + (T_0 + t)\eta =
g - (T_0 + t) \frac{\partial g}{\partial T}\Big|_{SA,p}
Parameters
----------
SA : array_like
Absolute salinity [g kg :sup:`-1`]
t : array_like
in situ temperature [:math:`^\circ` C (ITS-90)]
p : array_like
pressure [dbar]
Returns
-------
enthalpy : array_like
specific enthalpy [J kg :sup:`-1`]
See Also
--------
TODO
Notes
-----
TODO
Examples
--------
>>> import gsw
>>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]
>>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]
>>> p = [10, 50, 125, 250, 600, 1000]
>>> gsw.enthalpy(SA, t, p)
array([ 115103.26047838, 114014.8036012 , 92179.9209311 ,
43255.32838089, 33087.21597002, 26970.5880448 ])
References
----------
.. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
of seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp. See appendix A.11.
Modifications:
2011-03-29. David Jackett, Trevor McDougall and Paul Barker.
"""
return (gibbs(n0, n0, n0, SA, t, p) -
(t + Kelvin) * gibbs(n0, n1, n0, SA, t, p))
@match_args_return
def specvol_t_exact(SA, t, p):
r"""Calculates the specific volume of seawater.
Parameters
----------
SA : array_like
Absolute salinity [g kg :sup:`-1`]
t : array_like
in situ temperature [:math:`^\circ` C (ITS-90)]
p : array_like
pressure [dbar]
Returns
-------
specvol : array_like
specific volume [m :sup:`3` kg :sup:`-1`]
See Also
--------
TODO
Notes
-----
TODO
Examples
--------
>>> import gsw
>>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]
>>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]
>>> p = [10, 50, 125, 250, 600, 1000]
>>> gsw.specvol(SA, t, p)
array([ 0.00097863, 0.00097822, 0.00097615, 0.00097296, 0.00097103,
0.00096899])
References
----------
.. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
of seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp. See section 2.7.
Modifications:
2011-03-23. David Jackett and Paul Barker.
"""
return gibbs(n0, n0, n1, SA, t, p)
@match_args_return
def entropy_t_exact(SA, t, p):
r"""Calculates specific entropy of seawater.
The specific entropy of seawater :math:`\eta` is given by:
.. math::
        \eta(SA, t, p) = -g_T = -\frac{\partial g}{\partial T}\Big|_{SA,p}
When taking derivatives with respect to *in situ* temperature, the symbol
:math:`T` will be used for temperature in order that these derivatives not
be confused with time derivatives.
Parameters
----------
SA : array_like
Absolute salinity [g kg :sup:`-1`]
t : array_like
in situ temperature [:math:`^\circ` C (ITS-90)]
p : array_like
pressure [dbar]
Returns
-------
entropy : array_like
specific entropy [J kg :sup:`-1` K :sup:`-1`]
See Also
--------
TODO
Notes
-----
TODO
Examples
--------
>>> import gsw
>>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]
>>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]
>>> p = [10, 50, 125, 250, 600, 1000]
>>> gsw.entropy_t_exact(SA, t, p)
array([ 400.38942528, 395.43817843, 319.8664982 , 146.79088159,
98.64734087, 62.79150873])
References
----------
.. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
of seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp.
Modifications:
2011-03-29. David Jackett, Trevor McDougall and Paul Barker.
"""
return -gibbs(n0, n1, n0, SA, t, p)
@match_args_return
def cp_t_exact(SA, t, p):
r"""Calculates the isobaric heat capacity of seawater.
Parameters
----------
SA : array_like
Absolute salinity [g kg :sup:`-1`]
t : array_like
in situ temperature [:math:`^\circ` C (ITS-90)]
p : array_like
pressure [dbar]
Returns
-------
cp_t_exact : array_like
heat capacity of seawater [J kg :sup:`-1` K :sup:`-1`]
See Also
--------
TODO
Notes
-----
TODO
Examples
--------
>>> import gsw
>>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]
>>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]
>>> p = [10, 50, 125, 250, 600, 1000]
>>> gsw.cp_t_exact(SA, t, p)
array([ 4002.88800396, 4000.98028393, 3995.54646889, 3985.07676902,
3973.59384348, 3960.18408479])
.. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
of seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp.
Modifications:
2011-03-29. David Jackett, Trevor McDougall and Paul Barker
"""
return -(t + Kelvin) * gibbs(n0, n2, n0, SA, t, p)
@match_args_return
def sound_speed_t_exact(SA, t, p):
r"""Calculates the speed of sound in seawater.
The speed of sound in seawater :math:`c` is given by:
.. math::
c(SA, t, p) = \sqrt{ \partial P / \partial \rho |_{SA,\eta}} =
\sqrt{(\rho\kappa)^{-1}} =
g_P \sqrt{g_{TT}/(g^2_{TP} - g_{TT}g_{PP})}
    Note that in these expressions, since sound speed is in m s :sup:`-1` and
    density has units of kg m :sup:`-3`, it follows that the pressure of the
    partial derivatives must be in Pa and the isentropic compressibility
    :math:`\kappa` must have units of Pa :sup:`-1`. The sound speed c produced
    by both the SIA and the GSW software libraries (appendices M and N) has
    units of m s :sup:`-1`.
Parameters
----------
SA : array_like
Absolute salinity [g kg :sup:`-1`]
t : array_like
in situ temperature [:math:`^\circ` C (ITS-90)]
p : array_like
pressure [dbar]
Returns
-------
sound_speed : array_like
speed of sound in seawater [m s :sup:`-1`]
See Also
--------
TODO
Notes
-----
TODO
Examples
--------
>>> import gsw
>>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]
>>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]
>>> p = [10, 50, 125, 250, 600, 1000]
>>> gsw.sound_speed_t_exact(SA, t, p)
array([ 1542.61580359, 1542.70353407, 1530.84497914, 1494.40999692,
1487.37710252, 1483.93460908])
References
----------
.. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
of seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp. See Eqn. (2.17.1)
Modifications:
2011-03-29. David Jackett, Paul Barker and Trevor McDougall.
"""
return (gibbs(n0, n0, n1, SA, t, p) * np.sqrt(gibbs(n0, n2, n0, SA, t, p) /
(gibbs(n0, n1, n1, SA, t, p) ** 2 - gibbs(n0, n2, n0, SA, t, p) *
gibbs(n0, n0, n2, SA, t, p))))
@match_args_return
def specvol_anom_t_exact(SA, t, p):
r"""Calculates specific volume anomaly from Absolute Salinity, in situ
temperature and pressure, using the full TEOS-10 Gibbs function.
The reference value of Absolute Salinity is SSO and the reference value of
Conservative Temperature is equal to 0 :math:`^\circ` C.
Parameters
----------
SA : array_like
Absolute salinity [g kg :sup:`-1`]
t : array_like
in situ temperature [:math:`^\circ` C (ITS-90)]
p : array_like
pressure [dbar]
Returns
-------
specvol_anom_t_exact : array_like
specific volume anomaly [m :sup:`3` kg :sup:`-1`]
See Also
--------
TODO
Notes
-----
TODO
Examples
--------
>>> import gsw
>>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]
>>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]
>>> p = [10, 50, 125, 250, 600, 1000]
>>> gsw.specvol_anom_t_exact(SA, t, p)
array([ 6.01044463e-06, 5.78602432e-06, 4.05564999e-06,
1.42198662e-06, 1.04351837e-06, 7.63964850e-07])
References
----------
.. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
of seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp. See Eqn. (3.7.3)
Modifications:
2011-03-23. Trevor McDougall and Paul Barker
"""
pt_zero = pt_from_CT(SSO, 0)
t_zero = pt_from_t(SSO, pt_zero, 0, p)
return (gibbs(n0, n0, n1, SA, t, p) -
gibbs(n0, n0, n1, SSO, t_zero, p))
@match_args_return
def chem_potential_relative_t_exact(SA, t, p):
r"""Calculates the adiabatic lapse rate of seawater.
Parameters
----------
SA : array_like
Absolute salinity [g kg :sup:`-1`]
t : array_like
in situ temperature [:math:`^\circ` C (ITS-90)]
p : array_like
pressure [dbar]
Returns
-------
chem_potential_relative : array_like
relative chemical potential [J kg :sup:`-1`]
See Also
--------
TODO
Notes
-----
TODO
Examples
--------
>>> import gsw
>>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]
>>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]
>>> p = [10, 50, 125, 250, 600, 1000]
>>> gsw.chem_potential_relative_t_exact(SA, t, p)
array([ 79.4254481 , 79.25989214, 74.69154859, 65.64063719,
61.22685656, 57.21298557])
References
----------
.. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
of seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp.
Modifications:
2011-03-29. Trevor McDougall and Paul Barker
"""
return gibbs(n1, n0, n0, SA, t, p)
@match_args_return
def internal_energy_t_exact(SA, t, p):
r"""Calculates the Helmholtz energy of seawater.
The specific internal energy of seawater :math:`u` is given by:
.. math::
u(SA, t, p) = g + (T_0 + t)\eta - (p + P_0)\nu =
g - (T_0 + t)\frac{\partial g}{\partial T}\Big|_{SA,p} -
(p + P_0)\frac{\partial g}{\partial P}\Big|_{SA,T}
where :math:`T_0` is the Celsius zero point, 273.15 K and
:math:`P_0` = 101 325 Pa is the standard atmosphere pressure.
Parameters
----------
SA : array_like
Absolute salinity [g kg :sup:`-1`]
t : array_like
in situ temperature [:math:`^\circ` C (ITS-90)]
p : array_like
pressure [dbar]
Returns
-------
internal_energy (u) : array_like
specific internal energy [J kg :sup:`-1`]
See Also
--------
TODO
Notes
-----
TODO
Examples
--------
>>> import gsw
>>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]
>>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]
>>> p = [10, 50, 125, 250, 600, 1000]
>>> gsw.internal_energy_t_exact(SA, t, p)
array([ 114906.23847309, 113426.57417062, 90860.81858842,
40724.34005719, 27162.66600185, 17182.50522667])
References
----------
.. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
of seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp. See Eqn. (2.11.1)
Modifications:
2011-03-29. Trevor McDougall
"""
return (gibbs(n0, n0, n0, SA, t, p) -
(Kelvin + t) * gibbs(n0, n1, n0, SA, t, p) -
(db2Pascal * p + P0) * gibbs(n0, n0, n1, SA, t, p))
@match_args_return
def kappa_const_t_exact(SA, t, p):
r"""Calculates isothermal compressibility of seawater at constant in situ
temperature.
.. math::
\kappa^t(SA, t, p) =
\rho^{-1}\frac{\partial \rho}{\partial P}\Big|_{SA,T} =
-\nu^{-1}\frac{\partial \nu}{\partial P}\Big|_{SA,T} =
-\frac{g_{PP}}{g_P}
Parameters
----------
SA : array_like
Absolute salinity [g kg :sup:`-1`]
t : array_like
in situ temperature [:math:`^\circ` C (ITS-90)]
p : array_like
pressure [dbar]
Returns
-------
kappa : array_like
Isothermal compressibility [Pa :sup:`-1`]
See Also
--------
TODO
Notes
-----
This is the compressibility of seawater at constant in situ temperature.
Examples
--------
>>> import gsw
>>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]
>>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]
>>> p = [10, 50, 125, 250, 600, 1000]
>>> gsw.kappa_const_t_exact(SA, t, p)
array([ 4.19071646e-10, 4.18743202e-10, 4.22265764e-10,
4.37735100e-10, 4.40373818e-10, 4.41156577e-10])
References
----------
.. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
of seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp. See Eqn. (2.15.1)
Modifications:
2011-03-29. David Jackett, Trevor McDougall and Paul Barker
"""
return -gibbs(n0, n0, n2, SA, t, p) / gibbs(n0, n0, n1, SA, t, p)
@match_args_return
def alpha_wrt_t_exact(SA, t, p):
r"""Calculates the thermal expansion coefficient of seawater with respect
to in situ temperature.
Parameters
----------
SA : array_like
Absolute salinity [g kg :sup:`-1`]
t : array_like
in situ temperature [:math:`^\circ` C (ITS-90)]
p : array_like
pressure [dbar]
Returns
-------
alpha_wrt_t : array_like
thermal expansion coefficient [K :sup:`-1`]
See Also
--------
TODO
Notes
-----
TODO
Examples
--------
>>> import gsw
>>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]
>>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]
>>> p = [10, 50, 125, 250, 600, 1000]
>>> gsw.alpha_wrt_t_exact(SA, t, p)
array([ 0.0003256 , 0.00032345, 0.00028141, 0.00017283, 0.00014557,
0.00012836])
References
----------
.. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
of seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp. See Eqn. (2.18.1)
.. [2] McDougall, T.J., D.R. Jackett and F.J. Millero, 2010: An algorithm
for estimating Absolute Salinity in the global ocean. Submitted to Ocean
Science. A preliminary version is available at Ocean Sci. Discuss.,
6, 215-242.
Modifications:
2011-03-29. David Jackett, Trevor McDougall and Paul Barker
"""
return gibbs(n0, n1, n1, SA, t, p) / gibbs(n0, n0, n1, SA, t, p)
@match_args_return
def isochoric_heat_cap_t_exact(SA, t, p):
r"""Calculates the isochoric heat capacity of seawater.
Parameters
----------
SA : array_like
Absolute salinity [g kg :sup:`-1`]
t : array_like
in situ temperature [:math:`^\circ` C (ITS-90)]
p : array_like
pressure [dbar]
Returns
-------
isochoric_heat_cap : array_like
isochoric heat capacity [J kg :sup:`-1` K :sup:`-1`]
See Also
--------
TODO
Notes
-----
TODO
Examples
--------
>>> import gsw
>>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]
>>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]
>>> p = [10, 50, 125, 250, 600, 1000]
>>> gsw.isochoric_heat_cap_t_exact(SA, t, p)
array([ 3928.13708702, 3927.27381633, 3941.36418525, 3966.26126146,
3960.50903222, 3950.13901342])
References
----------
.. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
of seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp. See section 2.21.
Modifications:
2011-03-29. Trevor McDougall
"""
return (-(Kelvin + t) * (gibbs(n0, n2, n0, SA, t, p) -
gibbs(n0, n1, n1, SA, t, p) ** 2 / gibbs(n0, n0, n2, SA, t, p)))
@match_args_return
def kappa_t_exact(SA, t, p):
r"""Calculates the isentropic compressibility of seawater.
When the entropy and Absolute Salinity are held constant while the pressure
is changed, the isentropic and isohaline compressibility
    :math:`\kappa` is obtained:
.. math::
\kappa(SA, t, p) =
\rho^{-1}\frac{\partial \rho}{\partial P}\Big|_{SA,\eta} =
-\nu^{-1}\frac{\partial \nu}{\partial P}\Big|_{SA,\eta} =
\rho^{-1}\frac{\partial \rho}{\partial P}\Big|_{SA,\theta} =
-\nu^{-1}\frac{\partial \nu}{\partial P}\Big|_{SA,\theta} =
            \frac{g_{TP}^2 - g_{TT} g_{PP}}{g_P g_{TT}}
The isentropic and isohaline compressibility is sometimes called simply the
isentropic compressibility (or sometimes the "adiabatic compressibility"),
on the unstated understanding that there is also no transfer of salt during
the isentropic or adiabatic change in pressure.
Parameters
----------
SA : array_like
Absolute salinity [g kg :sup:`-1`]
t : array_like
in situ temperature [:math:`^\circ` C (ITS-90)]
p : array_like
pressure [dbar]
Returns
-------
kappa : array_like
Isentropic compressibility [Pa :sup:`-1`]
See Also
--------
TODO
Notes
-----
The output is Pascal and not dbar.
Examples
--------
>>> import gsw
>>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]
>>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]
>>> p = [10, 50, 125, 250, 600, 1000]
>>> gsw.kappa_t_exact(SA, t, p)
array([ 4.11245799e-10, 4.11029072e-10, 4.16539558e-10,
4.35668338e-10, 4.38923693e-10, 4.40037576e-10])
References
----------
.. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
of seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp. See Eqns. (2.16.1) and the row for kappa in
Table P.1 of appendix P
Modifications:
2011-03-23. David Jackett, Trevor McDougall and Paul Barker
"""
return ((gibbs(n0, n1, n1, SA, t, p) ** 2 - gibbs(n0, n2, n0, SA, t, p) *
gibbs(n0, n0, n2, SA, t, p)) / (gibbs(n0, n0, n1, SA, t, p) *
gibbs(n0, n2, n0, SA, t, p)))
@match_args_return
def SA_from_rho_t_exact(rho, t, p):
r"""Calculates the Absolute Salinity of a seawater sample, for given values
of its density, in situ temperature and sea pressure (in dbar).
One use for this function is in the laboratory where a measured value of
the in situ density :math:`\rho` of a seawater sample may have been made at
the laboratory temperature :math:`t` and at atmospheric pressure :math:`p`.
The present function will return the Absolute Salinity SA of this seawater
sample.
Parameters
----------
rho : array_like
in situ density [kg m :sup:`-3`]
t : array_like
in situ temperature [:math:`^\circ` C (ITS-90)]
p : array_like
pressure [dbar]
Returns
-------
SA : array_like
Absolute salinity [g kg :sup:`-1`]
See Also
--------
TODO
Notes
-----
This is expressed on the Reference-Composition Salinity Scale of
Millero et al. (2008).
After two iterations of a modified Newton-Raphson iteration,
the error in SA is typically no larger than
    2 :math:`\times` 10 :sup:`-13` [g kg :sup:`-1`]
Examples
--------
>>> import gsw
>>> rho = [1021.839, 1022.262, 1024.426, 1027.792, 1029.839, 1032.002]
>>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]
>>> p = [10, 50, 125, 250, 600, 1000]
>>> gsw.SA_from_rho_t_exact(rho, t, p)
array([ 34.71022966, 34.89057683, 35.02332421, 34.84952096,
34.73824809, 34.73188384])
References
----------
.. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
of seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp. See section 2.5.
.. [2] Millero, F. J., R. Feistel, D. G. Wright, and T. J. McDougall, 2008:
The composition of Standard Seawater and the definition of the
Reference-Composition Salinity Scale, Deep-Sea Res. I, 55, 50-72.
Modifications:
2011-03-28. Trevor McDougall and Paul Barker.
"""
v_lab = np.ones_like(rho) / rho
v_0 = gibbs(n0, n0, n1, 0, t, p)
v_120 = gibbs(n0, n0, n1, 120, t, p)
# Initial estimate of SA.
SA = 120 * (v_lab - v_0) / (v_120 - v_0)
Ior = np.logical_or(SA < 0, SA > 120)
# Initial estimate of v_SA, SA derivative of v
v_SA = (v_120 - v_0) / 120
for k in range(0, 2):
SA_old = SA
delta_v = gibbs(n0, n0, n1, SA_old, t, p) - v_lab
        # Halfway through the modified N-R method (McDougall and Wotherspoon, 2012).
SA = SA_old - delta_v / v_SA
SA_mean = 0.5 * (SA + SA_old)
v_SA = gibbs(n1, n0, n1, SA_mean, t, p)
SA = SA_old - delta_v / v_SA
SA[Ior] = np.ma.masked
return SA
@match_args_return
def t_from_rho_exact(rho, SA, p):
r"""Calculates the in-situ temperature of a seawater sample, for given
values of its density, Absolute Salinity and sea pressure (in dbar).
Parameters
----------
rho : array_like
in situ density [kg m :sup:`-3`]
SA : array_like
Absolute salinity [g kg :sup:`-1`]
p : array_like
pressure [dbar]
Returns
-------
t : array_like
in situ temperature [:math:`^\circ` C (ITS-90)]
t_multiple : array_like
in situ temperature [:math:`^\circ` C (ITS-90)]
See Also
--------
TODO
Notes
-----
    At low salinities, in brackish water, there are two possible temperatures
    for a single density. This program will output both valid solutions
    (t, t_multiple); if there is only one possible solution, the second
    variable will be set to NaN.
Examples
--------
TODO
References
----------
.. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
of seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp.
Modifications:
2011-04-21. Trevor McDougall and Paul Barker.
"""
"""alpha_limit is the positive value of the thermal expansion coefficient
which is used at the freezing temperature to distinguish between I_salty
and I_fresh."""
alpha_limit = 1e-5
"""rec_half_rho_TT is a constant representing the reciprocal of half the
second derivative of density with respect to temperature near the
temperature of maximum density."""
rec_half_rho_TT = -110.0
t = np.zeros_like(SA) + np.NaN
t_multiple = np.zeros_like(SA) + np.NaN
I_SA = np.logical_or(SA < 0, SA > 42)
I_p = np.logical_or(p < -1.5, p > 12000)
I_SA_p = np.logical_or(I_SA, I_p)
SA[I_SA_p] = np.ma.masked
rho_40 = rho_t_exact(SA, 40 * np.ones_like(SA), p)
I_rho_light = (rho - rho_40) < 0
SA[I_rho_light] = np.ma.masked
t_max_rho = t_maxdensity_exact(SA, p)
rho_max = rho_t_exact(SA, t_max_rho, p)
rho_extreme = rho_max
t_freezing = t_freezing(SA, p) # Assumes seawater is saturated with air.
rho_freezing = rho_t_exact(SA, t_freezing, p)
I_fr_gr_max = (t_freezing - t_max_rho) > 0
rho_extreme[I_fr_gr_max] = rho_freezing[I_fr_gr_max]
I_rho_dense = rho > rho_extreme
SA[I_rho_dense] = np.ma.masked
# FIXME: Is this needed?
I_bad = np.isnan(SA * p * rho)
SA[I_bad] = np.ma.masked
alpha_freezing = alpha_wrt_t_exact(SA, t_freezing, p)
I_salty = alpha_freezing > alpha_limit
t_diff = 40. * np.ones_like(I_salty) - t_freezing(I_salty)
top = (rho_40[I_salty] - rho_freezing[I_salty] +
rho_freezing[I_salty] * alpha_freezing[I_salty] * t_diff)
a = top / (t_diff ** 2)
b = -rho_freezing[I_salty] * alpha_freezing[I_salty]
c = rho_freezing[I_salty] - rho[I_salty]
sqrt_disc = np.sqrt(b ** 2 - 4 * a * c)
# The value of t[I_salty] is the initial guess `t` in the range of I_salty.
t[I_salty] = t_freezing[I_salty] + 0.5 * (-b - sqrt_disc) / a
I_fresh = alpha_freezing <= alpha_limit
t_diff = 40 * np.ones_like[I_fresh] - t_max_rho[I_fresh]
factor = ((rho_max[I_fresh] - rho[I_fresh]) /
(rho_max[I_fresh] - rho_40[I_fresh]))
delta_t = t_diff * np.sqrt(factor)
I_fresh_NR = delta_t > 5
t[I_fresh[I_fresh_NR]] = (t_max_rho[I_fresh[I_fresh_NR]] +
delta_t[I_fresh_NR])
I_quad = delta_t <= 5
t_a = np.zeros_like(SA) + np.NaN
# Set the initial value of the quadratic solution roots.
t_a[I_fresh[I_quad]] = (t_max_rho[I_fresh[I_quad]] +
np.sqrt(rec_half_rho_TT * (rho[I_fresh[I_quad]] -
rho_max[I_fresh[I_quad]])))
for Number_of_iterations in range(0, 5):
t_old = t_a
rho_old = rho_t_exact(SA, t_old, p)
factorqa = (rho_max - rho) / (rho_max - rho_old)
t_a = t_max_rho + (t_old - t_max_rho) * np.sqrt(factorqa)
t_a[t_freezing - t_a < 0] = np.ma.masked
t_b = np.zeros_like(SA) + np.NaN
    # Set the initial value of the quadratic solution roots.
t_b[I_fresh[I_quad]] = (t_max_rho[I_fresh[I_quad]] -
np.sqrt(rec_half_rho_TT * (rho[I_fresh[I_quad]] -
rho_max[I_fresh[I_quad]])))
for Number_of_iterations in range(0, 6):
t_old = t_b
rho_old = rho_t_exact(SA, t_old, p)
factorqb = (rho_max - rho) / (rho_max - rho_old)
t_b = t_max_rho + (t_old - t_max_rho) * np.sqrt(factorqb)
# After seven iterations of this quadratic iterative procedure,
# the error in rho is no larger than 4.6x10^-13 kg/m^3.
t_b[t_freezing - t_b < 0] = np.ma.masked
# Begin the modified Newton-Raphson iterative method, which will only
# operate on non-masked data.
v_lab = np.ones_like(rho) / rho
v_t = gibbs(0, 1, 1, SA, t, p)
for Number_of_iterations in range(0, 3):
t_old = t
delta_v = gibbs(0, 0, 1, SA, t_old, p) - v_lab
t = t_old - delta_v / v_t # Half way through the modified N-R method.
t_mean = 0.5 * (t + t_old)
v_t = gibbs(0, 1, 1, SA, t_mean, p)
t = t_old - delta_v / v_t
I_quad = ~np.isnan(t_a)
t[I_quad] = t_a[I_quad]
I_quad = ~np.isnan(t_b)
t_multiple[I_quad] = t_b[I_quad]
# After three iterations of this modified Newton-Raphson iteration,
# the error in rho is no larger than 4.6x10^-13 kg/m^3.
return t, t_multiple
@match_args_return
def pot_rho_t_exact(SA, t, p, p_ref=0):
r"""Calculates potential density of seawater.
Parameters
----------
SA : array_like
Absolute salinity [g kg :sup:`-1`]
t : array_like
in situ temperature [:math:`^\circ` C (ITS-90)]
p : array_like
pressure [dbar]
p_ref : int, float, optional
reference pressure, default = 0
Returns
-------
pot_rho : array_like
potential density [kg m :sup:`-3`]
See Also
--------
TODO
Notes
-----
TODO
Examples
--------
>>> import gsw
>>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]
>>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]
>>> p = [10, 50, 125, 250, 600, 1000]
>>> gsw.pot_rho_t_exact(SA, t, p)
array([ 1021.79814581, 1022.05248442, 1023.89358365, 1026.66762112,
1027.10723087, 1027.40963126])
>>> gsw.pot_rho(SA, t, p, p_ref=1000)
array([ 1025.95554512, 1026.21306986, 1028.12563226, 1031.1204547 ,
1031.63768355, 1032.00240412])
References
----------
.. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
of seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp. See section 3.4.
Modifications:
2011-03-29. David Jackett, Trevor McDougall and Paul Barker
"""
pt = pt_from_t(SA, t, p, p_ref=p_ref)
return rho_t_exact(SA, pt, p_ref)
@match_args_return
def alpha_wrt_CT_t_exact(SA, t, p):
r"""Calculates the thermal expansion coefficient of seawater with respect
to Conservative Temperature.
Parameters
----------
SA : array_like
Absolute salinity [g kg :sup:`-1`]
t : array_like
in situ temperature [:math:`^\circ` C (ITS-90)]
p : array_like
pressure [dbar]
Returns
-------
alpha_wrt_CT : array_like
thermal expansion coefficient [K :sup:`-1`]
See Also
--------
TODO
Notes
-----
TODO
Examples
--------
>>> import gsw
>>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]
>>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]
>>> p = [10, 50, 125, 250, 600, 1000]
>>> gsw.alpha_wrt_CT_t_exact(SA, t, p)
array([ 0.00032471, 0.00032272, 0.00028118, 0.00017314, 0.00014627,
0.00012943])
References
----------
.. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
of seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp. See Eqn. (2.18.3).
Modifications:
2011-03-29. Trevor McDougall and Paul Barker
"""
pt0 = pt0_from_t(SA, t, p)
factor = -cp0 / ((Kelvin + pt0) * gibbs(n0, n2, n0, SA, t, p))
return factor * (gibbs(n0, n1, n1, SA, t, p) / gibbs(n0, n0, n1, SA, t, p))
@match_args_return
def alpha_wrt_pt_t_exact(SA, t, p):
r"""Calculates the thermal expansion coefficient of seawater with respect
to potential temperature, with a reference pressure of zero.
Parameters
----------
SA : array_like
Absolute salinity [g kg :sup:`-1`]
t : array_like
in situ temperature [:math:`^\circ` C (ITS-90)]
p : array_like
pressure [dbar]
Returns
-------
alpha_wrt_pt : array_like
thermal expansion coefficient [K :sup:`-1`]
See Also
--------
TODO
Notes
-----
TODO
Examples
--------
>>> import gsw
>>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]
>>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]
>>> p = [10, 50, 125, 250, 600, 1000]
>>> gsw.alpha_wrt_pt_t_exact(SA, t, p)
array([ 0.00032562, 0.00032355, 0.00028164, 0.00017314, 0.00014623,
0.00012936])
References
----------
.. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
of seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp. See Eqn. (2.18.2).
Modifications:
2011-03-29. David Jackett, Trevor McDougall and Paul Barker
"""
pt0 = pt0_from_t(SA, t, p)
factor = gibbs(n0, n2, n0, SA, pt0, 0) / gibbs(n0, n2, n0, SA, t, p)
return factor * (gibbs(n0, n1, n1, SA, t, p) / gibbs(n0, n0, n1, SA, t, p))
@match_args_return
def beta_const_CT_t_exact(SA, t, p):
r"""Calculates the saline (i.e. haline) contraction coefficient of seawater
at constant Conservative Temperature.
Parameters
----------
SA : array_like
Absolute salinity [g kg :sup:`-1`]
t : array_like
in situ temperature [:math:`^\circ` C (ITS-90)]
p : array_like
pressure [dbar]
Returns
-------
beta_const_CT : array_like
saline contraction coefficient [kg g :sup:`-1`]
See Also
--------
TODO
Notes
-----
TODO
Examples
--------
>>> import gsw
>>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]
>>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]
>>> p = [10, 50, 125, 250, 600, 1000]
>>> gsw.beta_const_CT_t_exact(SA, t, p)
array([ 0.00071749, 0.00071765, 0.00072622, 0.00075051, 0.00075506,
0.00075707])
References
----------
.. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
of seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp. See Eqn. (2.19.3)
Modifications:
2010-07-23. David Jackett, Trevor McDougall and Paul Barker
"""
# TODO: Original GSW-V3 re-implements gibbs, check what to do here!
pt0 = pt0_from_t(SA, t, p)
factora = (gibbs(n1, n1, n0, SA, t, p) - gibbs(n1, n0, n0, SA, pt0, 0) /
(Kelvin + pt0))
factor = (factora / (gibbs(n0, n0, n1, SA, t, p) *
gibbs(n0, n2, n0, SA, t, p)))
return (gibbs(n0, n1, n1, SA, t, p) * factor -
gibbs(n1, n0, n1, SA, t, p) / gibbs(n0, n0, n1, SA, t, p))
@match_args_return
def beta_const_pt_t_exact(SA, t, p):
r"""Calculates the saline (i.e. haline) contraction coefficient of seawater
at constant potential temperature with a reference pressure of 0 dbar.
Parameters
----------
SA : array_like
Absolute salinity [g kg :sup:`-1`]
t : array_like
in situ temperature [:math:`^\circ` C (ITS-90)]
p : array_like
pressure [dbar]
Returns
-------
beta_const_pt : array_like
saline contraction coefficient [kg g :sup:`-1`]
See Also
--------
TODO
Notes
-----
TODO
Examples
--------
>>> import gsw
>>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]
>>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]
>>> p = [10, 50, 125, 250, 600, 1000]
>>> gsw.beta_const_pt_t_exact(SA, t, p)
array([ 0.00073112, 0.00073106, 0.00073599, 0.00075375, 0.00075712,
0.00075843])
References
----------
.. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
of seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp. See Eqn. (2.19.2)
Modifications:
2011-04-10. Trevor McDougall and Paul Barker
"""
# NOTE: The original Matlab toolbox re-implement some code here. Why?
pt0 = pt0_from_t(SA, t, p)
factora = gibbs(n1, n1, n0, SA, t, p) - gibbs(n1, n1, n0, SA, pt0, 0)
factor = (factora / (gibbs(n0, n0, n1, SA, t, p) *
gibbs(n0, n2, n0, SA, t, p)))
return (gibbs(n0, n1, n1, SA, t, p) * factor -
gibbs(n1, n0, n1, SA, t, p) / gibbs(n0, n0, n1, SA, t, p))
@match_args_return
def beta_const_t_exact(SA, t, p):
r"""Calculates the saline (i.e. haline) contraction coefficient of seawater
at constant in situ temperature.
Parameters
----------
SA : array_like
Absolute salinity [g kg :sup:`-1`]
t : array_like
in situ temperature [:math:`^\circ` C (ITS-90)]
p : array_like
pressure [dbar]
Returns
-------
beta_const_t : array_like
saline contraction coefficient [kg g :sup:`-1`]
See Also
--------
TODO
Notes
-----
TODO
Examples
--------
>>> import gsw
>>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]
>>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]
>>> p = [10, 50, 125, 250, 600, 1000]
>>> gsw.beta_const_t_exact(SA, t, p)
array([ 0.00073112, 0.00073107, 0.00073602, 0.00075381, 0.00075726,
0.00075865])
References
----------
.. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
of seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp. See Eqn. (2.19.1)
Modifications:
2011-03-29. David Jackett, Trevor McDougall and Paul Barker
"""
return -gibbs(n1, n0, n1, SA, t, p) / gibbs(n0, n0, n1, SA, t, p)
@match_args_return
def chem_potential_water_t_exact(SA, t, p):
r"""Calculates the chemical potential of water in seawater.
Parameters
----------
SA : array_like
Absolute salinity [g kg :sup:`-1`]
t : array_like
in situ temperature [:math:`^\circ` C (ITS-90)]
p : array_like
pressure [dbar]
Returns
-------
chem_potential_water : array_like
chemical potential of water in seawater
[J kg :sup:`-1`]
See Also
--------
TODO
Notes
-----
TODO
Examples
--------
>>> import gsw
>>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]
>>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]
>>> p = [10, 50, 125, 250, 600, 1000]
>>> gsw.chem_potential_water_t_exact(SA, t, p)
array([-8545.56114628, -8008.08554834, -5103.98013987, -634.06778275,
3335.56680347, 7555.43444597])
References
----------
.. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
of seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp.
Modifications:
2011-03-29. Trevor McDougall and Paul Barker
"""
SA, t, p, mask = strip_mask(SA, t, p)
# FIXME: Ugly copy from gibbs, why?
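    # A likely reason (an assumption, not confirmed by the original authors):
    # mu_W = g - SA * dg/dSA, so the pure-water (g03) and saline (g08) parts
    # of the TEOS-10 Gibbs polynomial are re-evaluated inline together with
    # the SA-derivative part (g_SA_part), rather than calling gibbs() several
    # times.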
x2 = sfac * SA
x = np.sqrt(x2)
y = t * 0.025
z = p * 1e-4 # Pressure (p) is sea pressure in units of dbar.
g03_g = (101.342743139674 + z * (100015.695367145 +
z * (-2544.5765420363 + z * (284.517778446287 +
z * (-33.3146754253611 + (4.20263108803084 -
0.546428511471039 * z) * z)))) +
y * (5.90578347909402 + z * (-270.983805184062 +
z * (776.153611613101 + z * (-196.51255088122 +
(28.9796526294175 - 2.13290083518327 * z) * z))) +
y * (-12357.785933039 + z * (1455.0364540468 +
z * (-756.558385769359 + z * (273.479662323528 +
z * (-55.5604063817218 + 4.34420671917197 * z)))) +
y * (736.741204151612 + z * (-672.50778314507 +
z * (499.360390819152 + z * (-239.545330654412 +
(48.8012518593872 - 1.66307106208905 * z) * z))) +
y * (-148.185936433658 + z * (397.968445406972 +
z * (-301.815380621876 + (152.196371733841 -
26.3748377232802 * z) * z)) +
y * (58.0259125842571 + z * (-194.618310617595 +
z * (120.520654902025 + z * (-55.2723052340152 +
6.48190668077221 * z))) +
y * (-18.9843846514172 + y * (3.05081646487967 -
9.63108119393062 * z) +
z * (63.5113936641785 + z * (-22.2897317140459 +
8.17060541818112 * z)))))))))
g08_g = x2 * (1416.27648484197 +
x * (-2432.14662381794 + x * (2025.80115603697 +
y * (543.835333000098 + y * (-68.5572509204491 +
y * (49.3667694856254 + y * (-17.1397577419788 +
2.49697009569508 * y))) - 22.6683558512829 * z) +
x * (-1091.66841042967 - 196.028306689776 * y +
x * (374.60123787784 - 48.5891069025409 * x +
36.7571622995805 * y) + 36.0284195611086 * z) +
z * (-54.7919133532887 + (-4.08193978912261 -
30.1755111971161 * z) * z)) +
z * (199.459603073901 + z * (-52.2940909281335 +
(68.0444942726459 - 3.41251932441282 * z) * z)) +
y * (-493.407510141682 + z * (-175.292041186547 +
(83.1923927801819 - 29.483064349429 * z) * z) +
y * (-43.0664675978042 + z * (383.058066002476 +
z * (-54.1917262517112 + 25.6398487389914 * z)) +
y * (-10.0227370861875 - 460.319931801257 * z + y *
(0.875600661808945 + 234.565187611355 * z))))) +
y * (168.072408311545))
g_SA_part = (8645.36753595126 +
x * (-7296.43987145382 + x * (8103.20462414788 +
y * (2175.341332000392 + y * (-274.2290036817964 +
y * (197.4670779425016 + y * (-68.5590309679152 +
9.98788038278032 * y))) - 90.6734234051316 * z) +
x * (-5458.34205214835 - 980.14153344888 * y +
x * (2247.60742726704 - 340.1237483177863 * x +
220.542973797483 * y) + 180.142097805543 * z) +
z * (-219.1676534131548 + (-16.32775915649044 -
120.7020447884644 * z) * z)) +
z * (598.378809221703 + z * (-156.8822727844005 +
(204.1334828179377 - 10.23755797323846 * z) * z)) +
y * (-1480.222530425046 + z * (-525.876123559641 +
(249.57717834054571 - 88.449193048287 * z) * z) +
y * (-129.1994027934126 + z * (1149.174198007428 +
z * (-162.5751787551336 + 76.9195462169742 * z)) +
y * (-30.0682112585625 - 1380.9597954037708 * z + y *
(2.626801985426835 + 703.695562834065 * z))))) +
y * (1187.3715515697959))
chem_potential_water = g03_g + g08_g - 0.5 * sfac * SA * g_SA_part
return np.ma.array(chem_potential_water, mask=mask, copy=False)
@match_args_return
def chem_potential_salt_t_exact(SA, t, p):
r"""Calculates the chemical potential of salt in seawater.
Parameters
----------
SA : array_like
Absolute salinity [g kg :sup:`-1`]
t : array_like
in situ temperature [:math:`^\circ` C (ITS-90)]
p : array_like
pressure [dbar]
Returns
-------
chem_potential_salt : array_like
chemical potential of salt in seawater [J kg :sup:`-1`]
See Also
--------
TODO
Notes
-----
TODO
Examples
--------
>>> import gsw
>>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]
>>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]
>>> p = [10, 50, 125, 250, 600, 1000]
>>> gsw.chem_potential_salt_t_exact(SA, t, p)
array([-8466.13569818, -7928.8256562 , -5029.28859129, -568.42714556,
3396.79366004, 7612.64743154])
References
----------
.. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
of seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp. See section 2.9.
Modifications:
2010-03-29. Trevor McDougall and Paul Barker
"""
return (chem_potential_relative_t_exact(SA, t, p) +
chem_potential_water_t_exact(SA, t, p))
@match_args_return
def adiabatic_lapse_rate_t_exact(SA, t, p):
r"""Calculates the adiabatic lapse rate of seawater.
Parameters
----------
SA : array_like
Absolute salinity [g kg :sup:`-1`]
t : array_like
in situ temperature [:math:`^\circ` C (ITS-90)]
p : array_like
pressure [dbar]
Returns
-------
adiabatic_lapse_rate : array_like
Adiabatic lapse rate [K Pa :sup:`-1`]
See Also
--------
TODO
Notes
-----
    The output is in units of degrees Celsius per Pa (or, equivalently, K/Pa),
    not in units of K/dbar.
Examples
--------
>>> import gsw
>>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]
>>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]
>>> p = [10, 50, 125, 250, 600, 1000]
>>> gsw.adiabatic_lapse_rate_t_exact(SA, t, p)
array([ 2.40350282e-08, 2.38496700e-08, 2.03479880e-08,
1.19586543e-08, 9.96170718e-09, 8.71747270e-09])
References
----------
.. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
of seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp. See Eqn. (2.22.1).
Modifications:
2011-03-29. Trevor McDougall and Paul Barker
"""
return -gibbs(n0, n1, n1, SA, t, p) / gibbs(n0, n2, n0, SA, t, p)
@match_args_return
def osmotic_coefficient_t_exact(SA, t, p):
r"""Calculates the osmotic coefficient of seawater.
Parameters
----------
SA : array_like
Absolute salinity [g kg :sup:`-1`]
t : array_like
in situ temperature [:math:`^\circ` C (ITS-90)]
p : array_like
pressure [dbar]
Returns
-------
osmotic_coefficient : array_like
osmotic coefficient of seawater [unitless]
See Also
--------
TODO
Notes
-----
TODO
Examples
--------
>>> import gsw
>>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]
>>> t = [28.7856, 28.4329, 22.8103, 10.2600, 6.8863, 4.4036]
>>> p = [10, 50, 125, 250, 600, 1000]
    >>> gsw.osmotic_coefficient_t_exact(SA, t, p)
array([ 0.90284718, 0.90298624, 0.90238866, 0.89880927, 0.89801054,
0.89767912])
References
----------
.. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
of seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp.
Modifications:
2011-04-01. Trevor McDougall and Paul Barker.
2012-11-15. Trevor McDougall and Paul Barker.
"""
SA = np.maximum(SA, 0)
k = M_S / R
part = k * (1000 - SA) / (Kelvin + t)
x2 = sfac * SA
x = np.sqrt(x2)
y = t * 0.025
# Note that the input pressure (p) is sea pressure in units of dbar.
z = p / db2Pascal
oc = (7.231916621570606e1, 1.059039593127674e1, -3.025914794694813e1,
5.040733670521486e1, -4.074543321119333e1, 1.864215613820487e1,
-3.022566485046178, -6.138647522851840, 1.353207379758663e1,
-7.316560781114737, 1.829232499785750, -5.358042980767074e-1,
-1.705887283375562, -1.246962174707332e-1, 1.228376913546017,
1.089364009088042e-2, -4.264828939262248e-1, 6.213127679460041e-2,
2.481543497315280, -1.363368964861909, -5.640491627443773e-1,
1.344724779893754, -2.180866793244492, 4.765753255963401,
-5.726993916772165, 2.918303792060746, -6.506082399183509e-1,
-1.015695507663942e-1, 1.035024326471108, -6.742173543702397e-1,
8.465642650849419e-1, -7.508472135244717e-1, -3.668086444057845e-1,
3.189939162107803e-1, -4.245629194309487e-2)
tl = (oc[0] + oc[1] * y + x * (oc[2] + x * (oc[3] + x * (oc[4] + x *
(oc[5] + oc[6] * x))) + y * (oc[7] + x * (oc[8] + x *
(oc[9] + oc[10] * x)) + y * (oc[11] + oc[12] * x + y * (oc[13] +
oc[14] * x + y * (oc[15] + x * (oc[16] + oc[17] * y))))) + z *
(oc[18] + x * (oc[19] + oc[20] * y + oc[21] * x) + y * (oc[22] + y *
(oc[23] + y * (oc[24] + oc[25] * y))) + z * (oc[26] + oc[27] * x + y *
(oc[28] + oc[29] * y) + z * (oc[30] + oc[31] * x + y * (oc[32] +
oc[33] * y) + oc[34] * z)))))
return tl * part
@match_args_return
def dynamic_enthalpy_t_exact(SA, t, p):
r"""Calculates the dynamic enthalpy of seawater from Absolute Salinity, in
situ temperature and pressure. Dynamic enthalpy was defined by Young
(2010) as the difference between enthalpy and potential enthalpy. Note that
this function uses the full TEOS-10 Gibbs function (i.e. the sum of the
IAPWS-09 and IAPWS-08 Gibbs functions, see the TEOS-10 Manual, IOC et al.
(2010)).
Parameters
----------
SA : array_like
Absolute salinity [g kg :sup:`-1`]
t : array_like
in situ temperature [:math:`^\circ` C (ITS-90)]
p : array_like
pressure [dbar]
Returns
-------
dynamic_enthalpy_t_exact : array_like
        dynamic enthalpy [J kg :sup:`-1`]
See Also
--------
TODO
Notes
-----
TODO
Examples
--------
References
----------
.. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
of seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp.
    .. [2] Young, W.R., 2010: Dynamic enthalpy, Conservative Temperature, and
       the seawater Boussinesq approximation. Journal of Physical Oceanography,
40, 394-400.
Modifications:
2011-04-11. Trevor McDougall and Paul Barker
"""
CT = CT_from_t(SA, t, p)
return enthalpy_t_exact(SA, t, p) - cp0 * CT
@match_args_return
def t_maxdensity_exact(SA, p):
r"""Calculates the in-situ temperature of maximum density of seawater.
This function returns the in-situ temperature at which the density of
seawater is a maximum, at given Absolute Salinity, SA, and sea pressure, p
(in dbar).
Parameters
----------
SA : array_like
Absolute salinity [g kg :sup:`-1`]
p : array_like
pressure [dbar]
Returns
-------
t_maxdensity_exact : array_like
max in-situ temperature [:math:`^\circ` C]
See Also
--------
TODO
Notes
-----
TODO
Examples
--------
References
----------
.. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
of seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp. See section 3.42.
Modifications:
2011-04-03. Trevor McDougall and Paul Barker
"""
# The temperature increment for calculating the gibbs_PTT derivative.
dt = 0.001
t = 3.978 - 0.22072 * SA # The initial guess of t_maxden.
gibbs_PTT = 1.1e-8 # The initial guess for g_PTT.
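    # Modified Newton-Raphson scheme (McDougall and Wotherspoon, 2012): each
    # pass takes a provisional step t* = t_old - g_PT(t_old) / g_PTT, updates
    # g_PTT with a centred difference at the midpoint 0.5 * (t* + t_old), and
    # then repeats the step with the refreshed derivative.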
for Number_of_iterations in range(0, 3):
t_old = t
gibbs_PT = gibbs(n0, n1, n1, SA, t_old, p)
# Half way through the mod. method (McDougall and Wotherspoon, 2012)
t = t_old - gibbs_PT / gibbs_PTT
t_mean = 0.5 * (t + t_old)
gibbs_PTT = (gibbs(n0, n1, n1, SA, t_mean + dt, p) -
gibbs(n0, n1, n1, SA, t_mean - dt, p)) / (dt + dt)
t = t_old - gibbs_PT / gibbs_PTT
# After three iterations of this modified Newton-Raphson iteration, the
# error in t_maxdensity_exact is typically no larger than 1x10^-15 deg C.
return t
@match_args_return
def osmotic_pressure_t_exact(SA, t, pw):
r"""Calculates the osmotic pressure of seawater.
Parameters
----------
SA : array_like
Absolute salinity [g kg :sup:`-1`]
t : array_like
in situ temperature [:math:`^\circ` C (ITS-90)]
pw : array_like
sea pressure of the pure water side [dbar]
Returns
-------
osmotic_pressure_t_exact : array_like
dynamic osmotic pressure of seawater [dbar]
See Also
--------
TODO
Notes
-----
TODO
Examples
--------
References
----------
.. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
of seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp. See section 3.41.
Modifications:
2011-05-26. Trevor McDougall and Paul Barker
"""
SA = np.maximum(SA, 0)
gibbs_pure_water = gibbs(0, 0, 0, 0, t, pw)
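    # The osmotic pressure is the extra sea pressure the seawater side must be
    # under for the chemical potential of water in seawater to match the Gibbs
    # function of pure water at pw, i.e. the root of
    #     f(p) = g_pure_water(t, pw) - mu_W(SA, t, p),
    # solved below with the same modified Newton-Raphson scheme and returned
    # as p - pw in dbar.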
# Initial guess of p, in dbar.
p = pw + 235.4684
# Initial guess of df/dp.
df_dp = -db2Pascal * (gibbs(n0, n0, n1, SA, t, p) -
SA * gibbs(n1, n0, n1, SA, t, p))
for Number_of_iterations in range(0, 2):
p_old = p
f = gibbs_pure_water - chem_potential_water_t_exact(SA, t, p_old)
# This is half way through the modified N-R method.
p = p_old - f / df_dp
p_mean = 0.5 * (p + p_old)
df_dp = -db2Pascal * (gibbs(0, 0, 1, SA, t, p_mean) -
SA * gibbs(1, 0, 1, SA, t, p_mean))
p = p_old - f / df_dp
# After two iterations though the modified Newton-Raphson technique the
# maximum error is 6x10^-12 dbar.
# Osmotic pressure of seawater in dbar.
return p - pw
if __name__ == '__main__':
import doctest
doctest.testmod()
|
#!/usr/bin/env python3
import unittest
TestCase = unittest.TestCase
from BioUtil import fastqFile
import os
print("test fastqFile")
data_dir = os.path.join(os.path.dirname(__file__), "..", "data")
def readfile(file):
    with open(file, 'r') as fh:
        return fh.readlines()
class TestFastq(TestCase):
    @classmethod
    def setUpClass(cls):
        cls.test_fa = os.path.join(data_dir, "test.fa")
        cls.test_fq = os.path.join(data_dir, "test.fq")
        cls.tmp_fa = os.path.join(data_dir, "tmp.fa")
        cls.tmp_fq = os.path.join(data_dir, "tmp.fq")
    @classmethod
    def tearDownClass(cls):
        os.remove(cls.tmp_fa)
        os.remove(cls.tmp_fq)
def test_fasta(self):
with fastqFile(self.tmp_fa, 'w', linewidth=70) as out, fastqFile(self.test_fa) as input:
for rec in input:
# print(rec, file=out)
out.write(rec)
#gold_file = os.path.join(data_dir, "test.100.fa")
gold_file = self.test_fa
gold = readfile(gold_file)
result = readfile(self.tmp_fa)
self.assertEqual(gold, result)
def test_fastq(self):
with fastqFile(self.tmp_fq, 'w') as out, fastqFile(self.test_fq) as input:
for rec in input:
print(rec, file=out)
gold = readfile(self.test_fq)
result = readfile(self.tmp_fq)
self.assertEqual(gold, result)
def test_fastq_write(self):
with fastqFile(self.tmp_fq, 'w') as out, fastqFile(self.test_fq) as input:
for rec in input:
out.write(rec)
gold = readfile(self.test_fq)
result = readfile(self.tmp_fq)
self.assertEqual(gold, result)
if __name__ == '__main__':
unittest.main()
|
import socket
import sys
import argparse
from thread import allocate_lock
import time
import select
import requesthandler
import ConfigParser
class Server(object):
def __init__(self, max_clients, port, accounts):
self.max_clients = max_clients
self.port = port
self.accounts = accounts
self.connection = None
self.clients = []
self.shutdown_granted = False
self.lock = allocate_lock()
self.loggedInUsers = {}
def start(self):
self.connection = socket.socket()
try:
self.connection.bind(('0.0.0.0', self.port))
self.connection.listen(1)
except socket.error as error:
print error
self.connection.close()
return error
return None
def on_disconnect(self, client):
self.clients.remove(client)
if self.shutdown_granted and len(self.clients) == 0:
self.close()
def on_client_accept(self, connection):
handler = requesthandler.RequestHandler(connection, self)
handler.start()
self.clients.append(handler)
def wait_clients(self):
while not self.shutdown_granted:
if len(self.clients) < self.max_clients:
print "Waiting"
readable_list = [self.connection]
readable, writeable, errorlist = select.select(readable_list, [], [])
for s in readable:
if s is self.connection:
try:
client_connection, address = self.connection.accept()
print("Accepted client from ", address)
self.on_client_accept(client_connection)
except socket.error as accept_error:
print("Error while accepting client: ", accept_error)
else:
time.sleep(0.1)
def close(self):
try:
self.connection.shutdown(socket.SHUT_RDWR)
self.connection.close()
except:
pass
    def try_login(self, username, password):
        with self.lock:
            if username in self.loggedInUsers and self.loggedInUsers[username]:
                state = False
            else:
                account = self.get_account(username)
                if account:
                    state = account['password'] == password
                else:
                    state = False
        return state
def get_account(self, username):
for accountDef in self.accounts:
account = self.accounts[accountDef]
if account['name'] == username:
return account
return None
    def set_logged_in(self, username, state):
        with self.lock:
            self.loggedInUsers[username] = state
def load_config():
config = ConfigParser.ConfigParser()
config.read("../config.ini")
servers = {}
for section in config.sections():
values = {}
values['name'] = section
for option in config.options(section):
try:
values[option] = config.get(section, option)
except:
print("exception when parsing", option)
values[option] = None
servers[section] = values
return servers
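# Expected config.ini layout, sketched from how load_config() and
# Server.get_account() consume the values (the real file is not included
# here, so everything except the 'name' and 'password' keys is an
# assumption):
#
#   [alice]
#   name = alice
#   password = secret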
def configure():
parser = argparse.ArgumentParser(description="RN Server")
parser.add_argument("--max-clients", '-n', type=int, help="Number of max clients", default=3)
parser.add_argument("--port", "-p", type=int, help="Port", default=1337)
result = parser.parse_args(sys.argv[1:])
server = Server(max_clients=result.max_clients, port=result.port, accounts=load_config())
result = server.start()
return server, result
if __name__ == "__main__":
server, error = configure()
if error:
print("There was an error setting up the server")
else:
print "Awaiting connection"
server.wait_clients()
server.close()
|
#!/usr/bin/env python3
# This file is part of Shoop.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
"""
License header updater.
"""
from __future__ import unicode_literals
import argparse
import os
import sys
import sanity_utils
HEADER = """
This file is part of Shoop.
Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
This source code is licensed under the AGPLv3 license found in the
LICENSE file in the root directory of this source tree.
""".strip()
PY_HEADER = '\n'.join(('# ' + line).strip() for line in HEADER.splitlines())
JS_HEADER = (
'/**\n' +
'\n'.join((' * ' + line).rstrip() for line in HEADER.splitlines()) +
'\n */')
PY_HEADER_LINES = PY_HEADER.encode('utf-8').splitlines()
JS_HEADER_LINES = JS_HEADER.encode('utf-8').splitlines()
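# For reference, with the HEADER text above PY_HEADER renders as:
#
#   # This file is part of Shoop.
#   # Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#   # This source code is licensed under the AGPLv3 license found in the
#   # LICENSE file in the root directory of this source tree.
#
# and JS_HEADER is the same text wrapped in a /** ... */ block with " * "
# line prefixes.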
def get_adders():
return {
'.py': add_header_to_python_file,
'.js': add_header_to_javascript_file
}
def main():
ap = argparse.ArgumentParser()
ap.add_argument("root", nargs="+", help="Directory roots to recurse through")
ap.add_argument("-w", "--write", help="Actually write changes", action="store_true")
ap.add_argument("-s", "--exit-status", help="Exit with error status when missing headers", action="store_true")
ap.add_argument("-v", "--verbose", help="Log OK files too", action="store_true")
args = ap.parse_args()
adders = get_adders()
paths = find_files(roots=args.root, extensions=set(adders.keys()))
missing = process_files(paths, adders, verbose=args.verbose, write=args.write)
if args.exit_status and missing:
return 1
return 0
def process_files(paths, adders, verbose, write):
    width = max((len(s) for s in paths), default=0)
missing = set()
for path in sorted(paths):
if os.stat(path).st_size == 0:
if verbose:
print('[+]:%-*s: File is empty' % (width, path))
elif not has_header(path):
missing.add(path)
if write:
adder = adders[os.path.splitext(path)[1]]
adder(path)
print('[!]:%-*s: Modified' % (width, path))
else:
print('[!]:%-*s: Requires license header' % (width, path))
else:
if verbose:
print('[+]:%-*s: File has license header' % (width, path))
return missing
def find_files(roots, extensions):
paths = set()
generated_resources = set()
for root in roots:
for file in sanity_utils.find_files(
root,
generated_resources=generated_resources,
allowed_extensions=extensions,
ignored_dirs=sanity_utils.IGNORED_DIRS + ["migrations"]
):
if not is_file_ignored(file):
paths.add(file)
paths -= generated_resources
return paths
def is_file_ignored(filepath):
filepath = filepath.replace(os.sep, "/")
return (
('vendor' in filepath) or
('doc/_ext/djangodocs.py' in filepath)
)
def has_header(path):
with open(path, 'rb') as fp:
return b"This file is part of Shoop." in fp.read(256)
def add_header_to_python_file(path):
lines = get_lines(path)
if lines:
i = 0
if lines[i].startswith(b'#!'):
i += 1
if i < len(lines) and b'coding' in lines[i]:
i += 1
new_lines = lines[:i] + PY_HEADER_LINES + lines[i:]
write_lines(path, new_lines)
def add_header_to_javascript_file(path):
lines = get_lines(path)
if lines:
new_lines = JS_HEADER_LINES + lines
write_lines(path, new_lines)
def get_lines(path):
with open(path, 'rb') as fp:
contents = fp.read()
if not contents.strip():
return []
return contents.splitlines()
def write_lines(path, new_lines):
with open(path, 'wb') as fp:
for line in new_lines:
fp.write(line + b'\n')
if __name__ == '__main__':
sys.exit(main())
|
"""Support to interface with the Plex API."""
from datetime import timedelta
import json
import logging
import plexapi.exceptions
import plexapi.playlist
import plexapi.playqueue
import requests.exceptions
from homeassistant.components.media_player import MediaPlayerDevice
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MOVIE,
MEDIA_TYPE_MUSIC,
MEDIA_TYPE_TVSHOW,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
)
from homeassistant.const import (
DEVICE_DEFAULT_NAME,
STATE_IDLE,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
)
from homeassistant.helpers.event import track_time_interval
from homeassistant.util import dt as dt_util
from .const import (
CONF_SERVER_IDENTIFIER,
DOMAIN as PLEX_DOMAIN,
NAME_FORMAT,
REFRESH_LISTENERS,
SERVERS,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Plex media_player platform.
Deprecated.
"""
pass
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Plex media_player from a config entry."""
def add_entities(entities, update_before_add=False):
"""Sync version of async add entities."""
hass.add_job(async_add_entities, entities, update_before_add)
hass.async_add_executor_job(_setup_platform, hass, config_entry, add_entities)
def _setup_platform(hass, config_entry, add_entities_callback):
"""Set up the Plex media_player platform."""
server_id = config_entry.data[CONF_SERVER_IDENTIFIER]
plexserver = hass.data[PLEX_DOMAIN][SERVERS][server_id]
plex_clients = {}
plex_sessions = {}
hass.data[PLEX_DOMAIN][REFRESH_LISTENERS][server_id] = track_time_interval(
hass, lambda now: update_devices(), timedelta(seconds=10)
)
def update_devices():
"""Update the devices objects."""
try:
devices = plexserver.clients()
except plexapi.exceptions.BadRequest:
_LOGGER.exception("Error listing plex devices")
return
except requests.exceptions.RequestException as ex:
_LOGGER.warning(
"Could not connect to Plex server: %s (%s)",
plexserver.friendly_name,
ex,
)
return
new_plex_clients = []
available_client_ids = []
for device in devices:
# For now, let's allow all deviceClass types
if device.deviceClass in ["badClient"]:
continue
available_client_ids.append(device.machineIdentifier)
if device.machineIdentifier not in plex_clients:
new_client = PlexClient(
plexserver, device, None, plex_sessions, update_devices
)
plex_clients[device.machineIdentifier] = new_client
_LOGGER.debug("New device: %s", device.machineIdentifier)
new_plex_clients.append(new_client)
else:
_LOGGER.debug("Refreshing device: %s", device.machineIdentifier)
plex_clients[device.machineIdentifier].refresh(device, None)
# add devices with a session and no client (ex. PlexConnect Apple TV's)
try:
sessions = plexserver.sessions()
except plexapi.exceptions.BadRequest:
_LOGGER.exception("Error listing plex sessions")
return
except requests.exceptions.RequestException as ex:
_LOGGER.warning(
"Could not connect to Plex server: %s (%s)",
plexserver.friendly_name,
ex,
)
return
plex_sessions.clear()
for session in sessions:
for player in session.players:
plex_sessions[player.machineIdentifier] = session, player
for machine_identifier, (session, player) in plex_sessions.items():
if machine_identifier in available_client_ids:
# Avoid using session if already added as a device.
_LOGGER.debug("Skipping session, device exists: %s", machine_identifier)
continue
if (
machine_identifier not in plex_clients
and machine_identifier is not None
):
new_client = PlexClient(
plexserver, player, session, plex_sessions, update_devices
)
plex_clients[machine_identifier] = new_client
_LOGGER.debug("New session: %s", machine_identifier)
new_plex_clients.append(new_client)
else:
_LOGGER.debug("Refreshing session: %s", machine_identifier)
plex_clients[machine_identifier].refresh(None, session)
for client in plex_clients.values():
# force devices to idle that do not have a valid session
if client.session is None:
client.force_idle()
client.set_availability(
client.machine_identifier in available_client_ids
or client.machine_identifier in plex_sessions
)
if client not in new_plex_clients:
client.schedule_update_ha_state()
if new_plex_clients:
add_entities_callback(new_plex_clients)
class PlexClient(MediaPlayerDevice):
"""Representation of a Plex device."""
def __init__(self, plex_server, device, session, plex_sessions, update_devices):
"""Initialize the Plex device."""
self._app_name = ""
self._device = None
self._available = False
self._marked_unavailable = None
self._device_protocol_capabilities = None
self._is_player_active = False
self._is_player_available = False
self._player = None
self._machine_identifier = None
self._make = ""
self._name = None
self._player_state = "idle"
self._previous_volume_level = 1 # Used in fake muting
self._session = None
self._session_type = None
self._session_username = None
self._state = STATE_IDLE
self._volume_level = 1 # since we can't retrieve remotely
self._volume_muted = False # since we can't retrieve remotely
self.plex_server = plex_server
self.plex_sessions = plex_sessions
self.update_devices = update_devices
# General
self._media_content_id = None
self._media_content_rating = None
self._media_content_type = None
self._media_duration = None
self._media_image_url = None
self._media_title = None
self._media_position = None
self._media_position_updated_at = None
# Music
self._media_album_artist = None
self._media_album_name = None
self._media_artist = None
self._media_track = None
# TV Show
self._media_episode = None
self._media_season = None
self._media_series_title = None
self.refresh(device, session)
def _clear_media_details(self):
"""Set all Media Items to None."""
# General
self._media_content_id = None
self._media_content_rating = None
self._media_content_type = None
self._media_duration = None
self._media_image_url = None
self._media_title = None
# Music
self._media_album_artist = None
self._media_album_name = None
self._media_artist = None
self._media_track = None
# TV Show
self._media_episode = None
self._media_season = None
self._media_series_title = None
# Clear library Name
self._app_name = ""
def refresh(self, device, session):
"""Refresh key device data."""
self._clear_media_details()
if session: # Not being triggered by Chrome or FireTablet Plex App
self._session = session
if device:
self._device = device
try:
device_url = self._device.url("/")
except plexapi.exceptions.BadRequest:
device_url = "127.0.0.1"
if "127.0.0.1" in device_url:
self._device.proxyThroughServer()
self._session = None
self._machine_identifier = self._device.machineIdentifier
self._name = NAME_FORMAT.format(self._device.title or DEVICE_DEFAULT_NAME)
self._device_protocol_capabilities = self._device.protocolCapabilities
# set valid session, preferring device session
if self._device.machineIdentifier in self.plex_sessions:
self._session = self.plex_sessions.get(
self._device.machineIdentifier, [None, None]
)[0]
if self._session:
if (
self._device is not None
and self._device.machineIdentifier is not None
and self._session.players
):
self._is_player_available = True
self._player = [
p
for p in self._session.players
if p.machineIdentifier == self._device.machineIdentifier
][0]
self._name = NAME_FORMAT.format(self._player.title)
self._player_state = self._player.state
self._session_username = self._session.usernames[0]
self._make = self._player.device
else:
self._is_player_available = False
# Calculate throttled position for proper progress display.
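            # Only refresh the stored position/timestamp when the reported
            # playback position has drifted by more than 5 seconds from what
            # the elapsed wall-clock time predicts (e.g. after a seek);
            # otherwise keep the previous values so progress can be
            # extrapolated smoothly between updates.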
position = int(self._session.viewOffset / 1000)
now = dt_util.utcnow()
if self._media_position is not None:
pos_diff = position - self._media_position
time_diff = now - self._media_position_updated_at
if pos_diff != 0 and abs(time_diff.total_seconds() - pos_diff) > 5:
self._media_position_updated_at = now
self._media_position = position
else:
self._media_position_updated_at = now
self._media_position = position
self._media_content_id = self._session.ratingKey
self._media_content_rating = getattr(self._session, "contentRating", None)
self._set_player_state()
if self._is_player_active and self._session is not None:
self._session_type = self._session.type
self._media_duration = int(self._session.duration / 1000)
# title (movie name, tv episode name, music song name)
self._media_title = self._session.title
# media type
self._set_media_type()
self._app_name = (
self._session.section().title
if self._session.section() is not None
else ""
)
self._set_media_image()
else:
self._session_type = None
def _set_media_image(self):
thumb_url = self._session.thumbUrl
if (
self.media_content_type is MEDIA_TYPE_TVSHOW
and not self.plex_server.use_episode_art
):
thumb_url = self._session.url(self._session.grandparentThumb)
if thumb_url is None:
_LOGGER.debug(
"Using media art because media thumb " "was not found: %s",
self.entity_id,
)
            thumb_url = self._session.url(self._session.art)
self._media_image_url = thumb_url
def set_availability(self, available):
"""Set the device as available/unavailable noting time."""
if not available:
self._clear_media_details()
if self._marked_unavailable is None:
self._marked_unavailable = dt_util.utcnow()
else:
self._marked_unavailable = None
self._available = available
def _set_player_state(self):
if self._player_state == "playing":
self._is_player_active = True
self._state = STATE_PLAYING
elif self._player_state == "paused":
self._is_player_active = True
self._state = STATE_PAUSED
elif self.device:
self._is_player_active = False
self._state = STATE_IDLE
else:
self._is_player_active = False
self._state = STATE_OFF
def _set_media_type(self):
if self._session_type in ["clip", "episode"]:
self._media_content_type = MEDIA_TYPE_TVSHOW
# season number (00)
if callable(self._session.season):
self._media_season = str((self._session.season()).index).zfill(2)
elif self._session.parentIndex is not None:
self._media_season = self._session.parentIndex.zfill(2)
else:
self._media_season = None
# show name
self._media_series_title = self._session.grandparentTitle
# episode number (00)
if self._session.index is not None:
self._media_episode = str(self._session.index).zfill(2)
elif self._session_type == "movie":
self._media_content_type = MEDIA_TYPE_MOVIE
if self._session.year is not None and self._media_title is not None:
self._media_title += " (" + str(self._session.year) + ")"
elif self._session_type == "track":
self._media_content_type = MEDIA_TYPE_MUSIC
self._media_album_name = self._session.parentTitle
self._media_album_artist = self._session.grandparentTitle
self._media_track = self._session.index
self._media_artist = self._session.originalTitle
# use album artist if track artist is missing
if self._media_artist is None:
_LOGGER.debug(
"Using album artist because track artist " "was not found: %s",
self.entity_id,
)
self._media_artist = self._media_album_artist
def force_idle(self):
"""Force client to idle."""
self._state = STATE_IDLE
self._session = None
self._clear_media_details()
@property
def should_poll(self):
"""Return True if entity has to be polled for state."""
return False
@property
def unique_id(self):
"""Return the id of this plex client."""
return self.machine_identifier
@property
def available(self):
"""Return the availability of the client."""
return self._available
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def machine_identifier(self):
"""Return the machine identifier of the device."""
return self._machine_identifier
@property
def app_name(self):
"""Return the library name of playing media."""
return self._app_name
@property
def device(self):
"""Return the device, if any."""
return self._device
@property
def marked_unavailable(self):
"""Return time device was marked unavailable."""
return self._marked_unavailable
@property
def session(self):
"""Return the session, if any."""
return self._session
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def _active_media_plexapi_type(self):
"""Get the active media type required by PlexAPI commands."""
if self.media_content_type is MEDIA_TYPE_MUSIC:
return "music"
return "video"
@property
def media_content_id(self):
"""Return the content ID of current playing media."""
return self._media_content_id
@property
def media_content_type(self):
"""Return the content type of current playing media."""
if self._session_type == "clip":
_LOGGER.debug(
"Clip content type detected, " "compatibility may vary: %s",
self.entity_id,
)
return MEDIA_TYPE_TVSHOW
if self._session_type == "episode":
return MEDIA_TYPE_TVSHOW
if self._session_type == "movie":
return MEDIA_TYPE_MOVIE
if self._session_type == "track":
return MEDIA_TYPE_MUSIC
return None
@property
def media_artist(self):
"""Return the artist of current playing media, music track only."""
return self._media_artist
@property
def media_album_name(self):
"""Return the album name of current playing media, music track only."""
return self._media_album_name
@property
def media_album_artist(self):
"""Return the album artist of current playing media, music only."""
return self._media_album_artist
@property
def media_track(self):
"""Return the track number of current playing media, music only."""
return self._media_track
@property
def media_duration(self):
"""Return the duration of current playing media in seconds."""
return self._media_duration
@property
def media_position(self):
"""Return the duration of current playing media in seconds."""
return self._media_position
@property
def media_position_updated_at(self):
"""When was the position of the current playing media valid."""
return self._media_position_updated_at
@property
def media_image_url(self):
"""Return the image URL of current playing media."""
return self._media_image_url
@property
def media_title(self):
"""Return the title of current playing media."""
return self._media_title
@property
def media_season(self):
"""Return the season of current playing media (TV Show only)."""
return self._media_season
@property
def media_series_title(self):
"""Return the title of the series of current playing media."""
return self._media_series_title
@property
def media_episode(self):
"""Return the episode of current playing media (TV Show only)."""
return self._media_episode
@property
def make(self):
"""Return the make of the device (ex. SHIELD Android TV)."""
return self._make
@property
def supported_features(self):
"""Flag media player features that are supported."""
if not self._is_player_active:
return 0
# force show all controls
if self.plex_server.show_all_controls:
return (
SUPPORT_PAUSE
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_STOP
| SUPPORT_VOLUME_SET
| SUPPORT_PLAY
| SUPPORT_TURN_OFF
| SUPPORT_VOLUME_MUTE
)
# only show controls when we know what device is connecting
if not self._make:
return 0
# no mute support
if self.make.lower() == "shield android tv":
_LOGGER.debug(
"Shield Android TV client detected, disabling mute " "controls: %s",
self.entity_id,
)
return (
SUPPORT_PAUSE
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_STOP
| SUPPORT_VOLUME_SET
| SUPPORT_PLAY
| SUPPORT_TURN_OFF
)
# Only supports play,pause,stop (and off which really is stop)
if self.make.lower().startswith("tivo"):
_LOGGER.debug(
"Tivo client detected, only enabling pause, play, "
"stop, and off controls: %s",
self.entity_id,
)
return SUPPORT_PAUSE | SUPPORT_PLAY | SUPPORT_STOP | SUPPORT_TURN_OFF
# Not all devices support playback functionality
# Playback includes volume, stop/play/pause, etc.
if self.device and "playback" in self._device_protocol_capabilities:
return (
SUPPORT_PAUSE
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_STOP
| SUPPORT_VOLUME_SET
| SUPPORT_PLAY
| SUPPORT_TURN_OFF
| SUPPORT_VOLUME_MUTE
)
return 0
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.setVolume(int(volume * 100), self._active_media_plexapi_type)
self._volume_level = volume # store since we can't retrieve
self.update_devices()
@property
def volume_level(self):
"""Return the volume level of the client (0..1)."""
if (
self._is_player_active
and self.device
and "playback" in self._device_protocol_capabilities
):
return self._volume_level
@property
def is_volume_muted(self):
"""Return boolean if volume is currently muted."""
if self._is_player_active and self.device:
return self._volume_muted
def mute_volume(self, mute):
"""Mute the volume.
Since we can't actually mute, we'll:
- On mute, store volume and set volume to 0
- On unmute, set volume to previously stored volume
"""
if not (self.device and "playback" in self._device_protocol_capabilities):
return
self._volume_muted = mute
if mute:
self._previous_volume_level = self._volume_level
self.set_volume_level(0)
else:
self.set_volume_level(self._previous_volume_level)
def media_play(self):
"""Send play command."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.play(self._active_media_plexapi_type)
self.update_devices()
def media_pause(self):
"""Send pause command."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.pause(self._active_media_plexapi_type)
self.update_devices()
def media_stop(self):
"""Send stop command."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.stop(self._active_media_plexapi_type)
self.update_devices()
def turn_off(self):
"""Turn the client off."""
# Fake it since we can't turn the client off
self.media_stop()
def media_next_track(self):
"""Send next track command."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.skipNext(self._active_media_plexapi_type)
self.update_devices()
def media_previous_track(self):
"""Send previous track command."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.skipPrevious(self._active_media_plexapi_type)
self.update_devices()
def play_media(self, media_type, media_id, **kwargs):
"""Play a piece of media."""
if not (self.device and "playback" in self._device_protocol_capabilities):
return
src = json.loads(media_id)
media = None
if media_type == "MUSIC":
media = (
self.device.server.library.section(src["library_name"])
.get(src["artist_name"])
.album(src["album_name"])
.get(src["track_name"])
)
elif media_type == "EPISODE":
media = self._get_tv_media(
src["library_name"],
src["show_name"],
src["season_number"],
src["episode_number"],
)
elif media_type == "PLAYLIST":
media = self.device.server.playlist(src["playlist_name"])
elif media_type == "VIDEO":
media = self.device.server.library.section(src["library_name"]).get(
src["video_name"]
)
if (
media
and media_type == "EPISODE"
and isinstance(media, plexapi.playlist.Playlist)
):
# delete episode playlist after being loaded into a play queue
self._client_play_media(media=media, delete=True, shuffle=src["shuffle"])
elif media:
self._client_play_media(media=media, shuffle=src["shuffle"])
def _get_tv_media(self, library_name, show_name, season_number, episode_number):
"""Find TV media and return a Plex media object."""
target_season = None
target_episode = None
show = self.device.server.library.section(library_name).get(show_name)
if not season_number:
playlist_name = f"{self.entity_id} - {show_name} Episodes"
return self.device.server.createPlaylist(playlist_name, show.episodes())
for season in show.seasons():
if int(season.seasonNumber) == int(season_number):
target_season = season
break
if target_season is None:
_LOGGER.error(
"Season not found: %s\\%s - S%sE%s",
library_name,
show_name,
str(season_number).zfill(2),
str(episode_number).zfill(2),
)
else:
if not episode_number:
playlist_name = "{} - {} Season {} Episodes".format(
self.entity_id, show_name, str(season_number)
)
return self.device.server.createPlaylist(
playlist_name, target_season.episodes()
)
for episode in target_season.episodes():
if int(episode.index) == int(episode_number):
target_episode = episode
break
if target_episode is None:
_LOGGER.error(
"Episode not found: %s\\%s - S%sE%s",
library_name,
show_name,
str(season_number).zfill(2),
str(episode_number).zfill(2),
)
return target_episode
def _client_play_media(self, media, delete=False, **params):
"""Instruct Plex client to play a piece of media."""
if not (self.device and "playback" in self._device_protocol_capabilities):
_LOGGER.error("Client cannot play media: %s", self.entity_id)
return
playqueue = plexapi.playqueue.PlayQueue.create(
self.device.server, media, **params
)
# Delete dynamic playlists used to build playqueue (ex. play tv season)
if delete:
media.delete()
server_url = self.device.server.baseurl.split(":")
self.device.sendCommand(
"playback/playMedia",
**dict(
{
"machineIdentifier": self.device.server.machineIdentifier,
"address": server_url[1].strip("/"),
"port": server_url[-1],
"key": media.key,
"containerKey": "/playQueues/{}?window=100&own=1".format(
playqueue.playQueueID
),
},
**params,
),
)
self.update_devices()
@property
def device_state_attributes(self):
"""Return the scene state attributes."""
attr = {
"media_content_rating": self._media_content_rating,
"session_username": self._session_username,
"media_library_name": self._app_name,
}
return attr
|
# Adapted for numpy/ma/cdms2 by convertcdms.py
import numpy
import cdtime
class VCSUtilsError (Exception):
def __init__ (self, args=None):
"""Create an exception"""
self.args = args
def __str__(self):
"""Calculate the string representation"""
return str(self.args)
__repr__ = __str__
def minmax(*data) :
'''
Function : minmax
Description of Function
    Return the minimum and maximum of a series of arrays/lists/tuples (or any combination of these)
    Values whose absolute value is greater than 1.E20 are masked
    You can combine lists/tuples/...; pretty much any combination is allowed
Examples of Use
>>> s=range(7)
>>> vcs.minmax(s)
(0.0, 6.0)
>>> vcs.minmax([s,s])
(0.0, 6.0)
>>> vcs.minmax([[s,s*2],4.,[6.,7.,s]],[5.,-7.,8,(6.,1.)])
(-7.0, 8.0)
'''
mx=-1.E77
mn=1.E77
if len(data)==1 : data=data[0]
global myfunction
def myfunction(d,mx,mn):
if d is None:
return mx,mn
from numpy.ma import maximum,minimum,masked_where,absolute,greater,count
try:
d=masked_where(greater(absolute(d),9.9E19),d)
if count(d)==0 : return mx,mn
mx=float(maximum(mx,float(maximum(d))))
mn=float(minimum(mn,float(minimum(d))))
except:
for i in d:
mx,mn=myfunction(i,mx,mn)
return mx,mn
mx,mn=myfunction(data,mx,mn)
if mn==1.E77 and mx==-1.E77 :mn,mx=1.E20,1.E20
return mn,mx
def mkevenlevels(n1,n2,nlev=10):
'''
Function : mkevenlevels
Description of Function:
    Return a series of evenly spaced levels going from n1 to n2;
    by default 10 intervals will be produced
Examples of use:
>>> vcs.mkevenlevels(0,100)
[0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
>>> vcs.mkevenlevels(0,100,nlev=5)
[0.0, 20.0, 40.0, 60.0, 80.0, 100.0]
>>> vcs.mkevenlevels(100,0,nlev=5)
[100.0, 80.0, 60.0, 40.0, 20.0, 0.0]
'''
import numpy.ma
lev=numpy.ma.arange(nlev+1,dtype=numpy.float)
factor=float(n2-n1)/nlev
lev=factor*lev
lev=lev+n1
return list(lev)
def mkscale(n1,n2,nc=12,zero=1):
'''
Function: mkscale
Description of function:
    This function returns a nice scale given a min and a max
option:
nc # Maximum number of intervals (default=12)
zero # Not all implemented yet so set to 1 but values will be:
-1: zero MUST NOT be a contour
0: let the function decide # NOT IMPLEMENTED
1: zero CAN be a contour (default)
2: zero MUST be a contour
Examples of Use:
>>> vcs.mkscale(0,100)
[0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
>>> vcs.mkscale(0,100,nc=5)
[0.0, 20.0, 40.0, 60.0, 80.0, 100.0]
>>> vcs.mkscale(-10,100,nc=5)
[-25.0, 0.0, 25.0, 50.0, 75.0, 100.0]
>>> vcs.mkscale(-10,100,nc=5,zero=-1)
[-20.0, 20.0, 60.0, 100.0]
>>> vcs.mkscale(2,20)
[2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0]
>>> vcs.mkscale(2,20,zero=2)
[0.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0]
'''
if n1==n2 : return [n1]
import numpy
nc=int(nc)
cscale=0 # ???? May be later
min, max=minmax(n1,n2)
if zero>1.:
if min>0. : min=0.
if max<0. : max=0.
rg=float(max-min) # range
delta=rg/nc # basic delta
# scale delta to be >10 and <= 100
lg=-numpy.log10(delta)+2.
il=numpy.floor(lg)
delta=delta*(10.**il)
max=max*(10.**il)
min=min*(10.**il)
if zero>-0.5:
if delta<=20.:
delta=20
elif delta<=25. :
delta=25
elif delta<=40. :
delta=40
elif delta<=50. :
delta=50
elif delta<=101. :
delta=100
first = numpy.floor(min/delta)-1.
else:
if delta<=20.:
delta=20
elif delta<=40. :
delta=40
elif delta<=60. :
delta=60
elif delta<=101. :
delta=100
first=numpy.floor(min/delta)-1.5
scvals=delta*(numpy.arange(2*nc)+first)
a=0
for j in range(len(scvals)):
if scvals[j]>min :
a=j-1
break
b=0
for j in range(len(scvals)):
if scvals[j]>=max :
b=j+1
break
if cscale==0:
cnt=scvals[a:b]/10.**il
else:
#not done yet...
        raise VCSUtilsError('ERROR scale not implemented in this function')
return list(cnt)
def __split2contiguous(levels):
""" Function __split2contiguous(levels)
takes list of split intervals and make it contiguous if possible
"""
tmplevs=[]
for il in range(len(levels)):
lv=levels[il]
if not (isinstance(lv,list) or isinstance(lv,tuple)):
raise VCSUtilsError,"Error levels must be a set of intervals"
        if not len(lv)==2: raise VCSUtilsError("Error intervals can only have 2 elements")
if il!=0:
lv2=levels[il-1]
if lv2[1]!=lv[0]:
raise VCSUtilsError,"Error intervals are NOT contiguous from "+str(lv2[1])+" to "+str(lv[0])
tmplevs.append(lv[0])
tmplevs.append(levels[-1][1])
return tmplevs
def mklabels(vals,output='dict'):
'''
Function : mklabels
Description of Function:
    This function takes levels and outputs strings for a nice display of the level values; it returns a dictionary unless output='list' is specified
Examples of use:
>>> a=vcs.mkscale(2,20,zero=2)
>>> vcs.mklabels (a)
{20.0: '20', 18.0: '18', 16.0: '16', 14.0: '14', 12.0: '12', 10.0: '10', 8.0: '8', 6.0: '6', 4.0: '4', 2.0: '2', 0.0: '0'}
>>> vcs.mklabels ( [5,.005])
{0.0050000000000000001: '0.005', 5.0: '5.000'}
>>> vcs.mklabels ( [.00002,.00005])
{2.0000000000000002e-05: '2E-5', 5.0000000000000002e-05: '5E-5'}
>>> vcs.mklabels ( [.00002,.00005],output='list')
['2E-5', '5E-5']
'''
import string,numpy.ma
if isinstance(vals[0],list) or isinstance(vals[0],tuple):
vals=__split2contiguous(vals)
vals=numpy.ma.asarray(vals)
nvals=len(vals)
ineg=0
ext1=0
ext2=0
# Finds maximum number to write
amax=float(numpy.ma.maximum(numpy.ma.absolute(vals)))
if amax==0 :
if string.lower(output[:3])=='dic' :
return {0:'0'}
else:
return ['0']
amin,amax=minmax(numpy.ma.masked_equal(numpy.ma.absolute(vals),0))
ratio=amax/amin
if int(numpy.ma.floor(numpy.ma.log10(ratio)))+1>6:
lbls=[]
for i in range(nvals):
if vals[i]!=0:
lbls.append(mklabels([vals[i]],output='list')[0])
else:
lbls.append('0')
if string.lower(output[:3])=='dic':
dic={}
for i in range(len(vals)):
dic[float(vals[i])]=lbls[i]
return dic
else:
return lbls
tmax=float(numpy.ma.maximum(vals))
if tmax<0. :
ineg=1
vals=-vals
amax=float(numpy.ma.maximum(vals))
# Number of digit on the left of decimal point
idigleft=int(numpy.ma.floor(numpy.ma.log10(amax)))+1
# Now determine the number of significant figures
idig=0
for i in range(nvals):
aa=numpy.ma.power(10.,-idigleft)
while abs(round(aa*vals[i])-aa*vals[i])>.000001 : aa=aa*10.
idig=numpy.ma.maximum(idig,numpy.ma.floor(numpy.ma.log10(aa*numpy.ma.power(10.,idigleft))))
idig=int(idig)
# Now does the writing part
lbls=[]
# First if we need an E format
if idigleft>5 or idigleft<-2:
if idig==1:
for i in range(nvals):
aa=int(round(vals[i]/numpy.ma.power(10.,idigleft-1)))
lbls.append(str(aa)+'E'+str(idigleft-1))
else:
for i in range(nvals):
aa=str(vals[i]/numpy.ma.power(10.,idigleft-1))
ii=1
if vals[i]<0. : ii=2
aa=string.ljust(aa,idig+ii)
aa=string.replace(aa,' ','0')
lbls.append(aa+'E'+str(idigleft-1))
elif idigleft>0 and idigleft>=idig: #F format
for i in range(nvals):
lbls.append(str(int(round(vals[i]))))
else:
for i in range(nvals):
ii=1
if vals[i]<0.: ii=2
ndig=idig+ii
rdig=idig-idigleft
if idigleft<0 : ndig=idig-idigleft+1+ii
aa='%'+str(ndig)+'.'+str(rdig)+'f'
aa=aa % vals[i]
lbls.append(aa)
if ineg:
vals=-vals
for i in range(len(lbls)):
lbls[i]='-'+lbls[i]
if string.lower(output[:3])=='dic':
dic={}
for i in range(len(vals)):
dic[float(vals[i])]=str(lbls[i])
return dic
else:
return lbls
def getcolors(levs,colors=range(16,240),split=1,white=240):
'''
Function : getcolors(levs,colors=range(16,240),split=1,white=240)
Description of Function:
For isofill/boxfill purposes
    Given a list of levels, this function returns the colors that best spread a list of "user-defined" colors (default is 16 to 239, i.e. 224 colors), always using the first and last color. Optionally the color range can be split into 2 equal domains to represent <0 and >0 values.
    If the colors are split and an interval goes from <0 to >0, that interval is assigned the "white" color
Usage:
levs : levels defining the color ranges
colors (default= range(16,240) ) : A list/tuple of the of colors you wish to use
split # parameter to split the colors between 2 equal domain:
one for positive values and one for negative values
0 : no split
1 : split if the levels go from <0 to >0
2 : split even if all the values are positive or negative
white (=240) # If split is on and an interval goes from <0 to >0 this color number will be used within this interval (240 is white in the default VCS palette color)
Examples of Use:
>>> a=[0.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0]
>>> vcs.getcolors (a)
[16, 41, 66, 90, 115, 140, 165, 189, 214, 239]
>>> vcs.getcolors (a,colors=range(16,200))
[16, 36, 57, 77, 97, 118, 138, 158, 179, 199]
>>> vcs.getcolors(a,colors=[16,25,15,56,35,234,12,11,19,32,132,17])
[16, 25, 15, 35, 234, 12, 11, 32, 132, 17]
>>> a=[-6.0, -2.0, 2.0, 6.0, 10.0, 14.0, 18.0, 22.0, 26.0]
>>> vcs.getcolors (a,white=241)
[72, 241, 128, 150, 172, 195, 217, 239]
>>> vcs.getcolors (a,white=241,split=0)
[16, 48, 80, 112, 143, 175, 207, 239]
'''
import string
if len(levs)==1: return [colors[0]]
if isinstance(levs[0],list) or isinstance(levs[0],tuple):
tmplevs=[levs[0][0]]
for i in range(len(levs)):
if i!=0:
if levs[i-1][1]*levs[i][0]<0.:
tmplevs[-1]=0.
tmplevs.append(levs[i][1])
levs=tmplevs
# Take care of the input argument split
if isinstance(split,str):
if split.lower()=='no' :
split=0
elif split.lower()=='force' :
split=2
else :
split=1
# Take care of argument white
if isinstance(white,str): white=string.atoi(white)
# Gets first and last value, and adjust if extensions
mn=levs[0]
mx=levs[-1]
# If first level is < -1.E20 then use 2nd level for mn
if levs[0]<=-9.E19 and levs[1]>0. : mn=levs[1]
# If last level is > 1.E20 then use 2nd to last level for mx
if levs[-1]>=9.E19 and levs[-2]<0. : mx=levs[-2]
# Do we need to split the palette in 2 ?
sep=0
if mx*mn<0. and split==1 : sep=1
if split==2 : sep=1
# Determine the number of colors to use
nc=len(levs)-1
## In case only 2 levels, i.e only one color to return
if nc==1:
if split>0 and levs[0]*levs[1]<=0: # Change of sign
return white
else:
return colors[0]
# Number of colors passed
ncols=len(colors)
k=0 #???
col=[]
# Counts the number of negative colors
nn=0 # initialize
#if (mn<=0.) and (levs[0]<=-9.E19) : nn=nn+1 # Ext is one more <0 box
    zr=0 # Flag set when zero is exactly one of the levels
for i in range(nc):
if levs[i]<0.: nn=nn+1 # Count nb of <0 box
if levs[i]==0.: zr=1 # Do we stop at zero ?
np=nc-nn # Nb of >0 box is tot - neg -1 for the blank box
    if mx*mn<0. and zr==0 :nn=nn-1 # we have a split cell between + and - so remove a negative box
# Determine the interval (in colors) between each level
cinc=(ncols-1.)/float(nc-1.)
# Determine the interval (in colors) between each level (neg)
cincn=0.
if nn!=0 and nn!=1 : cincn=(ncols/2.-1.)/float(nn-1.)
# Determine the interval (in colors) between each level (pos)
cincp=0
isplit=0
if np!=0 and np!=1 : cincp=(ncols/2.-1.)/float(np-1.)
if sep!=1:
for i in xrange(nc):
cv=i*cinc
col.append(colors[int(round(cv))])
else:
colp=[]
coln=[]
col=[]
for i in xrange(nc):
if levs[i] < 0 :
cv=i*cincn
# if nn==1 : cv=len(colors)/4. # if only 1 neg then use the middle of the neg colors
if (levs[i])*(levs[i+1])<0 :
col.append(white)
isplit=1
else:
col.append(colors[int(round(cv))])
else:
if np==1 : cv=3*len(colors)/4. # if only 1 pos then use the middle of the pos colors
cv=ncols/2.+(i-nn-isplit)*cincp
col.append(colors[int(round(cv))])
if col[0]==white and levs[0]<-9.E19: col[0]=colors[0]
return col
def generate_time_labels(d1,d2,units,calendar=cdtime.DefaultCalendar):
""" generate_time_labels(self,d1,d2,units,calendar=cdtime.DefaultCalendar)
returns a dictionary of time labels for an interval of time, in a user defined units system
d1 and d2 must be cdtime object, if not they will be assumed to be in "units"
Example:
lbls = generate_time_labels(cdtime.reltime(0,'months since 2000'),
cdtime.reltime(12,'months since 2000'),
'days since 1800',
)
    This generates a dictionary of nice time labels for the year 2000 in units of 'days since 1800'
lbls = generate_time_labels(cdtime.reltime(0,'months since 2000'),
cdtime.comptime(2001),
'days since 1800',
)
    This generates a dictionary of nice time labels for the year 2000 in units of 'days since 1800'
lbls = generate_time_labels(0,
12,
'months since 2000',
)
    This generates a dictionary of nice time labels for the year 2000 in units of 'months since 2000'
"""
if isinstance(d1,(int,long,float)):
d1=cdtime.reltime(d1,units)
if isinstance(d2,(int,long,float)):
d2=cdtime.reltime(d2,units)
d1r=d1.torel(units,calendar)
d2r=d2.torel(units,calendar)
d1,d2=minmax(d1r.value,d2r.value)
u=units.split('since')[0].strip().lower()
dic={}
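    # Estimate the span of [d1,d2] in days from the units; this rough "delta" picks the label
    # granularity (sub-hour, hourly, daily, monthly or yearly) in the branches below.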
if u in ['month','months']:
delta=(d2-d1)*30
elif u in ['year','years']:
delta=(d2-d1)*365
elif u in ['hours','hour']:
delta=(d2-d1)/24.
elif u in ['minute','minutes']:
delta=(d2-d1)/24./60.
elif u in ['second','seconds']:
        delta=(d2-d1)/24./60./60.
else:
delta=d2-d1
if delta<.042: # less than 1 hour
levs=mkscale(d1,d2)
for l in levs:
dic[l]=str(cdtime.reltime(l,units).tocomp(calendar))
elif delta<1: # Less than a day put a label every hours
d1=d1r.torel('hours since 2000').value
d2=d2r.torel('hours since 2000').value
d1,d2=minmax(d1,d2)
levs=mkscale(d1,d2)
for l in levs:
t=cdtime.reltime(l,'hours since 2000').tocomp(calendar)
if t.minute>30:
t=t.add(1,cdtime.Hour)
t.minute=0
t.second=0
tr=t.torel(units,calendar)
dic[tr.value]=str(t).split(':')[0]
elif delta<90: # Less than 3 month put label every day
d1=d1r.torel('days since 2000').value
d2=d2r.torel('days since 2000').value
d1,d2=minmax(d1,d2)
levs=mkscale(d1,d2)
for l in levs:
t=cdtime.reltime(l,'days since 2000').tocomp(calendar)
if t.hour>12:
t=t.add(1,cdtime.Day)
t.hour=0
t.minute=0
t.second=0
tr=t.torel(units,calendar)
dic[tr.value]=str(t).split(' ')[0]
elif delta<800: # ~ Less than 24 month put label every month
d1=d1r.torel('months since 2000').value
d2=d2r.torel('months since 2000').value
d1,d2=minmax(d1,d2)
levs=mkscale(d1,d2)
for l in levs:
t=cdtime.reltime(l,'months since 2000').tocomp(calendar)
if t.day>15:
t=t.add(1,cdtime.Month)
t.day=1
t.hour=0
t.minute=0
t.second=0
tr=t.torel(units,calendar)
dic[tr.value]='-'.join(str(t).split('-')[:2])
else: # ok lots of years, let auto decide but always puts at Jan first
d1=d1r.torel('years since 2000').value
d2=d2r.torel('years since 2000').value
d1,d2=minmax(d1,d2)
levs=mkscale(d1,d2)
for l in levs:
t=cdtime.reltime(l,'years since 2000').tocomp(calendar)
if t.month>6:
t=t.add(1,cdtime.Year)
t.month=1
t.day=1
t.hour=0
t.minute=0
t.second=0
tr=t.torel(units,calendar)
dic[tr.value]=str(t).split('-')[0]
return dic
|
# -*- coding: utf-8 -*-
#
# Copyright © 2014 Petr Zelenin (po.zelenin@gmail.com)
#
# Distributed under terms of the MIT license.
import hashlib
import os
import unittest
import transaction
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from zope.sqlalchemy import ZopeTransactionExtension
from pyramid_sacrud.exceptions import SacrudMessagedException
from . import (
add_fixture,
Base,
Gallery, GalleryItem, GalleryItemM2M,
TEST_DATABASE_CONNECTION_STRING,
)
DBSession = scoped_session(sessionmaker(extension=ZopeTransactionExtension()))
def add_data(session):
galleries = [
{'pk': 1, 'name': 'Best gallery',
'description': 'Full description of gallery'},
{'pk': 2, 'name': 'Another best gallery',
'description': 'Another full description of gallery'},
]
add_fixture(Gallery, galleries, session)
items = []
gallery_items_m2m = []
for gallery in galleries:
for x in xrange(1, 10):
image = '{name}-{salt}.jpg'.format(name=x, salt=gallery['pk'])
image_abspath = GalleryItem.get_upload_path()
image_hash_base = os.path.join(image_abspath, image)
image_hash = hashlib.md5(image_hash_base).hexdigest()
items.append({
'image': image,
'description': 'This is image with hash "%s"' % image_hash
})
gallery_items_m2m.append({
'gallery_id': gallery['pk'],
'item_id': image_hash,
})
add_fixture(GalleryItem, items, session)
add_fixture(GalleryItemM2M, gallery_items_m2m, session)
class TestGallery(unittest.TestCase):
def setUp(self):
engine = create_engine(TEST_DATABASE_CONNECTION_STRING)
DBSession.configure(bind=engine)
Base.metadata.create_all(engine)
with transaction.manager:
add_data(DBSession)
def tearDown(self):
DBSession.remove()
def test_mixins_attrs(self):
"""Check mixins attrs auto apply to classes."""
self.assertEqual(Gallery.get_pk(), 'pk')
self.assertEqual(Gallery.get_db_pk(), 'id')
self.assertEqual(Gallery.__tablename__, 'gallery')
self.assertEqual(GalleryItem.get_pk(), 'pk')
self.assertEqual(GalleryItem.get_db_pk(), 'id')
self.assertEqual(GalleryItem.__tablename__, 'galleryitem')
self.assertEqual(GalleryItemM2M.__tablename__, 'galleryitemm2m')
def test_instances_attrs(self):
"""Check attrs and methods available only for instances."""
gallery = DBSession.query(Gallery).first()
self.assertEqual(gallery.__repr__(), gallery.name)
self.assertEqual(gallery.get_val_pk(), 1)
image = DBSession.query(GalleryItem).filter(GalleryItem.pk == 1).one()
self.assertIn(image.image_hash, image.__repr__())
def test_mixins_fks(self):
"""Check GalleryItemM2MMixin has ForeignKeys to GalleryMixin
and GalleryItemMixin."""
self.assertTrue(hasattr(GalleryItemM2M, 'gallery_id'))
self.assertTrue(hasattr(GalleryItemM2M, 'item_id'))
def test_access_by_relations(self):
"""Check relations between GalleryMixin and GalleryItemMixin."""
gallery = DBSession.query(Gallery).first()
self.assertEqual(len(gallery.items), 9)
def test_unique_image_hash(self):
"""Check of deny to add non-unique image_hash."""
image = GalleryItem(image='1-1.jpg')
DBSession.add(image)
with self.assertRaises(SacrudMessagedException) as cm:
DBSession.query(GalleryItem).all()
self.assertIn('This image was uploaded earlier.', str(cm.exception))
|
import argparse
import csv
import os
import re
import numpy as np
import tensorflow as tf
from PIL import Image
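# Shard filenames are expected to follow the usual '<prefix>-<index>-of-<total>' pattern,
# e.g. 'train-00001-of-00128'; the first capture group is used to sort and name output dirs.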
FILENAME_PATTERN = re.compile(r'.+-(\d+)-of-(\d+)')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='tool that takes tfrecord files and extracts all images + labels from it')
parser.add_argument('tfrecord_dir', help='path to directory containing tfrecord files')
parser.add_argument('destination_dir', help='path to dir where resulting images shall be saved')
parser.add_argument('stage', help='stage of training these files are for [e.g. train]')
args = parser.parse_args()
tfrecord_files = os.listdir(args.tfrecord_dir)
tfrecord_files = sorted(tfrecord_files, key=lambda x: int(FILENAME_PATTERN.match(x).group(1)))
with open(os.path.join(args.destination_dir, '{}.csv'.format(args.stage)), 'w') as label_file:
writer = csv.writer(label_file, delimiter='\t')
for tfrecord_file in tfrecord_files:
tfrecord_filename = os.path.join(args.tfrecord_dir, tfrecord_file)
file_id = FILENAME_PATTERN.match(tfrecord_file).group(1)
dest_dir = os.path.join(args.destination_dir, args.stage, file_id)
os.makedirs(dest_dir, exist_ok=True)
record_iterator = tf.python_io.tf_record_iterator(path=tfrecord_filename)
for idx, string_record in enumerate(record_iterator):
example = tf.train.Example()
example.ParseFromString(string_record)
labels = example.features.feature['image/class'].int64_list.value
img_string = example.features.feature['image/encoded'].bytes_list.value[0]
file_name = os.path.join(dest_dir, '{}.png'.format(idx))
with open(file_name, 'wb') as f:
f.write(img_string)
label_file_data = [file_name]
label_file_data.extend(labels)
writer.writerow(label_file_data)
print("recovered {:0>6} files".format(idx), end='\r')
|
#################################################################
### Chain Reactor Game Demo For Technites #
### #
#################################################################
grid=[[[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0]],
[[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0]],
[[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0]],
[[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0]],
[[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0]],
[[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0]],
[[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0]],
[[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0]],
[[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0]],
]
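# Capacity of each cell before it splits: 2 in the corners, 3 on the edges, 4 in the
# interior, i.e. the number of orthogonal neighbours of that cell.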
free_dir_grid=[
[2,3,3,3,3,3,3,2],
[3,4,4,4,4,4,4,3],
[3,4,4,4,4,4,4,3],
[3,4,4,4,4,4,4,3],
[3,4,4,4,4,4,4,3],
[3,4,4,4,4,4,4,3],
[3,4,4,4,4,4,4,3],
[2,3,3,3,3,3,3,2]
]
def display():
"Display the whole game grid"
global grid
print "Game Grid"
for i in range(8):
for j in range(8):
if grid[i][j][0]==0 and grid[i][j][1]==0:
print "_",
else:
if grid[i][j][1]==0:
print grid[i][j][0]*"R",
else:
print grid[i][j][1]*"G",
print
print
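# A player has won once the opponent has no atoms left on the board (one of the two sums
# is zero). This is only meaningful after both players have moved, which the main loop
# guards with the `turns` counter.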
def winner():
"Check if any player won the game"
global grid
sg=0
sr=0
for i in range(8):
for j in range(8):
sr+=grid[i][j][0]
sg+=grid[i][j][1]
if sr*sg>0:
return 0
return 1
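# chain_reaction: when t == 0 the origin cell explodes (it is emptied and one atom is pushed
# to each orthogonal neighbour); when t != 0 the cell receives one atom, captures any opponent
# atoms in it, and recursively explodes if it reaches its capacity in free_dir_grid.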
def chain_reaction(i,j,p,t=0):
"Splits the current cell elements to neighbouring elements"
if i<0 or i>7 or j<0 or j>7:
return
global grid
if t==0:
grid[i][j][p]=0
else:
grid[i][j][p]+=1
grid[i][j][(p+1)%2]=0
if grid[i][j][p]==free_dir_grid[i][j]:
chain_reaction(i,j,p)
if t==0:
chain_reaction(i-1,j,p,1)
chain_reaction(i,j-1,p,1)
chain_reaction(i,j+1,p,1)
chain_reaction(i+1,j,p,1)
if __name__=="__main__":
i=0
player=["Red","Green"]
turns=0
print "Welcome to chain reation game"
display()
while True:
x,y=map(int,raw_input("It's player "+player[i]+"'s move: ").split())
if x<0 or y<0:
break
if x>7 or y>7:
print "Invalid cell "
continue
if grid[x][y][(i+1)%2]!=0: #if opponent has already occupied the cell
print "Try other cell "
continue
grid[x][y][i]+=1
display()
if grid[x][y][i]==free_dir_grid[x][y]: #Cell needs to be split
chain_reaction(x,y,i)
display()
if turns==2:
if winner(): #Check if any player won
print player[i]+ " won the game"
break
if turns<2:
turns+=1
#Change player in next turn
i+=1
i%=2
|
import re, shlex
import math
hextag = re.compile('{{(.*?)}}',flags=re.MULTILINE|re.DOTALL)
class HexExpression:
delimiters = (' ', '=','-','+','*','(',')')
quotes = ('\"','\'')
def __init__( self, tokens ):
self.operations = {
'=':self.assign,
'-':self.minus,
'+':self.plus,
'*':self.multi,
}
if isinstance(tokens,str):
self.tokens = [ tok for tok in self.tokenize( tokens ) if tok != '(' and tok !=')' ]
else:
self.tokens = [ tok for tok in tokens if tok != '(' and tok !=')' ]
self.value = self.tokens[0]
def execute( self ):
if self.value in self.operations:
self.tokens = HexExpression(self.tokens[1:]).execute()
#self.tokens = HexExpression(self.tokens[2:]).execute()
print(self.tokens)
#return self.operations[ self.value ]( self.left, self.right )
else:
return self.tokens
def assign( self, left, right ):
print ('assign: ' + str(left) + ' ' + str(right))
return {str(left):right}
def minus( self, left, right ):
print ('minus: ' + str(left) + ' ' + str(right))
return left - right
def plus( self, left, right ):
print ('plus: ' + str(left) + ' ' + str(right))
return left + right
def multi( self, left, right ):
print ('multi: ' + str(left) + ' ' + str(right))
return left*right
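    # Split the raw expression on the delimiter characters while leaving quoted spans intact,
    # then coerce numeric and boolean tokens via evaluate().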
def tokenize( self, string ):
acc, word, inside = [], '', False
for char in string:
if char in self.delimiters and not inside:
if word.strip(): acc.append(word)
if char.strip(): acc.append(char)
word = ''
elif char in self.quotes:
inside = not inside
word += char
else:
word += char
if word.strip(): acc.append(word)
return [ self.evaluate(tok) for tok in acc ]
def evaluate( self, token ):
token = token.strip('. ')
if token.replace('.','',1).isdigit():
if '.' in token:
return float(token)
else:
return int(token)
elif token.lower() == 'true':
return True
elif token.lower() == 'false':
return False
else:
return token
def collapse( self, tokens ):
pass
if __name__=='__main__':
exp = HexExpression('( = this (+ (+ 2 3) (- 4 3))')
print(exp.tokens)
result = exp.execute()
print(result)
|
import pytest
import json
class TestRbdMirrors(object):
@pytest.mark.no_docker
def test_rbd_mirror_is_installed(self, node, host):
assert host.package("rbd-mirror").is_installed
def test_rbd_mirror_service_enabled_and_running(self, node, host):
service_name = "ceph-rbd-mirror@rbd-mirror.{hostname}".format(
hostname=node["vars"]["inventory_hostname"]
)
s = host.service(service_name)
assert s.is_enabled
assert s.is_running
def test_rbd_mirror_is_up(self, node, host, setup):
hostname = node["vars"]["inventory_hostname"]
cluster = setup["cluster_name"]
container_binary = setup["container_binary"]
daemons = []
if node['docker']:
container_exec_cmd = '{container_binary} exec ceph-rbd-mirror-{hostname}'.format( # noqa E501
hostname=hostname, container_binary=container_binary)
else:
container_exec_cmd = ''
hostname = node["vars"]["inventory_hostname"]
cluster = setup['cluster_name']
cmd = "sudo {container_exec_cmd} ceph --name client.bootstrap-rbd-mirror --keyring /var/lib/ceph/bootstrap-rbd-mirror/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format( # noqa E501
container_exec_cmd=container_exec_cmd,
hostname=hostname,
cluster=cluster
)
output = host.check_output(cmd)
status = json.loads(output)
daemon_ids = [i for i in status["servicemap"]["services"]
["rbd-mirror"]["daemons"].keys() if i != "summary"]
for daemon_id in daemon_ids:
daemons.append(status["servicemap"]["services"]["rbd-mirror"]
["daemons"][daemon_id]["metadata"]["hostname"])
assert hostname in daemons
|
import pandas as pd
from sklearn import model_selection
from sklearn.linear_model import LogisticRegression
def loadData():
col_names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
dataframe = pd.read_csv("./pima-indians-diabetes.data.txt", names = col_names)
array = dataframe.values
X = array[:, 0:8]
Y = array[:, 8]
return X, Y
def trainAndTestValidation(X, Y):
X_train, X_test, Y_train, Y_test = model_selection.train_test_split(X, Y, test_size=0.33, random_state=7)
model = LogisticRegression()
model.fit(X_train, Y_train)
result = model.score(X_test, Y_test)
print("Train And Test Sets. Accuracy: {0}".format(result*100.0))
def kFoldValidation(X, Y):
kfold = model_selection.KFold(n_splits=10, random_state=7)
model = LogisticRegression()
result = model_selection.cross_val_score(model, X, Y, cv=kfold)
print("Kfold. Accuracy: {0}, Variance: {1}".format(result.mean()*100.0, result.std()*100.0))
def leaveOneOutValidation(X, Y):
model = LogisticRegression()
loocv = model_selection.LeaveOneOut()
result = model_selection.cross_val_score(model, X, Y, cv=loocv)
print("LOOCV. Accuracy: {0}, Variance: {1}".format(result.mean()*100.0, result.std()*100.0))
def repeatedRandomTestTrainValidation(X, Y):
kfold = model_selection.ShuffleSplit(n_splits=10, test_size=0.33, random_state=7)
model = LogisticRegression()
result = model_selection.cross_val_score(model, X, Y, cv=kfold)
print("Random Test Train Sets. Accuracy: {0}, Variance: {1}".format(result.mean()*100.0, result.std()*100.0))
def run():
X, Y = loadData()
trainAndTestValidation(X, Y)
kFoldValidation(X, Y)
leaveOneOutValidation(X, Y)
repeatedRandomTestTrainValidation(X, Y)
if __name__ == '__main__':
run()
|
import re
from PIL import Image, ImageOps
from io import BytesIO
from django.contrib.auth.models import User
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.http import HttpResponseForbidden
from django.shortcuts import get_object_or_404
from django.core.files.uploadedfile import SimpleUploadedFile
from forum.models import Topic
MENTION_REGEX = re.compile(r'@(\w+)', re.M)
IMAGE_LARGE = 144
IMAGE_MEDIUM = 96
IMAGE_SMALL = 48
NUM_PER_PAGE = 20
def _thumbnail(upload, size, fmt):
img = ImageOps.fit(upload, size, Image.ANTIALIAS)
temp = BytesIO()
img.save(temp, fmt, quality=95)
temp.seek(0)
return temp
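# Build three square thumbnails (144, 96 and 48 px) of the uploaded image and return them
# wrapped in SimpleUploadedFile objects (large, medium, small).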
def create_thumbnail(src, new_name, ext):
upload = Image.open(BytesIO(src.read()))
fmt = src.content_type.split('/')[-1]
large = _thumbnail(upload, (IMAGE_LARGE, IMAGE_LARGE), fmt)
filename_l = "%s_l.%s" % (new_name, ext)
large_file = SimpleUploadedFile(filename_l, large.read(), content_type=src.content_type)
medium = _thumbnail(upload, (IMAGE_MEDIUM, IMAGE_MEDIUM), fmt)
filename_m = "%s_m.%s" % (new_name, ext)
medium_file = SimpleUploadedFile(filename_m, medium.read(), content_type=src.content_type)
small = _thumbnail(upload, (IMAGE_SMALL, IMAGE_SMALL), fmt)
filename_s = "%s_s.%s" % (new_name, ext)
small_file = SimpleUploadedFile(filename_s, small.read(), content_type=src.content_type)
return large_file, medium_file, small_file
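# Compute the window of page numbers to display around the current page: at most 2*count+1
# pages, clamped to the valid range. E.g. get_pagination(5, 20, 2) -> [3, 4, 5, 6, 7].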
def get_pagination(current_page, num_pages, count):
page_list = []
show_pages = 2*count+1
if show_pages >= num_pages:
page_list.extend(range(1, num_pages+1))
elif current_page - count < 1:
page_list.extend(range(1, show_pages+1))
elif current_page + count > num_pages:
page_list.extend(range(num_pages+1-show_pages, num_pages+1))
else:
page_list.extend(range(current_page-count, current_page+count+1))
return page_list
def topic_pagination(page, topics):
paginator = Paginator(topics, NUM_PER_PAGE)
try:
topic_list = paginator.page(page)
except PageNotAnInteger:
topic_list = paginator.page(1)
except EmptyPage:
topic_list = paginator.page(paginator.num_pages)
page_list = get_pagination(topic_list.number, paginator.num_pages, 2)
return topic_list, page_list
def author_required(view_func):
def _wrapped_view_func(request, *args, **kwargs):
topic_id = kwargs.get('topic_id')
topic = get_object_or_404(Topic, id=topic_id)
if topic.author == request.user:
return view_func(request, *args, **kwargs)
else:
return HttpResponseForbidden()
return _wrapped_view_func
def get_metioned_user(sender, markdown):
mentioned = set(re.findall(MENTION_REGEX, markdown)) - set([sender.username])
# mentioned = set(re.findall(MENTION_REGEX, markdown))
if mentioned:
return User.objects.filter(username__in=mentioned)
return None
|
from __future__ import unicode_literals
from django.core.exceptions import FieldError, ValidationError
from django.db.models import Model
from django.test import SimpleTestCase, TestCase
from dynamic_choices.db.models import DynamicChoicesForeignKey
from .models import ALIGNMENT_EVIL, ALIGNMENT_GOOD, Enemy, Master, Puppet
class DefinitionValidationTest(SimpleTestCase):
def test_missing_method(self):
with self.assertRaises(FieldError):
class MissingChoicesCallbackModel(Model):
field = DynamicChoicesForeignKey('self', choices='missing_method')
class Meta:
app_label = 'dynamic_choices'
def test_callable(self):
class CallableChoicesCallbackModel(Model):
field = DynamicChoicesForeignKey('self', choices=lambda qs: qs)
class Meta:
app_label = 'dynamic_choices'
class DynamicForeignKeyTests(TestCase):
def setUp(self):
self.good_master = Master.objects.create(alignment=ALIGNMENT_GOOD)
self.evil_master = Master.objects.create(alignment=ALIGNMENT_EVIL)
def test_valid_value(self):
good_puppet = Puppet(master=self.good_master, alignment=ALIGNMENT_GOOD)
good_puppet.full_clean()
good_puppet.save()
evil_puppet = Puppet(master=self.evil_master, alignment=ALIGNMENT_EVIL)
evil_puppet.full_clean()
evil_puppet.save()
enemy = Enemy(puppet=evil_puppet, enemy=good_puppet, because_of=self.good_master)
enemy.full_clean(exclude=['since'])
def test_invalid_value(self):
puppet = Puppet(master=self.good_master, alignment=ALIGNMENT_EVIL)
self.assertRaises(ValidationError, puppet.full_clean)
class DynamicOneToOneFieldTests(TestCase):
fixtures = ['dynamic_choices_test_data']
def setUp(self):
self.good_puppet = Puppet.objects.get(alignment=ALIGNMENT_GOOD)
self.evil_puppet = Puppet.objects.get(alignment=ALIGNMENT_EVIL)
def test_valid_value(self):
self.evil_puppet.secret_lover = self.good_puppet
self.evil_puppet.full_clean()
self.evil_puppet.save()
self.assertEqual(self.good_puppet.secretly_loves_me, self.evil_puppet)
self.good_puppet.secret_lover = self.evil_puppet
self.good_puppet.full_clean()
def test_invalid_value(self):
self.evil_puppet.secret_lover = self.good_puppet
self.evil_puppet.save()
self.good_puppet.secret_lover = self.good_puppet
self.assertRaises(
ValidationError, self.good_puppet.full_clean,
"Since the evil puppet secretly loves the good puppet the good puppet can only secretly love the bad one."
)
|
#!/usr/bin/env python
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
import time_benchmark
import os
import sys
import json
import re
import time
import numpy as np
import ConfigParser
import subprocess
from tornado import template
from argparse_plus import ArgumentParserPlus
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
from Bio.Motif import Motif
from gibbslib.batch_experiment import *
from gibbslib.simple_logging import *
from gibbslib.gimsan_base import GimsanJob
from gibbslib.gimsan_exception import *
class FinderResult():
def __init__(self, finder_meta, nullset_size, statsig_dir, outdir, gimsan_home):
self.logger = logging.getLogger(self.__class__.__name__)
self.finder_id = finder_meta['finder_id']
self.finder_id_alt = self.finder_id.replace('_', '-')
self.finder_outdir = finder_meta['finder_outdir'] #per-finder
self.width = finder_meta['width']
self.nullset_size = nullset_size
self.statsig_dir = statsig_dir
self.outdir = outdir #per-template
self.load_template_json()
self.pval_r_path = os.path.join(gimsan_home, 'misc', "conf_pval_only.R")
#results
self.pvalue_comment = None
self.kmer_lst = None
self.kmer_filename = None
self.logowidth = 5
self.weblogo_basename = None
self.weblogo_revcompl_basename = None
self.coldep_outfile = None
self.coldep_num_pairs = None
def construct_weblogo(self, weblogo_filename, weblogo_revcompl_filename):
self.weblogo_basename = os.path.basename(weblogo_filename)
self.weblogo_revcompl_basename = os.path.basename(weblogo_revcompl_filename)
motif = Motif(alphabet=IUPAC.unambiguous_dna)
for kmer in self.kmer_lst:
motif.add_instance(Seq(kmer, motif.alphabet))
logowidth_normal = self.construct_weblogo_helper(weblogo_filename, motif)
#reverse complement
motif_revcompl = motif.reverse_complement()
logowidth_revcompl = self.construct_weblogo_helper(weblogo_revcompl_filename, motif_revcompl)
self.logowidth = max(self.logowidth, logowidth_normal, logowidth_revcompl)
def construct_weblogo_helper(self, weblogo_filename, motif):
        logowidth = (20.0/45.0) * self.width + 2
motif.weblogo(weblogo_filename, logowidth=logowidth)
#return logowidth * 50 #width to pixel conversion
return logowidth
def load_json(self, filename):
json_dct = {}
with open(filename, 'rb') as fh:
json_dct = json.loads(fh.read())
if json_dct is None or "kmers" not in json_dct:
            raise InvalidMotifJsonFileError("File does not seem to be a valid json file for motif results: %s" % filename)
return json_dct
def get_pvalue_comment_from_rout(self):
rout_filename = self.get_rout_filename()
with open(rout_filename, 'rb') as fh:
for ln in fh:
ln = unicode(ln, 'utf-8')
if 'MLE of the p-value' in ln:
self.pvalue_comment = ln.strip()
break
if not self.pvalue_comment:
raise ParsingMotifResultError('Cannot find P-value comment: %s' % rout_filename)
def load_template_json(self):
json_filename = os.path.join(self.finder_outdir, 'motif-00000.stdout')
self.template_json = self.load_json(json_filename)
def extract_and_write_kmers(self):
self.kmer_lst = [l[1] for l in self.template_json['kmers'].values()]
self.kmer_filename = os.path.join(self.statsig_dir, '%s.kmers' % self.finder_id)
self.write_kmers_file(self.kmer_lst, self.kmer_filename)
def write_kmers_file(self, curr_kmer_lst, curr_kmer_filename):
self.logger.info('Writing kmers: %s' % curr_kmer_filename)
with open(curr_kmer_filename, 'wb') as fh:
for kmer in curr_kmer_lst:
print('>FASTA header', file=fh)
print(kmer, file=fh)
def get_rscript_text(self, template_score, null_scores_path):
params = {
'pval_r_path' : self.pval_r_path,
'null_scores_path' : null_scores_path,
'template_score' : template_score,
}
rscript_text = """
source("%(pval_r_path)s")
library(MASS)
sample<-scan("%(null_scores_path)s")
getConfPvalLat(%(template_score)s, sample, conf=0.1, mins=7, maxs=200)
""" % params
return rscript_text
def get_rout_filename(self):
return os.path.join(self.statsig_dir, '%s.r_out' % self.finder_id)
def extract_and_write_scores(self):
score_lst = [None] * (self.nullset_size + 1)
for i in range(self.nullset_size+1):
if i == 0:
json_filename = os.path.join(self.finder_outdir, 'motif-00000.stdout')
else:
json_filename = os.path.join(self.finder_outdir, 'null-%05d.stdout' % i )
json_dct = self.load_json(json_filename)
score_lst[i] = json_dct['score_ranking_runs']
#write gm_width008.scores file
nullscores_filename = os.path.join(self.statsig_dir, '%s.scores' % self.finder_id)
self.logger.info('Writing null scores: %s' % nullscores_filename)
with open(nullscores_filename, 'wb') as fh:
            print('\n'.join([str(s) for s in score_lst[1:]]), file=fh)
#write R file
rscript_text = self.get_rscript_text(score_lst[0], nullscores_filename)
rscript_filename = os.path.join(self.statsig_dir, '%s.R' % self.finder_id)
self.logger.info('Writing R script: %s' % rscript_filename)
with open(rscript_filename, 'wb') as fh:
print(rscript_text, file=fh)
return rscript_filename
class GimsanResultManager(GimsanJob):
def __init__(self, name, template_file, outdir, config, conf_file, is_overwrite=False, dryrun=False, verbose=False):
super(GimsanResultManager, self).__init__(outdir, config)
self.logger = logging.getLogger(self.__class__.__name__)
self.name = name
self.template_file = template_file
self.outdir = outdir
self.conf_file = conf_file
self.verbose = verbose
self.dryrun = dryrun
self.is_overwrite = is_overwrite
self.css_outdir = os.path.join(self.outdir, 'css')
self.js_outdir = os.path.join(self.outdir, 'js')
self.statsig_dir = os.path.join(self.outdir, 'statsig')
self.r_path = os.path.expanduser(self.config.get('result', 'r_path'))
self.check_result_path()
self.get_finders()
#column-dependency
self.column_dependency_exec = os.path.join(self.gimsan_home, 'column_dependency_app/column_dependency.out')
if not os.path.isfile(self.column_dependency_exec):
raise Exception('Column-Dependency executable missing: %s' % self.column_dependency_exec)
def check_result_path(self):
if not os.path.isdir(self.outdir):
raise MissingDirError('Missing output directory: %s' % self.outdir)
if not os.path.isdir(self.statsig_dir):
os.mkdir(self.statsig_dir)
elif not self.is_overwrite:
raise AlreadyExistOutputDirError('Directory already exist: %s' % self.statsig_dir)
if not os.path.isdir(self.css_outdir):
os.mkdir(self.css_outdir)
elif not self.is_overwrite:
raise AlreadyExistOutputDirError('Directory already exist: %s' % self.css_outdir)
if not os.path.isdir(self.js_outdir):
os.mkdir(self.js_outdir)
elif not self.is_overwrite:
raise AlreadyExistOutputDirError('Directory already exist: %s' % self.js_outdir)
def get_all_finder_meta(self):
lst = []
for width in self.width_lst:
lst.append(self.get_gibbsmarkov_meta(width))
return lst
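    # Pipeline: dump per-finder scores and an R script, run R in parallel to estimate p-values,
    # parse the p-value comments back out, render weblogos (forward and reverse-complement),
    # then compute column dependencies for each finder result.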
def generate_finder_result_list(self):
finder_meta_lst = self.get_all_finder_meta()
finder_res_lst = []
for finder_meta in finder_meta_lst:
finder_res_lst.append(FinderResult(finder_meta, self.nullset_size, self.statsig_dir, self.outdir, self.gimsan_home))
rscript_jobs = []
for finder_res in finder_res_lst:
rscript_filename = finder_res.extract_and_write_scores()
cmd = "%s -f %s &>%s" % (self.r_path, rscript_filename, finder_res.get_rout_filename())
job = {
'cmd' : cmd,
'job_id' : rscript_filename,
}
rscript_jobs.append(job)
#run R in parallel
if self.dryrun:
for job in rscript_jobs:
self.logger.info(job['cmd'])
else:
import multiprocessing
pool = multiprocessing.Pool(processes=self.num_proc)
pool.map(subprocess_exec_func, rscript_jobs)
#pvalue
for finder_res in finder_res_lst:
finder_res.get_pvalue_comment_from_rout()
#weblogo
img_dir = os.path.join(self.outdir, 'images')
if not os.path.isdir(img_dir):
os.mkdir(img_dir)
for finder_res in finder_res_lst:
finder_res.extract_and_write_kmers()
weblogo_filename = os.path.join(img_dir, '%s.png' % finder_res.finder_id)
weblogo_revcompl_filename = os.path.join(img_dir, '%s.revcompl.png' % finder_res.finder_id)
finder_res.construct_weblogo(weblogo_filename, weblogo_revcompl_filename)
#column dependency
self.compute_column_dependency(finder_res_lst)
return finder_res_lst
def compute_column_dependency(self, finder_res_lst):
coldep_dir = os.path.join(self.outdir, 'coldep')
if not os.path.isdir(coldep_dir):
os.mkdir(coldep_dir)
if self.config.has_option('column_dependency', 'randseed'):
randseed_param = '-s %d' % self.config.getint('column_dependency', 'randseed')
else:
randseed_param = ''
job_lst = []
for finder_res in finder_res_lst:
coldep_fileroot = '%s.coldep' % finder_res.finder_id
stdout_fn = coldep_fileroot + ".txt"
stderr_fn = coldep_fileroot + ".stderr"
finder_res.coldep_outfile = os.path.join('coldep', stdout_fn)
cmd = "%s -fsa %s %s 1>%s 2>%s" % (
self.column_dependency_exec,
finder_res.kmer_filename,
randseed_param,
os.path.join(coldep_dir, stdout_fn),
os.path.join(coldep_dir, stderr_fn))
job = {
'cmd' : cmd,
'job_id' : coldep_fileroot,
}
job_lst.append(job)
#run R in parallel
if self.dryrun:
for job in job_lst:
self.logger.info(job['cmd'])
else:
import multiprocessing
pool = multiprocessing.Pool(processes=self.num_proc)
pool.map(subprocess_exec_func, job_lst)
for finder_res in finder_res_lst:
full_path = os.path.join(self.outdir, finder_res.coldep_outfile)
with open(full_path, 'rb') as fh:
for ln in fh:
m = re.search(r'for statistically significant pairs \((\d+) pairs\)', ln)
if m:
finder_res.coldep_num_pairs = int(m.group(1))
break
if finder_res.coldep_num_pairs is None:
raise Exception('Unable to find statistically significant pairs from %s' % full_path)
if finder_res.coldep_num_pairs == 0:
finder_res.coldep_btn_style = 'btn-default'
else:
finder_res.coldep_btn_style = 'btn-success'
def generate_html(self):
finder_res_lst = self.generate_finder_result_list()
gm_finder0 = self.finder_lst[0]
tp = None
out_tp_file = os.path.join(self.gimsan_home, 'misc/output_template.html')
with open(out_tp_file) as fh:
tp = template.Template(fh.read())
if tp is None:
raise MissingFileError('Unable to generate HTML from template: %s' % out_tp_file)
output_html = tp.generate(
experiment_name = self.name,
config_filename = os.path.join('../meta', os.path.basename(self.conf_file)),
fsa_filename = os.path.basename(self.template_file),
nullset_size = self.nullset_size,
per_seq_model_comment = gm_finder0.get_per_seq_model(),
stop_crit_comment = gm_finder0.get_stop_crit(),
rapid_conv = gm_finder0.get_rapid_conv(),
double_strand_comment = 'yes' if gm_finder0.get_double_strand() else 'no',
markov_order = gm_finder0.markov_order,
genomic_file_comment = self.genome_filename if self.genome_filename else 'input FASTA file',
finder_res_lst = finder_res_lst,
)
output_html_file = os.path.join(self.outdir, 'output.html')
self.logger.info('Writing HTML file to: %s' % output_html_file)
with open(output_html_file, 'wb') as fh:
print(output_html, file=fh)
self.copy_html_assets()
def copy_html_assets(self):
lst = [
(os.path.join(self.gimsan_home, 'misc', 'css', "bootstrap.min.css"), self.css_outdir),
(os.path.join(self.gimsan_home, 'misc', 'js', "bootstrap.min.js"), self.js_outdir),
(os.path.join(self.gimsan_home, 'misc', 'js', "jquery-1.10.2.min.js"), self.js_outdir),
]
for l in lst:
os.system('cp -v %s %s' % (l[0], l[1]))
def subprocess_exec_func(job):
import logging
logging.info('(%s): %s' % (job['job_id'], job['cmd']))
ret_code = subprocess.call(job['cmd'], shell=True)
if __name__ == '__main__':
benchmark = time_benchmark.Benchmark()
#defaults
description = """
Generate GIMSAN result
"""
epilog = """
Examples:
%(prog)s --dir=testout -v
"""
argp = ArgumentParserPlus(description=description, epilog=epilog)
argp.add_argument('--dir', required=True, help="main output directory used with gimsan_submit.py")
argp.add_argument('--overwrite', action="store_true", help="")
argp.add_argument('--dryrun', action="store_true", help="")
argp.add_argument('-v', '--verbose', action='store_true')
args = argp.parse_args()
import logging
if args.verbose:
log_level = logging.DEBUG
else:
log_level = logging.INFO
simple_stream_logging(log_level)
args.dir = os.path.expanduser(args.dir)
conf_file = BatchExperiment.get_conf_file(args.dir)
batch_exp = BatchExperiment(conf_file, args.overwrite)
for exp in batch_exp.experiments:
gr_manager = GimsanResultManager(
exp['name'],
exp['fasta_file'],
exp['outdir'],
batch_exp.config,
batch_exp.conf_file,
is_overwrite = args.overwrite,
dryrun = args.dryrun,
verbose = args.verbose)
gr_manager.generate_html()
benchmark.print_time(sys.stderr)
|
import client_conn
import filelist
import rsa
import hashlib
import getpass
import log
import os
import signal
import sys
# filename of list
mylist = "list"
password = None
def print_help():
"""
Print the available command list.
"""
print()
print()
if client_conn.is_login():
print("Login as: "+ client_conn.username())
print("Help:")
print("General:")
print(" c create a new account")
print(" i login")
print(" o logout")
print(" p print this menu")
print(" q logout and quit")
print()
print("Files:")
print(" d download file")
print(" l list file")
print(" r delete file")
print(" s share file")
print(" u upload file")
print()
def event_loop():
"""
Main input event loop.
"""
# Get input
print()
print("Command (p for help): ", end="")
command = input().strip()
# General
if command == "c":
ui_create_account()
elif command == "i":
ui_login()
elif command == "o":
ui_logout()
elif command == "p":
print_help()
elif command == "q":
exit_program()
# Files
elif command == "d":
ui_download()
elif command == "l":
ui_listfile()
elif command == "r":
ui_delete()
elif command == "s":
ui_share()
elif command == "u":
ui_upload()
# Unknown command
else:
print(command+": unknown command")
def handler(signum, frame):
"""
Signal handler handling Ctrl+C event.
"""
print("\nReceived signal: ", signum)
print("Exit")
exit_program()
def exit_program():
"""
    Save the list before exiting the program.
"""
os.system("rm -f *.data")
if client_conn.is_login():
filelist.save(password, "salt", mylist)
client_conn.upload(mylist)
client_conn.logout()
print("Bye")
sys.exit()
def ui_login():
"""
Handle authentication steps during user login.
"""
if client_conn.is_login():
print("You already login")
return
# Connect to server
print("Username: ", end='')
username = input()
global password
password = getpass.getpass('Password: ')
pw_server = hashlib.sha256()
pw_server.update(password.encode('UTF-8'))
status = client_conn.authenticate(username, pw_server.digest())
# Get filelist
if status:
print("Login success")
else:
print("Login failure")
return
# Download the filelist
status = client_conn.download(mylist)
if status:
        # List exists on the server and was successfully downloaded
filelist.load(password, "salt", mylist)
else:
        # List does not exist on the server
pass
def ui_logout():
"""
Handle the logout events.
"""
if client_conn.is_login():
filelist.save(password, "salt", mylist)
client_conn.upload(mylist)
status = client_conn.logout()
filelist.mylist = {}
if status:
print("Logout success")
else:
print("Logout failure")
def ui_create_account():
"""
    Create a new account.
"""
print("Username: ", end='')
global password
username = input()
password = getpass.getpass('Password: ')
pw_server = hashlib.sha256()
pw_server.update(password.encode('UTF-8'))
status = client_conn.registrate(username, pw_server.digest())
if status:
print("Create account success")
else:
print("Create account failure")
def ui_upload():
"""
    Encrypt the file and upload it.
"""
if not client_conn.is_login():
print("Please login first")
#return
print("Filename: ", end='')
filename = input()
status = client_conn.upload_file(filename)
if status:
print("Upload success")
else:
print("Upload failure")
def ui_download():
if not client_conn.is_login():
print("Please login first")
return
print("Filename: ", end='')
filename = input()
print("Save as: ", end='')
saveas = input()
status = client_conn.download_file(filename, saveas)
if status:
print("Download success")
else:
print("Download failure")
def ui_share():
if not client_conn.is_login():
print("Please login first")
return
# Enter recipient username
print("Invite people (username): ", end='')
recipient = input().strip()
# Recipient's email
recv_email = None
print("Recipient's email address: ", end='')
recv_email = input().strip()
# Get target's public key
choice = None
while choice != "1" and choice != "2":
print("Obtain the recipent's public key:")
print(" 1) Download from Hong Kong Post")
print(" 2) Input from file (debug)")
print("Choice [1,2]: ", end='')
choice = input().strip()
public_key = None
try:
if choice == "1":
# Download from HK Post
public_key = rsa.get_cert(recv_email, True)
sender = "oneonestar@gmail.com"
if choice == "2":
# Import from file
sender = "debug@mail.com"
filename = "key/public_key.pem"
public_key = rsa.load_public_cert_from_file(filename)
except Exception as e:
log.print_exception(e)
log.print_error("error", "failed to load cert")
return
# Get user's private key to signoff
if os.path.isfile("/home/star/.ssh/me.key.pem2"):
private_key = rsa.load_private_cert_from_file("/home/star/.ssh/me.key.pem2")
else:
private_key = rsa.load_private_cert_from_file("key/private_key.pem")
# Encrypt the filelist record
print("File to share: ", end='')
filename = input()
record = filelist.export_record(filename, sender, recv_email, public_key, private_key)
    if record is None:
print("Failed to share file")
return
# Send to server
client_conn.share(recipient, record)
def ui_listfile():
if not client_conn.is_login():
print("Please login first")
return
# Get share files
client_conn.get_share()
# Listing
filelist.listing()
def ui_delete():
if not client_conn.is_login():
print("Please login first")
return
print("Filename: ", end='')
filename = input()
record = filelist.get(filename)
if record:
client_conn.delete(record['filename_rand'])
filelist.delete(filename)
if __name__ == "__main__":
'''
For testing
'''
signal.signal(signal.SIGINT, handler)
#url = "https://blog.onestar.moe:8080"
client_conn.setup("http://localhost:8080/", "../server/config/server.pem")
print("Welcome to ComfortZone - Secure Cloud Storage Service")
print()
print()
while True:
event_loop()
# Upload a file
# List files
#filelist.listing()
# Upload filelist
# Download filelist
# Download a file
#client_conn.download_file("testing.txt", "saveas.txt")
# Logout
#client_conn.logout()
#client_conn.download_file("testing.txt", "saveas.txt")
|
import glob
import os
import tempfile
import shutil
import unittest
from avocado.core import exit_codes
from avocado.utils import process
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)
AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")
class ReplayTests(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
cmd_line = ('%s run passtest.py '
'-m examples/tests/sleeptest.py.data/sleeptest.yaml '
'--job-results-dir %s --sysinfo=off --json -'
% (AVOCADO, self.tmpdir))
expected_rc = exit_codes.AVOCADO_ALL_OK
self.run_and_check(cmd_line, expected_rc)
self.jobdir = ''.join(glob.glob(os.path.join(self.tmpdir, 'job-*')))
idfile = ''.join(os.path.join(self.jobdir, 'id'))
with open(idfile, 'r') as f:
self.jobid = f.read().strip('\n')
def run_and_check(self, cmd_line, expected_rc):
os.chdir(basedir)
result = process.run(cmd_line, ignore_status=True)
self.assertEqual(result.exit_status, expected_rc,
"Command %s did not return rc "
"%d:\n%s" % (cmd_line, expected_rc, result))
return result
def test_run_replay_noid(self):
"""
Runs a replay job with an invalid jobid.
"""
cmd_line = ('%s run --replay %s '
'--job-results-dir %s --sysinfo=off'
% (AVOCADO, 'foo', self.tmpdir))
expected_rc = exit_codes.AVOCADO_FAIL
self.run_and_check(cmd_line, expected_rc)
def test_run_replay_latest(self):
"""
Runs a replay job using the 'latest' keyword.
"""
cmd_line = ('%s run --replay latest --job-results-dir %s --sysinfo=off'
% (AVOCADO, self.tmpdir))
expected_rc = exit_codes.AVOCADO_ALL_OK
self.run_and_check(cmd_line, expected_rc)
def test_run_replay_data(self):
"""
Checks if all expected files are there.
"""
file_list = ['multiplex', 'config', 'test_references', 'pwd', 'args',
'cmdline']
for filename in file_list:
path = os.path.join(self.jobdir, 'jobdata', filename)
self.assertTrue(glob.glob(path))
def test_run_replay(self):
"""
Runs a replay job.
"""
cmd_line = ('%s run --replay %s '
'--job-results-dir %s --sysinfo=off'
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_ALL_OK
self.run_and_check(cmd_line, expected_rc)
def test_run_replay_partialid(self):
"""
Runs a replay job with a partial jobid.
"""
partial_id = self.jobid[:5]
cmd_line = ('%s run --replay %s '
'--job-results-dir %s --sysinfo=off'
% (AVOCADO, partial_id, self.tmpdir))
expected_rc = exit_codes.AVOCADO_ALL_OK
self.run_and_check(cmd_line, expected_rc)
def test_run_replay_results_as_jobid(self):
"""
Runs a replay job identifying the job by its results directory.
"""
cmd_line = ('%s run --replay %s '
'--job-results-dir %s --sysinfo=off'
% (AVOCADO, self.jobdir, self.tmpdir))
expected_rc = exit_codes.AVOCADO_ALL_OK
self.run_and_check(cmd_line, expected_rc)
def test_run_replay_invalidignore(self):
"""
Runs a replay job with an invalid option for '--replay-ignore'
"""
        cmd_line = ('%s run --replay %s --replay-ignore foo '
'--job-results-dir %s --sysinfo=off'
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_FAIL
result = self.run_and_check(cmd_line, expected_rc)
msg = 'Invalid --replay-ignore option. Valid options are ' \
'(more than one allowed): variants,config'
self.assertIn(msg, result.stderr)
def test_run_replay_ignorevariants(self):
"""
Runs a replay job ignoring the variants.
"""
cmd_line = ('%s run --replay %s --replay-ignore variants '
'--job-results-dir %s --sysinfo=off'
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_ALL_OK
result = self.run_and_check(cmd_line, expected_rc)
msg = 'Ignoring variants from source job with --replay-ignore.'
self.assertIn(msg, result.stderr)
def test_run_replay_invalidstatus(self):
"""
Runs a replay job with an invalid option for '--replay-test-status'
"""
cmd_line = ('%s run --replay %s --replay-test-status E '
'--job-results-dir %s --sysinfo=off'
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_FAIL
result = self.run_and_check(cmd_line, expected_rc)
msg = 'Invalid --replay-test-status option. Valid options are (more ' \
'than one allowed): SKIP,ERROR,FAIL,WARN,PASS,INTERRUPTED'
self.assertIn(msg, result.stderr)
def test_run_replay_statusfail(self):
"""
Runs a replay job only with tests that failed.
"""
cmd_line = ('%s run --replay %s --replay-test-status '
'FAIL --job-results-dir %s --sysinfo=off'
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_ALL_OK
result = self.run_and_check(cmd_line, expected_rc)
msg = 'RESULTS : PASS 0 | ERROR 0 | FAIL 0 | SKIP 4 | WARN 0 | INTERRUPT 0'
self.assertIn(msg, result.stdout)
def test_run_replay_remotefail(self):
"""
Runs a replay job using remote plugin (not supported).
"""
cmd_line = ('%s run --replay %s --remote-hostname '
'localhost --job-results-dir %s --sysinfo=off'
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_FAIL
result = self.run_and_check(cmd_line, expected_rc)
msg = "Currently we don't replay jobs in remote hosts."
self.assertIn(msg, result.stderr)
def test_run_replay_status_and_variants(self):
"""
Runs a replay job with custom variants using '--replay-test-status'
"""
cmd_line = ('%s run --replay %s --replay-ignore variants '
'--replay-test-status FAIL --job-results-dir %s '
'--sysinfo=off' % (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_FAIL
result = self.run_and_check(cmd_line, expected_rc)
msg = ("Option `--replay-test-status` is incompatible with "
"`--replay-ignore variants`")
self.assertIn(msg, result.stderr)
def test_run_replay_status_and_references(self):
"""
Runs a replay job with custom test references and --replay-test-status
"""
cmd_line = ('%s run sleeptest --replay %s '
'--replay-test-status FAIL --job-results-dir %s '
'--sysinfo=off' % (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_FAIL
result = self.run_and_check(cmd_line, expected_rc)
msg = ("Option --replay-test-status is incompatible with "
"test references given on the command line.")
self.assertIn(msg, result.stderr)
def test_run_replay_fallbackdir(self):
"""
Runs a replay job with the fallback job data directory name.
"""
shutil.move(os.path.join(self.jobdir, 'jobdata'),
os.path.join(self.jobdir, 'replay'))
cmd_line = ('%s run --replay %s '
'--job-results-dir %s --sysinfo=off'
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_ALL_OK
self.run_and_check(cmd_line, expected_rc)
def test_run_replay_and_mux(self):
"""
Runs a replay job and specifies multiplex file (which should be
ignored)
"""
cmdline = ("%s run --replay %s --job-results-dir %s "
"--sysinfo=off -m examples/mux-selftest.yaml"
% (AVOCADO, self.jobid, self.tmpdir))
self.run_and_check(cmdline, exit_codes.AVOCADO_ALL_OK)
def tearDown(self):
shutil.rmtree(self.tmpdir)
if __name__ == '__main__':
unittest.main()
|
import collections
from data import chain, crossword, warehouse
from data.alphabets import cryptic_keywords
from puzzle.problems.crossword import _base_crossword_problem
class CrypticProblem(_base_crossword_problem._BaseCrosswordProblem):
def __init__(self, name, lines, **kwargs):
super(CrypticProblem, self).__init__(name, lines, **kwargs)
self._plan = None
self._tokens = None
def _init(self):
if self._plan is None and self._tokens is None:
parsed, plan = _compile(self.lines[0])
self._tokens = chain.Chain(parsed)
self._plan = plan
@staticmethod
def score(lines):
if len(lines) > 1:
return 0
line = lines[0]
parts = line.split()
if any(part in cryptic_keywords.ALL_INDICATORS for part in parts):
return 1
return _base_crossword_problem.score(lines) * .9 # Lower than normal.
def _solve(self):
self._init()
solutions = _Solutions(self._notes, self._min_length, self._max_length)
_visit(self._tokens, self._plan, solutions)
return solutions
def _compile(clue):
words_api = warehouse.get('/api/words')
result = []
indicators_seen = collections.defaultdict(list)
for i, token in enumerate(crossword.tokenize_clue(clue)):
indicator_token = token
base_form = words_api.base_form(token)
if base_form in cryptic_keywords.ALL_INDICATORS:
indicator_token = base_form
if indicator_token in cryptic_keywords.ALL_INDICATORS:
for indicator in cryptic_keywords.ALL_INDICATORS[indicator_token]:
indicators_seen[indicator].append(i)
result.append([token])
plan = sorted(indicators_seen.items(), key=lambda i: _VISIT_ORDER[i[0]])
return result, plan
def _visit(tokens, plan, solutions):
words_api = warehouse.get('/api/words')
# First pass: perform any necessary expansions.
for _, words in tokens.items():
source = words[0]
if source in cryptic_keywords.SHORTHAND_CONVERSIONS:
words.extend(cryptic_keywords.SHORTHAND_CONVERSIONS[source])
words.extend(words_api.expand(source).keys())
for indicator, positions in plan:
try:
_VISIT_MAP[indicator](tokens, positions, solutions)
except NotImplementedError:
print('Indicator for "%s" not implemented' % indicator)
raise NotImplementedError('Indicator for "%s" not implemented' % indicator)
except Exception:
print('Error visiting %s for %s' % (
indicator, ' '.join(words[0] for words in tokens)
))
raise
if not solutions:
# Attempt to find the solution from pieces of the expanded words.
_visit_concatenate(tokens, [], solutions)
if not solutions:
# Finally, attempt to find the solution from just 1 expanded word.
_visit_edge_words(tokens, [], solutions)
def _visit_initial(tokens, positions, solutions):
del solutions # Initial indicator produces more tokens.
for position in positions:
tokens.pop(position)
for _, words in tokens.items():
source = words[0]
words.append(source[0])
for position in reversed(positions):
tokens.restore(position)
def _visit_edge_words(tokens, positions, solutions):
del positions
top_words = warehouse.get('/words/unigram')
for edge in (tokens[0], tokens[-1]):
for token in edge[1:]: # Skip first word.
if token in top_words:
solutions.add(token, .33, 'synonym for edge word "%s"', [[edge[0]]])
def _visit_word_edges(tokens, positions, solutions):
del solutions # Edge indicator produces more tokens.
for position in positions:
tokens.pop(position)
for _, words in tokens.items():
source = words[0]
words.append(source[0] + source[-1])
for position in reversed(positions):
tokens.restore(position)
def _visit_reversal(tokens, positions, solutions):
  del solutions # Reversal indicator produces more tokens.
for position in positions:
tokens.pop(position)
for _, words in tokens.items():
source = words[0]
words.append(''.join(reversed(source)))
for position in reversed(positions):
tokens.restore(position)
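# Embedded ("hidden word") clues: join the original clue words into one string and walk a trie
# from every offset, scoring candidates by how little of the match comes from the indicator
# words and demoting matches that start on a word boundary.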
def _visit_embedded(tokens, positions, solutions):
min_length = solutions.min_length
max_length = solutions.max_length
acc = []
pos_map = []
start_map = []
for pos, expanded in tokens.items():
source = expanded[0]
acc.append(source)
for i in range(len(source)):
pos_map.append(pos)
start_map.append(i == 0)
search_text = ''.join(acc)
trie = warehouse.get('/words/unigram/trie')
interesting_threshold = trie.interesting_threshold()
end = len(search_text) - min_length
ignored = set(acc) # Ignore words from clue itself.
for offset in range(end + 1): # End should be inclusive.
for result, weight in trie.walk(search_text[offset:]):
if result in ignored:
continue
result_length = len(result)
if result_length >= min_length and result_length <= max_length:
base_weight = min(1, weight / interesting_threshold)
# Demote scores for start-of-word.
if start_map[offset]:
base_weight *= .9
# Score = % of word not banned by `positions`.
score = base_weight * (
sum(pos_map[i] not in positions for i in
range(offset, offset + result_length))
) / result_length
start_pos = pos_map[offset]
end_pos = pos_map[offset + result_length - 1] + 1
embedded_slice = tokens[start_pos:end_pos]
solutions.add(result, score, 'embedded in %s', embedded_slice)
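# Anagram clues: recursively pick one expansion per remaining token, look the concatenated
# letters up in the anagram index, and score each hit by how few of its pieces came from the
# indicator words themselves.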
def _visit_anagram(tokens, positions, solutions):
end = len(tokens)
min_length = solutions.min_length
max_length = solutions.max_length
anagram_positions = set(positions)
anagram_index = warehouse.get('/words/unigram/anagram_index')
trie = warehouse.get('/words/unigram/trie')
interesting_threshold = trie.interesting_threshold()
banned_max = len(anagram_positions)
def _add(acc, banned_max):
parts = []
banned_matches = 0
for word, pos in acc:
parts.append(word)
if pos in anagram_positions:
banned_matches += 1
elif word in cryptic_keywords.CONCATENATE_INDICATORS:
# Special case for concatenate keywords which frequently join two
# chunks of an anagram.
banned_matches += 1
banned_max += 1
solution = ''.join(parts)
if solution not in anagram_index:
return
anagrams = anagram_index[solution]
    # Score is 0 if all acc are from positions; .5 if 1/2 are, etc.
if not anagram_positions:
score = 1
else:
score = 1 - (banned_matches / banned_max)
for anagram in anagrams:
if anagram != solution:
base_weight = min(1, trie[anagram] / interesting_threshold)
solutions.add(anagram, base_weight * score, 'anagram of %s', acc)
def _crawl(pos, acc, acc_length):
# Try to form total word from all remaining words.
for i in range(pos, end):
words = tokens[i]
for word in words:
word_length = len(word)
new_length = acc_length + word_length
if new_length > max_length:
continue
acc_length = new_length
acc.append((word, i))
if min_length <= new_length <= max_length:
_add(acc, banned_max)
elif new_length < max_length:
_crawl(i + 1, acc, acc_length)
acc_length -= word_length
acc.pop()
_crawl(0, [], 0)
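# Charade/concatenation clues: recursively try concatenating one expansion from each token
# (indicator words may be skipped), pruning with a trie prefix check and scoring complete
# dictionary words that fall inside the target length range.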
def _visit_concatenate(tokens, positions, solutions):
end = len(tokens)
min_length = solutions.min_length
max_length = solutions.max_length
concatenate_positions = set(positions)
trie = warehouse.get('/words/unigram/trie')
interesting_threshold = trie.interesting_threshold()
def _add(acc):
if len(acc) == 1:
return # Ignore complete words in input.
parts = []
banned_matches = 0
for word, pos in acc:
parts.append(word)
if pos in concatenate_positions:
banned_matches += 1
solution = ''.join(parts)
if solution not in trie:
return
    # Score is 0 if all acc are from positions; .5 if 1/2 are, etc.
if not concatenate_positions:
score = 1
else:
score = 1 - (banned_matches / len(concatenate_positions))
base_weight = min(1, trie[solution] / interesting_threshold)
solutions.add(solution, base_weight * score, 'concatenation of %s', acc)
def _crawl(pos, acc, acc_length):
if pos in concatenate_positions and pos + 1 < end:
# Optionally, skip ahead to next position using current acc.
_crawl(pos + 1, acc, acc_length)
# Try to form total word from all remaining starting points.
for i in range(pos, end):
words = tokens[i]
for word in words:
word_length = len(word)
new_length = acc_length + word_length
if new_length > max_length:
continue
acc_length = new_length
acc.append((word, i))
if new_length >= min_length and new_length <= max_length:
_add(acc)
elif new_length < max_length and trie.has_keys_with_prefix(
''.join(a[0] for a in acc)):
_crawl(i + 1, acc, acc_length)
acc_length -= word_length
acc.pop()
_crawl(0, [], 0)
def _visit_homophone(tokens, positions, solutions):
del tokens, positions
if not solutions:
raise NotImplementedError('Homophones not implemented')
def _visit_insert(tokens, positions, solutions):
if not solutions:
# "INSERT" indicator is usually a subset of functionality provided by
# "ANAGRAM".
_visit_anagram(tokens, positions, solutions)
if not solutions:
raise NotImplementedError()
class _Solutions(dict):
def __init__(self, notes, min_length, max_length):
super(_Solutions, self).__init__()
self._notes = notes
self.min_length = min_length
self.max_length = max_length
def add(self, solution, weight, note, ingredients):
if solution not in self or weight > self[solution]:
self[solution] = weight
self._notes[solution].clear()
if note:
self._notes[solution].append(
note % ', '.join(words[0] for words in ingredients))
_VISIT_MAP = collections.OrderedDict([
# Embedded clues only use original words.
(cryptic_keywords.EMBEDDED_INDICATORS, _visit_embedded),
# Producers.
(cryptic_keywords.INITIAL_INDICATORS, _visit_initial),
(cryptic_keywords.EDGES_INDICATORS, _visit_word_edges),
(cryptic_keywords.REVERSAL_INDICATORS, _visit_reversal),
# Reducers.
(cryptic_keywords.ANAGRAM_INDICATORS, _visit_anagram),
(cryptic_keywords.CONCATENATE_INDICATORS, _visit_concatenate),
# TODO: Incomplete implementation. Redundant with anagram indicator.
(cryptic_keywords.INSERT_INDICATORS, _visit_insert),
# TODO: Incomplete implementation. This should be up with "producers".
(cryptic_keywords.HOMOPHONE_INDICATORS, _visit_homophone),
])
_VISIT_ORDER = dict([(indicator, i) for i, indicator in enumerate(_VISIT_MAP)])
|
"""
Cement core argument module.
"""
from ..core import backend, exc, interface, handler
Log = backend.minimal_logger(__name__)
def argument_validator(klass, obj):
"""Validates a handler implementation against the IArgument interface."""
members = [
'_setup',
'parse',
'parsed_args',
'add_argument',
]
interface.validate(IArgument, obj, members)
class IArgument(interface.Interface):
"""
This class defines the Argument Handler Interface. Classes that
implement this handler must provide the methods and attributes defined
below. Implementations do *not* subclass from interfaces.
Example:
.. code-block:: python
from cement.core import interface, arg
class MyArgumentHandler(arg.CementArgumentHandler):
class Meta:
interface = arg.IArgument
label = 'my_argument_handler'
"""
class IMeta:
label = 'argument'
validator = argument_validator
# Must be provided by the implementation
Meta = interface.Attribute('Handler Meta-data')
parsed_args = interface.Attribute('Parsed args object')
def _setup(app_obj):
"""
The _setup function is called during application initialization and
must 'setup' the handler object making it ready for the framework
or the application to make further calls to it.
Required Arguments:
app_obj
The application object.
Return: None
"""
def add_argument(self, *args, **kw):
"""
Add arguments for parsing. This should be -o/--option or positional.
Positional Arguments:
args
List of option arguments. Generally something like
['-h', '--help'].
        Optional Arguments:
dest
The destination name (var). Default: arg[0]'s string.
help
The help text for --help output (for that argument).
action
Must support: ['store', 'store_true', 'store_false',
'store_const']
const
The value stored if action == 'store_const'.
default
The default value.
Return: None
"""
def parse(self, arg_list):
"""
Parse the argument list (i.e. sys.argv). Can return any object as
        long as its members contain those of the added arguments. For
example, if adding a '-v/--version' option that stores to the dest of
'version', then the member must be callable as 'Object().version'.
Must also set self.parsed_args to what is being returned.
Required Arguments:
arg_list
A list of command line arguments.
Return: Callable
"""
class CementArgumentHandler(handler.CementBaseHandler):
"""
Base class that all Argument Handlers should sub-class from.
"""
class Meta:
label = None
interface = IArgument
def __init__(self, *args, **kw):
super(CementArgumentHandler, self).__init__(*args, **kw)
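# A minimal sketch (not shipped with Cement) of an argparse-backed handler that
# provides the members checked by argument_validator(). The label and the use
# of parse_known_args() are assumptions for illustration only.
import argparse

class SketchArgparseArgumentHandler(CementArgumentHandler):
    class Meta:
        interface = IArgument
        label = 'sketch_argparse'

    def __init__(self, *args, **kw):
        super(SketchArgparseArgumentHandler, self).__init__(*args, **kw)
        self._parser = argparse.ArgumentParser(add_help=False)
        self.parsed_args = None

    def add_argument(self, *args, **kw):
        self._parser.add_argument(*args, **kw)

    def parse(self, arg_list):
        self.parsed_args = self._parser.parse_known_args(arg_list)[0]
        return self.parsed_args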
|
#!/usr/bin/python
"""**************************************************************************
**
** Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
** All rights reserved.
** Contact: Nokia Corporation (qt-info@nokia.com)
**
** This file is part of the examples of the Qt Toolkit.
**
** You may use this file under the terms of the BSD license as follows:
**
** "Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions are
** met:
** * Redistributions of source code must retain the above copyright
** notice, this list of conditions and the following disclaimer.
** * Redistributions in binary form must reproduce the above copyright
** notice, this list of conditions and the following disclaimer in
** the documentation and/or other materials provided with the
** distribution.
** * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
** the names of its contributors may be used to endorse or promote
** products derived from this software without specific prior written
** permission.
**
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
** OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
** LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
** OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
**
*****************************************************************************
** February 2011
** - stardelegate example ported to PySide by Arun Srinivasan
** <rulfzid@gmail.com>
**************************************************************************"""
from PySide.QtGui import (QItemDelegate, QStyledItemDelegate, QStyle)
from starrating import StarRating
from stareditor import StarEditor
class StarDelegate(QStyledItemDelegate):
""" A subclass of QStyledItemDelegate that allows us to render our
pretty star ratings.
"""
def __init__(self, parent=None):
super(StarDelegate, self).__init__(parent)
def paint(self, painter, option, index):
""" Paint the items in the table.
If the item referred to by <index> is a StarRating, we handle the
painting ourselves. For the other items, we let the base class
handle the painting as usual.
In a polished application, we'd use a better check than the
column number to find out if we needed to paint the stars, but
it works for the purposes of this example.
"""
if index.column() == 3:
starRating = StarRating(index.data())
# If the row is currently selected, we need to make sure we
# paint the background accordingly.
if option.state & QStyle.State_Selected:
# The original C++ example used option.palette.foreground() to
# get the brush for painting, but there are a couple of
# problems with that:
# - foreground() is obsolete now, use windowText() instead
# - more importantly, windowText() just returns a brush
# containing a flat color, where sometimes the style
# would have a nice subtle gradient or something.
# Here we just use the brush of the painter object that's
# passed in to us, which keeps the row highlighting nice
# and consistent.
painter.fillRect(option.rect, painter.brush())
# Now that we've painted the background, call starRating.paint()
# to paint the stars.
starRating.paint(painter, option.rect, option.palette)
else:
QStyledItemDelegate.paint(self, painter, option, index)
def sizeHint(self, option, index):
""" Returns the size needed to display the item in a QSize object. """
if index.column() == 3:
starRating = StarRating(index.data())
return starRating.sizeHint()
else:
return QStyledItemDelegate.sizeHint(self, option, index)
# The next 4 methods handle the custom editing that we need to do.
# If this were just a display delegate, paint() and sizeHint() would
# be all we needed.
def createEditor(self, parent, option, index):
""" Creates and returns the custom StarEditor object we'll use to edit
the StarRating.
"""
if index.column() == 3:
editor = StarEditor(parent)
editor.editingFinished.connect(self.commitAndCloseEditor)
return editor
else:
return QStyledItemDelegate.createEditor(self, parent, option, index)
def setEditorData(self, editor, index):
""" Sets the data to be displayed and edited by our custom editor. """
if index.column() == 3:
editor.starRating = StarRating(index.data())
else:
QStyledItemDelegate.setEditorData(self, editor, index)
def setModelData(self, editor, model, index):
""" Get the data from our custom editor and stuffs it into the model.
"""
if index.column() == 3:
model.setData(index, editor.starRating.starCount)
else:
QStyledItemDelegate.setModelData(self, editor, model, index)
def commitAndCloseEditor(self):
""" Erm... commits the data and closes the editor. :) """
editor = self.sender()
# The commitData signal must be emitted when we've finished editing
        # and need to write our changes back to the model.
self.commitData.emit(editor)
self.closeEditor.emit(editor)
if __name__ == "__main__":
""" Run the application. """
from PySide.QtGui import (QApplication, QTableWidget, QTableWidgetItem,
QAbstractItemView)
import sys
app = QApplication(sys.argv)
# Create and populate the tableWidget
tableWidget = QTableWidget(4, 4)
tableWidget.setItemDelegate(StarDelegate())
tableWidget.setEditTriggers(QAbstractItemView.DoubleClicked |
QAbstractItemView.SelectedClicked)
tableWidget.setSelectionBehavior(QAbstractItemView.SelectRows)
tableWidget.setHorizontalHeaderLabels(["Title", "Genre", "Artist", "Rating"])
data = [ ["Mass in B-Minor", "Baroque", "J.S. Bach", 5],
["Three More Foxes", "Jazz", "Maynard Ferguson", 4],
["Sex Bomb", "Pop", "Tom Jones", 3],
["Barbie Girl", "Pop", "Aqua", 5] ]
for r in range(len(data)):
tableWidget.setItem(r, 0, QTableWidgetItem(data[r][0]))
tableWidget.setItem(r, 1, QTableWidgetItem(data[r][1]))
tableWidget.setItem(r, 2, QTableWidgetItem(data[r][2]))
item = QTableWidgetItem()
item.setData(0, StarRating(data[r][3]).starCount)
tableWidget.setItem(r, 3, item)
tableWidget.resizeColumnsToContents()
tableWidget.resize(500, 300)
tableWidget.show()
sys.exit(app.exec_())
|
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from hashlib import sha1
from os import walk
from os.path import dirname, isdir, isfile, join
from click.testing import CliRunner
from platformio import __version__, exception, fs
from platformio.compat import IS_WINDOWS, hashlib_encode_data
from platformio.project.config import ProjectConfig
def get_project_dir():
return os.getcwd()
def is_platformio_project(project_dir=None):
if not project_dir:
project_dir = get_project_dir()
return isfile(join(project_dir, "platformio.ini"))
def find_project_dir_above(path):
if isfile(path):
path = dirname(path)
if is_platformio_project(path):
return path
if isdir(dirname(path)):
return find_project_dir_above(dirname(path))
return None
def get_project_core_dir():
"""Deprecated, use ProjectConfig.get_optional_dir("core") instead"""
return ProjectConfig.get_instance(
join(get_project_dir(), "platformio.ini")
).get_optional_dir("core", exists=True)
def get_project_cache_dir():
"""Deprecated, use ProjectConfig.get_optional_dir("cache") instead"""
return ProjectConfig.get_instance(
join(get_project_dir(), "platformio.ini")
).get_optional_dir("cache")
def get_project_global_lib_dir():
"""
Deprecated, use ProjectConfig.get_optional_dir("globallib") instead
"platformio-node-helpers" depends on it
"""
return ProjectConfig.get_instance(
join(get_project_dir(), "platformio.ini")
).get_optional_dir("globallib")
def get_project_lib_dir():
"""
Deprecated, use ProjectConfig.get_optional_dir("lib") instead
"platformio-node-helpers" depends on it
"""
return ProjectConfig.get_instance(
join(get_project_dir(), "platformio.ini")
).get_optional_dir("lib")
def get_project_libdeps_dir():
"""
Deprecated, use ProjectConfig.get_optional_dir("libdeps") instead
"platformio-node-helpers" depends on it
"""
return ProjectConfig.get_instance(
join(get_project_dir(), "platformio.ini")
).get_optional_dir("libdeps")
def get_default_projects_dir():
docs_dir = join(fs.expanduser("~"), "Documents")
try:
assert IS_WINDOWS
import ctypes.wintypes # pylint: disable=import-outside-toplevel
buf = ctypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH)
ctypes.windll.shell32.SHGetFolderPathW(None, 5, None, 0, buf)
docs_dir = buf.value
except: # pylint: disable=bare-except
pass
return join(docs_dir, "PlatformIO", "Projects")
def compute_project_checksum(config):
# rebuild when PIO Core version changes
checksum = sha1(hashlib_encode_data(__version__))
# configuration file state
checksum.update(hashlib_encode_data(config.to_json()))
# project file structure
check_suffixes = (".c", ".cc", ".cpp", ".h", ".hpp", ".s", ".S")
for d in (
config.get_optional_dir("include"),
config.get_optional_dir("src"),
config.get_optional_dir("lib"),
):
if not isdir(d):
continue
chunks = []
for root, _, files in walk(d):
for f in files:
path = join(root, f)
if path.endswith(check_suffixes):
chunks.append(path)
if not chunks:
continue
chunks_to_str = ",".join(sorted(chunks))
if IS_WINDOWS: # case insensitive OS
chunks_to_str = chunks_to_str.lower()
checksum.update(hashlib_encode_data(chunks_to_str))
return checksum.hexdigest()
def load_project_ide_data(project_dir, env_or_envs, cache=False):
assert env_or_envs
env_names = env_or_envs
if not isinstance(env_names, list):
env_names = [env_names]
with fs.cd(project_dir):
result = _load_cached_project_ide_data(project_dir, env_names) if cache else {}
missed_env_names = set(env_names) - set(result.keys())
if missed_env_names:
result.update(_load_project_ide_data(project_dir, missed_env_names))
if not isinstance(env_or_envs, list) and env_or_envs in result:
return result[env_or_envs]
return result or None
def _load_project_ide_data(project_dir, env_names):
# pylint: disable=import-outside-toplevel
from platformio.commands.run.command import cli as cmd_run
args = ["--project-dir", project_dir, "--target", "_idedata"]
for name in env_names:
args.extend(["-e", name])
result = CliRunner().invoke(cmd_run, args)
if result.exit_code != 0 and not isinstance(
result.exception, exception.ReturnErrorCode
):
raise result.exception
if '"includes":' not in result.output:
raise exception.PlatformioException(result.output)
return _load_cached_project_ide_data(project_dir, env_names)
def _load_cached_project_ide_data(project_dir, env_names):
build_dir = ProjectConfig.get_instance(
join(project_dir, "platformio.ini")
).get_optional_dir("build")
result = {}
for name in env_names:
if not os.path.isfile(os.path.join(build_dir, name, "idedata.json")):
continue
with open(os.path.join(build_dir, name, "idedata.json")) as fp:
result[name] = json.load(fp)
return result
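# Illustrative sketch (not part of the PlatformIO API): resolve the nearest
# project directory above an arbitrary path and compute its checksum using the
# helpers defined above. The function name is hypothetical.
def example_project_checksum(path):
    project_dir = find_project_dir_above(path) or get_project_dir()
    if not is_platformio_project(project_dir):
        return None
    config = ProjectConfig.get_instance(join(project_dir, "platformio.ini"))
    return compute_project_checksum(config)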
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Inventory Management',
'version': '1.1',
'summary': 'Inventory, Logistics, Warehousing',
'description': "",
'website': 'https://www.odoo.com/page/warehouse',
'depends': ['product', 'barcodes', 'web_planner'],
'category': 'Warehouse',
'sequence': 13,
'demo': [
'data/stock_demo_pre.yml',
'data/procurement_demo.xml',
'data/stock_demo.xml',
'data/stock_orderpoint_demo.xml',
'data/stock_orderpoint_demo.yml',
'data/stock_demo.yml',
'data/stock_location_demo_cpu1.xml',
'data/stock_location_demo_cpu3.yml',
'data/stock_quant_demo.xml',
],
'data': [
'security/stock_security.xml',
'security/ir.model.access.csv',
'views/stock_menu_views.xml',
'data/stock_traceability_report_data.xml',
'data/procurement_data.xml',
'report/report_stock_forecast.xml',
'report/stock_report_views.xml',
'report/report_package_barcode.xml',
'report/report_lot_barcode.xml',
'report/report_location_barcode.xml',
'report/report_stockpicking_operations.xml',
'report/report_deliveryslip.xml',
'report/report_stockinventory.xml',
'wizard/stock_change_product_qty_views.xml',
'wizard/stock_picking_return_views.xml',
'wizard/stock_scheduler_compute_views.xml',
'wizard/stock_immediate_transfer_views.xml',
'wizard/stock_backorder_confirmation_views.xml',
'views/res_partner_views.xml',
'views/product_strategy_views.xml',
'views/stock_incoterms_views.xml',
'views/stock_production_lot_views.xml',
'views/stock_picking_views.xml',
'views/stock_scrap_views.xml',
'views/stock_inventory_views.xml',
'views/stock_quant_views.xml',
'views/stock_location_views.xml',
'views/stock_warehouse_views.xml',
'views/stock_move_line_views.xml',
'views/stock_move_views.xml',
'views/product_views.xml',
'views/res_config_settings_views.xml',
'views/report_stock_traceability.xml',
'views/stock_template.xml',
'views/procurement_views.xml',
'data/default_barcode_patterns.xml',
'data/stock_data.xml',
'data/stock_data.yml',
'data/stock_incoterms_data.xml',
'data/stock_sequence_data.xml',
'data/web_planner_data.xml',
],
'qweb': [
'static/src/xml/stock_traceability_report_backend.xml',
],
'test': [
'test/stock_users.yml',
'test/packing.yml',
'test/packingneg.yml',
'test/procrule.yml',
'test/wiseoperator.yml',
],
'installable': True,
'application': True,
'auto_install': False,
}
|
#!/usr/bin/python2.7
# Copyright 2011 Google Inc. All Rights Reserved.
"""Targets class describes which languages/platforms we support."""
__author__ = 'wclarkso@google.com (Will Clarkson)'
import logging
import os
from googleapis.codegen.filesys import files
from googleapis.codegen.utilities import json_expander
from googleapis.codegen.utilities import json_with_comments
class Targets(object):
"""Targets maintains the list of possible target options.
Reads targets.json file in local directory. This file is formatted
as:
{
'languages': {
'languageA': {
'surface_option1': {
'path': 'stable',
'description': 'something about language A',
'displayName': 'SurfaceOption1',
},
'surface_option2': {
'path': 'experimental',
'description': 'something about language A',
'displayName': 'SurfaceOption2',
'platforms': ['cmd-line'],
}
},
'languageB': {
...
}, ...
},
'platforms': {
'cmd-line': {
'displayName': 'Pretty Platform Name'
}
}
}
"""
def __init__(self, targets_path=None, template_root=None, targets_dict=None):
"""Constructor.
Loads targets file.
Args:
targets_path: (str) Path to targets file. Defaults to './targets.json'
template_root: (str) Path to template root. Defaults to '.'
targets_dict: (dict) Initial data, if not supplied from a file.
Raises:
ValueError: if the targets file does not contain the required sections.
"""
self.template_root = template_root or Targets._default_template_root
self.targets_path = targets_path or os.path.join(self.template_root,
'targets.json')
if targets_dict:
self._targets_dict = targets_dict
else:
self._targets_dict = json_with_comments.Loads(
files.GetFileContents(self.targets_path))
# Do some basic validation that this has the required fields
if 'languages' not in self._targets_dict:
raise ValueError('languages not in targets.json')
def Dict(self):
"""The targets.json file as a dictionary."""
return self._targets_dict
def VariationsForLanguage(self, language):
language_def = self._targets_dict['languages'].get(language)
if not language_def:
return None
return Variations(self, language, language_def['variations'])
def GetLanguage(self, language):
return self._targets_dict['languages'][language]
def Languages(self):
return self._targets_dict['languages']
def Platforms(self):
return self._targets_dict.get('platforms', {})
@staticmethod
def SetDefaultTemplateRoot(path):
"""Sets a new default full path to the templates directory.
Args:
path: (str) full path to templates directory.
"""
# This is not a classmethod because we don't want subclasses
# to shadow this value.
logging.info('setting default template root to %s', path)
Targets._default_template_root = path
@staticmethod
def GetDefaultTemplateRoot():
return Targets._default_template_root
# Set the initial template root.
_default_template_root = os.path.join(os.path.dirname(__file__),
'languages')
# Whether to use variation release versions when calculating template paths.
use_versioned_paths = False
@staticmethod
def SetUseVersionedPaths(use_versioned_paths):
"""Sets whether versions are used in the template path."""
# This is not a classmethod because we don't want subclasses
# to shadow this value.
Targets.use_versioned_paths = use_versioned_paths
class Variations(dict):
"""A set of variations available for a particular language."""
def __init__(self, targets, language, variations_dict):
super(Variations, self).__init__(variations_dict)
self._targets = targets
self._language = language
def IsValid(self, variation):
"""Test is a variation exists."""
return variation in self
def _RelativeTemplateDir(self, variation):
"""Returns the path to template dir for the selected variation.
By default, the path is the same as the variation name. It can be
overridden in two ways, of descending precedence:
1. by the 'releaseVersion' element, if use_versioned_paths is set.
2. with an explicit 'path' statement.
Args:
variation: (str) A target variation name.
Returns:
(str) Relative path to template directory.
"""
if self._targets.use_versioned_paths:
path = self[variation].get('releaseVersion') or variation
else:
path = None
if not path:
path = self.get(variation, {}).get('path') or variation
return os.path.join(self._language, path)
def AbsoluteTemplateDir(self, variation):
"""Returns the path to template dir for the selected variation.
Args:
variation: (str) A target variation name.
Returns:
(str) Absolute path to template directory.
"""
return os.path.join(self._targets.template_root,
self._RelativeTemplateDir(variation))
def GetFeaturesForReleaseVersion(self, release_version):
for name in self:
features = self.GetFeatures(name)
if release_version == features.get('releaseVersion'):
return features
return None
def GetFeatures(self, variation):
"""Returns the features dictionary for a specific variation.
    This is the basic dictionary information plus any specific overrides in
the per-template-tree features.json file.
Args:
variation: (str) A target variation name.
Returns:
(Features) features dictionary
"""
if not variation:
return None
template_dir = self.AbsoluteTemplateDir(variation)
features = Features(template_dir, self.get(variation), variation)
json_path = os.path.join(template_dir, 'features.json')
try:
features_json = files.GetFileContents(json_path)
except files.FileDoesNotExist:
# for backwards compatibility, we forgive this.
# TODO(user): be stricter about this and
# fix/remove any tests that fail as a result.
return features
features.update(json_expander.ExpandJsonTemplate(
json_with_comments.Loads(features_json)))
# If not specified, the releaseVersion matches the variation
if not features.get('releaseVersion'):
features['releaseVersion'] = variation
return features
class Features(dict):
"""A dictionary describing the features of a particular API variation."""
# TODO(user): Do we need initial_content? The only thing we see in it is
# path, which should be set explicitly to the dirname of the real file path.
def __init__(self, template_dir, initial_content=None, name=None):
super(Features, self).__init__(initial_content or {})
self.name = name
self.template_dir = template_dir
if 'path' not in self:
self['path'] = os.path.basename(template_dir)
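# Illustrative sketch (hypothetical data, not a real targets.json): build a
# Targets object from an in-memory dict and resolve a variation's template
# directory with the classes defined above.
def ExampleTemplateDir():
  targets = Targets(
      template_root='/tmp/templates',
      targets_dict={
          'languages': {
              'java': {
                  'variations': {
                      'stable': {'path': 'stable', 'releaseVersion': '1.0'},
                  },
              },
          },
      })
  variations = targets.VariationsForLanguage('java')
  # Resolves to <template_root>/java/stable because use_versioned_paths is False.
  return variations.AbsoluteTemplateDir('stable')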
|
#!/usr/bin/env python
import sys
import re
import shelve
from parameter.common_parameters import common_parameters
import utils.setting_utils as utils
utils.now_time("phastcons_score_list script starting...")
p = utils.Bunch(common_parameters)
def main():
utils.now_time("Input_file: " + p.phastcons_score_list_db_input)
utils.now_time("Reference_file: " + p.phastcons_score_list_reference)
utils.now_time("Output_file: " + p.phastcons_score_list_db_output)
output_merge = p.phastcons_score_list_db_output + 'phastCons46way_Refseq_for_MIRAGE_CDS.db' #'phastCons46way_miRBase_v21_hg38Tohg19_for_MIRAGE.db'
output_merge_shelve = shelve.open(output_merge)
#for x in ['chr21']:
for x in ['chr1','chr2','chr3','chr4','chr5','chr6','chr7','chr8','chr9','chr10','chr11','chr12','chr13','chr14','chr15','chr16','chr17','chr18','chr19','chr20','chr21','chr22','chrX','chrY','chrM']:
ref_s = p.phastcons_score_list_reference #mirBase, Refseq etc...
ref_file = open(ref_s,'r')
input_s = p.phastcons_score_list_db_input + x + '.phastCons46way_Refseq_CDS.db' #'.phastCons46way_miRBase_v21_hg38Tohg19.db'
output_s = p.phastcons_score_list_db_output + x + '.phastCons46way_Refseq_for_MIRAGE_CDS.db' #'.phastCons46way_miRBase_v21_hg38Tohg19_for_MIRAGE.db'
input_shelve = shelve.open(input_s)
output_shelve = shelve.open(output_s)
score_list_dict = {}
for line in ref_file:
line = line.rstrip()
data = line.split("\t")
chrom = data[0]
if not chrom == x:
continue
strand = data[5]
if len(data) >= 12: #12bed format
exon_block = data[10].split(',')
exon_block.pop() #Remove the last item ''
exon_st = data[11].split(',')
exon_st.pop() #Remove the last item ''
name = data[3]
score_list_dict[name] = []
for y in range(len(exon_block)):
st = int(data[1]) + int(exon_st[y])
ed = int(data[1]) + int(exon_st[y]) + int(exon_block[y])
length = ed - st
for z in range(length):
score = input_shelve[str(st)]
score_list_dict[name].append(score)
st += 1
if strand == '-':
rev_score = score_list_dict[name][::-1]
score_list_dict[name] = rev_score
elif len(data) >= 3: #6bed format
st = int(data[1])
ed = int(data[2])
length = ed - st
name = data[3]
score_list_dict[name] = []
for z in range(length):
score = input_shelve[str(st)]
score_list_dict[name].append(score)
st += 1
if strand == '-':
rev_score = score_list_dict[name][::-1]
score_list_dict[name] = rev_score
else:
                print('ERROR: Your BED format file has fewer than three columns.')
                print('A BED format file needs to have at least three columns [chr, st, ed]...')
sys.exit(1)
output_shelve.update(score_list_dict)
output_merge_shelve.update(score_list_dict)
input_shelve.close()
output_shelve.close()
utils.now_time("phastcons_score_list script was successfully finished!!")
output_merge_shelve.close()
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005, TUBITAK/UEKAE
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
# Author: Eray Ozkural <eray@pardus.org.tr>
"""
XmlFile class further abstracts a dom object using the
high-level dom functions provided in xmlext module (and sorely lacking
in xml.dom :( )
function names are mixedCase for compatibility with minidom,
an 'old library'
this implementation uses piksemel
"""
import gettext
__trans = gettext.translation('pisi', fallback=True)
_ = __trans.ugettext
import codecs
import exceptions
import piksemel as iks
import pisi
from pisi.file import File
from pisi.util import join_path as join
class Error(pisi.Error):
pass
class XmlFile(object):
"""A class to help reading and writing an XML file"""
def __init__(self, tag):
self.rootTag = tag
def newDocument(self):
"""clear DOM"""
self.doc = iks.newDocument(self.rootTag)
def unlink(self):
"""deallocate DOM structure"""
del self.doc
def rootNode(self):
"""returns root document element"""
return self.doc
def readxmlfile(self, file):
raise Exception("not implemented")
try:
self.doc = iks.parse(file)
return self.doc
except Exception, e:
raise Error(_("File '%s' has invalid XML") % (localpath) )
def readxml(self, uri, tmpDir='/tmp', sha1sum=False,
compress=None, sign=None, copylocal = False):
uri = File.make_uri(uri)
#try:
localpath = File.download(uri, tmpDir, sha1sum=sha1sum,
compress=compress,sign=sign, copylocal=copylocal)
#except IOError, e:
# raise Error(_("Cannot read URI %s: %s") % (uri, unicode(e)) )
try:
self.doc = iks.parse(localpath)
return self.doc
except Exception, e:
raise Error(_("File '%s' has invalid XML") % (localpath) )
def writexml(self, uri, tmpDir = '/tmp', sha1sum=False, compress=None, sign=None):
f = File(uri, File.write, sha1sum=sha1sum, compress=compress, sign=sign)
f.write(self.doc.toPrettyString())
f.close()
def writexmlfile(self, f):
f.write(self.doc.toPrettyString())
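# Usage sketch (illustrative only): create an empty document with a given root
# tag and serialize it through the File abstraction; the output path argument
# is an assumption.
def _xmlfile_demo(path):
    xml = XmlFile('PISI')
    xml.newDocument()
    xml.writexml(path)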
|
#!/usr/local/bin/python2.7
from sys import exit, stdout, argv
from os import environ, system
environ['KERAS_BACKEND'] = 'tensorflow'
environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
environ["CUDA_VISIBLE_DEVICES"] = ""
import numpy as np
import signal
from keras.layers import Input, Dense, Dropout, concatenate, LSTM, BatchNormalization, Conv1D, concatenate
from keras.models import Model
from keras.callbacks import ModelCheckpoint, LambdaCallback, TensorBoard
from keras.optimizers import Adam, SGD
from keras.utils import np_utils
from keras import backend as K
K.set_image_data_format('channels_last')
from subtlenet import config
from subtlenet.generators.gen_singletons import make_coll, generate
'''
some global definitions
'''
NEPOCH = 20
APOSTLE = 'v3_nopt'
system('cp %s shallow_models/train_%s.py'%(argv[0], APOSTLE))
'''
instantiate data loaders
'''
basedir = '/data/t3serv014/snarayan/deep/v_deepgen_3/'
top = make_coll(basedir + '/PARTITION/Top_*_CATEGORY.npy')
qcd = make_coll(basedir + '/PARTITION/QCD_*_CATEGORY.npy')
data = [top, qcd]
'''
first build the classifier!
'''
# set up data
classifier_train_gen = generate(data, partition='train', batch=1000)
classifier_validation_gen = generate(data, partition='validate', batch=10000)
classifier_test_gen = generate(data, partition='test', batch=10)
test_i, test_o, test_w = next(classifier_test_gen)
#print test_i
inputs = Input(shape=(len(config.gen_default_variables),), name='input')
dense = Dense(32, activation='tanh',name='dense1',kernel_initializer='lecun_uniform') (inputs)
dense = Dense(32, activation='tanh',name='dense2',kernel_initializer='lecun_uniform') (dense)
dense = Dense(32, activation='tanh',name='dense3',kernel_initializer='lecun_uniform') (dense)
y_hat = Dense(config.n_truth, activation='softmax') (dense)
classifier = Model(inputs=inputs, outputs=[y_hat])
classifier.compile(optimizer=Adam(lr=0.0005),
loss='categorical_crossentropy',
metrics=['accuracy'])
print '########### CLASSIFIER ############'
classifier.summary()
print '###################################'
# ctrl+C now triggers a graceful exit
def save_classifier(name='shallow', model=classifier):
model.save('shallow_models/%s_%s.h5'%(name, APOSTLE))
def save_and_exit(signal=None, frame=None, name='shallow', model=classifier):
    save_classifier(name, model)
    exit(1)
signal.signal(signal.SIGINT, save_and_exit)
classifier.fit_generator(classifier_train_gen,
steps_per_epoch=5000,
epochs=NEPOCH,
validation_data=classifier_validation_gen,
validation_steps=10,
)
save_classifier()
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
XPlan
A QGIS plugin
Fachschale XPlan für XPlanung
-------------------
begin : 2011-03-08
copyright : (C) 2011 by Bernhard Stroebl, KIJ/DV
email : bernhard.stroebl@jena.de
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from builtins import object
from qgis.PyQt import QtCore, QtSql
from qgis.gui import *
from qgis.core import *
class DbHandler(object):
    '''class to handle a QtSql.QSqlDatabase connection to a PostgreSQL server'''
def __init__(self, iface, tools):
self.iface = iface
self.tools = tools
self.db = None
def dbConnect(self, thisPassword = None):
s = QtCore.QSettings( "XPlanung", "XPlanung-Erweiterung" )
service = ( s.value( "service", "" ) )
host = ( s.value( "host", "" ) )
port = ( s.value( "port", "5432" ) )
database = ( s.value( "dbname", "" ) )
authcfg = s.value( "authcfg", "" )
username, passwd, authcfg = self.tools.getAuthUserNamePassword(authcfg)
        if authcfg is None:
username = ( s.value( "uid", "" ) )
passwd = ( s.value( "pwd", "" ) )
if thisPassword:
passwd = thisPassword
# connect to DB
db = QtSql.QSqlDatabase.addDatabase ("QPSQL", "XPlanung")
db.setHostName(host)
db.setPort(int(port))
db.setDatabaseName(database)
db.setUserName(username)
db.setPassword(passwd)
        db.authcfg = authcfg # for DDIM
ok2 = db.open()
if not ok2:
self.iface.messageBar().pushMessage("Fehler", \
u"Konnte keine Verbindung mit der Datenbank aufbauen", \
level=Qgis.Critical)
return None
else:
return db
def dbDisconnect(self, db):
db.close()
db = None
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A dynamic attention wrapper for RNN cells."""
import collections
import functools
import math
import numpy as np
import tensorflow as tf
from tensorflow_addons.utils import keras_utils
from tensorflow_addons.utils.types import (
AcceptableDTypes,
FloatTensorLike,
TensorLike,
Initializer,
Number,
)
from typeguard import typechecked
from typing import Optional, Callable, Union, List
# TODO: Find public API alternatives to these
from tensorflow.python.keras.engine import base_layer_utils
class AttentionMechanism(tf.keras.layers.Layer):
"""Base class for attention mechanisms.
Common functionality includes:
1. Storing the query and memory layers.
2. Preprocessing and storing the memory.
    Note that this layer takes memory as its init parameter, which is an
    anti-pattern of the Keras API; we have to keep the memory as an init
    parameter for performance and dependency reasons. Under the hood, during
    `__init__()`, it will invoke `base_layer.__call__(memory, setup_memory=True)`.
    This lets Keras keep track of the memory tensor as the input of this layer.
    Once `__init__()` is done, the user can query the attention by
    `score = att_obj([query, state])` and use it as a normal Keras layer.
    Special attention is needed when using this class as the base layer for a
    new attention mechanism:
    1. build() could be invoked more than once, so please make sure weights
    are not duplicated.
    2. Layer.get_weights() might return a different set of weights if the
    instance has a `query_layer`. The query_layer weights are not initialized
    until the memory is configured.
    Also note that this layer does not work with a Keras model when
    `model.compile(run_eagerly=True)`, because this layer is stateful. Support
    for that will be added in a future version.
"""
@typechecked
def __init__(
self,
memory: Union[TensorLike, None],
probability_fn: callable,
query_layer: Optional[tf.keras.layers.Layer] = None,
memory_layer: Optional[tf.keras.layers.Layer] = None,
memory_sequence_length: Optional[TensorLike] = None,
**kwargs,
):
"""Construct base AttentionMechanism class.
Args:
memory: The memory to query; usually the output of an RNN encoder.
This tensor should be shaped `[batch_size, max_time, ...]`.
probability_fn: A `callable`. Converts the score and previous
alignments to probabilities. Its signature should be:
`probabilities = probability_fn(score, state)`.
query_layer: Optional `tf.keras.layers.Layer` instance. The layer's
depth must match the depth of `memory_layer`. If `query_layer` is
not provided, the shape of `query` must match that of
`memory_layer`.
memory_layer: Optional `tf.keras.layers.Layer` instance. The layer's
depth must match the depth of `query_layer`.
If `memory_layer` is not provided, the shape of `memory` must match
that of `query_layer`.
memory_sequence_length: (optional) Sequence lengths for the batch
entries in memory. If provided, the memory tensor rows are masked
with zeros for values past the respective sequence lengths.
**kwargs: Dictionary that contains other common arguments for layer
creation.
"""
self.query_layer = query_layer
self.memory_layer = memory_layer
super().__init__(**kwargs)
self.default_probability_fn = probability_fn
self.probability_fn = probability_fn
self.keys = None
self.values = None
self.batch_size = None
self._memory_initialized = False
self._check_inner_dims_defined = True
self.supports_masking = True
if memory is not None:
# Setup the memory by self.__call__() with memory and
# memory_seq_length. This will make the attention follow the keras
# convention which takes all the tensor inputs via __call__().
if memory_sequence_length is None:
inputs = memory
else:
inputs = [memory, memory_sequence_length]
self.values = super().__call__(inputs, setup_memory=True)
@property
def memory_initialized(self):
"""Returns `True` if this attention mechanism has been initialized with
a memory."""
return self._memory_initialized
def build(self, input_shape):
if not self._memory_initialized:
# This is for setting up the memory, which contains memory and
# optional memory_sequence_length. Build the memory_layer with
# memory shape.
if self.memory_layer is not None and not self.memory_layer.built:
if isinstance(input_shape, list):
self.memory_layer.build(input_shape[0])
else:
self.memory_layer.build(input_shape)
else:
# The input_shape should be query.shape and state.shape. Use the
# query to init the query layer.
if self.query_layer is not None and not self.query_layer.built:
self.query_layer.build(input_shape[0])
def __call__(self, inputs, **kwargs):
"""Preprocess the inputs before calling `base_layer.__call__()`.
        Note that there are two situations here: one for setting up the memory,
        and one with the actual query and state.
        1. When the memory has not been configured, we just pass all the
        parameters to `base_layer.__call__()`, which will then invoke
        `self.call()` with proper inputs, which allows this class to set up the
        memory.
        2. When the memory has already been set up, the input should contain
        the query and state, and optionally the processed memory. If the
        processed memory is not included in the input, we will have to append
        it to the inputs and give it to `base_layer.__call__()`. The processed
        memory is the output of the first invocation of `self.__call__()`. If
        we don't add it here, then from the Keras perspective the graph is
        disconnected, since the output from the previous call is never used.
Args:
          inputs: the input tensors.
          **kwargs: dict, other keyword arguments for the `__call__()`.
"""
# Allow manual memory reset
if kwargs.get("setup_memory", False):
self._memory_initialized = False
if self._memory_initialized:
if len(inputs) not in (2, 3):
raise ValueError(
"Expect the inputs to have 2 or 3 tensors, got %d" % len(inputs)
)
if len(inputs) == 2:
# We append the calculated memory here so that the graph will be
# connected.
inputs.append(self.values)
return super().__call__(inputs, **kwargs)
def call(self, inputs, mask=None, setup_memory=False, **kwargs):
"""Setup the memory or query the attention.
        There are two cases here: one for setting up the memory, and the other
        for querying the attention score. `setup_memory` is the flag that
        indicates which mode it is. The input list will be treated differently
        based on that flag.
Args:
inputs: a list of tensor that could either be `query` and `state`, or
`memory` and `memory_sequence_length`.
`query` is the tensor of dtype matching `memory` and shape
`[batch_size, query_depth]`.
`state` is the tensor of dtype matching `memory` and shape
`[batch_size, alignments_size]`. (`alignments_size` is memory's
`max_time`).
`memory` is the memory to query; usually the output of an RNN
encoder. The tensor should be shaped `[batch_size, max_time, ...]`.
`memory_sequence_length` (optional) is the sequence lengths for the
batch entries in memory. If provided, the memory tensor rows are
masked with zeros for values past the respective sequence lengths.
mask: optional bool tensor with shape `[batch, max_time]` for the
mask of memory. If it is not None, the corresponding item of the
memory should be filtered out during calculation.
setup_memory: boolean, whether the input is for setting up memory, or
query attention.
**kwargs: Dict, other keyword arguments for the call method.
Returns:
Either processed memory or attention score, based on `setup_memory`.
"""
if setup_memory:
if isinstance(inputs, list):
if len(inputs) not in (1, 2):
raise ValueError(
"Expect inputs to have 1 or 2 tensors, got %d" % len(inputs)
)
memory = inputs[0]
memory_sequence_length = inputs[1] if len(inputs) == 2 else None
memory_mask = mask
else:
memory, memory_sequence_length = inputs, None
memory_mask = mask
self.setup_memory(memory, memory_sequence_length, memory_mask)
            # We force self.built to False here since only the memory is
            # initialized, but the real query/state has not been passed to
            # call() yet. The layer should be built and called again.
self.built = False
# Return the processed memory in order to create the Keras
# connectivity data for it.
return self.values
else:
if not self._memory_initialized:
raise ValueError(
"Cannot query the attention before the setup of memory"
)
if len(inputs) not in (2, 3):
raise ValueError(
"Expect the inputs to have query, state, and optional "
"processed memory, got %d items" % len(inputs)
)
# Ignore the rest of the inputs and only care about the query and
# state
query, state = inputs[0], inputs[1]
return self._calculate_attention(query, state)
def setup_memory(self, memory, memory_sequence_length=None, memory_mask=None):
"""Pre-process the memory before actually query the memory.
This should only be called once at the first invocation of `call()`.
Args:
memory: The memory to query; usually the output of an RNN encoder.
This tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length (optional): Sequence lengths for the batch
entries in memory. If provided, the memory tensor rows are masked
with zeros for values past the respective sequence lengths.
memory_mask: (Optional) The boolean tensor with shape `[batch_size,
max_time]`. For any value equal to False, the corresponding value
in memory should be ignored.
"""
if memory_sequence_length is not None and memory_mask is not None:
raise ValueError(
"memory_sequence_length and memory_mask cannot be "
"used at same time for attention."
)
with tf.name_scope(self.name or "BaseAttentionMechanismInit"):
self.values = _prepare_memory(
memory,
memory_sequence_length=memory_sequence_length,
memory_mask=memory_mask,
check_inner_dims_defined=self._check_inner_dims_defined,
)
            # Mark the value as checked since the memory and memory mask might
            # not be passed from __call__(), which does not have proper Keras
            # metadata.
            # TODO(omalleyt12): Remove this hack once the mask has proper
            # Keras history.
base_layer_utils.mark_checked(self.values)
if self.memory_layer is not None:
self.keys = self.memory_layer(self.values)
else:
self.keys = self.values
self.batch_size = self.keys.shape[0] or tf.shape(self.keys)[0]
self._alignments_size = self.keys.shape[1] or tf.shape(self.keys)[1]
if memory_mask is not None or memory_sequence_length is not None:
unwrapped_probability_fn = self.default_probability_fn
def _mask_probability_fn(score, prev):
return unwrapped_probability_fn(
_maybe_mask_score(
score,
memory_mask=memory_mask,
memory_sequence_length=memory_sequence_length,
score_mask_value=score.dtype.min,
),
prev,
)
self.probability_fn = _mask_probability_fn
self._memory_initialized = True
def _calculate_attention(self, query, state):
raise NotImplementedError(
"_calculate_attention need to be implemented by subclasses."
)
def compute_mask(self, inputs, mask=None):
        # The real input of the attention is the query and state, and the
        # memory layer mask shouldn't be passed down. Return None for all
        # output masks here.
return None, None
def get_config(self):
config = {}
        # Since the probability_fn is likely to be a wrapped function, the
        # child class should preserve the original function and how it's
        # wrapped.
if self.query_layer is not None:
config["query_layer"] = {
"class_name": self.query_layer.__class__.__name__,
"config": self.query_layer.get_config(),
}
if self.memory_layer is not None:
config["memory_layer"] = {
"class_name": self.memory_layer.__class__.__name__,
"config": self.memory_layer.get_config(),
}
        # memory is a required init parameter and it's a tensor. It cannot be
        # serialized to config, so we put a placeholder for it.
config["memory"] = None
base_config = super().get_config()
return {**base_config, **config}
def _process_probability_fn(self, func_name):
"""Helper method to retrieve the probably function by string input."""
valid_probability_fns = {
"softmax": tf.nn.softmax,
"hardmax": hardmax,
}
if func_name not in valid_probability_fns.keys():
raise ValueError(
"Invalid probability function: %s, options are %s"
% (func_name, valid_probability_fns.keys())
)
return valid_probability_fns[func_name]
@classmethod
def deserialize_inner_layer_from_config(cls, config, custom_objects):
"""Helper method that reconstruct the query and memory from the config.
In the get_config() method, the query and memory layer configs are
serialized into dict for persistence, this method perform the reverse
action to reconstruct the layer from the config.
Args:
config: dict, the configs that will be used to reconstruct the
object.
custom_objects: dict mapping class names (or function names) of
custom (non-Keras) objects to class/functions.
Returns:
config: dict, the config with layer instance created, which is ready
to be used as init parameters.
"""
# Reconstruct the query and memory layer for parent class.
# Instead of updating the input, create a copy and use that.
config = config.copy()
query_layer_config = config.pop("query_layer", None)
if query_layer_config:
query_layer = tf.keras.layers.deserialize(
query_layer_config, custom_objects=custom_objects
)
config["query_layer"] = query_layer
memory_layer_config = config.pop("memory_layer", None)
if memory_layer_config:
memory_layer = tf.keras.layers.deserialize(
memory_layer_config, custom_objects=custom_objects
)
config["memory_layer"] = memory_layer
return config
@property
def alignments_size(self):
if isinstance(self._alignments_size, int):
return self._alignments_size
else:
return tf.TensorShape([None])
@property
def state_size(self):
return self.alignments_size
def initial_alignments(self, batch_size, dtype):
"""Creates the initial alignment values for the `tfa.seq2seq.AttentionWrapper`
class.
This is important for attention mechanisms that use the previous
alignment to calculate the alignment at the next time step
(e.g. monotonic attention).
The default behavior is to return a tensor of all zeros.
Args:
batch_size: `int32` scalar, the batch_size.
dtype: The `dtype`.
Returns:
A `dtype` tensor shaped `[batch_size, alignments_size]`
(`alignments_size` is the values' `max_time`).
"""
return tf.zeros([batch_size, self._alignments_size], dtype=dtype)
def initial_state(self, batch_size, dtype):
"""Creates the initial state values for the `tfa.seq2seq.AttentionWrapper` class.
This is important for attention mechanisms that use the previous
alignment to calculate the alignment at the next time step
(e.g. monotonic attention).
The default behavior is to return the same output as
`initial_alignments`.
Args:
batch_size: `int32` scalar, the batch_size.
dtype: The `dtype`.
Returns:
A structure of all-zero tensors with shapes as described by
`state_size`.
"""
return self.initial_alignments(batch_size, dtype)
def _luong_score(query, keys, scale):
"""Implements Luong-style (multiplicative) scoring function.
This attention has two forms. The first is standard Luong attention,
as described in:
Minh-Thang Luong, Hieu Pham, Christopher D. Manning.
"Effective Approaches to Attention-based Neural Machine Translation."
EMNLP 2015. https://arxiv.org/abs/1508.04025
The second is the scaled form inspired partly by the normalized form of
Bahdanau attention.
To enable the second form, call this function with `scale=True`.
Args:
query: Tensor, shape `[batch_size, num_units]` to compare to keys.
keys: Processed memory, shape `[batch_size, max_time, num_units]`.
scale: the optional tensor to scale the attention score.
Returns:
A `[batch_size, max_time]` tensor of unnormalized score values.
Raises:
ValueError: If `key` and `query` depths do not match.
"""
depth = query.shape[-1]
key_units = keys.shape[-1]
if depth != key_units:
raise ValueError(
"Incompatible or unknown inner dimensions between query and keys. "
"Query (%s) has units: %s. Keys (%s) have units: %s. "
"Perhaps you need to set num_units to the keys' dimension (%s)?"
% (query, depth, keys, key_units, key_units)
)
# Reshape from [batch_size, depth] to [batch_size, 1, depth]
# for matmul.
query = tf.expand_dims(query, 1)
# Inner product along the query units dimension.
# matmul shapes: query is [batch_size, 1, depth] and
# keys is [batch_size, max_time, depth].
# the inner product is asked to **transpose keys' inner shape** to get a
# batched matmul on:
# [batch_size, 1, depth] . [batch_size, depth, max_time]
# resulting in an output shape of:
# [batch_size, 1, max_time].
# we then squeeze out the center singleton dimension.
score = tf.matmul(query, keys, transpose_b=True)
score = tf.squeeze(score, [1])
if scale is not None:
score = scale * score
return score
class LuongAttention(AttentionMechanism):
"""Implements Luong-style (multiplicative) attention scoring.
This attention has two forms. The first is standard Luong attention,
as described in:
Minh-Thang Luong, Hieu Pham, Christopher D. Manning.
[Effective Approaches to Attention-based Neural Machine Translation.
EMNLP 2015.](https://arxiv.org/abs/1508.04025)
The second is the scaled form inspired partly by the normalized form of
Bahdanau attention.
To enable the second form, construct the object with parameter
`scale=True`.
"""
@typechecked
def __init__(
self,
units: TensorLike,
memory: Optional[TensorLike] = None,
memory_sequence_length: Optional[TensorLike] = None,
scale: bool = False,
probability_fn: str = "softmax",
dtype: AcceptableDTypes = None,
name: str = "LuongAttention",
**kwargs,
):
"""Construct the AttentionMechanism mechanism.
Args:
units: The depth of the attention mechanism.
memory: The memory to query; usually the output of an RNN encoder.
This tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length: (optional): Sequence lengths for the batch
entries in memory. If provided, the memory tensor rows are masked
with zeros for values past the respective sequence lengths.
scale: Python boolean. Whether to scale the energy term.
          probability_fn: (optional) string, the name of the function used to
            convert the attention score to probabilities. The default is
            `softmax`, which is `tf.nn.softmax`. The other option is
            `hardmax`, which is hardmax() within this module. Any other value
            will result in a validation error.
dtype: The data type for the memory layer of the attention mechanism.
name: Name to use when creating ops.
**kwargs: Dictionary that contains other common arguments for layer
creation.
"""
        # For LuongAttention, we only transform the memory layer; thus
        # num_units **must** match the expected query depth.
self.probability_fn_name = probability_fn
probability_fn = self._process_probability_fn(self.probability_fn_name)
def wrapped_probability_fn(score, _):
return probability_fn(score)
memory_layer = kwargs.pop("memory_layer", None)
if not memory_layer:
memory_layer = tf.keras.layers.Dense(
units, name="memory_layer", use_bias=False, dtype=dtype
)
self.units = units
self.scale = scale
self.scale_weight = None
super().__init__(
memory=memory,
memory_sequence_length=memory_sequence_length,
query_layer=None,
memory_layer=memory_layer,
probability_fn=wrapped_probability_fn,
name=name,
dtype=dtype,
**kwargs,
)
def build(self, input_shape):
super().build(input_shape)
if self.scale and self.scale_weight is None:
self.scale_weight = self.add_weight(
"attention_g", initializer=tf.ones_initializer, shape=()
)
self.built = True
def _calculate_attention(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape
`[batch_size, query_depth]`.
state: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]`
(`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
next_state: Same as the alignments.
"""
score = _luong_score(query, self.keys, self.scale_weight)
alignments = self.probability_fn(score, state)
next_state = alignments
return alignments, next_state
def get_config(self):
config = {
"units": self.units,
"scale": self.scale,
"probability_fn": self.probability_fn_name,
}
base_config = super().get_config()
return {**base_config, **config}
@classmethod
def from_config(cls, config, custom_objects=None):
config = AttentionMechanism.deserialize_inner_layer_from_config(
config, custom_objects=custom_objects
)
return cls(**config)
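# A minimal usage sketch (illustrative only, not part of the module): build a
# LuongAttention over random encoder outputs and query it once, following the
# pattern described in the AttentionMechanism docstring
# (`score = att_obj([query, state])`). All shapes below are assumptions.
def _luong_attention_demo():
    batch_size, max_time, depth, units = 2, 5, 8, 8
    memory = tf.random.normal([batch_size, max_time, depth])
    memory_sequence_length = tf.constant([5, 3])
    attention = LuongAttention(
        units=units,
        memory=memory,
        memory_sequence_length=memory_sequence_length,
    )
    query = tf.random.normal([batch_size, units])
    state = attention.initial_state(batch_size, dtype=tf.float32)
    # alignments has shape [batch_size, max_time]; next_state equals alignments.
    alignments, next_state = attention([query, state])
    return alignments, next_state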
def _bahdanau_score(
processed_query, keys, attention_v, attention_g=None, attention_b=None
):
"""Implements Bahdanau-style (additive) scoring function.
This attention has two forms. The first is Bahdanau attention,
as described in:
Dzmitry Bahdanau, Kyunghyun Cho, Yoshua Bengio.
"Neural Machine Translation by Jointly Learning to Align and Translate."
ICLR 2015. https://arxiv.org/abs/1409.0473
The second is the normalized form. This form is inspired by the
weight normalization article:
Tim Salimans, Diederik P. Kingma.
"Weight Normalization: A Simple Reparameterization to Accelerate
Training of Deep Neural Networks."
https://arxiv.org/abs/1602.07868
    To enable the second form, please pass in attention_g and attention_b.
Args:
processed_query: Tensor, shape `[batch_size, num_units]` to compare to
keys.
keys: Processed memory, shape `[batch_size, max_time, num_units]`.
attention_v: Tensor, shape `[num_units]`.
attention_g: Optional scalar tensor for normalization.
attention_b: Optional tensor with shape `[num_units]` for normalization.
Returns:
A `[batch_size, max_time]` tensor of unnormalized score values.
"""
# Reshape from [batch_size, ...] to [batch_size, 1, ...] for broadcasting.
processed_query = tf.expand_dims(processed_query, 1)
if attention_g is not None and attention_b is not None:
normed_v = (
attention_g
* attention_v
* tf.math.rsqrt(tf.reduce_sum(tf.square(attention_v)))
)
return tf.reduce_sum(
normed_v * tf.tanh(keys + processed_query + attention_b), [2]
)
else:
return tf.reduce_sum(attention_v * tf.tanh(keys + processed_query), [2])
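# Shape sketch (illustrative only): additive scoring of a processed query
# against processed keys; attention_v here is a dummy all-ones vector and the
# shapes are assumptions.
def _bahdanau_score_demo():
    batch_size, max_time, units = 2, 5, 8
    processed_query = tf.random.normal([batch_size, units])
    keys = tf.random.normal([batch_size, max_time, units])
    attention_v = tf.ones([units])
    # Returns an unnormalized score tensor of shape [batch_size, max_time].
    return _bahdanau_score(processed_query, keys, attention_v)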
class BahdanauAttention(AttentionMechanism):
"""Implements Bahdanau-style (additive) attention.
This attention has two forms. The first is Bahdanau attention,
as described in:
Dzmitry Bahdanau, Kyunghyun Cho, Yoshua Bengio.
"Neural Machine Translation by Jointly Learning to Align and Translate."
ICLR 2015. https://arxiv.org/abs/1409.0473
The second is the normalized form. This form is inspired by the
weight normalization article:
Tim Salimans, Diederik P. Kingma.
"Weight Normalization: A Simple Reparameterization to Accelerate
Training of Deep Neural Networks."
https://arxiv.org/abs/1602.07868
To enable the second form, construct the object with parameter
`normalize=True`.
"""
@typechecked
def __init__(
self,
units: TensorLike,
memory: Optional[TensorLike] = None,
memory_sequence_length: Optional[TensorLike] = None,
normalize: bool = False,
probability_fn: str = "softmax",
kernel_initializer: Initializer = "glorot_uniform",
dtype: AcceptableDTypes = None,
name: str = "BahdanauAttention",
**kwargs,
):
"""Construct the Attention mechanism.
Args:
units: The depth of the query mechanism.
memory: The memory to query; usually the output of an RNN encoder.
This tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length: (optional): Sequence lengths for the batch
entries in memory. If provided, the memory tensor rows are masked
with zeros for values past the respective sequence lengths.
normalize: Python boolean. Whether to normalize the energy term.
          probability_fn: (optional) string, the name of the function used to
            convert the attention score to probabilities. The default is
            `softmax`, which is `tf.nn.softmax`. The other option is
            `hardmax`, which is hardmax() within this module. Any other value
            will result in a validation error.
kernel_initializer: (optional), the name of the initializer for the
attention kernel.
dtype: The data type for the query and memory layers of the attention
mechanism.
name: Name to use when creating ops.
**kwargs: Dictionary that contains other common arguments for layer
creation.
"""
self.probability_fn_name = probability_fn
probability_fn = self._process_probability_fn(self.probability_fn_name)
def wrapped_probability_fn(score, _):
return probability_fn(score)
query_layer = kwargs.pop("query_layer", None)
if not query_layer:
query_layer = tf.keras.layers.Dense(
units, name="query_layer", use_bias=False, dtype=dtype
)
memory_layer = kwargs.pop("memory_layer", None)
if not memory_layer:
memory_layer = tf.keras.layers.Dense(
units, name="memory_layer", use_bias=False, dtype=dtype
)
self.units = units
self.normalize = normalize
self.kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self.attention_v = None
self.attention_g = None
self.attention_b = None
super().__init__(
memory=memory,
memory_sequence_length=memory_sequence_length,
query_layer=query_layer,
memory_layer=memory_layer,
probability_fn=wrapped_probability_fn,
name=name,
dtype=dtype,
**kwargs,
)
def build(self, input_shape):
super().build(input_shape)
if self.attention_v is None:
self.attention_v = self.add_weight(
"attention_v",
[self.units],
dtype=self.dtype,
initializer=self.kernel_initializer,
)
if self.normalize and self.attention_g is None and self.attention_b is None:
self.attention_g = self.add_weight(
"attention_g",
initializer=tf.constant_initializer(math.sqrt(1.0 / self.units)),
shape=(),
)
self.attention_b = self.add_weight(
"attention_b", shape=[self.units], initializer=tf.zeros_initializer()
)
self.built = True
def _calculate_attention(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape
`[batch_size, query_depth]`.
state: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]`
(`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
next_state: same as alignments.
"""
processed_query = self.query_layer(query) if self.query_layer else query
score = _bahdanau_score(
processed_query,
self.keys,
self.attention_v,
attention_g=self.attention_g,
attention_b=self.attention_b,
)
alignments = self.probability_fn(score, state)
next_state = alignments
return alignments, next_state
def get_config(self):
# yapf: disable
config = {
"units": self.units,
"normalize": self.normalize,
"probability_fn": self.probability_fn_name,
"kernel_initializer": tf.keras.initializers.serialize(
self.kernel_initializer)
}
# yapf: enable
base_config = super().get_config()
return {**base_config, **config}
@classmethod
def from_config(cls, config, custom_objects=None):
config = AttentionMechanism.deserialize_inner_layer_from_config(
config, custom_objects=custom_objects
)
return cls(**config)
def safe_cumprod(x: TensorLike, *args, **kwargs) -> tf.Tensor:
"""Computes cumprod of x in logspace using cumsum to avoid underflow.
The cumprod function and its gradient can result in numerical instabilities
when its argument has very small and/or zero values. As long as the
argument is all positive, we can instead compute the cumulative product as
exp(cumsum(log(x))). This function can be called identically to
tf.cumprod.
Args:
x: Tensor to take the cumulative product of.
*args: Passed on to cumsum; these are identical to those in cumprod.
**kwargs: Passed on to cumsum; these are identical to those in cumprod.
Returns:
Cumulative product of x.
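Example (an illustrative call; the values are arbitrary):
>>> x = tf.constant([0.5, 0.5, 0.5])
>>> y = safe_cumprod(x)  # approximately [0.5, 0.25, 0.125]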
"""
with tf.name_scope("SafeCumprod"):
x = tf.convert_to_tensor(x, name="x")
tiny = np.finfo(x.dtype.as_numpy_dtype).tiny
return tf.exp(
tf.cumsum(tf.math.log(tf.clip_by_value(x, tiny, 1)), *args, **kwargs)
)
def monotonic_attention(
p_choose_i: FloatTensorLike, previous_attention: FloatTensorLike, mode: str
) -> tf.Tensor:
"""Computes monotonic attention distribution from choosing probabilities.
Monotonic attention implies that the input sequence is processed in an
explicitly left-to-right manner when generating the output sequence. In
addition, once an input sequence element is attended to at a given output
timestep, elements occurring before it cannot be attended to at subsequent
output timesteps. This function generates attention distributions
according to these assumptions. For more information, see `Online and
Linear-Time Attention by Enforcing Monotonic Alignments`.
Args:
p_choose_i: Probability of choosing input sequence/memory element i.
Should be of shape (batch_size, input_sequence_length), and should all
be in the range [0, 1].
previous_attention: The attention distribution from the previous output
timestep. Should be of shape (batch_size, input_sequence_length). For
the first output timestep, previous_attention[n] should be
[1, 0, 0, ..., 0] for all n in [0, ... batch_size - 1].
mode: How to compute the attention distribution. Must be one of
'recursive', 'parallel', or 'hard'.
* 'recursive' uses tf.scan to recursively compute the distribution.
This is slowest but is exact, general, and does not suffer from
numerical instabilities.
* 'parallel' uses parallelized cumulative-sum and cumulative-product
operations to compute a closed-form solution to the recurrence
relation defining the attention distribution. This makes it more
efficient than 'recursive', but it requires numerical checks which
make the distribution non-exact. This can be a problem in
particular when input_sequence_length is long and/or p_choose_i has
entries very close to 0 or 1.
* 'hard' requires that the probabilities in p_choose_i are all either
0 or 1, and subsequently uses a more efficient and exact solution.
Returns:
A tensor of shape (batch_size, input_sequence_length) representing the
attention distributions for each sequence in the batch.
Raises:
ValueError: mode is not one of 'recursive', 'parallel', 'hard'.
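Example (an illustrative 'parallel'-mode call with a batch of one; the
probabilities are arbitrary but valid):
>>> p_choose = tf.constant([[0.1, 0.2, 0.7]])
>>> previous = tf.constant([[1.0, 0.0, 0.0]])
>>> attention = tfa.seq2seq.monotonic_attention(p_choose, previous, "parallel")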
"""
# Force things to be tensors
p_choose_i = tf.convert_to_tensor(p_choose_i, name="p_choose_i")
previous_attention = tf.convert_to_tensor(
previous_attention, name="previous_attention"
)
if mode == "recursive":
# Use .shape[0] when it's not None, or fall back on symbolic shape
batch_size = p_choose_i.shape[0] or tf.shape(p_choose_i)[0]
# Compute [1, 1 - p_choose_i[0], 1 - p_choose_i[1], ...,
# 1 - p_choose_i[-2]]
shifted_1mp_choose_i = tf.concat(
[tf.ones((batch_size, 1)), 1 - p_choose_i[:, :-1]], 1
)
# Compute attention distribution recursively as
# q[i] = (1 - p_choose_i[i - 1])*q[i - 1] + previous_attention[i]
# attention[i] = p_choose_i[i]*q[i]
attention = p_choose_i * tf.transpose(
tf.scan(
# Need to use reshape to remind TF of the shape between loop
# iterations
lambda x, yz: tf.reshape(yz[0] * x + yz[1], (batch_size,)),
# Loop variables yz[0] and yz[1]
[tf.transpose(shifted_1mp_choose_i), tf.transpose(previous_attention)],
# Initial value of x is just zeros
tf.zeros((batch_size,)),
)
)
elif mode == "parallel":
# safe_cumprod computes cumprod in logspace with numeric checks
cumprod_1mp_choose_i = safe_cumprod(1 - p_choose_i, axis=1, exclusive=True)
# Compute recurrence relation solution
attention = (
p_choose_i
* cumprod_1mp_choose_i
* tf.cumsum(
previous_attention /
# Clip cumprod_1mp to avoid divide-by-zero
tf.clip_by_value(cumprod_1mp_choose_i, 1e-10, 1.0),
axis=1,
)
)
elif mode == "hard":
# Remove any probabilities before the index chosen last time step
p_choose_i *= tf.cumsum(previous_attention, axis=1)
# Now, use exclusive cumprod to remove probabilities after the first
# chosen index, like so:
# p_choose_i = [0, 0, 0, 1, 1, 0, 1, 1]
# cumprod(1 - p_choose_i, exclusive=True) = [1, 1, 1, 1, 0, 0, 0, 0]
# Product of above: [0, 0, 0, 1, 0, 0, 0, 0]
attention = p_choose_i * tf.math.cumprod(1 - p_choose_i, axis=1, exclusive=True)
else:
raise ValueError("mode must be 'recursive', 'parallel', or 'hard'.")
return attention
def _monotonic_probability_fn(
score, previous_alignments, sigmoid_noise, mode, seed=None
):
"""Attention probability function for monotonic attention.
Takes in unnormalized attention scores, adds pre-sigmoid noise to encourage
the model to make discrete attention decisions, passes them through a
sigmoid to obtain "choosing" probabilities, and then calls
monotonic_attention to obtain the attention distribution. For more
information, see
Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,
"Online and Linear-Time Attention by Enforcing Monotonic Alignments."
ICML 2017. https://arxiv.org/abs/1704.00784
Args:
score: Unnormalized attention scores, shape
`[batch_size, alignments_size]`
previous_alignments: Previous attention distribution, shape
`[batch_size, alignments_size]`
sigmoid_noise: Standard deviation of pre-sigmoid noise. Setting this
larger than 0 will encourage the model to produce large attention
scores, effectively making the choosing probabilities discrete and the
resulting attention distribution one-hot. It should be set to 0 at
test-time, and when hard attention is not desired.
mode: How to compute the attention distribution. Must be one of
'recursive', 'parallel', or 'hard'. See the docstring for
`tfa.seq2seq.monotonic_attention` for more information.
seed: (optional) Random seed for pre-sigmoid noise.
Returns:
A `[batch_size, alignments_size]`-shape tensor corresponding to the
resulting attention distribution.
"""
# Optionally add pre-sigmoid noise to the scores
if sigmoid_noise > 0:
noise = tf.random.normal(tf.shape(score), dtype=score.dtype, seed=seed)
score += sigmoid_noise * noise
# Compute "choosing" probabilities from the attention scores
if mode == "hard":
# When mode is hard, use a hard sigmoid
p_choose_i = tf.cast(score > 0, score.dtype)
else:
p_choose_i = tf.sigmoid(score)
# Convert from choosing probabilities to attention distribution
return monotonic_attention(p_choose_i, previous_alignments, mode)
class _BaseMonotonicAttentionMechanism(AttentionMechanism):
"""Base attention mechanism for monotonic attention.
Simply overrides the initial_alignments function to provide a Dirac
distribution, which is needed in order for the monotonic attention
distributions to have the correct behavior.
"""
def initial_alignments(self, batch_size, dtype):
"""Creates the initial alignment values for the monotonic attentions.
Initializes to Dirac distributions, i.e.
[1, 0, 0, ...memory length..., 0] for all entries in the batch.
Args:
batch_size: `int32` scalar, the batch_size.
dtype: The `dtype`.
Returns:
A `dtype` tensor shaped `[batch_size, alignments_size]`
(`alignments_size` is the values' `max_time`).
"""
max_time = self._alignments_size
return tf.one_hot(
tf.zeros((batch_size,), dtype=tf.int32), max_time, dtype=dtype
)
class BahdanauMonotonicAttention(_BaseMonotonicAttentionMechanism):
"""Monotonic attention mechanism with Bahdanau-style energy function.
This type of attention enforces a monotonic constraint on the attention
distributions; that is, once the model attends to a given point in the
memory, it can't attend to any prior points at subsequent output timesteps.
It achieves this by using the `_monotonic_probability_fn` instead of `softmax`
to construct its attention distributions. Since the attention scores are
passed through a sigmoid, a learnable scalar bias parameter is applied
after the score function and before the sigmoid. Otherwise, it is
equivalent to `tfa.seq2seq.BahdanauAttention`. This approach is proposed in
Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,
"Online and Linear-Time Attention by Enforcing Monotonic Alignments."
ICML 2017. https://arxiv.org/abs/1704.00784
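Example (a minimal construction sketch; the toy `memory` tensor, sizes, and
hyperparameter values are illustrative only):
>>> memory = tf.random.uniform([4, 7, 16])  # [batch_size, max_time, depth]
>>> attention_mechanism = tfa.seq2seq.BahdanauMonotonicAttention(
...     16, memory=memory, sigmoid_noise=1.0, score_bias_init=-4.0)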
"""
@typechecked
def __init__(
self,
units: TensorLike,
memory: Optional[TensorLike] = None,
memory_sequence_length: Optional[TensorLike] = None,
normalize: bool = False,
sigmoid_noise: FloatTensorLike = 0.0,
sigmoid_noise_seed: Optional[FloatTensorLike] = None,
score_bias_init: FloatTensorLike = 0.0,
mode: str = "parallel",
kernel_initializer: Initializer = "glorot_uniform",
dtype: AcceptableDTypes = None,
name: str = "BahdanauMonotonicAttention",
**kwargs,
):
"""Construct the attention mechanism.
Args:
units: The depth of the query mechanism.
memory: The memory to query; usually the output of an RNN encoder.
This tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length: (optional): Sequence lengths for the batch
entries in memory. If provided, the memory tensor rows are masked
with zeros for values past the respective sequence lengths.
normalize: Python boolean. Whether to normalize the energy term.
sigmoid_noise: Standard deviation of pre-sigmoid noise. See the
docstring for `_monotonic_probability_fn` for more information.
sigmoid_noise_seed: (optional) Random seed for pre-sigmoid noise.
score_bias_init: Initial value for score bias scalar. It's
recommended to initialize this to a negative value when the length
of the memory is large.
mode: How to compute the attention distribution. Must be one of
'recursive', 'parallel', or 'hard'. See the docstring for
`tfa.seq2seq.monotonic_attention` for more information.
kernel_initializer: (optional), the name of the initializer for the
attention kernel.
dtype: The data type for the query and memory layers of the attention
mechanism.
name: Name to use when creating ops.
**kwargs: Dictionary that contains other common arguments for layer
creation.
"""
# Set up the monotonic probability fn with supplied parameters
wrapped_probability_fn = functools.partial(
_monotonic_probability_fn,
sigmoid_noise=sigmoid_noise,
mode=mode,
seed=sigmoid_noise_seed,
)
query_layer = kwargs.pop("query_layer", None)
if not query_layer:
query_layer = tf.keras.layers.Dense(
units, name="query_layer", use_bias=False, dtype=dtype
)
memory_layer = kwargs.pop("memory_layer", None)
if not memory_layer:
memory_layer = tf.keras.layers.Dense(
units, name="memory_layer", use_bias=False, dtype=dtype
)
self.units = units
self.normalize = normalize
self.sigmoid_noise = sigmoid_noise
self.sigmoid_noise_seed = sigmoid_noise_seed
self.score_bias_init = score_bias_init
self.mode = mode
self.kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self.attention_v = None
self.attention_score_bias = None
self.attention_g = None
self.attention_b = None
super().__init__(
memory=memory,
memory_sequence_length=memory_sequence_length,
query_layer=query_layer,
memory_layer=memory_layer,
probability_fn=wrapped_probability_fn,
name=name,
dtype=dtype,
**kwargs,
)
def build(self, input_shape):
super().build(input_shape)
if self.attention_v is None:
self.attention_v = self.add_weight(
"attention_v",
[self.units],
dtype=self.dtype,
initializer=self.kernel_initializer,
)
if self.attention_score_bias is None:
self.attention_score_bias = self.add_weight(
"attention_score_bias",
shape=(),
dtype=self.dtype,
initializer=tf.constant_initializer(self.score_bias_init),
)
if self.normalize and self.attention_g is None and self.attention_b is None:
self.attention_g = self.add_weight(
"attention_g",
dtype=self.dtype,
initializer=tf.constant_initializer(math.sqrt(1.0 / self.units)),
shape=(),
)
self.attention_b = self.add_weight(
"attention_b",
[self.units],
dtype=self.dtype,
initializer=tf.zeros_initializer(),
)
self.built = True
def _calculate_attention(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape
`[batch_size, query_depth]`.
state: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]`
(`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
"""
processed_query = self.query_layer(query) if self.query_layer else query
score = _bahdanau_score(
processed_query,
self.keys,
self.attention_v,
attention_g=self.attention_g,
attention_b=self.attention_b,
)
score += self.attention_score_bias
alignments = self.probability_fn(score, state)
next_state = alignments
return alignments, next_state
def get_config(self):
# yapf: disable
config = {
"units": self.units,
"normalize": self.normalize,
"sigmoid_noise": self.sigmoid_noise,
"sigmoid_noise_seed": self.sigmoid_noise_seed,
"score_bias_init": self.score_bias_init,
"mode": self.mode,
"kernel_initializer": tf.keras.initializers.serialize(
self.kernel_initializer),
}
# yapf: enable
base_config = super().get_config()
return {**base_config, **config}
@classmethod
def from_config(cls, config, custom_objects=None):
config = AttentionMechanism.deserialize_inner_layer_from_config(
config, custom_objects=custom_objects
)
return cls(**config)
class LuongMonotonicAttention(_BaseMonotonicAttentionMechanism):
"""Monotonic attention mechanism with Luong-style energy function.
This type of attention enforces a monotonic constraint on the attention
distributions; that is, once the model attends to a given point in the
memory, it can't attend to any prior points at subsequent output timesteps.
It achieves this by using the `_monotonic_probability_fn` instead of `softmax`
to construct its attention distributions. Otherwise, it is equivalent to
`tfa.seq2seq.LuongAttention`. This approach is proposed in
[Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,
"Online and Linear-Time Attention by Enforcing Monotonic Alignments."
ICML 2017.](https://arxiv.org/abs/1704.00784)
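Example (a minimal construction sketch; the toy `memory` tensor and sizes
are illustrative only):
>>> memory = tf.random.uniform([4, 7, 16])  # [batch_size, max_time, depth]
>>> attention_mechanism = tfa.seq2seq.LuongMonotonicAttention(16, memory=memory)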
"""
@typechecked
def __init__(
self,
units: TensorLike,
memory: Optional[TensorLike] = None,
memory_sequence_length: Optional[TensorLike] = None,
scale: bool = False,
sigmoid_noise: FloatTensorLike = 0.0,
sigmoid_noise_seed: Optional[FloatTensorLike] = None,
score_bias_init: FloatTensorLike = 0.0,
mode: str = "parallel",
dtype: AcceptableDTypes = None,
name: str = "LuongMonotonicAttention",
**kwargs,
):
"""Construct the attention mechanism.
Args:
units: The depth of the query mechanism.
memory: The memory to query; usually the output of an RNN encoder.
This tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length: (optional): Sequence lengths for the batch
entries in memory. If provided, the memory tensor rows are masked
with zeros for values past the respective sequence lengths.
scale: Python boolean. Whether to scale the energy term.
sigmoid_noise: Standard deviation of pre-sigmoid noise. See the
docstring for `_monotonic_probability_fn` for more information.
sigmoid_noise_seed: (optional) Random seed for pre-sigmoid noise.
score_bias_init: Initial value for score bias scalar. It's
recommended to initialize this to a negative value when the length
of the memory is large.
mode: How to compute the attention distribution. Must be one of
'recursive', 'parallel', or 'hard'. See the docstring for
`tfa.seq2seq.monotonic_attention` for more information.
dtype: The data type for the query and memory layers of the attention
mechanism.
name: Name to use when creating ops.
**kwargs: Dictionary that contains other common arguments for layer
creation.
"""
# Set up the monotonic probability fn with supplied parameters
wrapped_probability_fn = functools.partial(
_monotonic_probability_fn,
sigmoid_noise=sigmoid_noise,
mode=mode,
seed=sigmoid_noise_seed,
)
memory_layer = kwargs.pop("memory_layer", None)
if not memory_layer:
memory_layer = tf.keras.layers.Dense(
units, name="memory_layer", use_bias=False, dtype=dtype
)
self.units = units
self.scale = scale
self.sigmoid_noise = sigmoid_noise
self.sigmoid_noise_seed = sigmoid_noise_seed
self.score_bias_init = score_bias_init
self.mode = mode
self.attention_g = None
self.attention_score_bias = None
super().__init__(
memory=memory,
memory_sequence_length=memory_sequence_length,
query_layer=None,
memory_layer=memory_layer,
probability_fn=wrapped_probability_fn,
name=name,
dtype=dtype,
**kwargs,
)
def build(self, input_shape):
super().build(input_shape)
if self.scale and self.attention_g is None:
self.attention_g = self.add_weight(
"attention_g", initializer=tf.ones_initializer, shape=()
)
if self.attention_score_bias is None:
self.attention_score_bias = self.add_weight(
"attention_score_bias",
shape=(),
initializer=tf.constant_initializer(self.score_bias_init),
)
self.built = True
def _calculate_attention(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape
`[batch_size, query_depth]`.
state: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]`
(`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
next_state: Same as alignments
"""
score = _luong_score(query, self.keys, self.attention_g)
score += self.attention_score_bias
alignments = self.probability_fn(score, state)
next_state = alignments
return alignments, next_state
def get_config(self):
config = {
"units": self.units,
"scale": self.scale,
"sigmoid_noise": self.sigmoid_noise,
"sigmoid_noise_seed": self.sigmoid_noise_seed,
"score_bias_init": self.score_bias_init,
"mode": self.mode,
}
base_config = super().get_config()
return {**base_config, **config}
@classmethod
def from_config(cls, config, custom_objects=None):
config = AttentionMechanism.deserialize_inner_layer_from_config(
config, custom_objects=custom_objects
)
return cls(**config)
class AttentionWrapperState(
collections.namedtuple(
"AttentionWrapperState",
(
"cell_state",
"attention",
"alignments",
"alignment_history",
"attention_state",
),
)
):
"""State of a `tfa.seq2seq.AttentionWrapper`.
Attributes:
cell_state: The state of the wrapped RNN cell at the previous time
step.
attention: The attention emitted at the previous time step.
alignments: A single or tuple of `Tensor`(s) containing the
alignments emitted at the previous time step for each attention
mechanism.
alignment_history: (if enabled) a single or tuple of `TensorArray`(s)
containing alignment matrices from all time steps for each attention
mechanism. Call `stack()` on each to convert to a `Tensor`.
attention_state: A single or tuple of nested objects
containing attention mechanism state for each attention mechanism.
The objects may contain Tensors or TensorArrays.
"""
def clone(self, **kwargs):
"""Clone this object, overriding components provided by kwargs.
The new state fields' shape must match original state fields' shape.
This will be validated, and original fields' shape will be propagated
to new fields.
Example:
>>> batch_size = 1
>>> memory = tf.random.normal(shape=[batch_size, 3, 100])
>>> encoder_state = [tf.zeros((batch_size, 100)), tf.zeros((batch_size, 100))]
>>> attention_mechanism = tfa.seq2seq.LuongAttention(100, memory=memory, memory_sequence_length=[3] * batch_size)
>>> attention_cell = tfa.seq2seq.AttentionWrapper(tf.keras.layers.LSTMCell(100), attention_mechanism, attention_layer_size=10)
>>> decoder_initial_state = attention_cell.get_initial_state(batch_size=batch_size, dtype=tf.float32)
>>> decoder_initial_state = decoder_initial_state.clone(cell_state=encoder_state)
Args:
**kwargs: Any properties of the state object to replace in the
returned `AttentionWrapperState`.
Returns:
A new `AttentionWrapperState` whose properties are the same as
this one, except any overridden properties as provided in `kwargs`.
"""
def with_same_shape(old, new):
"""Check and set new tensor's shape."""
if isinstance(old, tf.Tensor) and isinstance(new, tf.Tensor):
if not tf.executing_eagerly():
new_shape = tf.shape(new)
old_shape = tf.shape(old)
assert_equal = tf.debugging.assert_equal(new_shape, old_shape)
with tf.control_dependencies([assert_equal]):
# Add an identity op so that control deps can kick in.
return tf.identity(new)
else:
if old.shape.as_list() != new.shape.as_list():
raise ValueError(
"The shape of the AttentionWrapperState is "
"expected to be same as the one to clone. "
"self.shape: %s, input.shape: %s" % (old.shape, new.shape)
)
return new
return new
return tf.nest.map_structure(with_same_shape, self, super()._replace(**kwargs))
def _prepare_memory(
memory, memory_sequence_length=None, memory_mask=None, check_inner_dims_defined=True
):
"""Convert to tensor and possibly mask `memory`.
Args:
memory: `Tensor`, shaped `[batch_size, max_time, ...]`.
memory_sequence_length: `int32` `Tensor`, shaped `[batch_size]`.
memory_mask: `boolean` tensor with shape [batch_size, max_time]. The
memory should be skipped when the corresponding mask is False.
check_inner_dims_defined: Python boolean. If `True`, the `memory`
argument's shape is checked to ensure all but the two outermost
dimensions are fully defined.
Returns:
A (possibly masked), checked, new `memory`.
Raises:
ValueError: If `check_inner_dims_defined` is `True` and not
`memory.shape[2:].is_fully_defined()`.
"""
memory = tf.nest.map_structure(
lambda m: tf.convert_to_tensor(m, name="memory"), memory
)
if memory_sequence_length is not None and memory_mask is not None:
raise ValueError(
"memory_sequence_length and memory_mask can't be provided at same time."
)
if memory_sequence_length is not None:
memory_sequence_length = tf.convert_to_tensor(
memory_sequence_length, name="memory_sequence_length"
)
if check_inner_dims_defined:
def _check_dims(m):
if not m.shape[2:].is_fully_defined():
raise ValueError(
"Expected memory %s to have fully defined inner dims, "
"but saw shape: %s" % (m.name, m.shape)
)
tf.nest.map_structure(_check_dims, memory)
if memory_sequence_length is None and memory_mask is None:
return memory
elif memory_sequence_length is not None:
seq_len_mask = tf.sequence_mask(
memory_sequence_length,
maxlen=tf.shape(tf.nest.flatten(memory)[0])[1],
dtype=tf.nest.flatten(memory)[0].dtype,
)
else:
# For memory_mask is not None
seq_len_mask = tf.cast(memory_mask, dtype=tf.nest.flatten(memory)[0].dtype)
def _maybe_mask(m, seq_len_mask):
"""Mask the memory based on the memory mask."""
rank = m.shape.ndims
rank = rank if rank is not None else tf.rank(m)
extra_ones = tf.ones(rank - 2, dtype=tf.int32)
seq_len_mask = tf.reshape(
seq_len_mask, tf.concat((tf.shape(seq_len_mask), extra_ones), 0)
)
return m * seq_len_mask
return tf.nest.map_structure(lambda m: _maybe_mask(m, seq_len_mask), memory)
def _maybe_mask_score(
score, memory_sequence_length=None, memory_mask=None, score_mask_value=None
):
"""Mask the attention score based on the masks."""
if memory_sequence_length is None and memory_mask is None:
return score
if memory_sequence_length is not None and memory_mask is not None:
raise ValueError(
"memory_sequence_length and memory_mask can't be provided at same time."
)
if memory_sequence_length is not None:
message = "All values in memory_sequence_length must greater than zero."
with tf.control_dependencies(
[
tf.debugging.assert_positive( # pylint: disable=bad-continuation
memory_sequence_length, message=message
)
]
):
memory_mask = tf.sequence_mask(
memory_sequence_length, maxlen=tf.shape(score)[1]
)
score_mask_values = score_mask_value * tf.ones_like(score)
return tf.where(memory_mask, score, score_mask_values)
def hardmax(logits: TensorLike, name: Optional[str] = None) -> tf.Tensor:
"""Returns batched one-hot vectors.
The depth index containing the `1` is that of the maximum logit value.
Args:
logits: A batch tensor of logit values.
name: Name to use when creating ops.
Returns:
A batched one-hot tensor.
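Example (an illustrative call; `hardmax` here is the function defined in
this module):
>>> logits = tf.constant([[1.0, 3.0, 2.0]])
>>> one_hot = hardmax(logits)  # one-hot at index 1: [[0., 1., 0.]]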
"""
with tf.name_scope(name or "Hardmax"):
logits = tf.convert_to_tensor(logits, name="logits")
depth = logits.shape[-1] or tf.shape(logits)[-1]
return tf.one_hot(tf.argmax(logits, -1), depth, dtype=logits.dtype)
def _compute_attention(
attention_mechanism, cell_output, attention_state, attention_layer
):
"""Computes the attention and alignments for a given
attention_mechanism."""
alignments, next_attention_state = attention_mechanism(
[cell_output, attention_state]
)
# Reshape from [batch_size, memory_time] to [batch_size, 1, memory_time]
expanded_alignments = tf.expand_dims(alignments, 1)
# Context is the inner product of alignments and values along the
# memory time dimension.
# alignments shape is
# [batch_size, 1, memory_time]
# attention_mechanism.values shape is
# [batch_size, memory_time, memory_size]
# the batched matmul is over memory_time, so the output shape is
# [batch_size, 1, memory_size].
# we then squeeze out the singleton dim.
context_ = tf.matmul(expanded_alignments, attention_mechanism.values)
context_ = tf.squeeze(context_, [1])
if attention_layer is not None:
attention = attention_layer(tf.concat([cell_output, context_], 1))
else:
attention = context_
return attention, alignments, next_attention_state
class AttentionWrapper(tf.keras.layers.AbstractRNNCell):
"""Wraps another RNN cell with attention.
Example:
>>> batch_size = 4
>>> max_time = 7
>>> hidden_size = 32
>>>
>>> memory = tf.random.uniform([batch_size, max_time, hidden_size])
>>> memory_sequence_length = tf.fill([batch_size], max_time)
>>>
>>> attention_mechanism = tfa.seq2seq.LuongAttention(hidden_size)
>>> attention_mechanism.setup_memory(memory, memory_sequence_length)
>>>
>>> cell = tf.keras.layers.LSTMCell(hidden_size)
>>> cell = tfa.seq2seq.AttentionWrapper(
... cell, attention_mechanism, attention_layer_size=hidden_size)
>>>
>>> inputs = tf.random.uniform([batch_size, hidden_size])
>>> state = cell.get_initial_state(inputs)
>>>
>>> outputs, state = cell(inputs, state)
>>> outputs.shape
TensorShape([4, 32])
"""
@typechecked
def __init__(
self,
cell: tf.keras.layers.Layer,
attention_mechanism: Union[AttentionMechanism, List[AttentionMechanism]],
attention_layer_size: Optional[Union[Number, List[Number]]] = None,
alignment_history: bool = False,
cell_input_fn: Optional[Callable] = None,
output_attention: bool = True,
initial_cell_state: Optional[TensorLike] = None,
name: Optional[str] = None,
attention_layer: Optional[
Union[tf.keras.layers.Layer, List[tf.keras.layers.Layer]]
] = None,
attention_fn: Optional[Callable] = None,
**kwargs,
):
"""Construct the `AttentionWrapper`.
**NOTE** If you are using the `tfa.seq2seq.BeamSearchDecoder` with a cell wrapped
in `AttentionWrapper`, then you must ensure that:
- The encoder output has been tiled to `beam_width` via
`tfa.seq2seq.tile_batch` (NOT `tf.tile`).
- The `batch_size` argument passed to the `get_initial_state` method of
this wrapper is equal to `true_batch_size * beam_width`.
- The initial state created with `get_initial_state` above contains a
`cell_state` value containing properly tiled final state from the
encoder.
An example:
>>> batch_size = 1
>>> beam_width = 5
>>> sequence_length = tf.convert_to_tensor([5])
>>> encoder_outputs = tf.random.uniform(shape=(batch_size, 5, 10))
>>> encoder_final_state = [tf.zeros((batch_size, 10)), tf.zeros((batch_size, 10))]
>>> tiled_encoder_outputs = tfa.seq2seq.tile_batch(encoder_outputs, multiplier=beam_width)
>>> tiled_encoder_final_state = tfa.seq2seq.tile_batch(encoder_final_state, multiplier=beam_width)
>>> tiled_sequence_length = tfa.seq2seq.tile_batch(sequence_length, multiplier=beam_width)
>>> attention_mechanism = tfa.seq2seq.BahdanauAttention(10, memory=tiled_encoder_outputs, memory_sequence_length=tiled_sequence_length)
>>> attention_cell = tfa.seq2seq.AttentionWrapper(tf.keras.layers.LSTMCell(10), attention_mechanism)
>>> decoder_initial_state = attention_cell.get_initial_state(batch_size=batch_size * beam_width, dtype=tf.float32)
>>> decoder_initial_state = decoder_initial_state.clone(cell_state=tiled_encoder_final_state)
Args:
cell: A layer that implements the `tf.keras.layers.AbstractRNNCell`
interface.
attention_mechanism: A list of `tfa.seq2seq.AttentionMechanism`
instances or a single instance.
attention_layer_size: A list of Python integers or a single Python
integer, the depth of the attention (output) layer(s). If `None`
(default), use the context as attention at each time step.
Otherwise, feed the context and cell output into the attention
layer to generate attention at each time step. If
`attention_mechanism` is a list, `attention_layer_size` must be a list
of the same length. If `attention_layer` is set, this must be `None`.
If `attention_fn` is set, it must be guaranteed that the outputs of
`attention_fn` also meet the above requirements.
alignment_history: Python boolean, whether to store alignment history
from all time steps in the final output state (currently stored as
a time major `TensorArray` on which you must call `stack()`).
cell_input_fn: (optional) A `callable`. The default is:
`lambda inputs, attention:
tf.concat([inputs, attention], -1)`.
output_attention: Python bool. If `True` (default), the output at
each time step is the attention value. This is the behavior of
Luong-style attention mechanisms. If `False`, the output at each
time step is the output of `cell`. This is the behavior of
Bahdanau-style attention mechanisms. In both cases, the
`attention` tensor is propagated to the next time step via the
state and is used there. This flag only controls whether the
attention mechanism is propagated up to the next cell in an RNN
stack or to the top RNN output.
initial_cell_state: The initial state value to use for the cell when
the user calls `get_initial_state()`. Note that if this value is
provided now, and the user uses a `batch_size` argument of
`get_initial_state` which does not match the batch size of
`initial_cell_state`, proper behavior is not guaranteed.
name: Name to use when creating ops.
attention_layer: A list of `tf.keras.layers.Layer` instances or a
single `tf.keras.layers.Layer` instance taking the context
and cell output as inputs to generate attention at each time step.
If `None` (default), use the context as attention at each time step.
If `attention_mechanism` is a list, `attention_layer` must be a list of
the same length. If `attention_layer_size` is set, this must be
`None`.
attention_fn: An optional callable function that allows users to
provide their own customized attention function, which takes input
`(attention_mechanism, cell_output, attention_state,
attention_layer)` and outputs `(attention, alignments,
next_attention_state)`. If provided, the `attention_layer_size` should
be the size of the outputs of `attention_fn`.
**kwargs: Other keyword arguments for layer creation.
Raises:
TypeError: `attention_layer_size` is not `None` and
(`attention_mechanism` is a list but `attention_layer_size` is not;
or vice versa).
ValueError: if `attention_layer_size` is not `None`,
`attention_mechanism` is a list, and its length does not match that
of `attention_layer_size`; if `attention_layer_size` and
`attention_layer` are set simultaneously.
"""
super().__init__(name=name, **kwargs)
keras_utils.assert_like_rnncell("cell", cell)
if isinstance(attention_mechanism, (list, tuple)):
self._is_multi = True
attention_mechanisms = list(attention_mechanism)
else:
self._is_multi = False
attention_mechanisms = [attention_mechanism]
if cell_input_fn is None:
def cell_input_fn(inputs, attention):
return tf.concat([inputs, attention], -1)
if attention_layer_size is not None and attention_layer is not None:
raise ValueError(
"Only one of attention_layer_size and attention_layer should be set"
)
if attention_layer_size is not None:
attention_layer_sizes = tuple(
attention_layer_size
if isinstance(attention_layer_size, (list, tuple))
else (attention_layer_size,)
)
if len(attention_layer_sizes) != len(attention_mechanisms):
raise ValueError(
"If provided, attention_layer_size must contain exactly "
"one integer per attention_mechanism, saw: %d vs %d"
% (len(attention_layer_sizes), len(attention_mechanisms))
)
dtype = kwargs.get("dtype", None)
self._attention_layers = list(
tf.keras.layers.Dense(
attention_layer_size,
name="attention_layer",
use_bias=False,
dtype=dtype,
)
for i, attention_layer_size in enumerate(attention_layer_sizes)
)
elif attention_layer is not None:
self._attention_layers = list(
attention_layer
if isinstance(attention_layer, (list, tuple))
else (attention_layer,)
)
if len(self._attention_layers) != len(attention_mechanisms):
raise ValueError(
"If provided, attention_layer must contain exactly one "
"layer per attention_mechanism, saw: %d vs %d"
% (len(self._attention_layers), len(attention_mechanisms))
)
else:
self._attention_layers = None
if attention_fn is None:
attention_fn = _compute_attention
self._attention_fn = attention_fn
self._attention_layer_size = None
self._cell = cell
self._attention_mechanisms = attention_mechanisms
self._cell_input_fn = cell_input_fn
self._output_attention = output_attention
self._alignment_history = alignment_history
with tf.name_scope(name or "AttentionWrapperInit"):
if initial_cell_state is None:
self._initial_cell_state = None
else:
final_state_tensor = tf.nest.flatten(initial_cell_state)[-1]
state_batch_size = (
final_state_tensor.shape[0] or tf.shape(final_state_tensor)[0]
)
error_message = (
"When constructing AttentionWrapper %s: " % self.name
+ "Non-matching batch sizes between the memory "
"(encoder output) and initial_cell_state. Are you using "
"the BeamSearchDecoder? You may need to tile your "
"initial state via the tfa.seq2seq.tile_batch "
"function with argument multiple=beam_width."
)
with tf.control_dependencies(
self._batch_size_checks( # pylint: disable=bad-continuation
state_batch_size, error_message
)
):
self._initial_cell_state = tf.nest.map_structure(
lambda s: tf.identity(s, name="check_initial_cell_state"),
initial_cell_state,
)
def _attention_mechanisms_checks(self):
for attention_mechanism in self._attention_mechanisms:
if not attention_mechanism.memory_initialized:
raise ValueError(
"The AttentionMechanism instances passed to "
"this AttentionWrapper should be initialized "
"with a memory first, either by passing it "
"to the AttentionMechanism constructor or "
"calling attention_mechanism.setup_memory()"
)
def _batch_size_checks(self, batch_size, error_message):
self._attention_mechanisms_checks()
return [
tf.debugging.assert_equal(
batch_size, attention_mechanism.batch_size, message=error_message
)
for attention_mechanism in self._attention_mechanisms
]
def _get_attention_layer_size(self):
if self._attention_layer_size is not None:
return self._attention_layer_size
self._attention_mechanisms_checks()
attention_output_sizes = (
attention_mechanism.values.shape[-1]
for attention_mechanism in self._attention_mechanisms
)
if self._attention_layers is None:
self._attention_layer_size = sum(attention_output_sizes)
else:
# Compute the layer output size from its input which is the
# concatenation of the cell output and the attention mechanism
# output.
self._attention_layer_size = sum(
layer.compute_output_shape(
[None, self._cell.output_size + attention_output_size]
)[-1]
for layer, attention_output_size in zip(
self._attention_layers, attention_output_sizes
)
)
return self._attention_layer_size
def _item_or_tuple(self, seq):
"""Returns `seq` as tuple or the singular element.
Which is returned is determined by how the AttentionMechanism(s) were
passed to the constructor.
Args:
seq: A non-empty sequence of items or generator.
Returns:
Either the values in the sequence as a tuple if
AttentionMechanism(s) were passed to the constructor as a sequence
or the singular element.
"""
t = tuple(seq)
if self._is_multi:
return t
else:
return t[0]
@property
def output_size(self):
if self._output_attention:
return self._get_attention_layer_size()
else:
return self._cell.output_size
@property
def state_size(self):
"""The `state_size` property of `tfa.seq2seq.AttentionWrapper`.
Returns:
A `tfa.seq2seq.AttentionWrapperState` tuple containing shapes used
by this object.
"""
return AttentionWrapperState(
cell_state=self._cell.state_size,
attention=self._get_attention_layer_size(),
alignments=self._item_or_tuple(
a.alignments_size for a in self._attention_mechanisms
),
attention_state=self._item_or_tuple(
a.state_size for a in self._attention_mechanisms
),
alignment_history=self._item_or_tuple(
a.alignments_size if self._alignment_history else ()
for a in self._attention_mechanisms
),
) # sometimes a TensorArray
def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
"""Return an initial (zero) state tuple for this `tfa.seq2seq.AttentionWrapper`.
**NOTE** Please see the initializer documentation for details of how
to call `get_initial_state` if using a `tfa.seq2seq.AttentionWrapper`
with a `tfa.seq2seq.BeamSearchDecoder`.
Args:
inputs: The inputs that will be fed to this cell.
batch_size: `0D` integer tensor: the batch size.
dtype: The internal state data type.
Returns:
An `tfa.seq2seq.AttentionWrapperState` tuple containing zeroed out tensors and,
possibly, empty `TensorArray` objects.
Raises:
ValueError: (or, possibly at runtime, `InvalidArgument`), if
`batch_size` does not match the output size of the encoder passed
to the wrapper object at initialization time.
"""
if inputs is not None:
batch_size = tf.shape(inputs)[0]
dtype = inputs.dtype
with tf.name_scope(
type(self).__name__ + "ZeroState"
): # pylint: disable=bad-continuation
if self._initial_cell_state is not None:
cell_state = self._initial_cell_state
else:
cell_state = self._cell.get_initial_state(
batch_size=batch_size, dtype=dtype
)
error_message = (
"When calling get_initial_state of AttentionWrapper %s: " % self.name
+ "Non-matching batch sizes between the memory "
"(encoder output) and the requested batch size. Are you using "
"the BeamSearchDecoder? If so, make sure your encoder output "
"has been tiled to beam_width via "
"tfa.seq2seq.tile_batch, and the batch_size= argument "
"passed to get_initial_state is batch_size * beam_width."
)
with tf.control_dependencies(
self._batch_size_checks(batch_size, error_message)
): # pylint: disable=bad-continuation
cell_state = tf.nest.map_structure(
lambda s: tf.identity(s, name="checked_cell_state"), cell_state
)
initial_alignments = [
attention_mechanism.initial_alignments(batch_size, dtype)
for attention_mechanism in self._attention_mechanisms
]
return AttentionWrapperState(
cell_state=cell_state,
attention=tf.zeros(
[batch_size, self._get_attention_layer_size()], dtype=dtype
),
alignments=self._item_or_tuple(initial_alignments),
attention_state=self._item_or_tuple(
attention_mechanism.initial_state(batch_size, dtype)
for attention_mechanism in self._attention_mechanisms
),
alignment_history=self._item_or_tuple(
tf.TensorArray(
dtype, size=0, dynamic_size=True, element_shape=alignment.shape
)
if self._alignment_history
else ()
for alignment in initial_alignments
),
)
def call(self, inputs, state, **kwargs):
"""Perform a step of attention-wrapped RNN.
- Step 1: Mix the `inputs` and previous step's `attention` output via
`cell_input_fn`.
- Step 2: Call the wrapped `cell` with this input and its previous
state.
- Step 3: Score the cell's output with `attention_mechanism`.
- Step 4: Calculate the alignments by passing the score through the
`normalizer`.
- Step 5: Calculate the context vector as the inner product between the
alignments and the attention_mechanism's values (memory).
- Step 6: Calculate the attention output by concatenating the cell
output and context through the attention layer (a linear layer with
`attention_layer_size` outputs).
Args:
inputs: (Possibly nested tuple of) Tensor, the input at this time
step.
state: An instance of `tfa.seq2seq.AttentionWrapperState` containing
tensors from the previous time step.
**kwargs: Dict, other keyword arguments for the cell call method.
Returns:
A tuple `(attention_or_cell_output, next_state)`, where:
- `attention_or_cell_output` depending on `output_attention`.
- `next_state` is an instance of `tfa.seq2seq.AttentionWrapperState`
containing the state calculated at this time step.
Raises:
TypeError: If `state` is not an instance of `tfa.seq2seq.AttentionWrapperState`.
"""
if not isinstance(state, AttentionWrapperState):
try:
state = AttentionWrapperState(*state)
except TypeError:
raise TypeError(
"Expected state to be instance of AttentionWrapperState or "
"values that can construct AttentionWrapperState. "
"Received type %s instead." % type(state)
)
# Step 1: Calculate the true inputs to the cell based on the
# previous attention value.
cell_inputs = self._cell_input_fn(inputs, state.attention)
cell_state = state.cell_state
cell_output, next_cell_state = self._cell(cell_inputs, cell_state, **kwargs)
next_cell_state = tf.nest.pack_sequence_as(
cell_state, tf.nest.flatten(next_cell_state)
)
cell_batch_size = cell_output.shape[0] or tf.shape(cell_output)[0]
error_message = (
"When applying AttentionWrapper %s: " % self.name
+ "Non-matching batch sizes between the memory "
"(encoder output) and the query (decoder output). Are you using "
"the BeamSearchDecoder? You may need to tile your memory input "
"via the tfa.seq2seq.tile_batch function with argument "
"multiple=beam_width."
)
with tf.control_dependencies(
self._batch_size_checks(cell_batch_size, error_message)
): # pylint: disable=bad-continuation
cell_output = tf.identity(cell_output, name="checked_cell_output")
if self._is_multi:
previous_attention_state = state.attention_state
previous_alignment_history = state.alignment_history
else:
previous_attention_state = [state.attention_state]
previous_alignment_history = [state.alignment_history]
all_alignments = []
all_attentions = []
all_attention_states = []
maybe_all_histories = []
for i, attention_mechanism in enumerate(self._attention_mechanisms):
attention, alignments, next_attention_state = self._attention_fn(
attention_mechanism,
cell_output,
previous_attention_state[i],
self._attention_layers[i] if self._attention_layers else None,
)
alignment_history = (
previous_alignment_history[i].write(
previous_alignment_history[i].size(), alignments
)
if self._alignment_history
else ()
)
all_attention_states.append(next_attention_state)
all_alignments.append(alignments)
all_attentions.append(attention)
maybe_all_histories.append(alignment_history)
attention = tf.concat(all_attentions, 1)
next_state = AttentionWrapperState(
cell_state=next_cell_state,
attention=attention,
attention_state=self._item_or_tuple(all_attention_states),
alignments=self._item_or_tuple(all_alignments),
alignment_history=self._item_or_tuple(maybe_all_histories),
)
if self._output_attention:
return attention, next_state
else:
return cell_output, next_state
|
#!/usr/bin/env python3
# iset-match.py: Report where iset.mm statements differ from set.mm.
# Author: David A. Wheeler
# SPDX-License-Identifier: MIT
import os,re
# Generate list of statements in set.mm and iset.mm.
os.system("metamath 'read set.mm' 'set width 9999' 'show statement *' quit > ,set-mm-statements")
os.system("metamath 'read iset.mm' 'set width 9999' 'show statement *' quit > ,iset-mm-statements")
# The lines we want have this form:
# 70 mpd $p |- ( ph -> ch ) $= ... $.
# with a leading number, a label, a $a/$e/$p keyword, and the statement.
useful = re.compile(r'[0-9]+ ([^ ]+) (\$[aep]) (.*)')
# Utility functions to clean up statements.
# https://stackoverflow.com/questions/3663450/python-remove-substring-only-at-the-end-of-string
def rchop(thestring, ending):
if thestring.endswith(ending):
return thestring[:-len(ending)]
return thestring
def cleanup_expr(expr):
t1 = rchop(expr, ' $= ... $.')
t2 = rchop(t1, ' $.')
return t2.strip()
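# For example (illustrative only):
#   cleanup_expr('|- ( ph -> ch ) $= ... $.') returns '|- ( ph -> ch )'
#   cleanup_expr('wff ( ph -> ps ) $.') returns 'wff ( ph -> ps )'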
setmm_statements = {}
# Read set.mm statement list
with open(',set-mm-statements', 'r') as setmm:
for line in setmm:
# print(line)
res = useful.match(line)
if res:
label = res[1]
expr = cleanup_expr(res[3])
# print(label + ' ' + expr)
setmm_statements[label] = expr
# print(setmm_statements)
# Read iset.mm statement list, report ones differing from set.mm.
with open(',iset-mm-statements', 'r') as isetmm:
for line in isetmm:
# print(line)
res = useful.match(line)
if res:
label = res[1]
label_type = res[2]
expr = cleanup_expr(res[3])
# print(label + ' ' + expr)
if label in setmm_statements and setmm_statements[label] != expr:
print('{} {}: {} DIFFERENT_FROM {}'.format(
label, label_type, setmm_statements[label], expr))
|
# vim: fileencoding=utf-8 et sw=4 ts=4 tw=80:
# python-quilt - A Python implementation of the quilt patch system
#
# Copyright (C) 2012 - 2017 Björn Ricks <bjoern.ricks@gmail.com>
#
# See LICENSE coming with the source of python-quilt for details.
from quilt.cli.meta import Command
from quilt.cli.parser import Argument, OptionArgument
from quilt.delete import Delete
class DeleteCommand(Command):
name = "delete"
help = "Remove the specified or topmost patch from the series file."
remove = OptionArgument("-r", action="store_true", dest="remove",
default=False,
help="Remove the deleted patch file from the "
"patches directory as well.")
backup = OptionArgument("--backup",
action="store_true", default=False, dest="backup",
help="Rename the patch file to patch~ rather than "
"deleting it. Ignored if not used with `-r'.")
next = OptionArgument("-n", action="store_true", dest="next",
help="Delete the next unapplied patch, "
"rather than the specified or topmost patch.")
patch = Argument(nargs="?")
def run(self, args):
delete = Delete(self.get_cwd(), self.get_pc_dir(),
self.get_patches_dir())
delete.deleted_patch.connect(self.deleted_patch)
delete.deleting_patch.connect(self.deleting_patch)
if args.next and args.patch:
self.exit_error("-n parameter doesn't take an argument")
if args.next:
delete.delete_next(args.remove, args.backup)
else:
delete.delete_patch(args.patch, args.remove, args.backup)
def deleted_patch(self, patch):
print("Removed patch %s" % patch.get_name())
def deleting_patch(self, patch, applied):
if applied:
print("Removing currently applied patch %s" % patch.get_name())
else:
print("Removing patch %s" % patch.get_name())
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
from absl.testing import parameterized
from clif.testing.derived_in_other_header.python import concrete_base
from clif.testing.derived_in_other_header.python import concrete_derived
from clif.testing.derived_in_other_header.python import shared_unique_interop
from clif.testing.derived_in_other_header.python import virtual_derived
# TODO: Restore simple import after OSS setup includes pybind11.
# pylint: disable=g-import-not-at-top
try:
from clif.testing.derived_in_other_header.python import concrete_base_pybind11
from clif.testing.derived_in_other_header.python import concrete_derived_pybind11
from clif.testing.derived_in_other_header.python import shared_unique_interop_pybind11
from clif.testing.derived_in_other_header.python import virtual_derived_pybind11
except ImportError:
concrete_base_pybind11 = None
concrete_derived_pybind11 = None
shared_unique_interop_pybind11 = None
virtual_derived_pybind11 = None
# pylint: enable=g-import-not-at-top
CONCRETE_BASE_EMPTY_GET_RESULT = 90146438
CONCRETE_DERIVED_EMPTY_GET_RESULT = 31607978
VIRTUAL_DERIVED_EMPTY_GET_RESULT = 29852452
class ConcreteTest(parameterized.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.concrete_base = concrete_base
cls.concrete_derived = concrete_derived
cls.shared_unique_interop = shared_unique_interop
def testBaseAndDerived(self):
cbe = self.concrete_base.ConcreteBaseEmpty()
self.assertEqual(cbe.Get(), CONCRETE_BASE_EMPTY_GET_RESULT)
cde = self.concrete_derived.ConcreteDerivedEmpty()
self.assertEqual(cde.Get(), CONCRETE_DERIVED_EMPTY_GET_RESULT)
self.assertEqual(
cde.BaseGet(cbe),
CONCRETE_BASE_EMPTY_GET_RESULT + CONCRETE_DERIVED_EMPTY_GET_RESULT)
self.assertEqual(
cde.BaseGet(cde),
CONCRETE_BASE_EMPTY_GET_RESULT + CONCRETE_DERIVED_EMPTY_GET_RESULT)
@parameterized.named_parameters(
('DefaultDeleter', False),
('CustomDeleter', True))
def testUnableToDisownOriginalShared(self, use_custom_deleter):
cbe = self.shared_unique_interop.make_shared_concrete_derived_empty_up_cast(
use_custom_deleter)
with self.assertRaises(ValueError):
self.shared_unique_interop.pass_unique_concrete_base_empty(cbe)
def testPassUniqueConcreteBaseEmpty(self):
# b/175568410
cbe = (
self.shared_unique_interop.make_unique_concrete_derived_empty_up_cast())
self.assertEqual(cbe.Get(), CONCRETE_BASE_EMPTY_GET_RESULT)
i = self.shared_unique_interop.pass_unique_concrete_base_empty(cbe)
self.assertEqual(i, CONCRETE_BASE_EMPTY_GET_RESULT)
with self.assertRaises(ValueError): # Disowned.
cbe.Get()
def testOriginalUniqueNotDisownedByShared(self):
# b/175568410
cbe = (
self.shared_unique_interop.make_unique_concrete_derived_empty_up_cast())
i = self.shared_unique_interop.pass_shared_concrete_base_empty(cbe)
self.assertEqual(i, CONCRETE_BASE_EMPTY_GET_RESULT)
self.assertEqual(cbe.Get(), CONCRETE_BASE_EMPTY_GET_RESULT)
i = self.shared_unique_interop.pass_unique_concrete_base_empty(cbe)
self.assertEqual(i, CONCRETE_BASE_EMPTY_GET_RESULT)
with self.assertRaises(ValueError): # Disowned.
cbe.Get()
@parameterized.named_parameters(
('DefaultDeleter', False),
('CustomDeleter', True))
def testPassSharedConcreteBaseEmpty(self, use_custom_deleter):
cbe = self.shared_unique_interop.make_shared_concrete_derived_empty_up_cast(
use_custom_deleter)
self.assertEqual(cbe.Get(), CONCRETE_BASE_EMPTY_GET_RESULT)
i = self.shared_unique_interop.pass_shared_concrete_base_empty(cbe)
self.assertEqual(i, CONCRETE_BASE_EMPTY_GET_RESULT)
self.assertEqual(cbe.Get(), CONCRETE_BASE_EMPTY_GET_RESULT)
@absltest.skipIf(not concrete_base_pybind11, 'Failed to import pybind11 module')
class ConcretePybind11Test(ConcreteTest):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.concrete_base = concrete_base_pybind11
cls.concrete_derived = concrete_derived_pybind11
cls.shared_unique_interop = shared_unique_interop_pybind11
class VirtualTest(parameterized.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.shared_unique_interop = shared_unique_interop
cls.virtual_derived = virtual_derived
def testBaseAndDerived(self):
vde = self.virtual_derived.VirtualDerivedEmpty()
self.assertEqual(vde.Get(), VIRTUAL_DERIVED_EMPTY_GET_RESULT)
self.assertEqual(vde.BaseGet(vde), 2 * VIRTUAL_DERIVED_EMPTY_GET_RESULT)
@parameterized.named_parameters(
('DefaultDeleter', False),
('CustomDeleter', True))
def testUnableToDisownOriginalShared(self, use_custom_deleter):
vbe = self.shared_unique_interop.make_shared_virtual_derived_empty_up_cast(
use_custom_deleter)
with self.assertRaises(ValueError):
self.shared_unique_interop.pass_unique_virtual_base_empty(vbe)
def testPassUniqueVirtualBaseEmpty(self):
vbe = self.shared_unique_interop.make_unique_virtual_derived_empty_up_cast()
self.assertEqual(vbe.Get(), VIRTUAL_DERIVED_EMPTY_GET_RESULT)
i = self.shared_unique_interop.pass_unique_virtual_base_empty(vbe)
self.assertEqual(i, VIRTUAL_DERIVED_EMPTY_GET_RESULT)
with self.assertRaises(ValueError): # Disowned.
vbe.Get()
def testOriginalUniqueNotDisownedByShared(self):
vbe = self.shared_unique_interop.make_unique_virtual_derived_empty_up_cast()
i = self.shared_unique_interop.pass_shared_virtual_base_empty(vbe)
self.assertEqual(i, VIRTUAL_DERIVED_EMPTY_GET_RESULT)
self.assertEqual(vbe.Get(), VIRTUAL_DERIVED_EMPTY_GET_RESULT)
i = self.shared_unique_interop.pass_unique_virtual_base_empty(vbe)
self.assertEqual(i, VIRTUAL_DERIVED_EMPTY_GET_RESULT)
with self.assertRaises(ValueError): # Disowned.
vbe.Get()
@parameterized.named_parameters(
('DefaultDeleter', False),
('CustomDeleter', True))
def testPassSharedVirtualBaseEmpty(self, use_custom_deleter):
vbe = self.shared_unique_interop.make_shared_virtual_derived_empty_up_cast(
use_custom_deleter)
self.assertEqual(vbe.Get(), VIRTUAL_DERIVED_EMPTY_GET_RESULT)
i = self.shared_unique_interop.pass_shared_virtual_base_empty(vbe)
self.assertEqual(i, VIRTUAL_DERIVED_EMPTY_GET_RESULT)
self.assertEqual(vbe.Get(), VIRTUAL_DERIVED_EMPTY_GET_RESULT)
@absltest.skipIf(not virtual_derived_pybind11,
'Failed to import pybind11 module')
class VirtualPybind11Test(VirtualTest):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.shared_unique_interop = shared_unique_interop_pybind11
cls.virtual_derived = virtual_derived_pybind11
if __name__ == '__main__':
absltest.main()
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LSTM with Mel spectrum and fully connected layers."""
from kws_streaming.layers import lstm
from kws_streaming.layers import modes
from kws_streaming.layers import speech_features
from kws_streaming.layers import stream
from kws_streaming.layers.compat import tf
import kws_streaming.models.model_utils as utils
def model_parameters(parser_nn):
"""LSTM model parameters."""
parser_nn.add_argument(
'--lstm_units',
type=str,
default='500',
help='Output space dimensionality of the LSTM layer',
)
parser_nn.add_argument(
'--return_sequences',
type=str,
default='0',
help='Whether to return the last output in the output sequence, '
'or the full sequence',
)
parser_nn.add_argument(
'--stateful',
type=int,
default=1,
help='If True, the last state for each sample at index i '
'in a batch will be used as initial state for the sample '
'of index i in the following batch',
)
parser_nn.add_argument(
'--num_proj',
type=str,
default='200',
help='The output dimensionality for the projection matrices.',
)
parser_nn.add_argument(
'--use_peepholes',
type=int,
default='1',
help='True to enable diagonal/peephole connections',
)
parser_nn.add_argument(
'--dropout1',
type=float,
default=0.3,
help='Percentage of data dropped',
)
parser_nn.add_argument(
'--units1',
type=str,
default='',
help='Number of units in the last set of hidden layers',
)
parser_nn.add_argument(
'--act1',
type=str,
default='',
help='Activation function of the last set of hidden layers',
)
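# Example flag values mirroring the defaults above (a sketch only; the training
# entry point that actually builds and consumes this parser lives elsewhere in
# kws_streaming):
#   --lstm_units 500 --return_sequences 0 --stateful 1 --num_proj 200 \
#   --use_peepholes 1 --dropout1 0.3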
def model(flags):
"""LSTM model.
Similar model in papers:
Convolutional Recurrent Neural Networks for Small-Footprint Keyword Spotting
https://arxiv.org/pdf/1703.05390.pdf (with no conv layer)
Model topology is similar with "Hello Edge: Keyword Spotting on
Microcontrollers" https://arxiv.org/pdf/1711.07128.pdf
Args:
flags: data/model parameters
Returns:
Keras model for training
"""
input_audio = tf.keras.layers.Input(
shape=modes.get_input_data_shape(flags, modes.Modes.TRAINING),
batch_size=flags.batch_size)
net = input_audio
if flags.preprocess == 'raw':
    # it is a self-contained model; the user only needs to feed raw audio
net = speech_features.SpeechFeatures(
speech_features.SpeechFeatures.get_params(flags))(
net)
for units, return_sequences, num_proj in zip(
utils.parse(flags.lstm_units), utils.parse(flags.return_sequences),
utils.parse(flags.num_proj)):
net = lstm.LSTM(
units=units,
return_sequences=return_sequences,
stateful=flags.stateful,
use_peepholes=flags.use_peepholes,
num_proj=num_proj)(
net)
net = stream.Stream(cell=tf.keras.layers.Flatten())(net)
net = tf.keras.layers.Dropout(rate=flags.dropout1)(net)
for units, activation in zip(
utils.parse(flags.units1), utils.parse(flags.act1)):
net = tf.keras.layers.Dense(units=units, activation=activation)(net)
net = tf.keras.layers.Dense(units=flags.label_count)(net)
if flags.return_softmax:
net = tf.keras.layers.Activation('softmax')(net)
return tf.keras.Model(input_audio, net)
|
import json
import random
from enum import Enum
from PyQt5.QtCore import QObject, pyqtSignal
class PlayerAffiliation(Enum):
SELF = "self"
FRIEND = "friend"
FOE = "foe"
CLANNIE = "clan"
OTHER = "default"
class PlayerColors(QObject):
changed = pyqtSignal()
def __init__(self, me, user_relations, theme):
QObject.__init__(self)
self._me = me
self._user_relations = user_relations
self._theme = theme
self._colored_nicknames = False
self.colors = self._load_colors("client/colors.json")
self.random_colors = self._load_colors("client/randomcolors.json")
@property
def colored_nicknames(self):
return self._colored_nicknames
@colored_nicknames.setter
def colored_nicknames(self, value):
self._colored_nicknames = value
self.changed.emit()
def _load_colors(self, filename):
return json.loads(self._theme.readfile(filename))
def get_color(self, name):
if name in self.colors:
return self.colors[name]
else:
return self.colors["default"]
def _seed(self, id_, name):
return id_ if id_ not in [-1, None] else name
def get_random_color(self, id_, name):
random.seed(self._seed(id_, name))
return random.choice(self.random_colors)
def get_random_color_index(self, id_, name):
random.seed(self._seed(id_, name))
return random.choice(range(len(self.random_colors)))
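    # Design note on the two helpers above: seeding the RNG with the player's id
    # (or the name when no id is available, e.g. IRC-only users) makes the
    # "random" colour deterministic for a given player across calls and sessions.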
def _get_affiliation(self, id_=-1, name=None):
if self._me.player is not None and self._me.player.id == id_:
return PlayerAffiliation.SELF
if self._user_relations.is_friend(id_, name):
return PlayerAffiliation.FRIEND
if self._user_relations.is_foe(id_, name):
return PlayerAffiliation.FOE
if self._me.is_clannie(id_):
return PlayerAffiliation.CLANNIE
return PlayerAffiliation.OTHER
def get_user_color(self, _id=-1, name=None):
affil = self._get_affiliation(_id, name)
names = {
PlayerAffiliation.SELF: "self",
PlayerAffiliation.FRIEND: "friend",
PlayerAffiliation.FOE: "foe",
PlayerAffiliation.CLANNIE: "clan",
}
if affil in names:
return self.get_color(names[affil])
if self.colored_nicknames:
return self.get_random_color(_id, name)
if _id == -1: # IRC user
return self.get_color("default")
return self.get_color("player")
def get_mod_color(self, _id=-1, name=None):
affil = self._get_affiliation(_id, name)
names = {
PlayerAffiliation.SELF: "self_mod",
PlayerAffiliation.FRIEND: "friend_mod",
PlayerAffiliation.CLANNIE: "friend_mod",
}
if affil in names:
return self.get_color(names[affil])
return self.get_color("mod")
|
import numpy as np
import tensorflow as tf
import h5py
from sklearn.preprocessing import OneHotEncoder
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import time
import scipy.io
# Functions for initializing neural nets parameters
def weight_variable(shape, var_name):
initial = tf.truncated_normal(shape, stddev=0.1, dtype=tf.float64)
return tf.Variable(initial, name=var_name)
def bias_variable(shape, var_name):
initial = tf.constant(0.1, shape=shape, dtype=tf.float64)
return tf.Variable(initial, name=var_name)
def conv2d(x, W):
return tf.nn.conv2d(x, W, [1, 1, 1, 1], 'VALID')
def batch_nm(x, eps=1e-5):
# batch normalization to have zero mean and unit variance
mu, var = tf.nn.moments(x, [0])
return tf.nn.batch_normalization(x, mu, var, None, None, eps)
# Download data from .mat file into numpy array
print('==> Experiment 8g')
filepath = '/scratch/ttanpras/exp8a_d7_1s.mat'
print('==> Loading data from {}'.format(filepath))
f = h5py.File(filepath)
data_train = np.array(f.get('trainingFeatures'))
data_val = np.array(f.get('validationFeatures'))
del f
print('==> Data sizes:',data_train.shape, data_val.shape)
# Transform labels into one-hot encoding form
enc = OneHotEncoder(n_values = 71)
'''
NN config parameters
'''
sub_window_size = 32
num_features = 169*sub_window_size
num_frames = 32
hidden_layer_size = 2000
num_bits = 2000
num_classes = 71
print("Number of features:", num_features)
print("Number of songs:",num_classes)
# Reshape input features
X_train = np.reshape(data_train,(-1, num_features))
X_val = np.reshape(data_val,(-1, num_features))
print("Input sizes:", X_train.shape, X_val.shape)
y_train = []
y_val = []
# Add Labels
for label in range(num_classes):
for sampleCount in range(X_train.shape[0]//num_classes):
y_train.append([label])
for sampleCount in range(X_val.shape[0]//num_classes):
y_val.append([label])
X_train = np.concatenate((X_train, y_train), axis=1)
X_val = np.concatenate((X_val, y_val), axis=1)
# Shuffle
np.random.shuffle(X_train)
np.random.shuffle(X_val)
# Separate coefficients and labels
y_train = X_train[:, -1].reshape(-1, 1)
X_train = X_train[:, :-1]
y_val = X_val[:, -1].reshape(-1, 1)
X_val = X_val[:, :-1]
print('==> Data sizes:',X_train.shape, y_train.shape,X_val.shape, y_val.shape)
y_train = enc.fit_transform(y_train.copy()).astype(int).toarray()
y_val = enc.fit_transform(y_val.copy()).astype(int).toarray()
plotx = []
ploty_train = []
ploty_val = []
# Set-up NN layers
x = tf.placeholder(tf.float64, [None, num_features])
W1 = weight_variable([num_features, hidden_layer_size], "W1")
b1 = bias_variable([hidden_layer_size], "b1")
OpW1 = tf.placeholder(tf.float64, [num_features, hidden_layer_size])
Opb1 = tf.placeholder(tf.float64, [hidden_layer_size])
# Hidden layer activation function: ReLU
h1 = tf.nn.relu(tf.matmul(x, W1) + b1)
W2 = weight_variable([hidden_layer_size, num_bits], "W2")
b2 = bias_variable([num_bits], "b2")
OpW2 = tf.placeholder(tf.float64, [hidden_layer_size, num_bits])
Opb2 = tf.placeholder(tf.float64, [num_bits])
# Pre-activation value for bit representation
h = tf.matmul(h1, W2) + b2
h2 = tf.nn.relu(tf.matmul(h1, W2) + b2)
W3 = weight_variable([num_bits, num_classes], "W3")
b3 = bias_variable([num_classes], "b3")
OpW3 = tf.placeholder(tf.float64, [num_bits, num_classes])
Opb3 = tf.placeholder(tf.float64, [num_classes])
# Softmax layer (Output), dtype = float64
y = tf.matmul(h2, W3) + b3
# NN desired value (labels)
y_ = tf.placeholder(tf.float64, [None, num_classes])
# Loss function
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
sess = tf.InteractiveSession()
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float64))
sess.run(tf.initialize_all_variables())
# Training
numTrainingVec = len(X_train)
batchSize = 500
numEpochs = 1000
bestValErr = 10000
bestValEpoch = 0
startTime = time.time()
for epoch in range(numEpochs):
for i in range(0,numTrainingVec,batchSize):
# Batch Data
batchEndPoint = min(i+batchSize, numTrainingVec)
trainBatchData = X_train[i:batchEndPoint]
trainBatchLabel = y_train[i:batchEndPoint]
train_step.run(feed_dict={x: trainBatchData, y_: trainBatchLabel})
# Print accuracy
if epoch % 5 == 0 or epoch == numEpochs-1:
plotx.append(epoch)
train_error = cross_entropy.eval(feed_dict={x:trainBatchData, y_: trainBatchLabel})
train_acc = accuracy.eval(feed_dict={x:trainBatchData, y_: trainBatchLabel})
val_error = cross_entropy.eval(feed_dict={x:X_val, y_: y_val})
val_acc = accuracy.eval(feed_dict={x:X_val, y_: y_val})
ploty_train.append(train_error)
ploty_val.append(val_error)
print("epoch: %d, val error %g, train error %g"%(epoch, val_error, train_error))
if val_error < bestValErr:
bestValErr = val_error
bestValEpoch = epoch
            # Snapshot the current weights; eval() copies the values, whereas
            # keeping references to the Variables would not preserve this epoch's state
            OpW1 = W1.eval()
            Opb1 = b1.eval()
            OpW2 = W2.eval()
            Opb2 = b2.eval()
            OpW3 = W3.eval()
            Opb3 = b3.eval()
endTime = time.time()
print("Elapse Time:", endTime - startTime)
print("Best validation error: %g at epoch %d"%(bestValErr, bestValEpoch))
# Restore best model for early stopping by assigning the saved values back into
# the graph variables (rebinding the Python names alone has no effect)
sess.run([W1.assign(OpW1), b1.assign(Opb1),
          W2.assign(OpW2), b2.assign(Opb2),
          W3.assign(OpW3), b3.assign(Opb3)])
saveweight = {}
saveweight['W1'] = np.array(W1.eval())
saveweight['b1'] = np.array(b1.eval())
saveweight['W2'] = np.array(W2.eval())
saveweight['b2'] = np.array(b2.eval())
scipy.io.savemat('exp8g_none_weight.mat',saveweight)
print('==> Generating error plot...')
errfig = plt.figure()
trainErrPlot = errfig.add_subplot(111)
trainErrPlot.set_xlabel('Number of Epochs')
trainErrPlot.set_ylabel('Cross-Entropy Error')
trainErrPlot.set_title('Error vs Number of Epochs')
trainErrPlot.scatter(plotx, ploty_train)
valErrPlot = errfig.add_subplot(111)
valErrPlot.scatter(plotx, ploty_val)
errfig.savefig('exp8g_none.png')
'''
GENERATING REPRESENTATION OF NOISY FILES
'''
namelist = ['orig','comp5','comp10','str5','str10','ampSat_(-15)','ampSat_(-10)','ampSat_(-5)', \
'ampSat_(5)','ampSat_(10)','ampSat_(15)','pitchShift_(-1)','pitchShift_(-0.5)', \
'pitchShift_(0.5)','pitchShift_(1)','rev_dkw','rev_gal','rev_shan0','rev_shan1', \
'rev_gen','crowd-15','crowd-10','crowd-5','crowd0','crowd5','crowd10','crowd15', \
'crowd100','rest-15','rest-10','rest-5','rest0','rest5','rest10','rest15', \
'rest100','AWGN-15','AWGN-10','AWGN-5','AWGN0','AWGN5','AWGN10','AWGN15', 'AWGN100']
outdir = '/scratch/ttanpras/taylorswift_noisy_processed/'
repDict = {}
# Loop over each CQT files, not shuffled
for count in range(len(namelist)):
name = namelist[count]
filename = outdir + name + '.mat'
cqt = scipy.io.loadmat(filename)['Q']
cqt = np.transpose(np.array(cqt))
# Group into windows of 32 without overlapping
# Discard any leftover frames
num_windows = cqt.shape[0] // 32
cqt = cqt[:32*num_windows]
X = np.reshape(cqt,(num_windows, num_features))
    # Feed windows through the model; h is the second-layer pre-activation (no final non-linearity)
rep = h.eval(feed_dict={x:X})
# Put the output representation into a dictionary
repDict['n'+str(count)] = rep
scipy.io.savemat('exp8g_none_repNon.mat',repDict)
|
import asyncio
import logging
import os
import signal
from struct import Struct
import time
from .base import (WorkerProcess, ChildProcess,
IDLE_CHECK, IDLE_TIME)
MSG_HEAD = 0x0
MSG_PING = 0x1
MSG_PONG = 0x2
MSG_CLOSE = 0x3
PACK_MSG = Struct('!BB').pack
UNPACK_MSG = Struct('!BB').unpack
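# Wire format: every message is two bytes, a fixed header byte followed by the
# message type, e.g. PACK_MSG(MSG_HEAD, MSG_PING) == b'\x00\x01'.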
logger = logging.getLogger(__name__)
class ConnectionClosedError(Exception):
pass
@asyncio.coroutine
def connect_write_pipe(file):
loop = asyncio.get_event_loop()
transport, _ = yield from loop.connect_write_pipe(asyncio.Protocol, file)
return PipeWriter(transport)
@asyncio.coroutine
def connect_read_pipe(file):
loop = asyncio.get_event_loop()
pipe_reader = PipeReader(loop=loop)
transport, _ = yield from loop.connect_read_pipe(
lambda: PipeReadProtocol(pipe_reader), file)
pipe_reader.transport = transport
return pipe_reader
class PipeWriter:
def __init__(self, transport):
self.transport = transport
def _send(self, msg):
self.transport.write(PACK_MSG(MSG_HEAD, msg))
def ping(self):
self._send(MSG_PING)
def pong(self):
self._send(MSG_PONG)
def stop(self):
self._send(MSG_CLOSE)
def close(self):
if self.transport is not None:
self.transport.close()
class PipeReadProtocol(asyncio.Protocol):
def __init__(self, reader):
self.reader = reader
def data_received(self, data):
self.reader.feed(data)
def connection_lost(self, exc):
self.reader.close()
class PipeReader:
closed = False
transport = None
def __init__(self, loop):
self.loop = loop
self._waiters = asyncio.Queue()
def close(self):
self.closed = True
while not self._waiters.empty():
waiter = self._waiters.get_nowait()
if not waiter.done():
waiter.set_exception(ConnectionClosedError())
if self.transport is not None:
self.transport.close()
def feed(self, data):
        asyncio.ensure_future(self._feed_waiter(data))
@asyncio.coroutine
def _feed_waiter(self, data):
waiter = yield from self._waiters.get()
waiter.set_result(data)
@asyncio.coroutine
def read(self):
if self.closed:
raise ConnectionClosedError()
waiter = asyncio.Future(loop=self.loop)
yield from self._waiters.put(waiter)
data = yield from waiter
hdr, msg = UNPACK_MSG(data)
if hdr == MSG_HEAD:
return msg
class ForkChild(ChildProcess):
_heartbeat_task = None
def __init__(self, parent_read, parent_write, loader, **options):
ChildProcess.__init__(self, loader, **options)
self.parent_read = parent_read
self.parent_write = parent_write
@asyncio.coroutine
def on_start(self):
self._heartbeat_task = asyncio.Task(self.heartbeat())
def stop(self):
if self._heartbeat_task is not None:
self._heartbeat_task.cancel()
ChildProcess.stop(self)
@asyncio.coroutine
def heartbeat(self):
# setup pipes
reader = yield from connect_read_pipe(
os.fdopen(self.parent_read, 'rb'))
writer = yield from connect_write_pipe(
os.fdopen(self.parent_write, 'wb'))
while True:
try:
msg = yield from reader.read()
except ConnectionClosedError:
logger.info('Parent is dead, {} stopping...'
''.format(os.getpid()))
break
if msg == MSG_PING:
writer.pong()
            elif msg == MSG_CLOSE:
break
reader.close()
writer.close()
self.stop()
class ForkWorker(WorkerProcess):
pid = ping = None
reader = writer = None
chat_task = heartbeat_task = None
def start_child(self):
parent_read, child_write = os.pipe()
child_read, parent_write = os.pipe()
pid = os.fork()
if pid:
# parent
os.close(parent_read)
os.close(parent_write)
            asyncio.ensure_future(self.connect(pid, child_write, child_read))
else:
# child
os.close(child_write)
os.close(child_read)
# cleanup after fork
asyncio.set_event_loop(None)
# setup process
process = ForkChild(parent_read, parent_write, self.loader)
process.start()
def kill_child(self):
self.chat_task.cancel()
self.heartbeat_task.cancel()
self.reader.close()
self.writer.close()
try:
os.kill(self.pid, signal.SIGTERM)
os.waitpid(self.pid, 0)
except ProcessLookupError:
pass
@asyncio.coroutine
def heartbeat(self, writer):
idle_time = self.options.get('idle_time', IDLE_TIME)
idle_check = self.options.get('idle_check', IDLE_CHECK)
while True:
yield from asyncio.sleep(idle_check)
if (time.monotonic() - self.ping) < idle_time:
writer.ping()
else:
self.restart()
return
@asyncio.coroutine
def chat(self, reader):
while True:
try:
msg = yield from reader.read()
except ConnectionClosedError:
self.restart()
return
if msg == MSG_PONG:
self.ping = time.monotonic()
@asyncio.coroutine
def connect(self, pid, up_write, down_read):
# setup pipes
reader = yield from connect_read_pipe(
os.fdopen(down_read, 'rb'))
writer = yield from connect_write_pipe(
os.fdopen(up_write, 'wb'))
# store info
self.pid = pid
self.ping = time.monotonic()
self.reader = reader
self.writer = writer
self.chat_task = asyncio.Task(self.chat(reader))
self.heartbeat_task = asyncio.Task(self.heartbeat(writer))
|
# test client - Simon Lees simon@simotek.net
# Copyright (C) 2015 Simon Lees
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from PyLibs.uiclient import UiClient, UiClientCallbacks
import time
if __name__ == '__main__':
clientCallbacks = UiClientCallbacks()
uiClient = UiClient(clientCallbacks)
count = 0
# Main app event loop
while True:
uiClient.processMessages()
time.sleep(0.01)
count = count+1
if count > 2000:
count = 0
uiClient.sendDriveMotorSpeed(0,0)
elif count == 500:
uiClient.sendDriveMotorSpeed(60,60)
elif count == 1000:
uiClient.sendDriveMotorSpeed(-60,-60)
elif count == 1500:
uiClient.sendDriveMotorSpeed(60,-60)
|
#!/usr/bin/env python
import serial
import re
import time
import sys
import argparse
# import threading
RX_BUFFER_SIZE = 128
def send(job=None,sname='/dev/ttyACM0'):
    if job is None:
        yield 'Invalid Job'
        return
try:
s = serial.Serial(sname,9600)
except Exception, e:
yield str(e).split(':')[0]
return
# Wake up grbl
yield "Initializing grbl..."
s.write("\r\n\r\n")
# Wait for grbl to initialize and flush startup text in serial input
time.sleep(.3)
s.flushInput()
#set units
g = "G21" if job['unit'] == 'mm' else "G20"
s.write(g + '\n')
#set defaults
s.write('\n')
#regex the gcode
job['gcode'] = job['gcode'].split('\n')
yield "Streaming gcode to "+sname
# Stream g-code to grbl
repeat = 0
while repeat < int(job['repeat']):
l_count = 0
g_count = 0
c_line = []
# periodic() # Start status report periodic timer
for line in job['gcode']:
l_count += 1 # Iterate line counter
# l_block = re.sub('\s|\(.*?\)','',line).upper() # Strip comments/spaces/new line and capitalize
l_block = line.strip()
c_line.append(len(l_block)+1) # Track number of characters in grbl serial read buffer
grbl_out = ''
            while sum(c_line) >= RX_BUFFER_SIZE-1 or s.inWaiting():
out_temp = s.readline().strip() # Wait for grbl response
if out_temp.find('ok') < 0 and out_temp.find('error') < 0 :
print " Debug: ",out_temp # Debug response
else :
grbl_out += out_temp;
g_count += 1 # Iterate g-code counter
grbl_out += str(g_count); # Add line finished indicator
del c_line[0]
print "SND: " + str(l_count) + " : " + l_block,
s.write(l_block + '\n') # Send block to grbl
print "BUF:",str(sum(c_line)),"REC:",grbl_out
repeat+=1;
# Wait for user input after streaming is completed
yield 'G-code streaming finished!'
print s.inWaiting()
# Close file and serial port
time.sleep(2)
s.close()
return
|
import urllib2
import urlparse
import zeit.cms.browser.interfaces
import zeit.cms.browser.preview
import zeit.cms.interfaces
import zeit.cms.workingcopy.interfaces
import zeit.connector.interfaces
import zope.app.appsetup.product
import zope.component
class WorkingcopyPreview(zeit.cms.browser.preview.Preview):
"""Preview for workingcopy versions of content objects.
This supports two modes of operation:
1. Upload the workingcopy version of an object to the repository, retrieve
the html and return it (proxying the result).
2. Give the workingcopy URL to the preview service (for those who can
traverse it directly) and redirect to it as for the repository preview.
"""
def __call__(self):
url = self.get_preview_url_for(self.context)
if self.should_upload(url):
return self.proxied_preview()
else:
return self.redirect(self.workingcopy_url(url), trusted=True)
def should_upload(self, url):
return 'friedbert' not in url # XXX Really kludgy heuristics
def proxied_preview(self):
preview_obj = self.temporary_checkin()
url = self.get_preview_url_for(preview_obj)
preview_request = urllib2.urlopen(url)
del preview_obj.__parent__[preview_obj.__name__]
return preview_request.read()
def get_preview_url_for(self, preview_context):
url = zope.component.getMultiAdapter(
(preview_context, self.preview_type),
zeit.cms.browser.interfaces.IPreviewURL)
querystring = self.request.environment['QUERY_STRING']
if querystring:
url = '%s?%s' % (url, querystring)
return url
def temporary_checkin(self):
content = zeit.cms.interfaces.ICMSContent(
zeit.connector.interfaces.IResource(self.context))
content.uniqueId = None
target_folder = zeit.cms.interfaces.ICMSContent(
self.context.uniqueId).__parent__
temp_id = self.get_temp_id(self.context.__name__)
target_folder[temp_id] = content
return content
def get_temp_id(self, name):
return 'preview-%s-%s' % (
self.request.principal.id, name)
def workingcopy_url(self, url):
repository_path = urlparse.urlparse(self.context.uniqueId).path
fullpath = self.url(self.context)
workingcopy = self.url(zope.component.getUtility(
zeit.cms.workingcopy.interfaces.IWorkingcopyLocation))
workingcopy_path = fullpath.replace(workingcopy, '')
config = zope.app.appsetup.product.getProductConfiguration('zeit.cms')
workingcopy_path = config[
'friebert-wc-preview-prefix'] + workingcopy_path
url = url.replace(repository_path, workingcopy_path)
return url
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import os
import pytest
from django.core.files.uploadedfile import SimpleUploadedFile
from import_export.utils import import_file
from import_export.exceptions import UnsupportedFiletypeError
from pootle_store.models import NEW, PARSED, Store
TEST_PO_DIR = "tests/data/po/tutorial/en"
IMPORT_SUCCESS = "headers_correct.po"
IMPORT_UNSUPP_FILE = "tutorial.ts"
def _import_file(file_name, file_dir=TEST_PO_DIR,
content_type="text/x-gettext-translation"):
with open(os.path.join(file_dir, file_name), "r") as f:
import_file(SimpleUploadedFile(file_name,
f.read(),
content_type))
@pytest.mark.django_db
def test_import_success(en_tutorial_po_no_file):
assert en_tutorial_po_no_file.state == NEW
_import_file(IMPORT_SUCCESS)
store = Store.objects.get(pk=en_tutorial_po_no_file.pk)
assert store.state == PARSED
@pytest.mark.django_db
def test_import_failure(file_import_failure, en_tutorial_po):
filename, exception = file_import_failure
with pytest.raises(exception):
_import_file(filename)
@pytest.mark.django_db
def test_import_unsupported(en_tutorial_ts, ts_directory):
with pytest.raises(UnsupportedFiletypeError):
_import_file(IMPORT_UNSUPP_FILE,
file_dir=os.path.join(ts_directory, "tutorial/en"),
content_type="text/vnd.trolltech.linguist")
|
from django.contrib.sites.models import Site, RequestSite
from django.test import TestCase, RequestFactory
from sitesutils.context_processors import site
from sitesutils.middleware import RequestSiteMiddleware
def create_request(host):
request_factory = RequestFactory(HTTP_HOST=host)
return request_factory.get('/')
class SiteContextProcessorTests(TestCase):
def setUp(self):
self.site = Site.objects.get(pk=1)
self.middleware = RequestSiteMiddleware()
def test_is_lazy(self):
request = create_request('example.com')
self.middleware.process_request(request)
with self.assertNumQueries(0):
context = site(request)
with self.assertNumQueries(1):
context['site'].domain
    def test_returns_db_site(self):
request = create_request('example.com')
self.middleware.process_request(request)
context = site(request)
self.assertEqual(context['site'], self.site)
def test_returns_request_site(self):
request = create_request('example.com')
context = site(request)
self.assertIsInstance(context['site'], RequestSite)
|
import subprocess
import pytest
from ..helpers import BaseWFC3
@pytest.mark.xfail(reason="Temporary xfail. New input/truth files on Artifactory, but branch not merged.")
class TestUVIS32Single(BaseWFC3):
"""
Test pos UVIS2 subarray data with CTE correction
"""
detector = 'uvis'
def _single_raw_calib(self, rootname):
raw_file = '{}_raw.fits'.format(rootname)
# Prepare input file.
self.get_input_file(raw_file)
# Run CALWF3
subprocess.call(['calwf3.e', raw_file, '-vts'])
# Compare results
outputs = [('{}_flt.fits'.format(rootname), '{}_flt_ref.fits'.format(rootname)),
('{}_flc.fits'.format(rootname), '{}_flc_ref.fits'.format(rootname)),
('{}_rac_tmp.fits'.format(rootname), '{}_rac_tmp_ref.fits'.format(rootname))]
self.compare_outputs(outputs)
# Ported from ``calwf3_uv_32``.
@pytest.mark.parametrize(
'rootname', ['ib3805v0q'])
# 'rootname', ['ib3805v0q',
# 'ib2kabmaq',
# 'ib3503wwq',
# 'ibde04msq',
# 'icoc14hcq'])
def test_uvis_32single(self, rootname):
self._single_raw_calib(rootname)
|
#coding=utf-8
import numpy as np
# Minimum-image distance between two atoms under periodic boundary conditions (PBC)
def dis_pbc(a, b, c, alpha, beta, gamma, t1, t2):
cosalpha = np.cos(alpha * np.pi / 180)
sinalpha = np.sin(alpha * np.pi / 180)
cosbeta = np.cos(beta * np.pi / 180)
cosgamma = np.cos(gamma * np.pi / 180)
singamma = np.sin(gamma * np.pi / 180)
Ax = a
Bx = b * cosgamma
By = b * singamma
Cx = cosbeta
Cy = (cosalpha - cosbeta * cosgamma) / singamma
Cz = np.sqrt(1.0 - Cx * Cx - Cy * Cy)
Cx = c * Cx
Cy = c * Cy
Cz = c * Cz
xmin = np.abs(t1[0] - t2[0]) - Ax * np.round(np.abs(t1[0] - t2[0]) / Ax)
ymin = np.abs(t1[1] - t2[1]) - By * np.round(np.abs(t1[1] - t2[1]) / By)
zmin = np.abs(t1[2] - t2[2]) - Cz * np.round(np.abs(t1[2] - t2[2]) / Cz)
return np.sqrt(xmin * xmin + ymin * ymin + zmin * zmin)
# Applies to any (triclinic) cell, i.e. takes a, b, c, alpha, beta, gamma as input
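# Minimal usage sketch for dis_pbc (illustrative coordinates only): in a
# 10x10x10 orthorhombic cell the naive distance between [0.5, 0.5, 0.5] and
# [9.5, 9.5, 9.5] is ~15.6, but the minimum-image distance is ~sqrt(3):
# >>> dis_pbc(10.0, 10.0, 10.0, 90.0, 90.0, 90.0, [0.5, 0.5, 0.5], [9.5, 9.5, 9.5])
# 1.732...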
def dis(pbca, pbcb, pbcc, t1, t2):
xmin = np.abs(t1[0] - t2[0]) - pbca * np.round(np.abs(t1[0] - t2[0]) / pbca)
ymin = np.abs(t1[1] - t2[1]) - pbcb * np.round(np.abs(t1[1] - t2[1]) / pbcb)
zmin = np.abs(t1[2] - t2[2]) - pbcc * np.round(np.abs(t1[2] - t2[2]) / pbcc)
return xmin,ymin,np.sqrt(xmin * xmin + ymin * ymin + zmin * zmin)
# Applies to orthogonal cells: only a, b, c are given; alpha, beta, gamma are all 90 degrees
def dis_npbc(t1, t2):
xmin = t1[0] - t2[0]
ymin = t1[1] - t2[1]
zmin = t1[2] - t2[2]
return np.sqrt(xmin * xmin + ymin * ymin + zmin * zmin)
# Applies to systems where periodicity is ignored
def plane(h1, h2, o):
x1, y1, z1 = [h1[0]-o[0], h1[1]-o[1], h1[2]-o[2]]
x2, y2, z2 = [h2[0]-o[0], h2[1]-o[1], h2[2]-o[2]]
b = (x2 * z1 - x1 * z2) / (y2 * z1 - y1 * z2)
c = (x1 * y2 - x2 * y1) / (y2 * z1 - y1 * z2)
return np.arccos(c / np.sqrt(1 + b * b + c * c)) * 180 / np.pi
# From the coordinates of the three atoms of a water molecule, compute the angle (in degrees) between the normal of the plane they define and the z direction
def polar(h1, h2, o):
x1 = (h1[0] + h2[0]) * 0.5
y1 = (h1[1] + h2[1]) * 0.5
z1 = (h1[2] + h2[2]) * 0.5
x, y, z = [x1-o[0], y1-o[1], z1-o[2]]
r = np.sqrt(x * x + y * y + z * z)
    return np.arccos(z / r) * 180 / np.pi
# Take the midpoint of the two H atoms as the positive-charge center and the O position as the negative-charge center; the line joining them is the dipole direction. Returns the angle (in degrees) between this direction and z
def ori(o,h):
x, y, z = [h[0]-o[0], h[1]-o[1], h[2]-o[2]]
r = np.sqrt(x * x + y * y + z * z)
return np.arccos(np.abs(z / r)) * 180 / np.pi
# Angle (in degrees) between an OH bond (or any other pair of atoms) and the z axis
def plane_abs(h1, h2, o):
x1, y1, z1 = [h1[0]-o[0], h1[1]-o[1], h1[2]-o[2]]
x2, y2, z2 = [h2[0]-o[0], h2[1]-o[1], h2[2]-o[2]]
b = (x2 * z1 - x1 * z2) / (y2 * z1 - y1 * z2)
c = (x1 * y2 - x2 * y1) / (y2 * z1 - y1 * z2)
return np.arccos(np.abs(c / np.sqrt(1 + b * b + c * c))) * 180 / np.pi
# From the coordinates of the three atoms of a water molecule, compute the angle (in degrees) between the normal of the plane they define and the z direction
# The absolute value is taken, so the returned angle lies in [0, 90] degrees
def polar_abs(h1, h2, o):
x1 = (h1[0] + h2[0]) * 0.5
y1 = (h1[1] + h2[1]) * 0.5
z1 = (h1[2] + h2[2]) * 0.5
x, y, z = [x1-o[0], y1-o[1], z1-o[2]]
r = np.sqrt(x * x + y * y + z * z)
return np.arccos(np.abs(z / r )) * 180 / np.pi
# Take the midpoint of the two H atoms as the positive-charge center and the O position as the negative-charge center; the line joining them is the dipole direction. Returns the angle (in degrees) between this direction and z
# The absolute value is taken, so the returned angle lies in [0, 90] degrees
def hbond_pbc(a, b, c, alpha, beta, gamma, donor, h, acceptor):
cosalpha = np.cos(alpha * np.pi / 180)
sinalpha = np.sin(alpha * np.pi / 180)
cosbeta = np.cos(beta * np.pi / 180)
cosgamma = np.cos(gamma * np.pi / 180)
singamma = np.sin(gamma * np.pi / 180)
Ax = a
Bx = b * cosgamma
By = b * singamma
Cx = cosbeta
Cy = (cosalpha - cosbeta * cosgamma) / singamma
Cz = np.sqrt(1.0 - Cx * Cx - Cy * Cy)
Cx = c * Cx
Cy = c * Cy
Cz = c * Cz
    # H-acceptor distance
xmin = np.abs(h[0] - acceptor[0]) - Ax * np.round(np.abs(h[0] - acceptor[0]) / Ax)
ymin = np.abs(h[1] - acceptor[1]) - By * np.round(np.abs(h[1] - acceptor[1]) / By)
zmin = np.abs(h[2] - acceptor[2]) - Cz * np.round(np.abs(h[2] - acceptor[2]) / Cz)
    # O-O distance (alternative, kept commented out below)
# xmin = np.abs(donor[0] - acceptor[0]) - Ax * np.round(np.abs(donor[0] - acceptor[0]) / Ax)
# ymin = np.abs(donor[1] - acceptor[1]) - By * np.round(np.abs(donor[1] - acceptor[1]) / By)
# zmin = np.abs(donor[2] - acceptor[2]) - Cz * np.round(np.abs(donor[2] - acceptor[2]) / Cz)
r = np.sqrt(xmin * xmin + ymin * ymin + zmin * zmin)
# x1 = donor[0] - h[0] - Ax * np.round((donor[0] - h[0]) / Ax)
# y1 = donor[1] - h[1] - By * np.round((donor[1] - h[1]) / By)
# z1 = donor[2] - h[2] - Cz * np.round((donor[2] - h[2]) / Cz)
#
# x2 = (h[0] - acceptor[0]) - Ax * np.round((h[0] - acceptor[0]) / Ax)
# y2 = (h[1] - acceptor[1]) - By * np.round((h[1] - acceptor[1]) / By)
# z2 = (h[2] - acceptor[2]) - Cz * np.round((h[2] - acceptor[2]) / Cz)
x1 = acceptor[0] - donor[0] - Ax * np.round((acceptor[0] - donor[0]) / Ax)
y1 = acceptor[1] - donor[1] - By * np.round((acceptor[1] - donor[1]) / By)
z1 = acceptor[2] - donor[2] - Cz * np.round((acceptor[2] - donor[2]) / Cz)
x2 = (h[0] - donor[0]) - Ax * np.round((h[0] - donor[0]) / Ax)
y2 = (h[1] - donor[1]) - By * np.round((h[1] - donor[1]) / By)
z2 = (h[2] - donor[2]) - Cz * np.round((h[2] - donor[2]) / Cz)
dh = np.array([x1, y1, z1])
da = np.array([x2, y2, z2])
angle = np.arccos(sum(dh * da) / (np.sqrt(sum(dh * dh)) * np.sqrt(sum(da * da)))) * 180 / np.pi
return r, angle
# Computing the distance and the angle in separate passes is time-consuming, so this standalone function does the full hydrogen-bond geometry at once
# It only returns the computed r and angle (in degrees) without applying any test; the caller decides using its own cutoffs
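# Usage sketch for hbond_pbc (variable names and cutoffs here are hypothetical,
# pick criteria appropriate for your system):
#   r, angle = hbond_pbc(a, b, c, alpha, beta, gamma, donor_xyz, h_xyz, acceptor_xyz)
#   is_hbond = (r < r_cutoff) and (angle < angle_cutoff_deg)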
def get_cell(a, b, c, alpha, beta, gamma):
cosalpha = np.cos(alpha * np.pi / 180)
sinalpha = np.sin(alpha * np.pi / 180)
cosbeta = np.cos(beta * np.pi / 180)
cosgamma = np.cos(gamma * np.pi / 180)
singamma = np.sin(gamma * np.pi / 180)
Ax = a
Ay = 0
Az = 0
Bx = b * cosgamma
By = b * singamma
Bz = 0
Cx = cosbeta
Cy = (cosalpha - cosbeta * cosgamma) / singamma
Cz = np.sqrt(1.0 - Cx * Cx - Cy * Cy)
Cx = c * Cx
Cy = c * Cy
Cz = c * Cz
return Ax, Ay, Az, Bx, By, Bz, Cx, Cy, Cz
# auto correlation function
def acf(d1):
d1unbiased = d1 - np.mean(d1)
d1norm = np.sum(d1unbiased**2)
ac = np.correlate(d1unbiased,d1unbiased,"same")/d1norm
    return ac[len(ac)//2:]
#acf2 is very slow
def acf2(x, length):
return np.array([1] + [np.corrcoef(x[:-i], x[i:])[0,1] \
for i in range(1, length)])
#auto correlation time
def act(x):
t = 0
for i in range(len(x)):
if x[i]<=0.001:
t=i
break
return t
|
#!/usr/bin/env python
'''
011.py: https://projecteuler.net/problem=11
Largest Product in a Grid
In the 20×20 grid below, four numbers along a diagonal line have been marked in red.
08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08
49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00
81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65
52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91
22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80
24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50
32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70
67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21
24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72
21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95
78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92
16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57
86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58
19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40
04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66
88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69
04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36
20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16
20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54
01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48
The product of these numbers is 26 × 63 × 78 × 14 = 1788696.
What is the greatest product of four adjacent numbers in the same direction
(up, down, left, right, or diagonally) in the 20×20 grid?
'''
import os
import pytest
import time
from helpers import load_matrix, build_path
from functools import reduce
from operator import mul
data_file_path = build_path(__file__, 'data/011.txt')
def largest_product_in_grid(grid, adj):
    '''Finds the greatest product of adj adjacent numbers in grid. Note: assumes
    grid is an n x n square.'''
greatest = 0
grid_len = len(grid)
for i in range(grid_len):
for j in range(grid_len):
# left to right
if j + adj <= grid_len:
greatest = max(greatest, (grid[i][j] * grid[i][j+1] * grid[i][j+2] * grid[i][j+3]))
# top to bottom
if i + adj <= grid_len:
greatest = max(greatest, (grid[i][j] * grid[i+1][j] * grid[i+2][j] * grid[i+3][j]))
            # left to right diagonal (also needs room below)
            if i + adj <= grid_len and j + adj <= grid_len:
                greatest = max(greatest, (grid[i][j] * grid[i+1][j+1] * grid[i+2][j+2] * grid[i+3][j+3]))
            # right to left diagonal (also needs room below)
            if i + adj <= grid_len and j - (adj - 1) >= 0:
                greatest = max(greatest, (grid[i][j] * grid[i+1][j-1] * grid[i+2][j-2] * grid[i+3][j-3]))
return greatest
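# Quick hand check on a tiny 4x4 grid (illustrative, not from the problem data):
# for [[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]] with adj=4 the winning
# run is the bottom row, 13*14*15*16 = 43680.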
def test_largest_product_in_grid():
'''Test'''
grid = load_matrix(data_file_path)
assert 70600674 == largest_product_in_grid(grid, 4)
def main():
'''Main runner, delegates to solution.'''
grid = load_matrix(data_file_path)
print(largest_product_in_grid(grid, 4))
if __name__ == '__main__':
start_time = time.time()
main()
print("--- %s seconds ---" % (time.time() - start_time))
|
##
# Copyright 2009-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for Trilinos, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import os
import random
import re
from distutils.version import LooseVersion
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.generic.cmakemake import CMakeMake
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.config import build_path
from easybuild.tools.filetools import mkdir, remove_dir, symlink
from easybuild.tools.modules import get_software_root
from easybuild.tools.py2vs3 import ascii_letters
from easybuild.tools.systemtools import get_shared_lib_ext
class EB_Trilinos(CMakeMake):
"""Support for building Trilinos."""
# see http://trilinos.sandia.gov/Trilinos10CMakeQuickstart.txt
@staticmethod
def extra_options():
"""Add extra config options specific to Trilinos."""
extra_vars = {
'shared_libs': [None, "Deprecated. Use build_shared_libs", CUSTOM],
'openmp': [True, "Enable OpenMP support", CUSTOM],
'all_exts': [True, "Enable all Trilinos packages", CUSTOM],
'skip_exts': [[], "List of Trilinos packages to skip", CUSTOM],
'verbose': [False, "Configure for verbose output", CUSTOM],
}
return CMakeMake.extra_options(extra_vars)
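    # Illustrative easyconfig fragment using these custom parameters (names and
    # values are only an example, not taken from an actual easyconfig):
    #   openmp = True
    #   skip_exts = ['PyTrilinos', 'Sundance']
    #   verbose = True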
def __init__(self, *args, **kwargs):
"""Constructor of custom easyblock for Trilinos."""
super(EB_Trilinos, self).__init__(*args, **kwargs)
if self.cfg['shared_libs'] is not None:
self.log.deprecated("Use 'build_shared_libs' instead of 'shared_libs' easyconfig parameter", '5.0')
self.cfg['build_shared_libs'] = self.cfg['shared_libs']
def configure_step(self):
"""Set some extra environment variables before configuring."""
# enable verbose output if desired
if self.cfg['verbose']:
for x in ["CONFIGURE", "MAKEFILE"]:
self.cfg.update('configopts', "-DTrilinos_VERBOSE_%s:BOOL=ON" % x)
# compiler flags
cflags = [os.getenv('CFLAGS')]
cxxflags = [os.getenv('CXXFLAGS')]
fflags = [os.getenv('FFLAGS')]
ignore_cxx_seek_mpis = [toolchain.INTELMPI, toolchain.MPICH,
toolchain.MPICH2, toolchain.MVAPICH2] # @UndefinedVariable
ignore_cxx_seek_flag = "-DMPICH_IGNORE_CXX_SEEK"
if self.toolchain.mpi_family() in ignore_cxx_seek_mpis:
cflags.append(ignore_cxx_seek_flag)
cxxflags.append(ignore_cxx_seek_flag)
fflags.append(ignore_cxx_seek_flag)
self.cfg.update('configopts', '-DCMAKE_C_FLAGS="%s"' % ' '.join(cflags))
self.cfg.update('configopts', '-DCMAKE_CXX_FLAGS="%s"' % ' '.join(cxxflags))
self.cfg.update('configopts', '-DCMAKE_Fortran_FLAGS="%s"' % ' '.join(fflags))
# Make sure Tpetra/Kokkos Serial mode is enabled regardless of OpenMP
self.cfg.update('configopts', "-DKokkos_ENABLE_Serial:BOOL=ON")
self.cfg.update('configopts', "-DTpetra_INST_SERIAL:BOOL=ON")
# OpenMP
if self.cfg['openmp']:
self.cfg.update('configopts', "-DTrilinos_ENABLE_OpenMP:BOOL=ON")
self.cfg.update('configopts', "-DKokkos_ENABLE_OpenMP:BOOL=ON")
# MPI
if self.toolchain.options.get('usempi', None):
self.cfg.update('configopts', "-DTPL_ENABLE_MPI:BOOL=ON")
# enable full testing
self.cfg.update('configopts', "-DTrilinos_ENABLE_TESTS:BOOL=ON")
self.cfg.update('configopts', "-DTrilinos_ENABLE_ALL_FORWARD_DEP_PACKAGES:BOOL=ON")
lib_re = re.compile("^lib(.*).a$")
# BLAS, LAPACK
for dep in ["BLAS", "LAPACK"]:
self.cfg.update('configopts', '-DTPL_ENABLE_%s:BOOL=ON' % dep)
libdirs = os.getenv('%s_LIB_DIR' % dep)
if self.toolchain.comp_family() == toolchain.GCC: # @UndefinedVariable
libdirs += ";%s/lib64" % get_software_root('GCC')
self.cfg.update('configopts', '-D%s_LIBRARY_DIRS="%s"' % (dep, libdirs))
if self.cfg['openmp']:
libs = os.getenv('%s_MT_STATIC_LIBS' % dep).split(',')
else:
libs = os.getenv('%s_STATIC_LIBS' % dep).split(',')
lib_names = ';'.join([lib_re.search(x).group(1) for x in libs])
if self.toolchain.comp_family() == toolchain.GCC: # @UndefinedVariable
                # explicitly specify static lib!
lib_names += ";libgfortran.a"
self.cfg.update('configopts', '-D%s_LIBRARY_NAMES="%s"' % (dep, lib_names))
# MKL
if get_software_root('imkl') and LooseVersion(self.version) >= LooseVersion('12.12'):
self.cfg.update('configopts', "-DTPL_ENABLE_MKL:BOOL=ON")
self.cfg.update('configopts', '-DMKL_LIBRARY_DIRS:PATH="%s/lib/intel64"' % os.getenv('MKLROOT'))
self.cfg.update('configopts', '-DMKL_INCLUDE_DIRS:PATH="%s/include"' % os.getenv('MKLROOT'))
# UMFPACK is part of SuiteSparse
suitesparse = get_software_root('SuiteSparse')
if suitesparse:
self.cfg.update('configopts', "-DTPL_ENABLE_UMFPACK:BOOL=ON")
self.cfg.update('configopts', "-DTPL_ENABLE_Cholmod:BOOL=ON")
incdirs, libdirs, libnames = [], [], []
for lib in ["UMFPACK", "CHOLMOD", "COLAMD", "AMD", "CCOLAMD", "CAMD"]:
incdirs.append(os.path.join(suitesparse, lib, "Include"))
libdirs.append(os.path.join(suitesparse, lib, "Lib"))
libnames.append(lib.lower())
# add SuiteSparse config lib, it is in recent versions of suitesparse
libdirs.append(os.path.join(suitesparse, 'SuiteSparse_config'))
libnames.append('suitesparseconfig')
# because of "SuiteSparse_config.c:function SuiteSparse_tic: error: undefined reference to 'clock_gettime'"
libnames.append('rt')
# required to resolve METIS symbols in SuiteSparse's libcholmod.a
# doesn't need to be full location, probably because it can be found via $LIBRARY_PATH
# not easy to know whether it should come from METIS or ParMETIS...
# see https://answers.launchpad.net/dorsal/+question/223167
libnames.append('libmetis.a')
self.cfg.update('configopts', '-DUMFPACK_INCLUDE_DIRS:PATH="%s"' % ';'.join(incdirs))
self.cfg.update('configopts', '-DUMFPACK_LIBRARY_DIRS:PATH="%s"' % ';'.join(libdirs))
self.cfg.update('configopts', '-DUMFPACK_LIBRARY_NAMES:STRING="%s"' % ';'.join(libnames))
self.cfg.update('configopts', '-DCholmod_INCLUDE_DIRS:PATH="%s"' % ';'.join(incdirs))
self.cfg.update('configopts', '-DCholmod_LIBRARY_DIRS:PATH="%s"' % ';'.join(libdirs))
self.cfg.update('configopts', '-DCholmod_LIBRARY_NAMES:STRING="%s"' % ';'.join(libnames))
# BLACS
if get_software_root('BLACS'):
self.cfg.update('configopts', "-DTPL_ENABLE_BLACS:BOOL=ON")
self.cfg.update('configopts', '-DBLACS_INCLUDE_DIRS:PATH="%s"' % os.getenv('BLACS_INC_DIR'))
self.cfg.update('configopts', '-DBLACS_LIBRARY_DIRS:PATH="%s"' % os.getenv('BLACS_LIB_DIR'))
blacs_lib_names = os.getenv('BLACS_STATIC_LIBS').split(',')
blacs_lib_names = [lib_re.search(x).group(1) for x in blacs_lib_names]
self.cfg.update('configopts', '-DBLACS_LIBRARY_NAMES:STRING="%s"' % (';'.join(blacs_lib_names)))
# ScaLAPACK
if get_software_root('ScaLAPACK'):
self.cfg.update('configopts', "-DTPL_ENABLE_SCALAPACK:BOOL=ON")
self.cfg.update('configopts', '-DSCALAPACK_INCLUDE_DIRS:PATH="%s"' % os.getenv('SCALAPACK_INC_DIR'))
self.cfg.update('configopts', '-DSCALAPACK_LIBRARY_DIRS:PATH="%s;%s"' % (os.getenv('SCALAPACK_LIB_DIR'),
os.getenv('BLACS_LIB_DIR')))
# PETSc
petsc = get_software_root('PETSc')
if petsc:
self.cfg.update('configopts', "-DTPL_ENABLE_PETSC:BOOL=ON")
incdirs = [os.path.join(petsc, "include")]
self.cfg.update('configopts', '-DPETSC_INCLUDE_DIRS:PATH="%s"' % ';'.join(incdirs))
petsc_libdirs = [
os.path.join(petsc, "lib"),
os.path.join(suitesparse, "UMFPACK", "Lib"),
os.path.join(suitesparse, "CHOLMOD", "Lib"),
os.path.join(suitesparse, "COLAMD", "Lib"),
os.path.join(suitesparse, "AMD", "Lib"),
os.getenv('FFTW_LIB_DIR'),
os.path.join(get_software_root('ParMETIS'), "Lib")
]
self.cfg.update('configopts', '-DPETSC_LIBRARY_DIRS:PATH="%s"' % ';'.join(petsc_libdirs))
petsc_libnames = ["petsc", "umfpack", "cholmod", "colamd", "amd", "parmetis", "metis"]
petsc_libnames += [lib_re.search(x).group(1) for x in os.getenv('FFTW_STATIC_LIBS').split(',')]
self.cfg.update('configopts', '-DPETSC_LIBRARY_NAMES:STRING="%s"' % ';'.join(petsc_libnames))
# other Third-Party Libraries (TPLs)
deps = self.cfg.dependencies()
builddeps = self.cfg.builddependencies() + ["SuiteSparse"]
deps = [dep['name'] for dep in deps if not dep['name'] in builddeps]
for dep in deps:
deproot = get_software_root(dep)
if deproot:
depmap = {
'SCOTCH': 'Scotch',
}
dep = depmap.get(dep, dep)
self.cfg.update('configopts', "-DTPL_ENABLE_%s:BOOL=ON" % dep)
incdir = os.path.join(deproot, "include")
self.cfg.update('configopts', '-D%s_INCLUDE_DIRS:PATH="%s"' % (dep, incdir))
libdir = os.path.join(deproot, "lib")
self.cfg.update('configopts', '-D%s_LIBRARY_DIRS:PATH="%s"' % (dep, libdir))
# extensions_step
if self.cfg['all_exts']:
self.cfg.update('configopts', "-DTrilinos_ENABLE_ALL_PACKAGES:BOOL=ON")
else:
for ext in self.cfg['exts_list']:
self.cfg.update('configopts', "-DTrilinos_ENABLE_%s=ON" % ext)
# packages to skip
skip_exts = self.cfg['skip_exts']
if skip_exts:
for ext in skip_exts:
self.cfg.update('configopts', "-DTrilinos_ENABLE_%s:BOOL=OFF" % ext)
# building in source dir not supported
# + if the build directory is a long path, problems like "Argument list too long" may occur
# cfr. https://github.com/trilinos/Trilinos/issues/2434
# so, try to create build directory with shorter path length to build in
salt = ''.join(random.choice(ascii_letters) for _ in range(5))
self.short_start_dir = os.path.join(build_path(), self.name + '-' + salt)
if os.path.exists(self.short_start_dir):
raise EasyBuildError("Short start directory %s for Trilinos already exists?!", self.short_start_dir)
self.log.info("Length of path to original start directory: %s", len(self.start_dir))
self.log.info("Short start directory: %s (length: %d)", self.short_start_dir, len(self.short_start_dir))
mkdir(self.short_start_dir)
short_src_dir = os.path.join(self.short_start_dir, 'src')
symlink(self.start_dir, short_src_dir)
short_build_dir = os.path.join(self.short_start_dir, 'obj')
obj_dir = os.path.join(self.builddir, 'obj')
mkdir(obj_dir)
symlink(obj_dir, short_build_dir)
# configure using cmake
super(EB_Trilinos, self).configure_step(srcdir=short_src_dir, builddir=short_build_dir)
def build_step(self):
"""Build with make (verbose logging enabled)."""
super(EB_Trilinos, self).build_step(verbose=True)
def sanity_check_step(self):
"""Custom sanity check for Trilinos."""
# selection of libraries
libs = ["Amesos", "Anasazi", "AztecOO", "Belos", "Epetra", "Galeri",
"GlobiPack", "Ifpack", "Intrepid", "Isorropia", "Kokkos",
"Komplex", "LOCA", "Mesquite", "ML", "Moertel", "MOOCHO", "NOX",
"Pamgen", "RTOp", "Rythmos", "Sacado", "Shards", "Stratimikos",
"Teuchos", "Tpetra", "Triutils", "Zoltan"]
libs = [x for x in libs if x not in self.cfg['skip_exts']]
# Teuchos was refactored in 11.2
if LooseVersion(self.version) >= LooseVersion('11.2') and 'Teuchos' in libs:
libs.remove('Teuchos')
libs.extend(['teuchoscomm', 'teuchoscore', 'teuchosnumerics', 'teuchosparameterlist', 'teuchosremainder'])
# Kokkos was refactored in 12.x, check for libkokkoscore.a rather than libkokkos.a
if LooseVersion(self.version) >= LooseVersion('12') and 'Kokkos' in libs:
libs.remove('Kokkos')
libs.append('kokkoscore')
# libgaleri was split into libgaleri-epetra & libgaleri-xpetra
if LooseVersion(self.version) >= LooseVersion('12.6'):
libs.remove('Galeri')
libs.extend(['galeri-epetra', 'galeri-xpetra'])
# Get the library extension
if self.cfg['build_shared_libs']:
lib_ext = get_shared_lib_ext()
else:
lib_ext = 'a'
custom_paths = {
'files': [os.path.join('lib', 'lib%s.%s' % (x.lower(), lib_ext)) for x in libs],
'dirs': ['bin', 'include']
}
super(EB_Trilinos, self).sanity_check_step(custom_paths=custom_paths)
def cleanup_step(self):
"""Complete cleanup by also removing custom created short build directory."""
remove_dir(self.short_start_dir)
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Record ID handling for checker."""
from intbitset import intbitset # pylint: disable=no-name-in-module
from .common import ALL
def ids_from_input(ids_input):
"""Return the list of IDs to check for from user-input.
:param ids_input: Comma-separated list of requested record IDs.
May contain, or be ALL.
:type ids_input: str
:returns: intbitset of IDs or ALL
:rtype: seq
:raises: ValueError
"""
    if ALL in ids_input.split(','):
        return ALL
else:
from invenio_utils.shell import split_cli_ids_arg
return intbitset(split_cli_ids_arg(ids_input), sanity_checks=True)
|
#! /usr/bin/env python
# From IRC:
#
# "I was thinking about a toy idea for my kid to teach multiplication through
# area representation. 2x3 is a two-inch-by-three-inch slab of something with
# lines on it, etc. I'd need 45 pieces (since AxB = BxA, you can drop almost
# half) but if I wanted to put it away in almost equal 9x9 layers, how many
# layers would be required?"
# Let's draw a picture. We have a times table, a square from 1 to 9 each side,
# but a bunch of blocks are duplicates so I will X them out because we don't
# need to make them:
# 123456789
# 1 XXXXXXXX
# 2 XXXXXXX
# 3 XXXXXX
# 4 XXXXX
# 5 XXXX
# 6 XXX
# 7 XX
# 8 X
# 9
# First off I wanted to know if there's any hope of packing with no gaps. So I
# find the volume of units that it'll all take up. The function row() tells me
# the total area of the pieces in each row -- for row 3, I have a 3x1 piece, a
# 3x2 piece, and a 3x3 piece, so the total area is 18 units.
def row(end):
sum = 0
for i in range(1,end+1):
sum += end * i
return sum
# So to get the total volume of a set of times-table blocks going up to n (n has
# been 9 so far) I'll express which rows I have -- range(1,n+1) -- and sum up
# all their areas. Note that area of them all spread out, and volume, are
# synonymous here since I'm assuming they're 1 unit thick. This may come in
# handy later so I can put the blocks away making the best use of the 3d box,
# like if some go in vertically while others are horizontal. Again, here I'm
# just looking for a set size and box size that have a **chance** of packing
# into a box with a square footprint.
def math_toy_volume(n):
return sum(map(row, range(1,n+1)))
# I happen to know from the original problem that the set had 45 pieces. If I
# try other set sizes, though, I would also like to know how many pieces they
# have. Easy, but easier to name it.
def math_toy_pieces(n):
return sum(range(1,n+1))
# Anyways I want the ones that have any hope of packing into a square box so I
# need to get the factors of the area and then find dups in the list of factors.
# From https://stackoverflow.com/questions/6800193/what-is-the-most-efficient-way-of-finding-all-the-factors-of-a-number-in-python
# I get:
def factors(n):
return set(reduce(list.__add__,
([i, n//i] for i in range(1, int(n**0.5) + 1) if n % i == 0)))
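# Quick check for the original question (n = 9): math_toy_pieces(9) == 45 and
# math_toy_volume(9) == 1155 = 3*5*7*11. 1155/81 is about 14.26, so even with a
# perfect fit at least 15 layers of a 9x9 footprint would be needed, and since
# 1155 is not a multiple of 81 some gaps are unavoidable.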
for i in range(1,21):
n = math_toy_volume(i)
print str(n) + "\t" + str(sorted(factors(n)))
|
'''
Created on 2016/4/13
:author: hubo
'''
from vlcp.service.sdn.flowbase import FlowBase
from vlcp.server.module import depend, ModuleNotification, call_api,api
import vlcp.service.sdn.ofpportmanager as ofpportmanager
import vlcp.service.sdn.ovsdbportmanager as ovsdbportmanager
import vlcp.service.kvdb.objectdb as objectdb
from vlcp.event.event import Event, withIndices, M_
from vlcp.event.runnable import RoutineContainer, RoutineException
from vlcp.config.config import defaultconfig
from vlcp.service.sdn.ofpmanager import FlowInitialize
from vlcp.utils.networkmodel import PhysicalPort, LogicalPort, PhysicalPortSet, LogicalPortSet, LogicalNetwork, \
PhysicalNetwork,SubNet,RouterPort,VRouter, \
PhysicalNetworkMap
from vlcp.utils.flowupdater import FlowUpdater
import itertools
from functools import partial
from contextlib import closing, suppress
from vlcp.utils.exceptions import WalkKeyNotRetrieved
from vlcp.protocol.openflow.openflow import OpenflowConnectionStateEvent
@withIndices('datapathid', 'vhost', 'connection', 'logicalportchanged', 'physicalportchanged',
'logicalnetworkchanged', 'physicalnetworkchanged')
class DataObjectChanged(Event):
pass
class IDAssigner(object):
def __init__(self):
self._indices = {}
self._revindices = {}
# Reserve 0 and 0xffff
self._revindices[0] = '<reserve0>'
self._revindices[0xffff] = '<reserve65535>'
self._lastindex = 1
def assign(self, key):
if key in self._indices:
return self._indices[key]
else:
ind = self._lastindex
while ind in self._revindices:
ind += 1
ind &= 0xffff
self._revindices[ind] = key
self._indices[key] = ind
self._lastindex = ind + 1
return ind
def unassign(self, keys):
for k in keys:
ind = self._indices.pop(k, None)
if ind is not None:
del self._revindices[ind]
def frozen(self):
return dict(self._indices)
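# IDAssigner usage sketch (hypothetical keys): IDs are 16-bit, 0 and 0xffff are
# reserved, and a key keeps its ID until unassign() releases it:
#   assigner = IDAssigner()
#   assigner.assign('net-a')   # -> 1
#   assigner.assign('net-b')   # -> 2
#   assigner.assign('net-a')   # -> 1 again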
def _to32bitport(portno):
if portno >= 0xff00:
portno = 0xffff0000 | portno
return portno
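# Note: OVSDB reports reserved OpenFlow ports in their 16-bit (OpenFlow 1.0)
# form (>= 0xff00); OR-ing with 0xffff0000 yields the 32-bit OpenFlow 1.3+
# equivalent, e.g. 0xfffe (LOCAL) -> 0xfffffffe.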
@withIndices('connection')
class FlowReadyEvent(Event):
pass
class IOFlowUpdater(FlowUpdater):
def __init__(self, connection, systemid, bridgename, parent):
FlowUpdater.__init__(self, connection, (PhysicalPortSet.default_key(),),
('ioprocessing', connection),
parent._logger)
self._walkerdict = {PhysicalPortSet.default_key(): partial(self._physicalport_walker, _portnames={})}
self._systemid = systemid
self._bridgename = bridgename
self._portnames = {}
self._portids = {}
self._currentportids = {}
self._currentportnames = {}
self._lastportids = {}
self._lastportnames = {}
self._lastnetworkids = {}
self._networkids = IDAssigner()
self._phynetworkids = IDAssigner()
self._physicalnetworkids = {}
self._logicalportkeys = set()
self._physicalportkeys = set()
self._logicalnetworkkeys = set()
self._physicalnetworkkeys = set()
self._original_initialkeys = []
self._append_initialkeys = []
self._parent = parent
self._flows_sent = set()
async def update_ports(self, ports, ovsdb_ports):
"""
Called from main module to update port information
"""
new_port_names = dict((p['name'], _to32bitport(p['ofport'])) for p in ovsdb_ports)
new_port_ids = dict((p['id'], _to32bitport(p['ofport'])) for p in ovsdb_ports if p['id'])
if new_port_names == self._portnames and new_port_ids == self._portids:
return
self._portnames.clear()
self._portnames.update(new_port_names)
self._portids.clear()
self._portids.update(new_port_ids)
logicalportkeys = [LogicalPort.default_key(id) for id in self._portids]
self._original_initialkeys = logicalportkeys + [PhysicalPortSet.default_key()]
self._initialkeys = tuple(itertools.chain(self._original_initialkeys, self._append_initialkeys))
phy_walker = partial(self._physicalport_walker, _portnames=new_port_names)
log_walker = partial(self._logicalport_walker, _portids=new_port_ids)
self._walkerdict = dict(itertools.chain(
((PhysicalPortSet.default_key(),phy_walker),),
((lgportkey,log_walker) for lgportkey in logicalportkeys)
))
self._portnames = new_port_names
self._portids = new_port_ids
await self.restart_walk()
async def flowready(self, logicalnetworkid, physicalportid):
# 1. Check the current updated flows
# 2. Check the current logicalnetwork and physicalport
# 3. Wait for:
# a. flow updated event
# b. data object change event
# c. connection down event
flowready_matcher = FlowReadyEvent.createMatcher(self._connection)
conn_down = self._connection.protocol.statematcher(self._connection)
dataobjectchanged = DataObjectChanged.createMatcher(None, None, self._connection)
while self._connection.connected:
currentlognetid = dict((id, n) for n, id in self._lastlognets)
currentphyportid = dict((id, (p, p.physicalnetwork)) for p, id in self._lastphyports)
if (logicalnetworkid, physicalportid) in self._flows_sent:
return True
elif logicalnetworkid in currentlognetid and physicalportid in currentphyportid:
conn_down = OpenflowConnectionStateEvent.createMatcher(None, None, OpenflowConnectionStateEvent.CONNECTION_DOWN, self._connection)
await M_(dataobjectchanged, conn_down, flowready_matcher)
else:
return False
return False
def _logicalport_walker(self, key, value, walk, save, _portids):
_, (id,) = LogicalPort._getIndices(key)
if id not in _portids:
return
save(key)
if value is None:
return
with suppress(WalkKeyNotRetrieved):
lognet = walk(value.network.getkey())
save(lognet.getkey())
phynet = walk(lognet.physicalnetwork.getkey())
save(phynet.getkey())
if hasattr(value,"subnet"):
with suppress(WalkKeyNotRetrieved):
subnet = walk(value.subnet.getkey())
save(subnet.getkey())
if hasattr(subnet,"router"):
routerport = walk(subnet.router.getkey())
save(routerport.getkey())
if hasattr(routerport,"router"):
router = walk(routerport.router.getkey())
save(router.getkey())
if router.interfaces.dataset():
for weakobj in router.interfaces.dataset():
with suppress(WalkKeyNotRetrieved):
weakrouterport = walk(weakobj.getkey())
save(weakrouterport.getkey())
s = walk(weakrouterport.subnet.getkey())
save(s.getkey())
lgnet = walk(s.network.getkey())
save(lgnet.getkey())
def _physicalport_walker(self, key, value, walk, save, _portnames):
save(key)
if value is None:
return
physet = value.set
for name in _portnames:
phyports = physet.find(PhysicalPort, self._connection.protocol.vhost, self._systemid, self._bridgename, name)
            # There might be more than one matching physical port rule for a port; pick the most specific one
namedict = {}
for p in phyports:
_, inds = PhysicalPort._getIndices(p.getkey())
name = inds[-1]
ind_key = [i != '%' for i in inds]
if name != '%':
if name in namedict:
if namedict[name][0] < ind_key:
namedict[name] = (ind_key, p)
else:
namedict[name] = (ind_key, p)
phyports = [v[1] for v in namedict.values()]
for p in phyports:
with suppress(WalkKeyNotRetrieved):
phyp = walk(p.getkey())
save(phyp.getkey())
phynet = walk(phyp.physicalnetwork.getkey())
save(phynet.getkey())
if self._parent.enable_router_forward:
phynetmap = walk(PhysicalNetworkMap.default_key(phynet.id))
save(phynetmap.getkey())
for weak_lgnet in phynetmap.logicnetworks.dataset():
with suppress(WalkKeyNotRetrieved):
lgnet = walk(weak_lgnet.getkey())
save(lgnet.getkey())
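    # Recompute the initial keys after each walk: SubNet, RouterPort and VRouter
    # objects discovered by the walkers are appended so that updates to them also
    # restart the walk.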
def reset_initialkeys(self,keys,values):
subnetkeys = [k for k,v in zip(keys,values) if v is not None and not v.isdeleted() and
v.isinstance(SubNet)]
routerportkeys = [k for k,v in zip(keys,values) if v is not None and not v.isdeleted() and
v.isinstance(RouterPort)]
portkeys = [k for k,v in zip(keys,values) if v is not None and not v.isdeleted() and
v.isinstance(VRouter)]
self._append_initialkeys = subnetkeys + routerportkeys + portkeys
self._initialkeys = tuple(itertools.chain(self._original_initialkeys, self._append_initialkeys))
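    # After a walk completes, map the retrieved logical/physical ports and networks
    # to OpenFlow port numbers and locally assigned network IDs, and send a
    # DataObjectChanged event whenever any of these key sets has changed.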
async def walkcomplete(self, keys, values):
conn = self._connection
dpid = conn.openflow_datapathid
vhost = conn.protocol.vhost
_currentportids = dict(self._portids)
_currentportnames = dict(self._portnames)
updated_data = {}
current_data = {}
for cls, name, idg, assigner in ((LogicalPort, '_logicalportkeys', lambda x: _currentportids.get(x.id), None),
(PhysicalPort, '_physicalportkeys', lambda x: _currentportnames.get(x.name), None),
(LogicalNetwork, '_logicalnetworkkeys', lambda x: self._networkids.assign(x.getkey()), self._networkids),
(PhysicalNetwork, '_physicalnetworkkeys', lambda x: self._phynetworkids.assign(x.getkey()), self._phynetworkids),
):
objs = [v for v in values if v is not None and not v.isdeleted() and v.isinstance(cls)]
cv = [(o, oid) for o,oid in ((o, idg(o)) for o in objs) if oid is not None]
objkeys = set([v.getkey() for v,_ in cv])
oldkeys = getattr(self, name)
current_data[cls] = cv
if objkeys != oldkeys:
if assigner is not None:
assigner.unassign(oldkeys.difference(objkeys))
setattr(self, name, objkeys)
updated_data[cls] = True
if updated_data:
await self.wait_for_send(DataObjectChanged(dpid, vhost, conn, LogicalPort in updated_data,
PhysicalPort in updated_data,
LogicalNetwork in updated_data,
PhysicalNetwork in updated_data,
current = (current_data.get(LogicalPort),
current_data.get(PhysicalPort),
current_data.get(LogicalNetwork),
current_data.get(PhysicalNetwork))))
self._lastlognets = current_data.get(LogicalNetwork)
self._lastphyports = current_data.get(PhysicalPort)
self._currentportids = _currentportids
self._currentportnames = _currentportnames
async def updateflow(self, connection, addvalues, removevalues, updatedvalues):
# We must do these in order, each with a batch:
# 1. Remove flows
# 2. Remove groups
# 3. Add groups, modify groups
# 4. Add flows, modify flows
try:
cmds = []
ofdef = connection.openflowdef
vhost = connection.protocol.vhost
input_table = self._parent._gettableindex('ingress', vhost)
input_next = self._parent._getnexttable('', 'ingress', vhost = vhost)
output_table = self._parent._gettableindex('egress', vhost)
            # Cache all current IDs and save them as the "last" values; we will need them when removing flows.
_lastportids = self._lastportids
_lastportnames = self._lastportnames
_lastnetworkids = self._lastnetworkids
_portids = dict(self._currentportids)
_portnames = dict(self._currentportnames)
_networkids = self._networkids.frozen()
exist_objs = dict((obj.getkey(), obj) for obj in self._savedresult if obj is not None and not obj.isdeleted())
            # We must generate actions from the network driver
phyportset = [obj for obj in self._savedresult if obj is not None and not obj.isdeleted() and obj.isinstance(PhysicalPort)]
phynetset = [obj for obj in self._savedresult if obj is not None and not obj.isdeleted() and obj.isinstance(PhysicalNetwork)]
lognetset = [obj for obj in self._savedresult if obj is not None and not obj.isdeleted() and obj.isinstance(LogicalNetwork)]
logportset = [obj for obj in self._savedresult if obj is not None and not obj.isdeleted() and obj.isinstance(LogicalPort)]
# If a port is both a logical port and a physical port, flows may conflict.
            # Remove the port from the physical port dictionary if it is duplicated.
logportofps = set(_portids[lp.id] for lp in logportset if lp.id in _portids)
_portnames = dict((n,v) for n,v in _portnames.items() if v not in logportofps)
self._lastportids = _portids
self._lastportnames = _portnames
self._lastnetworkids = _networkids
# Group current ports by network for further use
phyportdict = {}
for p in phyportset:
phyportdict.setdefault(p.physicalnetwork, []).append(p)
lognetdict = {}
for n in lognetset:
lognetdict.setdefault(n.physicalnetwork, []).append(n)
logportdict = {}
for p in logportset:
logportdict.setdefault(p.network, []).append(p)
allapis = []
            # A logical network's broadcast group needs to be updated when:
# 1. Network is updated
# 2. Physical network of this logical network is updated
# 3. Logical port is added or removed from the network
# 4. Physical port is added or removed from the physical network
group_updates = set([obj for obj in updatedvalues if obj.isinstance(LogicalNetwork)])
group_updates.update(obj.network for obj in addvalues if obj.isinstance(LogicalPort))
#group_updates.update(obj.network for obj in updatedvalues if obj.isinstance(LogicalPort))
group_updates.update(exist_objs[obj.network.getkey()] for obj in removevalues if obj.isinstance(LogicalPort) and obj.network.getkey() in exist_objs)
updated_physicalnetworks = set(obj for obj in updatedvalues if obj.isinstance(PhysicalNetwork))
updated_physicalnetworks.update(p.physicalnetwork for p in addvalues if p.isinstance(PhysicalPort))
updated_physicalnetworks.update(exist_objs[p.physicalnetwork.getkey()] for p in removevalues if p.isinstance(PhysicalPort) and p.physicalnetwork.getkey() in exist_objs)
updated_physicalnetworks.update(p.physicalnetwork for p in updatedvalues if p.isinstance(PhysicalPort))
group_updates.update(lnet for pnet in updated_physicalnetworks
if pnet in lognetdict
for lnet in lognetdict[pnet])
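            # For every (logical network, physical port) pair that is new or updated,
            # ask the network driver (through the 'createioflowparts' API) for the OXM
            # match fields and actions needed to implement its ingress/egress flows.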
_flows_sent = set()
for pnet in phynetset:
if pnet in lognetdict and pnet in phyportdict:
for lognet in lognetdict[pnet]:
netid = _networkids.get(lognet.getkey())
if netid is not None:
for p in phyportdict[pnet]:
if lognet in addvalues or lognet in group_updates or p in addvalues or p in updatedvalues:
pid = _portnames.get(p.name)
if pid is not None:
async def subr(lognet, p, netid, pid):
try:
r = await call_api(self, 'public', 'createioflowparts', {'connection': connection,
'logicalnetwork': lognet,
'physicalport': p,
'logicalnetworkid': netid,
'physicalportid': pid})
except Exception:
self._parent._logger.warning("Create flow parts failed for %r and %r", lognet, p, exc_info = True)
return None
else:
_flows_sent.add((netid, pid))
return ((lognet, p), r)
allapis.append(subr(lognet, p, netid, pid))
flowparts_result = await self.execute_all(allapis)
flowparts = dict(r for r in flowparts_result if r is not None)
if connection.protocol.disablenxext:
# Nicira extension is disabled, use metadata instead
# 64-bit metadata is used as:
# | 16-bit input network | 16-bit output network | 16-bit reserved | 16-bit output port |
# When first initialized, input network = output network = Logical Network no.
# output port = OFPP_ANY, reserved bits are 0x0000
# Currently used reserved bits:
# left-most (offset = 15, mask = 0x8000): allow output to IN_PORT
                #   offset = 14, mask = 0x4000: 1 if IN_PORT is a logical port, 0 otherwise
# right-most (offset = 0, mask = 0x0001): VXLAN learned
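                # Example (assumed values for illustration): for logical network no. 5
                # entering from a logical port, the initial metadata would be
                # (5 << 48) | (5 << 32) | (0x4000 << 16) | (OFPP_ANY & 0xffff)
                # = 0x000500054000ffff, since OFPP_ANY & 0xffff == 0xffff.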
def create_input_instructions(lognetid, extra_actions, is_logport):
lognetid = (lognetid & 0xffff)
instructions = [ofdef.ofp_instruction_write_metadata(
metadata = (lognetid << 48) | (lognetid << 32) | ((0x4000 if is_logport else 0) << 16) | (ofdef.OFPP_ANY & 0xffff),
metadata_mask = 0xffffffffffffffff
),
ofdef.ofp_instruction_goto_table(table_id = input_next)
]
if extra_actions:
instructions.insert(0, ofdef.ofp_instruction_actions(actions = list(extra_actions)))
return instructions
def create_output_oxm(lognetid, portid, in_port = False):
r = [ofdef.create_oxm(ofdef.OXM_OF_METADATA_W, (portid & 0xFFFF) | (0x80000000 if in_port else 0) | ((lognetid & 0xFFFF) << 32), 0x0000FFFF8000FFFF)]
if in_port:
r.append(ofdef.create_oxm(ofdef.OXM_OF_IN_PORT, portid))
return r
else:
# With nicira extension, we store input network, output network and output port in REG4, REG5 and REG6
# REG7 is used as the reserved bits
def create_input_instructions(lognetid, extra_actions, is_logport):
lognetid = (lognetid & 0xffff)
return [ofdef.ofp_instruction_actions(actions = [
ofdef.ofp_action_set_field(
field = ofdef.create_oxm(ofdef.NXM_NX_REG4, lognetid)
),
ofdef.ofp_action_set_field(
field = ofdef.create_oxm(ofdef.NXM_NX_REG5, lognetid)
),
ofdef.ofp_action_set_field(
field = ofdef.create_oxm(ofdef.NXM_NX_REG6, ofdef.OFPP_ANY)
),
ofdef.ofp_action_set_field(
field = ofdef.create_oxm(ofdef.NXM_NX_REG7, (0x4000 if is_logport else 0))
)
] + list(extra_actions)),
ofdef.ofp_instruction_goto_table(table_id = input_next)
]
def create_output_oxm(lognetid, portid, in_port = False):
r = [ofdef.create_oxm(ofdef.NXM_NX_REG5, lognetid),
ofdef.create_oxm(ofdef.NXM_NX_REG6, portid),
ofdef.create_oxm(ofdef.NXM_NX_REG7_W, 0x8000 if in_port else 0, 0x8000)]
if in_port:
r.append(ofdef.create_oxm(ofdef.OXM_OF_IN_PORT, portid))
return r
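            # Step 1: remove flows for deleted objects. Logical ports are matched by
            # in_port (input table) and out_port (output table), physical ports by
            # in_port and by a cookie encoding the OpenFlow port number, and logical
            # networks by cookies encoding the group ID plus an output match on OFPP_ANY.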
for obj in removevalues:
if obj.isinstance(LogicalPort):
ofport = _lastportids.get(obj.id)
if ofport is not None:
cmds.append(ofdef.ofp_flow_mod(table_id = input_table,
command = ofdef.OFPFC_DELETE,
priority = ofdef.OFP_DEFAULT_PRIORITY,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm(oxm_fields = [
ofdef.create_oxm(ofdef.OXM_OF_IN_PORT,
ofport
)])
))
cmds.append(ofdef.ofp_flow_mod(table_id = output_table,
command = ofdef.OFPFC_DELETE,
priority = ofdef.OFP_DEFAULT_PRIORITY,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofport,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm()))
cmds.append(ofdef.ofp_flow_mod(table_id = output_table,
command = ofdef.OFPFC_DELETE,
priority = ofdef.OFP_DEFAULT_PRIORITY,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_IN_PORT,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm(oxm_fields = [
ofdef.create_oxm(ofdef.OXM_OF_IN_PORT, ofport)])))
elif obj.isinstance(PhysicalPort):
ofport = _lastportnames.get(obj.name)
if ofport is not None:
cmds.append(ofdef.ofp_flow_mod(table_id = input_table,
command = ofdef.OFPFC_DELETE,
priority = ofdef.OFP_DEFAULT_PRIORITY,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm(oxm_fields = [
ofdef.create_oxm(ofdef.OXM_OF_IN_PORT,
ofport
)])
))
cmds.append(ofdef.ofp_flow_mod(table_id = output_table,
cookie = 0x0001000000000000 | ((ofport & 0xffff) << 16),
cookie_mask = 0xffffffffffff0000,
command = ofdef.OFPFC_DELETE,
priority = ofdef.OFP_DEFAULT_PRIORITY,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm()))
elif obj.isinstance(LogicalNetwork):
groupid = _lastnetworkids.get(obj.getkey())
if groupid is not None:
cmds.append(ofdef.ofp_flow_mod(table_id = input_table,
cookie = 0x0001000000000000 | groupid,
cookie_mask = 0xffffffffffffffff,
command = ofdef.OFPFC_DELETE,
priority = ofdef.OFP_DEFAULT_PRIORITY,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm()
))
cmds.append(ofdef.ofp_flow_mod(table_id = output_table,
cookie = 0x0001000000000000 | groupid,
cookie_mask = 0xffff00000000ffff,
command = ofdef.OFPFC_DELETE,
priority = ofdef.OFP_DEFAULT_PRIORITY,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm()
))
cmds.append(ofdef.ofp_flow_mod(table_id = output_table,
command = ofdef.OFPFC_DELETE,
priority = ofdef.OFP_DEFAULT_PRIORITY,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm(oxm_fields = create_output_oxm(groupid, ofdef.OFPP_ANY))
))
            # Never use flow mod to update an input flow of a physical port, because the input_oxm may change.
for obj in updatedvalues:
if obj.isinstance(PhysicalPort):
ofport = _portnames.get(obj.name)
if ofport is not None:
cmds.append(ofdef.ofp_flow_mod(table_id = input_table,
command = ofdef.OFPFC_DELETE,
priority = ofdef.OFP_DEFAULT_PRIORITY,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm(oxm_fields = [
ofdef.create_oxm(ofdef.OXM_OF_IN_PORT,
ofport
)])
))
elif obj.isinstance(LogicalNetwork):
groupid = _networkids.get(obj.getkey())
if groupid is not None:
cmds.append(ofdef.ofp_flow_mod(table_id = input_table,
cookie = 0x0001000000000000 | groupid,
cookie_mask = 0xffffffffffffffff,
command = ofdef.OFPFC_DELETE,
priority = ofdef.OFP_DEFAULT_PRIORITY,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm()
))
elif obj.isinstance(PhysicalNetwork):
if obj in phyportdict:
for p in phyportdict[obj]:
ofport = _portnames.get(p.name)
if ofport is not None and p not in addvalues and p not in updatedvalues:
cmds.append(ofdef.ofp_flow_mod(table_id = input_table,
command = ofdef.OFPFC_DELETE,
priority = ofdef.OFP_DEFAULT_PRIORITY,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm(oxm_fields = [
ofdef.create_oxm(ofdef.OXM_OF_IN_PORT,
ofport
)])
))
await self.execute_commands(connection, cmds)
del cmds[:]
for obj in removevalues:
if obj.isinstance(LogicalNetwork):
groupid = _lastnetworkids.get(obj.getkey())
if groupid is not None:
cmds.append(ofdef.ofp_group_mod(command = ofdef.OFPGC_DELETE,
type = ofdef.OFPGT_ALL,
group_id = groupid
))
await self.execute_commands(connection, cmds)
del cmds[:]
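            # Step 3: rebuild broadcast groups. Each logical network gets an OFPGT_ALL
            # group whose buckets output to every logical port in the network plus the
            # flow parts returned by the driver for each physical port; if group
            # chaining is unavailable, a flat action list is kept in created_groups
            # instead of a group reference.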
disablechaining = connection.protocol.disablechaining
created_groups = {}
def create_buckets(obj, groupid):
# Generate buckets
buckets = [ofdef.ofp_bucket(actions=[ofdef.ofp_action_output(port = _portids[p.id])])
for p in logportdict[obj]
if p.id in _portids] if obj in logportdict else []
allactions = [ofdef.ofp_action_output(port = _portids[p.id])
for p in logportdict[obj]
if p.id in _portids] if obj in logportdict else []
disablegroup = False
if obj.physicalnetwork in phyportdict:
for p in phyportdict[obj.physicalnetwork]:
if (obj, p) in flowparts:
fp = flowparts[(obj,p)]
allactions.extend(fp[3])
if disablechaining and not disablegroup and any(a.type == ofdef.OFPAT_GROUP for a in fp[3]):
                                # We cannot use chaining. We use a long action list instead and hope there are no conflicts.
disablegroup = True
else:
buckets.append(ofdef.ofp_bucket(actions=list(fp[3])))
if disablegroup:
created_groups[groupid] = allactions
else:
created_groups[groupid] = [ofdef.ofp_action_group(group_id = groupid)]
return buckets
for obj in addvalues:
if obj.isinstance(LogicalNetwork):
groupid = _networkids.get(obj.getkey())
if groupid is not None:
cmds.append(ofdef.ofp_group_mod(command = ofdef.OFPGC_ADD,
type = ofdef.OFPGT_ALL,
group_id = groupid,
buckets = create_buckets(obj, groupid)
))
for obj in group_updates:
groupid = _networkids.get(obj.getkey())
if groupid is not None:
cmds.append(ofdef.ofp_group_mod(command = ofdef.OFPGC_MODIFY,
type = ofdef.OFPGT_ALL,
group_id = groupid,
buckets = create_buckets(obj, groupid)
))
await self.execute_commands(connection, cmds)
del cmds[:]
# There are 5 kinds of flows:
# 1. in_port = (Logical Port)
# 2. in_port = (Physical_Port), network = (Logical_Network)
# 3. out_port = (Logical Port)
# 4. out_port = (Physical_Port), network = (Logical_Network)
# 5. out_port = OFPP_ANY, network = (Logical_Network)
for obj in addvalues:
if obj.isinstance(LogicalPort):
ofport = _portids.get(obj.id)
lognetid = _networkids.get(obj.network.getkey())
if ofport is not None and lognetid is not None:
cmds.append(ofdef.ofp_flow_mod(table_id = input_table,
command = ofdef.OFPFC_ADD,
priority = ofdef.OFP_DEFAULT_PRIORITY,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm(oxm_fields = [
ofdef.create_oxm(ofdef.OXM_OF_IN_PORT,
ofport
)]),
instructions = create_input_instructions(lognetid, [], True)
))
cmds.append(ofdef.ofp_flow_mod(table_id = output_table,
command = ofdef.OFPFC_ADD,
priority = ofdef.OFP_DEFAULT_PRIORITY,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm(oxm_fields = create_output_oxm(lognetid, ofport)),
instructions = [ofdef.ofp_instruction_actions(actions = [
ofdef.ofp_action_output(port = ofport)
])]
))
cmds.append(ofdef.ofp_flow_mod(table_id = output_table,
command = ofdef.OFPFC_ADD,
priority = ofdef.OFP_DEFAULT_PRIORITY,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm(oxm_fields = create_output_oxm(lognetid, ofport, True)),
instructions = [ofdef.ofp_instruction_actions(actions = [
ofdef.ofp_action_output(port = ofdef.OFPP_IN_PORT)
])]
))
# Ignore update of logical port
# Physical port:
for obj in addvalues:
if obj.isinstance(PhysicalPort):
ofport = _portnames.get(obj.name)
if ofport is not None and obj.physicalnetwork in lognetdict:
for lognet in lognetdict[obj.physicalnetwork]:
lognetid = _networkids.get(lognet.getkey())
if lognetid is not None and (lognet, obj) in flowparts:
input_oxm, input_actions, output_actions, _, output_actions2 = flowparts[(lognet, obj)]
cmds.append(ofdef.ofp_flow_mod(table_id = input_table,
cookie = 0x0001000000000000 | lognetid,
cookie_mask = 0xffffffffffffffff,
command = ofdef.OFPFC_ADD,
priority = ofdef.OFP_DEFAULT_PRIORITY,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm(oxm_fields = [
ofdef.create_oxm(ofdef.OXM_OF_IN_PORT,
ofport
)] + list(input_oxm)),
instructions = create_input_instructions(lognetid, input_actions, False)
))
cmds.append(ofdef.ofp_flow_mod(table_id = output_table,
cookie = 0x0001000000000000 | lognetid | ((ofport & 0xffff) << 16),
cookie_mask = 0xffffffffffffffff,
command = ofdef.OFPFC_ADD,
priority = ofdef.OFP_DEFAULT_PRIORITY,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm(oxm_fields = create_output_oxm(lognetid, ofport, False)),
instructions = [ofdef.ofp_instruction_actions(actions =
list(output_actions))]
))
cmds.append(ofdef.ofp_flow_mod(table_id = output_table,
cookie = 0x0001000000000000 | lognetid | ((ofport & 0xffff) << 16),
cookie_mask = 0xffffffffffffffff,
command = ofdef.OFPFC_ADD,
priority = ofdef.OFP_DEFAULT_PRIORITY,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm(oxm_fields = create_output_oxm(lognetid, ofport, True)),
instructions = [ofdef.ofp_instruction_actions(actions =
list(output_actions2))]
))
for lognet in addvalues:
if lognet.isinstance(LogicalNetwork):
lognetid = _networkids.get(lognet.getkey())
if lognetid is not None and lognet.physicalnetwork in phyportdict:
for obj in phyportdict[lognet.physicalnetwork]:
ofport = _portnames.get(obj.name)
if ofport is not None and (lognet, obj) in flowparts and obj not in addvalues:
input_oxm, input_actions, output_actions, _, output_actions2 = flowparts[(lognet, obj)]
cmds.append(ofdef.ofp_flow_mod(table_id = input_table,
cookie = 0x0001000000000000 | lognetid,
cookie_mask = 0xffffffffffffffff,
command = ofdef.OFPFC_ADD,
priority = ofdef.OFP_DEFAULT_PRIORITY,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm(oxm_fields = [
ofdef.create_oxm(ofdef.OXM_OF_IN_PORT,
ofport
)] + input_oxm),
instructions = create_input_instructions(lognetid, input_actions, False)
))
cmds.append(ofdef.ofp_flow_mod(table_id = output_table,
cookie = 0x0001000000000000 | lognetid | ((ofport & 0xffff) << 16),
cookie_mask = 0xffffffffffffffff,
command = ofdef.OFPFC_ADD,
priority = ofdef.OFP_DEFAULT_PRIORITY,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm(oxm_fields = create_output_oxm(lognetid, ofport, False)),
instructions = [ofdef.ofp_instruction_actions(actions =
list(output_actions))]
))
cmds.append(ofdef.ofp_flow_mod(table_id = output_table,
cookie = 0x0001000000000000 | lognetid | ((ofport & 0xffff) << 16),
cookie_mask = 0xffffffffffffffff,
command = ofdef.OFPFC_ADD,
priority = ofdef.OFP_DEFAULT_PRIORITY,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm(oxm_fields = create_output_oxm(lognetid, ofport, True)),
instructions = [ofdef.ofp_instruction_actions(actions =
list(output_actions2))]
))
for obj in updatedvalues:
if obj.isinstance(PhysicalPort):
ofport = _portnames.get(obj.name)
if ofport is not None and obj.physicalnetwork in lognetdict:
for lognet in lognetdict[obj.physicalnetwork]:
lognetid = _networkids.get(lognet.getkey())
if lognetid is not None and (lognet, obj) in flowparts and not lognet in addvalues:
input_oxm, input_actions, output_actions, _, output_actions2 = flowparts[(lognet, obj)]
cmds.append(ofdef.ofp_flow_mod(table_id = input_table,
cookie = 0x0001000000000000 | lognetid,
cookie_mask = 0xffffffffffffffff,
command = ofdef.OFPFC_ADD,
priority = ofdef.OFP_DEFAULT_PRIORITY,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm(oxm_fields = [
ofdef.create_oxm(ofdef.OXM_OF_IN_PORT,
ofport
)] + input_oxm),
instructions = create_input_instructions(lognetid, input_actions, False)
))
cmds.append(ofdef.ofp_flow_mod(table_id = output_table,
cookie = 0x0001000000000000 | lognetid | ((ofport & 0xffff) << 16),
cookie_mask = 0xffffffffffffffff,
command = ofdef.OFPFC_MODIFY,
priority = ofdef.OFP_DEFAULT_PRIORITY,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm(oxm_fields = create_output_oxm(lognetid, ofport, False)),
instructions = [ofdef.ofp_instruction_actions(actions =
list(output_actions))]
))
cmds.append(ofdef.ofp_flow_mod(table_id = output_table,
cookie = 0x0001000000000000 | lognetid | ((ofport & 0xffff) << 16),
cookie_mask = 0xffffffffffffffff,
command = ofdef.OFPFC_MODIFY,
priority = ofdef.OFP_DEFAULT_PRIORITY,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm(oxm_fields = create_output_oxm(lognetid, ofport, True)),
instructions = [ofdef.ofp_instruction_actions(actions =
list(output_actions2))]
))
for lognet in updatedvalues:
if lognet.isinstance(LogicalNetwork):
lognetid = _networkids.get(lognet.getkey())
if lognetid is not None and lognet.physicalnetwork in phyportdict:
for obj in phyportdict[lognet.physicalnetwork]:
ofport = _portnames.get(obj.name)
if ofport is not None and (lognet, obj) in flowparts and obj not in addvalues and obj not in updatedvalues:
input_oxm, input_actions, output_actions, _, output_actions2 = flowparts[(lognet, obj)]
cmds.append(ofdef.ofp_flow_mod(table_id = input_table,
cookie = 0x0001000000000000 | lognetid,
cookie_mask = 0xffffffffffffffff,
command = ofdef.OFPFC_ADD,
priority = ofdef.OFP_DEFAULT_PRIORITY,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm(oxm_fields = [
ofdef.create_oxm(ofdef.OXM_OF_IN_PORT,
ofport
)] + input_oxm),
instructions = create_input_instructions(lognetid, input_actions, False)
))
cmds.append(ofdef.ofp_flow_mod(table_id = output_table,
cookie = 0x0001000000000000 | lognetid | ((ofport & 0xffff) << 16),
cookie_mask = 0xffffffffffffffff,
command = ofdef.OFPFC_MODIFY,
priority = ofdef.OFP_DEFAULT_PRIORITY,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm(oxm_fields = create_output_oxm(lognetid, ofport)),
instructions = [ofdef.ofp_instruction_actions(actions =
list(output_actions))]
))
cmds.append(ofdef.ofp_flow_mod(table_id = output_table,
cookie = 0x0001000000000000 | lognetid | ((ofport & 0xffff) << 16),
cookie_mask = 0xffffffffffffffff,
command = ofdef.OFPFC_MODIFY,
priority = ofdef.OFP_DEFAULT_PRIORITY,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm(oxm_fields = create_output_oxm(lognetid, ofport, True)),
instructions = [ofdef.ofp_instruction_actions(actions =
list(output_actions2))]
))
# Physical network is updated
for pnet in updatedvalues:
if pnet.isinstance(PhysicalNetwork) and pnet in lognetdict:
for lognet in lognetdict[pnet]:
if lognet.isinstance(LogicalNetwork):
lognetid = _networkids.get(lognet.getkey())
if lognetid is not None and lognet not in updatedvalues and lognet not in addvalues and lognet.physicalnetwork in phyportdict:
for obj in phyportdict[lognet.physicalnetwork]:
ofport = _portnames.get(obj.name)
if ofport is not None and (lognet, obj) in flowparts and obj not in addvalues and obj not in updatedvalues:
input_oxm, input_actions, output_actions, _, output_actions2 = flowparts[(lognet, obj)]
cmds.append(ofdef.ofp_flow_mod(table_id = input_table,
cookie = 0x0001000000000000 | lognetid,
cookie_mask = 0xffffffffffffffff,
command = ofdef.OFPFC_ADD,
priority = ofdef.OFP_DEFAULT_PRIORITY,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm(oxm_fields = [
ofdef.create_oxm(ofdef.OXM_OF_IN_PORT,
ofport
)] + input_oxm),
instructions = create_input_instructions(lognetid, input_actions, False)
))
cmds.append(ofdef.ofp_flow_mod(table_id = output_table,
cookie = 0x0001000000000000 | lognetid | ((ofport & 0xffff) << 16),
cookie_mask = 0xffffffffffffffff,
command = ofdef.OFPFC_MODIFY,
priority = ofdef.OFP_DEFAULT_PRIORITY,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm(oxm_fields = create_output_oxm(lognetid, ofport)),
instructions = [ofdef.ofp_instruction_actions(actions =
list(output_actions))]
))
cmds.append(ofdef.ofp_flow_mod(table_id = output_table,
cookie = 0x0001000000000000 | lognetid | ((ofport & 0xffff) << 16),
cookie_mask = 0xffffffffffffffff,
command = ofdef.OFPFC_MODIFY,
priority = ofdef.OFP_DEFAULT_PRIORITY,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm(oxm_fields = create_output_oxm(lognetid, ofport, True)),
instructions = [ofdef.ofp_instruction_actions(actions =
list(output_actions2))]
))
# Logical network broadcast
for lognet in addvalues:
if lognet.isinstance(LogicalNetwork):
lognetid = _networkids.get(lognet.getkey())
if lognetid is not None and lognetid in created_groups:
cmds.append(ofdef.ofp_flow_mod(table_id = output_table,
command = ofdef.OFPFC_ADD,
priority = ofdef.OFP_DEFAULT_PRIORITY,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm(oxm_fields = create_output_oxm(lognetid, ofdef.OFPP_ANY)),
instructions = [ofdef.ofp_instruction_actions(actions =
created_groups.pop(lognetid))]
))
for lognetid, actions in created_groups.items():
cmds.append(ofdef.ofp_flow_mod(table_id = output_table,
command = ofdef.OFPFC_ADD,
priority = ofdef.OFP_DEFAULT_PRIORITY,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm(oxm_fields = create_output_oxm(lognetid, ofdef.OFPP_ANY)),
instructions = [ofdef.ofp_instruction_actions(actions = actions)]
))
# Ignore logical network update
await self.execute_commands(connection, cmds)
self._flows_sent = _flows_sent
await self.wait_for_send(FlowReadyEvent(self._connection))
except Exception:
self._parent._logger.warning("Update flow for connection %r failed with exception", connection, exc_info = True)
            # We don't want the whole flow update to stop, so ignore the exception and continue
@defaultconfig
@depend(ofpportmanager.OpenflowPortManager, ovsdbportmanager.OVSDBPortManager, objectdb.ObjectDB)
class IOProcessing(FlowBase):
"Ingress and Egress processing"
_tablerequest = (("ingress", (), ''),
("egress", ("ingress",),''))
# vHost map from OpenFlow vHost to OVSDB vHost. If the OpenFlow vHost is not found in this map,
# it will map to the default OVSDB vHost ('')
_default_vhostmap = {}
    # Enable forwarding in this server, so it becomes a forwarding node (also known as an N/S gateway)
_default_enable_router_forward = False
def __init__(self, server):
FlowBase.__init__(self, server)
self.apiroutine = RoutineContainer(self.scheduler)
self.apiroutine.main = self._main
self.routines.append(self.apiroutine)
self._flowupdaters = {}
self._portchanging = set()
self._portchanged = set()
self.createAPI(api(self.flowready, self.apiroutine))
async def flowready(self, connection, logicalnetworkid, physicalportid):
"""
Wait until flows are sent to switch
:param connection: Openflow connection
:param logicalnetworkid: logical network id (integer)
:param physicalportid: physical port id (integer)
        :return: False if the connection/network/port does not exist, otherwise True
"""
if connection not in self._flowupdaters:
return False
else:
return await self._flowupdaters[connection].flowready(logicalnetworkid, physicalportid)
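    # Main routine: react to FlowInitialize and OpenFlow port-change notifications,
    # spawning a subroutine to initialize, tear down or refresh the per-connection
    # flow updater as appropriate.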
async def _main(self):
flow_init = FlowInitialize.createMatcher(_ismatch = lambda x: self.vhostbind is None or x.vhost in self.vhostbind)
port_change = ModuleNotification.createMatcher("openflowportmanager", "update", _ismatch = lambda x: self.vhostbind is None or x.vhost in self.vhostbind)
while True:
e, m = await M_(flow_init, port_change)
c = e.connection
if m is flow_init:
self.apiroutine.subroutine(self._init_conn(c))
else:
if e.reason == 'disconnected':
self.apiroutine.subroutine(self._remove_conn(c))
else:
self.apiroutine.subroutine(self._portchange(c))
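    # Initialize a new OpenFlow connection: install default-drop flows in the
    # ingress and egress tables, look up the bridge name and system-id through
    # OVSDB, then start an IOFlowUpdater bound to this connection.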
async def _init_conn(self, conn):
# Default drop
await conn.protocol.batch((conn.openflowdef.ofp_flow_mod(table_id = self._gettableindex("ingress", conn.protocol.vhost),
command = conn.openflowdef.OFPFC_ADD,
priority = 0,
buffer_id = conn.openflowdef.OFP_NO_BUFFER,
match = conn.openflowdef.ofp_match_oxm(),
instructions = [conn.openflowdef.ofp_instruction_actions(
type = conn.openflowdef.OFPIT_CLEAR_ACTIONS
)]
),
conn.openflowdef.ofp_flow_mod(table_id = self._gettableindex("egress", conn.protocol.vhost),
command = conn.openflowdef.OFPFC_ADD,
priority = 0,
buffer_id = conn.openflowdef.OFP_NO_BUFFER,
match = conn.openflowdef.ofp_match_oxm(),
instructions = [conn.openflowdef.ofp_instruction_actions(
type = conn.openflowdef.OFPIT_CLEAR_ACTIONS
)]
)), conn, self.apiroutine)
if conn in self._flowupdaters:
self._flowupdaters[conn].close()
datapath_id = conn.openflow_datapathid
ovsdb_vhost = self.vhostmap.get(conn.protocol.vhost, "")
bridgename, systemid, _ = await call_api(self.apiroutine, 'ovsdbmanager', 'waitbridgeinfo',
{'datapathid': datapath_id,
'vhost': ovsdb_vhost})
new_updater = IOFlowUpdater(conn, systemid, bridgename, self)
self._flowupdaters[conn] = new_updater
new_updater.start()
await self._portchange(conn)
async def _remove_conn(self, conn):
# Do not need to modify flows
if conn in self._flowupdaters:
self._flowupdaters[conn].close()
del self._flowupdaters[conn]
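    # Reconcile the OpenFlow port list with the OVSDB port list for a connection
    # and push the intersection to the flow updater, retrying (and resyncing the
    # port managers) until both sources agree or the connection goes down.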
async def _portchange(self, conn):
# Do not re-enter
if conn in self._portchanging:
self._portchanged.add(conn)
return
self._portchanging.add(conn)
last_portno = set()
try:
while True:
self._portchanged.discard(conn)
flow_updater = self._flowupdaters.get(conn)
if flow_updater is None:
break
if not conn.connected:
break
datapath_id = conn.openflow_datapathid
ovsdb_vhost = self.vhostmap.get(conn.protocol.vhost, "")
ovsdb_update_event_matcher = ModuleNotification.createMatcher(
"ovsdbportmanager",
"update",
_ismatch = lambda x: x.vhost == ovsdb_vhost and x.datapathid == datapath_id)
ovsdb_updated = False
def _ovsdb_update_callback(event, matcher):
nonlocal ovsdb_updated
ovsdb_updated = True
ports, ovsdb_ports = \
await self.apiroutine.with_callback(
self.apiroutine.execute_all(
[call_api(self.apiroutine, 'openflowportmanager', 'getports', {'datapathid': datapath_id,
'vhost': conn.protocol.vhost}),
call_api(self.apiroutine, 'ovsdbportmanager', 'getports', {'datapathid': datapath_id,
'vhost': ovsdb_vhost})]),
_ovsdb_update_callback,
ovsdb_update_event_matcher
)
if conn in self._portchanged or ovsdb_updated:
# Retrieve again
continue
if not conn.connected:
self._portchanged.discard(conn)
return
ovsdb_port_dict = {p['ofport']: p for p in ovsdb_ports}
# Choose the intersection of ports from two sources
port_pairs = [(p, ovsdb_port_dict[p.port_no & 0xffff])
for p in ports
if (p.port_no & 0xffff) in ovsdb_port_dict]
current_portno = {p.port_no for p, _ in port_pairs}
                # Get the updater again to prevent concurrency problems
flow_updater = self._flowupdaters.get(conn)
if flow_updater is None:
break
if not conn.connected:
break
if conn in self._portchanged or ovsdb_updated:
continue
# If all openflow ports have their OVSDB ports, we are in sync and can exit
if all((p.port_no & 0xffff) in ovsdb_port_dict for p in ports):
if current_portno != last_portno:
if port_pairs:
await self.apiroutine.with_callback(
flow_updater.update_ports(*zip(*port_pairs)),
_ovsdb_update_callback,
ovsdb_update_event_matcher
)
else:
await self.apiroutine.with_callback(
flow_updater.update_ports((), ()),
_ovsdb_update_callback,
ovsdb_update_event_matcher
)
break
else:
# Partially update
if current_portno and current_portno != last_portno:
if port_pairs:
await self.apiroutine.with_callback(
flow_updater.update_ports(*zip(*port_pairs)),
_ovsdb_update_callback,
ovsdb_update_event_matcher
)
else:
await self.apiroutine.with_callback(
flow_updater.update_ports((), ()),
_ovsdb_update_callback,
ovsdb_update_event_matcher
)
last_portno = current_portno
                # Some OpenFlow ports do not have OVSDB information; this may be caused by:
                # 1. A port is added to OpenFlow, but not yet retrieved from OVSDB
                # 2. A port is deleted from OVSDB, but not yet updated in OpenFlow
                # 3. Other synchronization problems
port_change = ModuleNotification.createMatcher("openflowportmanager", "update",
_ismatch = lambda x: x.connection == conn)
conndown = conn.protocol.statematcher(conn)
timeout, _, m = await self.apiroutine.wait_with_timeout(5,
port_change,
ovsdb_update_event_matcher,
conndown)
if timeout:
self._logger.warning('OpenFlow ports may not be synchronized. Try resync...')
# Connection is up but ports are not synchronized, try resync
await self.apiroutine.execute_all([call_api(self.apiroutine, 'openflowportmanager', 'resync',
{'datapathid': datapath_id,
'vhost': conn.protocol.vhost}),
call_api(self.apiroutine, 'ovsdbportmanager', 'resync',
{'datapathid': datapath_id,
'vhost': ovsdb_vhost})])
# Wait for a while
await self.apiroutine.wait_with_timeout(5)
continue
elif m is conndown:
# Connection lost, no longer need to trace the port changes
break
finally:
self._portchanging.remove(conn)
# Copyright 2013-2015 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file contains the detection logic for external
# dependencies. Mostly just uses pkg-config but also contains
# custom logic for packages that don't provide pkg-config files.
# Currently one file, should probably be split into a
# package before this gets too big.
import re
import os, stat, glob, subprocess, shutil
import sysconfig
from collections import OrderedDict
from . mesonlib import MesonException
from . import mlog
from . import mesonlib
from .environment import detect_cpu_family, for_windows
class DependencyException(MesonException):
def __init__(self, *args, **kwargs):
MesonException.__init__(self, *args, **kwargs)
class Dependency():
def __init__(self, type_name='unknown'):
self.name = "null"
self.is_found = False
self.type_name = type_name
def __repr__(self):
s = '<{0} {1}: {2}>'
return s.format(self.__class__.__name__, self.name, self.is_found)
def get_compile_args(self):
return []
def get_link_args(self):
return []
def found(self):
return self.is_found
def get_sources(self):
"""Source files that need to be added to the target.
As an example, gtest-all.cc when using GTest."""
return []
def get_name(self):
return self.name
def get_exe_args(self):
return []
def need_threads(self):
return False
    def get_type_name(self):
        return self.type_name
def get_pkgconfig_variable(self, variable_name):
raise MesonException('Tried to get a pkg-config variable from a non-pkgconfig dependency.')
class InternalDependency(Dependency):
def __init__(self, version, incdirs, compile_args, link_args, libraries, sources, ext_deps):
super().__init__('internal')
self.version = version
self.include_directories = incdirs
self.compile_args = compile_args
self.link_args = link_args
self.libraries = libraries
self.sources = sources
self.ext_deps = ext_deps
def get_compile_args(self):
return self.compile_args
def get_link_args(self):
return self.link_args
def get_version(self):
return self.version
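# A dependency resolved through pkg-config (or the pkg-config binary named in a
# cross file). Whether pkg-config itself is available is detected once and cached
# in the pkgconfig_found class attribute.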
class PkgConfigDependency(Dependency):
pkgconfig_found = None
def __init__(self, name, environment, kwargs):
Dependency.__init__(self, 'pkgconfig')
self.is_libtool = False
self.required = kwargs.get('required', True)
self.static = kwargs.get('static', False)
self.silent = kwargs.get('silent', False)
if not isinstance(self.static, bool):
raise DependencyException('Static keyword must be boolean')
self.cargs = []
self.libs = []
if 'native' in kwargs and environment.is_cross_build():
want_cross = not kwargs['native']
else:
want_cross = environment.is_cross_build()
self.name = name
if PkgConfigDependency.pkgconfig_found is None:
self.check_pkgconfig()
self.is_found = False
if not PkgConfigDependency.pkgconfig_found:
if self.required:
raise DependencyException('Pkg-config not found.')
return
if environment.is_cross_build() and want_cross:
if "pkgconfig" not in environment.cross_info.config["binaries"]:
raise DependencyException('Pkg-config binary missing from cross file.')
pkgbin = environment.cross_info.config["binaries"]['pkgconfig']
self.type_string = 'Cross'
else:
pkgbin = 'pkg-config'
self.type_string = 'Native'
mlog.debug('Determining dependency %s with pkg-config executable %s.' % (name, pkgbin))
self.pkgbin = pkgbin
ret, self.modversion = self._call_pkgbin(['--modversion', name])
if ret != 0:
if self.required:
raise DependencyException('%s dependency %s not found.' % (self.type_string, name))
self.modversion = 'none'
return
found_msg = ['%s dependency' % self.type_string, mlog.bold(name), 'found:']
self.version_requirement = kwargs.get('version', None)
if self.version_requirement is None:
self.is_found = True
else:
if not isinstance(self.version_requirement, str):
raise DependencyException('Version argument must be string.')
self.is_found = mesonlib.version_compare(self.modversion, self.version_requirement)
if not self.is_found:
found_msg += [mlog.red('NO'), 'found {!r}'.format(self.modversion),
'but need {!r}'.format(self.version_requirement)]
if not self.silent:
mlog.log(*found_msg)
if self.required:
raise DependencyException(
'Invalid version of a dependency, needed %s %s found %s.' %
(name, self.version_requirement, self.modversion))
return
found_msg += [mlog.green('YES'), self.modversion]
if not self.silent:
mlog.log(*found_msg)
# Fetch cargs to be used while using this dependency
self._set_cargs()
# Fetch the libraries and library paths needed for using this
self._set_libs()
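    # Run the selected pkg-config binary with the given arguments and return a
    # (returncode, stripped stdout) tuple; the output is already text because the
    # process is opened with universal_newlines=True.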
def _call_pkgbin(self, args):
p = subprocess.Popen([self.pkgbin] + args,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=os.environ, universal_newlines=True)
out = p.communicate()[0]
return (p.returncode, out.strip())
def _set_cargs(self):
ret, out = self._call_pkgbin(['--cflags', self.name])
if ret != 0:
            raise DependencyException('Could not generate cargs for %s:\n\n%s' %
                                      (self.name, out))
self.cargs = out.split()
def _set_libs(self):
libcmd = [self.name, '--libs']
if self.static:
libcmd.append('--static')
ret, out = self._call_pkgbin(libcmd)
if ret != 0:
            raise DependencyException('Could not generate libs for %s:\n\n%s' %
                                      (self.name, out))
self.libs = []
for lib in out.split():
if lib.endswith(".la"):
shared_libname = self.extract_libtool_shlib(lib)
shared_lib = os.path.join(os.path.dirname(lib), shared_libname)
if not os.path.exists(shared_lib):
shared_lib = os.path.join(os.path.dirname(lib), ".libs", shared_libname)
if not os.path.exists(shared_lib):
                        raise DependencyException('Got a libtool-specific "%s" dependency, '
                                                  'but we could not compute the actual shared '
                                                  'library path' % lib)
lib = shared_lib
self.is_libtool = True
self.libs.append(lib)
def get_pkgconfig_variable(self, variable_name):
ret, out = self._call_pkgbin(['--variable=' + variable_name, self.name])
variable = ''
if ret != 0:
if self.required:
raise DependencyException('%s dependency %s not found.' %
(self.type_string, self.name))
else:
variable = out.strip()
mlog.debug('Got pkgconfig variable %s : %s' % (variable_name, variable))
return variable
def get_modversion(self):
return self.modversion
def get_version(self):
return self.get_modversion()
def get_compile_args(self):
return self.cargs
def get_link_args(self):
return self.libs
def check_pkgconfig(self):
try:
p = subprocess.Popen(['pkg-config', '--version'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out = p.communicate()[0]
if p.returncode == 0:
if not self.silent:
mlog.log('Found pkg-config:', mlog.bold(shutil.which('pkg-config')),
'(%s)' % out.decode().strip())
PkgConfigDependency.pkgconfig_found = True
return
except Exception:
pass
PkgConfigDependency.pkgconfig_found = False
if not self.silent:
mlog.log('Found Pkg-config:', mlog.red('NO'))
def found(self):
return self.is_found
def extract_field(self, la_file, fieldname):
with open(la_file) as f:
for line in f:
arr = line.strip().split('=')
if arr[0] == fieldname:
return arr[1][1:-1]
return None
def extract_dlname_field(self, la_file):
return self.extract_field(la_file, 'dlname')
def extract_libdir_field(self, la_file):
return self.extract_field(la_file, 'libdir')
def extract_libtool_shlib(self, la_file):
'''
Returns the path to the shared library
corresponding to this .la file
'''
dlname = self.extract_dlname_field(la_file)
if dlname is None:
return None
# Darwin uses absolute paths where possible; since the libtool files never
# contain absolute paths, use the libdir field
if mesonlib.is_osx():
dlbasename = os.path.basename(dlname)
libdir = self.extract_libdir_field(la_file)
if libdir is None:
return dlbasename
return os.path.join(libdir, dlbasename)
# From the comments in extract_libtool(), older libtools had
# a path rather than the raw dlname
return os.path.basename(dlname)
class WxDependency(Dependency):
wx_found = None
def __init__(self, environment, kwargs):
Dependency.__init__(self, 'wx')
self.is_found = False
if WxDependency.wx_found is None:
self.check_wxconfig()
if not WxDependency.wx_found:
mlog.log("Neither wx-config-3.0 nor wx-config found; can't detect dependency")
return
p = subprocess.Popen([self.wxc, '--version'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out = p.communicate()[0]
if p.returncode != 0:
mlog.log('Dependency wxwidgets found:', mlog.red('NO'))
self.cargs = []
self.libs = []
else:
self.modversion = out.decode().strip()
version_req = kwargs.get('version', None)
if version_req is not None:
if not mesonlib.version_compare(self.modversion, version_req):
                    mlog.log('Wxwidgets version %s does not fulfill requirement %s' %\
(self.modversion, version_req))
return
mlog.log('Dependency wxwidgets found:', mlog.green('YES'))
self.is_found = True
self.requested_modules = self.get_requested(kwargs)
            # wx-config has a --cflags option as well, but since wxWidgets requires C++,
            # using --cxxflags should be good enough, at least for now.
p = subprocess.Popen([self.wxc, '--cxxflags'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out = p.communicate()[0]
if p.returncode != 0:
raise DependencyException('Could not generate cargs for wxwidgets.')
self.cargs = out.decode().split()
p = subprocess.Popen([self.wxc, '--libs'] + self.requested_modules,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out = p.communicate()[0]
if p.returncode != 0:
raise DependencyException('Could not generate libs for wxwidgets.')
self.libs = out.decode().split()
def get_requested(self, kwargs):
modules = 'modules'
if not modules in kwargs:
return []
candidates = kwargs[modules]
if isinstance(candidates, str):
return [candidates]
for c in candidates:
if not isinstance(c, str):
raise DependencyException('wxwidgets module argument is not a string.')
return candidates
def get_modversion(self):
return self.modversion
def get_compile_args(self):
return self.cargs
def get_link_args(self):
return self.libs
def check_wxconfig(self):
for wxc in ['wx-config-3.0', 'wx-config']:
try:
p = subprocess.Popen([wxc, '--version'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out = p.communicate()[0]
if p.returncode == 0:
mlog.log('Found wx-config:', mlog.bold(shutil.which(wxc)),
'(%s)' % out.decode().strip())
self.wxc = wxc
WxDependency.wx_found = True
return
except Exception:
pass
        WxDependency.wx_found = False
mlog.log('Found wx-config:', mlog.red('NO'))
def found(self):
return self.is_found
class ExternalProgram():
def __init__(self, name, fullpath=None, silent=False, search_dir=None):
self.name = name
if fullpath is not None:
if not isinstance(fullpath, list):
self.fullpath = [fullpath]
else:
self.fullpath = fullpath
else:
self.fullpath = self._search(name, search_dir)
if not silent:
if self.found():
mlog.log('Program', mlog.bold(name), 'found:', mlog.green('YES'),
'(%s)' % ' '.join(self.fullpath))
else:
mlog.log('Program', mlog.bold(name), 'found:', mlog.red('NO'))
@staticmethod
def _shebang_to_cmd(script):
"""
Windows does not understand shebangs, so we check if the file has a
shebang and manually parse it to figure out the interpreter to use
"""
try:
with open(script) as f:
first_line = f.readline().strip()
if first_line.startswith('#!'):
commands = first_line[2:].split('#')[0].strip().split()
if mesonlib.is_windows():
# Windows does not have /usr/bin.
commands[0] = commands[0].split('/')[-1]
if commands[0] == 'env':
commands = commands[1:]
return commands + [script]
except Exception:
pass
return False
@staticmethod
def _is_executable(path):
suffix = os.path.splitext(path)[-1].lower()[1:]
if mesonlib.is_windows():
if suffix == 'exe' or suffix == 'com' or suffix == 'bat':
return True
elif os.access(path, os.X_OK):
return True
return False
def _search_dir(self, name, search_dir):
if search_dir is None:
return False
trial = os.path.join(search_dir, name)
if not os.path.exists(trial):
return False
if self._is_executable(trial):
return [trial]
        # Now getting desperate. Maybe it is a script file that is a) not chmod'ed
        # executable, or b) we are on Windows, so it can't be directly executed.
return self._shebang_to_cmd(trial)
def _search(self, name, search_dir):
commands = self._search_dir(name, search_dir)
if commands:
return commands
# Do a standard search in PATH
fullpath = shutil.which(name)
if fullpath or not mesonlib.is_windows():
# On UNIX-like platforms, the standard PATH search is enough
return [fullpath]
# On Windows, interpreted scripts must have an extension otherwise they
# cannot be found by a standard PATH search. So we do a custom search
# where we manually search for a script with a shebang in PATH.
search_dirs = os.environ.get('PATH', '').split(';')
for search_dir in search_dirs:
commands = self._search_dir(name, search_dir)
if commands:
return commands
return [None]
def found(self):
return self.fullpath[0] is not None
def get_command(self):
return self.fullpath
def get_name(self):
return self.name
class ExternalLibrary(Dependency):
# TODO: Add `lang` to the parent Dependency object so that dependencies can
# be expressed for languages other than C-like
def __init__(self, name, link_args=None, language=None, silent=False):
super().__init__('external')
self.name = name
self.is_found = False
self.link_args = []
self.lang_args = []
if link_args:
self.is_found = True
if not isinstance(link_args, list):
link_args = [link_args]
if language:
self.lang_args = {language: link_args}
else:
self.link_args = link_args
if not silent:
if self.is_found:
mlog.log('Library', mlog.bold(name), 'found:', mlog.green('YES'))
else:
mlog.log('Library', mlog.bold(name), 'found:', mlog.red('NO'))
def found(self):
return self.is_found
def get_link_args(self):
return self.link_args
def get_lang_args(self, lang):
if lang in self.lang_args:
return self.lang_args[lang]
return []
class BoostDependency(Dependency):
# Some boost libraries have different names for
# their sources and libraries. This dict maps
# between the two.
name2lib = {'test' : 'unit_test_framework'}
def __init__(self, environment, kwargs):
Dependency.__init__(self, 'boost')
self.name = 'boost'
self.environment = environment
self.libdir = ''
if 'native' in kwargs and environment.is_cross_build():
want_cross = not kwargs['native']
else:
want_cross = environment.is_cross_build()
try:
self.boost_root = os.environ['BOOST_ROOT']
if not os.path.isabs(self.boost_root):
raise DependencyException('BOOST_ROOT must be an absolute path.')
except KeyError:
self.boost_root = None
if self.boost_root is None:
if want_cross:
raise DependencyException('BOOST_ROOT is needed while cross-compiling')
if mesonlib.is_windows():
self.boost_root = self.detect_win_root()
self.incdir = self.boost_root
else:
self.incdir = '/usr/include'
else:
self.incdir = os.path.join(self.boost_root, 'include')
self.boost_inc_subdir = os.path.join(self.incdir, 'boost')
mlog.debug('Boost library root dir is', self.boost_root)
self.src_modules = {}
self.lib_modules = {}
self.lib_modules_mt = {}
self.detect_version()
self.requested_modules = self.get_requested(kwargs)
module_str = ', '.join(self.requested_modules)
if self.version is not None:
self.detect_src_modules()
self.detect_lib_modules()
self.validate_requested()
if self.boost_root is not None:
info = self.version + ', ' + self.boost_root
else:
info = self.version
mlog.log('Dependency Boost (%s) found:' % module_str, mlog.green('YES'),
'(' + info + ')')
else:
mlog.log("Dependency Boost (%s) found:" % module_str, mlog.red('NO'))
def detect_win_root(self):
globtext = 'c:\\local\\boost_*'
files = glob.glob(globtext)
if len(files) > 0:
return files[0]
return 'C:\\'
def get_compile_args(self):
args = []
if self.boost_root is not None:
if mesonlib.is_windows():
args.append('-I' + self.boost_root)
else:
args.append('-I' + os.path.join(self.boost_root, 'include'))
else:
args.append('-I' + self.incdir)
return args
def get_requested(self, kwargs):
candidates = kwargs.get('modules', [])
if isinstance(candidates, str):
return [candidates]
for c in candidates:
if not isinstance(c, str):
raise DependencyException('Boost module argument is not a string.')
return candidates
def validate_requested(self):
for m in self.requested_modules:
if m not in self.src_modules:
raise DependencyException('Requested Boost module "%s" not found.' % m)
def found(self):
return self.version is not None
def get_version(self):
return self.version
def detect_version(self):
try:
ifile = open(os.path.join(self.boost_inc_subdir, 'version.hpp'))
except FileNotFoundError:
self.version = None
return
with ifile:
for line in ifile:
if line.startswith("#define") and 'BOOST_LIB_VERSION' in line:
ver = line.split()[-1]
ver = ver[1:-1]
self.version = ver.replace('_', '.')
return
self.version = None
def detect_src_modules(self):
for entry in os.listdir(self.boost_inc_subdir):
entry = os.path.join(self.boost_inc_subdir, entry)
if stat.S_ISDIR(os.stat(entry).st_mode):
self.src_modules[os.path.split(entry)[-1]] = True
def detect_lib_modules(self):
if mesonlib.is_windows():
return self.detect_lib_modules_win()
return self.detect_lib_modules_nix()
def detect_lib_modules_win(self):
arch = detect_cpu_family(self.environment.coredata.compilers)
# Guess the libdir
if arch == 'x86':
gl = 'lib32*'
elif arch == 'x86_64':
gl = 'lib64*'
else:
# Does anyone do Boost cross-compiling to other archs on Windows?
gl = None
# See if the libdir is valid
if gl:
libdir = glob.glob(os.path.join(self.boost_root, gl))
else:
libdir = []
# Can't find libdir, bail
if len(libdir) == 0:
return
libdir = libdir[0]
self.libdir = libdir
globber = 'boost_*-gd-*.lib' # FIXME
for entry in glob.glob(os.path.join(libdir, globber)):
(_, fname) = os.path.split(entry)
base = fname.split('_', 1)[1]
modname = base.split('-', 1)[0]
self.lib_modules_mt[modname] = fname
def detect_lib_modules_nix(self):
libsuffix = None
if mesonlib.is_osx():
libsuffix = 'dylib'
else:
libsuffix = 'so'
globber = 'libboost_*.{}'.format(libsuffix)
if self.boost_root is None:
libdirs = mesonlib.get_library_dirs()
else:
libdirs = [os.path.join(self.boost_root, 'lib')]
for libdir in libdirs:
for entry in glob.glob(os.path.join(libdir, globber)):
lib = os.path.basename(entry)
name = lib.split('.')[0].split('_', 1)[-1]
# I'm not 100% sure what to do here. Some distros
# have modules such as thread only as -mt versions.
if entry.endswith('-mt.so'):
self.lib_modules_mt[name] = True
else:
self.lib_modules[name] = True
def get_win_link_args(self):
args = []
if self.boost_root:
args.append('-L' + self.libdir)
for module in self.requested_modules:
module = BoostDependency.name2lib.get(module, module)
if module in self.lib_modules_mt:
args.append(self.lib_modules_mt[module])
return args
def get_link_args(self):
if mesonlib.is_windows():
return self.get_win_link_args()
args = []
if self.boost_root:
args.append('-L' + os.path.join(self.boost_root, 'lib'))
for module in self.requested_modules:
module = BoostDependency.name2lib.get(module, module)
if module in self.lib_modules or module in self.lib_modules_mt:
linkcmd = '-lboost_' + module
args.append(linkcmd)
# FIXME a hack, but Boost's testing framework has a lot of
# different options and it's hard to determine what to do
# without feedback from actual users. Update this
# as we get more bug reports.
                if module == 'unit_test_framework':
args.append('-lboost_test_exec_monitor')
elif module + '-mt' in self.lib_modules_mt:
linkcmd = '-lboost_' + module + '-mt'
args.append(linkcmd)
                    if module == 'unit_test_framework':
args.append('-lboost_test_exec_monitor-mt')
return args
def get_sources(self):
return []
def need_threads(self):
return 'thread' in self.requested_modules
class GTestDependency(Dependency):
def __init__(self, environment, kwargs):
Dependency.__init__(self, 'gtest')
self.main = kwargs.get('main', False)
self.name = 'gtest'
self.libname = 'libgtest.so'
self.libmain_name = 'libgtest_main.so'
self.include_dir = '/usr/include'
self.src_dirs = ['/usr/src/gtest/src', '/usr/src/googletest/googletest/src']
self.detect()
def found(self):
return self.is_found
def detect(self):
trial_dirs = mesonlib.get_library_dirs()
glib_found = False
gmain_found = False
for d in trial_dirs:
if os.path.isfile(os.path.join(d, self.libname)):
glib_found = True
if os.path.isfile(os.path.join(d, self.libmain_name)):
gmain_found = True
if glib_found and gmain_found:
self.is_found = True
self.compile_args = []
self.link_args = ['-lgtest']
if self.main:
self.link_args.append('-lgtest_main')
self.sources = []
mlog.log('Dependency GTest found:', mlog.green('YES'), '(prebuilt)')
elif self.detect_srcdir():
self.is_found = True
self.compile_args = ['-I' + self.src_include_dir]
self.link_args = []
if self.main:
self.sources = [self.all_src, self.main_src]
else:
self.sources = [self.all_src]
mlog.log('Dependency GTest found:', mlog.green('YES'), '(building self)')
else:
mlog.log('Dependency GTest found:', mlog.red('NO'))
self.is_found = False
return self.is_found
def detect_srcdir(self):
for s in self.src_dirs:
if os.path.exists(s):
self.src_dir = s
self.all_src = mesonlib.File.from_absolute_file(
os.path.join(self.src_dir, 'gtest-all.cc'))
self.main_src = mesonlib.File.from_absolute_file(
os.path.join(self.src_dir, 'gtest_main.cc'))
self.src_include_dir = os.path.normpath(os.path.join(self.src_dir, '..'))
return True
return False
def get_compile_args(self):
arr = []
if self.include_dir != '/usr/include':
arr.append('-I' + self.include_dir)
if hasattr(self, 'src_include_dir'):
arr.append('-I' + self.src_include_dir)
return arr
def get_link_args(self):
return self.link_args
def get_version(self):
return '1.something_maybe'
def get_sources(self):
return self.sources
def need_threads(self):
return True
class GMockDependency(Dependency):
def __init__(self, environment, kwargs):
Dependency.__init__(self, 'gmock')
# GMock may be a library or just source.
# Work with both.
self.name = 'gmock'
self.libname = 'libgmock.so'
trial_dirs = mesonlib.get_library_dirs()
gmock_found = False
for d in trial_dirs:
if os.path.isfile(os.path.join(d, self.libname)):
gmock_found = True
if gmock_found:
self.is_found = True
self.compile_args = []
self.link_args = ['-lgmock']
self.sources = []
mlog.log('Dependency GMock found:', mlog.green('YES'), '(prebuilt)')
return
for d in ['/usr/src/googletest/googlemock/src', '/usr/src/gmock/src', '/usr/src/gmock']:
if os.path.exists(d):
self.is_found = True
# Yes, we need both because there are multiple
# versions of gmock that do different things.
d2 = os.path.normpath(os.path.join(d, '..'))
self.compile_args = ['-I' + d, '-I' + d2]
self.link_args = []
all_src = mesonlib.File.from_absolute_file(os.path.join(d, 'gmock-all.cc'))
main_src = mesonlib.File.from_absolute_file(os.path.join(d, 'gmock_main.cc'))
if kwargs.get('main', False):
self.sources = [all_src, main_src]
else:
self.sources = [all_src]
mlog.log('Dependency GMock found:', mlog.green('YES'), '(building self)')
return
mlog.log('Dependency GMock found:', mlog.red('NO'))
self.is_found = False
def get_version(self):
return '1.something_maybe'
def get_compile_args(self):
return self.compile_args
def get_sources(self):
return self.sources
def get_link_args(self):
return self.link_args
def found(self):
return self.is_found
class QtBaseDependency(Dependency):
def __init__(self, name, env, kwargs):
Dependency.__init__(self, name)
self.name = name
self.qtname = name.capitalize()
self.qtver = name[-1]
if self.qtver == "4":
self.qtpkgname = 'Qt'
else:
self.qtpkgname = self.qtname
self.root = '/usr'
self.bindir = None
self.silent = kwargs.get('silent', False)
# We store the value of required here instead of passing it on to
# PkgConfigDependency etc because we want to try the qmake-based
# fallback as well.
self.required = kwargs.pop('required', True)
kwargs['required'] = False
mods = kwargs.get('modules', [])
self.cargs = []
self.largs = []
self.is_found = False
if isinstance(mods, str):
mods = [mods]
if len(mods) == 0:
raise DependencyException('No ' + self.qtname + ' modules specified.')
type_text = 'cross' if env.is_cross_build() else 'native'
found_msg = '{} {} {{}} dependency (modules: {}) found:' \
''.format(self.qtname, type_text, ', '.join(mods))
from_text = '`pkg-config`'
# Prefer pkg-config, then fallback to `qmake -query`
self._pkgconfig_detect(mods, env, kwargs)
if not self.is_found:
from_text = self._qmake_detect(mods, env, kwargs)
if not self.is_found:
# Reset compile args and link args
self.cargs = []
self.largs = []
from_text = '(checked pkg-config, qmake-{}, and qmake)' \
''.format(self.name)
self.version = 'none'
if self.required:
err_msg = '{} {} dependency not found {}' \
''.format(self.qtname, type_text, from_text)
raise DependencyException(err_msg)
if not self.silent:
mlog.log(found_msg.format(from_text), mlog.red('NO'))
return
from_text = '`{}`'.format(from_text)
if not self.silent:
mlog.log(found_msg.format(from_text), mlog.green('YES'))
def compilers_detect(self):
"Detect Qt (4 or 5) moc, uic, rcc in the specified bindir or in PATH"
if self.bindir:
moc = ExternalProgram(os.path.join(self.bindir, 'moc'), silent=True)
uic = ExternalProgram(os.path.join(self.bindir, 'uic'), silent=True)
rcc = ExternalProgram(os.path.join(self.bindir, 'rcc'), silent=True)
else:
# We don't accept unsuffixed 'moc', 'uic', and 'rcc' because they
# may belong to an older or newer Qt version.
moc = ExternalProgram('moc-' + self.name, silent=True)
uic = ExternalProgram('uic-' + self.name, silent=True)
rcc = ExternalProgram('rcc-' + self.name, silent=True)
return moc, uic, rcc
def _pkgconfig_detect(self, mods, env, kwargs):
modules = OrderedDict()
for module in mods:
modules[module] = PkgConfigDependency(self.qtpkgname + module, env, kwargs)
self.is_found = True
for m in modules.values():
if not m.found():
self.is_found = False
return
self.cargs += m.get_compile_args()
self.largs += m.get_link_args()
self.version = m.modversion
# Try to detect moc, uic, rcc
if 'Core' in modules:
core = modules['Core']
else:
corekwargs = {'required': 'false', 'silent': 'true'}
core = PkgConfigDependency(self.qtpkgname + 'Core', env, corekwargs)
# Used by self.compilers_detect()
self.bindir = core.get_pkgconfig_variable('host_bins')
if not self.bindir:
# If exec_prefix is not defined, the pkg-config file is broken
prefix = core.get_pkgconfig_variable('exec_prefix')
if prefix:
self.bindir = os.path.join(prefix, 'bin')
def _find_qmake(self, qmake, env):
# Even when cross-compiling, if we don't get a cross-info qmake, we
# fallback to using the qmake in PATH because that's what we used to do
if env.is_cross_build():
qmake = env.cross_info.config['binaries'].get('qmake', qmake)
return ExternalProgram(qmake, silent=True)
def _qmake_detect(self, mods, env, kwargs):
for qmake in ('qmake-' + self.name, 'qmake'):
self.qmake = self._find_qmake(qmake, env)
if not self.qmake.found():
continue
# Check that this qmake is for the requested Qt version
pc = subprocess.Popen(self.qmake.fullpath + ['-v'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
universal_newlines=True)
stdo = pc.communicate()[0]
if pc.returncode != 0:
continue
if 'Qt version ' + self.qtver not in stdo:
mlog.log('QMake is not for ' + self.qtname)
continue
# Found qmake for Qt5!
break
else:
# Didn't find qmake :(
return
self.version = re.search(self.qtver + r'(\.\d+)+', stdo).group(0)
# Query library path, header path, and binary path
stdo = subprocess.Popen(self.qmake.fullpath + ['-query'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
universal_newlines=True).communicate()[0]
qvars = {}
for line in stdo.split('\n'):
line = line.strip()
if line == '':
continue
(k, v) = tuple(line.split(':', 1))
qvars[k] = v
if mesonlib.is_osx():
return self._framework_detect(qvars, mods, kwargs)
incdir = qvars['QT_INSTALL_HEADERS']
self.cargs.append('-I' + incdir)
libdir = qvars['QT_INSTALL_LIBS']
# Used by self.compilers_detect()
self.bindir = qvars['QT_INSTALL_BINS']
self.is_found = True
for module in mods:
mincdir = os.path.join(incdir, 'Qt' + module)
self.cargs.append('-I' + mincdir)
if for_windows(env.is_cross_build(), env):
libfile = os.path.join(libdir, self.qtpkgname + module + '.lib')
if not os.path.isfile(libfile):
# MinGW can link directly to .dll
libfile = os.path.join(self.bindir, self.qtpkgname + module + '.dll')
if not os.path.isfile(libfile):
self.is_found = False
break
else:
libfile = os.path.join(libdir, 'lib{}{}.so'.format(self.qtpkgname, module))
if not os.path.isfile(libfile):
self.is_found = False
break
self.largs.append(libfile)
return qmake
def _framework_detect(self, qvars, modules, kwargs):
libdir = qvars['QT_INSTALL_LIBS']
for m in modules:
fname = 'Qt' + m
fwdep = ExtraFrameworkDependency(fname, kwargs.get('required', True), libdir)
self.cargs.append('-F' + libdir)
if fwdep.found():
self.is_found = True
self.cargs += fwdep.get_compile_args()
self.largs += fwdep.get_link_args()
# Used by self.compilers_detect()
self.bindir = qvars['QT_INSTALL_BINS']
def get_version(self):
return self.version
def get_compile_args(self):
return self.cargs
def get_sources(self):
return []
def get_link_args(self):
return self.largs
def found(self):
return self.is_found
def get_exe_args(self):
# Originally this was -fPIE but nowadays the default
# for upstream and distros seems to be -reduce-relocations
# which requires -fPIC. This may cause a performance
# penalty when using self-built Qt or on platforms
# where -fPIC is not required. If this is an issue
# for you, patches are welcome.
# Fix this to be more portable, especially to MSVC.
return ['-fPIC']
class Qt5Dependency(QtBaseDependency):
def __init__(self, env, kwargs):
QtBaseDependency.__init__(self, 'qt5', env, kwargs)
class Qt4Dependency(QtBaseDependency):
def __init__(self, env, kwargs):
QtBaseDependency.__init__(self, 'qt4', env, kwargs)
class GnuStepDependency(Dependency):
def __init__(self, environment, kwargs):
Dependency.__init__(self, 'gnustep')
self.modules = kwargs.get('modules', [])
self.detect()
def detect(self):
confprog = 'gnustep-config'
try:
gp = subprocess.Popen([confprog, '--help'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
gp.communicate()
except FileNotFoundError:
self.args = None
mlog.log('Dependency GnuStep found:', mlog.red('NO'), '(no gnustep-config)')
return
if gp.returncode != 0:
self.args = None
mlog.log('Dependency GnuStep found:', mlog.red('NO'))
return
if 'gui' in self.modules:
arg = '--gui-libs'
else:
arg = '--base-libs'
fp = subprocess.Popen([confprog, '--objc-flags'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(flagtxt, flagerr) = fp.communicate()
flagtxt = flagtxt.decode()
flagerr = flagerr.decode()
if fp.returncode != 0:
raise DependencyException('Error getting objc-args: %s %s' % (flagtxt, flagerr))
args = flagtxt.split()
self.args = self.filter_args(args)
fp = subprocess.Popen([confprog, arg],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(libtxt, liberr) = fp.communicate()
libtxt = libtxt.decode()
liberr = liberr.decode()
if fp.returncode != 0:
raise DependencyException('Error getting objc-lib args: %s %s' % (libtxt, liberr))
self.libs = self.weird_filter(libtxt.split())
mlog.log('Dependency GnuStep found:', mlog.green('YES'))
def weird_filter(self, elems):
"""When building packages, the output of the enclosing Make
is sometimes mixed among the subprocess output. I have no idea
why. As a hack filter out everything that is not a flag."""
return [e for e in elems if e.startswith('-')]
def filter_args(self, args):
"""gnustep-config returns a bunch of garbage args such
as -O2 and so on. Drop everything that is not needed."""
result = []
for f in args:
if f.startswith('-D') or f.startswith('-f') or \
f.startswith('-I') or f == '-pthread' or\
(f.startswith('-W') and not f == '-Wall'):
result.append(f)
return result
def found(self):
return self.args is not None
def get_compile_args(self):
if self.args is None:
return []
return self.args
def get_link_args(self):
return self.libs
class AppleFrameworks(Dependency):
def __init__(self, environment, kwargs):
Dependency.__init__(self, 'appleframeworks')
modules = kwargs.get('modules', [])
if isinstance(modules, str):
modules = [modules]
if len(modules) == 0:
raise DependencyException("AppleFrameworks dependency requires at least one module.")
self.frameworks = modules
def get_link_args(self):
args = []
for f in self.frameworks:
args.append('-framework')
args.append(f)
return args
def found(self):
return mesonlib.is_osx()
class GLDependency(Dependency):
def __init__(self, environment, kwargs):
Dependency.__init__(self, 'gl')
self.is_found = False
self.cargs = []
self.linkargs = []
try:
pcdep = PkgConfigDependency('gl', environment, kwargs)
if pcdep.found():
self.type_name = 'pkgconfig'
self.is_found = True
self.cargs = pcdep.get_compile_args()
self.linkargs = pcdep.get_link_args()
return
except Exception:
pass
if mesonlib.is_osx():
self.is_found = True
self.linkargs = ['-framework', 'OpenGL']
return
if mesonlib.is_windows():
self.is_found = True
self.linkargs = ['-lopengl32']
return
def get_link_args(self):
return self.linkargs
# There are three different ways of depending on SDL2:
# sdl2-config, pkg-config and OSX framework
class SDL2Dependency(Dependency):
def __init__(self, environment, kwargs):
Dependency.__init__(self, 'sdl2')
self.is_found = False
self.cargs = []
self.linkargs = []
try:
pcdep = PkgConfigDependency('sdl2', environment, kwargs)
if pcdep.found():
self.type_name = 'pkgconfig'
self.is_found = True
self.cargs = pcdep.get_compile_args()
self.linkargs = pcdep.get_link_args()
self.version = pcdep.get_version()
return
except Exception as e:
mlog.debug('SDL 2 not found via pkgconfig. Trying next, error was:', str(e))
pass
sdlconf = shutil.which('sdl2-config')
if sdlconf:
pc = subprocess.Popen(['sdl2-config', '--cflags'],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
(stdo, _) = pc.communicate()
self.cargs = stdo.decode().strip().split()
pc = subprocess.Popen(['sdl2-config', '--libs'],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
(stdo, _) = pc.communicate()
self.linkargs = stdo.decode().strip().split()
self.is_found = True
mlog.log('Dependency', mlog.bold('sdl2'), 'found:', mlog.green('YES'), '(%s)' % sdlconf)
self.version = '2' # FIXME
return
mlog.debug('Could not find sdl2-config binary, trying next.')
if mesonlib.is_osx():
fwdep = ExtraFrameworkDependency('sdl2', kwargs.get('required', True))
if fwdep.found():
self.is_found = True
self.cargs = fwdep.get_compile_args()
self.linkargs = fwdep.get_link_args()
self.version = '2' # FIXME
return
mlog.log('Dependency', mlog.bold('sdl2'), 'found:', mlog.red('NO'))
def get_compile_args(self):
return self.cargs
def get_link_args(self):
return self.linkargs
def found(self):
return self.is_found
def get_version(self):
return self.version
class ExtraFrameworkDependency(Dependency):
def __init__(self, name, required, path=None):
Dependency.__init__(self, 'extraframeworks')
self.name = None
self.detect(name, path)
if self.found():
mlog.log('Dependency', mlog.bold(name), 'found:', mlog.green('YES'),
os.path.join(self.path, self.name))
else:
mlog.log('Dependency', name, 'found:', mlog.red('NO'))
def detect(self, name, path):
lname = name.lower()
if path is None:
paths = ['/Library/Frameworks']
else:
paths = [path]
for p in paths:
for d in os.listdir(p):
fullpath = os.path.join(p, d)
if lname != d.split('.')[0].lower():
continue
if not stat.S_ISDIR(os.stat(fullpath).st_mode):
continue
self.path = p
self.name = d
return
def get_compile_args(self):
if self.found():
return ['-I' + os.path.join(self.path, self.name, 'Headers')]
return []
def get_link_args(self):
if self.found():
return ['-F' + self.path, '-framework', self.name.split('.')[0]]
return []
def found(self):
return self.name is not None
class ThreadDependency(Dependency):
def __init__(self, environment, kwargs):
super().__init__('threads')
self.name = 'threads'
self.is_found = True
mlog.log('Dependency', mlog.bold(self.name), 'found:', mlog.green('YES'))
def need_threads(self):
return True
class Python3Dependency(Dependency):
def __init__(self, environment, kwargs):
super().__init__('python3')
self.name = 'python3'
self.is_found = False
self.version = "3.something_maybe"
try:
pkgdep = PkgConfigDependency('python3', environment, kwargs)
if pkgdep.found():
self.cargs = pkgdep.cargs
self.libs = pkgdep.libs
self.version = pkgdep.get_version()
self.is_found = True
return
except Exception:
pass
if not self.is_found:
if mesonlib.is_windows():
inc = sysconfig.get_path('include')
platinc = sysconfig.get_path('platinclude')
self.cargs = ['-I' + inc]
if inc != platinc:
self.cargs.append('-I' + platinc)
# Nothing exposes this directly that I could find
basedir = sysconfig.get_config_var('base')
vernum = sysconfig.get_config_var('py_version_nodot')
self.libs = ['-L{}/libs'.format(basedir),
'-lpython{}'.format(vernum)]
self.is_found = True
self.version = sysconfig.get_config_var('py_version_short')
elif mesonlib.is_osx():
# In OSX the Python 3 framework does not have a version
# number in its name.
fw = ExtraFrameworkDependency('python', False)
if fw.found():
self.cargs = fw.get_compile_args()
self.libs = fw.get_link_args()
self.is_found = True
if self.is_found:
mlog.log('Dependency', mlog.bold(self.name), 'found:', mlog.green('YES'))
else:
mlog.log('Dependency', mlog.bold(self.name), 'found:', mlog.red('NO'))
def get_compile_args(self):
return self.cargs
def get_link_args(self):
return self.libs
def get_version(self):
return self.version
def get_dep_identifier(name, kwargs):
elements = [name]
modlist = kwargs.get('modules', [])
if isinstance(modlist, str):
modlist = [modlist]
for module in modlist:
elements.append(module)
# We use a tuple because we need a non-mutable structure to use as the key
# of a dictionary and a string has potential for name collisions
identifier = tuple(elements)
identifier += ('main', kwargs.get('main', False))
identifier += ('static', kwargs.get('static', False))
if 'fallback' in kwargs:
f = kwargs.get('fallback')
identifier += ('fallback', f[0], f[1])
return identifier
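# Illustrative sketch (not from the original source): the identifier built above is a
# plain tuple, so it can be used directly as a dictionary key, e.g.
#   get_dep_identifier('qt5', {'modules': ['Core', 'Widgets']})
#   -> ('qt5', 'Core', 'Widgets', 'main', False, 'static', False)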
def find_external_dependency(name, environment, kwargs):
required = kwargs.get('required', True)
if not isinstance(required, bool):
raise DependencyException('Keyword "required" must be a boolean.')
lname = name.lower()
if lname in packages:
dep = packages[lname](environment, kwargs)
if required and not dep.found():
raise DependencyException('Dependency "%s" not found' % name)
return dep
pkg_exc = None
pkgdep = None
try:
pkgdep = PkgConfigDependency(name, environment, kwargs)
if pkgdep.found():
return pkgdep
except Exception as e:
pkg_exc = e
if mesonlib.is_osx():
fwdep = ExtraFrameworkDependency(name, required)
if required and not fwdep.found():
raise DependencyException('Dependency "%s" not found' % name)
return fwdep
if pkg_exc is not None:
raise pkg_exc
mlog.log('Dependency', mlog.bold(name), 'found:', mlog.red('NO'))
return pkgdep
# This has to be at the end so the classes it references
# are defined.
packages = {'boost': BoostDependency,
'gtest': GTestDependency,
'gmock': GMockDependency,
'qt5': Qt5Dependency,
'qt4': Qt4Dependency,
'gnustep': GnuStepDependency,
'appleframeworks': AppleFrameworks,
'wxwidgets' : WxDependency,
'sdl2' : SDL2Dependency,
'gl' : GLDependency,
'threads' : ThreadDependency,
'python3' : Python3Dependency,
}
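# Hedged usage sketch (env stands for a hypothetical Environment instance):
#   dep = find_external_dependency('gtest', env, {'main': True, 'required': False})
# 'gtest' is looked up in the packages table above, so this returns a GTestDependency;
# names not in the table fall back to pkg-config (and, on OSX, framework) detection.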
|
#!/usr/bin/env python3
import imp
import os
import unittest
from pprint import pprint
imp.load_source('ufw_forward', os.path.join(os.path.dirname(__file__), os.path.pardir, 'library', 'ufw_forward.py'))
from ufw_forward import UFWForwards
class TestBase(unittest.TestCase):
def test_do_magic(self):
test = { "incomming_dev": "eth0",
"outgoing_dev": "lxdbr0",
"outgoing_network": "10.20.10.0/24",
"masquerading": True,
"conntrack_state": "RELATED,ESTABLISHED",
"reroute": [],
"forwards": [
{
"container": "mumble.baviaan.eggie.zone",
"destination_ip": "10.20.10.11",
"destination_port": [
64738
],
"incomming_ip": "88.99.152.112",
"incomming_port": [
64738
],
"protocol": [
"tcp",
"udp"
]
},
{
"container": "brandon-minecraft.baviaan.eggie.zone",
"destination_ip": "10.20.10.12",
"destination_port": [
25565
],
"incomming_ip": "88.99.152.112",
"incomming_port": [
25565
],
"protocol": [
"tcp"
]
}
]
}
response = {
'nat_rules' : [],
'filter_rules' : []
}
ufw_forwards = UFWForwards(test, False)
ufw_forwards.nat_rules = response['nat_rules']
ufw_forwards.filter_rules = response['filter_rules']
ufw_forwards.generate()
for rule in response['nat_rules']:
print(" ".join(rule))
pprint(response['filter_rules'])
for rule in response['filter_rules']:
print(" ".join(rule))
if __name__ == '__main__':
unittest.main()
|
from __future__ import absolute_import
import re
import logging
logger = logging.getLogger(__name__)
def clean_extract(sel, path, path_type='xpath', limit_from=None, limit_to=None, sep='\n',
unicode=True):
if path_type == 'xpath':
return clean(sep.join(x.strip()
for x in sel.xpath(path).extract()[limit_from:limit_to]),
unicode=unicode)
elif path_type == 'css':
return clean(sep.join(x.strip()
for x in sel.css(path).extract()[limit_from:limit_to]),
unicode=unicode)
else:
return None
def clean(s, unicode=True):
flags = re.UNICODE if unicode else 0
return re.subn(r'(\s){2,}', r'\g<1>', s, flags=flags)[0].strip()
def split_at(content, delimiters):
""" Splits content using given delimiters following their order, for example
>>> [x for x in split_at(range(11), range(3,10,3))]
[(None, [1, 2]), (3, [4, 5]), (6, [7, 8]), (None, [9, 10])]
"""
found = last = 0
for i, x in enumerate(content):
if x == delimiters[found]:
yield (delimiters[found - 1] if found > 0 else None), content[last + 1:i]
last = i
found += 1
if found == len(delimiters):
break
if last < len(content):
yield None, content[last:]
def parse_birth_death(string):
"""
Parses birth and death dates from a string.
:param string: String with the dates. Can be 'd. <year>' to indicate the
year of death, 'b. <year>' to indicate the year of birth, <year>-<year>
to indicate both birth and death year. Can optionally include 'c.' or 'ca.'
before years to indicate approximation (ignored by the return value).
If only the century is specified, birth is the first year of the century and
death is the last one, e.g. '19th century' will be parsed as `('1801', '1900')`
:return: tuple `(birth_year, death_year)`, both strings as appearing in the original string.
If the string cannot be parsed `(None, None)` is returned.
"""
string = string.lower().replace(' ', '')
if type(string) == unicode:
# \u2013 is another fancy unicode character ('EN DASH') for '-'
string = string.replace(u'\u2013', '-')
if string.startswith('d.'):
birth, death = None, re.findall(r'(ca?\.)?(\d+)', string)[0][1]
elif string.startswith('b.'):
birth, death = re.findall(r'(ca?\.)?(\d+)', string)[0][1], None
elif 'century' in string:
century = int(string[0:2])
birth, death = '%d01' % (century - 1), '%d00' % century
else:
match = re.search(r'(ca?\.)?(?P<birth>\d+)-(ca?\.)?(?P<death>\d*)', string)
birth = death = None
if match:
birth = match.group('birth') or None
death = match.group('death') or None
return birth, death
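# Worked examples for the formats described in the docstring above (illustrative only,
# derived from the parsing rules, not taken from the original source):
#   parse_birth_death('b. 1920')       -> ('1920', None)
#   parse_birth_death('d. 1864')       -> (None, '1864')
#   parse_birth_death('ca. 1834-1890') -> ('1834', '1890')
#   parse_birth_death('19th century')  -> ('1801', '1900')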
def extract_dict(response, keys_selector, values_selector, keys_extractor='.//text()',
values_extractor='.//text()', **kwargs):
""" Extracts a dictionary given the selectors for the keys and the vaues.
The selectors should point to the elements containing the text and not the
text itself.
:param response: The response object. The methods xpath or css are used
:param keys_selector: Selector pointing to the elements containing the keys,
starting with the type `xpath:` or `css:` followed by
the selector itself
:param values_selector: Selector pointing to the elements containing the values,
starting with the type `xpath:` or `css:` followed
by the selector itself
:param keys_extractor: Selector used to actually extract the value of the key from
each key element. xpath only
:param values_extractor: Selector used to extract the actual value from each
value element. xpath only
:param \*\*kwargs: Other parameters to pass to `clean_extract`. Nothing good will
come by passing `path_type='css'`, you have been warned.
"""
def get(selector):
type, sel = selector.split(':', 1)
if type == 'css':
return response.css(sel)
elif type == 'xpath':
return response.xpath(sel)
else:
raise ValueError('Unknown selector type: ' + type)
keys = get(keys_selector)
values = get(values_selector)
return dict(zip((clean_extract(k, keys_extractor, **kwargs) for k in keys),
(clean_extract(v, values_extractor, **kwargs) for v in values)))
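# Hedged usage sketch (the selector strings and field names are hypothetical, not taken
# from any spider in this project):
#   extract_dict(response, 'css:table.bio th', 'css:table.bio td')
# pairs the cleaned text of each key element with the value element at the same position,
# e.g. {'Born': '1856', 'Died': '1939'}.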
def fix_name(name):
""" tries to normalize a name so that it can be searched with the wikidata APIs
:param name: The name to normalize
:returns: a tuple with the normalized name and a list of honorifics
"""
orig = name
name = re.sub(r'\([^)]+\)', '', name.lower())
if ',' in name:
last_name, first_name = name.split(',', 1)
first_name, honorifics = strip_honorifics(first_name.strip())
name = first_name.split(' ')[0] + ' ' + last_name.strip()
else:
parts = name.split()
if len(parts) > 2:
name = '%s %s' % (parts[0], parts[-1])
name, honorifics = strip_honorifics(name)
logger.debug('normalized name "%s" to "%s"', orig, name)
return name.strip(), honorifics
def strip_honorifics(name):
""" Removes honorifics from the name
:param name: The name
:returns: a tuple with the name without honorifics and a list of honorifics
"""
honorifics = []
changed = True
while changed:
changed = False
for prefix in ['prof', 'dr', 'phd', 'sir', 'mr', 'mrs', 'miss', 'mister',
'bishop', 'archbishop', 'st', 'hon', 'rev']:
if name.startswith(prefix):
honorifics.append(prefix)
changed = True
name = name[len(prefix):]
if name and name[0] == '.':
name = name[1:]
name = name.strip()
return name, honorifics
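# Illustrative example of the two helpers above (derived from the code, not from the
# original source):
#   fix_name('Smith, Dr. John') -> ('john smith', ['dr'])
# The "last, first" form is flipped to "first last" and lowercased, and the honorific
# is stripped from the name and reported separately.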
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bigdl.util.common import Sample as BSample, JTensor as BJTensor,\
JavaCreator, _get_gateway, _py2java, _java2py
import numpy as np
import os
import tempfile
import uuid
from urllib.parse import urlparse
def convert_to_safe_path(input_path, follow_symlinks=True):
# resolves symbolic links
if follow_symlinks:
return os.path.realpath(input_path)
# convert to abs path
return os.path.abspath(input_path)
def to_list_of_numpy(elements):
if isinstance(elements, np.ndarray):
return [elements]
elif np.isscalar(elements):
return [np.array(elements)]
elif not isinstance(elements, list):
raise ValueError("Wrong type: %s" % type(elements))
results = []
for element in elements:
if np.isscalar(element):
results.append(np.array(element))
elif isinstance(element, np.ndarray):
results.append(element)
else:
raise ValueError("Wrong type: %s" % type(element))
return results
def get_file_list(path, recursive=False):
return callZooFunc("float", "listPaths", path, recursive)
def is_local_path(path):
parse_result = urlparse(path)
return len(parse_result.scheme.lower()) == 0 or parse_result.scheme.lower() == "file"
def append_suffix(prefix, path):
# append suffix
splits = path.split(".")
if len(splits) > 1:
file_name = prefix + "." + splits[-1]
else:
file_name = prefix
return file_name
def save_file(save_func, path, **kwargs):
if is_local_path(path):
save_func(path, **kwargs)
else:
file_name = str(uuid.uuid1())
file_name = append_suffix(file_name, path)
temp_path = os.path.join(tempfile.gettempdir(), file_name)
try:
save_func(temp_path, **kwargs)
if "overwrite" in kwargs:
put_local_file_to_remote(temp_path, path, over_write=kwargs['overwrite'])
else:
put_local_file_to_remote(temp_path, path)
finally:
os.remove(temp_path)
def load_from_file(load_func, path):
if is_local_path(path):
return load_func(path)
else:
file_name = str(uuid.uuid1())
file_name = append_suffix(file_name, path)
temp_path = os.path.join(tempfile.gettempdir(), file_name)
get_remote_file_to_local(path, temp_path)
try:
return load_func(temp_path)
finally:
os.remove(temp_path)
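# Hedged usage sketch: save_func/load_func are any callables that write to or read from
# a local path (the model.save_weights name below is hypothetical):
#   save_file(lambda p: model.save_weights(p), "hdfs://ns/models/weights.h5")
# For a remote path the data is first written to a temp file and then pushed with
# put_local_file_to_remote; purely local paths are passed straight through.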
def get_remote_file_to_local(remote_path, local_path, over_write=False):
callZooFunc("float", "getRemoteFileToLocal", remote_path, local_path, over_write)
def put_local_file_to_remote(local_path, remote_path, over_write=False):
callZooFunc("float", "putLocalFileToRemote", local_path, remote_path, over_write)
def set_core_number(num):
callZooFunc("float", "setCoreNumber", num)
def callZooFunc(bigdl_type, name, *args):
""" Call API in PythonBigDL """
gateway = _get_gateway()
args = [_py2java(gateway, a) for a in args]
error = Exception("Cannot find function: %s" % name)
for jinvoker in JavaCreator.instance(bigdl_type, gateway).value:
# hasattr(jinvoker, name) always returns true here,
# so we have to invoke the method to check whether it exists or not
try:
api = getattr(jinvoker, name)
java_result = api(*args)
result = _java2py(gateway, java_result)
except Exception as e:
error = e
if not ("does not exist" in str(e)
and "Method {}".format(name) in str(e)):
raise e
else:
return result
raise error
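# Example of the call pattern (mirrors set_core_number above): every wrapper in this
# module funnels through callZooFunc, e.g. callZooFunc("float", "setCoreNumber", 4)
# invokes PythonBigDL's setCoreNumber on the first JVM invoker that defines it.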
class JTensor(BJTensor):
def __init__(self, storage, shape, bigdl_type="float", indices=None):
super(JTensor, self).__init__(storage, shape, bigdl_type, indices)
@classmethod
def from_ndarray(cls, a_ndarray, bigdl_type="float"):
"""
Convert an ndarray to a DenseTensor to be used on the Java side.
"""
if a_ndarray is None:
return None
assert isinstance(a_ndarray, np.ndarray), \
"input should be a np.ndarray, not %s" % type(a_ndarray)
return cls(a_ndarray,
a_ndarray.shape,
bigdl_type)
class Sample(BSample):
def __init__(self, features, labels, bigdl_type="float"):
super(Sample, self).__init__(features, labels, bigdl_type)
@classmethod
def from_ndarray(cls, features, labels, bigdl_type="float"):
features = to_list_of_numpy(features)
labels = to_list_of_numpy(labels)
return cls(
features=[JTensor(feature, feature.shape) for feature in features],
labels=[JTensor(label, label.shape) for label in labels],
bigdl_type=bigdl_type)
|
"""Utils for testing autocompletes."""
from django.apps import apps
class Fixtures(object):
"""Callback for post_migrate to create many objects."""
def __init__(self, model_name=None):
"""Preset a model name, ie. 'auth.user'."""
self.model_name = model_name
def get_model(self, sender):
"""Return either the preset model, either the sender's TestModel."""
if self.model_name is None:
return sender.get_model('TModel')
else:
return apps.get_model(self.model_name)
def __call__(self, sender, **kwargs):
"""Callback function, calls install_fixtures."""
model = self.get_model(sender)
self.install_fixtures(model)
def install_fixtures(self, model):
"""Install fixtures for model."""
for n in range(1, 50):
try:
model.objects.get(pk=n)
except model.DoesNotExist:
model.objects.create(name='test %s' % n, pk=n)
class OwnedFixtures(Fixtures):
"""Fixtures for models with an "owner" relation to User."""
installed_auth = False
def install_fixtures(self, model):
"""Install owners and fixtures."""
if not self.installed_auth:
User = apps.get_model('auth.user') # noqa
self.test, c = User.objects.get_or_create(
username='test',
is_staff=True,
is_superuser=True
)
self.test.set_password('test')
self.test.save()
self.other, c = User.objects.get_or_create(username='other')
self.other.set_password('test')
self.other.save()
self.installed_auth = True
for n in range(1, 3):
for u in [self.test, self.other]:
model.objects.get_or_create(
name='test #%s for %s' % (n, u),
owner=u
)
fixtures = Fixtures()
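# Hedged usage sketch: the module-level instance above is meant to be wired to the
# post_migrate signal in a test app's AppConfig (the app label here is hypothetical):
#   from django.db.models.signals import post_migrate
#   post_migrate.connect(fixtures, sender=apps.get_app_config('my_test_app'))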
|
# encoding: utf-8
"""Test the schema objects."""
from unittest import TestCase
from marrow.interface import schema
class TestAttributeSuccesses(TestCase):
foo = 27
basic = schema.Attribute()
basic.__name__ = 'basic'
value = schema.Attribute(value=27)
value.__name__ = 'foo'
exact = schema.Attribute()
exact.__name__ = 'exact'
exact.exact = exact
validator = schema.Attribute(validator=lambda v: 20 < v < 30)
validator.__name__ = 'foo'
def test_basic_repr(self):
self.assertEqual(repr(self.basic), "Attribute(basic)")
def test_basic_call(self):
self.assertTrue(self.basic(self))
def test_value_call(self):
self.assertTrue(self.value(self))
def test_exact_call(self):
self.assertTrue(self.exact(self))
def test_validator_call(self):
self.assertTrue(self.validator(self))
class TestAttributeFailure(TestCase):
foo = 42
basic = schema.Attribute()
basic.__name__ = 'bar'
value = schema.Attribute(value=27)
value.__name__ = 'foo'
exact = schema.Attribute()
exact.__name__ = 'exact'
exact.exact = None
validator = schema.Attribute(validator=lambda v: 20 < v < 30)
validator.__name__ = 'foo'
def test_basic_call(self):
self.assertFalse(self.basic(self))
def test_value_call(self):
self.assertFalse(self.value(self))
def test_exact_call(self):
self.assertFalse(self.exact(self))
def test_validator_call(self):
self.assertFalse(self.validator(self))
class TestProperty(TestCase):
foo = 27
bar = "baz"
good = schema.Property(type=int)
good.__name__ = 'foo'
bad = schema.Property(type=int)
bad.__name__ = 'bar'
def test_property_success(self):
self.assertTrue(self.good(self))
def test_property_failure(self):
self.assertFalse(self.bad(self))
class TestClassProperty(TestCase):
foo = 27
good = schema.ClassProperty()
good.__name__ = 'foo'
bad = schema.ClassProperty()
bad.__name__ = 'bar'
def __init__(self, *args, **kw):
super(TestClassProperty, self).__init__(*args, **kw)
self.bar = 42
def test_class_property_success(self):
self.assertTrue(self.good(self))
def test_class_property_failure(self):
self.assertFalse(self.bad(self))
class TestInstanceProperty(TestCase):
foo = 27
bar = 42
good1 = schema.InstanceProperty()
good1.__name__ = 'bar'
good2 = schema.InstanceProperty()
good2.__name__ = 'baz'
bad = schema.InstanceProperty()
bad.__name__ = 'foo'
def __init__(self, *args, **kw):
super(TestInstanceProperty, self).__init__(*args, **kw)
self.bar = 27
self.baz = 42
def test_instance_property_override_success(self):
self.assertTrue(self.good1(self))
def test_instance_property_unique_success(self):
self.assertTrue(self.good2(self))
def test_instance_property_failure(self):
self.assertFalse(self.bad(self))
class BaseCallables:
foo = "foo"
def callable1(self, arg1, arg2=None):
pass
@classmethod
def callable2(cls, *args, **kw):
pass
@staticmethod
def callable3():
pass
class TestCallableBasics(TestCase, BaseCallables):
good = schema.Callable()
good.__name__ = 'callable1'
bad = schema.Callable()
bad.__name__ = 'foo'
notdictionary = object()
error = schema.Callable()
error.__name__ = '__getitem__'
def test_callable_base_success(self):
self.assertTrue(self.good(self))
def test_callable_base_failure(self):
self.assertFalse(self.bad(self))
def test_callable_introspect_fail(self):
self.assertFalse(self.error(self.notdictionary))
class TestCallableArgspecSuccess(TestCase, BaseCallables):
# like=None, args=None, optional=None, names=None, vargs=None, kwargs=None
args = schema.Callable(args=1)
optional = schema.Callable(optional=1)
names = schema.Callable(names=('arg1', 'arg2'))
args.__name__ = optional.__name__ = names.__name__ = 'callable1'
args.skip = optional.skip = names.skip = 1
vargs = schema.Callable(vargs=True)
kwargs = schema.Callable(kwargs=True)
vargs.__name__ = kwargs.__name__ = 'callable2'
vargs.skip = kwargs.skip = 1
like_basic = schema.Callable(like=BaseCallables.callable1)
like_basic.__name__ = 'callable1'
like_variable = schema.Callable(like=BaseCallables.callable2)
like_variable.__name__ = 'callable2'
like_override = schema.Callable(like=BaseCallables.callable1, args=2)
like_override.__name__ = 'callable1'
def test_callable_args(self):
self.assertTrue(self.args(self))
def test_callable_optional(self):
self.assertTrue(self.optional(self))
def test_callable_names(self):
self.assertTrue(self.names(self))
def test_callable_vargs(self):
self.assertTrue(self.vargs(self))
def test_callable_kwargs(self):
self.assertTrue(self.kwargs(self))
def test_callable_like_basic(self):
self.assertTrue(self.like_basic(self))
def test_callable_like_variable(self):
self.assertTrue(self.like_variable(self))
def test_callable_like_override(self):
self.assertTrue(self.like_override(self))
class TestCallableArgspecFailures(TestCase, BaseCallables):
# like=None, args=None, optional=None, names=None, vargs=None, kwargs=None
args = schema.Callable(args=1)
optional = schema.Callable(optional=1)
names = schema.Callable(names=('arg1', 'arg2'))
args.__name__ = optional.__name__ = names.__name__ = 'callable2'
args.skip = optional.skip = names.skip = 1
vargs = schema.Callable(vargs=True)
kwargs = schema.Callable(kwargs=True)
vargs.__name__ = kwargs.__name__ = 'callable1'
vargs.skip = kwargs.skip = 1
like_basic = schema.Callable(like=BaseCallables.callable1)
like_basic.__name__ = 'callable2'
like_variable = schema.Callable(like=BaseCallables.callable2)
like_variable.__name__ = 'callable1'
def test_callable_args(self):
self.assertFalse(self.args(self))
def test_callable_optional(self):
self.assertFalse(self.optional(self))
def test_callable_names(self):
self.assertFalse(self.names(self))
def test_callable_vargs(self):
self.assertFalse(self.vargs(self))
def test_callable_kwargs(self):
self.assertFalse(self.kwargs(self))
def test_callable_like_basic(self):
self.assertFalse(self.like_basic(self))
def test_callable_like_variable(self):
self.assertFalse(self.like_variable(self))
class TestMethod(TestCase, BaseCallables):
good1 = schema.Method()
good1.__name__ = 'callable1'
good2 = schema.Method()
good2.__name__ = 'callable1'
bad = schema.Method()
bad.__name__ = 'callable3'
def test_method_success(self):
self.assertTrue(self.good1(self))
def test_class_method_success(self):
self.assertTrue(self.good2(self))
def test_method_failure(self):
self.assertFalse(self.bad(self))
class TestClassMethod(TestCase, BaseCallables):
good = schema.ClassMethod()
good.__name__ = 'callable2'
bad1 = schema.ClassMethod()
bad1.__name__ = 'callable1'
bad2 = schema.ClassMethod()
bad2.__name__ = 'callable3'
def test_class_method_success(self):
self.assertTrue(self.good(self))
def test_method_failure(self):
self.assertFalse(self.bad1(self))
def test_static_method_failure(self):
self.assertFalse(self.bad2(self))
class TestStaticMethod(TestCase, BaseCallables):
good = schema.StaticMethod()
good.__name__ = 'callable3'
bad1 = schema.StaticMethod()
bad1.__name__ = 'callable1'
bad2 = schema.StaticMethod()
bad2.__name__ = 'callable2'
invalid = schema.StaticMethod(args=1)
invalid.__name__ = 'callable3'
def test_static_method_success(self):
self.assertTrue(self.good(self))
def test_method_failure(self):
self.assertFalse(self.bad1(self))
def test_class_method_failure(self):
self.assertFalse(self.bad2(self))
def test_static_method_parent_failure(self):
self.assertFalse(self.invalid(self))
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 Paul Brossier <piem@piem.org>
# This file is part of TimeSide.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Author: Paul Brossier <piem@piem.org>
from timeside.core import implements, interfacedoc
from timeside.core.analyzer import Analyzer
from timeside.core.api import IAnalyzer
from timeside.core.tools.parameters import HasTraits, List
import subprocess
import tempfile
import numpy as np
from timeside.core.tools.parameters import store_parameters
def simple_host_process(argslist):
"""Call vamp-simple-host"""
vamp_host = 'vamp-simple-host'
command = [vamp_host]
command.extend(argslist)
# try ?
stdout = subprocess.check_output(command,
stderr=subprocess.STDOUT).decode('utf-8').splitlines()
return stdout
# Raise an exception if Vamp Host is missing
from timeside.core.exceptions import VampImportError
try:
simple_host_process(['-v'])
WITH_VAMP = True
except OSError:
WITH_VAMP = False
raise VampImportError
def get_plugins_list():
arg = ['--list-outputs']
stdout = simple_host_process(arg)
return [line.split(':')[1:] for line in stdout]
class VampSimpleHost(Analyzer):
"""Vamp plugins library interface analyzer"""
implements(IAnalyzer)
class _Param(HasTraits):
plugin_list = List
_schema = {'$schema': 'http://json-schema.org/schema#',
'properties': {'plugin_list': {'default': get_plugins_list(),
'type': 'array',
'items': {'type': 'array',
'items': {'type': 'string'}}
}
},
'type': 'object'}
@store_parameters
def __init__(self, plugin_list=None):
super(VampSimpleHost, self).__init__()
if plugin_list is None:
plugin_list = get_plugins_list()
#plugin_list = [['vamp-example-plugins', 'percussiononsets', 'detectionfunction']]
self.plugin_list = plugin_list
@interfacedoc
def setup(self, channels=None, samplerate=None,
blocksize=None, totalframes=None):
super(VampSimpleHost, self).setup(
channels, samplerate, blocksize, totalframes)
@staticmethod
@interfacedoc
def id():
return "vamp_simple_host"
@staticmethod
@interfacedoc
def name():
return "Vamp Plugins host"
@staticmethod
@interfacedoc
def version():
return "1.1.0"
@staticmethod
@interfacedoc
def unit():
return ""
def process(self, frames, eod=False):
return frames, eod
def post_process(self):
#plugin = 'vamp-example-plugins:amplitudefollower:amplitude'
wavfile = self.mediainfo()['uri'].split('file://')[-1]
for plugin_line in self.plugin_list:
plugin = ':'.join(plugin_line)
(time, duration, value) = self.vamp_plugin(plugin, wavfile)
if value is None:
return
if duration is not None:
plugin_res = self.new_result(
data_mode='value', time_mode='segment')
plugin_res.data_object.duration = duration
else:
plugin_res = self.new_result(
data_mode='value', time_mode='event')
plugin_res.data_object.time = time
plugin_res.data_object.value = value
# Fix start, duration issues if audio is a segment
# if self.mediainfo()['is_segment']:
# start_index = np.floor(self.mediainfo()['start'] *
# self.result_samplerate /
# self.result_stepsize)
#
# stop_index = np.ceil((self.mediainfo()['start'] +
# self.mediainfo()['duration']) *
# self.result_samplerate /
# self.result_stepsize)
#
# fixed_start = (start_index * self.result_stepsize /
# self.result_samplerate)
# fixed_duration = ((stop_index - start_index) * self.result_stepsize /
# self.result_samplerate)
#
# plugin_res.audio_metadata.start = fixed_start
# plugin_res.audio_metadata.duration = fixed_duration
#
# value = value[start_index:stop_index + 1]
plugin_res.id_metadata.id += '.' + '.'.join(plugin_line[1:])
plugin_res.id_metadata.name += ' ' + \
' '.join(plugin_line[1:])
self.add_result(plugin_res)
@staticmethod
def vamp_plugin(plugin, wavfile):
def get_vamp_result(txt_file):
# Guess format
time_, value_ = np.genfromtxt(txt_file, delimiter=':', dtype=str,
unpack=True)
time_duration = np.genfromtxt(np.array(time_).ravel(),
delimiter=',',
dtype=float, unpack=True)
if len(time_duration.shape) <= 1:
time = time_duration
if len(time_duration.shape) == 2:
time = time_duration[:, 0]
duration = time_duration[:, 1]
else:
duration = None
if value_.size == 1 and value_ == '':
value = None
elif value_.size > 1 and (value_ == '').all():
value = None
else:
value = np.genfromtxt(np.array(value_).ravel(), delimiter=' ',
invalid_raise=False)
value = np.atleast_2d(value)
if np.isnan(value[:, -1]).all():
value = value[:, 0:-1]
return (time, duration, value)
vamp_output_file = tempfile.NamedTemporaryFile(suffix='_vamp.txt',
delete=False)
args = [plugin, wavfile, '-o', vamp_output_file.name]
stderr = simple_host_process(args) # run vamp-simple-host
# Parse stderr to get blocksize and stepsize
blocksize_info = stderr[4]
import re
# Match against pattern 'Using block size = %d, step size = %d'
m = re.match(
r'Using block size = (\d+), step size = (\d+)', blocksize_info)
blocksize = int(m.groups()[0])
stepsize = int(m.groups()[1])
# Get the results
return get_vamp_result(vamp_output_file)
|
# Copyright 2016 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import moldesign as mdt
from . import toplevel
from . import constraints, grads, coords, setcoord
class Monitor(object):
def __init__(self, *atoms):
if len(atoms) != self.NUM_ATOMS:
raise ValueError('%s requires %d atoms, but %d passed' %
(type(self), self.NUM_ATOMS, len(atoms)))
self.atoms = atoms
@property
def value(self):
return self.GETTER(*self.atoms)
@value.setter
def value(self, val):
args = self.atoms + (val,)
self.SETTER(*args)
def gradient(self):
return grads._atom_grad_to_mol_grad(self.atoms, self.GRAD(*self.atoms))
@mdt.utils.kwargs_from(constraints.GeometryConstraint)
def constrain(self, **kwargs):
""" Constrain this coordinate.
This will add a new item to the parent molecule's constraint list.
Args:
**kwargs (dict): kwargs for constraints.GeometryConstraint
Returns:
constraints.GeometryConstraint: the constraint object
"""
c = self.CONSTRAINT(*self.atoms, **kwargs)
mol = self.atoms[0].molecule
for atom in self.atoms[1:]:
if atom.molecule is not mol:
raise ValueError("Can't create constraint; atoms are not part of the same Molecule")
mol.constraints.append(c)
mol._reset_methods()
return c
def __call__(self, obj):
""" Calculate this value for the given trajectory
Args:
obj (mdt.Molecule or mdt.Trajectory): molecule or trajectory to measure
Returns:
moldesign.units.Quantity: this coordinate's value (for a molecule), or a list of values
(for a trajectory)
Note:
Atoms are identified by their index only; the atoms defined in the Monitor must have
the same indices as those in the passed object
"""
return self.GETTER(*(obj.atoms[a.index] for a in self.atoms))
def __str__(self):
return '%s: %s' % (type(self).__name__, self.value)
def __repr__(self):
return '<%s for atoms %s: %s>' % (type(self).__name__,
','.join(str(atom.index) for atom in self.atoms),
self.value)
@toplevel
class DistanceMonitor(Monitor):
NUM_ATOMS = 2
GETTER = staticmethod(coords.distance)
SETTER = staticmethod(setcoord.set_distance)
GRAD = staticmethod(grads.distance_gradient)
CONSTRAINT = constraints.DistanceConstraint
@toplevel
class AngleMonitor(Monitor):
NUM_ATOMS = 3
GETTER = staticmethod(coords.angle)
SETTER = staticmethod(setcoord.set_angle)
GRAD = staticmethod(grads.angle_gradient)
CONSTRAINT = constraints.AngleConstraint
@toplevel
class DihedralMonitor(Monitor):
def __init__(self, *atoms):
if len(atoms) in (1, 2):
atoms = coords._infer_dihedral(*atoms)
super(DihedralMonitor, self).__init__(*atoms)
NUM_ATOMS = 4
GETTER = staticmethod(coords.dihedral)
SETTER = staticmethod(setcoord.set_dihedral)
GRAD = staticmethod(grads.dihedral_gradient)
CONSTRAINT = constraints.DihedralConstraint
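# Hedged usage sketch (mol stands for a hypothetical mdt.Molecule with at least two atoms,
# and new_distance for a hypothetical length Quantity):
#   d = DistanceMonitor(mol.atoms[0], mol.atoms[1])
#   d.value                  # current distance via coords.distance
#   d.value = new_distance   # moves the atoms via setcoord.set_distance
#   d.constrain()            # appends a DistanceConstraint to mol.constraints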
|
#!/usr/bin/env python
#coding:utf-8
"""
Consecutive prime sum
The prime 41, can be written as the sum of six consecutive primes:
41 = 2 + 3 + 5 + 7 + 11 + 13
This is the longest sum of consecutive primes that adds to a prime below one-hundred.
The longest sum of consecutive primes below one-thousand that adds to a prime, contains 21 terms, and is equal to 953.
Which prime, below one-million, can be written as the sum of the most consecutive primes?
"""
import math
def gen_primes():
D = {}
q = 2
while True:
if q not in D:
yield q
D[q * q] = [q]
else:
for p in D[q]:
D.setdefault(p + q, []).append(p)
del D[q]
q += 1
primes=[]
for p in gen_primes():
if p>10000: break
primes.append(p)
def is_prime(num):
for p in primes:
if p>math.sqrt(num): break
if num%p==0: return False
return True
def answer():
m=[0,0]
for i in xrange(0,29):
for j in xrange(len(primes)-680,i,-1):
s=sum(primes[i:j])
if s<1000000 and is_prime(s) and j-i>m[1]:
m=[s,j-i]
break
print m,len(primes)
import time
tStart=time.time()
answer()
print 'run time=',time.time()-tStart
# [997651, 543]
# run time= 0.111933946609
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016-2019 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from testtools.matchers import Equals, FileExists
from tests import integration
class ChannelClosingTestCase(integration.StoreTestCase):
def test_missing_permission(self):
self.addCleanup(self.logout)
self.login()
expected = (
"Make sure the logged in account has upload permissions on "
"'missing' in series '16'."
)
status = self.close("missing", "beta", expected=expected)
self.assertThat(status, Equals(2))
def test_close_channel(self):
self.addCleanup(self.logout)
self.login()
# Change to a random name and version when not on the fake store.
if not self.is_store_fake():
name = self.get_unique_name()
version = self.get_unique_version()
# If not, keep the name that is faked in our fake account.
else:
name = "basic"
version = "1.0"
self.copy_project_to_cwd("basic")
self.update_name_and_version(name, version)
self.run_snapcraft("snap")
# Register the snap
self.register(name)
# Upload the snap
snap_file_path = "{}_{}_{}.snap".format(name, version, "all")
self.assertThat(os.path.join(snap_file_path), FileExists())
self.assertThat(self.push(snap_file_path, release="edge,beta"), Equals(0))
expected = "The beta channel is now closed."
status = self.close(name, "beta", expected=expected)
self.assertThat(status, Equals(0))
|
import numpy as np
import matplotlib.pyplot as plt
def plot_venn_diagram():
fig, ax = plt.subplots(subplot_kw=dict(frameon=False, xticks=[], yticks=[]))
ax.add_patch(plt.Circle((0.3, 0.3), 0.3, fc='red', alpha=0.5))
ax.add_patch(plt.Circle((0.6, 0.3), 0.3, fc='blue', alpha=0.5))
ax.add_patch(plt.Rectangle((-0.1, -0.1), 1.1, 0.8, fc='none', ec='black'))
ax.text(0.2, 0.3, '$x$', size=30, ha='center', va='center')
ax.text(0.7, 0.3, '$y$', size=30, ha='center', va='center')
ax.text(0.0, 0.6, '$I$', size=30)
ax.axis('equal')
def plot_example_decision_tree():
fig = plt.figure(figsize=(10, 4))
ax = fig.add_axes([0, 0, 0.8, 1], frameon=False, xticks=[], yticks=[])
ax.set_title('Example Decision Tree: Animal Classification', size=24)
def text(ax, x, y, t, size=20, **kwargs):
ax.text(x, y, t,
ha='center', va='center', size=size,
bbox=dict(boxstyle='round', ec='k', fc='w'), **kwargs)
text(ax, 0.5, 0.9, "How big is\nthe animal?", 20)
text(ax, 0.3, 0.6, "Does the animal\nhave horns?", 18)
text(ax, 0.7, 0.6, "Does the animal\nhave two legs?", 18)
text(ax, 0.12, 0.3, "Are the horns\nlonger than 10cm?", 14)
text(ax, 0.38, 0.3, "Is the animal\nwearing a collar?", 14)
text(ax, 0.62, 0.3, "Does the animal\nhave wings?", 14)
text(ax, 0.88, 0.3, "Does the animal\nhave a tail?", 14)
text(ax, 0.4, 0.75, "> 1m", 12, alpha=0.4)
text(ax, 0.6, 0.75, "< 1m", 12, alpha=0.4)
text(ax, 0.21, 0.45, "yes", 12, alpha=0.4)
text(ax, 0.34, 0.45, "no", 12, alpha=0.4)
text(ax, 0.66, 0.45, "yes", 12, alpha=0.4)
text(ax, 0.79, 0.45, "no", 12, alpha=0.4)
ax.plot([0.3, 0.5, 0.7], [0.6, 0.9, 0.6], '-k')
ax.plot([0.12, 0.3, 0.38], [0.3, 0.6, 0.3], '-k')
ax.plot([0.62, 0.7, 0.88], [0.3, 0.6, 0.3], '-k')
ax.plot([0.0, 0.12, 0.20], [0.0, 0.3, 0.0], '--k')
ax.plot([0.28, 0.38, 0.48], [0.0, 0.3, 0.0], '--k')
ax.plot([0.52, 0.62, 0.72], [0.0, 0.3, 0.0], '--k')
ax.plot([0.8, 0.88, 1.0], [0.0, 0.3, 0.0], '--k')
ax.axis([0, 1, 0, 1])
def visualize_tree(estimator, X, y, boundaries=True,
xlim=None, ylim=None):
estimator.fit(X, y)
if xlim is None:
xlim = (X[:, 0].min() - 0.1, X[:, 0].max() + 0.1)
if ylim is None:
ylim = (X[:, 1].min() - 0.1, X[:, 1].max() + 0.1)
x_min, x_max = xlim
y_min, y_max = ylim
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100),
np.linspace(y_min, y_max, 100))
Z = estimator.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, alpha=0.2, cmap='rainbow')
plt.clim(y.min(), y.max())
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='rainbow')
plt.axis('off')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.clim(y.min(), y.max())
# Plot the decision boundaries
def plot_boundaries(i, xlim, ylim):
if i < 0:
return
tree = estimator.tree_
if tree.feature[i] == 0:
plt.plot([tree.threshold[i], tree.threshold[i]], ylim, '-k')
plot_boundaries(tree.children_left[i],
[xlim[0], tree.threshold[i]], ylim)
plot_boundaries(tree.children_right[i],
[tree.threshold[i], xlim[1]], ylim)
elif tree.feature[i] == 1:
plt.plot(xlim, [tree.threshold[i], tree.threshold[i]], '-k')
plot_boundaries(tree.children_left[i], xlim,
[ylim[0], tree.threshold[i]])
plot_boundaries(tree.children_right[i], xlim,
[tree.threshold[i], ylim[1]])
if boundaries:
plot_boundaries(0, plt.xlim(), plt.ylim())
def plot_tree_interactive(X, y):
from sklearn.tree import DecisionTreeClassifier
def interactive_tree(depth):
clf = DecisionTreeClassifier(max_depth=depth, random_state=0)
visualize_tree(clf, X, y)
from IPython.html.widgets import interact
return interact(interactive_tree, depth=[1, 5])
def plot_kmeans_interactive():
from IPython.html.widgets import interact
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.datasets.samples_generator import make_blobs
X, y = make_blobs(n_samples=300, centers=4,
random_state=0, cluster_std=0.60)
def _kmeans_step(frame, n_clusters):
rng = np.random.RandomState(2)
labels = np.zeros(X.shape[0])
centers = rng.randn(n_clusters, 2)
nsteps = frame // 3
for i in range(nsteps + 1):
old_centers = centers
if i < nsteps or frame % 3 > 0:
dist = euclidean_distances(X, centers)
labels = dist.argmin(1)
if i < nsteps or frame % 3 > 1:
centers = np.array([X[labels == j].mean(0)
for j in range(n_clusters)])
nans = np.isnan(centers)
centers[nans] = old_centers[nans]
# plot the cluster centers
plt.scatter(X[:, 0], X[:, 1], c=labels, s=50, cmap='rainbow');
plt.scatter(old_centers[:, 0], old_centers[:, 1], marker='o',
c=np.arange(n_clusters),
s=200, cmap='rainbow')
plt.scatter(old_centers[:, 0], old_centers[:, 1], marker='o',
c='black', s=50)
# plot new centers if third frame
if frame % 3 == 2:
for i in range(n_clusters):
plt.annotate('', centers[i], old_centers[i],
arrowprops=dict(arrowstyle='->', linewidth=1))
plt.scatter(centers[:, 0], centers[:, 1], marker='o',
c=np.arange(n_clusters),
s=200, cmap='rainbow')
plt.scatter(centers[:, 0], centers[:, 1], marker='o',
c='black', s=50)
plt.xlim(-4, 4)
plt.ylim(-2, 10)
if frame % 3 == 1:
plt.text(3.8, 9.5, "1. Reassign points to nearest centroid",
ha='right', va='top', size=14)
elif frame % 3 == 2:
plt.text(3.8, 9.5, "2. Update centroids to cluster means",
ha='right', va='top', size=14)
return interact(_kmeans_step, frame=[0, 50], n_clusters=[3, 5])
|
# Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
"""Base layer for convolution."""
import tensorflow as tf
from layers import base_layers # import seq_flow_lite module
from layers import normalization_layers # import seq_flow_lite module
from layers import quantization_layers # import seq_flow_lite module
class EncoderQConvolution(base_layers.BaseLayer):
"""Quantized encoder convolution layers."""
def __init__(self,
filters,
ksize,
stride=1,
padding="SAME",
dilations=None,
activation=tf.keras.layers.ReLU(),
bias=True,
rank=4,
**kwargs):
self.out_filters = filters
assert rank >= 3 and rank <= 4
self.rank = rank
self.ksize = self._unpack(ksize)
self.strides = self._unpack(stride)
self.dilations = [1] + self._unpack(dilations) + [1] if dilations else None
self.activation = activation
self.bias = bias
self.padding = padding
self.qoutput = quantization_layers.ActivationQuantization(**kwargs)
self._create_normalizer(**kwargs)
super(EncoderQConvolution, self).__init__(**kwargs)
def _unpack(self, value):
if not isinstance(value, list):
assert isinstance(value, int)
return [1 if self.rank == 3 else value, value]
else:
assert len(value) == 2 and self.rank == 4
assert isinstance(value[0], int) and isinstance(value[1], int)
return value
def build(self, input_shapes):
assert len(input_shapes) == self.rank
self.in_filters = input_shapes[-1]
shape = self.ksize + [self.in_filters, self.out_filters]
self.filters = self.add_qweight(shape=shape)
if self.bias:
self.b = self.add_bias(shape=[self.out_filters])
def _create_normalizer(self, **kwargs):
self.normalization = normalization_layers.BatchNormalization(**kwargs)
def _conv_r4(self, inputs, normalize_method):
outputs = tf.nn.conv2d(
inputs,
self.filters,
strides=self.strides,
padding=self.padding,
dilations=self.dilations)
if self.bias:
outputs = tf.nn.bias_add(outputs, self.b)
outputs = normalize_method(outputs)
if self.activation:
outputs = self.activation(outputs)
return self.qoutput(outputs)
def _conv_r3(self, inputs, normalize_method):
bsz = self.get_batch_dimension(inputs)
inputs_r4 = tf.reshape(inputs, [bsz, 1, -1, self.in_filters])
outputs = self._conv_r4(inputs_r4, normalize_method)
return tf.reshape(outputs, [bsz, -1, self.out_filters])
def call(self, inputs):
def normalize_method(tensor):
return self.normalization(tensor)
return self._do_call(inputs, normalize_method)
def _do_call(self, inputs, normalize_method):
if self.rank == 3:
return self._conv_r3(inputs, normalize_method)
return self._conv_r4(inputs, normalize_method)
def quantize_using_output_range(self, tensor):
return self.qoutput.quantize_using_range(tensor)
class EncoderQConvolutionVarLen(EncoderQConvolution):
"""Convolution on variable length sequence."""
def _create_normalizer(self, **kwargs):
self.normalization = normalization_layers.VarLenBatchNormalization(
rank=4, **kwargs)
def call(self, inputs, mask, inverse_normalizer):
def normalize_method(tensor):
return self.normalization(tensor, mask, inverse_normalizer)
return self._do_call(inputs, normalize_method)
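# --- Illustration only (not part of seq_flow_lite) ---
# A minimal sketch of the rank-3 handling performed by _conv_r3 above: a
# [batch, time, channels] input is reshaped to [batch, 1, time, channels] so
# that tf.nn.conv2d can be applied, then reshaped back. Shapes and filter
# values are arbitrary placeholders.
def _example_rank3_conv():
  inputs = tf.random.normal([2, 16, 8])       # [batch, time, in_filters]
  filters = tf.random.normal([1, 3, 8, 4])    # [1, ksize, in_filters, out_filters]
  inputs_r4 = tf.reshape(inputs, [2, 1, -1, 8])
  outputs = tf.nn.conv2d(inputs_r4, filters, strides=[1, 1, 1, 1], padding="SAME")
  return tf.reshape(outputs, [2, -1, 4])      # back to [batch, time, out_filters]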
|
from __future__ import unicode_literals
import logging
import warnings
from functools import update_wrapper
from django import http
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import NoReverseMatch, reverse
from django.template.response import TemplateResponse
from django.utils import six
from django.utils.decorators import classonlymethod
from django.utils.deprecation import RemovedInDjango19Warning
_sentinel = object()
logger = logging.getLogger('django.request')
class ContextMixin(object):
"""
A default context mixin that passes the keyword arguments received by
get_context_data as the template context.
"""
def get_context_data(self, **kwargs):
if 'view' not in kwargs:
kwargs['view'] = self
return kwargs
class View(object):
"""
Intentionally simple parent class for all views. Only implements
dispatch-by-method and simple sanity checking.
"""
http_method_names = ['get', 'post', 'put', 'patch', 'delete', 'head', 'options', 'trace']
def __init__(self, **kwargs):
"""
Constructor. Called in the URLconf; can contain helpful extra
keyword arguments, and other things.
"""
# Go through keyword arguments, and either save their values to our
# instance, or raise an error.
for key, value in six.iteritems(kwargs):
setattr(self, key, value)
@classonlymethod
def as_view(cls, **initkwargs):
"""
Main entry point for a request-response process.
"""
for key in initkwargs:
if key in cls.http_method_names:
raise TypeError("You tried to pass in the %s method name as a "
"keyword argument to %s(). Don't do that."
% (key, cls.__name__))
if not hasattr(cls, key):
raise TypeError("%s() received an invalid keyword %r. as_view "
"only accepts arguments that are already "
"attributes of the class." % (cls.__name__, key))
def view(request, *args, **kwargs):
self = cls(**initkwargs)
if hasattr(self, 'get') and not hasattr(self, 'head'):
self.head = self.get
self.request = request
self.args = args
self.kwargs = kwargs
return self.dispatch(request, *args, **kwargs)
# take name and docstring from class
update_wrapper(view, cls, updated=())
# and possible attributes set by decorators
# like csrf_exempt from dispatch
update_wrapper(view, cls.dispatch, assigned=())
return view
def dispatch(self, request, *args, **kwargs):
# Try to dispatch to the right method; if a method doesn't exist,
# defer to the error handler. Also defer to the error handler if the
# request method isn't on the approved list.
if request.method.lower() in self.http_method_names:
handler = getattr(self, request.method.lower(), self.http_method_not_allowed)
else:
handler = self.http_method_not_allowed
return handler(request, *args, **kwargs)
def http_method_not_allowed(self, request, *args, **kwargs):
logger.warning('Method Not Allowed (%s): %s', request.method, request.path,
extra={
'status_code': 405,
'request': request
}
)
return http.HttpResponseNotAllowed(self._allowed_methods())
def options(self, request, *args, **kwargs):
"""
Handles responding to requests for the OPTIONS HTTP verb.
"""
response = http.HttpResponse()
response['Allow'] = ', '.join(self._allowed_methods())
response['Content-Length'] = '0'
return response
def _allowed_methods(self):
return [m.upper() for m in self.http_method_names if hasattr(self, m)]
class TemplateResponseMixin(object):
"""
A mixin that can be used to render a template.
"""
template_name = None
template_engine = None
response_class = TemplateResponse
content_type = None
def render_to_response(self, context, **response_kwargs):
"""
Returns a response, using the `response_class` for this
view, with a template rendered with the given context.
If any keyword arguments are provided, they will be
passed to the constructor of the response class.
"""
response_kwargs.setdefault('content_type', self.content_type)
return self.response_class(
request=self.request,
template=self.get_template_names(),
context=context,
using=self.template_engine,
**response_kwargs
)
def get_template_names(self):
"""
Returns a list of template names to be used for the request. Must return
a list. May not be called if render_to_response is overridden.
"""
if self.template_name is None:
raise ImproperlyConfigured(
"TemplateResponseMixin requires either a definition of "
"'template_name' or an implementation of 'get_template_names()'")
else:
return [self.template_name]
class TemplateView(TemplateResponseMixin, ContextMixin, View):
"""
A view that renders a template. This view will also pass into the context
any keyword arguments passed by the url conf.
"""
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
class RedirectView(View):
"""
A view that provides a redirect on any GET request.
"""
permanent = _sentinel
url = None
pattern_name = None
query_string = False
def __init__(self, *args, **kwargs):
if 'permanent' not in kwargs and self.permanent is _sentinel:
warnings.warn(
"Default value of 'RedirectView.permanent' will change "
"from True to False in Django 1.9. Set an explicit value "
"to silence this warning.",
RemovedInDjango19Warning,
stacklevel=2
)
self.permanent = True
super(RedirectView, self).__init__(*args, **kwargs)
@classonlymethod
def as_view(cls, **initkwargs):
if 'permanent' not in initkwargs and cls.permanent is _sentinel:
warnings.warn(
"Default value of 'RedirectView.permanent' will change "
"from True to False in Django 1.9. Set an explicit value "
"to silence this warning.",
RemovedInDjango19Warning,
stacklevel=2
)
initkwargs['permanent'] = True
return super(RedirectView, cls).as_view(**initkwargs)
def get_redirect_url(self, *args, **kwargs):
"""
Return the URL redirect to. Keyword arguments from the
URL pattern match generating the redirect request
are provided as kwargs to this method.
"""
if self.url:
url = self.url % kwargs
elif self.pattern_name:
try:
url = reverse(self.pattern_name, args=args, kwargs=kwargs)
except NoReverseMatch:
return None
else:
return None
args = self.request.META.get('QUERY_STRING', '')
if args and self.query_string:
url = "%s?%s" % (url, args)
return url
def get(self, request, *args, **kwargs):
url = self.get_redirect_url(*args, **kwargs)
if url:
if self.permanent:
return http.HttpResponsePermanentRedirect(url)
else:
return http.HttpResponseRedirect(url)
else:
logger.warning('Gone: %s', request.path,
extra={
'status_code': 410,
'request': request
})
return http.HttpResponseGone()
def head(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def options(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def put(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def patch(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
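# --- Usage sketch (illustration only, not part of Django) ---
# Subclassing the generic base views defined above. The template name and
# redirect target are hypothetical placeholders; a project would normally
# reference these from its URLconf.
class ExampleHomeView(TemplateView):
    template_name = "example_home.html"

example_redirect_view = RedirectView.as_view(url='/home/', permanent=False)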
|
# -*- coding: utf-8 -*-
'''
Created on 21.05.2012
@author: bach
'''
import unittest
from shotgun_replica.utilities import entityNaming
class Test( unittest.TestCase ):
def setUp( self ):
pass
def tearDown( self ):
pass
def testUnderScoreReplacement( self ):
testPairs = [
( "shoot_days", "ShootDays", True ),
( "_shoot_days", "ShootDays", False ),
]
for ( underscored, capitalized, needsInverse ) in testPairs:
replacedCapitalized = entityNaming.replaceUnderscoresWithCapitals( underscored )
self.assertEqual( replacedCapitalized, capitalized )
if needsInverse:
replacedUnderscored = entityNaming.replaceCapitalsWithUnderscores( capitalized )
self.assertEqual( replacedUnderscored, underscored )
def testConnectionEntityName( self ):
testPairs = [
( "Asset", "assets", "AssetAssetConnection" ),
( "Asset", "sg_linked_assets", "Asset_sg_linked_assets_Connection" ),
( "Asset", "sg_linked_shots", "Asset_sg_linked_shots_Connection" ),
( "Asset", "shoot_days", "AssetShootDayConnection" )
]
for ( entityType, attrName, connectionEntityName ) in testPairs:
connEntityNameTesting = entityNaming.getConnectionEntityName( entityType, attrName )
self.assertEqual( connEntityNameTesting, connectionEntityName )
def testConnectionAttrNames( self ):
testPairs = [
( "Asset", "Asset", "AssetAssetConnection", "asset", "parent" ),
( "Asset", "Shot", "AssetShotConnection", "asset", "shot" ),
( "CustomEntity07", "CustomEntity05", "CustomEntity07_sg_sources_Connection", "custom_entity07", "custom_entity05" ),
( "Revision", "Revision", "RevisionRevisionConnection", "source_revision", "dest_revision"),
]
for ( baseEntityType, linkedEntityType, connEntityName, srcAttrName, destAttrName ) in testPairs:
( srcAttrNameTest, destAttrNameTest ) = entityNaming.getConnectionEntityAttrName( baseEntityType,
linkedEntityType,
connEntityName )
self.assertEqual( srcAttrNameTest, srcAttrName )
self.assertEqual( destAttrNameTest, destAttrName )
def testRetAttributeNames( self ):
testPairs = [
( "Asset", "sg_linked_assets", "asset_sg_linked_assets_assets" ),
( "CustomEntity02", "sg_sink_tasks", "custom_entity02_sg_sink_tasks_custom_entity02s" ),
]
for ( entityType, attrName, retAttrName ) in testPairs:
retAttrNameTest = entityNaming.getReverseAttributeName( entityType, attrName )
self.assertEqual( retAttrNameTest, retAttrName )
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
import psycopg2
from googlemaps import client
import sys
key= "A GOOGLE API KEY AUTHORIZED ON SEVERAL DIRECTIONS APIS"
myClient = client.Client(key)
def getRoutes(cursor):
    valsToText = []
    cursor.execute("""select * from person""")
    # Fetch every person up front: the helpers below re-execute queries on the
    # same cursor, which would otherwise discard this result set mid-iteration.
    for person in cursor.fetchall():
        [minCluster, minDirections] = getClosestCluster(cursor, person)
        urlForRoute = getURLForRoute(cursor, minDirections, person)
        valsToText.append([person[1], urlForRoute])
    return valsToText
def getClosestCluster(cursor,person) :
cursor.execute("""select * from cluster""")
cluster = cursor.fetchone()
minDistance = sys.maxsize
minCluster = None
minDirections = None
while(cluster):
directions = myClient.directions(getCoordsForGoogle(person[2],person[3]),getCoordsForGoogle(cluster[1],cluster[2]),alternatives=True)
distance = directions[0].get('legs')[0].get('duration').get('value')
if(distance<minDistance):
minDistance=distance
minCluster=cluster
minDirections = directions
cluster = cursor.fetchone()
return [minCluster, minDirections]
def getCoordsForGoogle(lat,lon):
if abs(lat) > 1000:
lat = lat/1000
if abs(lon) > 1000:
lon = lon/1000
return str(lat)+","+str(lon)
def getURLForRoute(cursor, minDirections, person):
aFireRoute = FireRoute()
aFireRoute.addLatLon(person[2],person[3])
for route in minDirections:
if not routeHasFires(cursor,route):
for step in route['legs'][0]['steps']:
aFireRoute.addLatLon(step['end_location']['lat'],step['end_location']['lng'])
return aFireRoute.getURL()
return ""
def routeHasFires(cursor, route):
cursor.execute("""select * from fire_point""")
fire_point = cursor.fetchone()
while(fire_point):
for step in route.get('legs')[0]['steps']:
endLoc = step['end_location']
if(distFrom(endLoc['lat'], endLoc['lng'],fire_point[1], fire_point[2])<1000):
return True
fire_point = cursor.fetchone()
return False
def distFrom(lat1, lon1, lat2, lon2):
from math import sin, cos, sqrt, atan2, radians
earthRadius = 6371000
dlat = radians(lat2-lat1)
dlon = radians(lon2-lon1)
a = sin(dlat/2)**2 + cos(radians(lat1)) * cos(radians(lat2)) * sin(dlon/2)**2
c = 2 * atan2(sqrt(a), sqrt(1-a))
return earthRadius * c
class FireRoute:
#aList = []
PREFIX = "https://www.google.com/maps/dir"
DELIMITER = "/"
def __init__(self):
self.aList = []
def addLatLon(self,lat, lon):
self.aList.append(getCoordsForGoogle(lat,lon))
def getURL(self):
aURL = FireRoute.PREFIX
for loc in self.aList:
aURL+= FireRoute.DELIMITER + loc
return aURL
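# --- Illustration only ---
# distFrom() above implements the haversine formula and returns metres. The
# coordinates below are arbitrary sample values; this assumes the module
# imported successfully (i.e. a valid Google API key was configured above).
def _example_distance():
    return distFrom(45.0, -122.0, 45.01, -122.0)  # roughly 1.1 km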
|
"""
Exposes the 5 parameter unital channel.
"""
import numpy as np
import scipy as sp
from scipy.linalg import polar
PDIAG = np.zeros((9, 9))
for esi in np.eye(3):
one = np.kron(esi, esi)
PDIAG = PDIAG + np.outer(one, one)
PDIAG = PDIAG.astype(int)  # np.int is removed in recent NumPy releases
FIXEDQ = np.array([[-0.1911, 0.3136, -0.9301],
[-0.8547, 0.4128, 0.3148],
[ 0.4826, 0.8551, 0.1891]])
def o(Q, D):
return np.dot(np.dot(Q, D), Q.T)
def Ls(d1=0.1, d2=0.1, d3=0.1):
L1 = np.array([[np.cos(d1), -np.sin(d1), 0],
[np.sin(d1), np.cos(d1), 0],
[0, 0, 1]])
L2 = np.array([[np.cos(d2), 0, -np.sin(d2)],
[0, 1, 0],
[np.sin(d2), 0, np.cos(d2)]])
L3 = np.array([[1, 0, 0],
[0, np.cos(d3), -np.sin(d3)],
[0, np.sin(d3), np.cos(d3)]])
return L1, L2, L3
def SENSOR(d1=0.1, d2=0.1, d3=0.1):
L1, L2, L3 = Ls(d1, d2, d3)
LL1 = np.dot(PDIAG, np.kron(L1, L1))
LL2 = np.dot(PDIAG, np.kron(L2, L2))
LL3 = np.dot(PDIAG, np.kron(L3, L3))
SENSOR = np.r_[LL1[[0, 4, 8], :], LL2[[0, 4, 8], :], LL3[[0, 4, 8], :]]
return SENSOR
class Channel(object):
def __init__(self, kx, ky, kz, **kwargs):
# Ground truth variables
self.kx, self.ky, self.kz = kx, ky, kz
        self.n = int(kwargs.get("n", 1e6))  # np.random.multinomial needs an integer count
self.Q = kwargs.get("Q", np.eye(3))
self.C = np.dot(np.dot(self.Q,
np.diag([self.kx, self.ky, self.kz])),
self.Q.T)
self.Q = np.linalg.svd(self.C)[0]
# Sensor parameters
self.d1 = kwargs.get("d1", 0.01)
self.d2 = kwargs.get("d2", 0.01)
self.d3 = kwargs.get("d3", 0.01)
# Estimators
self.at = np.zeros(9)
self.Vt = np.zeros((9, 9))
self.Qc = np.linalg.qr(np.random.randn(3, 3))[0]
self.M = np.zeros((3, 3))
self.cycle = 1
def sample_data(self):
QcQc = np.kron(self.Qc, self.Qc)
cvec = np.dot(QcQc, np.reshape(self.C, (9,)))
rates = np.dot(SENSOR(self.d1, self.d2, self.d3), cvec)
# Get samples for each L_i
D1 = np.random.multinomial(self.n, rates[0:3]) / float(self.n)
D2 = np.random.multinomial(self.n, rates[3:6]) / float(self.n)
D3 = np.random.multinomial(self.n, rates[6:9]) / float(self.n)
data = np.r_[D1, D2, D3]
return data
def update(self):
# Get new data at this effective orientation
x = self.sample_data()
# Recover the vectorized process matrix and its covariance through a
# linear inversion
a, Sa = self.recover_a(x)
# Update the running mean of the covariance matrix and of the linear
# inversion channel estimate
self.Vt = self.Vt + np.linalg.pinv(Sa)
self.at = np.dot(np.linalg.pinv(self.Vt),
self.at + np.dot(np.linalg.pinv(Sa), a))
# Recover the physical process matrix from the linear inversion
A = np.reshape(self.at, (3, 3))
self.M = self.recoverM(A)
# Get the estimated channel Pauli-basis
self.Qc = np.linalg.svd(self.M)[0]
# Update the process matrices
self.cycle = self.cycle + 1
def recover_a(self, x):
# Initiate the sensor and basis matrices
L = SENSOR(self.d1, self.d2, self.d3)
Linv = np.linalg.pinv(L)
QcQc = np.kron(self.Qc, self.Qc)
# Calculate the data covariance
Sx = sp.linalg.block_diag(
1.0 / self.n * np.outer(x[0:3], x[0:3]),
1.0 / self.n * np.outer(x[3:6], x[3:6]),
1.0 / self.n * np.outer(x[6:9], x[6:9])
)
Sx[np.diag_indices(9)] = 1.0 / self.n * x * (1.0 - x)
# Perform the linear inversion and transform to the standard basis
ac = np.dot(Linv, x)
Sac = o(Linv, Sx)
a = np.dot(QcQc.T, ac)
Sa = o(QcQc.T, Sac)
return a, Sa
@staticmethod
def recoverM(A):
B = 0.5 * (A + A.T)
H = polar(B)[1]
M = 0.5 * (B+H)
M = M / np.trace(M)
return M
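# --- Usage sketch (illustration only) ---
# Construct a channel with equal Pauli rates and run a couple of estimation
# cycles; the parameter values are arbitrary and n is kept modest so the
# multinomial sampling stays cheap.
if __name__ == "__main__":
    chan = Channel(1.0 / 3, 1.0 / 3, 1.0 / 3, n=100000)
    for _ in range(2):
        chan.update()
    print(chan.M)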
|
# -*- coding: utf-8 -*-
import xbmc
import xbmcaddon
import xbmcgui
import re
import sys
import logging
import json
# read settings
ADDON = xbmcaddon.Addon()
logger = logging.getLogger(__name__)
def notification(header, message, time=5000, icon=ADDON.getAddonInfo('icon'), sound=True):
xbmcgui.Dialog().notification(header, message, icon, time, sound)
def show_settings():
ADDON.openSettings()
def get_setting(setting):
return ADDON.getSetting(setting).strip().decode('utf-8')
def set_setting(setting, value):
ADDON.setSetting(setting, str(value))
def get_setting_as_bool(setting):
return get_setting(setting).lower() == "true"
def get_setting_as_float(setting):
try:
return float(get_setting(setting))
except ValueError:
return 0
def get_setting_as_int(setting):
try:
return int(get_setting_as_float(setting))
except ValueError:
return 0
def get_string(string_id):
return ADDON.getLocalizedString(string_id).encode('utf-8', 'ignore')
def kodi_json_request(params):
data = json.dumps(params)
request = xbmc.executeJSONRPC(data)
try:
response = json.loads(request)
except UnicodeDecodeError:
response = json.loads(request.decode('utf-8', 'ignore'))
try:
if 'result' in response:
return response['result']
return None
except KeyError:
logger.warn("[%s] %s" %
(params['method'], response['error']['message']))
return None
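# --- Usage sketch (illustration only, requires a running Kodi interpreter) ---
# A minimal JSON-RPC round trip through the helper above; JSONRPC.Ping is a
# built-in Kodi method.
def _example_ping():
    return kodi_json_request({'jsonrpc': '2.0', 'method': 'JSONRPC.Ping', 'id': 1})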
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from datetime import date
from ._common_conversion import (
_sign_string,
_to_str,
)
from ._serialization import (
url_quote,
_to_utc_datetime,
)
from ._constants import X_MS_VERSION
class SharedAccessSignature(object):
'''
    Provides a factory for creating blob, queue, table, and file share access
    signature tokens with a common account name and account key. Users can either
use the factory or can construct the appropriate service and use the
generate_*_shared_access_signature method directly.
'''
def __init__(self, account_name, account_key):
'''
:param str account_name:
The storage account name used to generate the shared access signatures.
:param str account_key:
            The access key used to generate the shared access signatures.
'''
self.account_name = account_name
self.account_key = account_key
def generate_table(self, table_name, permission=None,
expiry=None, start=None, id=None,
ip=None, protocol=None,
start_pk=None, start_rk=None,
end_pk=None, end_rk=None):
'''
Generates a shared access signature for the table.
Use the returned signature with the sas_token parameter of TableService.
:param str table_name:
Name of table.
:param TablePermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: date or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: date or str
:param str id:
A unique value up to 64 characters in length that correlates to a
stored access policy. To create a stored access policy, use
set_blob_service_properties.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.models.Protocol` for possible values.
:param str start_pk:
The minimum partition key accessible with this shared access
signature. startpk must accompany startrk. Key values are inclusive.
If omitted, there is no lower bound on the table entities that can
be accessed.
:param str start_rk:
The minimum row key accessible with this shared access signature.
startpk must accompany startrk. Key values are inclusive. If
omitted, there is no lower bound on the table entities that can be
accessed.
:param str end_pk:
The maximum partition key accessible with this shared access
signature. endpk must accompany endrk. Key values are inclusive. If
omitted, there is no upper bound on the table entities that can be
accessed.
:param str end_rk:
The maximum row key accessible with this shared access signature.
endpk must accompany endrk. Key values are inclusive. If omitted,
there is no upper bound on the table entities that can be accessed.
'''
sas = _SharedAccessHelper()
sas.add_base(permission, expiry, start, ip, protocol)
sas.add_id(id)
sas.add_table_access_ranges(table_name, start_pk, start_rk, end_pk, end_rk)
sas.add_resource_signature(self.account_name, self.account_key, 'table', table_name)
return sas.get_token()
def generate_queue(self, queue_name, permission=None,
expiry=None, start=None, id=None,
ip=None, protocol=None):
'''
Generates a shared access signature for the queue.
Use the returned signature with the sas_token parameter of QueueService.
:param str queue_name:
Name of queue.
:param QueuePermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Permissions must be ordered read, add, update, process.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: date or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: date or str
:param str id:
A unique value up to 64 characters in length that correlates to a
stored access policy. To create a stored access policy, use
set_blob_service_properties.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.models.Protocol` for possible values.
'''
sas = _SharedAccessHelper()
sas.add_base(permission, expiry, start, ip, protocol)
sas.add_id(id)
sas.add_resource_signature(self.account_name, self.account_key, 'queue', queue_name)
return sas.get_token()
def generate_blob(self, container_name, blob_name, permission=None,
expiry=None, start=None, id=None, ip=None, protocol=None,
cache_control=None, content_disposition=None,
content_encoding=None, content_language=None,
content_type=None):
'''
Generates a shared access signature for the blob.
Use the returned signature with the sas_token parameter of any BlobService.
:param str container_name:
Name of container.
:param str blob_name:
Name of blob.
:param BlobPermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Permissions must be ordered read, write, delete, list.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: date or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: date or str
:param str id:
A unique value up to 64 characters in length that correlates to a
stored access policy. To create a stored access policy, use
set_blob_service_properties.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.models.Protocol` for possible values.
:param str cache_control:
Response header value for Cache-Control when resource is accessed
using this shared access signature.
:param str content_disposition:
Response header value for Content-Disposition when resource is accessed
using this shared access signature.
:param str content_encoding:
Response header value for Content-Encoding when resource is accessed
using this shared access signature.
:param str content_language:
Response header value for Content-Language when resource is accessed
using this shared access signature.
:param str content_type:
Response header value for Content-Type when resource is accessed
using this shared access signature.
'''
resource_path = container_name + '/' + blob_name
sas = _SharedAccessHelper()
sas.add_base(permission, expiry, start, ip, protocol)
sas.add_id(id)
sas.add_resource('b')
sas.add_override_response_headers(cache_control, content_disposition,
content_encoding, content_language,
content_type)
sas.add_resource_signature(self.account_name, self.account_key, 'blob', resource_path)
return sas.get_token()
def generate_container(self, container_name, permission=None, expiry=None,
start=None, id=None, ip=None, protocol=None,
cache_control=None, content_disposition=None,
content_encoding=None, content_language=None,
content_type=None):
'''
Generates a shared access signature for the container.
Use the returned signature with the sas_token parameter of any BlobService.
:param str container_name:
Name of container.
:param ContainerPermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Permissions must be ordered read, write, delete, list.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: date or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: date or str
:param str id:
A unique value up to 64 characters in length that correlates to a
stored access policy. To create a stored access policy, use
set_blob_service_properties.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.models.Protocol` for possible values.
:param str cache_control:
Response header value for Cache-Control when resource is accessed
using this shared access signature.
:param str content_disposition:
Response header value for Content-Disposition when resource is accessed
using this shared access signature.
:param str content_encoding:
Response header value for Content-Encoding when resource is accessed
using this shared access signature.
:param str content_language:
Response header value for Content-Language when resource is accessed
using this shared access signature.
:param str content_type:
Response header value for Content-Type when resource is accessed
using this shared access signature.
'''
sas = _SharedAccessHelper()
sas.add_base(permission, expiry, start, ip, protocol)
sas.add_id(id)
sas.add_resource('c')
sas.add_override_response_headers(cache_control, content_disposition,
content_encoding, content_language,
content_type)
sas.add_resource_signature(self.account_name, self.account_key, 'blob', container_name)
return sas.get_token()
def generate_file(self, share_name, directory_name=None, file_name=None,
permission=None, expiry=None, start=None, id=None,
ip=None, protocol=None, cache_control=None,
content_disposition=None, content_encoding=None,
content_language=None, content_type=None):
'''
Generates a shared access signature for the file.
Use the returned signature with the sas_token parameter of FileService.
:param str share_name:
Name of share.
:param str directory_name:
Name of directory. SAS tokens cannot be created for directories, so
this parameter should only be present if file_name is provided.
:param str file_name:
Name of file.
:param FilePermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Permissions must be ordered read, create, write, delete, list.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: date or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: date or str
:param str id:
A unique value up to 64 characters in length that correlates to a
stored access policy. To create a stored access policy, use
set_file_service_properties.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.models.Protocol` for possible values.
:param str cache_control:
Response header value for Cache-Control when resource is accessed
using this shared access signature.
:param str content_disposition:
Response header value for Content-Disposition when resource is accessed
using this shared access signature.
:param str content_encoding:
Response header value for Content-Encoding when resource is accessed
using this shared access signature.
:param str content_language:
Response header value for Content-Language when resource is accessed
using this shared access signature.
:param str content_type:
Response header value for Content-Type when resource is accessed
using this shared access signature.
'''
resource_path = share_name
if directory_name is not None:
resource_path += '/' + _to_str(directory_name)
resource_path += '/' + _to_str(file_name)
sas = _SharedAccessHelper()
sas.add_base(permission, expiry, start, ip, protocol)
sas.add_id(id)
sas.add_resource('f')
sas.add_override_response_headers(cache_control, content_disposition,
content_encoding, content_language,
content_type)
sas.add_resource_signature(self.account_name, self.account_key, 'file', resource_path)
return sas.get_token()
def generate_share(self, share_name, permission=None, expiry=None,
start=None, id=None, ip=None, protocol=None,
cache_control=None, content_disposition=None,
content_encoding=None, content_language=None,
content_type=None):
'''
Generates a shared access signature for the share.
Use the returned signature with the sas_token parameter of FileService.
:param str share_name:
Name of share.
:param SharePermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Permissions must be ordered read, create, write, delete, list.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: date or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: date or str
:param str id:
A unique value up to 64 characters in length that correlates to a
stored access policy. To create a stored access policy, use
set_file_service_properties.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.models.Protocol` for possible values.
:param str cache_control:
Response header value for Cache-Control when resource is accessed
using this shared access signature.
:param str content_disposition:
Response header value for Content-Disposition when resource is accessed
using this shared access signature.
:param str content_encoding:
Response header value for Content-Encoding when resource is accessed
using this shared access signature.
:param str content_language:
Response header value for Content-Language when resource is accessed
using this shared access signature.
:param str content_type:
Response header value for Content-Type when resource is accessed
using this shared access signature.
'''
sas = _SharedAccessHelper()
sas.add_base(permission, expiry, start, ip, protocol)
sas.add_id(id)
sas.add_resource('s')
sas.add_override_response_headers(cache_control, content_disposition,
content_encoding, content_language,
content_type)
sas.add_resource_signature(self.account_name, self.account_key, 'file', share_name)
return sas.get_token()
def generate_account(self, services, resource_types, permission, expiry, start=None,
ip=None, protocol=None):
'''
Generates a shared access signature for the account.
Use the returned signature with the sas_token parameter of the service
or to create a new account object.
:param Services services:
Specifies the services accessible with the account SAS. You can
combine values to provide access to more than one service.
:param ResourceTypes resource_types:
Specifies the resource types that are accessible with the account
SAS. You can combine values to provide access to more than one
resource type.
:param AccountPermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy. You can combine
values to provide more than one permission.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: date or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: date or str
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.models.Protocol` for possible values.
'''
sas = _SharedAccessHelper()
sas.add_base(permission, expiry, start, ip, protocol)
sas.add_account(services, resource_types)
sas.add_account_signature(self.account_name, self.account_key)
return sas.get_token()
class _QueryStringConstants(object):
SIGNED_SIGNATURE = 'sig'
SIGNED_PERMISSION = 'sp'
SIGNED_START = 'st'
SIGNED_EXPIRY = 'se'
SIGNED_RESOURCE = 'sr'
SIGNED_IDENTIFIER = 'si'
SIGNED_IP = 'sip'
SIGNED_PROTOCOL = 'spr'
SIGNED_VERSION = 'sv'
SIGNED_CACHE_CONTROL = 'rscc'
SIGNED_CONTENT_DISPOSITION = 'rscd'
SIGNED_CONTENT_ENCODING = 'rsce'
SIGNED_CONTENT_LANGUAGE = 'rscl'
SIGNED_CONTENT_TYPE = 'rsct'
TABLE_NAME = 'tn'
START_PK = 'spk'
START_RK = 'srk'
END_PK = 'epk'
END_RK = 'erk'
SIGNED_RESOURCE_TYPES = 'srt'
SIGNED_SERVICES = 'ss'
class _SharedAccessHelper():
def __init__(self):
self.query_dict = {}
def _add_query(self, name, val):
if val:
self.query_dict[name] = _to_str(val)
def add_base(self, permission, expiry, start, ip, protocol):
if isinstance(start, date):
start = _to_utc_datetime(start)
if isinstance(expiry, date):
expiry = _to_utc_datetime(expiry)
self._add_query(_QueryStringConstants.SIGNED_START, start)
self._add_query(_QueryStringConstants.SIGNED_EXPIRY, expiry)
self._add_query(_QueryStringConstants.SIGNED_PERMISSION, permission)
self._add_query(_QueryStringConstants.SIGNED_IP, ip)
self._add_query(_QueryStringConstants.SIGNED_PROTOCOL, protocol)
self._add_query(_QueryStringConstants.SIGNED_VERSION, X_MS_VERSION)
def add_resource(self, resource):
self._add_query(_QueryStringConstants.SIGNED_RESOURCE, resource)
def add_id(self, id):
self._add_query(_QueryStringConstants.SIGNED_IDENTIFIER, id)
def add_account(self, services, resource_types):
self._add_query(_QueryStringConstants.SIGNED_SERVICES, services)
self._add_query(_QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types)
def add_table_access_ranges(self, table_name, start_pk, start_rk,
end_pk, end_rk):
self._add_query(_QueryStringConstants.TABLE_NAME, table_name)
self._add_query(_QueryStringConstants.START_PK, start_pk)
self._add_query(_QueryStringConstants.START_RK, start_rk)
self._add_query(_QueryStringConstants.END_PK, end_pk)
self._add_query(_QueryStringConstants.END_RK, end_rk)
def add_override_response_headers(self, cache_control,
content_disposition,
content_encoding,
content_language,
content_type):
self._add_query(_QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control)
self._add_query(_QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition)
self._add_query(_QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding)
self._add_query(_QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language)
self._add_query(_QueryStringConstants.SIGNED_CONTENT_TYPE, content_type)
def add_resource_signature(self, account_name, account_key, service, path):
def get_value_to_append(query):
return_value = self.query_dict.get(query) or ''
return return_value + '\n'
if path[0] != '/':
path = '/' + path
canonicalized_resource = '/' + service + '/' + account_name + path + '\n'
# Form the string to sign from shared_access_policy and canonicalized
# resource. The order of values is important.
string_to_sign = \
(get_value_to_append(_QueryStringConstants.SIGNED_PERMISSION) +
get_value_to_append(_QueryStringConstants.SIGNED_START) +
get_value_to_append(_QueryStringConstants.SIGNED_EXPIRY) +
canonicalized_resource +
get_value_to_append(_QueryStringConstants.SIGNED_IDENTIFIER) +
get_value_to_append(_QueryStringConstants.SIGNED_IP) +
get_value_to_append(_QueryStringConstants.SIGNED_PROTOCOL) +
get_value_to_append(_QueryStringConstants.SIGNED_VERSION))
if service == 'blob' or service == 'file':
string_to_sign += \
(get_value_to_append(_QueryStringConstants.SIGNED_CACHE_CONTROL) +
get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_DISPOSITION) +
get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_ENCODING) +
get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_LANGUAGE) +
get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_TYPE))
if service == 'table':
string_to_sign += \
(get_value_to_append(_QueryStringConstants.START_PK) +
get_value_to_append(_QueryStringConstants.START_RK) +
get_value_to_append(_QueryStringConstants.END_PK) +
get_value_to_append(_QueryStringConstants.END_RK))
# remove the trailing newline
if string_to_sign[-1] == '\n':
string_to_sign = string_to_sign[:-1]
self._add_query(_QueryStringConstants.SIGNED_SIGNATURE,
_sign_string(account_key, string_to_sign))
def add_account_signature(self, account_name, account_key):
def get_value_to_append(query):
return_value = self.query_dict.get(query) or ''
return return_value + '\n'
string_to_sign = \
(account_name + '\n' +
get_value_to_append(_QueryStringConstants.SIGNED_PERMISSION) +
get_value_to_append(_QueryStringConstants.SIGNED_SERVICES) +
get_value_to_append(_QueryStringConstants.SIGNED_RESOURCE_TYPES) +
get_value_to_append(_QueryStringConstants.SIGNED_START) +
get_value_to_append(_QueryStringConstants.SIGNED_EXPIRY) +
get_value_to_append(_QueryStringConstants.SIGNED_IP) +
get_value_to_append(_QueryStringConstants.SIGNED_PROTOCOL) +
get_value_to_append(_QueryStringConstants.SIGNED_VERSION))
self._add_query(_QueryStringConstants.SIGNED_SIGNATURE,
_sign_string(account_key, string_to_sign))
def get_token(self):
return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None])
|
import re
import csv
import ipaddress
__version__ = 1.0
# Each route will have the following values
class Route_Template(object):
def __init__(self):
self.route = {}
self.protocol = []
self.metric = []
self.next_hop = []
self.age = []
self.interface = []
def __repr__(self):
return str(self.route)
# The main code structure
class RouteParse(object):
def __init__(self):
self.route_table = {}
self.Read_File()
self.Generate_Output_To_File()
# Retrieve a route object if it exists
def Get_Route_Object(self,target_route):
for route in self.route_table:
if target_route in route:
return self.route_table[route]
return None
# If the regular expression picked up a valid route, extract the values into a temporary dictionary
def Get_Route_Values_From_Match(self,matchObj):
values = {}
for keyword, value in vars(Route_Template()).items():
if keyword in matchObj.groupdict():
val = str(matchObj.group(keyword).strip())
values[keyword] = val
else:
values[keyword] = "N/A"
return values
# Create a new route object using the values from the temporary dictionary
def Create_New_Route(self,match):
route = self.Get_Route_Values_From_Match(match)
route_prefix = route["route"]
if not self.Get_Route_Object(route_prefix):
NewRoute = Route_Template()
NewRoute.route = route["route"]
self.route_table[NewRoute.route] = NewRoute
# Check the detail for the route and append it to the object
def Add_Route_Detail(self,previous_route,line):
route = self.Get_Route_Object(previous_route)
route_patterns = [r'via (?P<next_hop>.*), (?P<interface>.*), (?P<metric>\[.*]), (?P<age>.*?), (?P<protocol>.*)', \
r'via (?P<next_hop>.*), (?P<metric>\[.*]), (?P<age>.*?), (?P<protocol>.*)']
for pattern in route_patterns:
match = re.search(pattern,line)
if match:
route.next_hop.append(match.group('next_hop').strip())
route.metric.append(match.group('metric').strip())
route.age.append(match.group('age').strip())
route.protocol.append(match.group('protocol').strip().replace(",","_"))
try:
route.interface.append(match.group('interface').strip())
except IndexError:
route.interface.append("N/A")
break
def Get_Host_Range(self,subnet):
try:
range = ipaddress.ip_network(subnet)
return range[1],range[-2]
except ValueError:
return "error", "error"
except IndexError: # Handles /32
return range[0], ""
def Generate_Output_To_File(self):
try:
with open('routes.csv', 'w', newline='') as csv_file:
spamwriter = csv.writer(csv_file, delimiter=',',
quotechar='|',
quoting=csv.QUOTE_MINIMAL)
spamwriter.writerow(['Route', 'Protocol','Metric','Next Hop','Age','Interface','From Range','To Range'])
for entry in sorted(self.route_table):
route = self.Get_Route_Object(entry)
                    first_ip, last_ip = self.Get_Host_Range(route.route)  # pass the prefix string, not the route object
for no in range(len(route.protocol)):
spamwriter.writerow([route.route,
route.protocol[no],
route.metric[no],
route.next_hop[no],
route.age[no],
route.interface[no],
first_ip,
last_ip])
print (" -- Output saved to 'routes.csv'")
        except (IOError, OSError):
print (" -- Unable to write to routes.csv, if the file is already open close it.")
def Read_File(self):
start_processing_routes = False
invalid_phrases = ["subnetted"]
with open("routes.txt","r") as route_file:
for line in route_file:
#-----------------------
# Ignore certain input
#-----------------------
if line.count(' ') < 2:
continue
if any(x in line for x in invalid_phrases):
continue
if "<string>" in line:
start_processing_routes = True
continue
line = line.strip().replace("\n","")
if start_processing_routes:
regex = r'(?P<route>[0-9].*), ubest/mbest: (?P<value>.*)'
match = re.search(regex,line)
if match:
self.Create_New_Route(match)
last_route = match.group('route').strip()
continue
self.Add_Route_Detail(last_route, line)
print ("Cisco NXOS Route Parser version: '{}'".format(__version__))
c = RouteParse()
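# --- Illustration only ---
# The regexes above expect NX-OS "show ip route"-style text in routes.txt;
# processing starts after a line containing "<string>". Example input lines
# (addresses made up):
#   10.0.0.0/24, ubest/mbest: 1/0
#       *via 192.168.1.1, Eth1/1, [110/41], 3d02h, ospf-1, intra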
|
# This file contains some code to test the DAAPClient as stand-alone application.
import sys
import logging
from .client import DAAPClient
log = logging.getLogger(__name__)
def main():
connection = DAAPClient()
if len(sys.argv) > 1:
host = sys.argv[1]
else:
host = "localhost"
if len(sys.argv) > 2:
        port = int(sys.argv[2])
else:
port = 3689
logging.basicConfig(
level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s'
)
try:
# do everything in a big try, so we can disconnect at the end
connection.connect(host, port)
# auth isn't supported yet. Just log in
session = connection.login()
library = session.library()
log.debug("Library name is `%r`", library.name)
tracks = library.tracks()
# demo - save the first track to disk
# print("Saving %s by %s to disk as 'track.mp3'"%(tracks[0].name, tracks[0].artist))
# tracks[0].save("track.mp3")
if len(tracks) > 0:
tracks[0].atom.printTree()
else:
print('No Tracks')
session.update()
print(session.revision)
finally:
# this here, so we logout even if there's an error somewhere,
# or itunes will eventually refuse more connections.
print("--------------")
try:
session.logout()
except Exception:
pass
if __name__ == '__main__':
main()
|
from django.shortcuts import render, HttpResponse, redirect
from forms import LoginForm, RegistrationForm
from django.contrib.auth import login, logout, authenticate
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from taskManager.forms import TaskCreate,MultipleSelect
from taskManager.views import show_task, show_logs
from django.contrib.auth.hashers import make_password,is_password_usable
def index(request):
"""
Handles user login
"""
if request.method == 'POST':
form = LoginForm(request.POST)
if form.is_valid():
email = form.cleaned_data['email']
password = form.cleaned_data['password']
user = authenticate(email=email, password=password)
if user is not None:
if user.error is None:
login(request, user)
return redirect('home')
else:
form.message = "Email/Password Mismatch"
return render(request, 'index.html', {'form': form})
form.message = "Email not found"
return render(request, 'index.html',
{'form': form,
'page': 'index'})
else:
form.message = "Invalid Email"
return render(request, 'index.html',
{'form': form,
'page': 'index'})
else:
form = LoginForm()
return render(request, 'index.html', {'form': form, 'page': 'index'})
def register_user(request):
"""
Handles user Registration
"""
form = RegistrationForm(request.POST)
if request.method == 'POST':
if form.is_valid():
username = form.cleaned_data['username']
email = form.cleaned_data['email']
password = form.cleaned_data['password']
confirm = form.cleaned_data['confirm']
try:
user = User.objects.get(email=email)
form.error = "Email already registered!"
return render(request, 'registration.html', {'form': form})
except User.DoesNotExist:
if password == confirm:
password = make_password(password)
if is_password_usable(password):
user = User(username=username,
email=email,
password=password)
user.save()
form = RegistrationForm()
form.message = "Success"
else:
form.message = "Password cannot be used"
else:
form.message = "Comfirm and Password field do not match"
return render(request, 'registration.html',
{'form': form,
'page': 'reg'})
except Exception as e:
                # logging to be implemented here
                print(e)
else:
form.error = "Invalid form feild Values"
return render(request, 'registration.html',
{'form': form,
'page': 'reg'})
else:
form = RegistrationForm()
return render(request, 'registration.html', {'form': form, 'page': 'reg'})
@login_required(login_url="/")
def dashboard(request):
"""
Handles dashboard tasklist request
functions: Sorting the tasks , Showing TrackerLogs
"""
col = request.GET.get('sortby', 'id')
order = request.GET.get('order', 'asc')
task = show_task(request, col=col, order=order)
logs = show_logs(request)
form = MultipleSelect()
return render(request, 'dashboard.html',
{'tasks': task,
'logs': logs,
'form': form,
'page': 'home'})
def logout_user(request):
"""
Logs user Out
"""
logout(request)
return redirect('/')
|
# -*- coding: utf-8 -*-
from pytest import raises
# The parametrize function is generated, so this doesn't work:
#
# from pytest.mark import parametrize
#
import pytest
parametrize = pytest.mark.parametrize
# from duckomatic import metadata
from duckomatic.utils.subscriber import (Subscriber, NoDataException)
class TestSubscriber(object):
@parametrize('id_prefix', [
'',
'test123'
])
def test_init(self, id_prefix):
subscriber = Subscriber(id_prefix)
assert type(subscriber) == Subscriber
assert subscriber.get_id().startswith(id_prefix)
@parametrize('topic, data', [
('', {}),
('test123', {'test': 123})
])
def test_update_and_simple_get_update(self, topic, data):
subscriber = Subscriber()
subscriber.update(topic, data)
(actual_topic, actual_data) = subscriber.get_update()
assert actual_topic == topic
assert actual_data == data
@parametrize('timeout', [
(0)
])
def test_get_update_with_timeout(self, timeout):
subscriber = Subscriber()
with raises(NoDataException):
subscriber.get_update(timeout=timeout)
# Should not get here as an exception should be raised.
assert False
# Exception was raised correctly.
assert True
@parametrize('id_prefix', [
'',
'test123'
])
def test_get_id_is_unique(self, id_prefix):
subscriber1 = Subscriber(id_prefix)
subscriber2 = Subscriber(id_prefix)
assert subscriber1.get_id() != subscriber2.get_id()
|