text stringlengths 957 885k |
|---|
import numpy as np
from subprocess import call
import os
import sys
sys.path.insert(1,'../')
import banner
import pc
import pylhe
import kinematics as kin
import collections
from scipy import interpolate, integrate, optimize
# Extract the axion from banner
def get_mass_from_pc(rd, pid=622, banner_tag='HPS'):
    """Read the mass of particle `pid` from a run's banner param card.

    Parameters
    ----------
    rd : str
        Run directory; the banner is expected at
        ``<rd>/<basename(rd)>_<banner_tag>_banner.txt``.
    pid : int
        Particle id to look up in the ``mass`` block (622 is the axion here).
    banner_tag : str
        Tag embedded in the banner file name (e.g. 'HPS' or 'FT').

    Returns
    -------
    The mass value stored in the param card for `pid`.

    Raises
    ------
    IOError
        If the expected banner file does not exist.
    """
    bfname = os.path.join(rd, os.path.basename(rd) + '_' + banner_tag + '_banner.txt')
    # Fail loudly here instead of printing a warning and then crashing
    # inside banner.Banner with a far less helpful message.
    if not os.path.isfile(bfname):
        raise IOError("# Banner corresponding to %s not found: %s" % (rd, bfname))
    b = banner.Banner(banner_path=bfname)
    param_card = pc.ParamCard(b['slha'])
    return param_card['mass'].get(pid).value
def get_raw_counts(rd):
    """Unpack the LHE event file of run directory `rd` and collect event data.

    Returns a tuple of numpy arrays:
        evt_weight_list -- per-event weights (rescaled by 1/br)
        ax_energy_list  -- axion (pid 622) energies
        ax_rap_list     -- axion rapidities
        ax_th_list      -- axion polar angles
        ax_pz_list      -- axion longitudinal momenta
        Em_recoil_list  -- energies of final-state recoil electrons
    """
    evt_file = rd + '/evt.lhe'
    # Decompress the gzipped event file into a temporary plain-text copy.
    with open(evt_file, "w") as outfile:
        call(['gunzip', '-c', rd + '/unweighted_events.lhe.gz'], stdout=outfile)
    # Bare '&' is not valid XML -- neutralise it so the LHE parser survives.
    call(['sed', '-i', 's/&/and/', evt_file])
    # Extract the Branching fraction to rho pi from the banner
    bfname = os.path.join(rd, str(os.path.basename(rd)+'_HPS'+'_banner.txt'))
    if not os.path.isfile(bfname):
        print "# Banner corresponding to ", rd, "not found!"
    b = banner.Banner(banner_path=bfname)
    param_card = pc.ParamCard(b['slha'])
    # Branching-fraction extraction is currently disabled (br forced to 1
    # below); the dead block is kept for reference.
    """
    # mAp decay branching fractions
    d1 = param_card['decay'].decay_table[666][1].lhacode[1]
    if d1 != 624 and d2 != 625:
        print 'Wrong decay mode!'
        exit(0)
    br = param_card['decay'].decay_table[622][1].value
    """
    br = 1.
    evt_weight_list = []
    ax_energy_list = []
    ax_rap_list = []
    ax_th_list = []
    ax_pz_list = []
    Em_recoil_list = []
    counter = 0
    for evt in pylhe.readLHE(evt_file):
        counter = counter + 1
        # Note that we rescale the event weight to get rid of the branching fraction
        evt_weight_list.append((evt['eventinfo']['weight'])/br)
        if counter % 10000 == 0:
            print "Processing event ", counter
        for part in evt['particles']:
            #print part
            #exit(0)
            # Four-momentum as [E, px, py, pz]
            mom = [part['e'], part['px'], part['py'], part['pz']]
            # Axion kinematics
            if part['id'] == 622:
                ax_energy_list.append(mom[0])
                ax_rap_list.append(kin.rap(mom))
                ax_th_list.append(kin.theta(mom))
                ax_pz_list.append(mom[3])
            # NOTE(review): mother1 != mother2 presumably selects the beam
            # recoil electron over pair-produced ones -- confirm.
            if part['id'] == 11 and part['status'] == 1 and part['mother1'] != part['mother2']:
                Em_recoil_list.append(mom[0])
    # Remove the temporary decompressed copy.
    os.remove(evt_file)
    evt_weight_list = np.array(evt_weight_list)
    ax_energy_list = np.array(ax_energy_list)
    ax_rap_list = np.array(ax_rap_list)
    ax_pz_list = np.array(ax_pz_list)
    ax_th_list = np.array(ax_th_list)
    Em_recoil_list = np.array(Em_recoil_list)
    return evt_weight_list, ax_energy_list, ax_rap_list, ax_th_list, ax_pz_list, Em_recoil_list
def get_xsec(evt_weight_list):
    """Return the cross-section estimate: the mean per-event weight."""
    total_weight = np.sum(evt_weight_list)
    return total_weight / len(evt_weight_list)
def get_xsec_after_cut(Ebeam, xcut, evt_weight_list, Em_recoil_list, ax_pz_list):
    """Cross section and mean axion pz after a recoil-energy cut.

    An event passes when E_recoil / Ebeam < xcut.  The cross section keeps
    the *total* event count in the denominator, so it scales with the cut
    efficiency; the pz average is taken over passing events only.
    """
    passed = (Em_recoil_list / Ebeam) < xcut
    pz_passed = ax_pz_list[passed]
    mean_pz = np.sum(pz_passed) / len(pz_passed)
    xsec = np.sum(evt_weight_list[passed]) / len(evt_weight_list)
    return xsec, mean_pz
def get_momentum_list(rd, banner_tag='FT'):
    """
    Returns a list of [pa, pem, pep] for all events
    """
    evt_file = rd + '/evt.lhe'
    # Decompress the gzipped event file into a temporary plain-text copy.
    with open(evt_file, "w") as outfile:
        call(['gunzip', '-c', rd + '/unweighted_events.lhe.gz'], stdout=outfile)
    # Bare '&' is not valid XML -- neutralise it for the LHE parser.
    call(['sed', '-i', 's/&/and/', evt_file])
    # Extract the Branching fraction to rho pi from the banner
    bfname = os.path.join(rd, str(os.path.basename(rd)+'_' + banner_tag + '_banner.txt'))
    if not os.path.isfile(bfname):
        print "# Banner corresponding to ", rd, "not found!"
    b = banner.Banner(banner_path=bfname)
    param_card = pc.ParamCard(b['slha'])
    # Branching-fraction extraction is currently disabled (br forced to 1
    # below); the dead block is kept for reference.
    """
    # mAp decay branching fractions
    d1 = param_card['decay'].decay_table[666][1].lhacode[1]
    if d1 != 624 and d2 != 625:
        print 'Wrong decay mode!'
        exit(0)
    br = param_card['decay'].decay_table[622][1].value
    """
    br = 1.
    counter = 0
    four_vector_list = []
    evt_weight_list = []
    for evt in pylhe.readLHE(evt_file):
        counter = counter + 1
        # Note that we rescale the event weight to get rid of the branching fraction
        evt_weight_list.append((evt['eventinfo']['weight'])/br)
        if counter % 10000 == 0:
            print "Processing event ", counter
        # NOTE(review): `pa` is only bound inside the particle loop; an event
        # with no pid-622 particle would raise NameError on the first event or
        # silently reuse the previous event's axion -- confirm every event
        # contains exactly one axion.
        pp = []
        pr = []
        for part in evt['particles']:
            #print part
            #exit(0)
            # Four-momentum as [E, px, py, pz]
            mom = np.array([part['e'], part['px'], part['py'], part['pz']])
            # Get axion momentum
            if part['id'] == 622:
                pa = mom
            # Get recoil electon momentum
            if part['id'] == 11 and part['status'] == 1 and part['mother1'] != part['mother2']:
                pr = mom
            """
            # Get pz/pt for the decay products of the rho
            if part['id'] == -11 and part['status'] == 1:
                pp = mom
            if part['id'] == 11 and part['status'] == 1 and part['mother1']==part['mother2']:
                pm = mom
            """
        # Dummy decay product 4 vectors for now
        four_vector_list.append([pa, pr])
    four_vector_list = np.array(four_vector_list)
    evt_weight_list = np.array(evt_weight_list)
    return evt_weight_list, four_vector_list
|
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Various handlers/functionality for different types of files (e.g. for archives)
"""
import hashlib
import patoolib
from .external_versions import external_versions
# There were issues, so let's stay consistently with recent version
assert(external_versions["patoolib"] >= "1.7")
import os
import tempfile
from os.path import join as opj, exists, abspath, isabs, normpath, relpath, pardir, isdir
from os.path import sep as opsep
from os.path import realpath
from six import next
from six.moves.urllib.parse import unquote as urlunquote
import string
import random
from ..utils import any_re_search
import logging
lgr = logging.getLogger('datalad.files')
# Monkey-patch patoolib's logging, so it logs coherently with the rest of
# datalad
import patoolib.util
#
# Seems have managed with swallow_outputs
#
# def _patool_log(level, msg):
# lgr.log(level, "patool: %s" % msg)
#
# def _patool_log_info(msg, *args, **kwargs):
# _patool_log(logging.DEBUG, msg)
#
# def _patool_log_error(msg, *args, **kwargs):
# _patool_log(logging.ERROR, msg)
#
# patoolib.util.log_info = _patool_log_info
# patoolib.util.log_error = _patool_log_error
# patoolib.util.log_internal_error = _patool_log_error
# we need to decorate patool.util.run
# because otherwise it just lets processes to spit out everything to std and we
# do want to use it at "verbosity>=0" so we could get idea on what is going on.
# And I don't want to mock for every invocation
from ..support.exceptions import CommandError
from ..utils import swallow_outputs
from ..utils import rmtemp
from ..cmd import Runner
from ..consts import ARCHIVES_TEMP_DIR
from ..utils import rmtree
from ..utils import get_tempfile_kwargs
from ..utils import on_windows
# Shared runner instance used by the patool wrapper below.
_runner = Runner()


def _patool_run(cmd, verbosity=0, **kwargs):
    """Decorated runner for patool so it doesn't spit out outputs to stdout"""
    # use our runner
    try:
        # kwargs_ = kwargs[:]; kwargs_['shell'] = True
        _runner.run(cmd, **kwargs)
        return 0
    except CommandError as e:
        # patool expects a shell-style return code rather than an exception
        return e.code
    except Exception as e:
        lgr.error("While invoking runner caught unexpected exception: %s" % e)
        return 100  # unknown beast


# Monkey-patch patool's process runner with ours so output is captured
# (see module-level comment above about verbosity).
patoolib.util.run = _patool_run
# yoh: only keys are used atm, logic in decompress_file is replaced to use
# patool
# The values are the legacy shell commands, kept only for reference; the
# regex keys still describe which archive suffixes are recognized.
DECOMPRESSORS = {
    '\.(tar\.bz|tbz)$': 'tar -xjvf %(file)s -C %(dir)s',
    '\.(tar\.xz)$': 'tar -xJvf %(file)s -C %(dir)s',
    '\.(tar\.gz|tgz)$': 'tar -xzvf %(file)s -C %(dir)s',
    '\.(zip)$': 'unzip %(file)s -d %(dir)s',
}
def unixify_path(path):
    """Translate a Windows path into its POSIX form (/drive/dir/file).

    Several command line tools we shell out to (tar etc) choke on native
    Windows paths, so on Windows the drive letter becomes the leading path
    component and separators become forward slashes.  On any other platform
    the path is returned untouched.
    """
    if not on_windows:
        return path
    drive, remainder = os.path.splitdrive(path)
    remainder = '/'.join(remainder.split(os.sep))
    if not drive:
        return remainder
    # splitdrive guarantees a non-empty drive ends with ':'
    assert(drive[-1] == ":")
    return '/%s%s' % (drive[:-1], remainder)
def decompress_file(archive, dir_, leading_directories='strip'):
    """Decompress `archive` into a directory `dir_`

    Parameters
    ----------
    archive: str
    dir_: str
    leading_directories: {'strip', None}
      If `strip`, and archive contains a single leading directory under which
      all content is stored, all the content will be moved one directory up
      and that leading directory will be removed.
    """
    if not exists(dir_):
        lgr.debug("Creating directory %s to extract archive into" % dir_)
        os.makedirs(dir_)
    # Swallow patool's chatter; it is re-emitted at debug level below.
    with swallow_outputs() as cmo:
        patoolib.util.check_existing_filename(archive)
        patoolib.util.check_existing_filename(dir_, onlyfiles=False)
        # Call protected one to avoid the checks on existence on unixified path
        patoolib._extract_archive(unixify_path(archive),
                                  outdir=unixify_path(dir_),
                                  verbosity=100)
        if cmo.out:
            lgr.debug("patool gave stdout:\n%s" % cmo.out)
        if cmo.err:
            lgr.debug("patool gave stderr:\n%s" % cmo.err)
    if leading_directories == 'strip':
        # Inspect only the top level of the extracted tree.
        _, dirs, files = next(os.walk(dir_))
        if not len(files) and len(dirs) == 1:
            # move all the content under dirs[0] up 1 level
            widow_dir = opj(dir_, dirs[0])
            lgr.debug("Moving content within %s upstairs" % widow_dir)
            subdir, subdirs_, files_ = next(os.walk(opj(dir_, dirs[0])))
            for f in subdirs_ + files_:
                os.rename(opj(subdir, f), opj(dir_, f))
            os.rmdir(widow_dir)
    elif leading_directories is None:
        pass  # really do nothing
    else:
        raise NotImplementedError("Not supported %s" % leading_directories)
def compress_files(files, archive, path=None, overwrite=True):
    """Compress `files` into an `archive` file

    Parameters
    ----------
    files : list of str
    archive : str
    path : str
      Alternative directory under which compressor will be invoked, to e.g.
      take into account relative paths of files and/or archive
    overwrite : bool
      Either to allow overwriting the target archive file if one already exists
    """
    # Swallow patool's chatter; it is re-emitted at debug level below.
    with swallow_outputs() as cmo:
        # to test filenames, if path is not None, we should join:
        if path:
            opj_path = lambda p: opj(path, p)
        else:
            opj_path = lambda p: p
        if not overwrite:
            patoolib.util.check_new_filename(opj_path(archive))
        patoolib.util.check_archive_filelist([opj_path(f) for f in files])
        # ugly but what can you do? ;-) we might wrap it all into a class
        # at some point. TODO
        # Temporarily point the shared runner at `path` so relative file
        # names inside the archive come out right.
        old_cwd = _runner.cwd
        if path is not None:
            _runner.cwd = path
        try:
            # Call protected one to avoid the checks on existence on unixified path
            patoolib._create_archive(unixify_path(archive),
                                     [unixify_path(f) for f in files],
                                     verbosity=100)
        finally:
            # Restore the runner's working directory even if patool failed.
            _runner.cwd = old_cwd
        if cmo.out:
            lgr.debug("patool gave stdout:\n%s" % cmo.out)
        if cmo.err:
            lgr.debug("patool gave stderr:\n%s" % cmo.err)
def _get_cached_filename(archive):
    """Map an archive path to a short, collision-resistant cache key.

    The original file name carries no value inside the cache, so the key is
    a truncated md5 of the resolved (real) path -- which also keeps equally
    named archives from different locations apart.
    """
    resolved = realpath(archive)
    archive_cached = hashlib.md5(resolved.encode()).hexdigest()[:10]
    lgr.debug("Cached directory for archive %s is %s", archive, archive_cached)
    return archive_cached
def _get_random_id(size=6, chars=string.ascii_uppercase + string.digits):
"""Return a random ID composed from digits and uppercase letters
upper-case so we are tolerant to unlikely collisions on dummy FSs
"""
return ''.join(random.choice(chars) for _ in range(size))
class ArchivesCache(object):
    """Cache to maintain extracted archives

    Parameters
    ----------
    toppath : str
      Top directory under .git/ of which temp directory would be created.
      If not provided -- random tempdir is used
    persistent : bool, optional
      Passed over into generated ExtractedArchives
    """
    # TODO: make caching persistent across sessions/runs, with cleanup
    # IDEA: extract under .git/annex/tmp so later on annex unused could clean it
    # all up
    def __init__(self, toppath=None, persistent=False):
        # Remembered so relative archive paths can be normalized later on.
        self._toppath = toppath
        if toppath:
            path = opj(toppath, ARCHIVES_TEMP_DIR)
            if not persistent:
                # Unique suffix keeps concurrent non-persistent caches apart.
                tempsuffix = "-" + _get_random_id()
                lgr.debug("For non-persistent archives using %s suffix for path %s",
                          tempsuffix, path)
                path += tempsuffix
        else:
            if persistent:
                raise ValueError("%s cannot be persistent since no toppath was provided" % self)
            path = tempfile.mktemp(**get_tempfile_kwargs())
        self._path = path
        self.persistent = persistent
        # TODO? assure that it is absent or we should allow for it to persist a bit?
        #if exists(path):
        #    self._clean_cache()
        # Maps normalized archive path -> ExtractedArchive
        self._archives = {}
        # TODO: begging for a race condition
        if not exists(path):
            lgr.debug("Initiating clean cache for the archives under %s" % self.path)
            try:
                # NOTE(review): flag is set before makedirs; if makedirs fails
                # it stays True although nothing was created -- confirm intent.
                self._made_path = True
                os.makedirs(path)
                lgr.debug("Cache initialized")
            except:
                lgr.error("Failed to initialize cached under %s" % path)
                raise
        else:
            lgr.debug("Not initiating existing cache for the archives under %s" % self.path)
            self._made_path = False

    @property
    def path(self):
        # Directory under which extracted archives are cached.
        return self._path

    def clean(self, force=False):
        """Clean all known archives; drop the cache dir unless persistent."""
        for aname, a in list(self._archives.items()):
            a.clean(force=force)
            del self._archives[aname]
        # Probably we should not rely on _made_path and not bother if persistent removing it
        # if ((not self.persistent) or force) and self._made_path:
        #     lgr.debug("Removing the entire archives cache under %s" % self.path)
        #     rmtemp(self.path)
        if (not self.persistent) or force:
            lgr.debug("Removing the entire archives cache under %s" % self.path)
            rmtemp(self.path)

    def _get_normalized_archive_path(self, archive):
        """Return full path to archive

        So we have consistent operation from different subdirs,
        while referencing archives from the topdir

        TODO: why do we need it???
        """
        if not isabs(archive) and self._toppath:
            out = normpath(opj(self._toppath, archive))
            # Refuse paths escaping the top directory.
            if relpath(out, self._toppath).startswith(pardir):
                raise RuntimeError("%s points outside of the topdir %s"
                                   % (archive, self._toppath))
            if isdir(out):
                raise RuntimeError("got a directory here... bleh")
            return out
        return archive

    def get_archive(self, archive):
        """Return (creating on first use) the ExtractedArchive for `archive`."""
        archive = self._get_normalized_archive_path(archive)
        if archive not in self._archives:
            self._archives[archive] = \
                ExtractedArchive(archive,
                                 opj(self.path, _get_cached_filename(archive)),
                                 persistent=self.persistent)
        return self._archives[archive]

    def __getitem__(self, archive):
        # cache[archive] is sugar for cache.get_archive(archive)
        return self.get_archive(archive)

    def __delitem__(self, archive):
        # Clean the extracted content and forget about the archive.
        archive = self._get_normalized_archive_path(archive)
        self._archives[archive].clean()
        del self._archives[archive]

    def __del__(self):
        try:
            # we can at least try
            if not self.persistent:
                self.clean()
        except:  # MIH: IOError?
            pass
class ExtractedArchive(object):
    """Container for the extracted archive

    Lazily extracts `archive` under `path` and maintains a stamp file so it
    can tell whether the extracted content is complete and up to date.

    Parameters
    ----------
    archive : str
      Path to the archive file.
    path : str, optional
      Directory to extract into.  If not given, a temporary name derived
      from the archive is generated.
    persistent : bool, optional
      If True, extracted content survives clean()/deletion unless forced.
    """
    # suffix to use for a stamp so we could guarantee that extracted archive is
    # complete and up to date (the stamp is written after a successful extraction)
    STAMP_SUFFIX = '.stamp'

    def __init__(self, archive, path=None, persistent=False):
        self._archive = archive
        # TODO: bad location for extracted archive -- use tempfile
        if not path:
            path = tempfile.mktemp(**get_tempfile_kwargs(prefix=_get_cached_filename(archive)))
        if exists(path) and not persistent:
            raise RuntimeError("Directory %s already exists whenever it should not "
                               "persist" % path)
        self._persistent = persistent
        self._path = path

    def __repr__(self):
        return "%s(%r, path=%r)" % (self.__class__.__name__, self._archive, self.path)

    def clean(self, force=False):
        """Remove extracted content and stamp (unless persistent and not forced)."""
        # would interfere with tests
        # if os.environ.get('DATALAD_TESTS_TEMP_KEEP'):
        #     lgr.info("As instructed, not cleaning up the cache under %s"
        #              % self._path)
        #     return
        for path, name in [
                (self._path, 'cache'),
                (self.stamp_path, 'stamp file')]:
            if exists(path):
                if (not self._persistent) or force:
                    lgr.debug("Cleaning up the %s for %s under %s", name, self._archive, path)
                    # TODO: we must be careful here -- to not modify permissions of files
                    #       only of directories
                    (rmtree if isdir(path) else os.unlink)(path)

    @property
    def path(self):
        """Given an archive -- return full path to it within cache (extracted)"""
        return self._path

    @property
    def stamp_path(self):
        """Path of the stamp file marking a completed extraction."""
        return self._path + self.STAMP_SUFFIX

    @property
    def is_extracted(self):
        """True if extracted content exists and is not older than its stamp."""
        return exists(self.path) and exists(self.stamp_path) \
            and os.stat(self.stamp_path).st_mtime >= os.stat(self.path).st_mtime

    def assure_extracted(self):
        """Return path to the extracted `archive`. Extract archive if necessary
        """
        path = self.path
        if not self.is_extracted:
            # we need to extract the archive
            # TODO: extract to _tmp and then move in a single command so we
            #       don't end up picking up broken pieces
            lgr.debug("Extracting {self._archive} under {path}".format(**locals()))
            if exists(path):
                lgr.debug("Previous extracted (but probably not fully) cached archive found. Removing %s", path)
                rmtree(path)
            os.makedirs(path)
            assert(exists(path))
            # remove old stamp
            if exists(self.stamp_path):
                rmtree(self.stamp_path)
            decompress_file(self._archive, path, leading_directories=None)
            # TODO: must optional since we might to use this content, move it into the tree etc
            # lgr.debug("Adjusting permissions to R/O for the extracted content")
            # rotree(path)
            assert(exists(path))
            # create a stamp so is_extracted can compare mtimes later
            with open(self.stamp_path, 'w') as f:
                f.write(self._archive)
            # assert that stamp mtime is not older than archive's directory
            assert(self.is_extracted)
        return path

    # TODO: remove?
    #def has_file_ready(self, afile):
    #    lgr.debug("Checking file {afile} from archive {archive}".format(**locals()))
    #    return exists(self.get_extracted_filename(afile))

    def get_extracted_filename(self, afile):
        """Return full path to the `afile` within extracted `archive`

        It does not actually extract any archive
        """
        return opj(self.path, urlunquote(afile))

    def get_extracted_files(self):
        """Generator to provide filenames which are available under extracted archive
        """
        path = self.assure_extracted()
        path_len = len(path) + (len(os.sep) if not path.endswith(os.sep) else 0)
        for root, dirs, files in os.walk(path):  # TEMP
            for name in files:
                yield opj(root, name)[path_len:]

    def get_leading_directory(self, depth=None, consider=None, exclude=None):
        """Return leading directory of the content within archive

        Parameters
        ----------
        depth: int or None, optional
          Maximal depth of leading directories to consider. If None - no upper
          limit
        consider : list of str, optional
          Regular expressions for file/directory names to be considered (before
          exclude). Applied to the entire relative path to the file as in the archive
        exclude: list of str, optional
          Regular expressions for file/directory names to be excluded from consideration.
          Applied to the entire relative path to the file as in the archive

        Returns
        -------
        str or None:
          If there is no single leading directory -- None returned
        """
        leading = None
        # returns only files, so no need to check if a dir or not
        for fpath in self.get_extracted_files():
            if consider and not any_re_search(consider, fpath):
                continue
            if exclude and any_re_search(exclude, fpath):
                continue
            lpath = fpath.split(opsep)
            dpath = lpath[:-1]  # directory path components
            if leading is None:
                leading = dpath if depth is None else dpath[:depth]
            else:
                if dpath[:len(leading)] != leading:
                    # find smallest common path
                    leading_ = []
                    # TODO: there might be more efficient pythonic way
                    for d1, d2 in zip(leading, dpath):
                        if d1 != d2:
                            break
                        leading_.append(d1)
                    leading = leading_
            if not len(leading):
                # no common leading - ready to exit
                return None
        return leading if leading is None else opj(*leading)

    def get_extracted_file(self, afile):
        """Return path to `afile` inside the extracted tree, extracting on demand."""
        lgr.debug("Requested file {afile} from archive {self._archive}".format(**locals()))
        # TODO: That could be a good place to provide "compatibility" layer if
        #       filenames within archive are too obscure for local file system.
        #       We could somehow adjust them while extracting and here channel back
        #       "fixed" up names since they are only to point to the load
        self.assure_extracted()
        path = self.get_extracted_filename(afile)
        # TODO: make robust
        lgr.log(2, "Verifying that %s exists" % abspath(path))
        assert exists(path), "%s must exist" % path
        return path

    def __del__(self):
        try:
            # BUG FIX: this used to call clean() only for *persistent*
            # archives -- a no-op, since clean() skips persistent content --
            # so temporary extractions were never removed.  Mirror
            # ArchivesCache.__del__ and clean the non-persistent ones.
            if not self._persistent:
                self.clean()
        except:  # MIH: IOError?
            pass
|
<gh_stars>10-100
# Copyright (C) 2010-2011 <NAME>
# Copyright (C) 2011 <NAME>
# Copyright (C) 2015 <NAME>
# Copyright (C) 2016 <NAME>, KKG - <EMAIL>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from xml.etree.cElementTree import iterparse
from time import time
import logging
logger = logging.getLogger(__name__)
def cimread(source, packageMap=None, nsURI=None, start_dict=None):
    """ CIM RDF/XML parser.

    @type source: File-like object or a path to a file.
    @param source: CIM RDF/XML file.
    @type profile: dict
    @param packageMap: Map of class name to PyCIM package name. All CIM
    classes are under the one namespace, but are arranged into sub-packages
    so a map from class name to package name is required. Defaults to the
    latest CIM version, but may be set to a map from a profile to return
    a profile model.
    @type profile: string
    @param nsURI: CIM namespace URI used in the RDF/XML file. For example:
    http://iec.ch/TC57/2010/CIM-schema-cim15
    @rtype: dict
    @return: Map of UUID to CIM object.
    @author: <NAME> <<EMAIL>>
    """
    # Start the clock.
    t0 = time()
    #logger.info('##########################################################################')
    logger.info('START of parsing file \"%s\"', source)
    # Collects repeated "has no attribute" errors so each distinct message is
    # reported once (with its count) at the end instead of flooding the log.
    logger_errors_grouped = {}
    # A map of uuids to CIM objects to be returned.
    d = start_dict if start_dict is not None else {}
    # Obtain the namespaces from the input file
    namespaces = xmlns(source)
    ns_rdf = get_rdf_ns(namespaces)
    # Caller must supply both nsURI and packageMap, or neither.
    if bool(nsURI) != bool(packageMap):
        raise ValueError(
            'Either pass "packageMap" AND "nsURI" or none of them.')
    elif (nsURI is None) and (packageMap is None):
        nsURI, packageMap = get_cim_ns(namespaces)
    # CIM element tag base (e.g. {http://iec.ch/TC57/2009/CIM-schema-cim14#}).
    base = "{%s#}" % nsURI
    # Length of element tag base.
    m = len(base)
    # First pass instantiates the classes.
    context = iterparse(source, ("start", "end"))
    # Turn it into an iterator (required for cElementTree).
    context = iter(context)
    # Get the root element ({http://www.w3.org/1999/02/22-rdf-syntax-ns#}RDF).
    _, root = next(context)
    for event, elem in context:
        # Process 'end' elements in the CIM namespace.
        if event == "end" and elem.tag[:m] == base:
            # Unique resource identifier for the CIM object.
            uuid = elem.get("{%s}ID" % ns_rdf)
            if uuid is not None:  # class
                # Element tag without namespace (e.g. VoltageLevel).
                tag = elem.tag[m:]
                try:
                    mname = packageMap[tag]
                except KeyError:
                    logger.error("Unable to locate module for: %s (%s)",
                                 tag, uuid)
                    root.clear()
                    continue
                # Import the module for the CIM object.
                module = __import__(mname, globals(), locals(), [tag], 0)
                # Get the CIM class from the module.
                klass = getattr(module, tag)
                # Instantiate the class and map it to the uuid.
                d[uuid] = klass(UUID=uuid)
        # Clear children of the root element to minimise memory usage.
        root.clear()
    # Reset stream
    if hasattr(source, "seek"):
        source.seek(0)
    ## Second pass sets attributes and references.
    context = iter( iterparse(source, ("start", "end")) )
    # Get the root element ({http://www.w3.org/1999/02/22-rdf-syntax-ns#}RDF).
    _, root = next(context)
    for event, elem in context:
        # Process 'start' elements in the CIM namespace.
        if event == "start" and elem.tag[:m] == base:
            uuid = elem.get("{%s}ID" % ns_rdf)
            if uuid is None:
                # rdf:about references an object instantiated in pass one.
                uuid = elem.get("{%s}about" % ns_rdf)
                if uuid is not None:
                    uuid = uuid[1:]
            if uuid is not None:
                # Locate the CIM object using the uuid.
                try:
                    obj = d[uuid]
                except KeyError:
                    logger.error("Missing '%s' object with uuid: %s",
                                 elem.tag[m:], uuid)
                    root.clear()
                    continue
                # Iterate over attributes/references.
                for event, elem in context:
                    # Process end events with elements in the CIM namespace.
                    if event == "end" and elem.tag[:m] == base:
                        # Break if class closing element (e.g. </cim:Terminal>).
                        if elem.get("{%s}ID" % ns_rdf) is None and \
                                elem.get("{%s}about" % ns_rdf) is None:
                            # Get the attribute/reference name.
                            attr = elem.tag[m:].rsplit(".")[-1]
                            if not hasattr(obj, attr):
                                error_msg = "'%s' has not attribute '%s'" %(obj.__class__.__name__, attr)
                                try:
                                    logger_errors_grouped[error_msg] += 1
                                except KeyError:
                                    logger_errors_grouped[error_msg] = 1
                                # logger.error("'%s' has not attribute '%s'",
                                #              obj.__class__.__name__, attr)
                                continue
                            # Use the rdf:resource attribute to distinguish
                            # between attributes and references/enums.
                            uuid2 = elem.get("{%s}resource" % ns_rdf)
                            if uuid2 is None:  # attribute
                                # Convert value type using the default value.
                                try:
                                    typ = type( getattr(obj, attr) )
                                    if typ == type(True):  # KKG: Test if it is boolean value
                                        # KKG: NB: The function bool("false") returns True, because it is called upon non-empty string!
                                        # This means that it wrongly reads "false" value as boolean True and this is why this special case testing is necessary
                                        if str.title(elem.text) == 'True':
                                            setattr(obj, attr, True)
                                        else:
                                            setattr(obj, attr, False)
                                    else:
                                        setattr(obj, attr, typ(elem.text))
                                except TypeError:
                                    pass
                            else:  # reference or enum
                                # Use the '#' prefix to distinguish between
                                # references and enumerations.
                                if uuid2[0] == "#":  # reference
                                    try:
                                        val = d[uuid2[1:]]  # remove '#' prefix
                                    except KeyError:
                                        logger.error("Referenced '%s' [%s] "
                                                     "object missing.",
                                                     obj.__class__.__name__,
                                                     uuid2[1:])
                                        continue
                                    default = getattr(obj, attr)
                                    if default == None:  # 1..1 or 1..n
                                        # Rely on properties to set any
                                        # bi-directional references.
                                        setattr(obj, attr, val)
                                    elif isinstance(default, list):  # many
                                        # Use 'add*' method to set reference.
                                        getattr(obj, ("add%s" % attr))(val)
                                    # else:
                                    #     logger.error("Reference error [%s].",
                                    #                  default)
                                else:  # enum
                                    val = uuid2.rsplit(".", 1)[1]
                                    setattr(obj, attr, val)
                        else:
                            # Finished setting object attributes.
                            break
        # Clear children of the root element to minimise memory usage.
        root.clear()
    # Emit the grouped "has no attribute" errors, once each with a count.
    if logger_errors_grouped:
        for error, count in logger_errors_grouped.items():
            logging_message = '%s : %d times' %(error, count)
            logger.warn(logging_message)
    # logging_message = 'Created totally %d CIM objects in %.2fs.' %(len(d), time() - t0)
    logger.info('Created totally %d CIM objects in %.2fs.' %(len(d), time() - t0))
    # logging_message = 'END of parsing file \"%s\"\n' % source
    logger.info('END of parsing file \"%s\"\n' % source)
    return d
def xmlns(source):
    """Return a prefix -> namespace URI map for the given XML file.

    Only events up to the first 'end' tag are consumed -- i.e. just the
    namespace declarations in effect at the document root -- and the
    stream is rewound afterwards when it supports seeking.
    """
    found = {}
    for event, payload in iterparse(source, ("end", "start-ns", "end-ns")):
        if event == "end":
            break
        if event == "start-ns":
            prefix, uri = payload
            found[prefix] = uri
    # Reset stream so the caller can parse the document from the start.
    if hasattr(source, "seek"):
        source.seek(0)
    return found
def get_rdf_ns(namespaces):
    """Return the rdf namespace URI, falling back to the W3C default."""
    if 'rdf' in namespaces:
        return namespaces['rdf']
    ns = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
    logger.warn('No rdf namespace found. Using %s' % ns)
    return ns
def get_cim_ns(namespaces):
    """
    Tries to obtain the CIM version from the given map of namespaces and
    returns the appropriate *nsURI* and *packageMap*.
    """
    try:
        ns = namespaces['cim']
        # Strip a trailing '#' so the URI matches the package constants.
        if ns.endswith('#'):
            ns = ns[:-1]
    except KeyError:
        ns = ''
        logger.error('No CIM namespace defined in input file.')
    # CIM16 files are handled with the CIM15 package (closest available model).
    CIM16nsURI = 'http://iec.ch/TC57/2013/CIM-schema-cim16'
    # Preserve the original URI for the caller; `ns` becomes the package name.
    nsuri = ns
    import CIM14, CIM15
    if ns == CIM14.nsURI:
        ns = 'CIM14'
    elif ns == CIM15.nsURI:
        ns = 'CIM15'
    elif ns == CIM16nsURI:
        ns = 'CIM15'
    else:
        # Unknown or missing namespace: fall back to the latest supported model.
        ns = 'CIM15'
        logger.warn('Could not detect CIM version. Using %s.' % ns)
    # Import the chosen package and hand back its class -> module map.
    cim = __import__(ns, globals(), locals(), ['nsURI', 'packageMap'])
    return nsuri, cim.packageMap
if __name__ == "__main__":
    # Ad-hoc smoke test: parse a sample file with INFO-level logging.
    logging.basicConfig(level=logging.INFO)
    cimread("Test/Data/EDF_AIGUE_v9_COMBINED.xml")
|
<reponame>ffcccc/MachineLearning
"""
@ Filename: AssociationAnalysis.py
@ Author: Danc1elion
@ Create Date: 2019-05-27
@ Update Date: 2019-06-02
@ Description: Implement AssociationAnalysis
"""
class Apriori:
    """Apriori frequent-itemset mining and association-rule generation.

    Parameters
    ----------
    min_support : float
        Minimum support an itemset needs to be kept as frequent.
    min_confidence : float
        Minimum confidence a rule needs to be reported.
    """
    def __init__(self, min_support=0.5, min_confidence=0.6):
        self.min_support = min_support
        self.min_confidence = min_confidence
'''
Function: createSingletonSet
Description: create set which only contain one elements
Input: data dataType: ndarray description: data
Output: singleton_set dataType: frozenset description: invariable set which only contain one elements
'''
def createSingletonSet(self, data):
singleton_set = []
for record in data:
for item in record:
if [item] not in singleton_set:
singleton_set.append([item])
singleton_set.sort()
singleton_set = list(map(frozenset, singleton_set)) # generate a invariable set
return singleton_set
    '''
    Function: calculateSupportDegree
    Description: calculate the support degree for a given candidate set
    Input:  data            dataType: ndarray   description: data
            candidate_set   dataType: list      description: candidate set
    Output: support_degree  dataType: dict      description: dictionary contains all set support_degree(item, support)
            frequent_items  dataType: list      description: frequent items set
    '''
    def calculateSupportDegree(self, data, candidate_set):
        sample_sum = len(data)
        data = map(set, data)  # transform data into set
        # calculate the frequency of each set in candidate_set appearing in data
        frequency = {}
        for record in data:
            for element in candidate_set:
                if element.issubset(record):  # elements in record
                    frequency[element] = frequency.get(element, 0) + 1
        # calculate the support degree for each set
        support_degree = {}
        frequent_items = []
        for key in frequency:
            support = frequency[key]/sample_sum
            if support >= self.min_support:
                # prepend, so later-discovered itemsets come first
                frequent_items.insert(0, key)
            # support is recorded for every candidate, frequent or not
            support_degree[key] = support
        return frequent_items, support_degree
'''
Function: createCandidateSet
Description: create candidate set
Input: frequent_items dataType: list description: frequent items set
k dataType: int description: the number of elements to be compared
Output: candidate_set dataType: list description: candidate set
'''
def createCandidateSet(self, frequent_items, k):
candidate_set = []
items_num = len(frequent_items)
# merge the sets which have same front k-2 element
for i in range(items_num):
for j in range(i+1, items_num):
L1 = list(frequent_items[i])[: k-2]
L2 = list(frequent_items[j])[: k-2]
if L1 == L2:
candidate_set.append(frequent_items[i] | frequent_items[j])
return candidate_set
    '''
    Function: findFrequentItem
    Description: find frequenct items
    Input:  data             dataType: ndarray   description: data
    Output: support_degree   dataType: dict      description: dictionary contains support_degree(item, support)
            frequent_items   dataType: list      description: frequent items set
    '''
    def findFrequentItem(self, data):
        # Level 1: singleton candidates
        singleton_set = self.createSingletonSet(data)
        sub_frequent_items, sub_support_degree = self.calculateSupportDegree(data, singleton_set)
        frequent_items = [sub_frequent_items]
        support_degree = sub_support_degree
        k = 2
        # Grow itemsets level by level until no frequent set survives
        while len(frequent_items[k-2]) > 0:
            candidate_set = self.createCandidateSet(frequent_items[k-2], k)
            sub_frequent_items, sub_support_degree = self.calculateSupportDegree(data, candidate_set)
            support_degree.update(sub_support_degree)
            if len(sub_frequent_items) == 0:
                break
            frequent_items.append(sub_frequent_items)
            k = k + 1
        return frequent_items, support_degree
'''
Function: calculateConfidence
Description: calculate confidence and generate rules
Input: frequent_item dataType: set description: one record of frequent_set
support_degree dataType: dict description: support_degree
candidate_set dataType: frozenset description: invariable set which only contain one elements
rule_list dataType: list description: invariable set which only contain one elements
Output: confidence dataType: dict description: confidence
rule dataType: list description: items whose confidence is larger than min_confidence
'''
def calculateConfidence(self, frequent_item, support_degree, candidate_set, rule_list):
rule = []
confidence = []
for item in candidate_set:
temp = support_degree[frequent_item]/support_degree[frequent_item - item]
confidence.append(temp)
if temp >= self.min_confidence:
rule_list.append((frequent_item - item, item, temp)) #
rule.append(item)
return rule
    '''
    Function: mergeFrequentItem
    Description: recursively grow rule consequents and test their confidence
    Input:  frequent_item  dataType: set  description: one record of the frequent set
            support_degree dataType: dict description: support of every frequent item set
            candidate_set  dataType: list description: current consequent candidates (frozensets)
            rule_list      dataType: list description: accumulator for the generated rules
    '''
    def mergeFrequentItem(self, frequent_item, support_degree, candidate_set, rule_list):
        item_num = len(candidate_set[0])
        # only grow the consequent while a non-empty antecedent remains
        if len(frequent_item) > item_num + 1:
            candidate_set = self.createCandidateSet(candidate_set, item_num+1)
            rule = self.calculateConfidence(frequent_item, support_degree, candidate_set, rule_list)
            # at least two surviving consequents are needed to merge further
            if len(rule) > 1:
                self.mergeFrequentItem(frequent_item, support_degree, rule, rule_list)
    '''
    Function: generateRules
    Description: generate association rules from the mined frequent item sets
    Input:  frequent_set   dataType: list description: frequent item sets grouped by level
            support_degree dataType: dict description: support of every frequent item set
    Output: rules          dataType: list description: rules as (antecedent, consequent, confidence)
    '''
    def generateRules(self, frequent_set, support_degree):
        rules = []
        for i in range(1, len(frequent_set)): # only sets with at least two elements can form rules
            for frequent_item in frequent_set[i]:
                # start from all single-element consequents
                candidate_set = [frozenset([item]) for item in frequent_item]
                if i > 1:
                    # sets of size > 2: also merge consequents recursively
                    self.mergeFrequentItem(frequent_item, support_degree, candidate_set, rules)
                else:
                    # two-element sets: only single-item consequents exist
                    self.calculateConfidence(frequent_item, support_degree, candidate_set, rules)
        return rules
    '''
    Function: train
    Description: train the model: mine frequent item sets, then derive association rules
    Input:  data         dataType: ndarray description: transaction records
            display      dataType: bool    description: print the frequent items and rules
    Output: frequent_set dataType: list    description: frequent item sets, one list per level
            rules        dataType: list    description: rules as (antecedent, consequent, confidence)
    '''
    def train(self, data, display=True):
        frequent_set, support_degree = self.findFrequentItem(data)
        rules = self.generateRules(frequent_set, support_degree)
        if display:
            print("Frequent Items:")
            for item in frequent_set:
                print(item)
            print("_______________________________________")
            print("Association Rules:")
            for rule in rules:
                print(rule)
        return frequent_set, rules
class FPNode:
    """Node of an FP tree: an item, its support count along this path,
    a parent link, a node-link to the next node with the same item,
    and a dict of children."""
    def __init__(self, item, count, parent):
        self.item = item
        self.count = count      # support count accumulated on this path
        self.parent = parent    # parent FPNode, None for the root
        self.next = None        # next node holding the same item (header chain)
        self.children = {}      # item -> child FPNode
    def display(self, ind=1):
        # One space of indentation per tree level. The original used
        # '' * ind, which is always the empty string, so every level
        # printed flush left with no visible nesting.
        print(' ' * ind, self.item, '', self.count)
        for child in self.children.values():
            child.display(ind+1)
class FPgrowth:
    """FP-growth: mine frequent item sets without candidate generation,
    then derive association rules from them."""
    def __init__(self, min_support=3, min_confidence=0.6):
        self.min_support = min_support        # minimum absolute support count
        self.min_confidence = min_confidence  # minimum rule confidence
    '''
    Function: transfer2FrozenDataSet
    Description: transfer data to frozenset keys with their multiplicities
    Input:  data        dataType: ndarray description: train_data
    Output: frozen_data dataType: dict    description: {frozenset(record): occurrence count}
    '''
    def transfer2FrozenDataSet(self, data):
        frozen_data = {}
        for elem in data:
            key = frozenset(elem)
            # accumulate the multiplicity; the original assigned 1, which
            # silently collapsed duplicate transactions and under-counted
            # their support
            frozen_data[key] = frozen_data.get(key, 0) + 1
        return frozen_data
    '''
    Function: updataTree
    Description: insert one ordered transaction into the FP tree
    Input:  data    dataType: list   description: ordered frequent items
            FP_tree dataType: FPNode description: (sub)tree root to insert under
            header  dataType: dict   description: header pointer table {item: [count, first node]}
            count   dataType: int    description: multiplicity of the record
    '''
    def updataTree(self, data, FP_tree, header, count):
        frequent_item = data[0]
        if frequent_item in FP_tree.children:
            FP_tree.children[frequent_item].count += count
        else:
            FP_tree.children[frequent_item] = FPNode(frequent_item, count, FP_tree)
            if header[frequent_item][1] is None:
                header[frequent_item][1] = FP_tree.children[frequent_item]
            else:
                self.updateHeader(header[frequent_item][1], FP_tree.children[frequent_item]) # share the same path
        if len(data) > 1:
            self.updataTree(data[1::], FP_tree.children[frequent_item], header, count) # recurrently update FP tree
    '''
    Function: updateHeader
    Description: update header, add tail_node to the current last node of frequent_item
    Input:  head_node dataType: FPNode description: first node in header
            tail_node dataType: FPNode description: node need to be added
    '''
    def updateHeader(self, head_node, tail_node):
        # walk the node-link chain of same-item nodes and append at the end
        while head_node.next is not None:
            head_node = head_node.next
        head_node.next = tail_node
    '''
    Function: createFPTree
    Description: create FP tree
    Input:  train_data dataType: dict   description: {frozenset(record): count}
    Output: FP_tree    dataType: FPNode description: FP tree root
            header     dataType: dict   description: header pointer table;
                                        (None, None) when no item reaches min_support
    '''
    def createFPTree(self, train_data):
        initial_header = {}
        # 1. the first scan, get singleton set
        for record in train_data:
            for item in record:
                initial_header[item] = initial_header.get(item, 0) + train_data[record]
        # keep singletons whose support reaches min_support; if none, return (None, None)
        header = {}
        for k in initial_header.keys():
            if initial_header[k] >= self.min_support:
                header[k] = initial_header[k]
        frequent_set = set(header.keys())
        if len(frequent_set) == 0:
            return None, None
        # enlarge the value, add a pointer to the first node of the item
        for k in header:
            header[k] = [header[k], None]
        # 2. the second scan, create FP tree
        FP_tree = FPNode('root', 1, None) # root node
        for record, count in train_data.items():
            frequent_item = {}
            for item in record: # if item is a frequent set, add it
                if item in frequent_set: # 2.1 filter infrequent_item
                    frequent_item[item] = header[item][0]
            if len(frequent_item) > 0:
                ordered_frequent_item = [val[0] for val in sorted(frequent_item.items(), key=lambda val:val[1], reverse=True)] # 2.1 sort all the elements in descending order according to count
                self.updataTree(ordered_frequent_item, FP_tree, header, count) # 2.2 insert frequent_item in FP-Tree, share the path with the same prefix
        return FP_tree, header
    '''
    Function: ascendTree
    Description: ascend tree from leaf node to root node according to path
    Input:  node dataType: FPNode description: leaf node
    Output: prefix_path dataType: list description: prefix path (excludes the node itself and the root)
    '''
    def ascendTree(self, node):
        prefix_path = []
        while node.parent != None and node.parent.item != 'root':
            node = node.parent
            prefix_path.append(node.item)
        return prefix_path
    '''
    Function: getPrefixPath
    Description: collect every prefix path of the given pattern base
    Input:  base   dataType: item description: pattern base
            header dataType: dict description: header pointer table
    Output: prefix_path dataType: dict description: {frozenset(prefix): count}
    '''
    def getPrefixPath(self, base, header):
        prefix_path = {}
        start_node = header[base][1]
        prefixs = self.ascendTree(start_node)
        if len(prefixs) != 0:
            prefix_path[frozenset(prefixs)] = start_node.count
        # follow the node-links to every other occurrence of `base`
        while start_node.next is not None:
            start_node = start_node.next
            prefixs = self.ascendTree(start_node)
            if len(prefixs) != 0:
                prefix_path[frozenset(prefixs)] = start_node.count
        return prefix_path
    '''
    Function: findFrequentItem
    Description: find frequent item sets via recursive conditional FP trees
    Input:  header       dataType: dict description: header [name : (count, pointer)]
            prefix       dataType: set  description: current prefix items
            frequent_set dataType: dict description: output {frozenset(items): support}
    '''
    def findFrequentItem(self, header, prefix, frequent_set):
        # for each item in header, then iterate until there is only one element in conditional fptree
        header_items = [val[0] for val in sorted(header.items(), key=lambda val: val[1][0])]
        if len(header_items) == 0:
            return
        for base in header_items:
            new_prefix = prefix.copy()
            new_prefix.add(base)
            support = header[base][0]
            frequent_set[frozenset(new_prefix)] = support
            prefix_path = self.getPrefixPath(base, header)
            if len(prefix_path) != 0:
                conditonal_tree, conditional_header = self.createFPTree(prefix_path)
                if conditional_header is not None:
                    self.findFrequentItem(conditional_header, new_prefix, frequent_set)
    '''
    Function: generateRules
    Description: generate association rules for every frequent item set of size > 1
    Input:  frequent_set dataType: dict description: {frozenset(items): support}
            rules        dataType: list description: output (antecedent, consequent, confidence)
    '''
    def generateRules(self, frequent_set, rules):
        for frequent_item in frequent_set:
            if len(frequent_item) > 1:
                self.getRules(frequent_item, frequent_item, frequent_set, rules)
    '''
    Function: removeItem
    Description: return current_item without `item`
    Input:  current_item dataType: frozenset description: item set
            item         dataType: item      description: element to drop
    '''
    def removeItem(self, current_item, item):
        tempSet = []
        for elem in current_item:
            if elem != item:
                tempSet.append(elem)
        tempFrozenSet = frozenset(tempSet)
        return tempFrozenSet
    '''
    Function: getRules
    Description: recursively shrink the antecedent and collect confident rules
    Input:  frequent_item dataType: frozenset description: the full frequent item set
            current_item  dataType: frozenset description: antecedent candidate being shrunk
            frequent_set  dataType: dict      description: {frozenset(items): support}
            rules         dataType: list      description: output rules
    '''
    def getRules(self, frequent_item, current_item, frequent_set, rules):
        for item in current_item:
            subset = self.removeItem(current_item, item)
            confidence = frequent_set[frequent_item]/frequent_set[subset]
            if confidence >= self.min_confidence:
                # skip duplicates that different recursion paths can produce
                flag = False
                for rule in rules:
                    if (rule[0] == subset) and (rule[1] == frequent_item - subset):
                        flag = True
                if flag == False:
                    rules.append((subset, frequent_item - subset, confidence))
                if (len(subset) >= 2):
                    self.getRules(frequent_item, subset, frequent_set, rules)
    '''
    Function: train
    Description: train the model
    Input:  data    dataType: ndarray description: transactions
            display dataType: bool    description: print the frequent items and rules
    Output: frequent_set dataType: dict description: {frozenset(items): support}
            rules        dataType: list description: the learned rules
    '''
    def train(self, data, display=True):
        data = self.transfer2FrozenDataSet(data)
        FP_tree, header = self.createFPTree(data)
        #FP_tree.display()
        frequent_set = {}
        prefix_path = set([])
        # createFPTree returns (None, None) when nothing reaches min_support;
        # guard so we do not crash on header.items() in that case
        if header is not None:
            self.findFrequentItem(header, prefix_path, frequent_set)
        rules = []
        self.generateRules(frequent_set, rules)
        if display:
            print("Frequent Items:")
            for item in frequent_set:
                print(item)
            print("_______________________________________")
            print("Association Rules:")
            for rule in rules:
                print(rule)
        return frequent_set, rules
class Eclat:
    """Eclat: mine frequent item sets on the vertical (tid-list) layout and
    derive association rules from them."""
    def __init__(self, min_support=3, min_confidence=0.6):
        self.min_support = min_support        # minimum absolute support count
        self.min_confidence = min_confidence  # minimum rule confidence
    '''
    Function: invert
    Description: invert the data (item -> transaction ids) and drop items below min_support
    Input:  data dataType: list description: transactions
    Output: frequent_item dataType: list description: frequent 1-item sets (frozensets)
            support       dataType: list description: tid-list of each frequent item
    '''
    def invert(self, data):
        invert_data = {}
        frequent_item = []
        support = []
        for i in range(len(data)):
            for item in data[i]:
                if invert_data.get(item) is not None:
                    invert_data[item].append(i)
                else:
                    invert_data[item] = [i]
        for item in invert_data.keys():
            if len(invert_data[item]) >= self.min_support:
                frequent_item.append([item])
                support.append(invert_data[item])
        frequent_item = list(map(frozenset, frequent_item))
        return frequent_item, support
    '''
    Function: getIntersection
    Description: build (k+1)-item sets by intersecting the tid-lists of k-item sets
    Input:  frequent_item dataType: list description: frequent k-item sets
            support       dataType: list description: tid-list of each set
    Output: sub_frequent_item dataType: list description: frequent (k+1)-item sets
            sub_support       dataType: list description: their tid-lists
    '''
    def getIntersection(self, frequent_item, support):
        sub_frequent_item = []
        sub_support = []
        k = len(frequent_item[0]) + 1
        for i in range(len(frequent_item)):
            for j in range(i+1, len(frequent_item)):
                # sort before slicing: frozenset iteration order is arbitrary,
                # so unsorted prefixes could miss valid merges
                L1 = sorted(frequent_item[i])[:k-2]
                L2 = sorted(frequent_item[j])[:k-2]
                if L1 == L2:
                    # compute the tid-list intersection once and reuse it
                    # (the original computed it twice per pair)
                    common = set(support[i]).intersection(set(support[j]))
                    if len(common) >= self.min_support:
                        sub_frequent_item.append(frequent_item[i] | frequent_item[j])
                        sub_support.append(list(common))
        return sub_frequent_item, sub_support
    '''
    Function: findFrequentItem
    Description: find all frequent item sets level by level
    Input:  frequent_item dataType: list description: frequent 1-item sets
            support       dataType: list description: their tid-lists
            frequent_set  dataType: list description: output, one list of item sets per level
            support_set   dataType: list description: output, matching tid-lists per level
    '''
    def findFrequentItem(self, frequent_item, support, frequent_set, support_set):
        frequent_set.append(frequent_item)
        support_set.append(support)
        # at least two sets are needed to form a new candidate
        while len(frequent_item) >= 2:
            frequent_item, support = self.getIntersection(frequent_item, support)
            frequent_set.append(frequent_item)
            support_set.append(support)
    '''
    Function: generateRules
    Description: generate association rules for every frequent item set of size > 1
    Input:  frequent_set dataType: dict description: {frozenset(items): frequency}
            rules        dataType: list description: output (antecedent, consequent, confidence)
    '''
    def generateRules(self, frequent_set, rules):
        for frequent_item in frequent_set:
            if len(frequent_item) > 1:
                self.getRules(frequent_item, frequent_item, frequent_set, rules)
    '''
    Function: removeItem
    Description: return current_item without `item`
    Input:  current_item dataType: frozenset description: item set
            item         dataType: item      description: element to drop
    '''
    def removeItem(self, current_item, item):
        tempSet = []
        for elem in current_item:
            if elem != item:
                tempSet.append(elem)
        tempFrozenSet = frozenset(tempSet)
        return tempFrozenSet
    '''
    Function: getRules
    Description: recursively shrink the antecedent and collect confident rules
    Input:  frequent_item dataType: frozenset description: the full frequent item set
            current_item  dataType: frozenset description: antecedent candidate being shrunk
            frequent_set  dataType: dict      description: {frozenset(items): frequency}
            rules         dataType: list      description: output rules
    '''
    def getRules(self, frequent_item, current_item, frequent_set, rules):
        for item in current_item:
            subset = self.removeItem(current_item, item)
            confidence = frequent_set[frequent_item] / frequent_set[subset]
            if confidence >= self.min_confidence:
                # skip duplicates that different recursion paths can produce
                flag = False
                for rule in rules:
                    if (rule[0] == subset) and (rule[1] == frequent_item - subset):
                        flag = True
                if flag == False:
                    rules.append((subset, frequent_item - subset, confidence))
                if len(subset) >= 2:
                    self.getRules(frequent_item, subset, frequent_set, rules)
    '''
    Function: train
    Description: train the model
    Input:  data    dataType: ndarray description: transactions
            display dataType: bool    description: print the frequent items and rules
    Output: frequent_set dataType: list description: frequent item sets per level
            rules        dataType: list description: the learned rules
    '''
    def train(self, data, display=True):
        # get the invert data
        frequent_item, support = self.invert(data)
        frequent_set = []
        support_set = []
        # get the frequent_set
        self.findFrequentItem(frequent_item, support, frequent_set, support_set)
        # transfer support tid-lists into frequencies
        data = {}
        for i in range(len(frequent_set)):
            for j in range(len(frequent_set[i])):
                data[frequent_set[i][j]] = len(support_set[i][j])
        rules = []
        self.generateRules(data, rules)
        if display:
            print("Frequent Items:")
            for item in frequent_set:
                print(item)
            print("_______________________________________")
            print("Association Rules:")
            for rule in rules:
                print(rule)
        return frequent_set, rules
|
#!/usr/bin/env python
import dbm.gnu
import os
import os.path
import sys
from google.cloud import pubsub_v1, storage
from tqdm import tqdm
from shared import (
encode_int,
encode_json,
future_waiter,
get_mogile_files_from_database,
get_state_int,
make_bucket_map,
make_db_connection,
maybe_update_max,
open_db,
BATCH_SETTINGS,
MIGRATE,
VERIFY
)
# Keys under which resume-state counters are stored in state.db.
LATEST_MOGILE_FID_KEY = "mogile_fid"
LATEST_CREPO_ID_FOR_SHA_KEY = "sha_crepo_id"
LATEST_CREPO_ID_KEY = "crepo_id"
LATEST_RHINO_FILE_ID_KEY = "rhino_file_id"
# Mapping of mogile bucket names to GCS bucket names, and buckets to skip.
BUCKET_MAP = make_bucket_map(os.environ["BUCKETS"])
IGNORE_BUCKETS = os.environ["IGNORE_BUCKETS"].split(",")
# Pub/Sub destination configuration.
TOPIC_ID = os.environ["TOPIC_ID"]
GCP_PROJECT = os.environ["GCP_PROJECT"]
# Where resume-state files (state.db, shas.db) are kept.
STATE_DIR = os.environ.get("STATE_DIR", os.getcwd())
SHAS_DB_PATH = os.path.join(STATE_DIR, "shas.db")
# Module-level clients shared by all queueing helpers below.
CLIENT = pubsub_v1.PublisherClient(batch_settings=BATCH_SETTINGS)
GCS_CLIENT = storage.Client()
TOPIC_PATH = CLIENT.topic_path(GCP_PROJECT, TOPIC_ID)
def build_shas_db(state_db):
    """Build a shas.db file where we will store the relationship between a UUID and a sha.

    Resumes from the last content-repo id recorded in *state_db*; if the
    local shas.db file is missing, it is rebuilt from id 0.
    """
    initial_id = get_state_int(state_db, LATEST_CREPO_ID_FOR_SHA_KEY)
    connection = make_db_connection(os.environ["CONTENTREPO_DATABASE_URL"])
    if not os.path.exists(SHAS_DB_PATH):
        # Need to regenerate from the beginning
        initial_id = 0
    # Track the largest id actually seen; the original referenced the loop
    # variable after the loop, which raised NameError when the query
    # returned no rows.
    last_crepo_id = None
    try:
        with open_db(SHAS_DB_PATH) as db:
            cursor = connection.cursor()
            query = "SELECT id, uuid, checksum FROM objects WHERE id > %s"
            cursor.execute(query, initial_id)
            with tqdm(desc="loading shas") as pbar:
                row = cursor.fetchone()
                pbar.update()
                while row:
                    (crepo_id, uuid, checksum) = row
                    last_crepo_id = crepo_id
                    db[uuid] = checksum
                    row = cursor.fetchone()
                    pbar.update()
        if last_crepo_id is not None:
            maybe_update_max(state_db, LATEST_CREPO_ID_FOR_SHA_KEY, last_crepo_id)
    finally:
        connection.close()
def send_message(mogile_file, action):
    """Publish *mogile_file* as a Pub/Sub message tagged with *action*.

    Returns the publish future so the caller can wait for delivery.
    """
    return CLIENT.publish(TOPIC_PATH, mogile_file.to_json(), action=action)
def queue_migrate(mogile_file, state_db):
    """Queue mogile files for migration in pubsub.

    Records the highest fid seen in *state_db* so a rerun can resume after it.
    """
    maybe_update_max(state_db, LATEST_MOGILE_FID_KEY, mogile_file.fid)
    return send_message(mogile_file, MIGRATE)
def queue_verify(mogile_file):
    """Queue mogile files for verification in pubsub; returns the publish future."""
    return send_message(mogile_file, VERIFY)
def queue_rhino_final(bucket_name, state_db):
    """Queue up copies for the rhino final migration step in pubsub.

    Generator: yields one publish future per article file so the caller can
    drain them incrementally. Resumes after the file id recorded in
    *state_db* and advances that id as rows are processed.
    """
    connection = make_db_connection(os.environ["RHINO_DATABASE_URL"])
    initial_id = get_state_int(state_db, LATEST_RHINO_FILE_ID_KEY)
    # Every article file above the resume id, joined to its ingestion/article
    # so the destination key can be built from doi/ingestion/file name.
    sql = """
    SELECT articleFile.fileId,
           doi,
           ingestionNumber,
           ingestedFileName,
           crepoUuid
      FROM articleFile
      JOIN articleIngestion ON articleFile.ingestionId = articleIngestion.ingestionId
      JOIN article ON articleIngestion.articleId = article.articleId
     WHERE articleFile.fileId > %s
     ORDER BY articleFile.fileId asc
    """
    with dbm.gnu.open(SHAS_DB_PATH) as db:
        try:
            cursor = connection.cursor()
            cursor.execute(sql, initial_id)
            row = cursor.fetchone()
            while row:
                (file_id, doi, ingestionNumber, ingestedFileName, uuid) = row
                # NOTE(review): raises KeyError when shas.db lacks this uuid —
                # presumably build_shas_db always runs first; confirm.
                sha = db[uuid]
                to_key = f"{doi}/{ingestionNumber}/{ingestedFileName}"
                json = {
                    "bucket": bucket_name,
                    "from_key": sha.decode("utf-8"),
                    "to_key": to_key,
                }
                yield CLIENT.publish(TOPIC_PATH, encode_json(json), action="copy")
                maybe_update_max(state_db, LATEST_RHINO_FILE_ID_KEY, file_id)
                row = cursor.fetchone()
        finally:
            connection.close()
def queue_lemur_final(buckets, state_db):
    """Queue up copies for the lemur final migration step in pubsub.

    Generator: for every crepo bucket in *buckets* (crepo name -> GCS name),
    publishes one copy message per object (latest version only) and yields
    the publish futures. Resumes after the crepo object id in *state_db*.
    """
    connection = make_db_connection(os.environ["CONTENTREPO_DATABASE_URL"])
    initial_id = get_state_int(state_db, LATEST_CREPO_ID_KEY)
    try:
        for crepo_bucket_name, gcs_bucket_name in buckets.items():
            cursor = connection.cursor()
            cursor.execute(
                "select bucketId from buckets where bucketName = %s",
                (crepo_bucket_name,),
            )
            bucket_id = cursor.fetchone()[0]
            cursor.close()
            # Latest version of each object key in this bucket, above the resume id.
            sql = """
            SELECT id,
                   objects.objKey,
                   objects.downloadName,
                   checksum
              FROM (
                    SELECT objKey,
                           MAX(versionNumber) AS latest
                      FROM objects
                     WHERE bucketId = %s
                       AND id > %s
                     GROUP BY objKey
                   ) AS m
             INNER JOIN objects
                ON objects.objKey = m.objKey
               AND objects.versionNumber = m.latest
               AND objects.bucketId = %s
            """
            cursor = connection.cursor()
            cursor.execute(sql, (bucket_id, initial_id, bucket_id))
            row = cursor.fetchone()
            while row:
                (crepo_file_id, obj_key, download_name, sha) = row
                to_key = f"{obj_key}"
                json = {
                    "bucket": gcs_bucket_name,
                    "from_key": sha,
                    "to_key": to_key,
                    "download_name": download_name
                }
                yield CLIENT.publish(TOPIC_PATH, encode_json(json), action="copy")
                maybe_update_max(state_db, LATEST_CREPO_ID_KEY, crepo_file_id)
                row = cursor.fetchone()
            cursor.close()
    finally:
        connection.close()
def main():
    """Enqueue mogile file jobs to Pub/Sub for processing.

    The first command line argument is an action: verify, migrate,
    final_migrate_rhino, final_migrate_lemur, update_<state-key> (second
    argument is the new integer value), or dump_state.
    """
    state_db = dbm.gnu.open(os.path.join(STATE_DIR, "state.db"), "cf")
    # try/finally instead of the original bare `except: close; raise`:
    # the handle is closed on every path and exceptions propagate unchanged.
    try:
        futures = None
        action = sys.argv[1]
        corpus_bucket = next(v for v in BUCKET_MAP.values() if "corpus" in v)
        non_corpus_buckets = {
            k: v
            for k, v in BUCKET_MAP.items()
            if "corpus" not in v and k not in IGNORE_BUCKETS
        }
        if action == "verify":
            generator = tqdm(
                [
                    mogile
                    for mogile in get_mogile_files_from_database(
                        os.environ["MOGILE_DATABASE_URL"]
                    )
                    if mogile.mogile_bucket not in IGNORE_BUCKETS
                ]
            )
            futures = (queue_verify(mogile) for mogile in generator)
        elif action == "migrate":
            # Resume after the last fid recorded by a previous run.
            latest_fid = get_state_int(state_db, LATEST_MOGILE_FID_KEY)
            generator = tqdm(
                [
                    mogile
                    for mogile in get_mogile_files_from_database(
                        os.environ["MOGILE_DATABASE_URL"], initial_fid=latest_fid
                    )
                    if mogile.mogile_bucket not in IGNORE_BUCKETS
                ]
            )
            futures = (queue_migrate(mogile, state_db) for mogile in generator)
        elif action == "final_migrate_rhino":
            build_shas_db(state_db)
            futures = tqdm(
                queue_rhino_final(corpus_bucket, state_db)
            )
        elif action == "final_migrate_lemur":
            build_shas_db(state_db)
            futures = tqdm(
                queue_lemur_final(non_corpus_buckets, state_db)
            )
        elif action.startswith("update_"):
            # Manually set a resume counter, e.g. `update_crepo_id 1234`.
            key = action[7:]
            assert key in [LATEST_CREPO_ID_KEY, LATEST_MOGILE_FID_KEY, LATEST_RHINO_FILE_ID_KEY]
            state_db[key] = encode_int(int(sys.argv[2]))
        elif action == "dump_state":
            for key in state_db.keys():
                print(f"{key} = {state_db[key]}")
        else:
            raise Exception(f"Bad action: {action}.")
        # Evaluate all the futures using our future_waiter, which will
        # stop occasionally to clean up any completed futures. This avoids
        # keeping too many results in memory.
        if futures is not None:
            for f in future_waiter(futures, 10000):
                pass
        state_db.sync()
    finally:
        state_db.close()
if __name__ == "__main__":
main()
|
<reponame>AmaxJ/TaskWhip
#run this file to create and populate a db with some dummy values
from datetime import datetime
import sys
import os
sys.path.insert(0, os.path.abspath('..'))
from api import db
from api.models.users import User
from api.models.tasks import Task
from api.models.groups import Group, Company
def reset_db():
    """Drop every table and recreate the schema from the current models."""
    db.drop_all()
    db.create_all()
def create_companys():
    """Insert the two demo companies (ids 1 and 2 after a fresh reset)."""
    company1 = Company(name="Chocolate.Factory",
                       website="www.site.com")
    company2 = Company(name="Larchmont Nurseries",
                       website="www.larchmontnurseries.com")
    db.session.add(company1)
    db.session.add(company2)
    db.session.commit()
    print "# Companies created.."
def create_groups():
    """Insert four demo groups; company_id references the companies seeded above."""
    group1 = Group(name="Loompas", company_id=1,
                   createdOn=datetime.now(),
                   description="Mix the chocolate")
    group2 = Group(name="Oompas", company_id=1,
                   createdOn=datetime.now(),
                   description="Gum research")
    group3 = Group(name="Gardeners", company_id=2,
                   createdOn=datetime.now(),
                   description="Landscaping duties")
    group4 = Group(name="Floor Staff", company_id=2,
                   createdOn=datetime.now(),
                   description="Assist customers")
    db.session.add(group1)
    db.session.add(group2)
    db.session.add(group3)
    db.session.add(group4)
    db.session.commit()
    print "# Groups created.."
def create_tasks():
    """Insert five demo tasks; group_id references the groups seeded earlier."""
    try:
        task1 = Task(title="Mash cocoa beans",
                     body="Make Chocolate!",
                     group_id=1, createdOn=datetime.now())
        task2 = Task(title="Side-effects mitigation",
                     body="Fix swelling reaction some people have",
                     group_id=2, createdOn=datetime.now())
        task3 = Task(title="Test flavors",
                     body="Create new flavors",
                     group_id=2, createdOn=datetime.now())
        task4 = Task(title="Trim hedges on coligni ave",
                     body="Reponsible for houses 321, 326, 327",
                     group_id=3, createdOn=datetime.now())
        task5 = Task(title="Fertilizer shipment",
                     body="Delivery at 9:00am tuesday.",
                     group_id=4, createdOn=datetime.now())
        db.session.add(task1)
        db.session.add(task2)
        db.session.add(task3)
        db.session.add(task4)
        db.session.add(task5)
        db.session.commit()
        print "# Tasks created..."
    except Exception as err:
        # best-effort seeding: report and continue with the remaining steps
        print "-- Task creation failed.."
        print err
def create_users():
    """Insert six demo users split across the two companies."""
    user1 = User(username="Wonka", email="<EMAIL>",
                 rank="Admin", company_id=1)
    user2 = User(username="Bob", email="<EMAIL>",
                 rank="employee", company_id=1)
    user3 = User(username="Ed", email="<EMAIL>",
                 rank="manager", company_id=1)
    user4 = User(username="Andy", email="<EMAIL>",
                 rank="employee", company_id=2)
    user5 = User(username="Joanna", email="<EMAIL>",
                 rank="Admin", company_id=2)
    user6 = User(username="Doug", email="<EMAIL>",
                 rank="employee", company_id=2)
    db.session.add(user1)
    db.session.add(user2)
    db.session.add(user3)
    db.session.add(user4)
    db.session.add(user5)
    db.session.add(user6)
    db.session.commit()
    print "# Users created..."
def assign_users_to_groups():
    """Attach the seeded users to groups via the Group.members relationship."""
    try:
        #company1
        a = User.query.filter_by(id=1).first()
        b = User.query.filter_by(id=2).first()
        c = User.query.filter_by(id=3).first()
        #company2
        d = User.query.filter_by(id=4).first()
        e = User.query.filter_by(id=5).first()
        f = User.query.filter_by(id=6).first()
        #company1:
        g = Group.query.filter_by(id=1).first()
        h = Group.query.filter_by(id=2).first()
        #company2:
        i = Group.query.filter_by(id=3).first()
        j = Group.query.filter_by(id=4).first()
        g.members.extend([a,b,c])
        h.members.extend([b,c])
        i.members.extend([d,e,f])
        j.members.extend([d,e])
        db.session.add(g)
        db.session.add(h)
        db.session.add(i)
        db.session.add(j)
        db.session.commit()
        print "## Group assignment successful"
    except Exception as err:
        # best-effort seeding: report and continue with the remaining steps
        print "-- Group assignment failed.."
        print err
def assign_tasks():
    """Attach the seeded tasks to users via the User.tasks relationship."""
    try:
        #company1
        a = User.query.filter_by(id=1).first()
        b = User.query.filter_by(id=2).first()
        c = User.query.filter_by(id=3).first()
        #company2
        d = User.query.filter_by(id=4).first()
        e = User.query.filter_by(id=5).first()
        f = User.query.filter_by(id=6).first()
        task1 = Task.query.filter_by(id=1).first()
        a.tasks.append(task1)
        b.tasks.append(task1)
        c.tasks.append(task1)
        task2 = Task.query.filter_by(id=2).first()
        task3 = Task.query.filter_by(id=3).first()
        b.tasks.extend([task2, task3])
        c.tasks.extend([task2, task3])
        task4 = Task.query.filter_by(id=4).first()
        task5 = Task.query.filter_by(id=5).first()
        d.tasks.extend([task4, task5])
        e.tasks.extend([task4, task5])
        f.tasks.append(task4)
        db.session.add(a)
        db.session.add(b)
        db.session.add(c)
        db.session.add(d)
        db.session.add(e)
        db.session.add(f)
        db.session.commit()
        print "## Task assignment successful"
    except Exception as err:
        # best-effort seeding: report the failure instead of aborting
        print "-- Task assignment failed.."
        print err
if __name__=='__main__':
    # Seed order matters: groups and tasks reference company/group ids
    # created earlier, and the assignments need users, groups and tasks.
    reset_db()
    create_companys()
    create_groups()
    create_users()
    create_tasks()
    assign_users_to_groups()
    assign_tasks()
|
# coding=utf-8
"""
Module to detect the folder type.
"""
import os
import re
import configparser
def get_files(path, exclude=(), ignore_symlinks=True):
    """
    Function to get files
    Parameters
    ----------
    path : str
        Path to walk through.
    exclude : List[str] or Tuple[str]
        Regex patterns for folder names to exclude. Default is ``()``.
    ignore_symlinks : bool
        Switch to ignore symlinks. Default is ``True``.
    Returns
    -------
    files : List[str]
        List of file names in the folder and subfolders.
    """
    # make it unique
    exclude = set(exclude)
    output = []
    # walk through all folders
    for root, dirs, files in os.walk(path, topdown=True):
        # prune excluded directories in place so os.walk skips them
        # (explicit filtering instead of the original side-effecting
        # list comprehensions that called .remove while iterating)
        dirs[:] = [d for d in dirs
                   if not any(re.match(ex, d) for ex in exclude)]
        if ignore_symlinks:
            # drop symlinked files
            files = [f for f in files
                     if not os.path.islink(os.path.join(root, f))]
        # add the found items to the list
        output.extend(files)
    return output
def _get_dict_cases(config_file, cases=('LAMMPS', 'GROMACS')):
    """
    Function to return a dictionary with regex patterns for the given cases and a rating of those.
    Reads in the config_file.
    Parameters
    ----------
    config_file : str or List[str]
        File path to the config file or a List of config files.
    cases : List[str] or Tuple[str]
        List of cases. Default is ``('LAMMPS', 'GROMACS')``.
    Returns
    -------
    dict_cases : dict[dict[unicode]]
        Dictionary of regex patterns for the different cases and a rating of those.
    """
    dict_cases = dict((case, dict()) for case in cases)
    if isinstance(config_file, (tuple, list)):
        for cfile in config_file:
            dict_cases.update(_get_dict_cases(cfile, cases=cases))
        return dict_cases
    # read in the config file
    config = configparser.ConfigParser(inline_comment_prefixes=';')
    config.SECTCRE = re.compile(r"\[ *(?P<header>[^]]+?) *\]")
    config.read(config_file)
    for section in config.sections():
        for case in cases:
            if section.startswith(case):
                for key, value in config[section].items():
                    # configparser values are strings: convert before the
                    # comparison below — the original compared the stored
                    # int against the raw string, which raises TypeError
                    # on Python 3 whenever a key appears in two sections.
                    rating = int(value)
                    if key not in dict_cases[case] or dict_cases[case][key] < rating:
                        dict_cases[case][key] = rating
    return dict_cases
def _get_rating_for_regex_dict(fname, regex_dictionary):
    """
    Check *fname* against every pattern in *regex_dictionary* and return the
    rating of the best matching pattern.
    Parameters
    ----------
    fname : str
        File name.
    regex_dictionary : dict
        Dictionary of (regex, rating).
    Returns
    -------
    best_rating : int
        Highest rating among the matching patterns; 0 if nothing matches.
    """
    matching_ratings = [rating
                        for pattern, rating in regex_dictionary.items()
                        if re.match(str(pattern), fname)]
    # 0 acts as the floor exactly like the original accumulator did
    return max([0] + matching_ratings)
def _compare_with_dict_cases(fname, dict_cases, ignore_warning=False):
    """
    Iterate over dict_cases, and check for every case if the filename matches.
    Then return the best case together with its rating.
    Parameters
    ----------
    fname : str
        File name.
    dict_cases : dict[dict[unicode]]
        Dictionary of regex patterns for the different cases and a rating of those.
    ignore_warning : bool
        Switch if UserWarning should be ignored. Default is ``False``.
    Returns
    -------
    best_case : str
        Best found case in dict_cases; ``""`` when nothing matched.
    best_rating : int
        Best found rating; 0 when nothing matched.
    """
    # NOTE: the original docstring listed the return values in the wrong
    # order (rating first); the code returns (best_case, best_rating).
    best_rating, cases = 0, []
    for key, regex_dict in dict_cases.items():
        rating = _get_rating_for_regex_dict(fname, regex_dict)
        if rating == 0:
            continue
        elif rating > best_rating:
            best_rating = rating
            cases = [key]
        elif rating == best_rating:
            # tie: remember every case so we can warn about the ambiguity
            cases.append(key)
    if not ignore_warning and len(cases) > 1:
        # message fixed: the original read "decide decide"
        raise UserWarning("Could not decide about the folder structure."
                          "\ncases: {}".format(cases))
    best_case = cases.pop(0) if len(cases) > 0 else ""
    return best_case, best_rating
def guess_folder_type_from_files(files, dict_cases, ignore_warning=True):
    """
    Function to guess the folder type from a list of file names.
    Parameters
    ----------
    files : List[str]
        List of file names.
    dict_cases : dict[dict[unicode]]
        Dictionary of regex patterns for the different cases and a rating of those.
    ignore_warning : bool
        Switch if UserWarning should be ignored. Default is ``True``.
    Returns
    -------
    folder_type : str or None
        Guessed folder type; ``None`` when nothing matched or *files* is empty.
    """
    # guard: the original indexed file_ratings[0] and raised IndexError
    # when called with an empty file list (e.g. an empty folder)
    if not files:
        return None
    file_ratings = sorted([_compare_with_dict_cases(fname, dict_cases=dict_cases, ignore_warning=ignore_warning)
                           for fname in files],
                          key=lambda x: x[1],
                          reverse=True)
    folder_type = file_ratings[0][0]
    if file_ratings[0][1] <= 0:
        folder_type = None
    return folder_type
def guess_folder_type(path, cases=('LAMMPS', 'GROMACS'), dir_ignore=(), config_file=None, ignore_warning=True):
    """
    Guess the folder_type
    Parameters
    ----------
    path : str
        Path to guess the folder structure from.
    cases : List[str]
        List of cases to use. Default is ``('LAMMPS', 'GROMACS')``.
    dir_ignore : List[str]
        Folders to ignore. Default is ``()``.
    config_file : str or None
        Config files to use. If ``None`` the 'regex_weights.ini' in the module will be used.
        Default is ``None``.
    ignore_warning : bool
        Switch if UserWarning should be ignored. Default is ``True``.
    Returns
    -------
    folder_type : str
        Guessed folder type.
    """
    if config_file is None:
        # fall back to the weight table shipped next to this module
        config_file = os.path.join(os.path.dirname(__file__), 'regex_weights.ini')
    dict_cases = _get_dict_cases(config_file=config_file, cases=cases)
    files = get_files(path, exclude=dir_ignore)
    folder_type = guess_folder_type_from_files(files, dict_cases=dict_cases, ignore_warning=ignore_warning)
    return folder_type
|
<filename>rc_pose_controller/src/pose_controller_with_reg_func_goal_tolerance.py
#! /usr/bin/env python
# coding: utf-8
# This is pos controller for like-car robot
import math
import numpy as np
import time
import rospy
import tf
from geometry_msgs.msg import Twist, Pose, TwistStamped, PoseStamped
from geometry_msgs.msg import Twist
from dynamic_reconfigure.server import Server
from rc_bringup.cfg import PoseControllerConfig
from rc_car_msgs.msg import CarPose
# controller state (shared across callbacks via `global`)
velocity = float()
cmd_vel_msg = Twist()
current_pose = Pose()
current_course = float()
goal_pose = Pose()
init_flag = False
max_vel = 1.1 # m/s
min_vel = -1.5 # m/s
max_angle = 25  # steering command limit; units depend on the cmd interface — confirm
finish_flag = True
goal_tolerance = 0.2  # m, goal acceptance radius
dist=0.0
# topic names
cmd_vel_topic = "/cmd_vel"
vel_topic = "/mavros/local_position/velocity"
goal_topic = "/goal"
pose_topic = "/mavros/local_position/pose"
# regulator state: desired speed, PID errors/integrals and bookkeeping
v_des=0.0
Ev=0.0
Erot=0.0
u_v=0.0
u_rot=0.0
plot_x=[0]
plot_y=[0]
v=0.0
sumErot=0
sumEv=0
distance=0
def trap_profile_linear_velocity(x, xy_des, v_max):
    """Trapezoidal-like speed profile towards a goal pose.

    Parameters
    ----------
    x : object with ``x``/``y`` attributes
        Current position (e.g. ``Pose().position``).
    xy_des : object with a ``position`` attribute
        Goal pose; only ``position.x`` / ``position.y`` are read.
    v_max : float
        Cruise speed used while far from the goal (m/s).

    Returns
    -------
    float
        Desired speed: ramps down to 0 as the goal is approached and
        never exceeds ``v_max``.
    """
    d = np.sqrt((x.x - xy_des.position.x) ** 2 + (x.y - xy_des.position.y) ** 2)
    if d < 1.0:
        # d / (1 - d) ramps from 0 (at the goal) upwards; clamp it so the
        # profile saturates at v_max instead of blowing up as d -> 1.
        # (The original form -d / (d - 1) divided by zero at d == 1 and
        # returned speeds above v_max for d close to 1.)
        v_des = min(d / (1.0 - d), v_max)
    else:
        v_des = v_max
    return v_des
def rot_controller(Erot, Erot_old, sumErot, dT):
    """PID regulator for the heading (rotation) error.

    Combines a proportional term on the current error, an integral term
    on the accumulated error and a derivative term on the error change
    over the last ``dT`` seconds.
    """
    KP, KI, KD = 0.9, 0.0229, 0.00477
    proportional = KP * Erot
    integral = KI * sumErot
    derivative = KD * (Erot - Erot_old) / dT
    return proportional + integral + derivative
def velocity_controller(Ev, Ev_old, sumEv, dT):
    """PID regulator for the linear-velocity error.

    Same structure as ``rot_controller`` but with gains tuned for the
    speed loop.
    """
    KP, KI, KD = 0.1, 0.87, 0.001
    proportional = KP * Ev
    integral = KI * sumEv
    derivative = KD * (Ev - Ev_old) / dT
    return proportional + integral + derivative
def main():
    """Compute one control step and return the updated cmd_vel message.

    Reads the module-level pose/goal state, runs the speed and heading
    PID loops, saturates both outputs and writes the steering command
    into ``cmd_vel_msg``. Relies on the global ``dt`` set by the loop
    in ``__main__``.
    """
    global dt, current_pose, current_course, goal_pose, cmd_vel_msg , u_v, u_rot, Ev, Erot,sumErot,sumEv, plot_x,plot_y, v_des, leinght_v,leinght_rot,v
    # Desired speed from the trapezoidal profile towards the goal.
    v_des=trap_profile_linear_velocity(current_pose.position,goal_pose,max_vel)
    # Finite-difference speed estimate from the previous position sample
    # stored in plot_x/plot_y. NOTE(review): divides by dt, which can be
    # zero or near-zero on the first iteration -- confirm.
    dx=(current_pose.position.x-plot_x[0])/dt
    dy=(current_pose.position.y-plot_y[0])/dt
    plot_x[0]=current_pose.position.x
    plot_y[0] = current_pose.position.y
    v = np.sqrt(dx**2+dy**2)
    # Speed PID on the error between desired and measured speed.
    Ev_old=Ev
    Ev=v_des-v
    sumEv=sumEv+Ev
    u_v=velocity_controller(Ev,Ev_old,sumEv,dt)
    # Saturate both controller outputs.
    u_v_constraints = [min_vel,max_vel]
    u_alpha_constraints=[-max_angle,max_angle]
    if u_v>u_v_constraints[1]:
        u_v = u_v_constraints[1]
    elif u_v<u_v_constraints[0]:
        u_v = u_v_constraints[0]
    # Heading PID: bearing to the goal minus the current yaw.
    # NOTE(review): the difference is not wrapped to [-pi, pi]; the
    # original author flagged this line as suspicious -- confirm.
    Erot_old=Erot
    Erot=np.arctan2(goal_pose.position.y-current_pose.position.y,goal_pose.position.x-current_pose.position.x)-current_course ############!!!!!!!
    sumErot=sumErot+Erot
    u_rot = rot_controller(Erot,Erot_old,sumErot,dt)
    if u_rot>u_alpha_constraints[1]:
        u_rot=u_alpha_constraints[1]
    elif u_rot<u_alpha_constraints[0]:
        u_rot = u_alpha_constraints[0]
    vel_and_angle=[u_v,u_rot]
    # NOTE(review): only the steering angle is written to the message;
    # the linear component is left commented out -- confirm intent.
    #cmd_vel_msg.linear.x=vel_and_angle[0]
    cmd_vel_msg.angular.z=vel_and_angle[1]
    return cmd_vel_msg
def goal_clb(data):
    """Store a newly received goal pose and (re)arm the controller."""
    global goal_pose, init_flag, finish_flag
    goal_pose = data.pose
    # A fresh goal means the controller is initialised and not finished.
    init_flag, finish_flag = True, False
def current_pose_clb(data):
    """Cache the latest pose and extract its yaw as the current course."""
    global current_pose, current_course
    current_pose = data.pose
    q = data.pose.orientation
    quaternion = [q.x, q.y, q.z, q.w]
    # euler_from_quaternion returns (roll, pitch, yaw); only yaw is needed.
    current_course = tf.transformations.euler_from_quaternion(quaternion)[2]
def cfg_callback(config, level):
    """Dynamic-reconfigure handler: refresh the velocity/steering limits."""
    global max_vel, min_vel, max_angle
    max_vel, min_vel = float(config["max_vel"]), float(config["min_vel"])
    # The steering limit arrives in degrees but is used in radians.
    max_angle = math.radians(float(config["max_angle"]))
    return config
def callback(config):
    """Log the applied configuration (max_vel) for debugging."""
    message = "Config set to {max_vel}".format(**config)
    rospy.loginfo(message)
if __name__ == "__main__":
    # init ros node
    rospy.init_node('rc_pos_controller', anonymous=True)
    rate = rospy.Rate(10) # 10hz
    # init dynamic reconfigure server
    cfg_srv = Server(PoseControllerConfig, cfg_callback)
    # Get ros args: private params override the module-level topic names
    # and limits; the limits are also pushed back into dynamic reconfigure.
    if rospy.has_param('~vel_topic'):
        vel_topic = rospy.get_param('~vel_topic', vel_topic)
    if rospy.has_param('~cmd_vel'):
        cmd_vel_topic = rospy.get_param('~cmd_vel', cmd_vel_topic)
    if rospy.has_param('~goal_topic'):
        goal_topic = rospy.get_param('~goal_topic', goal_topic)
    if rospy.has_param('~pose_topic'):
        pose_topic = rospy.get_param('~pose_topic', pose_topic)
    if rospy.has_param('~max_vel'):
        max_vel = rospy.get_param('~max_vel', max_vel)
        cfg_srv.update_configuration({"max_vel": max_vel})
    if rospy.has_param('~min_vel'):
        min_vel = rospy.get_param('~min_vel', min_vel)
        cfg_srv.update_configuration({"min_vel": min_vel})
    if rospy.has_param('~max_angle'):
        max_angle = rospy.get_param('~max_angle', max_angle)
        cfg_srv.update_configuration({"max_angle": max_angle})
    # start subscriber
    #rospy.Subscriber(vel_topic, TwistStamped, vel_clb)
    rospy.Subscriber(goal_topic, PoseStamped, goal_clb)
    rospy.Subscriber(pose_topic, PoseStamped, current_pose_clb)
    vec_pub = rospy.Publisher(cmd_vel_topic, Twist, queue_size=10)
    listener = tf.TransformListener()
    old_ros_time = rospy.get_time()
    currentTime = 0.0
    rate.sleep()
    try:
        # Main control loop at ~10 Hz.
        while not rospy.is_shutdown():
            dt = rospy.get_time() - old_ros_time
            currentTime += dt
            if(not init_flag):
                # No goal received yet; report roughly once per second.
                if currentTime > 1.0:
                    print("pose controller: not init")
                    currentTime = 0.0
                # NOTE(review): this 'continue' skips rate.sleep() and
                # leaves old_ros_time stale, so the loop busy-spins and dt
                # keeps growing until a goal arrives -- confirm and
                # consider sleeping before continuing.
                continue
            old_ros_time = rospy.get_time()
            cmd_vel_msg = main()
            if finish_flag:
                # Goal finished: stop the car and wait for a new goal.
                # NOTE(review): nothing in this script sets finish_flag
                # back to True (goal_tolerance is unused) -- confirm.
                if currentTime > 1.0:
                    print("pose controller: finish_flag True")
                    currentTime = 0.0
                cmd_vel_msg.linear.x = 0.0
                init_flag = False
            vec_pub.publish(cmd_vel_msg) # publish msgs to the robot
            rate.sleep()
    except KeyboardInterrupt: # if put ctr+c
        exit(0)
|
### Task Serializers ###
from datetime import timedelta
from django.utils import timezone
from django.core.urlresolvers import reverse
from rest_framework import serializers
from task.models import Task, TaskDirectory, Assignment
from item.models import Item
from slave.settings import *
class AssignmentSerializer(serializers.ModelSerializer):
    """ Serialize Assignments, exposing read-only details of the related
    Task (name, workflow id, readable type) and the assignment dates. """
    task_name = serializers.SerializerMethodField(read_only=True)
    task_workflow = serializers.SerializerMethodField(read_only=True)
    task_type = serializers.SerializerMethodField(read_only=True)
    date_assigned = serializers.SerializerMethodField(read_only=True)
    date_released = serializers.SerializerMethodField(read_only=True)

    # FIXME Add filter by owner and same for Slave selector.
    # Only tasks that are not yet retrieved may be chosen.
    task = serializers.PrimaryKeyRelatedField(queryset=Task.objects.filter(_retrieved=False))

    class Meta:
        model = Assignment
        fields = ('id', 'task', 'task_name', 'task_workflow', 'task_type', 'slave', 'date_assigned', 'date_released')

    def get_task_name(self, object):
        """ Get task name. """
        return str(object.task)

    def get_task_workflow(self, object):
        """ Get task workflow (the id of the task's type). """
        return object.task.type.id

    def get_task_type(self, object):
        """ Get readable task type. """
        return object.task.type.get_type_readable()

    def get_date_assigned(self, object):
        """ Get assignment date. """
        return object.get_date_assigned()

    def get_date_released(self, object):
        """ Get release date. """
        return object.get_date_released()

    def validate_slave(self, slave):
        """ Game logic and some authorization is checked here.

        Checks run in order and the first failure raises a
        ValidationError, so the order determines which error message the
        client sees.
        """
        #print("Slave and Game Logic validation")
        # We make both validation for Slave and Game Logic in this
        # method, to avoid Task and Slave objects move here and there
        # many times.
        # We also assume that Task ownership was checked in POST
        # validation earlier and we consider Task owner to be the
        # current User.
        task = Task.objects.get(pk=self.initial_data['task'])

        # Verify that Slave and Task have the same owner.
        if slave.get_owner() != task.get_owner():
            #print("Owner of slave {0} is {1}, but owner of task {2} is {3}. Failed to assign!" \
            #        .format(slave, slave.get_owner(), task, task.get_owner()))
            raise serializers.ValidationError("Authorization error for this Slave.")

        # Verify that Task is running.
        if task.is_retrieved():
            raise serializers.ValidationError("Assignment error. Task is finished and retrieved.")

        # Verify that Task has open vacancy.
        if not task.has_open_vacancy():
            raise serializers.ValidationError("Assignment error. The maximum slaves are working on this task already.")

        # Verify that Task location has free space.
        if not task.has_free_space_in_location():
            raise serializers.ValidationError("Assignment error. The Task Location is overcrowded.")

        # Verify that Slave and Task are currently in the same Region.
        if not slave.get_location().get_region() == task.get_location().get_region():
            raise serializers.ValidationError("Region error. Slave is in wrong region.")

        # Verify that Slave is idle.
        if slave.get_assignments(active=True).count() > 0:
            raise serializers.ValidationError("Assignment error. Slave is busy.")

        # Verify that Slave is of appropriate age.
        if not slave.is_alive():
            raise serializers.ValidationError("Assignment error. Slave is dead.")
        if slave.is_baby():
            raise serializers.ValidationError("Assignment error. Slave is too young.")

        # Verify that Slave is qualified for this Task.
        slave_skills = slave.get_trained_skills()
        # Required primary and secondary skills.
        ps = task.get_primary_skill()
        if ps in list(slave_skills.keys()) and slave_skills[ps] > 0:
            print("The slave posesses primary skill.")
        else:
            raise serializers.ValidationError("Assignment error. Slave is not qualified for the task.")

        return slave
class TaskSerializer(serializers.ModelSerializer):
    """ List-level Task representation with computed name, progress and
    active-assignment count. """
    name = serializers.SerializerMethodField(read_only=True)
    percent_completed = serializers.SerializerMethodField(read_only=True)
    active_assignments_count = serializers.SerializerMethodField(read_only=True)
    date_start = serializers.DateTimeField(read_only=True)
    date_finish = serializers.DateTimeField(read_only=True)

    class Meta:
        model = Task
        fields = ('id', 'name', 'type', 'percent_completed', 'active_assignments_count', 'is_retrieved', 'location', 'owner', 'date_start', 'date_finish')

    def get_name(self, object):
        """ Get readable name for Task. """
        return object.get_name_readable()

    def get_percent_completed(self, object):
        """ Calculate the percentage of finished. """
        # Farming tasks (time fixed)
        if object.get_type().is_time_fixed():
            # Return time delta (now-start)/(finish-start), capped at 100
            # once the finish date has passed.
            return 100 if timezone.now() > object.get_date_finish() else \
                int((timezone.now() - object.get_date_start()) * 100 / \
                (object.get_date_finish() - object.get_date_start()))
        # Crafting, building tasks (work fixed)
        # Return work required - (fulfilled + current_work_per_day * timedelta(estimated_finish - last_update)
        elif object.get_type().is_work_fixed():
            work_units = object.get_type().get_param('work_units')
            fulfilled = object.get_fulfilled()
            # We get a result of actually fulfilled and saved amount of work
            result = 100.0 if fulfilled > work_units else \
                fulfilled * 100.0 / work_units
            # WARNING! The following can cause high loads. Monitor!
            # Check if somebody is working on this task now.
            running_assignments = object.get_assignments(running=True)
            if running_assignments.count() > 0:
                # Predict estimate of work done since last Task update.
                last_update = object.get_date_updated()
                current_work_per_day = 0
                for a in running_assignments:
                    current_work_per_day += a.get_work_per_day()
                result += ((timezone.now() - last_update).seconds / GAME_DAY) * current_work_per_day
            #print("Task {0} is actually {1}% completed.".format(object, result))
            # We return floored int to avoid float number problems in API.
            return int(result)
        else:
            # In case some new task types appear.
            return 0

    def get_active_assignments_count(self, object):
        """ Shows the number of active assignments for the task. """
        return object.get_assignments(running=True).count()

    def validate(self, data):
        """ A little gameplay logic validation. """
        # FIXME! Move this AWAY from serializer to Controller!!!
        # Verify sufficient materials.
        # print(data)
        # A little sleepy now to use cooler search in ordered dictionary
        # As long as data is OrderedDict we convert it to list and take
        # the required params only. We need location to determine the region
        # and the TaskWorkflow to get the recipe.
        # NOTE(review): 'type' shadows the builtin, and both names raise
        # NameError if the keys are missing from data -- confirm upstream
        # field validation guarantees their presence.
        for i in list(data.items()):
            if i[0] == 'type':
                type = i[1]
            elif i[0] == 'location':
                location = i[1]
        ingredients = type.get_param('ingredients')
        #print("INGREDIENTS, SELF:", ingredients, self)
        # As long as there might be no ingredient required, it might be False.
        # Otherwise we check if there are sufficient materials in storage.
        # This will be checked again later while saving.
        if ingredients:
            hq = location.region.get_locations('hq').first()
            for i in ingredients:
                if not Item.objects.exists(item=i.ingredient, location=hq, amount=i.amount):
                    raise serializers.ValidationError( "Not enough ingredient {0}".format(i.ingredient))
        return data
class TaskDetailSerializer(TaskSerializer):
    """ Detailed Task representation: adds the yield/fulfilled counters,
    the last-update date and the nested list of assignments. """
    _yield = serializers.FloatField(default=0.0, read_only=True)
    _fulfilled = serializers.FloatField(default=0.0, read_only=True)
    date_updated = serializers.DateTimeField(read_only=True)
    assignments = AssignmentSerializer(many=True, read_only=True)

    class Meta:
        model = Task
        fields = ('id', 'name', 'type', 'percent_completed', 'active_assignments_count','is_retrieved', 'location', 'owner', '_fulfilled', '_yield', 'date_start', 'date_finish', 'date_updated', 'assignments')

    def validate__yield(self, value):
        """ Reset yield to zero on creation, keep the value on update. """
        # On creation any client-provided value is ignored per game logic
        # (this might not be RESTful, but it is not worth an error).
        if not self.instance:
            return 0.0
        # BUGFIX: this method previously fell through and implicitly
        # returned None on updates, wiping the stored yield on every PUT
        # (the old "this fucks the task on PUT request" FIXME).
        return value

    def validate__fulfilled(self, value):
        """ Reset fulfilled to zero on creation, keep the value on update. """
        if not self.instance:
            return 0.0
        # BUGFIX: same implicit-None problem as validate__yield.
        return value

    def validate_location(self, location):
        """ Location must be of correct type and have minimum free space. """
        task_type = TaskDirectory.objects.get(pk=self.initial_data.get('type'))
        # Authorization of the location owner happens in the VIEW class,
        # as the owner is no longer passed manually.
        # Verify location type.
        if not location.get_type() == task_type.get_location_type():
            raise serializers.ValidationError("Wrong type of location for the task.")
        # Verify free space in location. The actual USE (reservation) of
        # area happens later on a per Slave (per Assignment) basis; here we
        # only check for the per-worker minimum.
        if not location.get_free_area() >= task_type.get_area_per_worker():
            raise serializers.ValidationError("Not enough minimum free space in location.")
        # Succeeded with Location verification.
        return location

    def create(self, validated_data):
        """ Create the Task from the validated payload. """
        return Task.objects.create(**validated_data)
class TaskDirectorySerializer(serializers.ModelSerializer):
    """ Serialize TaskDirectory items for interface forms. """

    class Meta:
        # Exposes the task-type catalogue fields used by the client forms.
        model = TaskDirectory
        fields = ('id', 'name', 'location_type', 'area_per_worker', 'max_slaves')
#!/usr/bin/env python
# coding: utf-8
# In[1]:
"""
LICENSE MIT
2020
<NAME>
Website : http://www.covidtracker.fr
Mail : <EMAIL>
README:
This file contains scripts that download data from data.gouv.fr and then process it to build many graphes.
I'm currently cleaning the code, please ask me if something is not clear enough.
The charts are exported to 'charts/images/france'.
Data is download to/imported from 'data/france'.
Requirements: please see the imports below (use pip3 to install them).
"""
# In[2]:
import pandas as pd
import numpy as np
import plotly.graph_objects as go
import france_data_management as data
from datetime import datetime
from datetime import timedelta
import plotly
import math
import os
import json
from plotly.subplots import make_subplots
# Base paths for input data and exported statistics.
PATH = "../../"
PATH_STATS = "../../data/france/stats/"

import locale
# French locale so strftime('%d %B %Y') renders month names in French.
locale.setlocale(locale.LC_ALL, 'fr_FR.UTF-8')

# Chart colours for the vaccination-status comparison plots.
COULEUR_NON_VACCINES = "#C65102"
COULEUR_COMPLETEMENT_VACCINES = "#00308F"

# In[3]:
# Load all national datasets (hospital, tests, incidence, ...).
df, df_confirmed, dates, df_new, df_tests, df_deconf, df_sursaud, df_incid, df_tests_viros = data.import_data()

# In[4]:
# Aggregate per region and per day; keep unique region names and dates.
df_regions = df.groupby(["jour", "regionName"]).sum().reset_index()
df_incid_regions = df_incid[df_incid["cl_age90"] == 0].groupby(["jour", "regionName"]).sum().reset_index()
regions = list(dict.fromkeys(list(df_regions['regionName'].values)))
dates_incid = list(dict.fromkeys(list(df_incid['jour'].values)))
# x-axis upper bound for the charts: one day after the last data point.
last_day_plot = (datetime.strptime(max(dates), '%Y-%m-%d') + timedelta(days=1)).strftime("%Y-%m-%d")
df_new_regions = df_new.groupby(["jour", "regionName"]).sum().reset_index()

# In[5]:
# Intensive-care bed counts per departement.
lits_reas = pd.read_csv(PATH+'data/france/lits_rea.csv', sep=",")

# In[6]:
# Map departements to regions and aggregate the bed counts per region.
regions_deps = df.groupby(["departmentName", "regionName"]).sum().reset_index().loc[:,["departmentName", "regionName"]]
lits_reas = lits_reas.merge(regions_deps, left_on="nom_dpt", right_on="departmentName").drop(["nom_dpt"], axis=1)
lits_reas_regs = lits_reas.groupby(["regionName"]).sum().reset_index()
df_regions = df_regions.merge(lits_reas_regs, left_on="regionName", right_on="regionName")

# In[7]:
# DREES regional data split by vaccination status, joined with region names.
df_drees_regions = pd.read_csv("https://data.drees.solidarites-sante.gouv.fr/explore/dataset/covid-19-resultats-regionaux-issus-des-appariements-entre-si-vic-si-dep-et-vac-s/download/?format=csv&timezone=Europe/Berlin&lang=fr&use_labels_for_header=true&csv_separator=%3B", sep=";")
noms_regions = pd.read_csv(PATH+"data/france/noms_regions_code_drees.csv", sep=",")
df_drees_regions = df_drees_regions.merge(noms_regions, left_on="region", right_on="codeRegionDrees")

# In[8]:
#data.download_data_variants_regs()
#df_variants = data.import_data_variants_regs()
# In[9]:
"""def nombre_variants(region):
df_incid_reg = df_incid_regions[df_incid_regions["regionName"] == region]
df_incid_reg["P_rolling"] = df_incid_reg["P"].rolling(window=7).mean()
df_variants_reg = df_variants[df_variants["reg"] == df_incid_dep["reg"].values[0]]
fig = go.Figure()
n_days = len(df_variants_dep)
y=df_incid_dep["P_rolling"].values[-n_days:] * df_variants_dep.Prc_susp_501Y_V1.values/100
proportion = str(round(y[-1]/df_incid_dep["P_rolling"].values[-1]*100, 1)).replace(".", ",")
fig.add_trace(
go.Scatter(
x=df_variants_dep.jour,
y=y,
name="<b>Variant UK </b><br>" + str(nbWithSpaces(y[-1])) + " cas (" + proportion + " %)",
stackgroup='one'
)
)
y=df_incid_dep["P_rolling"].values[-n_days:] * df_variants_dep.Prc_susp_501Y_V2_3.values/100
proportion = str(round(y[-1]/df_incid_dep["P_rolling"].values[-1]*100, 1)).replace(".", ",")
fig.add_trace(
go.Scatter(
x=df_variants_dep.jour,
y=y,
name="<b>Variants SA + BZ </b><br>" + str(nbWithSpaces(y[-1])) + " cas (" + proportion + " %)",
showlegend=True,
stackgroup='one'
)
)
y=df_incid_dep["P_rolling"].values[-n_days:] * df_variants_dep.Prc_susp_IND.values/100
proportion = str(round(y[-1]/df_incid_dep["P_rolling"].values[-1]*100, 1)).replace(".", ",")
fig.add_trace(
go.Scatter(
x=df_variants_dep.jour,
y=y,
name="<b>Variants indéterminés </b><br>" + str(nbWithSpaces(y[-1])) + " cas (" + proportion + " %)",
showlegend=True,
stackgroup='one'
)
)
y=df_incid_dep["P_rolling"].values[-n_days:] * df_variants_dep.Prc_susp_ABS.values/100
proportion = str(round(y[-1]/df_incid_dep["P_rolling"].values[-1]*100, 1)).replace(".", ",")
fig.add_trace(
go.Scatter(
x=df_variants_dep.jour,
y=y,
name="<b>Souche classique </b><br>" + str(nbWithSpaces(y[-1])) + " cas (" + proportion + " %)",
showlegend=True,
stackgroup='one'
)
)
fig.update_yaxes(ticksuffix="")
fig.update_layout(
title={
'text': "Nombre de variants dans les cas détectés - " + departement,
'y':0.97,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top',
'font': {'size': 20}
},
annotations = [
dict(
x=0.5,
y=1.1,
xref='paper',
yref='paper',
text='Date : {}. Données : Santé publique France. Auteur : @guillaumerozier - covidtracker.fr.'.format(datetime.strptime(max(dates), '%Y-%m-%d').strftime('%d %B %Y')),
showarrow = False
)]
)
fig.write_image(PATH+"images/charts/france/departements_dashboards/{}.jpeg".format("variants_nombre_"+departement), scale=1.5, width=750, height=500)
"""
# In[10]:
def hosp_statut_vaccinal(region):
    """Export a chart of daily Covid hospital admissions per 10 million
    people of each vaccination-status group, for one region.

    Uses the module-level DREES dataset (df_drees_regions) and writes a
    JPEG to images/charts/france/regions_dashboards/.
    """
    df_drees_reg = df_drees_regions[df_drees_regions["regionName"] == region]
    df_drees_reg = df_drees_reg.sort_values(by="date")
    # Split by vaccination status; "effectif J-7" (group size) is smoothed
    # over 7 days, matching the smoothing applied to the admission counts.
    df_drees_non_vaccines = df_drees_reg[df_drees_reg["vac_statut"]=="Non-vaccinés"]
    df_drees_non_vaccines["effectif J-7"] = df_drees_non_vaccines["effectif J-7"].rolling(window=7).mean()
    df_drees_completement_vaccines = df_drees_reg[df_drees_reg["vac_statut"].isin(["Vaccination complète",])].groupby("date").sum().reset_index()
    df_drees_completement_vaccines["effectif J-7"] = df_drees_completement_vaccines["effectif J-7"].rolling(window=7).mean()
    df_drees_partiellement_vaccines = df_drees_reg[df_drees_reg["vac_statut"].isin(["Primo dose récente", "Primo dose efficace"])].groupby("date").sum().reset_index()
    df_drees_partiellement_vaccines["effectif J-7"] = df_drees_partiellement_vaccines["effectif J-7"].rolling(window=7).mean()
    fig = go.Figure()
    # One smoothed line plus a marker on the last point for each group;
    # y = 7-day mean of "HC_PCR+" admissions per 10,000,000 group members.
    fig.add_trace(
        go.Scatter(
            x=df_drees_non_vaccines["date"].values,
            y=df_drees_non_vaccines["HC_PCR+"].rolling(window=7).mean() / df_drees_non_vaccines["effectif J-7"] * 10000000,
            name="Non vaccinés",
            line_color="#C65102",
            line_width=4
        )
    )
    fig.add_trace(
        go.Scatter(
            x=[df_drees_non_vaccines["date"].values[-1]],
            y=[(df_drees_non_vaccines["HC_PCR+"].rolling(window=7).mean() / df_drees_non_vaccines["effectif J-7"] * 10000000).values[-1]],
            name="Non vaccinés",
            line_color="#C65102",
            marker_size=10,
            showlegend=False
        )
    )
    fig.add_trace(
        go.Scatter(
            x=df_drees_partiellement_vaccines["date"].values,
            y=df_drees_partiellement_vaccines["HC_PCR+"].rolling(window=7).mean() / df_drees_partiellement_vaccines["effectif J-7"] * 10000000,
            name="Partiellement vaccinés",
            line_color="#4777d6",
            line_width=4
        )
    )
    fig.add_trace(
        go.Scatter(
            x=[df_drees_partiellement_vaccines["date"].values[-1]],
            y=[(df_drees_partiellement_vaccines["HC_PCR+"].rolling(window=7).mean() / df_drees_partiellement_vaccines["effectif J-7"] * 10000000).values[-1]],
            name="Partiellement vaccinés",
            line_color="#4777d6",
            marker_size=10,
            showlegend=False
        )
    )
    fig.add_trace(
        go.Scatter(
            x=df_drees_completement_vaccines["date"].values,
            y=df_drees_completement_vaccines["HC_PCR+"].rolling(window=7).mean() / df_drees_completement_vaccines["effectif J-7"] * 10000000,
            name="Vaccinés",
            line_color="#00308F",
            line_width=4
        )
    )
    fig.add_trace(
        go.Scatter(
            x=[df_drees_completement_vaccines["date"].values[-1]],
            y=[(df_drees_completement_vaccines["HC_PCR+"].rolling(window=7).mean() / df_drees_completement_vaccines["effectif J-7"] * 10000000).values[-1]],
            name="Vaccinés",
            line_color="#00308F",
            marker_size=10,
            showlegend=False
        )
    )
    # Disabled alternative trace kept for reference (no-op string literal).
    """fig.add_trace(
        go.Scatter(
            x=df_drees_partiellement_vaccines["date"].values,
            y=df_drees_partiellement_vaccines["HC"].rolling(window=7).mean() / df_drees_partiellement_vaccines["n_dose1"].rolling(window=30).sum() * 1000000,
            name="Partiellement vaccinés",
            line_color="#1E90FF",
            line_width=3
        )
    )"""
    fig.update_layout(
        legend=dict(
            yanchor="top",
            y=0.99,
            xanchor="left",
            x=0.01,
            bgcolor="rgba(256,256,256,0.8)"
        ),
        margin=dict(
            r=160
        ),
        title={
            'text': "<b>Admissions à l'hôpital</b> pour Covid - " + region,
            'y':0.97,
            'x':0.5,
            'xanchor': 'center',
            'yanchor': 'top'},
        titlefont = dict(
            size=25),
        annotations = [
            dict(
                x=0.5,
                y=1.12,
                xref='paper',
                yref='paper',
                font=dict(size=14),
                text="selon le statut vaccinal, pour 10 Mio hab. de chaque groupe - {}<br>Données DREES - @GuillaumeRozier - covidtracker.fr".format(datetime.strptime(df_drees_reg.date.max(), '%Y-%m-%d').strftime('%d %B %Y')),#'Date : {}. Source : Santé publique France. Auteur : GRZ - covidtracker.fr.'.format(), showarrow = False
                showarrow=False
            ),
        ]
    )
    # End-of-curve callouts with the latest rates of the two extreme groups.
    y=df_drees_non_vaccines["HC_PCR+"].rolling(window=7).mean().values[-1] / df_drees_non_vaccines["effectif J-7"].values[-1] * 10000000
    fig.add_annotation(
        x=df_drees_reg.date.max(),
        y=y,
        text="<b>" + str(int(round(y))) + " admissions<br>non vaccinées</b><br>/ 10 Mio de non vaccinés",
        font=dict(color=COULEUR_NON_VACCINES),
        showarrow=False,
        align="left",
        yshift=0,
        xshift=105
    )
    y=df_drees_completement_vaccines["HC_PCR+"].rolling(window=7).mean().values[-1] / df_drees_completement_vaccines["effectif J-7"].values[-1] * 10000000
    fig.add_annotation(
        x=df_drees_reg.date.max(),
        y=y,
        text="<b>" + str(int(round(y))) + " admissions<br>complètement vaccinées</b><br>/ 10 Mio de vaccinés",
        font=dict(color=COULEUR_COMPLETEMENT_VACCINES),
        showarrow=False,
        align="left",
        yshift=0,
        xshift=105,
    )
    # Methodology footnote below the plot area.
    fig.add_annotation(
        x=0.5,
        y=-0.225,
        xref='paper',
        yref='paper',
        text="<i>Une personne est considérée comme vaccinée après avoir terminé son schéma vaccinal. Hospitalisations avec test PCR+ dans les 21 jours avant/après l'admission.</i>",
        font=dict(size=9),
        showarrow=False,
        yshift=30
    )
    fig.update_yaxes(title="Admissions quot. / 10 Mio hab. de chaque groupe")
    fig.update_xaxes(tickformat="%d/%m", range=[datetime.strptime(df_drees_reg.date.min(), '%Y-%m-%d') + timedelta(days=5),
                                                datetime.strptime(df_drees_reg.date.max(), '%Y-%m-%d') + timedelta(days=2)])
    name_fig = "hc_proportion_selon_statut_vaccinal"
    fig.write_image(PATH + "images/charts/france/regions_dashboards/{}.jpeg".format(name_fig+"_"+region), scale=1.5, width=900, height=600)
# In[11]:
def cas_journ(region):
    """Export a chart of daily confirmed Covid-19 cases (7-day centred
    rolling mean, secondary y axis) over the number of tests performed
    (grey bars, primary y axis) for one region."""
    df_incid_reg = df_incid_regions[df_incid_regions["regionName"] == region]
    # 7-day centred rolling means of positives (P) and tests (T).
    df_incid_reg_rolling = df_incid_reg["P"].rolling(window=7, center=True).mean()
    df_tests_reg_rolling = df_incid_reg["T"].rolling(window=7, center=True).mean()
    range_x, name_fig, range_y = ["2020-03-29", last_day_plot], "cas_journ_"+region, [0, df_incid_reg["P"].max()]
    title = "<b>Cas positifs</b> au Covid19 - <b>" + region + "</b>"
    #fig = go.Figure()
    # Two y axes: cases on the secondary axis, tests on the primary one.
    fig = make_subplots(rows=1, cols=1, shared_yaxes=True, subplot_titles=[""], vertical_spacing = 0.08, horizontal_spacing = 0.1, specs=[[{"secondary_y": True}]])
    # Smoothed positives as a filled area.
    fig.add_trace(go.Scatter(
        x = df_incid_reg["jour"],
        y = df_incid_reg_rolling,
        name = "",
        marker_color='rgb(8, 115, 191)',
        line_width=8,
        opacity=0.8,
        fill='tozeroy',
        fillcolor="rgba(8, 115, 191, 0.3)",
        showlegend=False
    ), secondary_y=True)
    # Marker on the last complete point of the centred mean (4 days back).
    fig.add_trace(go.Scatter(
        x = [dates_incid[-4]],
        y = [df_incid_reg_rolling.values[-4]],
        name = "",
        mode="markers",
        marker_color='rgb(8, 115, 191)',
        marker_size=15,
        opacity=1,
        showlegend=False
    ), secondary_y=True)
    # Disabled raw-positives scatter kept for reference (no-op string).
    """fig.add_trace(go.Scatter(
        x = df_incid_reg["jour"],
        y = df_incid_reg["P"],
        name = "",
        mode="markers",
        marker_color='rgb(8, 115, 191)',
        line_width=3,
        opacity=0.4,
        showlegend=False
    ), secondary_y=True)"""
    # Tests performed, as light grey bars on the primary axis.
    fig.add_trace(go.Bar(
        x = df_incid_reg["jour"],
        y = df_tests_reg_rolling,
        name = "Tests réalisés",
        marker_color='rgba(0, 0, 0, 0.2)',
        opacity=0.8,
        showlegend=False,
    ), secondary_y=False)
    ###
    fig.update_yaxes(zerolinecolor='Grey', range=range_y, tickfont=dict(size=18), secondary_y=True)
    fig.update_yaxes(zerolinecolor='Grey', tickfont=dict(size=18), secondary_y=False)
    fig.update_xaxes(nticks=10, ticks='inside', tickangle=0, tickfont=dict(size=18))
    # Here we modify the tickangle of the xaxis, resulting in rotated labels.
    fig.update_layout(
        margin=dict(
            l=50,
            r=0,
            b=50,
            t=70,
            pad=0
        ),
        legend_orientation="h",
        barmode='group',
        title={
            'text': title,
            'y':0.93,
            'x':0.5,
            'xanchor': 'center',
            'yanchor': 'top'},
        titlefont = dict(
            size=20),
        xaxis=dict(
            title='',
            tickformat='%d/%m'),
        annotations = [
            dict(
                x=0,
                y=1,
                xref='paper',
                yref='paper',
                font=dict(size=15),
                text='{}. Données : Santé publique France. <b>@GuillaumeRozier - covidtracker.fr</b>'.format(datetime.strptime(max(dates), '%Y-%m-%d').strftime('%d %b')), showarrow = False
            ),
        ]
    )
    # Callout annotations for the latest smoothed case and test counts.
    fig['layout']['annotations'] += (dict(
        x = dates_incid[-4], y = df_incid_reg_rolling.values[-4], # annotation point
        xref='x1',
        yref='y2',
        text=" <b>{} {}".format('%d' % df_incid_reg_rolling.values[-4], "cas quotidiens<br></b>en moyenne du {} au {}.".format(datetime.strptime(dates_incid[-7], '%Y-%m-%d').strftime('%d'), datetime.strptime(dates_incid[-1], '%Y-%m-%d').strftime('%d %b'))),
        xshift=0,
        yshift=0,
        xanchor="center",
        align='center',
        font=dict(
            color="rgb(8, 115, 191)",
            size=20
        ),
        bgcolor="rgba(255, 255, 255, 0.6)",
        opacity=1,
        ax=-250,
        ay=-70,
        arrowcolor="rgb(8, 115, 191)",
        arrowsize=1.5,
        arrowwidth=1,
        arrowhead=0,
        showarrow=True
    ),dict(
        x = dates_incid[-4], y = df_tests_reg_rolling.values[-4], # annotation point
        xref='x1',
        yref='y1',
        text=" <b>{} {}".format('%d' % df_tests_reg_rolling.values[-4], "tests réalisés<br></b>en moyenne du {} au {}.".format(datetime.strptime(dates_incid[-7], '%Y-%m-%d').strftime('%d'), datetime.strptime(dates_incid[-1], '%Y-%m-%d').strftime('%d %b'))),
        xshift=-2,
        yshift=0,
        xanchor="center",
        align='center',
        font=dict(
            color="rgba(0, 0, 0, 0.5)",
            size=13
        ),
        bgcolor="rgba(255, 255, 255, 0.4)",
        opacity=1,
        ax=-250,
        ay=-70,
        arrowcolor="rgba(0, 0, 0, 0.5)",
        arrowsize=1.5,
        arrowwidth=1,
        arrowhead=0,
        showarrow=True
    ))
    fig.write_image(PATH+"images/charts/france/regions_dashboards/{}.jpeg".format(name_fig), scale=1.2, width=900, height=600)
    print("> " + name_fig)
#cas_journ("Auvergne-Rhône-Alpes")
# In[12]:
def hosp_journ(region):
    """Export a chart of people currently hospitalised for Covid-19
    (filled area, from df_regions) together with daily hospital
    admissions (bars + 7-day mean line, from df_new_regions) for one
    region."""
    df_reg = df_regions[df_regions["regionName"] == region]
    df_new_reg = df_new_regions[df_new_regions["regionName"] == region]
    #df_incid_reg_rolling = df_incid_reg["P"].rolling(window=7, center=True).mean()
    range_x, name_fig = ["2020-03-29", last_day_plot], "hosp_journ_"+region
    title = "<b>Personnes hospitalisées</b> pour Covid19 - <b>" + region + "</b>"
    fig = go.Figure()
    # Currently hospitalised, as a filled area.
    fig.add_trace(go.Scatter(
        x = df_reg["jour"],
        y = df_reg["hosp"],
        name = "",
        marker_color='rgb(209, 102, 21)',
        line_width=8,
        opacity=0.8,
        fill='tozeroy',
        fillcolor="rgba(209, 102, 21,0.3)",
        showlegend=False
    ))
    # Marker on the latest value.
    fig.add_trace(go.Scatter(
        x = [dates[-1]],
        y = [df_reg["hosp"].values[-1]],
        name = "",
        mode="markers",
        marker_color='rgb(209, 102, 21)',
        marker_size=15,
        opacity=1,
        showlegend=False
    ))
    # Daily admissions as bars ...
    fig.add_trace(go.Bar(
        x = df_new_reg["jour"],
        y = df_new_reg["incid_hosp"],
        name = "Admissions hosp.",
        marker_color='rgb(209, 102, 21)',
        #line_width=8,
        opacity=0.8,
        #fill='tozeroy',
        #fillcolor="rgba(209, 102, 21,0.3)",
        showlegend=False
    ))
    # ... and their 7-day rolling mean as a thin line.
    fig.add_trace(go.Scatter(
        x = df_new_reg["jour"],
        y = df_new_reg["incid_hosp"].rolling(window=7).mean(),
        name = "Admissions hosp.",
        marker_color='rgb(209, 102, 21)',
        #mode="lines"
        line_width=2,
        opacity=0.8,
        #fill='tozeroy',
        #fillcolor="rgba(209, 102, 21,0.3)",
        showlegend=False
    ))
    ###
    fig.update_yaxes(zerolinecolor='Grey', tickfont=dict(size=18))
    fig.update_xaxes(nticks=10, ticks='inside', tickangle=0, tickfont=dict(size=18))
    # Here we modify the tickangle of the xaxis, resulting in rotated labels.
    fig.update_layout(
        margin=dict(
            l=50,
            r=0,
            b=50,
            t=70,
            pad=0
        ),
        legend_orientation="h",
        barmode='group',
        title={
            'text': title,
            'y':0.93,
            'x':0.5,
            'xanchor': 'center',
            'yanchor': 'top'},
        titlefont = dict(
            size=20),
        xaxis=dict(
            title='',
            tickformat='%d/%m'),
        annotations = [
            dict(
                x=0,
                y=1,
                xref='paper',
                yref='paper',
                font=dict(size=15),
                text='{}. Données : Santé publique France. <b>@GuillaumeRozier - covidtracker.fr</b>'.format(datetime.strptime(max(dates), '%Y-%m-%d').strftime('%d %b')), showarrow = False
            ),
        ]
    )
    # Callouts: latest hospitalised count and latest daily admissions.
    fig['layout']['annotations'] += (dict(
        x = dates[-1], y = df_reg["hosp"].values[-1], # annotation point
        xref='x1',
        yref='y1',
        text=" <b>{} {}".format('%d' % df_reg["hosp"].values[-1], "personnes<br>hospitalisées</b><br>le {}.".format(datetime.strptime(dates[-1], '%Y-%m-%d').strftime('%d %b'))),
        xshift=-2,
        yshift=0,
        xanchor="center",
        align='center',
        font=dict(
            color="rgb(209, 102, 21)",
            size=20
        ),
        bgcolor="rgba(255, 255, 255, 0.6)",
        opacity=0.8,
        ax=-250,
        ay=-90,
        arrowcolor="rgb(209, 102, 21)",
        arrowsize=1.5,
        arrowwidth=1,
        arrowhead=0,
        showarrow=True
    ),
    dict(
        x = df_new_reg["jour"].values[-1], y = (df_new_reg["incid_hosp"].values[-1]), # annotation point
        xref='x1',
        yref='y1',
        text="<b>{}</b> {}".format('%d' % df_new_reg["incid_hosp"].values[-1], "<br>admissions"),
        xshift=-2,
        yshift=10,
        xanchor="center",
        align='center',
        font=dict(
            color="rgb(209, 102, 21)",
            size=10
        ),
        opacity=0.8,
        ax=-20,
        ay=-40,
        arrowcolor="rgb(209, 102, 21)",
        arrowsize=1.5,
        arrowwidth=1,
        arrowhead=0,
        showarrow=True
    ),)
    fig.write_image(PATH+"images/charts/france/regions_dashboards/{}.jpeg".format(name_fig), scale=1.2, width=900, height=600)
    print("> " + name_fig)
# In[13]:
def hosp_journ_elias(reg):
    """Plot hospital in-flow vs. out-flow (7-day rolling means) for one region.

    The area between the two curves shows the balance: green when discharges
    exceed admissions (occupied beds decreasing), red otherwise. Writes
    ``hosp_journ_flux_<reg>.jpeg``. Reads the module-level globals
    ``df_new_regions``, ``dates``, ``last_day_plot`` and ``PATH``.
    """
    df_new_reg = df_new_regions[df_new_regions["regionName"]==reg]
    # 7-day rolling means smooth out the weekly reporting cycle.
    entrees_rolling = df_new_reg["incid_hosp"].rolling(window=7).mean().values
    rad_rolling = df_new_reg["incid_rad"].rolling(window=7).mean()
    dc_rolling = df_new_reg["incid_dc"].rolling(window=7).mean()
    # Hospital exits = returns home (rad) + hospital deaths (dc).
    sorties_rolling = (rad_rolling + dc_rolling).values
    range_x, name_fig, range_y = ["2020-03-29", last_day_plot], "hosp_journ_flux_"+reg, [0, 1.1*max( max(np.nan_to_num(entrees_rolling)), max(np.nan_to_num(sorties_rolling)))]
    title = "<b>Entrées et sorties de l'hôpital</b> pour Covid19 • <b>" + reg + "</b>"
    # Single-pass loop: a "log" variant can be re-enabled by adding "log" to the list.
    for i in [""]:
        if i=="log":
            title+= " [log.]"
        fig = go.Figure()
        # Admissions curve, red fill down to zero.
        fig.add_trace(go.Scatter(
            x = dates,
            y = entrees_rolling,
            name = "",
            marker_color='red',
            line_width=6,
            opacity=1,
            fill='tozeroy',
            fillcolor="rgba(235, 64, 52,0.5)",
            showlegend=False
        ))
        # Exits curve, green fill down to zero (overlaps the red fill).
        fig.add_trace(go.Scatter(
            x = dates,
            y = sorties_rolling,
            name = "",
            marker_color='green',
            line_width=0,
            opacity=1,
            fill='tozeroy',
            fillcolor="rgba(12, 161, 2, 0.5)",
            showlegend=False
        ))
        # White mask below min(entrées, sorties): only the band BETWEEN the
        # two curves keeps its red/green colour.
        fig.add_trace(go.Scatter(
            x = dates,
            y = [entrees_rolling[i] if entrees_rolling[i]<sorties_rolling[i] else sorties_rolling[i] for i in range(len(entrees_rolling))],
            name = "",
            marker_color='yellow',
            line_width=0,
            opacity=1,
            fill='tozeroy',
            fillcolor="rgba(255, 255, 255, 1)",
            showlegend=False
        ))
        # Redraw both lines on top of the fills so they stay visible.
        fig.add_trace(go.Scatter(
            x = dates,
            y = sorties_rolling,
            name = "",
            marker_color='green',
            line_width=6,
            opacity=1,
            showlegend=False
        ))
        fig.add_trace(go.Scatter(
            x = dates,
            y =entrees_rolling,
            name = "",
            marker_color='red',
            line_width=6,
            opacity=1,
            showlegend=False
        ))
        # Dotted vertical reference lines: lockdown / reopening dates.
        fig.add_shape(type="line",
            x0="2020-03-17", y0=0, x1="2020-03-17", y1=300000,
            line=dict(color="Red",width=0.5, dash="dot")
        )
        fig.add_shape(type="line",
            x0="2020-05-11", y0=0, x1="2020-05-11", y1=300000,
            line=dict(color="Green",width=0.5, dash="dot")
        )
        fig.add_shape(type="line",
            x0="2020-10-30", y0=0, x1="2020-10-30", y1=300000,
            line=dict(color="Red",width=0.5, dash="dot")
        )
        fig.add_shape(type="line",
            x0="2020-11-28", y0=0, x1="2020-11-28", y1=300000,
            line=dict(color="Orange",width=0.5, dash="dot")
        )
        fig.add_shape(type="line",
            x0="2020-12-15", y0=0, x1="2020-12-15", y1=300000,
            line=dict(color="green",width=0.5, dash="dot")
        )
        # Dots marking the latest value of each curve.
        fig.add_trace(go.Scatter(
            x = [dates[-1]],
            y = [sorties_rolling[-1]],
            name = "",
            mode="markers",
            marker_color='green',
            marker_size=13,
            opacity=1,
            showlegend=False
        ))
        fig.add_trace(go.Scatter(
            x = [dates[-1]],
            y = [entrees_rolling[-1]],
            name = "",
            mode="markers",
            marker_color='red',
            marker_size=13,
            opacity=1,
            showlegend=False
        ))
        ###
        fig.update_xaxes(nticks=10, ticks='inside', tickangle=0, tickfont=dict(size=18), ) #range=["2020-03-17", last_day_plot_dashboard]
        fig.update_yaxes(zerolinecolor='Grey', tickfont=dict(size=18), range=range_y)
        # Here we modify the tickangle of the xaxis, resulting in rotated labels.
        fig.update_layout(
            paper_bgcolor='rgba(255,255,255,1)',
            plot_bgcolor='rgba(255,255,255,1)',
            margin=dict(
                l=50,
                r=150,
                b=50,
                t=70,
                pad=0
            ),
            legend_orientation="h",
            barmode='group',
            title={
                'text': title,
                'y':0.95,
                'x':0.5,
                'xanchor': 'center',
                'yanchor': 'top'},
            titlefont = dict(
                size=30),
            xaxis=dict(
                title='',
                tickformat='%d/%m'),
            annotations = [
                dict(
                    x=0.5,
                    y=1.01,
                    font=dict(size=14),
                    xref='paper',
                    yref='paper',
                    text="Moyenne mobile 7 jours. Données Santé publique France. Auteurs @eorphelin @guillaumerozier - <b>covidtracker.fr</b>.", #'Date : {}. Source : Santé publique France. Auteur : guillaumerozier.fr.'.format(datetime.strptime(max(dates), '%Y-%m-%d').strftime('%d %B %Y')),
                    showarrow = False
                ),
            ]
        )
        # Stack the two end-of-curve callouts so they never overlap:
        # the smaller latest value gets the lower slot (-20), the larger -100.
        if entrees_rolling[-1]<sorties_rolling[-1]:
            y_e = -20
            y_s = -100
        else:
            y_e = -100
            y_s = -20
        fig['layout']['annotations'] += (
            dict(
                # NOTE(review): index 62 presumably lands near 2020-05-20 only if
                # the series starts around 2020-03-19 — verify against the data.
                x = "2020-05-20", y = (entrees_rolling[62]+sorties_rolling[62])/2, # annotation point
                xref='x1',
                yref='y1',
                text="L'aire représente le solde.<br>Si elle est <span style='color:green'>verte</span>, il y a plus de sorties que d'entrées,<br>le nombre de lits occupés diminue.",
                xshift=0,
                yshift=0,
                xanchor="center",
                align='center',
                font=dict(
                    color="black",
                    size=10
                ),
                bgcolor="rgba(255, 255, 255, 0)",
                opacity=0.8,
                ax=80,
                ay=-100,
                arrowcolor="black",
                arrowsize=1.5,
                arrowwidth=1,
                arrowhead=6,
                showarrow=True
            ),
            dict(
                x = dates[-1], y = (entrees_rolling[-1]), # annotation point
                xref='x1',
                yref='y1',
                text=" <b>{} {}".format(round(entrees_rolling[-1], 1), "entrées à l'hôpital</b><br>en moyenne le {}.".format(datetime.strptime(dates[-1], '%Y-%m-%d').strftime('%d %b'))),
                xshift=-2,
                yshift=0,
                xanchor="center",
                align='center',
                font=dict(
                    color="red",
                    size=12
                ),
                bgcolor="rgba(255, 255, 255, 0)",
                opacity=0.8,
                ax=100,
                ay=y_e,
                arrowcolor="red",
                arrowsize=1.5,
                arrowwidth=1,
                arrowhead=0,
                showarrow=True
            ),
            dict(
                x = dates[-1], y = (sorties_rolling[-1]), # annotation point
                xref='x1',
                yref='y1',
                text=" <b>{} {}".format(round(sorties_rolling[-1], 1), "sorties de l'hôpital</b><br>en moyenne le {}.<br>dont {} décès et<br>{} retours à domicile".format(datetime.strptime(dates[-1], '%Y-%m-%d').strftime('%d %b'), round(dc_rolling.values[-1], 1), round(rad_rolling.values[-1], 1))),
                xshift=-2,
                yshift=0,
                xanchor="center",
                align='center',
                font=dict(
                    color="green",
                    size=12
                ),
                bgcolor="rgba(255, 255, 255, 0)",
                opacity=0.8,
                ax=100,
                ay=y_s,
                arrowcolor="green",
                arrowsize=1.5,
                arrowwidth=1,
                arrowhead=0,
                showarrow=True
            ),
            # Labels for the lockdown/reopening reference lines.
            dict(
                x = "2020-10-30", y = 40000, # annotation point
                xref='x1',
                yref='y1',
                text="Confinement",
                xanchor="left",
                yanchor="top",
                align='center',
                font=dict(
                    color="red",
                    size=8
                ),
                showarrow=False
            ),
            dict(
                x = "2020-05-11", y = 40000, # annotation point
                xref='x1',
                yref='y1',
                text="Déconfinement",
                xanchor="left",
                yanchor="top",
                align='center',
                font=dict(
                    color="green",
                    size=8
                ),
                showarrow=False
            ),
            dict(
                x=0.5,
                y=-0.1,
                font=dict(size=10),
                xref='paper',
                yref='paper',
                text="",#'Date : {}. Source : Santé publique France. Auteur : guillaumerozier.fr.'.format(datetime.strptime(max(dates), '%Y-%m-%d').strftime('%d %B %Y')), showarrow = False
                showarrow=False
            ))
        fig.write_image(PATH + "images/charts/france/regions_dashboards/{}.jpeg".format(name_fig+i), scale=1.5, width=1100, height=600)
        #plotly.offline.plot(fig, filename = PATH + 'images/html_exports/france/departements_dashboards/{}.html'.format(name_fig+i), auto_open=False)
        print("> " + name_fig)
#hosp_journ_elias("Nouvelle-Aquitaine")
# In[14]:
def rea_journ(region):
    """Plot ICU ("réanimation") occupancy and daily ICU admissions for one region.

    Writes ``rea_journ_<region>.jpeg``. Reads the module-level globals
    ``df_regions``, ``df_new_regions``, ``dates``, ``last_day_plot`` and ``PATH``.
    """
    df_reg = df_regions[df_regions["regionName"] == region]
    df_new_reg = df_new_regions[df_new_regions["regionName"] == region]
    range_x, name_fig = ["2020-03-29", last_day_plot], "rea_journ_" + region
    title = "Personnes en <b>réanimation</b> pour Covid19 - <b>" + region + "</b>"
    fig = go.Figure()
    # Filled curve: number of patients currently in ICU.
    fig.add_trace(go.Scatter(
        x = dates,
        y = df_reg["rea"],
        name = "",
        marker_color='rgb(201, 4, 4)',
        line_width=8,
        opacity=0.8,
        fill='tozeroy',
        fillcolor="rgba(201, 4, 4,0.3)",
        showlegend=False
    ))
    # Dot marking the latest occupancy value.
    fig.add_trace(go.Scatter(
        x = [dates[-1]],
        y = [df_reg["rea"].values[-1]],
        name = "",
        mode="markers",
        marker_color='rgb(201, 4, 4)',
        marker_size=15,
        opacity=1,
        showlegend=False
    ))
    # Bars: raw daily ICU admissions.
    fig.add_trace(go.Bar(
        x = df_new_reg["jour"],
        y = df_new_reg["incid_rea"],
        name = "Admissions",
        marker_color='rgb(201, 4, 4)',
        opacity=0.8,
        showlegend=False
    ))
    # Thin line: 7-day rolling mean of admissions.
    fig.add_trace(go.Scatter(
        x = df_new_reg["jour"],
        y = df_new_reg["incid_rea"].rolling(window=7).mean(),
        name = "Admissions",
        marker_color='rgb(201, 4, 4)',
        marker_size=2,
        opacity=0.8,
        showlegend=False
    ))
    ###
    fig.update_yaxes(zerolinecolor='Grey', tickfont=dict(size=18))
    fig.update_xaxes(nticks=10, ticks='inside', tickangle=0, tickfont=dict(size=18))
    # Here we modify the tickangle of the xaxis, resulting in rotated labels.
    fig.update_layout(
        margin=dict(
            l=50,
            r=0,
            b=50,
            t=70,
            pad=0
        ),
        legend_orientation="h",
        barmode='group',
        title={
            'text': title,
            'y':0.93,
            'x':0.5,
            'xanchor': 'center',
            'yanchor': 'top'},
        titlefont = dict(
            size=20),
        xaxis=dict(
            title='',
            tickformat='%d/%m'),
        annotations = [
            dict(
                x=0,
                y=1,
                xref='paper',
                yref='paper',
                font=dict(size=15),
                text='{}. Données : Santé publique France. <b>@GuillaumeRozier - covidtracker.fr</b>'.format(datetime.strptime(max(dates), '%Y-%m-%d').strftime('%d %b')), showarrow = False
            ),
        ]
    )
    # Callout annotations: latest occupancy and latest daily admissions.
    fig['layout']['annotations'] += (dict(
        x = dates[-1], y = df_reg["rea"].values[-1], # annotation point
        xref='x1',
        yref='y1',
        text=" <b>{} {}".format('%d' % df_reg["rea"].values[-1], "personnes<br>en réanimation</b><br>le {}.".format(datetime.strptime(dates[-1], '%Y-%m-%d').strftime('%d %b'))),
        xshift=-2,
        yshift=0,
        xanchor="center",
        align='center',
        font=dict(
            color="rgb(201, 4, 4)",
            size=20
        ),
        bgcolor="rgba(255, 255, 255, 0.6)",
        opacity=0.8,
        ax=-250,
        ay=-90,
        arrowcolor="rgb(201, 4, 4)",
        arrowsize=1.5,
        arrowwidth=1,
        arrowhead=0,
        showarrow=True
    ),
    dict(
        x = df_new_reg["jour"].values[-1], y = (df_new_reg["incid_rea"].values[-1]), # annotation point
        xref='x1',
        yref='y1',
        text="<b>{}</b> {}".format('%d' % df_new_reg["incid_rea"].values[-1], "<br>admissions"),
        xshift=-2,
        yshift=10,
        xanchor="center",
        align='center',
        font=dict(
            color='rgb(201, 4, 4)',
            size=10
        ),
        opacity=0.8,
        ax=-20,
        ay=-40,
        arrowcolor='rgb(201, 4, 4)',
        arrowsize=1.5,
        arrowwidth=1,
        arrowhead=0,
        showarrow=True
    ),)
    fig.write_image(PATH+"images/charts/france/regions_dashboards/{}.jpeg".format(name_fig), scale=1.2, width=900, height=600)
    print("> " + name_fig)
#rea_journ("Auvergne-Rhône-Alpes")
# In[15]:
def dc_journ(region):
    """Plot daily hospital deaths (raw points + 7-day rolling mean) for one region.

    Writes ``dc_journ_<region>.jpeg``. Reads the module-level globals
    ``df_new_regions``, ``dates``, ``last_day_plot`` and ``PATH``.
    """
    df_reg = df_new_regions[df_new_regions["regionName"] == region]
    dc_new_rolling = df_reg["incid_dc"].rolling(window=7).mean()
    range_x, name_fig, range_y = ["2020-03-29", last_day_plot], "dc_journ_"+region, [0, df_reg["incid_dc"].max()]
    title = "<b>Décès hospitaliers quotidiens</b> du Covid19 - <b>" + region + "</b>"
    fig = go.Figure()
    # Smoothed curve with translucent black fill.
    fig.add_trace(go.Scatter(
        x = df_reg["jour"],
        y = dc_new_rolling,
        name = "Nouveaux décès hosp.",
        marker_color='black',
        line_width=8,
        opacity=0.8,
        fill='tozeroy',
        fillcolor="rgba(0,0,0,0.3)",
        showlegend=False
    ))
    # Dot marking the latest smoothed value.
    fig.add_trace(go.Scatter(
        x = [dates[-1]],
        y = [dc_new_rolling.values[-1]],
        name = "Nouveaux décès hosp.",
        mode="markers",
        marker_color='black',
        marker_size=15,
        opacity=1,
        showlegend=False
    ))
    #
    # Raw daily values as faint markers behind the smoothed curve.
    fig.add_trace(go.Scatter(
        x = df_reg["jour"],
        y = df_reg["incid_dc"],
        name = "Nouveaux décès hosp.",
        mode="markers",
        marker_color='black',
        line_width=3,
        opacity=0.4,
        showlegend=False
    ))
    ###
    fig.update_yaxes(zerolinecolor='Grey', range=range_y, tickfont=dict(size=18))
    fig.update_xaxes(nticks=10, ticks='inside', tickangle=0, tickfont=dict(size=18))
    # Here we modify the tickangle of the xaxis, resulting in rotated labels.
    fig.update_layout(
        margin=dict(
            l=50,
            r=0,
            b=50,
            t=70,
            pad=0
        ),
        legend_orientation="h",
        barmode='group',
        title={
            'text': title,
            'y':0.93,
            'x':0.5,
            'xanchor': 'center',
            'yanchor': 'top'},
        titlefont = dict(
            size=20),
        xaxis=dict(
            title='',
            tickformat='%d/%m'),
        annotations = [
            dict(
                x=0,
                y=1,
                xref='paper',
                yref='paper',
                font=dict(size=15),
                text='{}. Données : Santé publique France. <b>@GuillaumeRozier - covidtracker.fr</b>'.format(datetime.strptime(max(dates), '%Y-%m-%d').strftime('%d %b')), showarrow = False
            ),
        ]
    )
    # Callout: average over the last 7 days (truncated to an integer for display).
    fig['layout']['annotations'] += (dict(
        x = dates[-1], y = dc_new_rolling.values[-1], # annotation point
        xref='x1',
        yref='y1',
        text=" <b>{} {}".format('%d' % math.trunc(round(dc_new_rolling.values[-1], 2)), "décès quotidiens</b><br>en moyenne<br>du {} au {}.".format(datetime.strptime(dates[-7], '%Y-%m-%d').strftime('%d'), datetime.strptime(dates[-1], '%Y-%m-%d').strftime('%d %b'))),
        xshift=-2,
        yshift=0,
        xanchor="center",
        align='center',
        font=dict(
            color="black",
            size=20
        ),
        bgcolor="rgba(255, 255, 255, 0.6)",
        opacity=0.8,
        ax=-250,
        ay=-90,
        arrowcolor="black",
        arrowsize=1.5,
        arrowwidth=1,
        arrowhead=0,
        showarrow=True
    ),)
    fig.write_image(PATH+"images/charts/france/regions_dashboards/{}.jpeg".format(name_fig), scale=1.2, width=900, height=600)
    print("> " + name_fig)
# In[16]:
def saturation_rea_journ(region):
    """Plot ICU-bed occupancy by Covid19 patients as a % of the region's capacity.

    Writes ``saturation_rea_journ_<region>.jpeg`` and returns the latest
    saturation percentage so callers can reuse it (the dashboard loop dumps
    these values to ``saturation_rea_regions.json``). Reads the module-level
    globals ``df_regions``, ``dates``, ``last_day_plot`` and ``PATH``.
    """
    df_reg = df_regions[df_regions["regionName"] == region]
    # % of the region's ICU beds (LITS_y) occupied by Covid19 patients.
    df_saturation = 100 * df_reg["rea"] / df_reg["LITS_y"]
    range_x, name_fig, range_y = ["2020-03-29", last_day_plot], "saturation_rea_journ_"+region, [0, df_saturation.max()*1.2]
    title = "<b>Occupation des réa.</b> par les patients Covid19 - <b>" + region + "</b>"
    fig = go.Figure()
    # Bar colour encodes severity: <60% green, 60-100% orange, >100% red.
    colors_sat = ["green" if val < 60 else "red" if val > 100 else "orange" for val in df_saturation.values]
    fig.add_trace(go.Bar(
        x = dates,
        y = df_saturation,
        name = "Nouveaux décès hosp.",
        marker_color=colors_sat,
        opacity=0.8,
        showlegend=False
    ))
    # Horizontal 100% capacity line (drawn wider than the plotted x-range).
    fig.add_shape(
        type="line",
        x0="2019-03-15",
        y0=100,
        x1="2022-01-01",
        y1=100,
        opacity=1,
        fillcolor="orange",
        line=dict(
            color="red",
            width=1,
        )
    )
    fig.update_yaxes(zerolinecolor='Grey', range=range_y, tickfont=dict(size=18))
    fig.update_xaxes(nticks=10, ticks='inside', tickangle=0, tickfont=dict(size=18), range=["2020-03-15", last_day_plot])
    # Here we modify the tickangle of the xaxis, resulting in rotated labels.
    fig.update_layout(
        margin=dict(
            l=50,
            r=0,
            b=50,
            t=70,
            pad=0
        ),
        legend_orientation="h",
        barmode='group',
        title={
            'text': title,
            'y':0.93,
            'x':0.5,
            'xanchor': 'center',
            'yanchor': 'top'},
        titlefont = dict(
            size=20),
        xaxis=dict(
            title='',
            tickformat='%d/%m'),
        annotations = [
            dict(
                x=0,
                y=1,
                xref='paper',
                yref='paper',
                font=dict(size=15),
                text='{}. Données : Santé publique France. <b>@GuillaumeRozier - covidtracker.fr</b>'.format(datetime.strptime(max(dates), '%Y-%m-%d').strftime('%d %b')), showarrow = False
            ),
        ]
    )
    # Callout with the latest saturation value, coloured like its bar.
    fig['layout']['annotations'] += (dict(
        x = dates[-1], y = df_saturation.values[-1], # annotation point
        xref='x1',
        yref='y1',
        text=" <b>{} {}".format('%d' % df_saturation.values[-1], " %</b> des lits de réa. occupés par<br>des patients Covid19 le {}.".format(datetime.strptime(dates[-1], '%Y-%m-%d').strftime('%d %b'))),
        xshift=-2,
        yshift=0,
        xanchor="center",
        align='center',
        font=dict(
            color=colors_sat[-1],
            size=20
        ),
        bgcolor="rgba(255, 255, 255, 0.6)",
        opacity=1,
        ax=-250,
        ay=-20,
        arrowcolor=colors_sat[-1],
        arrowsize=1.5,
        arrowwidth=1,
        arrowhead=0,
        showarrow=True
    ),)
    fig.write_image(PATH+"images/charts/france/regions_dashboards/{}.jpeg".format(name_fig), scale=1.2, width=900, height=600)
    print("> " + name_fig)
    # Latest value, reused by the dashboard loop.
    return df_saturation.values[-1]
# In[17]:
for reg in regions:
    hosp_statut_vaccinal(reg)
# In[18]:
import cv2
dict_saturation = {}
# For every region: build each chart, stitch the four daily charts into a
# single 2x2 dashboard image, then delete the intermediate files.
for reg in regions:
    # saturation_rea_journ() both writes its chart and returns the latest
    # saturation value; one call is enough (a second, redundant call used to
    # regenerate the identical image).
    dict_saturation[reg] = round(saturation_rea_journ(reg), 1)
    hosp_journ_elias(reg)
    cas_journ(reg)
    hosp_journ(reg)
    rea_journ(reg)
    dc_journ(reg)
    im1 = cv2.imread(PATH+'images/charts/france/regions_dashboards/cas_journ_{}.jpeg'.format(reg))
    im2 = cv2.imread(PATH+'images/charts/france/regions_dashboards/hosp_journ_{}.jpeg'.format(reg))
    im3 = cv2.imread(PATH+'images/charts/france/regions_dashboards/rea_journ_{}.jpeg'.format(reg))
    im4 = cv2.imread(PATH+'images/charts/france/regions_dashboards/dc_journ_{}.jpeg'.format(reg))
    # 2x2 grid: cases | hosp on top, ICU | deaths below.
    im_haut = cv2.hconcat([im1, im2])
    im_bas = cv2.hconcat([im3, im4])
    im_totale = cv2.vconcat([im_haut, im_bas])
    cv2.imwrite(PATH+'images/charts/france/regions_dashboards/dashboard_jour_{}.jpeg'.format(reg), im_totale)
    os.remove(PATH+'images/charts/france/regions_dashboards/cas_journ_{}.jpeg'.format(reg))
    os.remove(PATH+'images/charts/france/regions_dashboards/hosp_journ_{}.jpeg'.format(reg))
    os.remove(PATH+'images/charts/france/regions_dashboards/rea_journ_{}.jpeg'.format(reg))
    os.remove(PATH+'images/charts/france/regions_dashboards/dc_journ_{}.jpeg'.format(reg))
# Persist the per-region saturation values for the website.
with open(PATH_STATS + 'saturation_rea_regions.json', 'w') as outfile:
    json.dump(dict_saturation, outfile)
# In[19]:
n_tot = 4
import locale
# French locale so strftime('%d %b') renders French month abbreviations.
locale.setlocale(locale.LC_ALL, 'fr_FR.UTF-8')
# One quadrant scatter chart per look-back offset i (i = 0 is the latest day):
# x = weekly evolution of positive cases, y = weekly evolution of admissions.
for i in range(0, n_tot):
    evol_tests_regs, evol_hosp_regs = [], []
    fig = go.Figure()
    # Background quadrants: red = both rising, green = both falling,
    # orange = mixed.
    fig.add_shape(type="rect",
        x0=-1000, y0=0, x1=0, y1=1000,
        line=dict(color="orange",width=0.5, dash="dot"), fillcolor="orange", opacity=0.2,
        layer="below"
    )
    fig.add_shape(type="rect",
        x0=0, y0=-1000, x1=1000, y1=0,
        line=dict(color="orange",width=0.5, dash="dot"), fillcolor="orange", opacity=0.2,
        layer="below"
    )
    fig.add_shape(type="rect",
        x0=0, y0=0, x1=1000, y1=1000,
        line=dict(color="Red",width=0.5, dash="dot"), fillcolor="red", opacity=0.2,
        layer="below"
    )
    fig.add_shape(type="rect",
        x0=-1000, y0=-1000, x1=0, y1=0,
        line=dict(color="red",width=0.5, dash="dot"), fillcolor="green", opacity=0.2,
        layer="below"
    )
    regs_vert, regs_orange, regs_rouge = "", "", ""
    nb_vert, nb_orange, nb_rouge = 0, 0, 0
    for reg in regions:
        df_incid_reg = df_incid_regions[df_incid_regions["regionName"]==reg]
        tests_reg_rolling = df_incid_reg["P"].rolling(window=7).mean().values
        # Week-over-week change (%) of the 7-day mean ending i days ago.
        # BUG FIX: the baseline must be shifted by i too (was ...[-8]), otherwise
        # every chart except i == 0 compared against the wrong week.
        evol_tests_reg = (tests_reg_rolling[-1-i] - tests_reg_rolling[-8-i]) / tests_reg_rolling[-8-i] * 100
        evol_tests_regs += [evol_tests_reg]
        hosp_reg_rolling = df_new_regions[df_new_regions["regionName"]==reg]["incid_hosp"].rolling(window=7).mean().values
        # Same fix as above for the admissions baseline.
        evol_hosp_reg = ( hosp_reg_rolling[-1-i] - hosp_reg_rolling[-8-i]) / hosp_reg_rolling[-8-i] * 100
        evol_hosp_regs += [evol_hosp_reg]
        # Classify the region by the sign of both evolutions.
        if (evol_tests_reg < 0) & (evol_hosp_reg<0):
            color = "green"
            regs_vert += df_incid_reg["regionName"].values[0] + ", "
            nb_vert += 1
        elif (evol_tests_reg > 0) & (evol_hosp_reg > 0):
            color = "red"
            regs_rouge += df_incid_reg["regionName"].values[0] + ", "
            nb_rouge += 1
        else:
            color = "orange"
            regs_orange += df_incid_reg["regionName"].values[0] + ", "
            nb_orange += 1
        # One labelled dot per region (first 4 letters of its name).
        fig.add_trace(go.Scatter(
            x = [evol_tests_reg],
            y = [evol_hosp_reg],
            name = reg,
            text=[df_incid_reg["regionName"].values[0][:4]+"."],
            marker_color=color,
            marker_size=20,
            line_width=8,
            opacity=0.8,
            fill='tozeroy',
            mode='markers+text',
            fillcolor="rgba(8, 115, 191, 0.3)",
            textfont_color="black",
            showlegend=False,
            textposition="middle center"
        ))
    liste_deps_str = "{} en <b>vert</b> : {}<br><br>{} en <b>orange</b> : {}<br><br>{} en <b>rouge</b> : {}".format(nb_vert, regs_vert, nb_orange, regs_orange, nb_rouge, regs_rouge)
    # Quadrant captions + data-source line + region list.
    fig['layout']['annotations'] += (dict(
        x = 50, y = 50, # annotation point
        xref='x1', yref='y1',
        text="Les cas augmentent.<br>Les admissions à l'hôpital augmentent.",
        xanchor="center",align='center',
        font=dict(
            color="black", size=10
        ),
        showarrow=False
    ),dict(
        x = -50, y = -50, # annotation point
        xref='x1', yref='y1',
        text="Les cas baissent.<br>Les admissions à l'hôpital baissent.",
        xanchor="center",align='center',
        font=dict(
            color="black", size=10
        ),
        showarrow=False
    ),dict(
        x = -50, y = 50, # annotation point
        xref='x1', yref='y1',
        text="Les cas baissent.<br>Les admissions à l'hôpital augmentent.",
        xanchor="center",align='center',
        font=dict(
            color="black", size=10
        ),
        showarrow=False
    ),dict(
        x = 50, y = -50, # annotation point
        xref='x1', yref='y1',
        text="Les cas augmentent.<br>Les admissions à l'hôpital baissent.",
        xanchor="center",align='center',
        font=dict(
            color="black", size=10
        ),
        showarrow=False
    ),dict(
        x=0.5,
        y=1.05,
        xref='paper',
        yref='paper',
        font=dict(size=14),
        text='{}. Données : Santé publique France. Auteur : <b>@GuillaumeRozier - covidtracker.fr.</b>'.format(datetime.strptime(max(dates), '%Y-%m-%d').strftime('%d %b')), showarrow = False
    ),
    dict(
        x=-0.08,
        y=-0.3,
        xref='paper',
        yref='paper',
        font=dict(size=14),
        align="left",
        # BUG FIX: break the line without dropping a character (was [151:],
        # which silently skipped the character at index 150).
        text=liste_deps_str[:150]+"<br>"+liste_deps_str[150:], showarrow = False
    ),)
    fig.update_xaxes(title="Évolution hebdomadaire des cas positifs", range=[-200, 200], ticksuffix="%")
    # Typo fix in the axis label: "hedbomadaire" -> "hebdomadaire".
    fig.update_yaxes(title="Évolution hebdomadaire des admissions à l'hôpital", range=[-200, 200], ticksuffix="%")
    fig.update_layout(
        title={
            'text': "<b>Évolution des cas et hospitalisations dans les régions</b> • {}".format(datetime.strptime(dates[-i-1], '%Y-%m-%d').strftime('%d %b')),
            'y':0.95,
            'x':0.5,
            'xanchor': 'center',
            'yanchor': 'top'},
        titlefont = dict(
            size=20),
        margin=dict(
            b=200
        ),
    )
    fig.write_image(PATH+"images/charts/france/evolution_regs/{}_{}.jpeg".format("evolution_regs", i), scale=3, width=1000, height=900)
# In[20]:
"""for reg in regions:
heading = "<!-- wp:heading --><h2 id=\"{}\">{}</h2><!-- /wp:heading -->\n".format(reg, reg)
string = "<p align=\"center\"> <a href=\"https://raw.githubusercontent.com/rozierguillaume/covid-19/master/images/charts/france/regions_dashboards/dashboard_jour_{}.jpeg\" target=\"_blank\" rel=\"noopener noreferrer\"><img src=\"https://raw.githubusercontent.com/rozierguillaume/covid-19/master/images/charts/france/regions_dashboards/dashboard_jour_{}.jpeg\" width=\"100%\"> </a></p><br>\n".format(reg, reg)
string2 = "<p align=\"center\"> <a href=\"https://raw.githubusercontent.com/rozierguillaume/covid-19/master/images/charts/france/regions_dashboards/saturation_rea_journ_{}.jpeg\" target=\"_blank\" rel=\"noopener noreferrer\"><img src=\"https://raw.githubusercontent.com/rozierguillaume/covid-19/master/images/charts/france/regions_dashboards/saturation_rea_journ_{}.jpeg\" width=\"70%\"> </a></p>\n".format(reg, reg)
space = "<!-- wp:spacer {\"height\":50} --><div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer\"></div><!-- /wp:spacer -->"
retourmenu="<a href=\"#Menu\">Retour au menu</a>"
print(space+retourmenu+heading+string+string2)
"""
# In[21]:
"""print("<!-- wp:buttons --><div class=\"wp-block-buttons\">\n")
#for reg in regions:
print("""#<!-- wp:button {"className":"is-style-outline"} -->
#<div class="wp-block-button is-style-outline">""")
#print("<a class=\"wp-block-button__link\" href=\"#{}\">".format(reg))
#print("{}</a></div><!-- /wp:button --></div>\n".format(reg))
#print("<!-- /wp:buttons -->")"""
|
<reponame>usualoma/mt-plugin-PerformanceProfiler
import pytest
from performance_profiler.normalizer.sql_normalizer import SQLNormalizer
normalizer = SQLNormalizer()
@pytest.mark.parametrize(
    "identical_queries",
    [
        [
            # SELECT
            # Generally, results from this queries are not identical, but they are identical results in Data::ObjectDriver.
            """
SELECT entry_meta_entry_id, entry_meta_type, entry_meta_vchar
FROM mt_entry_meta
WHERE (entry_meta_type NOT IN (?)) AND (entry_meta_entry_id = ?)
ORDER BY entry_meta_entry_id ASC, entry_meta_type ASC
""",
            """
SELECT entry_meta_vchar, entry_meta_entry_id, entry_meta_type
FROM mt_entry_meta
WHERE (entry_meta_type NOT IN (?)) AND (entry_meta_entry_id = ?)
ORDER BY entry_meta_entry_id ASC, entry_meta_type ASC
""",
            """
SELECT entry_meta_type, entry_meta_vchar, entry_meta_entry_id
FROM mt_entry_meta
WHERE (entry_meta_type NOT IN (?)) AND (entry_meta_entry_id = ?)
ORDER BY entry_meta_entry_id ASC, entry_meta_type ASC
""",
        ],
        [
            # WHERE
            """
SELECT entry_meta_entry_id, entry_meta_type, entry_meta_vchar
FROM mt_entry_meta
WHERE (entry_meta_type NOT IN (?)) AND (entry_meta_entry_id = ?)
ORDER BY entry_meta_entry_id ASC, entry_meta_type ASC
""",
            """
SELECT entry_meta_entry_id, entry_meta_type, entry_meta_vchar
FROM mt_entry_meta
WHERE (entry_meta_entry_id = ?) AND (entry_meta_type NOT IN (?))
ORDER BY entry_meta_entry_id ASC, entry_meta_type ASC
""",
        ],
        [
            # complex WHERE
            """
SELECT entry_id
FROM mt_entry
WHERE ((((entry_authored_on < ?)) AND ((entry_status = ?)) AND ((entry_blog_id = ?)) AND ((entry_class = ?))) OR (((entry_authored_on = ?)) AND ((entry_status = ?)) AND ((entry_blog_id = ?)) AND ((entry_id < ?)) AND ((entry_class = ?))))
ORDER BY entry_authored_on DESC, entry_id DESC
LIMIT 1
""",
            """
SELECT entry_id
FROM mt_entry
WHERE ((((entry_authored_on = ?)) AND ((entry_status = ?)) AND ((entry_blog_id = ?)) AND ((entry_id < ?)) AND ((entry_class = ?))) OR (((entry_authored_on < ?)) AND ((entry_status = ?)) AND ((entry_blog_id = ?)) AND ((entry_class = ?))))
ORDER BY entry_authored_on DESC, entry_id DESC
LIMIT 1
""",
            """
SELECT entry_id
FROM mt_entry
WHERE ((((entry_status = ?)) AND ((entry_blog_id = ?)) AND ((entry_class = ?)) AND ((entry_authored_on < ?))) OR (((entry_authored_on = ?)) AND ((entry_status = ?)) AND ((entry_blog_id = ?)) AND ((entry_id < ?)) AND ((entry_class = ?))))
ORDER BY entry_authored_on DESC, entry_id DESC
LIMIT 1
""",
        ],
        [
            # JOIN
            """
SELECT COUNT(*) AS cnt, objecttag_tag_id
FROM mt_entry, mt_objecttag
WHERE (entry_class = ?) AND (entry_status = ?) AND (entry_blog_id IN (?)) AND (objecttag_object_datasource = ?) AND (objecttag_blog_id IN (?)) AND (entry_id = objecttag_object_id)
GROUP BY objecttag_tag_id
""",
            """
SELECT COUNT(*) AS cnt, objecttag_tag_id
FROM mt_objecttag, mt_entry
WHERE (entry_class = ?) AND (entry_status = ?) AND (entry_blog_id IN (?)) AND (objecttag_object_datasource = ?) AND (objecttag_blog_id IN (?)) AND (entry_id = objecttag_object_id)
GROUP BY objecttag_tag_id
""",
        ],
        [
            # DISTINCT
            """
SELECT DISTINCT entry_id, entry_authored_on
FROM mt_entry, mt_objecttag
WHERE (entry_status = ?) AND (entry_class = ?) AND (entry_blog_id = ?) AND (objecttag_blog_id = ?) AND (objecttag_tag_id IN (?,?)) AND (objecttag_object_datasource = ?) AND (entry_id = objecttag_object_id)
ORDER BY entry_authored_on DESC, entry_id ASC
""",
            """
SELECT DISTINCT entry_authored_on, entry_id
FROM mt_entry, mt_objecttag
WHERE (entry_status = ?) AND (entry_class = ?) AND (entry_blog_id = ?) AND (objecttag_blog_id = ?) AND (objecttag_tag_id IN (?,?)) AND (objecttag_object_datasource = ?) AND (entry_id = objecttag_object_id)
ORDER BY entry_authored_on DESC, entry_id ASC
""",
        ],
        [
            # find by id
            """
SELECT asset_id, asset_blog_id, asset_class, asset_created_by, asset_created_on, asset_description, asset_file_ext, asset_file_name, asset_file_path, asset_label, asset_mime_type, asset_modified_by, asset_modified_on, asset_parent, asset_url
FROM mt_asset
WHERE (asset_id IN (?))
ORDER BY asset_id ASC
""",
            """
SELECT asset_id, asset_blog_id, asset_class, asset_created_by, asset_created_on, asset_description, asset_file_ext, asset_file_name, asset_file_path, asset_label, asset_mime_type, asset_modified_by, asset_modified_on, asset_parent, asset_url
FROM mt_asset
WHERE (asset_id IN (?,?))
ORDER BY asset_id ASC
""",
            """
SELECT asset_id, asset_blog_id, asset_class, asset_created_by, asset_created_on, asset_description, asset_file_ext, asset_file_name, asset_file_path, asset_label, asset_mime_type, asset_modified_by, asset_modified_on, asset_parent, asset_url
FROM mt_asset
WHERE (asset_id IN (?,?,?))
ORDER BY asset_id ASC
""",
        ],
    ],
)
def test_normalize_identical(identical_queries):
    """Queries that differ only in clause/column order (or placeholder count)
    must all normalize to one and the same string."""
    unique_forms = {normalizer.normalize(query)[0] for query in identical_queries}
    assert len(unique_forms) == 1
@pytest.mark.parametrize(
    "different_queries",
    [
        [
            # different order
            """
SELECT entry_meta_entry_id, entry_meta_type, entry_meta_vchar
FROM mt_entry_meta
WHERE (entry_meta_type NOT IN (?)) AND (entry_meta_entry_id = ?)
ORDER BY entry_meta_entry_id ASC, entry_meta_type ASC
""",
            """
SELECT entry_meta_entry_id, entry_meta_type, entry_meta_vchar
FROM mt_entry_meta
WHERE (entry_meta_type NOT IN (?)) AND (entry_meta_entry_id = ?)
ORDER BY entry_meta_type ASC, entry_meta_entry_id ASC
""",
        ],
    ],
)
def test_normalize_different(different_queries):
    """Queries with genuinely different semantics (here: a different ORDER BY
    column order) must NOT collapse to a single normalized form."""
    unique_forms = {normalizer.normalize(query)[0] for query in different_queries}
    assert len(unique_forms) > 1
@pytest.mark.parametrize(
    "unsupported_query",
    [
        """
INSERT INTO mt_entry (entry_title) VALUES ('a')
""",
        """
UPDATE mt_entry SET entry_title = 'a'
""",
    ],
)
def test_unsupported_queries(unsupported_query):
    """Non-SELECT statements pass through untouched, with an empty structure."""
    normalized, structure = normalizer.normalize(unsupported_query)
    assert normalized == unsupported_query
    assert structure == {}
|
#!/usr/bin/env python3
# TODO: REVISE USING NEW HARDWARE CLASSES
__author__ = 'anton'
import evdev
from hardware.motors import DriveBase, Picker
from hardware.sensors import BallSensor
from hardware.gamepad import GamePadStub
import threading
import time
MAX_SPEED = 20 # cm per s
MIN_SPEED = 3
MAX_TURNRATE = 80 # deg per s
MIN_TURNRATE = 4
def scale(val, src, dst):
    """Map *val* linearly from the source range *src* to the target range *dst*.

    val: float or int
    src: (low, high) tuple describing the input range
    dst: (low, high) tuple describing the output range

    example: scale(99, (0.0, 99.0), (-1.0, +1.0)) -> 1.0
    """
    src_lo, src_hi = src
    dst_lo, dst_hi = dst
    fraction = float(val - src_lo) / (src_hi - src_lo)
    return fraction * (dst_hi - dst_lo) + dst_lo
print("Finding ps3 controller...")
devices = [evdev.InputDevice(fn) for fn in evdev.list_devices()]
for device in devices:
if device.name == 'PLAYSTATION(R)3 Controller':
ps3dev = device.fn
try:
gamepad = evdev.InputDevice(ps3dev)
except:
gamepad = GamePadStub()
turn_rate = 0
turn_speed = 0
fwd_speed = 0
triangle_pressed_time = 0
running = True
picker = Picker('outA')
class MotorThread(threading.Thread):
    """Background drive/picker control loop.

    Reads the module-level globals ``fwd_speed``, ``turn_rate`` and
    ``running`` that the gamepad event loop updates.
    """

    def __init__(self):
        # Drive base: 4.3 cm wheels spaced 12 cm apart, both motors mounted
        # inverted on ports C (left) and B (right).
        self.base = DriveBase(left=('outC', 'inversed'),
                              right=('outB', 'inversed'),
                              wheel_diameter=4.3,
                              wheel_span=12,
                              counter_clockwise_is_positive=False)
        self.ballsensor = BallSensor('in4')
        self.picker = Picker('outA')
        threading.Thread.__init__(self)

    def run(self):
        # NOTE(review): 'gripper' is never defined or assigned in this module —
        # this global declaration looks like a leftover from an earlier revision.
        global gripper
        print("Engines running!")
        while running:
            # Autopicker
            # Automatically store a detected ball when the picker is idle.
            if self.ballsensor.ball_detected() and not self.picker.is_running:
                # print(self.picker.is_running)
                self.picker.go_to_target(self.picker.STORE)
            # Close after storing
            if self.picker.is_at_store:
                # print("at store")
                self.picker.go_to_target(self.picker.OPEN)
            # if cross_btn:
            #     self.picker.go_to_target(picker.STORE)
            # if square_btn:
            #     break
            # Apply the latest speed/turn commands from the gamepad loop.
            self.base.drive_and_turn(fwd_speed, turn_rate)
            #
            # # Autopicker
            # if picker.target == picker.OPEN and picker.is_at_target:
            #     if self.ballsensor.check_ball():
            #         gripper.target = picker.CLOSED
            # elif picker.target == picker.STORE and picker.is_at_target:
            #     picker.target = picker.OPEN
            #
            # picker.run()
            # self.base.drive_and_turn(fwd_speed, -turn_rate)
            # Give the Ev3 some time to handle other threads.
            time.sleep(0.04)
        # Loop exited (running was set False): stop motors before the thread ends.
        self.base.stop()
        picker.stop()
if __name__ == "__main__":
motor_thread = MotorThread()
motor_thread.start()
for event in gamepad.read_loop(): #this loops infinitely
if event.type == 3: #A stick is moved
if event.code == 2: #X axis on right stick
turn_rate = scale(event.value, (255, 0), (MAX_TURNRATE, -MAX_TURNRATE))
if -MIN_TURNRATE < turn_rate < MIN_TURNRATE: turn_rate = 0
if event.code == 5: #Y axis on right stick
fwd_speed = scale(event.value, (255, 0), (-MAX_SPEED, MAX_SPEED))
if -MIN_SPEED < fwd_speed < MIN_SPEED: fwd_speed = 0
if event.type == 1:
if event.code == 300:
if event.value == 1:
triangle_pressed_time = time.time()
if event.value == 0 and time.time() > triangle_pressed_time + 1:
print("Triangle button is pressed. Break.")
running = False
time.sleep(0.5) # Wait for the motor thread to finish
break
elif event.code == 302:
if event.value == 1:
print("X button is pressed. Eating.")
# picker.target = picker.STORE
if event.value == 0:
pass
# picker.target = picker.OPEN
elif event.code == 301:
if event.value == 1:
print("O button is pressed. Purging.")
# picker.target = picker.PURGE
if event.value == 0:
pass
# picker.target = picker.OPEN
|
# _*_ coding:utf-8 _*_
import re
import json
from pyquery import PyQuery as pq
from app.spider_store.common import (
match1,
get_html,
get_content,
)
from app.spider_store.utils.content_cleaner import cleaner
def _clean_article_html(content):
    """Strip Sohu boilerplate (back-link span, editor-name paragraph,
    copyright notice) from raw article HTML and run the shared cleaner.

    Raises AssertionError when nothing is left after cleaning (kept as
    assert to preserve the exception type existing callers handle).
    """
    backsohu = re.compile(r"<span\s*class=['|\"]backword['|\"]>.*?</span>")
    editor_name = re.compile(r"<p\s*data-role=['|\"]editor-name['|\"]>.*</p>")
    content = backsohu.sub('', content)
    content = editor_name.sub('', content)
    if re.search(r"(搜狐.*?独家出品 未经许可严禁转载)", content):
        content = re.sub(r'(搜狐.*?独家出品 未经许可严禁转载)', '', content)
    content = cleaner(str(content))
    assert content, "获取文章内容失败"
    return content


def _extract_image_urls(content):
    """Collect src URLs from cleaned article HTML, prefixing "http:" on
    scheme-less (protocol-relative) URLs.

    Raises AssertionError when the article contains no image.
    """
    image_urls = re.findall(r'src=[\'|"](.*?)[\'|"]', content, re.S)
    assert image_urls, "文章中缺少图片"
    # Compile once instead of per iteration.
    scheme = re.compile(r'http:|https:')
    image_urls_final = []
    for image_url in image_urls:
        if scheme.match(image_url):
            image_urls_final.append(image_url)
        else:
            image_urls_final.append('http:' + image_url)
    return image_urls_final


def sohu_news_download(url,):
    """Download a Sohu (www.sohu.com/a/) or sh.focus.cn/zixun/ news article.

    Returns a dict with keys type/title/source/content/thumbnail_urls/
    image_urls. Raises AssertionError for unsupported URLs or for articles
    without usable content or images.
    """
    html = get_content(url, )
    doc = pq(html)
    if "www.sohu.com/a/" in url:
        # Title (with fallback selector), dropping a leading "原创" marker.
        title = doc('div.text div.text-title h1').text()
        if not title:
            title = doc('div.content.area div.article-box.l h3.article-title').text()
        if re.match(r"原创", title):
            title = title.replace("原创", '')
        # Source / author (with fallback selector).
        source = doc('div.column.left div.user-info h4 a').text()
        if not source:
            source = doc('div.right-author-info.clearfix div.l.clearfix a.name.l').text()
        # Raw article body (with fallback selector).
        content = doc('div.text article.article').html()
        if not content:
            content = doc('article.article-text').html()
    elif "sh.focus.cn/zixun/" in url:
        title = doc('div.main-content h1').text()
        if re.match(r"原创", title):
            title = title.replace("原创", '')
        source = doc('div.main-content div.s-pic-info div.info-source span a').text()
        content = doc('div.main-content div.info-content').html()
    else:
        raise AssertionError("urls检测爬虫异常")
    # The cleaning and image-extraction steps were duplicated verbatim in both
    # branches; they are shared now (the else branch raises, so this is safe).
    content = _clean_article_html(content)
    image_urls_final = _extract_image_urls(content)
    # First article image doubles as the thumbnail.
    thumbnail_urls = [image_urls_final[0]]
    data = {
        "type": 'news',
        "title": title,
        "source": source,
        "content": content,
        "thumbnail_urls": thumbnail_urls,
        "image_urls": image_urls_final,
    }
    return data
def real_url(fileName, key, ch,):
    """Resolve the real CDN address of one Sohu video clip.

    Queries Sohu's dispatch endpoint with the clip file name, access key and
    channel id, and returns the first server URL from the JSON reply.
    """
    query = (
        "https://data.vod.itc.cn/ip?new={}&num=1&key={}&ch={}"
        "&pt=1&pg=2&prod=h5n".format(fileName, key, ch)
    )
    reply = json.loads(get_html(query))
    return reply['servers'][0]['url']
def sohu_video_download(url):
    """Scrape metadata for a Sohu TV video page.

    Extracts the numeric video id (vid), queries Sohu's VRS API for clip
    metadata, resolves real CDN URLs for every clip, and returns a dict with
    title, source, thumbnail, clip URLs and total size.  Returns None for
    URLs that do not match tv.sohu.com.
    """
    # Obtain the vid either from the share URL query string or by scraping
    # the (GBK-encoded) page HTML.
    if re.match(r'http[s]?://share\.vrs\.sohu\.com', url):
        vid = match1(url, 'id=(\d+)')  # NOTE(review): non-raw pattern; '\d' works but should be r'id=(\d+)'
        source = None
    else:
        html = get_content(url, charset="GBK")
        vid = match1(html, r'\Wvid\s*[\:=]\s*[\'"]?(\d+)[\'"]?;')
        # Uploader name is embedded in the page as a JS variable when present.
        if re.search(r"var\s*wm_username='(.*?)';", html):
            source = re.search(r"var\s*wm_username='(.*?)';", html).group(1)
        else:
            source = None
    assert vid, "视频vid获取失败,请检查url"
    if re.match(r'http[s]?://tv\.sohu\.com/', url):
        # Primary path: VRS flash API for tv.sohu.com pages.
        info = json.loads(get_content(
            'http://hot.vrs.sohu.com/vrs_flash.action?vid={}'.format(vid)
        ))
        if info.get("data") and (info.get("data") is not None):
            # Walk the quality variants from best to worst and re-query until
            # one answer carries an 'allot' (CDN host) entry.
            for qtyp in ['oriVid', 'superVid', 'highVid', 'norVid', 'relativeId']:
                if 'data' in info:
                    hqvid = info['data'][qtyp]
                else:
                    hqvid = info[qtyp]
                if hqvid != 0 and hqvid != vid:
                    info = json.loads(get_content(
                        'http://hot.vrs.sohu.com/vrs_flash.action?vid={}'.format(
                            hqvid
                        )
                    ))
                    if 'allot' not in info:
                        continue
                    break
            host = info['allot']  # NOTE(review): host/tvid are assigned but never used below
            tvid = info['tvid']
            urls = []
            if not source:
                # Fall back to the uploader name carried in the API reply.
                if "wm_data" in info:
                    if 'wm_username' in info["wm_data"]:
                        source = info["wm_data"]["wm_username"]
                    else:
                        source = "crawl"
                else:
                    source = "crawl"
            data = info['data']
            title = data['tvName']
            thumbnail_url = data["coverImg"]
            # In this API branch clipsBytes presumably holds ints already
            # (the fallback branch below needs map(int, ...)) — TODO confirm.
            size = sum(data['clipsBytes'])
            assert len(data['clipsURL']) == len(data['clipsBytes']) == len(data['su'])
            for fileName, key in zip(data['su'], data['ck']):
                urls.append(real_url(fileName, key, data['ch']))
        else:
            # Fallback path: my.tv.sohu.com "videonew" API.
            info = json.loads(get_content(
                'http://my.tv.sohu.com/play/videonew.do?vid={}&referer='
                'http://my.tv.sohu.com'.format(vid)
            ))
            host = info['allot']
            tvid = info['tvid']
            urls = []
            if not source:
                if "wm_data" in info:
                    if 'wm_username' in info["wm_data"]:
                        source = info["wm_data"]["wm_username"]
                    else:
                        source = "crawl"
                else:
                    source = "crawl"
            data = info['data']
            title = data['tvName']
            thumbnail_url = data["coverImg"]
            # Here clipsBytes are strings, hence the explicit int conversion.
            size = sum(map(int, data['clipsBytes']))
            assert len(data['clipsURL']) == len(data['clipsBytes']) == len(data['su'])
            for fileName, key in zip(data['su'], data['ck']):
                urls.append(real_url(fileName, key, data['ch']))
        data = {
            "type": 'video',
            "title": title,
            "source": source,
            "thumbnail_urls": [thumbnail_url],
            "image_urls": None,
            "video_url": urls,
            "ext": None,
            "size": size,
        }
        return data
    else:
        return None
def sohu_spider(url):
    """Dispatch a Sohu URL to the matching downloader (video vs. news)."""
    handler = sohu_video_download if news_type(url) == "video" else sohu_news_download
    return handler(url)
def news_type(url):
    """Classify a Sohu URL: 'video' for tv.sohu.com pages, 'news' otherwise."""
    is_video = re.match(r"http[s]?://tv\.sohu\.com", url) is not None
    return "video" if is_video else "news"
# Public entry point alias: external callers invoke `download`, which is
# implemented by the sohu_spider dispatcher.
download = sohu_spider
if __name__ == '__main__':
    # Sample URLs: one video page and three news pages for manual testing.
    url = 'https://tv.sohu.com/v/MjAxOTA0MjMvbjYwMDcwMDA0MC5zaHRtbA==.html' # video
    url2 = 'http://www.sohu.com/a/318287619_428290' # news
    url3 = 'http://www.sohu.com/a/317886624_383324' # news
    url4 = 'https://sh.focus.cn/zixun/e24c627c2a5fec58.html' # news
    # data = sohu_video_download(url)
    data = sohu_spider(url4)
    # Dump every field of the scraped result, one per line.
    for key, value in data.items():
        print(key + ':' + '{}'.format(value))
|
# -*- coding: utf-8 -*-
"""Demonstration / usage example for the new features.
Created on Fri Mar 24 13:58:36 2017
@author: <NAME>, <EMAIL>
"""
from __future__ import division, print_function, absolute_import
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm
import matplotlib.colors
import bspline
import bspline.splinelab as splinelab
def test():
    """Build a cubic B-spline basis on a uniform knot vector, print two
    collocation matrices, and sanity-check basic basis properties."""
    # -- configuration ------------------------------------------------------
    spline_order = 3        # order of spline basis (as-is! 3 = cubic)
    n_knots = 5             # knots to generate (endpoints counted only once)
    sites = [0.1, 0.33]     # collocation sites (where the basis is evaluated)

    # -- build the basis ----------------------------------------------------
    simple_knots = np.linspace(0, 1, n_knots)                  # no endpoint repeats yet
    full_knots = splinelab.augknt(simple_knots, spline_order)  # repeat endpoints as needed for order p
    basis = bspline.Bspline(full_knots, spline_order)

    # Collocation matrices: function values and second derivatives at `sites`.
    A0 = basis.collmat(sites)
    A2 = basis.collmat(sites, deriv_order=2)
    print( A0 )
    print( A2 )

    # -- checks -------------------------------------------------------------
    # Expected basis size: one function per interior knot plus (order + 1).
    n_interior = len(simple_knots) - 2
    expected_n_basis = n_interior + (spline_order + 1)
    actual_n_basis = len(basis(0.))  # dummy evaluation reveals the basis size
    assert actual_n_basis == expected_n_basis, "something went wrong, number of basis functions is incorrect"
    # The basis functions must sum to one everywhere (partition of unity).
    assert np.allclose( np.sum(A0, axis=1), 1.0 ), "something went wrong, the basis functions do not form a partition of unity"
def main():
    """Demonstration: plot a B-spline basis and its first three derivatives."""
    #########################################################################################
    # config
    #########################################################################################
    # Order of spline basis.
    #
    p = 3
    # Knot vector, including the endpoints.
    #
    # For convenience, endpoints are specified only once, regardless of the value of p.
    #
    # Duplicate knots *in the interior* are allowed, with the standard meaning for B-splines.
    #
    knots = [0., 0.25, 0.5, 0.75, 1.]
    # How many plotting points to use on each subinterval [knots[i], knots[i+1]).
    #
    # Only intervals with length > 0 are actually plotted.
    #
    nt_per_interval = 101
    #########################################################################################
    # the demo itself
    #########################################################################################
    # The evaluation algorithm used in bspline.py uses half-open intervals t_i <= x < t_{i+1}.
    #
    # This causes the right endpoint of each interval to actually be the start point of the next interval.
    #
    # Especially, the right endpoint of the last interval is the start point of the next (nonexistent) interval,
    # so the basis will return a value of zero there.
    #
    # We work around this by using a small epsilon to avoid evaluation exactly at t_{i+1} (for each interval).
    #
    epsrel = 1e-10
    epsabs = epsrel * (knots[-1] - knots[0])  # epsilon scaled to the knot span
    original_knots = knots                    # keep the unaugmented knots for plotting markers
    knots = splinelab.augknt( knots, p ) # add repeated endpoint knots for splines of order p
    # treat each interval separately to preserve discontinuities
    #
    # (useful especially when plotting the highest-order nonzero derivative)
    #
    B = bspline.Bspline(knots, p)
    # Collect one linspace of plot abscissae per nonempty knot interval.
    xxs = []
    for I in zip( knots[:-1], knots[1:] ):
        t_i = I[0]
        t_ip1 = I[1] - epsabs  # stay strictly inside the half-open interval
        if t_ip1 - t_i > 0.: # accept only intervals of length > 0 (to skip higher-multiplicity knots in the interior)
            xxs.append( np.linspace(t_i, t_ip1, nt_per_interval) )
    # common settings for all plotted lines
    settings = { "linestyle" : 'solid',
                 "linewidth" : 1.0 }
    # create a list of unique colors for plotting
    #
    # http://stackoverflow.com/questions/8389636/creating-over-20-unique-legend-colors-using-matplotlib
    #
    NUM_COLORS = nbasis = len( B(0.) ) # perform dummy evaluation to get number of basis functions
    cm = plt.get_cmap('gist_rainbow')
    cNorm = matplotlib.colors.Normalize(vmin=0, vmax=NUM_COLORS-1)
    scalarMap = matplotlib.cm.ScalarMappable(norm=cNorm, cmap=cm)
    colors = [scalarMap.to_rgba(i) for i in range(NUM_COLORS)]
    # Subplot axis labels: B and its first three derivatives.
    labels = [ r"$B$",
               r"$\mathrm{d}B\,/\,\mathrm{d}x$",
               r"$\mathrm{d}^2B\,/\,\mathrm{d}x^2$",
               r"$\mathrm{d}^3B\,/\,\mathrm{d}x^3$" ]
    # for plotting the knot positions:
    unique_knots_xx = np.unique(original_knots)
    unique_knots_yy = np.zeros_like(unique_knots_xx)
    # plot the basis functions B(x) and their first three derivatives
    plt.figure(1)
    plt.clf()
    for k in range(4):
        ax = plt.subplot(2,2, k+1)
        # place the axis label where it fits
        if k % 2 == 0:
            ax.yaxis.set_label_position("left")
        else:
            ax.yaxis.set_label_position("right")
        # plot the kth derivative; each basis function gets a unique color
        f = B.diff(order=k) # order=0 is a passthrough
        for xx in xxs:
            yy = np.array( [f(x) for x in xx] ) # f(scalar) -> rank-1 array, one element per basis function
            for i in range(nbasis):
                settings["color"] = colors[i]
                plt.plot( xx, yy[:,i], **settings )
        plt.ylabel( labels[k] )
        # show knot positions
        plt.plot( unique_knots_xx, unique_knots_yy, "kx" )
    plt.suptitle(r"$B$-spline basis functions, $p=%d$" % (p))
if __name__ == '__main__':
    # Run the sanity checks first, then render and display the demo figure.
    test()
    main()
    plt.show()
|
from functools import partial
import asyncio
from concurrent.futures import Executor
from function_scheduling_distributed_framework.concurrent_pool.custom_threadpool_executor import ThreadPoolExecutorShrinkAble

# Module-level default executor. Uses a smart auto-shrinking thread pool
# instead of the builtin concurrent.futures.ThreadPoolExecutor.
async_executor_default = ThreadPoolExecutorShrinkAble()
async def simple_run_in_executor(f, *args, async_executor: Executor = None, async_loop=None, **kwargs):
    """Run a synchronous, blocking function through asyncio without blocking
    the event loop.

    Example: ``r = await simple_run_in_executor(block_fun, 20)``.

    Background (translated from the original Chinese notes):
    ``asyncio.run_coroutine_threadsafe`` and ``loop.run_in_executor`` are
    opposites.  ``run_coroutine_threadsafe`` is called from ordinary
    synchronous code (where ``await`` is unavailable) to schedule a coroutine
    on a loop; it turns an asyncio future into a
    ``concurrent.futures.Future``.  ``run_in_executor`` is called from inside
    an ``async`` function to push a blocking call onto a thread pool so it
    does not stall the other tasks on the event loop; it turns a
    ``concurrent.futures.Future`` into an awaitable asyncio future
    (a ``concurrent.futures.Future`` itself cannot be awaited).

    :param f: a synchronous blocking callable; must NOT be ``async def``.
    :param args: positional arguments for ``f``.
    :param async_executor: executor to run ``f`` on; defaults to the module's
        shrinkable thread pool.
    :param async_loop: event loop to use; defaults to the current loop.
    :param kwargs: keyword arguments for ``f``.
    :return: whatever ``f`` returns.
    """
    loopx = async_loop or asyncio.get_event_loop()
    async_executorx = async_executor or async_executor_default
    # print(id(loopx))
    result = await loopx.run_in_executor(async_executorx, partial(f, *args, **kwargs))
    return result
if __name__ == '__main__':
    import time
    import requests

    def block_fun(x):
        # Deliberately blocking function used to demonstrate the executor.
        print(x)
        time.sleep(5)
        return x * 10

    async def enter_fun(xx):  # Entry coroutine: once code is async, everything it calls must be async-friendly too.
        await asyncio.sleep(1)
        # r = block_fun(xx)  # Calling the blocking function directly here would stall the whole
        #                    # event loop and make all tasks take far longer overall.
        r = await simple_run_in_executor(block_fun, xx)
        print(r)

    loopy = asyncio.get_event_loop()
    print(id(loopy))
    # Mix of plain blocking calls pushed to the pool and proper coroutines.
    tasks = [simple_run_in_executor(requests.get, url='http://www.baidu.com', timeout=10),
             simple_run_in_executor(block_fun, 1),
             simple_run_in_executor(block_fun, 2),
             simple_run_in_executor(block_fun, 3),
             simple_run_in_executor(time.sleep, 8),
             enter_fun(4),
             enter_fun(5),
             enter_fun(6)]
    print('开始')
    loopy.run_until_complete(asyncio.wait(tasks))
    print('结束')
    time.sleep(200)
|
#!/usr/bin/env python
"""Contains classes that represent biological sequence data. These
provide generic biological sequence manipulation functions, plus functions
that are critical for the EVOLVE calculations.
WARNING: Do not import sequence classes directly! It is expected that you will
access them through the moltype module. Sequence classes depend on information
from the MolType that is _only_ available after MolType has been imported.
Sequences are intended to be immutable. This is not enforced by the code for
performance reasons, but don't alter the MolType or the sequence data after
creation.
"""
from __future__ import division
from annotation import Map, Feature, _Annotatable
from cogent.util.transform import keep_chars, for_seq, per_shortest, \
per_longest
from cogent.util.misc import DistanceFromMatrix
from cogent.core.genetic_code import DEFAULT as DEFAULT_GENETIC_CODE, \
GeneticCodes
from cogent.parse import gff
from cogent.format.fasta import fasta_from_sequences
from cogent.core.info import Info as InfoClass
from numpy import array, zeros, put, nonzero, take, ravel, compress, \
logical_or, logical_not, arange
from numpy.random import permutation
from operator import eq, ne
from random import shuffle
import re
import warnings
__author__ = "<NAME>, <NAME>, and <NAME>"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["<NAME>", "<NAME>", "<NAME>",
"<NAME>", "<NAME>"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
# Cached numpy array type, handy for isinstance-style comparisons elsewhere.
ARRAY_TYPE = type(array(1))

#standard distance functions: left because generally useful
# frac_same / frac_diff score element-wise (in)equality over the overlap of
# two sequences, normalized by the length of the shorter one.
frac_same = for_seq(f=eq, aggregator=sum, normalizer=per_shortest)
frac_diff = for_seq(f=ne, aggregator=sum, normalizer=per_shortest)
class SequenceI(object):
    """Abstract class containing Sequence interface.

    Specifies methods that Sequence delegates to its MolType, and methods for
    detecting gaps.
    """
    #String methods delegated to self._seq -- remember to override if self._seq
    #isn't a string in your base class, but it's probably better to make
    #self._seq a property that contains the string.
    LineWrap = None #used for formatting FASTA strings

    def __str__(self):
        """__str__ returns self._seq unmodified."""
        return self._seq

    def toFasta(self, make_seqlabel=None):
        """Return string of self in FASTA format, no trailing newline

        Arguments:
            - make_seqlabel: callback function that takes the seq object and
              returns a label str
        """
        return fasta_from_sequences([self], make_seqlabel = make_seqlabel,
            line_wrap=self.LineWrap)

    def translate(self, *args, **kwargs):
        """translate() delegates to self._seq."""
        return self._seq.translate(*args, **kwargs)

    def count(self, item):
        """count() delegates to self._seq."""
        return self._seq.count(item)

    def __cmp__(self, other):
        """__cmp__ compares based on the sequence string."""
        return cmp(self._seq, other)

    def __hash__(self):
        """__hash__ behaves like the sequence string for dict lookup."""
        return hash(self._seq)

    def __contains__(self, other):
        """__contains__ checks whether other is in the sequence string."""
        return other in self._seq

    def shuffle(self):
        """returns a randomized copy of the Sequence object"""
        randomized_copy_list = list(self)
        shuffle(randomized_copy_list)
        return self.__class__(''.join(randomized_copy_list), Info=self.Info)

    def complement(self):
        """Returns complement of self, using data from MolType.

        Always tries to return same type as item: if item looks like a dict,
        will return list of keys.
        """
        return self.__class__(self.MolType.complement(self), Info=self.Info)

    def stripDegenerate(self):
        """Removes degenerate bases by stripping them out of the sequence."""
        return self.__class__(self.MolType.stripDegenerate(self), Info=self.Info)

    def stripBad(self):
        """Removes any symbols not in the alphabet."""
        return self.__class__(self.MolType.stripBad(self), Info=self.Info)

    def stripBadAndGaps(self):
        """Removes any symbols not in the alphabet, and any gaps."""
        return self.__class__(self.MolType.stripBadAndGaps(self), Info=self.Info)

    def rc(self):
        """Returns reverse complement of self w/ data from MolType.

        Always returns same type self.
        """
        return self.__class__(self.MolType.rc(self), Info=self.Info)

    def isGapped(self):
        """Returns True if sequence contains gaps."""
        return self.MolType.isGapped(self)

    def isGap(self, char=None):
        """Returns True if char is a gap.

        If char is not supplied, tests whether self is gaps only.
        """
        if char is None: #no char - so test if self is all gaps
            return len(self) == self.countGaps()
        else:
            return self.MolType.isGap(char)

    def isDegenerate(self):
        """Returns True if sequence contains degenerate characters."""
        return self.MolType.isDegenerate(self)

    def isValid(self):
        """Returns True if sequence contains no items absent from alphabet."""
        return self.MolType.isValid(self)

    def isStrict(self):
        """Returns True if sequence contains only monomers."""
        return self.MolType.isStrict(self)

    def firstGap(self):
        """Returns the index of the first gap in the sequence, or None."""
        return self.MolType.firstGap(self)

    def firstDegenerate(self):
        """Returns the index of first degenerate symbol in sequence, or None."""
        return self.MolType.firstDegenerate(self)

    def firstInvalid(self):
        """Returns the index of first invalid symbol in sequence, or None."""
        return self.MolType.firstInvalid(self)

    def firstNonStrict(self):
        """Returns the index of first non-strict symbol in sequence, or None."""
        return self.MolType.firstNonStrict(self)

    def disambiguate(self, method='strip'):
        """Returns a non-degenerate sequence from a degenerate one.

        method can be 'strip' (deletes any characters not in monomers or gaps)
        or 'random'(assigns the possibilities at random, using equal
        frequencies).
        """
        return self.__class__(self.MolType.disambiguate(self, method), \
            Info=self.Info)

    def degap(self):
        """Deletes all gap characters from sequence."""
        return self.__class__(self.MolType.degap(self), Info=self.Info)

    def gapList(self):
        """Returns list of indices of all gaps in the sequence, or []."""
        return self.MolType.gapList(self)

    def gapVector(self):
        """Returns vector of True or False according to which pos are gaps."""
        return self.MolType.gapVector(self)

    def gapMaps(self):
        """Returns dicts mapping between gapped and ungapped positions."""
        return self.MolType.gapMaps(self)

    def countGaps(self):
        """Counts the gaps in the specified sequence."""
        return self.MolType.countGaps(self)

    def countDegenerate(self):
        """Counts the degenerate bases in the specified sequence."""
        return self.MolType.countDegenerate(self)

    def possibilities(self):
        """Counts number of possible sequences matching the sequence.

        Uses self.Degenerates to decide how many possibilites there are at
        each position in the sequence.
        """
        return self.MolType.possibilities(self)

    def MW(self, method='random', delta=None):
        """Returns the molecular weight of (one strand of) the sequence.

        If the sequence is ambiguous, uses method (random or strip) to
        disambiguate the sequence.

        If delta is passed in, adds delta per strand (default is None, which
        uses the alphabet default. Typically, this adds 18 Da for terminal
        water. However, note that the default nucleic acid weight assumes
        5' monophosphate and 3' OH: pass in delta=18.0 if you want 5' OH as
        well.

        Note that this method only calculates the MW of the coding strand. If
        you want the MW of the reverse strand, add self.rc().MW(). DO NOT
        just multiply the MW by 2: the results may not be accurate due to
        strand bias, e.g. in mitochondrial genomes.
        """
        return self.MolType.MW(self, method, delta)

    def canMatch(self, other):
        """Returns True if every pos in self could match same pos in other.

        Truncates at length of shorter sequence.
        Gaps are only allowed to match other gaps.
        """
        return self.MolType.canMatch(self, other)

    def canMismatch(self, other):
        """Returns True if any position in self could mismatch with other.

        Truncates at length of shorter sequence.
        Gaps are always counted as matches.
        """
        return self.MolType.canMismatch(self, other)

    def mustMatch(self, other):
        """Returns True if all positions in self must match positions in other."""
        return self.MolType.mustMatch(self, other)

    def canPair(self, other):
        """Returns True if self and other could pair.

        Pairing occurs in reverse order, i.e. last position of other with
        first position of self, etc.

        Truncates at length of shorter sequence.
        Gaps are only allowed to pair with other gaps, and are counted as 'weak'
        (same category as GU and degenerate pairs).

        NOTE: second must be able to be reverse
        """
        return self.MolType.canPair(self, other)

    def canMispair(self, other):
        """Returns True if any position in self could mispair with other.

        Pairing occurs in reverse order, i.e. last position of other with
        first position of self, etc.

        Truncates at length of shorter sequence.
        Gaps are always counted as possible mispairs, as are weak pairs like GU.
        """
        return self.MolType.canMispair(self, other)

    def mustPair(self, other):
        """Returns True if all positions in self must pair with other.

        Pairing occurs in reverse order, i.e. last position of other with
        first position of self, etc.
        """
        return not self.MolType.canMispair(self, other)

    def diff(self, other):
        """Returns number of differences between self and other.

        NOTE: truncates at the length of the shorter sequence. Case-sensitive.
        """
        return self.distance(other)

    def distance(self, other, function=None):
        """Returns distance between self and other using function(i,j).

        other must be a sequence.

        function should be a function that takes two items and returns a
        number. To turn a 2D matrix into a function, use
        cogent.util.miscs.DistanceFromMatrix(matrix).

        NOTE: Truncates at the length of the shorter sequence.

        Note that the function acts on two _elements_ of the sequences, not
        the two sequences themselves (i.e. the behavior will be the same for
        every position in the sequences, such as identity scoring or a function
        derived from a distance matrix as suggested above). One limitation of
        this approach is that the distance function cannot use properties of
        the sequences themselves: for example, it cannot use the lengths of the
        sequences to normalize the scores as percent similarities or percent
        differences.

        If you want functions that act on the two sequences themselves, there
        is no particular advantage in making these functions methods of the
        first sequences by passing them in as parameters like the function
        in this method. It makes more sense to use them as standalone functions.
        The factory function cogent.util.transform.for_seq is useful for
        converting per-element functions into per-sequence functions, since it
        takes as parameters a per-element scoring function, a score aggregation
        function, and a normalization function (which itself takes the two
        sequences as parameters), returning a single function that combines
        these functions and that acts on two complete sequences.
        """
        if function is None:
            #use identity scoring function
            function = lambda a, b : a != b
        distance = 0
        for first, second in zip(self, other):
            distance += function(first, second)
        return distance

    def matrixDistance(self, other, matrix):
        """Returns distance between self and other using a score matrix.

        WARNING: the matrix must explicitly contain scores for the case where
        a position is the same in self and other (e.g. for a distance matrix,
        an identity between U and U might have a score of 0). The reason the
        scores for the 'diagonals' need to be passed explicitly is that for
        some kinds of distance matrices, e.g. log-odds matrices, the 'diagonal'
        scores differ from each other. If these elements are missing, this
        function will raise a KeyError at the first position that the two
        sequences are identical.
        """
        return self.distance(other, DistanceFromMatrix(matrix))

    def fracSame(self, other):
        """Returns fraction of positions where self and other are the same.

        Truncates at length of shorter sequence.
        Note that fracSame and fracDiff are both 0 if one sequence is empty.
        """
        return frac_same(self, other)

    def fracDiff(self, other):
        """Returns fraction of positions where self and other differ.

        Truncates at length of shorter sequence.
        Note that fracSame and fracDiff are both 0 if one sequence is empty.
        """
        return frac_diff(self, other)

    def fracSameGaps(self, other):
        """Returns fraction of positions where self and other share gap states.

        In other words, if self and other are both all gaps, or both all
        non-gaps, or both have gaps in the same places, fracSameGaps will
        return 1.0. If self is all gaps and other has no gaps, fracSameGaps
        will return 0.0. Returns 0 if one sequence is empty.

        Uses self's gap characters for both sequences.
        """
        if not self or not other:
            return 0.0
        is_gap = self.MolType.Gaps.__contains__
        return sum([is_gap(i) == is_gap(j) for i,j in zip(self, other)]) \
            /min(len(self),len(other))

    def fracDiffGaps(self, other):
        """Returns frac. of positions where self and other's gap states differ.

        In other words, if self and other are both all gaps, or both all
        non-gaps, or both have gaps in the same places, fracDiffGaps will
        return 0.0. If self is all gaps and other has no gaps, fracDiffGaps
        will return 1.0.

        Returns 0 if one sequence is empty.

        Uses self's gap characters for both sequences.
        """
        if not self or not other:
            return 0.0
        return 1.0 - self.fracSameGaps(other)

    def fracSameNonGaps(self, other):
        """Returns fraction of non-gap positions where self matches other.

        Doesn't count any position where self or other has a gap.
        Truncates at the length of the shorter sequence.

        Returns 0 if one sequence is empty.
        """
        if not self or not other:
            return 0.0
        is_gap = self.MolType.Gaps.__contains__
        count = 0
        identities = 0
        for i, j in zip(self, other):
            if is_gap(i) or is_gap(j):
                continue
            count += 1
            if i == j:
                identities += 1
        if count:
            return identities/count
        else: #there were no positions that weren't gaps
            return 0

    def fracDiffNonGaps(self, other):
        """Returns fraction of non-gap positions where self differs from other.

        Doesn't count any position where self or other has a gap.
        Truncates at the length of the shorter sequence.

        Returns 0 if one sequence is empty. Note that this means that
        fracDiffNonGaps is _not_ the same as 1 - fracSameNonGaps, since both
        return 0 if one sequence is empty.
        """
        if not self or not other:
            return 0.0
        is_gap = self.MolType.Gaps.__contains__
        count = 0
        diffs = 0
        for i, j in zip(self, other):
            if is_gap(i) or is_gap(j):
                continue
            count += 1
            if i != j:
                diffs += 1
        if count:
            return diffs/count
        else: #there were no positions that weren't gaps
            return 0

    def fracSimilar(self, other, similar_pairs):
        """Returns fraction of positions where self[i] is similar to other[i].

        similar_pairs must be a dict such that d[(i,j)] exists if i and j are
        to be counted as similar. Use PairsFromGroups in cogent.util.misc to
        construct such a dict from a list of lists of similar residues.

        Truncates at the length of the shorter sequence.

        Note: current implementation re-creates the distance function each
        time, so may be expensive compared to creating the distance function
        using for_seq separately.

        Returns 0 if one sequence is empty.
        """
        if not self or not other:
            return 0.0
        return for_seq(f = lambda x, y: (x,y) in similar_pairs, \
            normalizer=per_shortest)(self, other)

    def withTerminiUnknown(self):
        """Returns copy of sequence with terminal gaps remapped as missing."""
        gaps = self.gapVector()
        first_nongap = last_nongap = None
        for i, state in enumerate(gaps):
            if not state:
                if first_nongap is None:
                    first_nongap = i
                last_nongap = i
        missing = self.MolType.Missing
        if first_nongap is None: #sequence was all gaps
            # BUGFIX: the original iterated `for i in len(self)`, which raised
            # TypeError (an int is not iterable) whenever the sequence was
            # entirely gaps; iterate over range(len(self)) instead.
            result = self.__class__([missing for i in range(len(self))],
                                    Info=self.Info)
        else:
            prefix = missing*first_nongap
            mid = str(self[first_nongap:last_nongap+1])
            suffix = missing*(len(self)-last_nongap-1)
            result = self.__class__(prefix + mid + suffix, Info=self.Info)
        return result
class Sequence(_Annotatable, SequenceI):
"""Holds the standard Sequence object. Immutable."""
MolType = None #connected to ACSII when moltype is imported
def __init__(self, Seq='',Name=None, Info=None, check=True, \
preserve_case=False, gaps_allowed=True, wildcards_allowed=True):
"""Initialize a sequence.
Arguments:
Seq: the raw sequence string, default is ''
Name: the sequence name
check: if True (the default), validates against the MolType
"""
if Name is None and hasattr(Seq, 'Name'):
Name = Seq.Name
self.Name = Name
orig_seq = Seq
if isinstance(Seq, Sequence):
Seq = Seq._seq
elif isinstance(Seq, ModelSequence):
Seq = str(Seq)
elif type(Seq) is not str:
try:
Seq = ''.join(Seq)
except TypeError:
Seq = ''.join(map(str, Seq))
Seq = self._seq_filter(Seq)
if not preserve_case and not Seq.isupper():
Seq = Seq.upper()
self._seq = Seq
if check:
self.MolType.verifySequence(self._seq, gaps_allowed, \
wildcards_allowed)
if not isinstance(Info, InfoClass):
try:
Info = InfoClass(Info)
except TypeError:
Info = InfoClass()
if hasattr(orig_seq, 'Info'):
try:
Info.update(orig_seq.Info)
except:
pass
self.Info = Info
if isinstance(orig_seq, _Annotatable):
self.copyAnnotations(orig_seq)
def _seq_filter(self, seq):
"""Returns filtered seq; used to do DNA/RNA conversions."""
return seq
def getColourScheme(self, colours):
return {}
#dict([(motif,colours.black) for motif in self.MolType])
def getColorScheme(self, colors): #alias to support US spelling
return self.getColourScheme(colours=colors)
def copyAnnotations(self, other):
self.annotations = other.annotations[:]
def annotateFromGff(self, f):
first_seqname = None
for (seqname, source, feature, start, end, score, strand,
frame, attributes, comments) in gff.GffParser(f):
if first_seqname is None:
first_seqname = seqname
else:
assert seqname == first_seqname, (seqname, first_seqname)
feat_label = gff.parse_attributes(attributes)
self.addFeature(feature, feat_label, [(start, end)])
def withMaskedAnnotations(self, annot_types, mask_char=None, shadow=False):
"""returns a sequence with annot_types regions replaced by mask_char
if shadow is False, otherwise all other regions are masked.
Arguments:
- annot_types: annotation type(s)
- mask_char: must be a character valid for the seq MolType. The
default value is the most ambiguous character, eg. '?' for DNA
- shadow: whether to mask the annotated regions, or everything but
the annotated regions"""
if mask_char is None:
ambigs = [(len(v), c) for c,v in self.MolType.Ambiguities.items()]
ambigs.sort()
mask_char = ambigs[-1][1]
assert mask_char in self.MolType, 'Invalid mask_char %s' % mask_char
annotations = []
annot_types = [annot_types, [annot_types]][isinstance(annot_types, str)]
for annot_type in annot_types:
annotations += self.getAnnotationsMatching(annot_type)
region = self.getRegionCoveringAll(annotations)
if shadow:
region = region.getShadow()
i = 0
segments = []
for b, e in region.getCoordinates():
segments.append(self._seq[i:b])
segments.append(mask_char * (e-b))
i = e
segments.append(self._seq[i:])
new = self.__class__(''.join(segments), Name=self.Name, check=False,
Info=self.Info)
new.annotations = self.annotations[:]
return new
def gappedByMapSegmentIter(self, map, allow_gaps=True, recode_gaps=False):
for span in map.spans:
if span.lost:
if allow_gaps:
unknown = span.terminal or recode_gaps
seg = "-?"[unknown] * span.length
else:
raise ValueError('Gap(s) in map %s' % map)
else:
seg = self._seq[span.Start:span.End]
if span.Reverse:
complement = self.MolType.complement
seg = [complement(base) for base in seg[::-1]]
seg = ''.join(seg)
yield seg
def gappedByMapMotifIter(self, map):
for segment in self.gappedByMapSegmentIter(map):
for motif in segment:
yield motif
def gappedByMap(self, map, recode_gaps=False):
segments = self.gappedByMapSegmentIter(map, True, recode_gaps)
new = self.__class__(''.join(segments),
Name=self.Name, check=False, Info=self.Info)
annots = self._slicedAnnotations(new, map)
new.annotations = annots
return new
def _mapped(self, map):
# Called by generic __getitem__
segments = self.gappedByMapSegmentIter(map, allow_gaps=False)
new = self.__class__(''.join(segments), self.Name, Info=self.Info)
return new
def __add__(self, other):
"""Adds two sequences (other can be a string as well)."""
if hasattr(other, 'MolType'):
if self.MolType != other.MolType:
raise ValueError, "MolTypes don't match: (%s,%s)" % \
(self.MolType, other.MolType)
other_seq = other._seq
else:
other_seq = other
new_seq = self.__class__(self._seq + other_seq)
# Annotations which extend past the right end of the left sequence
# or past the left end of the right sequence are dropped because
# otherwise they will annotate the wrong part of the constructed
# sequence.
left = [a for a in self._shiftedAnnotations(new_seq, 0)
if a.map.End <= len(self)]
if hasattr(other, '_shiftedAnnotations'):
right = [a for a in other._shiftedAnnotations(new_seq, len(self))
if a.map.Start >= len(self)]
new_seq.annotations = left + right
else:
new_seq.annotations = left
return new_seq
def __repr__(self):
myclass = '%s' % self.__class__.__name__
myclass = myclass.split('.')[-1]
if len(self) > 10:
seq = str(self._seq[:7]) + '... %s' % len(self)
else:
seq = str(self._seq)
return "%s(%s)" % (myclass, seq)
def getTracks(self, policy):
return policy.tracksForSequence(self)
def getName(self):
"""Return the sequence name -- should just use Name instead."""
return self.Name
def __len__(self):
return len(self._seq)
def __iter__(self):
return iter(self._seq)
def gettype(self):
"""Return the sequence type."""
return self.MolType.label
def resolveambiguities(self):
"""Returns a list of tuples of strings."""
ambigs = self.MolType.resolveAmbiguity
return [ambigs(motif) for motif in self._seq]
def slidingWindows(self, window, step, start=None, end=None):
    """Generator function that yield new sequence objects
    of a given length at a given interval.
    Arguments:
        - window: The length of the returned sequence
        - step: The interval between the start of the returned
          sequence objects
        - start: first window start position
        - end: last window start position
    """
    # [x, default][x is None] is an old-style default-substitution idiom.
    start = [start, 0][start is None]
    end = [end, len(self)-window+1][end is None]
    # clamp so the final window never runs past the end of the sequence
    end = min(len(self)-window+1, end)
    if start < end and len(self)-end >= window-1:
        for pos in xrange(start, end, step):
            yield self[pos:pos+window]
def getInMotifSize(self, motif_length=1, log_warnings=True):
    """returns sequence as list of non-overlapping motifs

    Arguments:
        - motif_length: length of the motifs
        - log_warnings: whether to notify of an incomplete terminal motif
    """
    seq = self._seq
    if motif_length == 1:
        return seq
    total = len(seq)
    leftover = total % motif_length
    if leftover and log_warnings:
        warnings.warn('Dropped remainder "%s" from end of sequence' %
                      seq[-leftover:])
    usable = total - leftover
    return [seq[pos:pos + motif_length]
            for pos in range(0, usable, motif_length)]
def parseOutGaps(self):
    """Returns (gap_map, ungapped_seq) for self.

    The map records where the gaps were (as the inverse of the non-gap
    segment map), so the original gapped sequence can be reconstructed.
    Annotations are remapped onto the ungapped coordinates.
    """
    gapless = []
    segments = []
    # matches maximal runs of non-gap characters ('-' is the gap char here)
    nongap = re.compile('([^%s]+)' % re.escape("-"))
    for match in nongap.finditer(self._seq):
        segments.append(match.span())
        gapless.append(match.group())
    # NOTE: 'map' shadows the builtin; kept for API/stylistic consistency
    map = Map(segments, parent_length=len(self)).inverse()
    seq = self.__class__(
        ''.join(gapless),
        Name=self.getName(), Info=self.Info)
    if self.annotations:
        seq.annotations = [a.remappedTo(seq, map) for a in self.annotations]
    return (map, seq)
class ProteinSequence(Sequence):
    """Holds the standard Protein sequence. MolType set in moltype module."""
    pass
class ProteinWithStopSequence(Sequence):
    """Holds the standard Protein sequence, allows for stop codon.

    MolType set in moltype module.
    """
    pass
class NucleicAcidSequence(Sequence):
    """Base class for DNA and RNA sequences. Abstract."""
    PROTEIN = None #will set in moltype
    CodonAlphabet = None #will set in moltype

    def reversecomplement(self):
        """Converts a nucleic acid sequence to its reverse complement.
        Synonymn for rc."""
        return self.rc()

    def rc(self):
        """Converts a nucleic acid sequence to its reverse complement."""
        complement = self.MolType.rc(self)
        rc = self.__class__(complement, Name=self.Name, Info=self.Info)
        # annotations must be flipped to match the reversed coordinates
        self._annotations_nucleic_reversed_on(rc)
        return rc

    def _gc_from_arg(self, gc):
        # codon_alphabet is being deprecated in favor of genetic codes.
        # Accepts None (default code), an int/str code identifier, or a
        # genetic-code object, and normalizes to a code object.
        if gc is None:
            gc = DEFAULT_GENETIC_CODE
        elif isinstance(gc, (int, basestring)):
            gc = GeneticCodes[gc]
        return gc

    def hasTerminalStop(self, gc=None):
        """Return True if the sequence has a terminal stop codon.
        Arguments:
            - gc: a genetic code"""
        gc = self._gc_from_arg(gc)
        codons = self._seq
        assert len(codons) % 3 == 0
        # NOTE: returns '' (falsy), not False, for an empty sequence
        return codons and gc.isStop(codons[-3:])

    def withoutTerminalStopCodon(self, gc=None):
        """Returns a copy of self with any terminal stop codon removed.
        Arguments:
            - gc: a genetic code"""
        gc = self._gc_from_arg(gc)
        codons = self._seq
        assert len(codons) % 3 == 0, "seq length not divisible by 3"
        if codons and gc.isStop(codons[-3:]):
            codons = codons[:-3]
        return self.__class__(codons, Name=self.Name, Info=self.Info)

    def getTranslation(self, gc=None):
        """Returns the protein translation of self.
        Arguments:
            - gc: a genetic code
        Raises ValueError for a codon whose resolutions are all stops."""
        gc = self._gc_from_arg(gc)
        codon_alphabet = self.CodonAlphabet(gc).withGapMotif()
        # translate the codons
        translation = []
        for posn in range(0, len(self._seq)-2, 3):
            orig_codon = self._seq[posn:posn+3]
            # an ambiguous codon expands to all codons it could resolve to
            resolved = codon_alphabet.resolveAmbiguity(orig_codon)
            trans = []
            for codon in resolved:
                if codon == '---':
                    aa = '-'
                else:
                    assert '-' not in codon
                    aa = gc[codon]
                    if aa == '*':
                        # stop resolutions are dropped from the candidate set
                        continue
                trans.append(aa)
            if not trans:
                raise ValueError(orig_codon)
            # collapse candidates back to one (possibly ambiguous) residue
            aa = self.PROTEIN.whatAmbiguity(trans)
            translation.append(aa)
        translation = self.PROTEIN.makeSequence(
            Seq=''.join(translation), Name=self.Name)
        return translation

    def getOrfPositions(self, gc=None, atg=False):
        """Returns a list of (start, end) ORF spans in nucleotide coordinates.
        Arguments:
            - gc: a genetic code
            - atg: if True, an ORF must begin with a start codon"""
        gc = self._gc_from_arg(gc)
        orfs = []
        start = None
        protein = self.getTranslation(gc=gc)
        for (posn, aa) in enumerate(protein):
            posn *= 3  # amino-acid index -> nucleotide position
            if aa == '*':
                if start is not None:
                    orfs.append((start, posn))
                    start = None
            else:
                if start is None:
                    if (not atg) or gc.isStart(self[posn:posn+3]):
                        start = posn
        # an ORF still open at the end of the sequence is closed here
        if start is not None:
            orfs.append((start, posn+3))
        return orfs

    def toRna(self):
        """Returns copy of self as RNA."""
        return RnaSequence(self)

    def toDna(self):
        """Returns copy of self as DNA."""
        return DnaSequence(self)
class DnaSequence(NucleicAcidSequence):
    """Holds the standard DNA sequence. MolType set in moltype module."""

    def getColourScheme(self, colours):
        """Returns the base -> colour mapping used for display."""
        return {
            'A': colours.black,
            'T': colours.red,
            'C': colours.blue,
            'G': colours.green,
        }

    def _seq_filter(self, seq):
        """Converts U to T."""
        return seq.replace('u','t').replace('U','T')
class RnaSequence(NucleicAcidSequence):
    """Holds the standard RNA sequence. MolType set in moltype module."""

    def getColourScheme(self, colours):
        """Returns the base -> colour mapping used for display."""
        return {
            'A': colours.black,
            'U': colours.red,
            'C': colours.blue,
            'G': colours.green,
        }

    def _seq_filter(self, seq):
        """Converts T to U."""
        return seq.replace('t','u').replace('T','U')
class ABSequence(Sequence):
    """Used for two-state modeling; MolType set in moltypes."""
    pass
class ByteSequence(Sequence):
    """Used for storing arbitrary bytes."""

    def __init__(self, Seq='', Name=None, Info=None, check=False, \
                 preserve_case=True):
        # preserve_case defaults to True (unlike the base class) because
        # arbitrary bytes must not be case-folded.
        # NOTE: __init__ returns the superclass result (None) -- harmless.
        return super(ByteSequence, self).__init__(Seq, Name=Name, Info=Info, \
            check=check, preserve_case=preserve_case)
class ModelSequenceBase(object):
    """Holds the information for a non-degenerate sequence. Mutable.

    A ModelSequence is an array of indices of symbols, where those symbols are
    defined by an Alphabet. This representation of Sequence is convenient for
    counting symbol frequencies or tuple frequencies, remapping data (e.g. for
    reverse-complement), looking up model parameters, etc. Its main drawback is
    that the sequences can no longer be treated as strings, and conversion
    to/from strings can be fairly time-consuming. Also, any symbol not in the
    Alphabet cannot be represented at all.

    A sequence can have a Name, which will be used for output in formats
    such as FASTA.

    A sequence Class has an alphabet (which can be overridden in instances
    where necessary), a delimiter used for string conversions, a LineWrap
    for wrapping characters into lines for e.g. FASTA output.

    Note that a ModelSequence _must_ have an Alphabet, not a MolType,
    because it is often important to store just a subset of the possible
    characters (e.g. the non-degenerate bases) for modeling purposes.
    """
    Alphabet = None #REPLACE IN SUBCLASSES
    MolType = None #REPLACE IN SUBCLASSES
    Delimiter = '' #Used for string conversions
    LineWrap = 80 #Wrap sequences at 80 characters by default.

    def __init__(self, data='', Alphabet=None, Name=None, Info=None, \
                 check='ignored'):
        """Initializes sequence from data and alphabet.

        WARNING: Does not validate the data or alphabet for compatibility.
        This is for speed. Use isValid() to check whether the data
        is consistent with the alphabet.

        WARNING: If data has name and/or Info, gets ref to same object rather
        than copying in each case.
        """
        if Name is None and hasattr(data, 'Name'):
            Name = data.Name
        if Info is None and hasattr(data, 'Info'):
            Info = data.Info
        #set the label
        self.Name = Name
        #override the class alphabet if supplied
        if Alphabet is not None:
            self.Alphabet = Alphabet
        #if we haven't already set self._data (e.g. in a subclass __init__),
        #guess the data type and set it here
        if not hasattr(self, '_data'):
            #if data is a sequence, copy its data and alphabet
            if isinstance(data, ModelSequence):
                self._data = data._data
                self.Alphabet = data.Alphabet
            #if it's an array
            elif type(data) == ARRAY_TYPE:
                self._data = data
            else: #may be set in subclass init
                self._from_sequence(data)
        self.MolType = self.Alphabet.MolType
        self.Info = Info

    def __getitem__(self, *args):
        """__getitem__ returns char or slice, as same class."""
        if len(args) == 1 and not isinstance(args[0], slice):
            # single index: wrap in a length-1 array so the result is a
            # sequence, not a bare scalar
            result = array([self._data[args[0]]])
        else:
            result = self._data.__getitem__(*args)
        return self.__class__(result)

    def __cmp__(self, other):
        """__cmp__ compares based on string"""
        return cmp(str(self), other)

    def _from_sequence(self, data):
        """Fills self using the values in data, via the Alphabet."""
        if self.Alphabet:
            self._data = array(self.Alphabet.toIndices(data), \
                self.Alphabet.ArrayType)
        else:
            self._data = array(data)

    def __str__(self):
        """Uses alphabet to convert self to string, using delimiter."""
        if hasattr(self.Alphabet, 'toString'):
            return self.Alphabet.toString(self._data)
        else:
            return self.Delimiter.join(map(str, \
                self.Alphabet.fromIndices(self._data)))

    def __len__(self):
        """Returns length of data."""
        return len(self._data)

    def toFasta(self, make_seqlabel=None):
        """Return string of self in FASTA format, no trailing newline

        Arguments:
            - make_seqlabel: callback function that takes the seq object and
              returns a label str
        """
        return fasta_from_sequences([self], make_seqlabel=make_seqlabel,
            line_wrap=self.LineWrap)

    def toPhylip(self, name_len=28, label_len=30):
        """Return string of self in one line for PHYLIP, no newline.

        Default: max name length is 28, label length is 30.
        """
        return str(self.Name)[:name_len].ljust(label_len) + str(self)

    def isValid(self):
        """Checks that no items in self are out of the Alphabet range."""
        # Bug fix: original called self._data.clip(m, 0, len(...)-1) where 'm'
        # is undefined (NameError), apparently a leftover of the module-level
        # clip(arr, min, max) form. Clipping to the alphabet range and
        # comparing detects any out-of-range index.
        return (self._data == self._data.clip(0, len(self.Alphabet) - 1)).all()

    def toKwords(self, k, overlapping=True):
        """Turns sequence into sequence of its k-words.

        Just returns array, not Sequence object."""
        alpha_len = len(self.Alphabet)
        seq = self._data
        seq_len = len(seq)
        if overlapping:
            num_words = seq_len - k + 1
        else:
            num_words, remainder = divmod(seq_len, k)
            last_index = num_words * k
        result = zeros(num_words)
        # accumulate each word as a base-alpha_len number
        for i in range(k):
            if overlapping:
                curr_slice = seq[i:i+num_words]
            else:
                curr_slice = seq[i:last_index+i:k]
            result *= alpha_len
            result += curr_slice
        return result

    def __iter__(self):
        """iter returns characters of self, rather than slices."""
        if hasattr(self.Alphabet, 'toString'):
            return iter(self.Alphabet.toString(self._data))
        else:
            # Bug fix: original read 'self.Alpabet' (typo), which raised
            # AttributeError whenever this fallback branch was taken.
            return iter(self.Alphabet.fromIndices(self._data))

    def tostring(self):
        """tostring delegates to self._data."""
        return self._data.tostring()

    def gaps(self):
        """Returns array containing 1 where self has gaps, 0 elsewhere.

        WARNING: Only checks for standard gap character (for speed), and
        does not check for ambiguous gaps, etc.
        """
        return self._data == self.Alphabet.GapIndex

    def nongaps(self):
        """Returns array containing 0 where self has gaps, 1 elsewhere.

        WARNING: Only checks for standard gap character (for speed), and
        does not check for ambiguous gaps, etc.
        """
        return self._data != self.Alphabet.GapIndex

    def regap(self, other, strip_existing_gaps=False):
        """Inserts elements of self into gaps specified by other.

        WARNING: Only checks for standard gap character (for speed), and
        does not check for ambiguous gaps, etc.
        """
        if strip_existing_gaps:
            s = self.degap()
        else:
            s = self
        c = self.__class__
        a = self.Alphabet.Gapped
        # start from an all-gap template, then scatter s into the non-gap
        # positions of other
        result = zeros(len(other), a.ArrayType) + a.GapIndex
        put(result, nonzero(other.nongaps()), s._data)
        return c(result)

    def degap(self):
        """Returns ungapped copy of self, not changing alphabet."""
        if not hasattr(self.Alphabet, 'Gap') or self.Alphabet.Gap is None:
            return self.copy()
        d = take(self._data, nonzero(logical_not(self.gapArray()))[0])
        return self.__class__(d, Alphabet=self.Alphabet, Name=self.Name, \
            Info=self.Info)

    def copy(self):
        """Returns copy of self, always separate object."""
        return self.__class__(self._data.copy(), Alphabet=self.Alphabet, \
            Name=self.Name, Info=self.Info)

    def __contains__(self, item):
        """Returns true if item in self (converts to strings)."""
        return item in str(self)

    def disambiguate(self, *args, **kwargs):
        """Disambiguates self using strings/moltype. Should recode if demand."""
        return self.__class__(self.MolType.disambiguate(str(self), \
            *args, **kwargs))

    def distance(self, other, function=None, use_indices=False):
        """Returns distance between self and other using function(i,j).

        other must be a sequence.

        function should be a function that takes two items and returns a
        number. To turn a 2D matrix into a function, use
        cogent.util.miscs.DistanceFromMatrix(matrix).

        use_indices: if False, maps the indices onto items (e.g. assumes
        function relates the characters). If True, uses the indices directly.

        NOTE: Truncates at the length of the shorter sequence.

        Note that the function acts on two _elements_ of the sequences, not
        the two sequences themselves (i.e. the behavior will be the same for
        every position in the sequences, such as identity scoring or a function
        derived from a distance matrix as suggested above). One limitation of
        this approach is that the distance function cannot use properties of
        the sequences themselves: for example, it cannot use the lengths of the
        sequences to normalize the scores as percent similarities or percent
        differences.

        If you want functions that act on the two sequences themselves, there
        is no particular advantage in making these functions methods of the
        first sequences by passing them in as parameters like the function
        in this method. It makes more sense to use them as standalone functions.
        The factory function cogent.util.transform.for_seq is useful for
        converting per-element functions into per-sequence functions, since it
        takes as parameters a per-element scoring function, a score aggregation
        function, and a normalization function (which itself takes the two
        sequences as parameters), returning a single function that combines
        these functions and that acts on two complete sequences.
        """
        if function is None:
            #use identity scoring
            shortest = min(len(self), len(other))
            if not hasattr(other, '_data'):
                other = self.__class__(other)
            distance = (self._data[:shortest] != other._data[:shortest]).sum()
        else:
            distance = 0
            if use_indices:
                self_seq = self._data
                if hasattr(other, '_data'):
                    other_seq = other._data
                else:
                    # Bug fix: the original left other_seq unbound here,
                    # raising UnboundLocalError for plain-sequence 'other'.
                    other_seq = other
            else:
                self_seq = self.Alphabet.fromIndices(self._data)
                if hasattr(other, '_data'):
                    other_seq = other.Alphabet.fromIndices(other._data)
                else:
                    other_seq = other
            for first, second in zip(self_seq, other_seq):
                distance += function(first, second)
        return distance

    def matrixDistance(self, other, matrix, use_indices=False):
        """Returns distance between self and other using a score matrix.

        if use_indices is True (default is False), assumes that matrix is
        an array using the same indices that self uses.

        WARNING: the matrix must explicitly contain scores for the case where
        a position is the same in self and other (e.g. for a distance matrix,
        an identity between U and U might have a score of 0). The reason the
        scores for the 'diagonals' need to be passed explicitly is that for
        some kinds of distance matrices, e.g. log-odds matrices, the 'diagonal'
        scores differ from each other. If these elements are missing, this
        function will raise a KeyError at the first position that the two
        sequences are identical.
        """
        # NOTE(review): use_indices is accepted but not forwarded to
        # distance() -- confirm whether index-based matrices are supported.
        return self.distance(other, DistanceFromMatrix(matrix))

    def shuffle(self):
        """Returns shuffled copy of self"""
        return self.__class__(permutation(self._data), Info=self.Info)

    def gapArray(self):
        """Returns array of 0/1 indicating whether each position is a gap."""
        gap_indices = []
        a = self.Alphabet
        for c in self.MolType.Gaps:
            if c in a:
                gap_indices.append(a.index(c))
        gap_vector = None
        # OR together the masks for every gap character in the MolType
        for i in gap_indices:
            if gap_vector is None:
                gap_vector = self._data == i
            else:
                gap_vector = logical_or(gap_vector, self._data == i)
        return gap_vector

    def gapIndices(self):
        """Returns array of indices of gapped positions in self."""
        return self.gapArray().nonzero()[0]

    def fracSameGaps(self, other):
        """Returns fraction of positions where gaps match other's gaps.
        """
        if not other:
            return 0
        self_gaps = self.gapArray()
        if hasattr(other, 'gapArray'):
            other_gaps = other.gapArray()
        elif hasattr(other, 'gapVector'):
            other_gaps = array(other.gapVector())
        else:
            other_gaps = array(self.MolType.gapVector(other))
        min_len = min(len(self), len(other))
        self_gaps, other_gaps = self_gaps[:min_len], other_gaps[:min_len]
        return (self_gaps == other_gaps).sum()/float(min_len)
class ModelSequence(ModelSequenceBase, SequenceI):
    """ModelSequence provides an array-based implementation of Sequence.

    Use ModelSequenceBase if you need a stripped-down, fast implementation.
    ModelSequence implements everything that SequenceI implements.

    See docstrings for ModelSequenceBase and SequenceI for information about
    these respective classes.
    """

    def stripBad(self):
        """Returns copy of self with bad chars excised"""
        # indices >= alphabet length cannot be valid symbols
        valid_indices = self._data < len(self.Alphabet)
        result = compress(valid_indices, self._data)
        return self.__class__(result, Info=self.Info)

    def stripBadAndGaps(self):
        """Returns copy of self with bad chars and gaps excised."""
        gap_indices = map(self.Alphabet.index, self.MolType.Gaps)
        valid_indices = self._data < len(self.Alphabet)
        for i in gap_indices:
            # NOTE(review): in-place '-=' on a boolean mask relies on
            # old-numpy subtraction semantics -- verify on current numpy
            valid_indices -= self._data == i
        result = compress(valid_indices, self._data)
        return self.__class__(result, Info=self.Info)

    def stripDegenerate(self):
        """Returns copy of self without degenerate symbols.

        NOTE: goes via string intermediate because some of the algorithms
        for resolving degenerates are complex. This could be optimized if
        speed becomes critical.
        """
        return self.__class__(self.MolType.stripDegenerate(str(self)), \
            Info=self.Info)

    def countGaps(self):
        """Returns count of gaps in self."""
        return self.gapArray().sum()

    def gapVector(self):
        """Returns list of bool containing whether each pos is a gap."""
        return map(bool, self.gapArray())

    def gapList(self):
        """Returns list of gap indices."""
        return list(self.gapIndices())

    def gapMaps(self):
        """Returns dicts mapping gapped/ungapped positions."""
        nongaps = logical_not(self.gapArray())
        indices = arange(len(self)).compress(nongaps)
        new_indices = arange(len(indices))
        # (ungapped -> gapped, gapped -> ungapped)
        return dict(zip(new_indices, indices)), dict(zip(indices, new_indices))

    def firstGap(self):
        """Returns position of first gap, or None."""
        a = self.gapIndices()
        try:
            return a[0]
        except IndexError:
            return None

    def isGapped(self):
        """Returns True of sequence contains gaps."""
        # actually returns the gap count (truthy/falsy), not a strict bool
        return len(self.gapIndices())

    def MW(self, *args, **kwargs):
        """Returns molecular weight.

        Works via string intermediate: could optimize using array of MW if
        speed becomes important.
        """
        return self.MolType.MW(str(self), *args, **kwargs)

    def fracSimilar(self, other, similar_pairs):
        """Returns fraction of positions where self[i] is similar to other[i].

        similar_pairs must be a dict such that d[(i,j)] exists if i and j are
        to be counted as similar. Use PairsFromGroups in cogent.util.misc to
        construct such a dict from a list of lists of similar residues.

        Truncates at the length of the shorter sequence.

        Note: current implementation re-creates the distance function each
        time, so may be expensive compared to creating the distance function
        using for_seq separately.

        Returns 0 if one sequence is empty.

        NOTE: goes via string intermediate, could optimize using array if
        speed becomes important. Note that form of similar_pairs input would
        also have to change.
        """
        if not self or not other:
            return 0.0
        return for_seq(f=lambda x, y: (x,y) in similar_pairs, \
            normalizer=per_shortest)(str(self), str(other))
class ModelNucleicAcidSequence(ModelSequence):
    """Abstract class defining ops for codons, translation, etc."""

    def toCodons(self):
        """Returns copy of self in codon alphabet. Assumes ungapped."""
        # each codon is packed as a base-alpha_len number of its three bases
        alpha_len = len(self.Alphabet)
        return ModelCodonSequence(alpha_len*(\
            alpha_len*self._data[::3] + self._data[1::3]) + self._data[2::3], \
            Name=self.Name, Alphabet=self.Alphabet.Triples)

    def complement(self):
        """Returns complement of sequence"""
        # _complement_array maps each base index to its complement's index
        return self.__class__(self.Alphabet._complement_array.take(self._data),\
            Info=self.Info)

    def rc(self):
        """Returns reverse-complement of sequence"""
        comp = self.Alphabet._complement_array.take(self._data)
        return self.__class__(comp[::-1], Info=self.Info)

    def toRna(self):
        """Returns self as RNA"""
        return ModelRnaSequence(self._data)

    def toDna(self):
        """Returns self as DNA"""
        return ModelDnaSequence(self._data)
class ModelRnaSequence(ModelNucleicAcidSequence):
    """Holds a non-degenerate RNA sequence; T is converted to U on input."""
    MolType = None #set to RNA in moltype.py
    Alphabet = None #set to RNA.Alphabets.DegenGapped in moltype.py

    def __init__(self, data='', *args, **kwargs):
        """Returns new ModelRnaSequence, converting T -> U"""
        if hasattr(data, 'upper'):
            data = data.upper().replace('T','U')
        # Fix: super() must be given this class, not its parent -- naming the
        # parent would skip any future ModelNucleicAcidSequence.__init__ in
        # the MRO. Also dropped the 'return' (an __init__ returns None).
        super(ModelRnaSequence, self).__init__(data, *args, **kwargs)
class ModelDnaSequence(ModelNucleicAcidSequence):
    """Holds a non-degenerate DNA sequence; U is converted to T on input."""
    MolType = None #set to DNA in moltype.py
    Alphabet = None #set to DNA.Alphabets.DegenGapped in moltype.py

    def __init__(self, data='', *args, **kwargs):
        """Returns new ModelDnaSequence, converting U -> T"""
        # (docstring fixed: original said ModelRnaSequence)
        if hasattr(data, 'upper'):
            data = data.upper().replace('U','T')
        # Fix: super() must be given this class, not its parent -- naming the
        # parent would skip any future ModelNucleicAcidSequence.__init__ in
        # the MRO. Also dropped the 'return' (an __init__ returns None).
        super(ModelDnaSequence, self).__init__(data, *args, **kwargs)
class ModelCodonSequence(ModelSequence):
    """Abstract base class for codon sequences, incl. string conversion."""
    SequenceClass = ModelNucleicAcidSequence

    def __str__(self):
        """Joins triplets together as string."""
        return self.Delimiter.join(map(''.join, \
            self.Alphabet.fromIndices(self._data)))

    def _from_string(self, s):
        """Reads from a raw string, rather than a DnaSequence."""
        s = s.upper().replace('U','T') #convert to uppercase DNA
        d = self.SequenceClass(s, \
            Alphabet=self.Alphabet.SubEnumerations[0])
        self._data = d.toCodons()._data

    def __init__(self, data='', Alphabet=None, Name=None, Info=None):
        """Override __init__ to handle init from string."""
        if isinstance(data, str):
            # sets self._data, so the base __init__ skips data conversion
            self._from_string(data)
        ModelSequence.__init__(self, data, Alphabet, Name, Info=Info)

    def toCodons(self):
        """Converts self to codons -- in practice, just returns self.

        Supports interface of other NucleicAcidSequences."""
        return self

    def toDna(self):
        """Returns a ModelDnaSequence from the data in self"""
        # unpack each packed codon index back into its three base indices
        unpacked = self.Alphabet.unpackArrays(self._data)
        result = zeros((len(self._data),3))
        for i, v in enumerate(unpacked):
            result[:,i] = v
        return ModelDnaSequence(ravel(result), Name=self.Name)

    def toRna(self):
        """Returns a ModelRnaSequence from the data in self."""
        # (docstring fixed: original said ModelDnaSequence)
        unpacked = self.Alphabet.unpackArrays(self._data)
        result = zeros((len(self._data),3))
        for i, v in enumerate(unpacked):
            result[:,i] = v
        return ModelRnaSequence(ravel(result), Name=self.Name)
class ModelDnaCodonSequence(ModelCodonSequence):
    """Holds non-degenerate DNA codon sequence."""
    Alphabet = None #set to DNA.Alphabets.Base.Triples in moltype.py
    SequenceClass = ModelDnaSequence
class ModelRnaCodonSequence(ModelCodonSequence):
    """Holds non-degenerate RNA codon sequence."""
    # (docstring fixed: original said DNA)
    Alphabet = None #set to RNA.Alphabets.Base.Triples in moltype.py
    SequenceClass = ModelRnaSequence

    def _from_string(self, s):
        """Reads from a raw string, rather than an RnaSequence."""
        s = s.upper().replace('T','U') #convert to uppercase RNA
        d = self.SequenceClass(s, \
            Alphabet=self.Alphabet.SubEnumerations[0])
        self._data = d.toCodons()._data
class ModelProteinSequence(ModelSequence):
    """Holds an array-based protein sequence."""
    MolType = None #set to PROTEIN in moltype.py
    Alphabet = None #set to PROTEIN.Alphabets.DegenGapped in moltype.py
class ModelProteinWithStopSequence(ModelSequence):
    """Holds an array-based protein sequence that allows stop codons."""
    MolType = None #set to PROTEIN_WITH_STOP in moltype.py
    Alphabet = None #set to PROTEIN_WITH_STOP.Alphabets.DegenGapped in moltype.py
|
"""
List of some ECP URLs and strings
"""
import re
from urllib.parse import quote_plus
# Default (EMEA) endpoint parameters, used when a region is unknown.
BASE_URL = "https://api.emea.ecp.electrolux.com"
X_API_KEY = "<KEY>"  # placeholder -- presumably substituted at deploy time
BRAND = "Electrolux"

# region name -> [base URL, x-api-key (client id), brand string]
_region_params = {
    "emea": ["https://api.emea.ecp.electrolux.com",
             "714fc3c7-ad68-4c2f-9a1a-b3dbe1c8bb35",
             "Electrolux"],
    "apac": ["https://api.apac.ecp.electrolux.com",
             "1c064d7a-c02e-438c-9ac6-78bf7311ba7c",
             "Electrolux"],
    # NOTE(review): "na" points at the latam host -- confirm this is intended
    "na": ["https://api.latam.ecp.electrolux.com",
           "dc9cfac1-4a29-4509-9041-9ae4a0572aac",
           "Electrolux-NA"],
    "latam": ["https://api.latam.ecp.electrolux.com",
              "3aafa8f0-9fd8-454d-97f6-f46e87b280e2",
              "Electrolux"],
    "frigidaire": ["https://api.latam.ecp.electrolux.com",
                   "7ff2358e-8d6d-4cf6-814a-fcb498fa2cf9",
                   "frigidaire"]
}
def getEcpClientUrl(region):
    """Return the ECP API base URL for *region*, or the default BASE_URL."""
    try:
        return _region_params[region][0]
    except KeyError:
        return BASE_URL
def getEcpClientId(region):
    """Return the x-api-key (client id) for *region*, or the default key."""
    try:
        return _region_params[region][1]
    except KeyError:
        return X_API_KEY
def getEcpClientBrand(region):
    """Return the brand string for *region*, or the default BRAND."""
    try:
        return _region_params[region][2]
    except KeyError:
        return BRAND
#Authenticate (get Session key)
def login():
    """[url, method] pair for the authentication endpoint."""
    url = "{base_url}/authentication/authenticate".format(base_url=BASE_URL)
    return [url, "POST"]
#Get appliances list registered to account
def getAppliances(username):
    """[url, method] pair listing appliances registered to *username*.

    The username is URL-quoted; any encoded '/' (%2f) is then replaced by a
    literal 'f' -- presumably to keep the path segment safe for the backend.
    """
    # Raw string fixes the invalid '\%' escape of the original (a
    # SyntaxWarning/DeprecationWarning in Python 3); the regex matches the
    # same text, so behavior is unchanged.
    safe_user = re.sub(r"(?i)%2f", "f", quote_plus(username))
    return ["{base_url}/user-appliance-reg/users/{username}/appliances".format(
                base_url=BASE_URL,
                username=safe_user),
            "GET"
            ]
#Get general HACL map
def getHaclMap():
    """[url, method] pair for the general HACL map."""
    url = "{base_url}/config-files/haclmap".format(base_url=BASE_URL)
    return [url, "GET"]
#Get list of supported appliances
def getApplianceConfigurations():
    """[url, method] pair listing supported appliance configurations."""
    url = "{base_url}/config-files/configurations".format(base_url=BASE_URL)
    return [url, "GET"]
#Get appliance connection state
def getApplianceConnectionState(appliance):
    """[url, method] pair querying only the ConnectivityState of *appliance*.

    appliance is a dict with at least "pnc", "elc" and "sn" keys.
    """
    def _safe(value):
        # Raw string fixes the invalid '\%' escape of the original; quoting
        # then replacing encoded '/' (%2f) with 'f' is unchanged behavior.
        return re.sub(r"(?i)%2f", "f", quote_plus(value))
    return ["{base_url}/elux-ms/appliances/latest?pnc={pnc}&elc={elc}&sn={sn}&states=ConnectivityState&includeSubcomponents=false".format(
                base_url=BASE_URL,
                pnc=_safe(appliance["pnc"]),
                sn=_safe(appliance["sn"]),
                elc=_safe(appliance["elc"])),
            "GET"
            ]
#Get appliance parameter state
def getApplianceParameterState(appliance, parameter):
    """[url, method] pair querying one named state *parameter* of *appliance*.

    appliance is a dict with at least "pnc", "elc" and "sn" keys.
    """
    def _safe(value):
        # Raw string fixes the invalid '\%' escape of the original; quoting
        # then replacing encoded '/' (%2f) with 'f' is unchanged behavior.
        return re.sub(r"(?i)%2f", "f", quote_plus(value))
    return ["{base_url}/elux-ms/appliances/latest?pnc={pnc}&elc={elc}&sn={sn}&states={param}&includeSubcomponents=true".format(
                base_url=BASE_URL,
                pnc=_safe(appliance["pnc"]),
                sn=_safe(appliance["sn"]),
                elc=_safe(appliance["elc"]),
                param=_safe(parameter)),
            "GET"
            ]
#Get all appliance parameters state
def getApplianceAllStates(appliance):
    """[url, method] pair querying every state of *appliance*.

    appliance is a dict with at least "pnc", "elc" and "sn" keys.
    """
    def _safe(value):
        # Raw string fixes the invalid '\%' escape of the original; quoting
        # then replacing encoded '/' (%2f) with 'f' is unchanged behavior.
        return re.sub(r"(?i)%2f", "f", quote_plus(value))
    return ["{base_url}/elux-ms/appliances/latest?pnc={pnc}&elc={elc}&sn={sn}&includeSubcomponents=true".format(
                base_url=BASE_URL,
                pnc=_safe(appliance["pnc"]),
                sn=_safe(appliance["sn"]),
                elc=_safe(appliance["elc"])),
            "GET"
            ]
#Send command to appliance
def setApplianceCommand(appliance):
    """[url, method] pair for sending a JSON command to *appliance*.

    appliance is a dict with at least "pnc", "elc", "sn" and "mac" keys.
    """
    def _safe(value):
        # Raw string fixes the invalid '\%' escape of the original; quoting
        # then replacing encoded '/' (%2f) with 'f' is unchanged behavior.
        return re.sub(r"(?i)%2f", "f", quote_plus(value))
    return ["{base_url}/commander/remote/sendjson?pnc={pnc}&elc={elc}&sn={sn}&mac={mac}".format(
                base_url=BASE_URL,
                pnc=_safe(appliance["pnc"]),
                sn=_safe(appliance["sn"]),
                elc=_safe(appliance["elc"]),
                mac=_safe(appliance["mac"])),
            "POST"
            ]
#Get selected appliance configuration
def getApplianceConfigurationVersion(appliance):
    """[url, method] pair looking up the configuration for *appliance*.

    appliance is a dict with at least "pnc", "elc" and "sn" keys.
    """
    def _safe(value):
        # Raw string fixes the invalid '\%' escape of the original; quoting
        # then replacing encoded '/' (%2f) with 'f' is unchanged behavior.
        return re.sub(r"(?i)%2f", "f", quote_plus(value))
    return ["{base_url}/config-files/configurations/search?pnc={pnc}&elc={elc}&serial_number={sn}".format(
                base_url=BASE_URL,
                pnc=_safe(appliance["pnc"]),
                sn=_safe(appliance["sn"]),
                elc=_safe(appliance["elc"])),
            "GET"
            ]
#Download configuration file
def getApplianceConfigurationFile(configurationId):
    """[url, method] pair downloading the configuration bundle by id."""
    # Raw string fixes the invalid '\%' escape of the original; quoting then
    # replacing encoded '/' (%2f) with 'f' is unchanged behavior.
    safe_id = re.sub(r"(?i)%2f", "f", quote_plus(configurationId))
    return ["{base_url}/config-files/configurations/{configurationId}/bundle".format(
                base_url=BASE_URL,
                configurationId=safe_id),
            "GET"
            ]
#Register Client to MQTT broker
def registerMQTT():
    """[url, method] pair registering this client for the MQTT live stream."""
    url = "{base_url}/livesubscribe/livestream/register".format(
        base_url=BASE_URL)
    return [url, "POST"]
#Unregister Client from MQTT broker
def unregisterMQTT():
    """[url, method] pair unregistering this client from the MQTT stream."""
    url = "{base_url}/livesubscribe/livestream/unregister".format(
        base_url=BASE_URL)
    return [url, "POST"]
#! /usr/bin/python3
# HTML frontend
from flask import render_template, request, Flask
# Email functionality
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
# Random crap
import time
import unidecode
import sys
# Create Flask app
app = Flask(__name__)
def read_properties():
    """Read SMTP credentials from email.properties.

    Returns (user, password); either may be None if its key is missing.
    If the file does not exist, writes an empty template and raises
    Exception so the caller aborts startup.
    """
    user, password = None, None
    try:
        # 'with' guarantees the handle is closed even if parsing fails
        with open("email.properties", "r") as infile:
            for line in infile:
                # partition on the FIRST '=' only, so values may themselves
                # contain '=' (line.split("=") crashed on such lines and on
                # blank lines)
                key, _, val = line.partition("=")
                key, val = key.strip(), val.strip()
                if key == "user":
                    user = val
                elif key == "pass":
                    password = val
    except FileNotFoundError:
        with open("email.properties", "w") as outfile:
            outfile.write("user=\npass=\n")
        print("email.properties was created, please provide login info for UiO user")
        raise Exception
    return user, password
def get_text(language):
    """Return the email body stored in the file named *language*.

    Raises Exception when no template exists for the language.
    """
    try:
        # 'with' closes the handle; read() replaces the manual line loop
        with open(language, "r") as infile:
            return infile.read()
    except FileNotFoundError:
        raise Exception("No email text made for language %s!" % language)
def send_mail(to_name, to_email, text):
    """Send an HTML info mail to to_email via the UiO SMTP relay.

    Arguments:
        to_name: recipient display name (non-ASCII is transliterated)
        to_email: recipient address
        text: HTML body to send

    NOTE: relies on the module globals 'user' and 'password', which are
    set by main() from email.properties.
    """
    # Enforce to_name as ascii to comply with RFC 5322
    to_name = unidecode.unidecode(to_name)
    # Create message container
    msg = MIMEMultipart('alternative')
    msg['Subject'] = "BB-info!"
    msg['From'] = "\"Studentorchesteret Biorneblaes\" <<EMAIL>>"
    msg['To'] = "%s <%s>" % (to_name, to_email)
    msg['reply-to'] = "<EMAIL>"
    # Create message body
    html = text
    # Attach body to message container
    part2 = MIMEText(html, 'html', "utf-8")
    msg.attach(part2)
    # Send the email: EHLO, upgrade to TLS, authenticate, send, close
    s = smtplib.SMTP("smtp.uio.no", 587)
    s.ehlo()
    s.starttls()
    s.login(user, password)
    s.sendmail("<EMAIL>", to_email, msg.as_string())
    s.quit()
# Responds to user input
@app.route("/", methods=["GET"])
def main_screen():
    """Serve the signup form."""
    page = render_template('index.html')
    return page
@app.route("/send", methods=["POST"])
def send_screen():
    """Handle a signup POST: send the info mail and log the attempt.

    Every attempt is appended to the global logfile as
    name,email,language,sent-ok; the rendered page reports success or
    failure in the requested language.
    """
    # Read properties from request
    name = request.form["name"]
    email_addr = request.form["email_addr"]
    lang = request.form["lang"]
    print("Request: ", name, email_addr, lang)
    text = ""
    try:
        print("Sending email to %s..." % email_addr, end="")
        send_mail(name, email_addr, get_text(lang))
        # log successful delivery
        logfile.write("%s,%s,%s,%s\n" % (name, email_addr, lang, "true"))
        print(" OK!")
        if lang == "nobm":
            text = "Vi har sendt deg en infomail!"
        elif lang == "eng":
            text = "We have sent you an info email!"
    except Exception as e:
        print(" Fail!")
        print("Could not send automatic email. Logging email either way.")
        # still log the address so it can be contacted manually
        logfile.write("%s,%s,%s,%s\n" % (name, email_addr, lang, "false"))
        print(" -> Error message: '%s'" % str(e))
        if lang == "nobm":
            text = "Vi kunne ikke sende eposten, kan du ha skrevet feil?"
        elif lang == "eng":
            text = "We could not send the email, could you have typed something wrong?"
    # Make sure file is actually updated
    logfile.flush()
    return render_template('sent.html', infotext=text)
def main():
    """Entry point: load credentials, open the log file, run the app."""
    # Declare these as global vars, so that they are available from
    # send_mail() and send_screen()
    global user
    global password
    global logfile
    try:
        user, password = read_properties()
    except Exception:
        # narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are no longer swallowed
        print("Error in reading email settings, closing app")
        return
    try:
        if len(sys.argv) > 1:
            # append to an existing log supplied on the command line
            logfile = open(sys.argv[1], "a")
        else:
            # fresh timestamped log gets a CSV header
            # (header intentionally only for new files -- appending it to an
            # existing log would duplicate it; confirm against original intent)
            logfile = open("collected_%d.csv" % time.time(), "w")
            logfile.write("navn,epost,språk,sendt infomail\n")
            logfile.flush()
    except Exception:
        print("Error in creating logfile, closing app")
        return
    app.run()
    logfile.close()
# Run the app only when executed directly (not when imported).
if __name__ == '__main__':
    main()
|
<filename>scripts/hard_box.py
# -*- coding: utf-8 -*-
"""
Program: hard_box
Created: Apr 2020
@author: <NAME> (RRCC)
<EMAIL>
"""
### IMPORTS
import random
import math as m
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.ticker import AutoMinorLocator
from matplotlib import cm
### SET COLOR MAP
# Circle outlines are colored from this map, indexed by each circle's
# normalized initial x-position (HB.__init__ sets xC = x/boxR).
colors = cm.get_cmap('gist_rainbow')
# colors = cm.get_cmap('seismic')
### CLASSES
class HB:
    """
    HB: Hard Box Class
    NOTE: Circles interact with each other via elastic collisions, i.e.
          2D version of hard sphere scattering.
    NOTE: Circles interact with the walls of the box.
    """
    # Class Variables
    numInstances = 0  # #       Number of instanciations of this class.
    dt = 0.01         # seconds Time-Step (shared by all circles)
    boxU = 10.0       # meters  Top of Box (Up)
    boxD = 0.0        # meters  Bottom of Box (Down)
    boxL = 0.0        # meters  Left Side of Box (Left)
    boxR = 10.0       # meters  Right Side of Box (Right)
    figW = 8          # inches  Width of Figure (Plot)
    figH = 8          # inches  Height of Figure (Plot)

    def __init__(self, x=0, y=0, vx=0, vy=0, r=0.1):
        """
        Hard Box Constructor

        Parameters
        ----------
        x : DOUBLE, optional
            X-coordinate of circle center [m]. The default is 0.
        y : DOUBLE, optional
            Y-coordinate of circle center [m]. The default is 0.
        vx : DOUBLE, optional
            X-component of circle velocity [m/s].The default is 0.
        vy : DOUBLE, optional
            Y-component of circle velocity [m/s]. The default is 0.
        r : DOUBLE, optional
            Radius of circle [m]. The default is 0.1.

        Returns
        -------
        None.
        """
        HB.numInstances += 1
        self.x = x
        self.xC = x/HB.boxR   # normalized x, used only to pick a color
        self.y = y
        self.vx = vx
        self.vy = vy
        self.r = r
        self.t = 0.0
        v = m.hypot(vx, vy)
        # Time step control: prevent circle centers from crossing in a
        # single time-step. Bug fix: the original divided by v
        # unconditionally, so a stationary circle (vx = vy = 0 -- the
        # default arguments!) raised ZeroDivisionError.
        if v > 0.0:
            dt = r/(2.0*v)
            HB.dt = min(HB.dt, dt)
        self.graphic = plt.Circle((x, y), radius=r, fill=False,
                                  color=colors(self.xC), linewidth=1)

    def __del__(self):
        """
        Hard Box Destructor
        """
        HB.numInstances -= 1

    def move(self):
        """
        Move circle according to its velocity (explicit Euler step),
        then reflect off the walls if needed.
        """
        dt = HB.dt
        vx = self.vx
        vy = self.vy
        # X
        self.x += vx*dt
        # Y
        self.y += vy*dt
        # Collision with wall?
        self.__boundaries()
        # Time
        self.t += dt

    def __boundaries(self):
        # Reflect the velocity component and clamp the center one radius
        # inside the wall whenever the circle has penetrated a boundary.
        # (An exact time-of-impact correction was considered but the simple
        # clamp is sufficient at this dt.)
        x = self.x
        y = self.y
        r = self.r
        bD = HB.boxD
        bU = HB.boxU
        bL = HB.boxL
        bR = HB.boxR
        # Y
        if( y < bD+r ):
            self.vy *= -1.0
            self.y = bD + r
        elif( y > bU-r ):
            self.vy *= -1.0
            self.y = bU - r
        # X
        if( x < bL+r ):
            self.vx *= -1.0
            self.x = bL + r
        elif( x > bR-r ):
            self.vx *= -1.0
            self.x = bR - r

    def updateGraphic(self):
        """
        Update graphic after a move.
        """
        self.graphic = plt.Circle((self.x, self.y), radius=self.r,
                                  color=colors(self.xC),
                                  fill=False, linewidth=1)
# END: HB
### END: CLASSES
### FUNCTIONS
## Collision Functions:
def collision(balls):
    """
    Detect and resolve pairwise collisions between all circles.

    Step 1: Detect collisions
        Every unordered pair (i, j) is tested; a collision occurs when the
        center distance dr falls to or below d, the sum of the two radii.
    Step 2: Handle collisions
        Equal-mass elastic collision: the velocity component along the line
        of centers is exchanged between the two circles.

    Parameters
    ----------
    balls : Python list.
        List of HB instances.

    Returns
    -------
    None.
    """
    # NOTE(review): iterates over HB.numInstances rather than len(balls) --
    # assumes every live HB instance is in `balls`; confirm at call sites.
    for i in np.arange(HB.numInstances-1):
        for j in np.arange(i+1, HB.numInstances):
            d = balls[i].r + balls[j].r
            drx = balls[i].x - balls[j].x
            dry = balls[i].y - balls[j].y
            dr = m.hypot(drx, dry)
            if( dr < d ):
                # COLLISION! Case #1: Penetration
                # Game-engine-style correction: push each circle half the
                # overlap apart along the line of centers so they just touch.
                offset = (d - dr)/2.0
                dx = offset*drx/dr
                dy = offset*dry/dr
                xiNew = balls[i].x + dx
                yiNew = balls[i].y + dy
                xjNew = balls[j].x - dx
                yjNew = balls[j].y - dy
                # Recompute the separation after the correction; its length
                # is now exactly d, matching the d*d normalization below.
                drx = xiNew - xjNew
                dry = yiNew - yjNew
                dvx = balls[i].vx - balls[j].vx
                dvy = balls[i].vy - balls[j].vy
                # Exchange the velocity component along the line of centers
                # (equal-mass elastic collision).
                fac = (dvx*drx + dvy*dry)/(d*d)
                delvx = fac*drx
                delvy = fac*dry
                balls[i].vx -= delvx
                balls[i].vy -= delvy
                balls[j].vx += delvx
                balls[j].vy += delvy
                balls[i].x = xiNew
                balls[i].y = yiNew
                balls[j].x = xjNew
                balls[j].y = yjNew
            elif( dr == d ):
                # COLLISION! Case #2: Perfect (exact touching)
                # This is going to be a VERY rare event ...
                dvx = balls[i].vx - balls[j].vx
                dvy = balls[i].vy - balls[j].vy
                fac = (dvx*drx + dvy*dry)/(d*d)
                delvx = fac*drx
                delvy = fac*dry
                balls[i].vx -= delvx
                balls[i].vy -= delvy
                balls[j].vx += delvx
                balls[j].vy += delvy
### END: Collision Functions
## Animation Functions:
def init():
    """FuncAnimation init hook: begin each animation cycle with no artists."""
    return []
def animate(i):
    """
    FuncAnimation callback: advance the simulation one frame and redraw.

    Uses the module-level ``ax`` and ``hbList`` set up in the __main__
    block.  Returns the list of artists for blitting.
    """
    # Rebuild the axes decorations from scratch every frame.
    ax.clear()
    ax.grid(b=True, which='major', color='lightgrey')
    # ax.grid(b=True, which='minor', color='lightgrey')
    ax.xaxis.set_minor_locator(AutoMinorLocator(10))
    ax.yaxis.set_minor_locator(AutoMinorLocator(10))
    ax.set_title('Impenetrable Circles')
    ax.axis('scaled')
    ax.set_xlim([HB.boxL,HB.boxR])
    ax.set_ylim([HB.boxD,HB.boxU])
    tText = ax.text(4, 9.5, '')
    patches = []
    # Frame 0 just draws the initial state; later frames advance physics.
    if( i > 0 ):
        # N time-steps per graphics update (currently N == 1).
        for j in np.arange(1):
            for hb in hbList:
                hb.move()
            collision(hbList)
        for hb in hbList:
            hb.updateGraphic()
        # Graphics update
        s = 'Time = %.2f s' % hbList[0].t
        tText.set_text(s)
    for hb in hbList:
        patches.append(ax.add_patch(hb.graphic))
    patches.append(tText)
    return patches
## END: Animation Functions
### END: FUNCTIONS
if __name__ == '__main__':
    # Build a numCircles x numCircles grid of circles with random velocities,
    # then animate the resulting 2D hard-sphere gas.
    numCircles = 3  # Number of circles along an axis. Total number of
                    # circles is numCircles**2
    dW = HB.boxR/(numCircles+1)  # Horizontal grid spacing.
    dH = HB.boxU/(numCircles+1)  # Vertical grid spacing.
    rC = m.hypot(dW, dH)/6.0  # Diameter of circle is 1/3 of initial circle spacing.
    hbList = []
    for i in np.arange(numCircles):
        for j in np.arange(numCircles):
            x = dW*(j+1)
            y = dH*(i+1)
            # NOTE: Time-step control will adjust based on the velocities, i.e. the
            # larger the velocity the smaller the time-step.
            # NOTE: This simulation assumes non-relativistic velocities. If you want
            # to REALLY crank-up the velocities please consider appropriate
            # adjustments to the physics.
            vxR = random.uniform(-10,10) # Want ghosts to move faster? Crank this up!
            vyR = random.uniform(-10,10) # Want ghosts to move faster? Crank this up!
            hbList.append( HB(x,y,vxR,vyR,rC) )
    fig, ax = plt.subplots()
    fig.set_size_inches(HB.figW,HB.figH)
    # NOTE(review): Axes.grid(b=...) was renamed to visible=... and the old
    # alias removed in Matplotlib 3.6 -- confirm the pinned version.
    ax.grid(b=True, which='major', color='lightgrey')
    # ax.grid(b=True, which='minor', color='lightgrey')
    ax.xaxis.set_minor_locator(AutoMinorLocator(10))
    ax.yaxis.set_minor_locator(AutoMinorLocator(10))
    ax.set_title('Impenetrable Circles')
    ax.axis('scaled')
    ax.set_xlim([HB.boxL,HB.boxR])
    ax.set_ylim([HB.boxD,HB.boxU])
    tText = ax.text(4, 9.5, 'Time = ')
    ani = animation.FuncAnimation(fig, animate, frames=101,
                                  interval=100, blit=True,
                                  init_func=init, repeat=False)
    # Uncomment next two lines to write file to disk.
    # pwriter = animation.PillowWriter(fps=10, metadata=dict(artist='Dr. <NAME>'))
    # ani.save('../movies/hard_box.gif',writer=pwriter)
    plt.show()
|
<reponame>jeremyjyang/BioClients<gh_stars>1-10
#!/usr/bin/env python3
"""
TCRD db client utility (see also Pharos GraphQL API)
"""
import os,sys,argparse,re,time,json,logging
from ...idg import tcrd
from ...util import yaml as util_yaml
#############################################################################
if __name__=='__main__':
    # Command-line client for a TCRD MySQL database: parse the operation and
    # connection settings, connect, then dispatch to the matching tcrd.Utils
    # function.
    PARAM_FILE = os.environ['HOME']+"/.tcrd.yaml"
    idtypes = ['TID', 'GENEID', 'UNIPROT', 'GENESYMB', 'ENSP']
    epilog = f"default param_file: {PARAM_FILE}"
    parser = argparse.ArgumentParser(description='TCRD MySql client utility', epilog=epilog)
    ops = ['info', 'listTables', 'listColumns', 'tableRowCounts', 'tdlCounts',
        'listTargets', 'listXrefTypes', 'listXrefs', 'listDatasets',
        'listTargetsByDTO', 'listTargetFamilies',
        'listPhenotypes', 'listPhenotypeTypes',
        'listPublications',
        'getTargets', 'getTargetsByXref', 'getTargetPage',
        'listDiseases', 'listDiseaseTypes',
        'getDiseaseAssociations', 'getDiseaseAssociationsPage',
        'getTargetpathways']
    parser.add_argument("op", choices=ops, help='OPERATION')
    parser.add_argument("--o", dest="ofile", help="output (TSV)")
    parser.add_argument("--i", dest="ifile", help="input target ID file")
    parser.add_argument("--ids", help="input IDs")
    parser.add_argument("--idtype", choices=idtypes, default='TID', help='target ID type')
    parser.add_argument("--xreftypes", help='Xref types, comma-separated')
    parser.add_argument("--tdls", help="TDLs, comma-separated ({})".format('|'.join(tcrd.TDLS)))
    parser.add_argument("--tfams", help="target families, comma-separated")
    parser.add_argument("--param_file", default=PARAM_FILE)
    parser.add_argument("--dbhost")
    parser.add_argument("--dbport")
    parser.add_argument("--dbusr")
    parser.add_argument("--dbpw")
    parser.add_argument("--dbname")
    parser.add_argument("-v", "--verbose", dest="verbose", action="count", default=0)
    parser.add_argument("-q", "--quiet", action="store_true", help="Suppress progress notification.")
    args = parser.parse_args()

    # logging.PROGRESS = 15 (custom)
    logging.basicConfig(format='%(levelname)s:%(message)s', level=(logging.DEBUG if args.verbose>0 else logging.ERROR if args.quiet else 15))

    # Connection parameters: YAML param file, overridden by CLI flags.
    params = util_yaml.ReadParamFile(args.param_file) if os.path.isfile(args.param_file) else {}
    if args.dbhost: params['DBHOST'] = args.dbhost
    if args.dbport: params['DBPORT'] = args.dbport
    if args.dbusr: params['DBUSR'] = args.dbusr
    if args.dbpw: params['DBPW'] = args.dbpw
    if args.dbname: params['DBNAME'] = args.dbname

    fout = open(args.ofile, "w+") if args.ofile else sys.stdout

    # Gather input IDs from a file (one per line) or a delimited list.
    ids = []
    if args.ifile:
        # Bug fix: close the input file deterministically.
        with open(args.ifile) as fin:
            for line in fin:
                ids.append(line.rstrip())
        logging.info('Input IDs: %d'%(len(ids)))
    elif args.ids:
        ids = re.split(r'[,\s]+', args.ids)

    # Connect via mysql.connector, falling back to MySQLdb.
    # Bug fix: the password must be read from params['DBPW'] (the key set
    # above); the previous code looked up a nonexistent placeholder key,
    # so every connection attempt raised KeyError.
    try:
        import mysql.connector as mysql
        dbcon = mysql.connect(host=params['DBHOST'], port=params['DBPORT'], user=params['DBUSR'], passwd=params['DBPW'], db=params['DBNAME'])
    except Exception as e:
        logging.error(f'{e}')
        try:
            import MySQLdb as mysql
            dbcon = mysql.connect(host=params['DBHOST'], port=int(params['DBPORT']), user=params['DBUSR'], passwd=params['DBPW'], db=params['DBNAME'])
        except Exception as e2:
            logging.error(f'{e2}')
            sys.exit(1)

    # Dispatch on the requested operation.
    if args.op=='listColumns':
        tcrd.Utils.ListColumns(dbcon, fout)
    elif args.op=='info':
        tcrd.Utils.Info(dbcon, fout)
    elif args.op=='tableRowCounts':
        tcrd.Utils.TableRowCounts(dbcon, fout)
    elif args.op=='tdlCounts':
        tcrd.Utils.TDLCounts(dbcon, fout)
    elif args.op=='listTables':
        tcrd.Utils.ListTables(dbcon, fout)
    elif args.op=='listTargets':
        tdls = re.split(r'\s*,\s*', args.tdls) if args.tdls else []
        tfams = re.split(r'\s*,\s*', args.tfams) if args.tfams else []
        tcrd.Utils.ListTargets(dbcon, tdls, tfams, fout)
    elif args.op=='getTargets':
        if not ids:
            parser.error(f'IDs required for operation: {args.op}')
        tcrd.Utils.GetTargets(dbcon, ids, args.idtype, fout)
    elif args.op=='getTargetPage':
        if not ids:
            parser.error(f'Target ID required for operation: {args.op}')
        tcrd.Utils.GetTargetPage(dbcon, ids[0], fout)
    # Bug fix: must match the op name declared in `ops`
    # ('getTargetsByXref'); the old comparison against 'getTargetsByXrefs'
    # made this branch unreachable (argparse rejects that spelling).
    elif args.op=='getTargetsByXref':
        if not ids:
            parser.error(f'IDs required for operation: {args.op}')
        tcrd.Utils.GetTargetsByXrefs(dbcon, ids, args.xreftypes, fout)
    elif args.op=='getTargetpathways':
        if not ids:
            parser.error(f'IDs required for operation: {args.op}')
        tids = tcrd.Utils.GetTargets(dbcon, ids, args.idtype, None)
        tcrd.Utils.GetPathways(dbcon, tids, fout)
    elif args.op=='listXrefTypes':
        tcrd.Utils.ListXrefTypes(dbcon, fout)
    elif args.op=='listXrefs':
        if args.xreftypes:
            # Validate each requested xref type against the database.
            xreftypes = re.split(r'\s*,\s*', args.xreftypes.strip())
            xreftypes_all = tcrd.Utils.ListXrefTypes(dbcon).iloc[:,0]
            for xreftype in xreftypes:
                if xreftype not in list(xreftypes_all):
                    parser.error(f"xreftype '{xreftype}' invalid. Available xreftypes: {str(list(xreftypes_all))}")
        else:
            xreftypes = []
        tcrd.Utils.ListXrefs(dbcon, xreftypes, fout)
    elif args.op=='listTargetFamilies':
        tcrd.Utils.ListTargetFamilies(dbcon, fout)
    elif args.op=='listTargetsByDTO':
        tcrd.Utils.ListTargetsByDTO(dbcon, fout)
    elif args.op=='listDiseases':
        tcrd.Utils.ListDiseases(dbcon, fout)
    elif args.op=='listDiseaseTypes':
        tcrd.Utils.ListDiseaseTypes(dbcon, fout)
    elif args.op=='getDiseaseAssociations':
        if not ids:
            parser.error(f'IDs required for operation: {args.op}')
        tcrd.Utils.GetDiseaseAssociations(dbcon, ids, fout)
    elif args.op=='getDiseaseAssociationsPage':
        if not ids:
            parser.error(f'ID required for operation: {args.op}')
        tcrd.Utils.GetDiseaseAssociationsPage(dbcon, ids[0], fout)
    elif args.op=='listPhenotypes':
        tcrd.Utils.ListPhenotypes(dbcon, fout)
    elif args.op=='listPhenotypeTypes':
        tcrd.Utils.ListPhenotypeTypes(dbcon, fout)
    elif args.op=='listPublications':
        tcrd.Utils.ListPublications(dbcon, fout)
    elif args.op=='listDatasets':
        tcrd.Utils.ListDatasets(dbcon, fout)
    else:
        # Bug fix: print the help BEFORE parser.error(), which exits;
        # the old order made print_help() unreachable.
        parser.print_help()
        parser.error(f"Invalid operation: {args.op}")
|
<filename>data.py
import matplotlib.pyplot as plt
import math
import numpy as np
from utils import tool
import torch
def add_particle(img_sz, particle):
    """
    Render particles as pure Gaussian blobs plus uniform integer noise.

    This method cannot build very small particle images (the Gaussian is
    sampled, not pixel-integrated; see add_particle2 for the erf model).
    """
    uu, vv = np.meshgrid(np.arange(0, img_sz[0]), np.arange(0, img_sz[1]))
    uu = uu[:, :, np.newaxis]
    vv = vv[:, :, np.newaxis]
    xs = np.reshape(particle.x, (1, 1, -1))
    ys = np.reshape(particle.y, (1, 1, -1))
    diam = np.reshape(particle.d, (1, 1, -1))
    amp = np.reshape(particle.i, (1, 1, -1))
    # Gaussian intensity profile, one slice per particle.
    field = np.exp(-8*((uu-xs)**2 + (vv-ys)**2)/diam**2)*amp
    field = np.sum(field, axis=-1)*255.0
    field = field + np.random.randint(-20, 20, field.shape)
    # field = np.round(field).astype(np.uint8)
    field = np.round(field)
    # Crop the 10-pixel padding border.
    return field[10:-10, 10:-10]
def erf(x):
    """
    Element-wise error function, wrapping torch.erf so it accepts and
    returns NumPy-compatible values.
    """
    tensor = torch.tensor(x)
    return torch.erf(tensor).cpu().numpy()
def add_particle2(img_sz, particle):
    """
    Synthesize a particle image using the pixel-integrated Gaussian model.

    Each particle's contribution to a pixel is the separable product of erf
    differences (the integral of the Gaussian over the pixel area), rendered
    only in a small window around the particle center for speed.

    Bug fix: ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24;
    the builtin ``int`` is the drop-in replacement.
    """
    image = np.zeros(img_sz)
    v, u = np.meshgrid(np.arange(0, img_sz[0]), np.arange(0, img_sz[1]))
    x_s = np.reshape(particle.x, (-1, 1))
    y_s = np.reshape(particle.y, (-1, 1))
    dp_s = np.reshape(particle.d, (-1, 1))
    intensity_s = np.reshape(particle.i, (-1, 1))
    dp_nominal = particle.nd
    for x, y, dp, intensity in zip(x_s, y_s, dp_s, intensity_s):
        # Render only a (6*dp+3)-pixel window around the particle,
        # clamped inside the image.
        ind_x1 = int(min(max(0, x-3*dp-2), img_sz[0]-6*dp-3))
        ind_y1 = int(min(max(0, y-3*dp-2), img_sz[1]-6*dp-3))
        ind_x2 = ind_x1 + int(6*dp+3)
        ind_y2 = ind_y1 + int(6*dp+3)
        lx = u[ind_x1:ind_x2, ind_y1:ind_y2]-x
        ly = v[ind_x1:ind_x2, ind_y1:ind_y2]-y
        b = dp/np.sqrt(8)  # from the Gaussian intensity profile assumption
        img = (erf((lx+0.5)/b)-erf((lx-0.5)/b))*(erf((ly+0.5)/b)-erf((ly-0.5)/b))
        img = img*intensity
        image[ind_x1:ind_x2, ind_y1:ind_y2] = image[ind_x1:ind_x2, ind_y1:ind_y2] + img
    # Normalize so a nominal-diameter particle peaks near 1, then clip and
    # scale to 8-bit range.
    b_n = dp_nominal/np.sqrt(8)
    partition = 1.5*(erf(0.5/b_n)-erf(-0.5/b_n))**2
    image = np.clip(image/partition, 0, 1.0)
    image = image*255.0
    image = np.round(image)
    return image
def gen_image_pair(config):
    """
    Generate a synthetic PIV image pair with a uniform horizontal shift.

    The same particle population is rendered twice, displaced by
    +displacement/2 in frame 1 and -displacement/2 in frame 2.

    Bug fix: ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24;
    use the builtin ``int``.
    """
    # Pad 10 px on every side; the border is cropped after rendering to
    # avoid edge artifacts.
    img_sz = (config.img_sz[0]+20, config.img_sz[1]+20)
    ppp = config.ppp
    dp, d_std = config.dp, config.d_std
    i_std = config.i_std
    miss_ratio = config.miss_ratio  # NOTE(review): read but not used below.
    displacement = config.displacement
    # Shared particle parameters for both frames.
    p1, p2 = tool.AttrDict(), tool.AttrDict()
    p1.num = p2.num = np.round(ppp*np.prod(img_sz)).astype(int)
    p1.nd = p2.nd = dp
    p1.x = p2.x = np.random.uniform(0, img_sz[0], p1.num)
    p1.y = p2.y = np.random.uniform(0, img_sz[1], p1.num)
    p1.d = p2.d = np.abs(np.random.randn(p1.num)*d_std + dp)
    p1.i = p2.i = np.random.randn(p1.num)*i_std + 0.85
    # Symmetric displacement; the additions rebind, so the shared arrays
    # above are not mutated.
    p1.x = p1.x + displacement/2
    p2.x = p2.x - displacement/2
    # generate images
    img1 = add_particle2(img_sz, p1)
    img2 = add_particle2(img_sz, p2)
    # img1 = add_particle(img_sz,p1)
    # img2 = add_particle(img_sz,p2)
    img1 = img1[10:-10, 10:-10]
    img2 = img2[10:-10, 10:-10]
    return img1, img2
def main():
    """Generate and display one synthetic image pair with demo settings."""
    # Bug fix: instantiate AttrDict -- the original assigned the class
    # itself, turning every setting into a shared class attribute.
    config = tool.AttrDict()
    config.img_sz = (256, 256)
    config.ppp = 0.05           # particles per pixel
    config.dp = 2.2             # nominal particle diameter [px]
    config.d_std = 0.1          # diameter standard deviation
    config.i_std = 0.1          # intensity standard deviation
    config.miss_ratio = 0.1     # NOTE(review): unused by gen_image_pair
    config.displacement = 2.25  # inter-frame displacement [px]
    img1, img2 = gen_image_pair(config)
    plt.figure()
    plt.imshow(img1)
    plt.figure()
    plt.imshow(img2)
    plt.show()

if __name__=='__main__':
    main()
|
<reponame>diogo1790team/inphinity_DM<filename>objects_new/Couples_new.py
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 31 14:40:55 2017
@author: Diogo
"""
from SQL_obj_new.Couple_sql_new import _Couple_sql_new
class Couple(object):
    """
    This class treats the Couple object as it exists in the COUPLES table
    of the database.
    By default, all FK are in the last positions in the parameter declarations.

    Interaction types:
    - 1 - exists - positive
    - 0 - not exists - negative
    """

    def __init__(self, id_couple, interact_pn, fk_bacteria, fk_phage, fk_type_inter, fk_level_interact, fk_lysis_inter = -1, fk_source_data = -1):
        """
        Constructor of the Couple object.

        :param id_couple: id of the couple - -1 if unknown
        :param interact_pn: type of interaction (positive or negative) - -1 if unknown
        :param fk_bacteria: bacterium FK - -1 if unknown
        :param fk_phage: phage FK - -1 if unknown
        :param fk_type_inter: interaction-type FK - -1 if unknown
        :param fk_level_interact: interaction-level FK - -1 if unknown
        :param fk_lysis_inter: degree of lysis for positive interactions - -1 if unknown
        :param fk_source_data: source of the interaction (NCBI, PhageDB, ...) - -1 if unknown

        :type id_couple: int - not required
        :type interact_pn: int - required
        :type fk_bacteria: int - required
        :type fk_phage: int - required
        :type fk_type_inter: int - required
        :type fk_level_interact: int - required
        :type fk_lysis_inter: int - not required
        :type fk_source_data: int - not required
        """
        self.id_couple = id_couple
        self.fk_bacteria = fk_bacteria
        self.fk_phage = fk_phage
        self.fk_type_inter = fk_type_inter
        self.fk_level_interact = fk_level_interact
        self.interact_pn = interact_pn
        self.fk_lysis_inter = fk_lysis_inter
        self.fk_source_data = fk_source_data

    @staticmethod
    def _rows_to_couples(results):
        """
        Map raw 8-column COUPLES rows onto Couple instances.
        Column order: id, interact_pn, fk_bacteria, fk_phage, fk_type_inter,
        fk_level_interact, fk_lysis_inter, fk_source_data.
        """
        return [Couple(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7]) for row in results]

    @staticmethod
    def get_all_couples():
        """
        return an array with all the couples in the database

        :return: array of couple
        :rtype: array(Couple)
        """
        sqlObj = _Couple_sql_new()
        return Couple._rows_to_couples(sqlObj.select_all_couples_all_attributes())

    @staticmethod
    def get_couples_by_list_id(list_ids):
        """
        return an array with all the couples in the database given an array of ids

        :param list_ids: couple ids to fetch
        :return: array of couple
        :rtype: array(Couple)
        """
        sqlObj = _Couple_sql_new()
        return Couple._rows_to_couples(sqlObj.select_all_couples_all_attributes_by_arrays_ids(list_ids))

    @staticmethod
    def get_all_couples_by_phage_id(fk_phage):
        """
        return an array with all the couples in the database based on phage fk_id

        :param fk_phage: id of the phage - -1 if unknown
        :type fk_phage: int - required
        :return: array of couple
        :rtype: array(Couple)
        """
        sqlObj = _Couple_sql_new()
        return Couple._rows_to_couples(sqlObj.select_all_couples_all_attributes_by_fk_phage(fk_phage))

    @staticmethod
    def get_all_positive_couples_by_phage_id(fk_phage):
        """
        return an array with all the positive couples in the database based on phage fk_id

        :param fk_phage: id of the phage - -1 if unknown
        :type fk_phage: int - required
        :return: array of couple
        :rtype: array(Couple)
        """
        sqlObj = _Couple_sql_new()
        return Couple._rows_to_couples(sqlObj.select_all_positive_couples_all_attributes_by_fk_phage(fk_phage))

    @staticmethod
    def get_all_positive_couples_by_phage_id_level_id(fk_phage, fk_level):
        """
        return an array with all the positive couples in the database based on
        phage fk_id and interaction-level id

        :param fk_phage: id of the phage - -1 if unknown
        :param fk_level: id of the interaction level
        :type fk_phage: int - required
        :type fk_level: int - required
        :return: array of couple
        :rtype: array(Couple)
        """
        sqlObj = _Couple_sql_new()
        return Couple._rows_to_couples(sqlObj.select_all_positive_couples_all_attributes_by_fk_phage_level_id(fk_phage, fk_level))

    @staticmethod
    def get_all_couples_by_bacterium(fk_bacterium):
        """
        return an array with all the couples in the database based on bacterium fk_id

        :param fk_bacterium: id of the bacterium - -1 if unknown
        :type fk_bacterium: int - required
        :return: array of couple
        :rtype: array(Couple)
        """
        sqlObj = _Couple_sql_new()
        return Couple._rows_to_couples(sqlObj.select_all_couples_all_attributes_by_fk_bacterium(fk_bacterium))

    @staticmethod
    def get_all_couples_positiv_by_bacterium_level(fk_bacterium, fk_level):
        """
        return an array with all the positive couples (type 1) in the database
        based on bacterium fk_id and interaction level

        :param fk_bacterium: id of the bacterium - -1 if unknown
        :param fk_level: id of the interaction level
        :type fk_bacterium: int - required
        :type fk_level: int - required
        :return: array of couple
        :rtype: array(Couple)
        """
        sqlObj = _Couple_sql_new()
        return Couple._rows_to_couples(sqlObj.select_all_couples_all_attributes_by_fk_bacterium_level_type(fk_bacterium, fk_level, 1))

    @staticmethod
    def get_all_couples_negative_by_bacterium_level(fk_bacterium, fk_level):
        """
        return an array with all the negative couples (type 2) in the database
        based on bacterium fk_id and interaction level

        :param fk_bacterium: id of the bacterium - -1 if unknown
        :param fk_level: id of the interaction level
        :type fk_bacterium: int - required
        :type fk_level: int - required
        :return: array of couple
        :rtype: array(Couple)
        """
        sqlObj = _Couple_sql_new()
        return Couple._rows_to_couples(sqlObj.select_all_couples_all_attributes_by_fk_bacterium_level_type(fk_bacterium, fk_level, 2))

    @staticmethod
    def get_all_couples_by_type_level_source(interaction_type, fk_level, fk_source):
        """
        return an array with all the couples in the database based on interaction
        type (positive or negative), level (specie, strain, ...) and source
        (NCBI, Phages, ...)

        :param interaction_type: interaction type id
        :param fk_level: id of the interaction level
        :param fk_source: id of the data source
        :type interaction_type: int - required
        :type fk_level: int - required
        :type fk_source: int - required
        :return: array of couple
        :rtype: array(Couple)
        """
        sqlObj = _Couple_sql_new()
        return Couple._rows_to_couples(sqlObj.select_all_couples_all_attributes_by_type_level_source(interaction_type, fk_level, fk_source))

    def create_couple(self):
        """
        Insert a Couple in the database if it does not already exist and
        update its id.

        The Couple contains an interaction, a bacterium, a phage, a type of
        interaction, a level of interaction, a level of lysis and a source
        of data.

        :return: id of the Couple
        :rtype: int
        """
        sqlObj = _Couple_sql_new()
        value_couple = sqlObj.insert_couple_if_ot_exist(self.interact_pn, self.fk_bacteria, self.fk_phage, self.fk_type_inter, self.fk_level_interact, self.fk_lysis_inter, self.fk_source_data)
        self.id_couple = value_couple
        print(value_couple)
        return value_couple

    @staticmethod
    def remove_couple_by_id(id_couple):
        """
        remove a couple given its id

        :param id_couple: id of the couple
        :type id_couple: int - required
        :return: id of the removed couple
        :rtype: int
        """
        sqlObj = _Couple_sql_new()
        return sqlObj.remove_couple_by_id(id_couple)

    @staticmethod
    def remove_couple_by_id_bacterium(id_bacterium):
        """
        remove the couples of a given bacterium id

        :param id_bacterium: id of the bacterium
        :type id_bacterium: int - required
        :return: id of the removed couple
        :rtype: int
        """
        sqlObj = _Couple_sql_new()
        return sqlObj.remove_couple_by_fk_bacterium(id_bacterium)

    @staticmethod
    def verify_couple_exist_by_phage_bact(id_bact, id_phage):
        """
        Verify whether a couple exists for the (bacterium, phage) pair.

        :param id_bact: id of the bacterium
        :param id_phage: id of the phage
        :type id_bact: int - required
        :type id_phage: int - required
        :return: id of the couple, if any
        :rtype: int
        """
        sqlObj = _Couple_sql_new()
        return sqlObj.get_id_couple_by_phage_bact(id_bact, id_phage)

    @staticmethod
    def get_all_couples_by_type_level_source_bact_id(interaction_type, fk_level, fk_source, fk_bact):
        """
        return an array with all the couples in the database based on interaction
        type, level, source and bacterium id

        :param interaction_type: interaction type id
        :param fk_level: id of the interaction level
        :param fk_source: id of the data source
        :param fk_bact: id of the bacterium
        :type interaction_type: int - required
        :type fk_level: int - required
        :type fk_source: int - required
        :type fk_bact: int - required
        :return: array of couple
        :rtype: array(Couple)
        """
        sqlObj = _Couple_sql_new()
        return Couple._rows_to_couples(sqlObj.select_all_couples_all_attributes_by_type_level_source_id_bact(interaction_type, fk_level, fk_source, fk_bact))

    def __str__(self):
        """
        Override of the str method.
        """
        message_str = ""
        if type(self.fk_lysis_inter) is not int:
            message_str = "ID: {0:d} Type interaction: {1}, ID bacterium {2:d}, ID phage {3:d}, Interaction state {4:d}, leve {5:d}".format(self.id_couple, self.interact_pn, self.fk_bacteria, self.fk_phage, self.fk_type_inter, self.fk_level_interact)
        elif self.interact_pn == 1:
            message_str = "ID: {0:d} Type interaction: {1}, ID bacterium {2:d}, ID phage {3:d}, Interaction state {4:d}, leve {5:d}, type of lysis {6:d} ".format(self.id_couple, self.interact_pn, self.fk_bacteria, self.fk_phage, self.fk_type_inter, self.fk_level_interact, self.fk_lysis_inter)
        elif self.interact_pn == 0:
            message_str = "ID: {0:d} Type interaction: {1}, ID bacterium {2:d}, ID phage {3:d}, Interaction state {4:d}, leve {5:d}".format(self.id_couple, self.interact_pn, self.fk_bacteria, self.fk_phage, self.fk_type_inter, self.fk_level_interact)
        return message_str
|
<reponame>pstephens/project-5g
#!/usr/bin/env python3
import argparse
import itertools
import json
import sys
def load_data(f):
    """Parse an iterable of lines, each a standalone JSON document."""
    return [json.loads(line) for line in f]
def Deg(r):
    """Grouping key: return the "Deg" field of record *r*."""
    return r["Deg"]
    # Bug fix: a stray bare `n` expression followed this function at module
    # level, raising NameError on import; it has been removed.
def Values(path):
    """
    Build an extractor for a dotted *path* (e.g. "a.b.c").

    Returns a generator function f(record) that yields every int/float
    reachable by following the path components through nested dicts,
    descending transparently into lists at any depth.
    """
    # Empty components (leading/trailing/double dots) are dropped.
    parts = [x for x in path.split(".") if x]
    def g(r, i):
        rtype = type(r)
        if rtype is list:
            # Lists are transparent: recurse into each element at the
            # same path depth.
            for x in r:
                for y in g(x, i):
                    yield y
        elif rtype is int or rtype is float:
            # Yield a number only when the whole path has been consumed.
            if i == len(parts):
                yield r
        elif rtype is dict:
            # dict.get returns None for a missing key; the None branch
            # below then terminates that sub-traversal silently.
            if i < len(parts):
                for x in g(r.get(parts[i]), i + 1):
                    yield x
        elif r is None:
            return
        else:
            raise ValueError(f"Unexpected type {rtype} encountered during traversial of path {path}")
    def f(r):
        for x in g(r, 0):
            yield x
    return f
def get_value_by_paths(data, path):
    """
    Extract values from *data* for a single dotted path, or for each path
    in a list of paths (returning one value-list per path).
    """
    if type(path) is not list:
        return list(Values(path)(data))
    extracted = []
    for sub_path in path:
        extracted.append(list(Values(sub_path)(data)))
    return extracted
# group on Deg
def scatter_data(data, path, agg):
    """
    Aggregate *data* grouped by record["Deg"] into Highcharts scatter
    points, returned as a JSON string of {"x": deg, "y": agg(samples)}.
    """
    output = []
    # NOTE(review): itertools.groupby only merges *adjacent* equal keys --
    # this assumes the input records are already ordered by Deg; confirm.
    for k, g in itertools.groupby(data, Deg):
        samples = get_value_by_paths(list(g), path)
        output.append({
            "x": k,
            "y": agg(samples)
        })
    return json.dumps(output)
def p(factor):
    """
    Return an aggregator computing the approximate *factor* percentile
    (0.0-1.0) of a sample list, or None for an empty list.

    Bug fix: the original emptiness guard was ``len(samples) < 0``, which
    is never true, so an empty sample list raised IndexError.
    """
    def f(samples):
        if not samples:
            return None
        samples.sort()  # NOTE: sorts the caller's list in place.
        return samples[int(round(max(0, min(len(samples) - 1, len(samples) * factor - 1))))]
    return f
def agg_min(samples):
    """
    Smallest sample, or None for an empty list.

    Bug fix: the original guard ``len(samples) < 0`` was never true, so an
    empty list raised ValueError from min().
    """
    if not samples:
        return None
    return min(samples)
def agg_max(samples):
    """
    Largest sample, or None for an empty list.

    Bug fix: the original guard ``len(samples) < 0`` was never true, so an
    empty list raised ValueError from max().
    """
    if not samples:
        return None
    return max(samples)
def agg_ping_loss(samples):
    """
    Packet-loss percentage from ``[sent_counts, recv_samples]``: the first
    sub-list holds per-run sent counts, the second holds one entry per
    received reply.  Returns None when nothing was sent.

    Bug fix: loss must be relative to packets *sent*; the original divided
    by the receive count, which overstated the loss and raised
    ZeroDivisionError whenever no replies came back.
    """
    sent_counts, recv_samples = samples
    sent = sum(sent_counts)
    recv = len(recv_samples)
    if sent < 1:
        return None
    return (sent - recv) / sent * 100
def write_html(f, data):
    """
    Render *data* (one record per compass heading) as a standalone HTML
    page containing a Highcharts scatter chart of ping latency/loss and
    LTE/NSA signal metrics, and write it to file object *f*.
    """
    # NOTE: the local name shadows the builtin `str`; safe here because the
    # builtin is not used below, but worth renaming eventually.
    # The template is an f-string: {{ }} are literal braces for the
    # JavaScript config; {scatter_data(...)} interpolates JSON series data.
    str = \
f"""<!doctype html>
<html>
<head><title>{data[0]["Time"]}</title></head>
<script src="https://code.highcharts.com/highcharts.js"></script>
<script src="https://code.highcharts.com/highcharts-more.js"></script>
<body>
<div id="container" style="width: 2000px; height: 1000px; margin: 0 auto"></div>
<script>
Highcharts.chart("container", {{
chart: {{
type: "scatter"
}},
yAxis: [{{
title: {{ text: "Elapsed (ms)"}},
max: 500
}},
{{
title: {{ text: "Percent"}},
max: 100
}},
{{
title: {{ text: "RSRP (dBm)"}},
max: -44,
min: -140
}},
{{
title: {{ text: "RSRQ (dBm)"}},
max: -3,
min: -20
}},
{{
title: {{ text: "SINR (db)"}},
max: 30,
min: -20
}}],
series: [{{
type: "scatter",
name: "Ping Min",
yAxis: 0,
lineWidth: 2,
data: {scatter_data(data, "Ping Samples", agg_min)}
}},
{{
type: "scatter",
name: "Ping P90",
yAxis: 0,
lineWidth: 2,
data: {scatter_data(data, "Ping Samples", p(0.90))}
}},
{{
type: "scatter",
name: "Ping Max",
yAxis: 0,
lineWidth: 2,
data: {scatter_data(data, "Ping Samples", agg_max)}
}},
{{
type: "scatter",
name: "Ping Loss",
yAxis: 1,
lineWidth: 2,
data: {scatter_data(data, ["Ping Cnt", "Ping Samples"], agg_ping_loss)}
}},
{{
type: "scatter",
name: "LTE RSRP",
yAxis: 2,
lineWidth: 2,
data: {scatter_data(data, "SC LTE RSRP", p(0.90))}
}},
{{
type: "scatter",
name: "NSA RSRP",
yAxis: 2,
lineWidth: 2,
data: {scatter_data(data, "SC NSA RSRP", p(0.90))}
}},
{{
type: "scatter",
name: "LTE RSRQ",
yAxis: 3,
lineWidth: 2,
data: {scatter_data(data, "SC LTE RSRQ", p(0.90))}
}},
{{
type: "scatter",
name: "NSA RSRQ",
yAxis: 3,
lineWidth: 2,
data: {scatter_data(data, "SC NSA RSRQ", p(0.90))}
}},
{{
type: "scatter",
name: "LTE SINR",
yAxis: 4,
lineWidth: 2,
data: {scatter_data(data, "SC LTE SINR", p(0.90))}
}},
{{
type: "scatter",
name: "NSA SINR",
yAxis: 4,
lineWidth: 2,
data: {scatter_data(data, "SC NSA SINR", p(0.90))}
}}
]
}});
</script>
</body>
</html>
"""
    f.write(str)
def main(args):
    """CLI entry point: read a JSON-lines data file and emit an HTML chart."""
    parser = argparse.ArgumentParser()
    parser.add_argument("input", type=argparse.FileType("r"),
                        help="Input data set, single JSON object per line")
    parser.add_argument("output", type=argparse.FileType("w"),
                        help="Output name to write html to")
    parsed = parser.parse_args(args)
    write_html(parsed.output, load_data(parsed.input))

if __name__ == "__main__":
    main(sys.argv[1:])
|
from __future__ import absolute_import
import os
import sys
from collections import defaultdict
from .gemini_constants import *
from . import GeminiQuery
class Site(object):
    """One variant site: chromosome, end coordinate and (unset) genotype."""

    def __init__(self, row):
        # gt_type starts unknown; callers fill it in later.
        self.chrom, self.end, self.gt_type = row['chrom'], int(row['end']), None
def _prune_run(run):
"""
Prune the current run of genotypes.
Remove genotypes from the left of the first
non-homozygous genotype, since, owing to the
same logic behind run length encoding, those
genotypes cannot be part of a longer run than
we have seen before.
For example:
breaks = * * *
run = H H H h H H H U H H H h H H H H H H
prune_1 = H H H U H H H h H H H H H H
prune_2 = H H H h H H H H H H
prune_3 = H H H H H H
"""
try:
first_het_idx = run.index('H')
except:
first_het_idx = None
try:
first_unk_idx = run.index('U')
except:
first_unk_idx = None
if first_het_idx is not None and first_unk_idx is not None:
idx_of_first_disruption = min(run.index('H'), run.index('U'))
elif first_het_idx is not None:
idx_of_first_disruption = first_het_idx
elif first_unk_idx is not None:
idx_of_first_disruption = first_unk_idx
else:
# no interuptions, return an empty list
return 0, 0, len(run), []
hets_removed = run[0:idx_of_first_disruption+1].count('H')
unks_removed = run[0:idx_of_first_disruption+1].count('U')
homs_removed = idx_of_first_disruption - (hets_removed + unks_removed) + 1
return hets_removed, unks_removed, homs_removed, run[idx_of_first_disruption+1:]
def sweep_genotypes_for_rohs(args, chrom, samples):
    """
    Sweep through the genotypes for each sample in search of ROHs
    (runs of homozygosity).

    Note: If the genotype was homozygous, the END POSITION of the variant
    is stored in the run; otherwise 'H' for het and 'U' for unknown.
    Reported runs must contain at least args.min_snps homozygous sites and
    span at least args.min_size bp, tolerating up to args.max_hets hets
    and args.max_unknowns unknowns.
    """
    # NOTE(review): these counters are NOT reset between samples -- after
    # a reported run, state carries into the next sample's sweep.  Looks
    # suspicious; confirm whether that is intentional.
    hom_count = 0
    het_count = 0
    unk_count = 0
    curr_run = []
    for sample in samples:
        sites = iter(samples[sample])
        for site in sites:
            # Retain only the tail of the previous run past its first
            # disruption (see _prune_run docs), adjusting counters.
            if len(curr_run):
                hets_removed, unks_removed, homs_removed, curr_run = \
                    _prune_run(curr_run)
                # reset for next run
                hom_count -= homs_removed
                het_count -= hets_removed
                unk_count -= unks_removed
            # Sweep through the active sites until we encounter too many
            # HET or UNKNOWN genotypes; `site` is advanced manually via
            # next() so the while loop consumes the same iterator.
            while het_count <= args.max_hets and unk_count <= args.max_unknowns:
                if site != 'H' and site != 'U':
                    hom_count += 1
                    curr_run.append(site)
                elif site == 'H':
                    curr_run.append(site)
                    het_count += 1
                elif site == 'U':
                    curr_run.append(site)
                    unk_count += 1
                try:
                    site = next(sites)
                except StopIteration:
                    break
            # Skip the current run unless it contains enough hom sites.
            if hom_count >= args.min_snps:
                # Positions are stored only for homozygous sites; 'H'/'U'
                # markers are filtered out before taking min/max.
                run_start = min(c for c in curr_run if c not in ['H', 'U'])
                run_end = max(c for c in curr_run if c not in ['H', 'U'])
                run_length = run_end - run_start
                # Report the run if it is long enough.
                if run_length >= args.min_size:
                    density_per_kb = float(len(curr_run) * 1000) / float(run_length)
                    print("\t".join(str(s) for s in [chrom,
                                    run_start, run_end, sample,
                                    hom_count, round(density_per_kb, 4),
                                    run_length]))
            else:
                # Too short: discard the run and start fresh.
                curr_run = []
                hom_count = 0
                het_count = 0
                unk_count = 0
def get_homozygosity_runs(args):
    """
    Query the gemini database for SNPs and report runs of homozygosity
    (ROHs) per sample, per chromosome.

    Variants are streamed in chromosomal order.  For each requested sample
    we record, per variant, either the variant end position (homozygous
    call), 'H' (heterozygous) or 'U' (unknown).  Each time the chromosome
    changes (and once more at the end) the accumulated genotypes are swept
    for ROHs by ``sweep_genotypes_for_rohs``.

    :param args: argparse namespace providing db, samples, min_total_depth,
        min_genotype_depth, min_snps, min_size, max_hets and max_unknowns.
    :raises ValueError: if a requested sample is not in the database.
    """
    gq = GeminiQuery.GeminiQuery(args.db)
    # get a mapping of sample ids to sample indices
    idx2smp = gq.index2sample
    smp2idx = gq.sample2index
    sm_index = []
    # prepare a lookup of just the samples
    # for which the user wishes to search for ROHs
    if args.samples is not None:
        sample_filter = args.samples.strip().split(",")
        for sample in sample_filter:
            try:
                idx = smp2idx[sample]
            except KeyError:
                # catch only the missing-key case; the previous bare
                # ``except:`` also masked unrelated errors (even
                # KeyboardInterrupt) behind this message.
                raise ValueError("Sample %s could not be found.\n" \
                    % (sample))
            # reuse the index we just looked up instead of a second lookup
            sm_index.append(idx)
    else:
        for sample in smp2idx:
            sm_index.append(smp2idx[sample])
    ###########################################################################
    # Phase 1. Retrieve the variants for each chrom/sample
    ###########################################################################
    query = "SELECT chrom, start, end, gt_types, gt_depths \
             FROM variants \
             WHERE type = 'snp' \
             AND filter is NULL \
             AND depth >= " + str(args.min_total_depth) + \
             " ORDER BY chrom, end"
    sys.stderr.write("LOG: Querying and ordering variants by chromosomal position.\n")
    gq.run(query, needs_genotypes=True)
    # header of the tab-delimited report
    print("\t".join(['chrom',
                     'start', 'end', 'sample',
                     'num_of_snps','density_per_kb',
                     'run_length_in_bp']))
    variants_seen = 0
    samples = defaultdict(list)
    prev_chrom = None
    curr_chrom = None
    for row in gq:
        variants_seen += 1
        if variants_seen % 10000 == 0:
            sys.stderr.write("LOG: Loaded %d variants. Current variant on %s, position %d.\n" \
                % (variants_seen, row['chrom'], row['end']))
        gt_types = row['gt_types']
        gt_depths = row['gt_depths']
        curr_chrom = row['chrom']
        # the chromosome has changed. search for ROHs in the previous chrom
        if curr_chrom != prev_chrom and prev_chrom is not None:
            sweep_genotypes_for_rohs(args, prev_chrom, samples)
            samples = defaultdict(list)
        # associate the genotype for the variant with each sample
        for idx in sm_index:
            sample = idx2smp[idx]
            gt_type = gt_types[idx]
            depth = gt_depths[idx]
            # the genotype must have had sufficient depth to be considered
            if depth < args.min_genotype_depth:
                continue
            if (gt_type == HOM_ALT or gt_type == HOM_REF):
                samples[sample].append(row['end'])
            elif gt_type == HET:
                samples[sample].append('H')
            elif gt_type == UNKNOWN:
                samples[sample].append('U')
        prev_chrom = curr_chrom
    # search for ROHs in the final chromosome
    sweep_genotypes_for_rohs(args, curr_chrom, samples)
def run(parser, args):
    """Command entry point: invoke the ROH caller if the database exists."""
    if not os.path.exists(args.db):
        return
    # run the roh caller
    get_homozygosity_runs(args)
|
import subprocess
import fpga
import pathlib
import os
import time
def modelsim_write(proc, command):
    """Send ``command`` to the simulator's stdin and flush immediately."""
    stream = proc.stdin
    stream.write(command)
    stream.flush()
def modelsim_read(proc):
    """Read the simulator's stdout one character at a time.

    Accumulates characters until the next prompt character ('>') or
    end-of-stream, and returns everything read before it.
    """
    chars = []
    while True:
        ch = proc.stdout.read(1)
        if (not ch) or (ch == ">"):
            return "".join(chars)
        chars.append(ch)
class VlibDriver():
    """Run ModelSim ``vlib`` to create a simulation library.

    The tool is run to completion in the constructor; its stdout is kept
    in ``self.outs`` (``self.errs`` is None since stderr is not piped).
    """
    def __init__(self, modelsim_path, target_path = pathlib.Path.cwd(), lib_name = "work"):
        command = [modelsim_path / 'vlib', "-target", target_path / lib_name]
        self.process = subprocess.Popen(command,
                                        stdout=subprocess.PIPE,
                                        universal_newlines=True)
        # vlib is a one-shot tool: block until it exits and capture output
        self.outs, self.errs = self.process.communicate()
class VmapDriver():
    """Run ModelSim ``vmap`` to register a logical-to-physical library mapping."""
    def __init__(self, modelsim_path, target_path = pathlib.Path.cwd(), lib_name = "work"):
        command = [modelsim_path / "vmap", lib_name, target_path / lib_name]
        self.process = subprocess.Popen(command,
                                        stdout=subprocess.PIPE,
                                        universal_newlines=True)
        # one-shot tool: wait for completion and capture its output
        self.outs, self.errs = self.process.communicate()
class VlogDriver():
    """Compile Verilog sources into a ModelSim library with ``vlog``."""
    def __init__(self, modelsim_path, target_path = pathlib.Path.cwd(),verilog_files = "**.v", lib_name = "work"):
        command = [modelsim_path / "vlog",
                   "-nocreatelib",
                   "+incdir+" + str(target_path),
                   "-work", target_path / lib_name,
                   target_path / verilog_files]
        self.process = subprocess.Popen(command,
                                        stdout=subprocess.PIPE,
                                        universal_newlines=True)
        # block until compilation finishes; transcript ends up in self.outs
        self.outs, self.errs = self.process.communicate()
class VsimDriver():
    """Interactive driver for a ModelSim ``vsim`` console session.

    Commands are written to the simulator's stdin; replies are read back
    up to the next prompt with the module-level ``modelsim_read``.
    """
    def __init__(self, modelsim_path, top_level_entity, target_path = pathlib.Path.cwd(), time_resolution = "1ms"):
        command = [modelsim_path / "vsim",
                   "-t", time_resolution,
                   "-c",
                   "-wlfslim", "1",
                   "-Ldir", target_path / "work",
                   "work." + top_level_entity]
        self.process = subprocess.Popen(command,
                                        stdin=subprocess.PIPE,
                                        stdout=subprocess.PIPE,
                                        shell=False,
                                        universal_newlines=True)
        self.process.stdin.flush()
        # consume and display the simulator banner up to the first prompt
        print(modelsim_read(self.process))
        # silence the transcript file so stdout stays parseable
        self._send("transcript file \"\"\n")
        modelsim_read(self.process)
    def _send(self, text):
        # write one console command and flush so vsim acts on it immediately
        self.process.stdin.write(text)
        self.process.stdin.flush()
    def force(self, top_level_entity, port_name, port_value):
        """Force a signal to ``port_value``; return the console reply."""
        self._send("force sim:/" + top_level_entity + "/" + port_name + " " + port_value + " \n")
        return modelsim_read(self.process)
    def examine(self, port_name):
        """Query a signal's current value; return the console reply."""
        self._send("examine " + port_name + " \n")
        return modelsim_read(self.process)
    def step(self):
        """Advance the simulation by one time unit."""
        self._send("run 1 \n")
        return modelsim_read(self.process)
    def run(self, duration):
        """Advance the simulation by ``duration`` (vsim run syntax)."""
        self._send("run " + duration + " \n")
        return modelsim_read(self.process)
    def restart(self):
        """Restart the simulation from time zero."""
        self._send("restart \n")
        return modelsim_read(self.process)
    def quitsim(self):
        """Terminate the vsim session (no reply is read)."""
        # self.process.stdin.write("quit -sim \n")
        self._send("exit \n")
class VsimController():
    """High-level glue between an ``fpga`` port model and a ``VsimDriver``."""
    def __init__(self, fpga, config):
        self.fpga = fpga
        self.config = config
        self.vsim = VsimDriver(config["modelsim_path"],
                               config["lib_top_level_entity"],
                               target_path=config["target_path"])
    def group_force(self):
        """Push every input port's current value into the simulation."""
        top = self.config["lib_top_level_entity"]
        for _name, port in self.fpga.get_all():
            if port.direction == "input":
                self.vsim.force(top, port.name, port.get_value_lsb())
    def group_examine(self):
        """Read every output port back from the simulation."""
        for _name, port in self.fpga.get_all():
            if port.direction != "output":
                continue
            reply = self.vsim.examine(port.name)
            if "Error" in reply:
                continue
            # the value is the 4th whitespace-separated token of the reply
            port.set_value_lsb(reply.split()[3])
    def quitsim(self):
        """Shut down the underlying vsim session."""
        self.vsim.quitsim()
    def step(self):
        """Force inputs, advance one time unit, then sample outputs."""
        self.group_force()
        self.vsim.step()
        self.group_examine()
    def run(self, duration):
        """Force inputs, run for ``duration``, then sample outputs."""
        self.group_force()
        self.vsim.run(duration)
        self.group_examine()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import numpy
from PyQt4 import QtCore, QtGui
from gui.qtgen.matrizdialog import Ui_MatrizDialog
# PyQt4 exposes ``QtCore.QString.fromUtf8`` for translating UTF-8 byte
# strings; bindings built without QString (API v2 / PyQt5) raise
# AttributeError, in which case the identity function is a safe fallback.
try:
    _tr = QtCore.QString.fromUtf8
except AttributeError:
    _tr = lambda s: s
class ShowMatrizDialog(QtGui.QDialog):
    u"""
    Dialog (subclass of QDialog) that displays a numpy matrix in a table,
    mapping rows to GridWorld states and columns to actions.
    """
    def __init__(self, matriz, titulo_corto, titulo_largo=None, parent=None):
        u"""
        Class constructor.

        :param matriz: numpy matrix to display (NaN marks an invalid transition).
        :param titulo_corto: short title used as the window title.
        :param titulo_largo: long title (stored; not used within this class —
            presumably consumed elsewhere, TODO confirm).
        :param parent: parent widget.
        """
        super(ShowMatrizDialog, self).__init__(parent)
        self.ShowMatrizD = Ui_MatrizDialog()
        self.ShowMatrizD.setupUi(self)
        # resizable dialog with title bar and min/max buttons only
        self.setWindowFlags(QtCore.Qt.Dialog |
                            QtCore.Qt.WindowSystemMenuHint |
                            QtCore.Qt.WindowTitleHint |
                            QtCore.Qt.WindowMinMaxButtonsHint)
        self.matriz = matriz
        self.titulo_corto_dialogo = titulo_corto
        self.titulo_largo_dialogo = titulo_largo
        self.init_vars()
        self.initialize_dialog()
    def init_vars(self):
        # Per-cell display settings: fixed cell size in pixels; the
        # "show_tooltip" flag is not read in this class — TODO confirm use.
        self.window_config = {"item":
                              {"show_tooltip": False,
                               "size": 30}}
    def initialize_dialog(self):
        u"""
        Configure the dialog widgets and populate the table from ``self.matriz``.
        """
        self._set_dialog_signals()
        self.setWindowTitle(self.titulo_corto_dialogo)
        # Cache attribute/method access used by the fill loop below
        matriz = self.matriz
        tbl_set_item = self.ShowMatrizD.tblMatriz.setItem
        # Matrix dimensions
        # NOTE(review): numpy ``.shape`` is (rows, cols) while the names
        # suggest (width, height) — verify the order for non-square input.
        ancho_mat, alto_mat = matriz.shape
        # GridWorld dimensions (states assumed laid out on a square grid)
        self.alto_gw = int(alto_mat ** 0.5)
        self.ancho_gw = int(ancho_mat ** 0.5)
        alto_gw = self.alto_gw
        ancho_gw = self.ancho_gw
        ancho_estado_px = self.window_config["item"]["size"]
        # ancho_gw_px = ancho_estado_px * ancho_gw
        # Set the table's visual properties
        self.ShowMatrizD.tblMatriz.setRowCount(alto_mat)
        self.ShowMatrizD.tblMatriz.setColumnCount(ancho_mat)
        self.ShowMatrizD.tblMatriz.setMouseTracking(True)
        # Disable table repainting while populating to speed up the load
        self.ShowMatrizD.tblMatriz.setUpdatesEnabled(False)
        headers_horizontales = []
        headers_verticales = []
        for fila in xrange(alto_mat):
            # Grid coordinates of the state for this row/column index
            coord_x = int(fila / alto_gw)
            coord_y = fila - (coord_x * ancho_gw)
            # Build horizontal headers (Actions)
            headers_horizontales.append("A{0}\n({1},{2})".format(fila + 1,
                                                                 coord_x + 1,
                                                                 coord_y + 1))
            # Build vertical headers (States)
            headers_verticales.append("E{0} ({1},{2})".format(fila + 1,
                                                              coord_x + 1,
                                                              coord_y + 1))
        # Item style for a valid transition
        item_bg_color_val = QtGui.QColor("#FFFFFF")
        item_flags_val = QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
        # Item style for an invalid transition (NaN entry)
        item_bg_color_inv = QtGui.QColor(240, 240, 240)
        item_flags_inv = QtCore.Qt.ItemIsEnabled
        item_text_align = QtCore.Qt.AlignHCenter | QtCore.Qt.AlignCenter
        for i, accion in numpy.ndenumerate(matriz):
            if numpy.isnan(accion):
                item = QtGui.QTableWidgetItem('-')
                item.setBackgroundColor(item_bg_color_inv)
                item.setFlags(item_flags_inv)
                item.setTextAlignment(item_text_align)
            else:
                # Each item shows the value associated with the action.
                # NOTE(review): if ``accion`` is neither float nor int,
                # ``item`` is left unbound and the calls below raise
                # NameError — confirm the matrix dtype is always numeric.
                if isinstance(accion, float):
                    item = QtGui.QTableWidgetItem("{0:.2f}".format(accion))
                elif isinstance(accion, int):
                    item = QtGui.QTableWidgetItem(str(accion))
                item.setBackgroundColor(item_bg_color_val)
                item.setFlags(item_flags_val)
                item.setTextAlignment(item_text_align)
            # Source grid coordinates
            coord_x_orig = int(i[0] / alto_gw)
            coord_y_orig = i[0] - (coord_x_orig * ancho_gw)
            # Destination grid coordinates
            coord_x_dest = int(i[1] / alto_gw)
            coord_y_dest = i[1] - (coord_x_dest * ancho_gw)
            item.setToolTip(u"({0},{1}) --> ({2},{3})".format(coord_x_orig + 1,
                                                              coord_y_orig + 1,
                                                              coord_x_dest + 1,
                                                              coord_y_dest + 1))
            # Add the item to the table
            tbl_set_item(i[0], i[1], item)
        self.ShowMatrizD.tblMatriz.setHorizontalHeaderLabels(headers_horizontales)
        self.ShowMatrizD.tblMatriz.setVerticalHeaderLabels(headers_verticales)
        # Re-enable table repainting now that it is populated
        self.ShowMatrizD.tblMatriz.setUpdatesEnabled(True)
        self.ShowMatrizD.tblMatriz.horizontalHeader().setDefaultSectionSize(ancho_estado_px)
        self.ShowMatrizD.tblMatriz.horizontalHeader().setResizeMode(QtGui.QHeaderView.Fixed)
        self.ShowMatrizD.tblMatriz.verticalHeader().setDefaultSectionSize(ancho_estado_px)
        self.ShowMatrizD.tblMatriz.verticalHeader().setResizeMode(QtGui.QHeaderView.Fixed)
        self.ShowMatrizD.tblMatriz.resizeColumnsToContents()
    def _set_dialog_signals(self):
        # Refresh the info label whenever the mouse enters a cell
        self.ShowMatrizD.tblMatriz.itemEntered.connect(self.mostrar_info_item)
    def mostrar_info_item(self, item):
        u"""Show state/action/value information for the hovered table item."""
        nro_estado = item.row()
        nro_accion = item.column()
        item_valor = str(item.text())
        try:
            item_text = "Valor = {0}".format(float(item_valor))
        except ValueError:
            # '-' cells (invalid transitions) cannot be parsed as float
            item_text = "Sin transición"
        self.ShowMatrizD.lblMatrizItemInfo.setText(_tr("Estado {0} --> Acción {1} ({2})"
                                                   .format(nro_estado,
                                                           nro_accion,
                                                           item_text)))
    def accept(self):
        super(ShowMatrizDialog, self).accept()
    def reject(self):
        super(ShowMatrizDialog, self).reject()
|
import os
import random
import re
from datetime import datetime
import pytest
from invoke.context import Context
import test.test_utils.ec2 as ec2_utils
import test.test_utils.eks as eks_utils
from src.github import GitHubHandler
from test.test_utils import is_pr_context, SKIP_PR_REASON
# Test only runs in region us-west-2, on instance type p3.16xlarge, on PR_EKS_CLUSTER_NAME_TEMPLATE cluster
@pytest.mark.skipif(is_pr_context(), reason=SKIP_PR_REASON)
def test_eks_tensorflow_multi_node_training_gpu(tensorflow_training, example_only):
    """Run 3-worker horovod ResNet50 training on the TF PR EKS cluster using p3.16xlarge GPUs."""
    worker_count = 3
    instance_type = "p3.16xlarge"
    cluster_name = eks_utils.PR_EKS_CLUSTER_NAME_TEMPLATE.format("tensorflow")
    assert eks_utils.is_eks_cluster_active(cluster_name), f"EKS Cluster {cluster_name} is inactive. Exiting test"
    gpus_per_worker = ec2_utils.get_instance_num_gpus(instance_type=instance_type)
    _run_eks_tensorflow_multinode_training_resnet50_mpijob(tensorflow_training, worker_count, gpus_per_worker)
def _run_eks_tensorflow_multinode_training_resnet50_mpijob(example_image_uri, cluster_size, eks_gpus_per_worker):
    """
    Run Tensorflow distributed training on EKS using horovod docker images with synthetic dataset

    :param example_image_uri: docker image URI; the TF version is parsed out of it
    :param cluster_size: number of MPI worker replicas
    :param eks_gpus_per_worker: number of GPUs to request per worker replica
    :return: None
    """
    user = Context().run("echo $USER").stdout.strip("\n")
    # TF framework version embedded in the image tag; major version (1 vs 2)
    # selects the training script and its arguments below
    framework_version = re.search(r"\d+(\.\d+)+", example_image_uri).group()
    major_version = framework_version.split(".")[0]
    # seed with image + timestamp so concurrent runs get distinct names
    random.seed(f"{example_image_uri}-{datetime.now().strftime('%Y%m%d%H%M%S%f')}")
    unique_tag = f"{user}-{random.randint(1, 10000)}"
    namespace = f"tf{major_version}-multi-node-train-{'py2' if 'py2' in example_image_uri else 'py3'}-{unique_tag}"
    job_name = f"tf-resnet50-horovod-job-{unique_tag}"
    script_name = ("/deep-learning-models/models/resnet/tensorflow2/train_tf2_resnet.py" if major_version == "2" else
                   "/deep-learning-models/models/resnet/tensorflow/train_imagenet_resnet_hvd.py")
    # command and args are comma-separated lists, as expected by the
    # ksonnet mpi-job-custom parameters they are passed to
    command_to_run = ("mpirun,-mca,btl_tcp_if_exclude,lo,-mca,pml,ob1,-mca,btl,^openib,--bind-to,none,-map-by,slot,"
                      "-x,LD_LIBRARY_PATH,-x,PATH,-x,NCCL_SOCKET_IFNAME=eth0,-x,NCCL_DEBUG=INFO,python,") + script_name
    args_to_pass = ("-- --synthetic,--batch_size,128,--num_batches,100,--clear_log,2" if major_version == "2" else
                    "-- --num_epochs=1,--synthetic")
    home_dir = Context().run("echo $HOME").stdout.strip("\n")
    path_to_ksonnet_app = os.path.join(home_dir, f"tensorflow_multi_node_eks_test-{unique_tag}")
    app_name = f"kubeflow-tf-hvd-mpijob-{unique_tag}"
    _run_eks_tensorflow_multi_node_training_mpijob(namespace, app_name, example_image_uri, job_name,
                                                   command_to_run, args_to_pass, path_to_ksonnet_app,
                                                   cluster_size, eks_gpus_per_worker)
def _run_eks_tensorflow_multi_node_training_mpijob(namespace, app_name, custom_image, job_name,
                                                   command_to_run, args_to_pass, path_to_ksonnet_app,
                                                   cluster_size, eks_gpus_per_worker):
    """
    Run Tensorflow distributed training on EKS using horovod docker images using MPIJob

    :param namespace: kubernetes namespace to create for this run
    :param app_name: name for the generated ksonnet application
    :param custom_image: docker image to run in each worker pod
    :param job_name: name of the generated mpi-job-custom component
    :param command_to_run: comma-separated mpirun command line
    :param args_to_pass: comma-separated args for the training script
    :param path_to_ksonnet_app: directory in which to create the ksonnet app
    :param cluster_size: number of worker replicas
    :param eks_gpus_per_worker: GPUs to request per replica
    :return: None
    """
    KUBEFLOW_VERSION = "v0.5.1"
    # pod_name stays None until the launcher pod is ready, so the cleanup in
    # the finally block works even if setup fails early
    pod_name = None
    env = f"{namespace}-env"
    ctx = Context()
    github_handler = GitHubHandler("aws", "kubeflow")
    # token is needed because ksonnet resolves registries/packages via GitHub
    github_token = github_handler.get_auth_token()
    ctx.run(f"kubectl create namespace {namespace}")
    if not os.path.exists(path_to_ksonnet_app):
        ctx.run(f"mkdir -p {path_to_ksonnet_app}")
    with ctx.cd(path_to_ksonnet_app):
        # start from a clean ksonnet app directory for this run
        ctx.run(f"rm -rf {app_name}")
        ctx.run(f"ks init {app_name} --namespace {namespace}", env={"GITHUB_TOKEN": github_token})
        with ctx.cd(app_name):
            ctx.run(f"ks env add {env} --namespace {namespace}")
            # Check if the kubeflow registry exists and create. Registry will be available in each pod.
            # (warn=True: a nonzero grep return code just means "not found")
            registry_not_exist = ctx.run("ks registry list | grep kubeflow", warn=True)
            if registry_not_exist.return_code:
                ctx.run(
                    f"ks registry add kubeflow github.com/kubeflow/kubeflow/tree/{KUBEFLOW_VERSION}/kubeflow",
                    env={"GITHUB_TOKEN": github_token}
                )
                ctx.run(f"ks pkg install kubeflow/common@{KUBEFLOW_VERSION}", env={"GITHUB_TOKEN": github_token})
                ctx.run(f"ks pkg install kubeflow/mpi-job@{KUBEFLOW_VERSION}", env={"GITHUB_TOKEN": github_token})
            try:
                ctx.run("ks generate mpi-operator mpi-operator")
                # The latest mpi-operator docker image does not accept the gpus-per-node parameter
                # which is specified by the older spec file from v0.5.1.
                ctx.run("ks param set mpi-operator image mpioperator/mpi-operator:0.2.0")
                mpi_operator_start = ctx.run(f"ks apply {env} -c mpi-operator", warn=True)
                if mpi_operator_start.return_code:
                    raise RuntimeError(f"Failed to start mpi-operator:\n{mpi_operator_start.stderr}")
                eks_utils.LOGGER.info(
                    f"The mpi-operator package must be applied to {env} env before we can use mpiJob. "
                    f"Check status before moving on."
                )
                ctx.run("kubectl get crd")
                # Use Ksonnet to generate manifest files which are then applied to the default context.
                ctx.run(f"ks generate mpi-job-custom {job_name}")
                ctx.run(f"ks param set {job_name} replicas {cluster_size}")
                ctx.run(f"ks param set {job_name} gpusPerReplica {eks_gpus_per_worker}")
                ctx.run(f"ks param set {job_name} image {custom_image}")
                ctx.run(f"ks param set {job_name} command {command_to_run}")
                ctx.run(f"ks param set {job_name} args {args_to_pass}")
                # use `$ks show default` to see details.
                ctx.run(f"kubectl get pods -n {namespace} -o wide")
                eks_utils.LOGGER.info(f"Apply the generated manifest to the {env} env.")
                training_job_start = ctx.run(f"ks apply {env} -c {job_name}", warn=True)
                if training_job_start.return_code:
                    raise RuntimeError(f"Failed to start {job_name}:\n{training_job_start.stderr}")
                eks_utils.LOGGER.info("Check pods")
                ctx.run(f"kubectl get pods -n {namespace} -o wide")
                eks_utils.LOGGER.info(
                    "First the mpi-operator and the n-worker pods will be created and then "
                    "the launcher pod is created in the end. Use retries until launcher "
                    "pod's name is available to read logs."
                )
                complete_pod_name = eks_utils.is_mpijob_launcher_pod_ready(ctx, namespace, job_name)
                # complete_pod_name is of the form "pod/<name>"
                _, pod_name = complete_pod_name.split("/")
                eks_utils.LOGGER.info(f"The Pods have been created and the name of the launcher pod is {pod_name}")
                eks_utils.LOGGER.info(f"Wait for the {job_name} job to complete")
                if eks_utils.is_eks_multinode_training_complete(ctx, namespace, env, pod_name, job_name):
                    eks_utils.LOGGER.info(f"Wait for the {pod_name} pod to reach completion")
                    distributed_out = ctx.run(f"kubectl logs -n {namespace} -f {complete_pod_name}").stdout
                    eks_utils.LOGGER.info(distributed_out)
            finally:
                # always tear down the namespace/job even if setup failed
                eks_utils.eks_multinode_cleanup(ctx, pod_name, job_name, namespace, env)
|
<filename>chandra_aca/planets.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Functions for planet position relative to Chandra, Earth, or Solar System
Barycenter.
Estimated accuracy of planet coordinates (RA, Dec) is as follows, where the JPL
Horizons positions are used as the "truth".
- `get_planet_chandra` errors:
- Venus: < 4 arcsec with a peak around 3.5
- Mars: < 3 arcsec with a peak around 2.0
- Jupiter: < 0.8 arcsec
- Saturn: < 0.5 arcsec
- `get_planet_eci` errors:
- Venus: < 12 arcmin with peak around 2 arcmin
- Mars: < 8 arcmin with peak around 1.5 arcmin
- Jupiter: < 1 arcmin with peak around 0.5 arcmin
- Saturn: < 0.5 arcmin with peak around 0.3 arcmin
See the ``validation/planet-accuracy.ipynb`` notebook for details.
"""
from chandra_aca.transform import eci_to_radec
from datetime import datetime
from pathlib import Path
import astropy.constants as const
import astropy.units as u
from astropy.io import ascii
import numpy as np
from cxotime import CxoTime
from ska_helpers.utils import LazyVal
# Public API of this module.
__all__ = ('get_planet_chandra', 'get_planet_barycentric', 'get_planet_eci',
           'get_planet_chandra_horizons', 'get_planet_angular_sep',
           'NoEphemerisError', 'GET_PLANET_ECI_ERRORS',
           'GET_PLANET_CHANDRA_ERRORS')
# Estimated worst-case (RA, Dec) errors relative to JPL Horizons
# (see the module docstring and validation/planet-accuracy.ipynb).
GET_PLANET_ECI_ERRORS = {
    'venus': 12 * u.arcmin,
    'mars': 8 * u.arcmin,
    'jupiter': 1 * u.arcmin,
    'saturn': 0.5 * u.arcmin,
}
# NOTE(review): keys here are capitalized while GET_PLANET_ECI_ERRORS uses
# lower case — confirm whether callers rely on this inconsistency.
GET_PLANET_CHANDRA_ERRORS = {
    'Venus': 4 * u.arcsec,
    'Mars': 3 * u.arcsec,
    'Jupiter': 0.8 * u.arcsec,
    'Saturn': 0.5 * u.arcsec,
}
class NoEphemerisError(Exception):
    """Raised when no Chandra orbital ephemeris is available for the
    requested times (see ``get_planet_chandra``)."""
    pass
def load_kernel():
    """Open and return the bundled JPL DE432s SPK ephemeris kernel.

    Raises ``FileNotFoundError`` when the data file has not been installed
    locally (i.e. the package build step has not been run).
    """
    from jplephem.spk import SPK
    kernel_path = Path(__file__).parent / 'data' / 'de432s.bsp'
    if kernel_path.exists():
        return SPK.open(kernel_path)
    raise FileNotFoundError(f'kernel data file {kernel_path} not found, '
                            'run "python setup.py build" to install it locally')
# Lazily-opened DE432s kernel, shared module-wide (loaded on first access).
KERNEL = LazyVal(load_kernel)
# Map each supported body to the chain of SPK (center, target) segment pairs
# whose positions are summed to get the body's barycentric position
# (e.g. Earth = SSB->EMB segment (0, 3) plus EMB->Earth segment (3, 399)).
# A dict literal replaces the previous dict([...]) construction (idiomatic,
# flagged by flake8-comprehensions C406).
BODY_NAME_TO_KERNEL_SPEC = {
    'sun': [(0, 10)],
    'mercury': [(0, 1), (1, 199)],
    'venus': [(0, 2), (2, 299)],
    'earth-moon-barycenter': [(0, 3)],
    'earth': [(0, 3), (3, 399)],
    'moon': [(0, 3), (3, 301)],
    'mars': [(0, 4)],
    'jupiter': [(0, 5)],
    'saturn': [(0, 6)],
    'uranus': [(0, 7)],
    'neptune': [(0, 8)],
    'pluto': [(0, 9)],
}
# JPL Horizons batch-interface endpoint used by get_planet_chandra_horizons().
URL_HORIZONS = 'https://ssd.jpl.nasa.gov/horizons_batch.cgi?'
def get_planet_angular_sep(body: str, ra: float, dec: float,
                           time=None, observer_position: str = 'earth') -> float:
    """Get angular separation between planet ``body`` and target ``ra``, ``dec``.

    Valid values for the ``observer_position`` argument are:

    - 'earth' (default, approximate, fastest)
    - 'chandra' (reasonably accurate fast, requires fetching ephemeris)
    - 'chandra-horizons' (most accurate, slow, requires internet access)

    :param body: str
        Body name (lower case planet name)
    :param ra: float
        RA in degrees
    :param dec: float
        Dec in degrees
    :param time: CxoTime-compatible object
        Time or times of observation
    :param observer_position: str
        Observer position ('earth', 'chandra', or 'chandra-horizons')

    :returns: angular separation (deg)
    """
    from agasc import sphere_dist
    if not isinstance(time, CxoTime):
        time = CxoTime(time)
    if observer_position == 'earth':
        body_ra, body_dec = eci_to_radec(get_planet_eci(body, time))
    elif observer_position == 'chandra':
        body_ra, body_dec = eci_to_radec(get_planet_chandra(body, time))
    elif observer_position == 'chandra-horizons':
        is_scalar = time.shape == ()
        if is_scalar:
            # Horizons needs a time range; fabricate one and keep only the
            # first sample on the way out.
            time = CxoTime([time, time + 1000 * u.s])
        pos = get_planet_chandra_horizons(body, time[0], time[1], n_times=len(time))
        body_ra = pos['ra']
        body_dec = pos['dec']
        if is_scalar:
            body_ra = body_ra[0]
            body_dec = body_dec[0]
    else:
        raise ValueError(f'{observer_position} is not an allowed value: '
                         f'("earth", "chandra", or "chandra-horizons")')
    return sphere_dist(ra, dec, body_ra, body_dec)
def get_planet_barycentric(body, time=None):
    """Get barycentric position for solar system ``body`` at ``time``.

    This uses the built-in JPL ephemeris file DE432s and jplephem.

    :param body: Body name (lower case planet name)
    :param time: Time or times for returned position (default=NOW)
    :returns: barycentric position (km) as (x, y, z) or N x (x, y, z)
    """
    kernel = KERNEL.val
    if body not in BODY_NAME_TO_KERNEL_SPEC:
        raise ValueError(f'{body} is not an allowed value '
                         f'{tuple(BODY_NAME_TO_KERNEL_SPEC)}')
    time_jd = CxoTime(time).jd
    spk_pairs = BODY_NAME_TO_KERNEL_SPEC[body]
    # Sum the segment chain (e.g. SSB->EMB then EMB->Earth) for this body.
    pos = kernel[spk_pairs[0]].compute(time_jd)
    for pair in spk_pairs[1:]:
        pos += kernel[pair].compute(time_jd)
    # SPK returns (3, N); callers expect (N, 3)
    return pos.transpose()
def get_planet_eci(body, time=None, pos_observer=None):
    """Get ECI apparent position for solar system ``body`` at ``time``.

    This uses the built-in JPL ephemeris file DE432s and jplephem.  The body
    is evaluated at ``time`` minus the light-travel time from the observer,
    giving the apparent position seen by the observer at ``time``.

    Estimated accuracy of planet coordinates (RA, Dec) vs JPL Horizons,
    assuming the default Earth observer:

    - Venus: < 12 arcmin with peak around 2 arcmin
    - Mars: < 8 arcmin with peak around 1.5 arcmin
    - Jupiter: < 1 arcmin with peak around 0.5 arcmin
    - Saturn: < 0.5 arcmin with peak around 0.3 arcmin

    :param body: Body name (lower case planet name)
    :param time: Time or times for returned position (default=NOW)
    :param pos_observer: Observer position (default=Earth)
    :returns: Earth-Centered Inertial (ECI) position (km) as (x, y, z)
        or N x (x, y, z)
    """
    time = CxoTime(time)
    pos_body = get_planet_barycentric(body, time)
    if pos_observer is None:
        pos_observer = get_planet_barycentric('earth', time)
    # Single light-travel-time iteration: re-evaluate the body at the
    # emission time implied by its geometric distance from the observer.
    delta = pos_body - pos_observer
    dist = np.sqrt(np.sum(delta ** 2, axis=-1)) * u.km
    light_travel_time = (dist / const.c).to(u.s)
    pos_apparent = get_planet_barycentric(body, time - light_travel_time)
    return pos_apparent - pos_observer
def get_planet_chandra(body, time=None):
    """Get position for solar system ``body`` at ``time`` relative to Chandra.

    This uses the built-in JPL ephemeris file DE432s and jplephem, along with
    the CXC predictive Chandra orbital ephemeris (from the OFLS). The position
    is computed at the supplied ``time`` minus the light-travel time from
    Chandra to ``body`` to generate the apparent position from Chandra at
    ``time``.

    Estimated accuracy of planet coordinates (RA, Dec) from Chandra is as
    follows, where the JPL Horizons positions are used as the "truth".

    - Venus: < 4 arcsec with a peak around 3.5
    - Mars: < 3 arcsec with a peak around 2.0
    - Jupiter: < 0.8 arcsec
    - Saturn: < 0.5 arcsec

    :param body: Body name
    :param time: Time or times for returned position (default=NOW)
    :returns: position relative to Chandra (km) as (x, y, z) or N x (x, y, z)
    :raises NoEphemerisError: if the Chandra orbital ephemeris is unavailable
    """
    from cheta import fetch
    time = CxoTime(time)
    # Get position of Chandra relative to Earth.
    # Pad the fetch interval by 500 s on each side so the ephemeris can be
    # interpolated at every requested time.
    try:
        dat = fetch.MSIDset(['orbitephem0_x', 'orbitephem0_y', 'orbitephem0_z'],
                            np.min(time) - 500 * u.s, np.max(time) + 500 * u.s)
    except ValueError:
        raise NoEphemerisError('Chandra ephemeris not available')
    if len(dat['orbitephem0_x'].vals) == 0:
        raise NoEphemerisError('Chandra ephemeris not available')
    times = np.atleast_1d(time.secs)
    dat.interpolate(times=times)
    pos_earth = get_planet_barycentric('earth', time)
    # Chandra position in km
    # (the orbit ephemeris MSIDs appear to be in meters, hence the /1000 —
    # TODO confirm against the OFLS ephemeris definition)
    chandra_eci = np.zeros_like(pos_earth)
    chandra_eci[..., 0] = dat['orbitephem0_x'].vals.reshape(time.shape) / 1000
    chandra_eci[..., 1] = dat['orbitephem0_y'].vals.reshape(time.shape) / 1000
    chandra_eci[..., 2] = dat['orbitephem0_z'].vals.reshape(time.shape) / 1000
    # Apparent position from Chandra = ECI with the observer at Earth + offset
    planet_chandra = get_planet_eci(body, time, pos_observer=pos_earth + chandra_eci)
    return planet_chandra
def get_planet_chandra_horizons(body, timestart, timestop, n_times=10, timeout=10):
    """Get body position, rate, mag, surface brightness, diameter from Horizons.

    This function queries the JPL Horizons site using the CGI batch interface
    (See https://ssd.jpl.nasa.gov/horizons_batch.cgi for docs).

    The return value is an astropy Table with columns: time, ra, dec, rate_ra,
    rate_dec, mag, surf_brt, ang_diam. The units are included in the table
    columns. The ``time`` column is a ``CxoTime`` object.

    The returned Table has a meta key value ``response_text`` with the full text
    of the Horizons response.

    Example::

      >>> from chandra_aca.planets import get_planet_chandra_horizons
      >>> dat = get_planet_chandra_horizons('jupiter', '2020:001', '2020:002', n_times=4)
      >>> dat
      <Table length=5>
               time            ra       dec     rate_ra    rate_dec   mag      surf_brt    ang_diam
                              deg       deg    arcsec / h arcsec / h  mag   mag / arcsec2   arcsec
              object        float64   float64   float64    float64  float64    float64     float64
      --------------------- --------- --------- ---------- ---------- ------- ------------- --------
      2020:001:00:00:00.000 276.96494 -23.20087      34.22       0.98  -1.839         5.408    31.75
      2020:001:06:00:00.000 277.02707 -23.19897      34.30       1.30  -1.839         5.408    31.76
      2020:001:12:00:00.000 277.08934 -23.19652      34.39       1.64  -1.839         5.408    31.76
      2020:001:18:00:00.000 277.15181 -23.19347      34.51       2.03  -1.839         5.408    31.76
      2020:002:00:00:00.000 277.21454 -23.18970      34.69       2.51  -1.839         5.408    31.76

    :param body: one of 'mercury', 'venus', 'mars', 'jupiter', 'saturn',
        'uranus', 'neptune'
    :param timestart: start time (any CxoTime-compatible time)
    :param timestop: stop time (any CxoTime-compatible time)
    :param n_times: number of time samples (inclusive, default=10)
    :param timeout: timeout for query to Horizons (secs)
    :returns: Table of information
    :raises ValueError: for an unknown body or a failed HTTP request
    """
    import requests
    timestart = CxoTime(timestart)
    timestop = CxoTime(timestop)
    planet_ids = {'mercury': '199',
                  'venus': '299',
                  'mars': '499',
                  'jupiter': '599',
                  'saturn': '699',
                  'uranus': '799',
                  'neptune': '899'}
    if body not in planet_ids:
        raise ValueError(f'body must be one of {tuple(planet_ids)}')
    # CENTER='@-151' is the Horizons site code for the Chandra observatory.
    # NOTE(review): STEP_SIZE=n_times-1 yields n_times output rows from
    # Horizons, yet the docstring example shows n_times=4 returning 5 rows —
    # confirm the intended sample count.
    params = dict(
        COMMAND=planet_ids[body],
        MAKE_EPHEM='YES',
        CENTER='@-151',
        TABLE_TYPE='OBSERVER',
        ANG_FORMAT='DEG',
        START_TIME=timestart.datetime.strftime('%Y-%b-%d %H:%M'),
        STOP_TIME=timestop.datetime.strftime('%Y-%b-%d %H:%M'),
        STEP_SIZE=str(n_times - 1),
        QUANTITIES='1,3,9,13',
        CSV_FORMAT='YES')
    # The HORIZONS web API seems to require all params to be quoted strings.
    # See: https://ssd.jpl.nasa.gov/horizons_batch.cgi
    for key, val in params.items():
        params[key] = repr(val)
    # ... except 'batch', which must be a bare integer
    params['batch'] = 1
    resp = requests.get(URL_HORIZONS, params=params, timeout=timeout)
    if resp.status_code != requests.codes['ok']:
        # Bug fix: this message was a plain (non-f) string, so the {resp...}
        # placeholders were never interpolated.
        raise ValueError(f'request {resp.url} failed: {resp.reason} ({resp.status_code})')
    # The CSV data rows sit between the $$SOE and $$EOE markers.
    lines = resp.text.splitlines()
    idx0 = lines.index('$$SOE') + 1
    idx1 = lines.index('$$EOE')
    lines = lines[idx0: idx1]
    dat = ascii.read(lines, format='no_header', delimiter=',',
                     names=['time', 'null1', 'null2', 'ra', 'dec', 'rate_ra', 'rate_dec',
                            'mag', 'surf_brt', 'ang_diam', 'null3']
                     )
    # First 20 chars of the time field hold '%Y-%b-%d %H:%M:%S'
    times = [datetime.strptime(val[:20], '%Y-%b-%d %H:%M:%S') for val in dat['time']]
    dat['time'] = CxoTime(times, format='datetime')
    dat['time'].format = 'date'
    dat['ra'].info.unit = u.deg
    dat['dec'].info.unit = u.deg
    dat['rate_ra'].info.unit = u.arcsec / u.hr
    dat['rate_dec'].info.unit = u.arcsec / u.hr
    dat['mag'].info.unit = u.mag
    dat['surf_brt'].info.unit = u.mag / (u.arcsec**2)
    dat['ang_diam'].info.unit = u.arcsec
    dat['ra'].info.format = '.5f'
    dat['dec'].info.format = '.5f'
    dat['rate_ra'].info.format = '.2f'
    dat['rate_dec'].info.format = '.2f'
    dat['mag'].info.format = '.3f'
    dat['surf_brt'].info.format = '.3f'
    dat['ang_diam'].info.format = '.2f'
    dat.meta['response_text'] = resp.text
    del dat['null1']
    del dat['null2']
    del dat['null3']
    return dat
|
<reponame>EvanKepner/m
"""
API
---
These are high level objects for interacting with ``mutatest``. The primary objects include:
1. The ``Genome``
2. The ``GenomeGroup``
3. The ``Mutant``
``Genomes`` are representations of a Python source code file. This includes a representation of
the Abstract Syntax Tree (AST) and the locations within the AST that could be mutated. The
locations are accessed by the ``targets`` and ``covered_targets`` properties of the ``Genome``,
the latter being available if a coverage file is set for the ``Genome``.
Locations are represented as ``LocIndex`` objects from ``mutatest.transformers`` which may be
referenced as specific points of mutation.
``Mutants`` are created from ``Genome.mutate()`` for a specific ``LocIndex`` in the ``Genome``
targets. A ``mutant`` is an immutable named-tuple with all of the attributes necessary to mutate
the appropriate ``__pycache__`` file with the ``write_cache()`` method.
Collections of ``Genomes`` can be managed through a ``GenomeGroup``. The ``GenomeGroup`` provides
methods for setting global filters, coverage files, and producing targets of ``LocIndex`` objects
across the collection of ``Genomes``. This is a useful representation when dealing with a folder
of multiple source files.
"""
import ast
import importlib
import itertools
import logging
from collections.abc import MutableMapping
from copy import deepcopy
from pathlib import Path
from typing import (
Any,
Dict,
ItemsView,
Iterable,
Iterator,
KeysView,
Mapping,
NamedTuple,
Optional,
Set,
Union,
ValuesView,
)
from mutatest import cache
from mutatest.filters import CategoryCodeFilter, CoverageFilter
from mutatest.transformers import CATEGORIES, LocIndex, MutateAST
# Module-level logger used for mutation/cache diagnostics.
LOGGER = logging.getLogger(__name__)
class MutationException(Exception):
    """Mutation Exception type specifically for mismatches in mutation operations."""
    pass
class Mutant(NamedTuple):
    """Mutant definition.

    Mutants are created through the Genome at specific targets using the mutate method.
    Mutants are immutable and can be written to disk in the ``__pycache__``.

    You can create ``Mutants`` using ``Genome.mutate``, and then ``write_cache`` to apply to
    the ``__pycache__``.
    """
    # Compiled (mutated) code object; used as the payload for the .pyc file
    mutant_code: Any
    # Path to the original source file whose cache entries are replaced
    src_file: Path
    # Path of the cache (__pycache__ .pyc) file to write
    cfile: Path
    # Loader for the source file (not used in this class — presumably
    # carried along for callers, TODO confirm)
    loader: Any
    # Source file stats; "mtime" and "size" are embedded in the pyc header
    source_stats: Mapping[str, Any]
    # File mode (permissions) applied when writing the cache file
    mode: int
    # AST location index of the mutation target
    src_idx: LocIndex
    # The mutation applied at src_idx
    mutation: Any
    def write_cache(self) -> None:
        """Create the cache file for the mutant on disk in ``__pycache__``.

        Existing target cache files are removed to ensure clean overwrites.

        Reference: https://github.com/python/cpython/blob/master/Lib/py_compile.py#L157

        Returns:
            None, creates the cache file on disk.
        """
        cache.check_cache_invalidation_mode()
        # Build a timestamp-based .pyc payload, mirroring py_compile's
        # internals; private importlib APIs, hence the type: ignore.
        bytecode = importlib._bootstrap_external._code_to_timestamp_pyc(  # type: ignore
            self.mutant_code, self.source_stats["mtime"], self.source_stats["size"]
        )
        cache.remove_existing_cache_files(self.src_file)
        cache.create_cache_dirs(self.cfile)
        LOGGER.debug("Writing mutant cache file: %s", self.cfile)
        # Atomic write so a partially-written pyc is never importable
        importlib._bootstrap_external._write_atomic(self.cfile, bytecode, self.mode)  # type: ignore
class Genome:
    """The Genome class describes the source file to be mutated.
    The class describes a single .py file and has properties for the abstract syntax tree (AST)
    and the viable mutation targets. You can initialize without any arguments. If the
    ``source_file`` is changed the ast and targets properties will be recalculated for that file.
    Locations in the Genome may be mutated and written to the ``__pycache__`` using the mutate
    method.
    """
    def __init__(
        self,
        source_file: Optional[Union[str, Path]] = None,
        coverage_file: Optional[Union[str, Path]] = Path(".coverage"),
        filter_codes: Optional[Iterable[str]] = None,
    ) -> None:
        """Initialize the Genome.
        There are internal properties prefixed with an underscore used for the lazy evaluation
        of the AST and mutation targets.
        Args:
            source_file: an optional source file path
            coverage_file: coverage file for filtering covered lines,
                default value is set to ".coverage".
            filter_codes: 2-letter category codes to filter returned targets
        """
        # Properties with an underscore prefix are used for local caching and are not designed
        # to be modified directly.
        # Related to source files, AST, targets
        self._source_file = None
        self._ast: Optional[ast.Module] = None
        self._targets: Optional[Set[LocIndex]] = None
        # Related to coverage filtering
        self._coverage_file = None
        self._covered_targets: Optional[Set[LocIndex]] = None
        # Related to category code filtering, not cached but uses a setter for valid value checks
        self._filter_codes: Set[str] = set()
        # Initialize set values using properties
        # These may be set later and clear the cached values in the setters
        # (the setters below reset _ast/_targets/_covered_targets to force recomputation).
        self.source_file = Path(source_file) if source_file else None
        self.coverage_file = Path(coverage_file) if coverage_file else None
        self.filter_codes: Set[str] = set(filter_codes) if filter_codes else set()
    ################################################################################################
    # CATEGORY FILTER CODES PROPERTIES
    ################################################################################################
    @property
    def filter_codes(self) -> Set[str]:
        """Filter codes applied to targets and covered targets."""
        return self._filter_codes
    @filter_codes.setter
    def filter_codes(self, value: Iterable[str]) -> None:
        """Setter for filter codes. These are always applied when set on the Genome.
        Set this to an empty set to remove all category code filters from returned targets.
        Args:
            value: a set of 2-letter codes, use a set of a single code if needed.
        Returns:
            None
        Raises:
            ValueError: if the 2-letter codes in value are not supported by the transformer.
        """
        # Validate against the transformer's known category codes before accepting.
        value, valid_codes = set(value), set(CATEGORIES.values())
        if not value.issubset(valid_codes):
            raise ValueError(
                f"Invalid category codes: {value - valid_codes}.\nValid codes: {CATEGORIES}"
            )
        self._filter_codes = value
    ################################################################################################
    # SOURCE FILE PROPERTIES
    ################################################################################################
    @property
    def source_file(self) -> Optional[Path]:
        """The source .py file represented by this Genome.
        Returns:
            The ``source_file`` path.
        """
        return self._source_file
    @source_file.setter
    def source_file(self, value: Optional[Union[str, Path]]) -> None:
        """Setter for the source_file that clears the AST and targets for recalculation."""
        self._source_file = Path(value) if value else None
        # Invalidate caches so the next .ast / .targets access re-reads the new file.
        self._ast = None
        self._targets = None
    @property
    def ast(self) -> ast.Module:  # type: ignore
        """Abstract Syntax Tree (AST) representation of the source_file.
        This is cached locally and updated if the source_file is changed.
        Returns:
            Parsed AST for the source file.
        Raises:
            TypeError: if ``source_file`` is not set.
        """
        if self._ast is None:
            if not self.source_file:
                raise TypeError("Source_file property is set to NoneType.")
            # Read as bytes so ast.parse honors any encoding declaration in the file.
            with open(self.source_file, "rb") as src_stream:
                self._ast = ast.parse(src_stream.read())
        return self._ast
    @property
    def targets(self) -> Set[LocIndex]:
        """Viable mutation targets within the AST of the ``source_file``.
        This is cached locally and updated if the source_file is changed. Filtering is not
        cached and applies any time the ``filter_codes`` are changed.
        Returns:
            The set of the location index objects from the transformer that could be
            potential mutation targets.
        """
        if self._targets is None:
            # A read-only visit collects candidate locations without modifying the AST.
            ro_mast = MutateAST(
                target_idx=None, mutation=None, readonly=True, src_file=self.source_file
            )
            ro_mast.visit(self.ast)
            self._targets = ro_mast.locs
        # Category filtering is applied on every access, never cached.
        return CategoryCodeFilter(codes=self.filter_codes).filter(self._targets)
    ################################################################################################
    # COVERAGE FILTER PROPERTIES
    ################################################################################################
    @property
    def coverage_file(self) -> Optional[Path]:
        """The .coverage file to use for filtering targets."""
        return self._coverage_file
    @coverage_file.setter
    def coverage_file(self, value: Optional[Union[str, Path]]) -> None:
        """Setter for ``coverage_file``, clears the cached ``covered_targets``."""
        self._coverage_file = Path(value) if value else None
        self._covered_targets = None
    @property
    def covered_targets(self) -> Set[LocIndex]:
        """Targets that are marked as covered based on the ``coverage_file``.
        This is cached locally and updated if the coverage_file is changed. Filtering is not
        cached and applies any time the filter_codes are changed.
        Returns:
            The targets that are covered.
        Raises:
            TypeError: if the ``source_file`` or ``coverage_file`` is not set for the Genome.
        """
        if not self.source_file:
            raise TypeError("Source_file property is set to NoneType.")
        if not self.coverage_file:
            raise TypeError("Coverage_file property is set to NoneType.")
        if self._covered_targets is None:
            self._covered_targets = CoverageFilter(coverage_file=self.coverage_file).filter(
                self.targets, self.source_file
            )
        return CategoryCodeFilter(codes=self.filter_codes).filter(self._covered_targets)
    ################################################################################################
    # MUTATION METHODS
    ################################################################################################
    def mutate(self, target_idx: LocIndex, mutation_op: Any, write_cache: bool = False) -> Mutant:
        """Create a mutant from a single LocIndex that is in the Genome.
        Mutation_op must be a valid mutation for the target_idx operation code type.
        Optionally, use write_cache to write the mutant to ``__pycache__`` based on the detected
        location at the time of creation. The Genome AST is unmodified by mutate.
        Args:
            target_idx: the target location index (member of .targets)
            mutation_op: the mutation operation to use
            write_cache: optional flag to write to ``__pycache__``
        Returns:
            The mutant definition
        Raises:
            MutationException: if ``mutation_op`` is not a valid mutation for the location index.
            TypeError: if the source_file property is not set on the Genome.
            ValueError: if the target_idx is not a member of Genome targets.
        """
        # Guard clauses: valid operation for the category, a source file, and a known target.
        op_code = CATEGORIES[target_idx.ast_class]
        valid_mutations = CategoryCodeFilter(codes=(op_code,)).valid_mutations
        if mutation_op not in valid_mutations:
            raise MutationException(
                f"{mutation_op} is not a member of mutation category {op_code}.\n"
                f"Valid mutations for {op_code}: {valid_mutations}."
            )
        if not self.source_file:
            raise TypeError("Source_file is set to NoneType")
        if target_idx not in self.targets:
            raise ValueError(f"{target_idx} is not in the Genome targets.")
        mutant_ast = MutateAST(
            target_idx=target_idx, mutation=mutation_op, src_file=self.source_file, readonly=False
        ).visit(
            deepcopy(self.ast)  # deepcopy to avoid in-place modification of AST
        )
        # generate cache file pyc machinery for writing the __pycache__ file
        loader = importlib.machinery.SourceFileLoader(  # type: ignore
            "<py_compile>", self.source_file
        )
        # create the cache files with the mutated AST
        mutant = Mutant(
            mutant_code=compile(mutant_ast, str(self.source_file), "exec"),
            src_file=Path(self.source_file),
            cfile=Path(cache.get_cache_file_loc(self.source_file)),
            loader=loader,
            source_stats=loader.path_stats(self.source_file),
            mode=importlib._bootstrap_external._calc_mode(self.source_file),  # type: ignore
            src_idx=target_idx,
            mutation=mutation_op,
        )
        if write_cache:
            mutant.write_cache()
        return mutant
class GenomeGroupTarget(NamedTuple):
    """Pairing of a source file path with one mutation target location.
    Produced by ``GenomeGroup.targets`` / ``GenomeGroup.covered_targets`` so callers
    get attribute access (``.source_path``, ``.loc_idx``) instead of tuple indexing.
    """
    source_path: Path
    loc_idx: LocIndex
class GenomeGroup(MutableMapping):  # type: ignore
    """The GenomeGroup: a MutableMapping of Genomes for operations on the group."""

    def __init__(self, source_location: Optional[Union[str, Path]] = None) -> None:
        """Initialize the GenomeGroup.

        GenomeGroup is a MutableMapping collection of Genomes with defined ``source_file``
        locations. You can use it to apply standard filters or coverage files across the group
        and get all mutation targets for the group. Folders and files can be added through
        methods.

        Args:
            source_location: an optional folder or file for initialization using the default
                settings of no file exclusions except 'test' files. For more flexibility,
                initialize the class and then use the ``.add_folder()`` method directly.

        Raises:
            TypeError: if ``source_location`` is neither a folder nor a file.
        """
        # internal mapping for Genomes, not designed for direct modification, use class properties
        self._store: Dict[Path, Genome] = dict()
        if source_location is not None:
            source_location = Path(source_location)
            if source_location.is_dir():
                self.add_folder(source_location)
            elif source_location.is_file():
                self.add_file(source_location)
            else:
                raise TypeError(f"{source_location} is not a folder or file.")

    def __setitem__(self, key: Path, value: Genome) -> None:
        """Setter for GenomeGroup, enforces Path keys and Genome values.

        Args:
            key: key for the mapping, must be a Path
            value: the genome

        Raises:
            TypeError: if ``key`` is not a Path or ``value`` is not a Genome.
        """
        if not isinstance(key, Path):
            raise TypeError("Only Path keys are supported.")
        if not isinstance(value, Genome):
            raise TypeError("Only Genome values are supported.")
        self._store[key] = value

    def __getitem__(self, key: Path) -> Genome:
        """Getter for keys from the mapping store."""
        return self._store[key]

    def __delitem__(self, key: Path) -> None:
        """Delete a key from the mapping store."""
        del self._store[key]

    def __iter__(self) -> Iterator[Path]:
        """Iterate over the mapping store keys."""
        return iter(self._store)

    def __len__(self) -> int:
        """Count of keys in the mapping store."""
        return len(self._store)

    def __repr__(self) -> str:
        """Base mapping store repr."""
        return self._store.__repr__()

    def items(self) -> ItemsView[Path, Genome]:
        """ItemsView for the mapping store."""
        return self._store.items()

    def keys(self) -> KeysView[Path]:
        """KeysView of the mapping store."""
        return self._store.keys()

    def values(self) -> ValuesView[Genome]:
        """ValuesView of the mapping store."""
        return self._store.values()

    def add_genome(self, genome: Genome) -> None:
        """Add a Genome to the GenomeGroup. Genomes must have a defined ``source_file``.

        Args:
            genome: the ``Genome`` to add

        Returns:
            None

        Raises:
            TypeError: if the ``Genome.source_file`` is not set.
        """
        if genome.source_file is None:
            raise TypeError("Genome source_file is set to NoneType.")
        self[genome.source_file] = genome

    def add_file(
        self,
        source_file: Union[str, Path],
        coverage_file: Optional[Union[str, Path]] = Path(".coverage"),
    ) -> None:
        """Add a ``.py`` source file to the group as a new Genome.

        The Genome is created automatically.

        Args:
            source_file: the source file to add with Genome creation
            coverage_file: an optional coverage file to set on the Genome, defaults to ".coverage".

        Returns:
            None
        """
        self.add_genome(Genome(source_file=source_file, coverage_file=coverage_file))

    def add_folder(
        self,
        source_folder: Union[str, Path],
        exclude_files: Optional[Iterable[Union[str, Path]]] = None,
        ignore_test_files: bool = True,
    ) -> None:
        """Add a folder (recursively) to the GenomeGroup for all ``.py`` files.

        Args:
            source_folder: the folder to recursively search
            exclude_files: optional iterable of specific files in the source_folder to skip
            ignore_test_files: optional flag, default to true, to ignore files prefixed with
                ``test_`` or suffixed with ``_test`` in the stem of the file name.

        Returns:
            None, adds all files as Genomes to the group.

        Raises:
            TypeError: if ``source_folder`` is not a folder.
        """
        source_folder = Path(source_folder)
        if not source_folder.is_dir():
            raise TypeError(f"{source_folder} is not a directory.")
        # Resolve exclusions once into a set: O(1) membership tests per file, and a
        # consistent type with the empty default (previously a list vs. set mix).
        excluded = {Path(e).resolve() for e in exclude_files} if exclude_files else set()
        for fn in source_folder.rglob("*.py"):
            if ignore_test_files and (fn.stem.startswith("test_") or fn.stem.endswith("_test")):
                continue
            if fn.resolve() not in excluded:
                self.add_file(fn)

    def set_filter(self, filter_codes: Iterable[str]) -> None:
        """Set the filter codes for all Genomes in the group.

        Args:
            filter_codes: iterable of 2-letter codes to set on all Genomes in the group.

        Returns:
            None
        """
        # Only the Genomes are needed; keys are irrelevant here.
        for genome in self.values():
            genome.filter_codes = set(filter_codes)

    def set_coverage(self, coverage_file: Union[str, Path]) -> None:
        """Set a common coverage file for all Genomes in the group.

        Args:
            coverage_file: the coverage file to set.

        Returns:
            None
        """
        for genome in self.values():
            genome.coverage_file = Path(coverage_file)

    @property
    def targets(self) -> Set[GenomeGroupTarget]:
        """All mutation targets in the group, returned as tuples of ``source_file`` and location
        indices in a single set.

        Returns:
            Set of ``GenomeGroupTarget`` pairs of ``source_file`` and location index for all
            targets in the group, for easy attribute access.
        """
        # Direct comprehension: one pass, no intermediate itertools.product tuples.
        return {
            GenomeGroupTarget(source_path, loc_idx)
            for source_path, genome in self.items()
            for loc_idx in genome.targets
        }

    @property
    def covered_targets(self) -> Set[GenomeGroupTarget]:
        """All mutation targets in the group that are covered,
        returned as tuples of ``source_file`` and location indices in a single set.

        Returns:
            Set of ``GenomeGroupTarget`` pairs of ``source_file`` and location index for all
            covered targets in the group, for easy attribute access.
        """
        return {
            GenomeGroupTarget(source_path, loc_idx)
            for source_path, genome in self.items()
            for loc_idx in genome.covered_targets
        }
|
<filename>vmcnet/physics/spin.py
"""Spin calculations."""
from typing import Callable
import jax.numpy as jnp
from vmcnet.utils.distribute import get_mean_over_first_axis_fn
from vmcnet.utils.typing import Array, P, ModelApply, SLArray
def create_spin_square_expectation(
    local_spin_exchange: ModelApply[P], nelec: Array, nan_safe: bool = True
) -> Callable[[P, Array], jnp.float32]:
    """Create an estimator for <psi | S^2 | psi> / <psi | psi>.

    For a wavefunction of spin-1/2 particles psi(X) = antisymmetrize(F(R) xi(s)),
    with the first nelec[0] electrons spin-up and the last nelec[1] spin-down,
    the S^2 expectation splits into a closed-form diagonal piece,

        <psi | S_z^2 - S_z + sum_i S_{i+} S_{i-} | psi>
            = 0.25 (N_up - N_down)^2 + 0.5 (N_up + N_down),

    plus a cross term sum_{i != j} S_{i+} S_{j-} estimated by averaging the local
    electron-exchange observable (see ``create_local_spin_exchange``).

    Args:
        local_spin_exchange (Callable): computes the local spin exchange term,
            F(R_{1 <-> 1 + nelec[0]}) / F(R), with signature
            (params, x) -> local exchange term array with shape (x.shape[0],).
        nelec (Array): size-(2,) array; nelec[0] spin-up count, nelec[1] spin-down count.
        nan_safe (bool, optional): if True the mean uses jnp.nanmean instead of jnp.mean;
            set False when hunting unexpected nans. Defaults to True.

    Returns:
        Callable: (params, x) -> estimate of <psi | S^2 | psi> / <psi | psi> for the
        (not necessarily normalized) wavefunction.
    """
    mean_fn = get_mean_over_first_axis_fn(nan_safe=nan_safe)

    def spin_square_expectation(params: P, x: Array) -> jnp.float32:
        n_up, n_down = nelec[0], nelec[1]
        # Closed-form diagonal contribution.
        diagonal_term = 0.25 * (n_up - n_down) ** 2 + 0.5 * (n_up + n_down)
        # Monte Carlo estimate of the exchange cross term.
        exchange_term = mean_fn(local_spin_exchange(params, x))
        return diagonal_term + exchange_term

    return spin_square_expectation
def create_local_spin_exchange(
    slog_psi_apply: Callable[[P, Array], SLArray], nelec: Array
) -> ModelApply[P]:
    """Create the local observable from exchange of a spin-up and spin-down electron.
    We assume a wavefunction of spin-1/2 particles. If the wavefunction psi is given by
    psi(X) = antisymmetrize(F(R) xi(s)),
    where R is the real-space coordinates in X and s are the corresponding spins, and
    xi(s) is the spin configuration function corresponding to the S_z eigenstate with
    the first nelec[0] electrons being spin-up and the last nelec[1] electrons being
    spin-down.
    This factory creates a function which computes
    -1 * nelec[0] * nelec[1] * F(R_{1 <-> 1 + nelec[0]}) / F(R),
    where R_{1 <-> 1 + nelec[0]} denotes the exchange of the first spin-up and spin-down
    electron. When integrated over the distribution p(R) = |F(R)|^2 / int_R |F(R)|^2,
    this quantity gives the overlap integral
    <psi | sum_{i != j} S_{i+} * S_{j-} | psi> / <psi | psi>,
    where S_i+ and S_j- are the total spin raising and lowering operators, respectively,
    i.e. S_{i+} = S_{i,x} + iS_{i,y} and S_{j-} = S_{j,x} - iS_{j,y}.
    Args:
        slog_psi_apply (Callable): a function which computes the wavefunction amplitude
            in sign/log form, returning the pair (sign(F(x)), log|F(x)|). It is okay for
            it to produce batch outputs on batches of x as long as it produces a single
            (sign, log) pair for single x. Has the signature
            (params, single_x_in) -> (sign(F(single_x_in)), log|F(single_x_in)|)
        nelec (Array): an array of size (2,) with nelec[0] being the number of spin-up
            electrons and nelec[1] being the number of spin-down electrons.
    Returns:
        Callable: function which computes a local spin exchange term, i.e. the function
        -1 * nelec[0] * nelec[1] * F(R_{1 <-> 1 + nelec[0]}) / F(R). Has signature
        (params, x) -> local exchange term array with shape (x.shape[0],)
    """
    if nelec[0] == 0 or nelec[1] == 0:
        # if only one spin species, then no exchange term
        def local_spin_exchange(params: P, x: Array) -> Array:
            return jnp.zeros_like(x[..., 0, 0])
    else:
        def local_spin_exchange(params: P, x: Array) -> Array:
            sign_psi, log_psi = slog_psi_apply(params, x)
            # Permutation of the electron axis that swaps particle 0 (first spin-up)
            # with particle nelec[0] (first spin-down); all other indices unchanged.
            swapped_indices = list(range(x.shape[-2]))
            swapped_indices[0], swapped_indices[nelec[0]] = nelec[0], 0
            x_exchanged = jnp.take(x, jnp.array(swapped_indices), axis=-2)
            sign_exchanged_psi, log_exchanged_psi = slog_psi_apply(params, x_exchanged)
            # Ratio F(R_exchanged) / F(R) assembled in sign/log form for stability.
            sign_out = sign_psi * sign_exchanged_psi
            log_out = log_exchanged_psi - log_psi
            return -nelec[0] * nelec[1] * sign_out * jnp.exp(log_out)
    return local_spin_exchange
|
<gh_stars>1-10
"""surf URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
import os
from django.conf import settings
from django.contrib import admin
from django.contrib.sitemaps import views as sitemap_views
from django.conf.urls import url, include
from django.urls import path
from django.conf.urls.static import static
from django.conf.urls.i18n import i18n_patterns
from django.utils.translation import gettext_lazy as _
from django.contrib.auth import views as auth_views
from django.views.generic import TemplateView
from rest_framework.schemas import get_schema_view
from surf.sitemap import MainSitemap, MaterialsSitemap
from surf.routers import CustomRouter
from surf.apps.materials.views import (
portal_material,
portal_single_page_application,
MaterialSearchAPIView,
MaterialSetAPIView,
KeywordsAPIView,
SimilarityAPIView,
AuthorSuggestionsAPIView,
MaterialAPIView,
MaterialRatingAPIView,
MaterialApplaudAPIView,
CollectionViewSet,
CollectionMaterialPromotionAPIView,
)
from surf.apps.filters.views import FilterCategoryView
from surf.apps.users.views import (
DeleteAccountAPIView,
UserDetailsAPIView,
ObtainTokenAPIView
)
from surf.apps.core.views import health_check, robots_txt
from surf.apps.communities.views import CommunityViewSet
from surf.apps.themes.views import ThemeViewSet
from surf.apps.stats.views import StatsViewSet, StatsView
from surf.apps.locale.views import get_localisation_strings
from surf.apps.feedback.views import FeedbackAPIView
# Branding of the Django admin interface.
admin.site.site_header = 'Surf'
admin.site.site_title = 'Surf'
admin.site.index_title = 'Surf'
# Endpoints exposed in the public OpenAPI schema (see get_schema_view below).
public_api_patterns = [
    url(r'^search/filter-categories/', FilterCategoryView.as_view()),
    url(r'^search/autocomplete/', KeywordsAPIView.as_view()),
    url(r'^search/', MaterialSearchAPIView.as_view()),
    url(r'^documents/stats', StatsView.as_view()),
    url(r'^suggestions/similarity/', SimilarityAPIView.as_view()),
    url(r'^suggestions/author/', AuthorSuggestionsAPIView.as_view()),
]
# OpenAPI schema restricted to the public patterns above, served under /api/v1/.
schema_view = get_schema_view(
    title="Search API",
    description="An API that allows search through Elastic Search. Instead of writing Elastic queries "
                "search can be done simply by passing a few parameters to the endpoints.",
    patterns=public_api_patterns,
    url="/api/v1/"
)
# Swagger UI template rendering the named 'openapi-schema' route.
swagger_view = TemplateView.as_view(
    template_name='swagger/swagger-ui.html',
    extra_context={'schema_url': 'openapi-schema'}
)
# DRF viewset routes.
router = CustomRouter()
router.register(r'collections', CollectionViewSet)
router.register(r'communities', CommunityViewSet)
router.register(r'themes', ThemeViewSet)
router.register(r'stats', StatsViewSet, basename="stats")
# Full API: public patterns + router routes + authenticated/material endpoints.
apipatterns = public_api_patterns + router.urls + [
    path('openapi', schema_view, name='openapi-schema'),
    path('docs/', swagger_view, name='docs'),
    url(r'^users/me/', UserDetailsAPIView.as_view()),
    url(r'^users/delete-account/', DeleteAccountAPIView.as_view()),
    url(r'^users/obtain-token/', ObtainTokenAPIView.as_view()),
    url(r'^rate_material/', MaterialRatingAPIView.as_view()),
    url(r'^applaud_material/', MaterialApplaudAPIView.as_view()),
    url(r'^materials/set/', MaterialSetAPIView.as_view()),
    url(r'^materials/search/', MaterialSearchAPIView.as_view()),
    url(r'^filter-categories/', FilterCategoryView.as_view()),
    url(r'^keywords/', KeywordsAPIView.as_view()),
    url(r'^materials/(?P<external_id>.+)/', MaterialAPIView.as_view()),
    url(r'^materials/', MaterialAPIView.as_view()),
    url(r'^collections/(?P<collection_id>.+)/promote_material/(?P<external_id>.+)/',
        CollectionMaterialPromotionAPIView.as_view()),
    url(r'^feedback/', FeedbackAPIView.as_view())
]
# Sitemap sections rendered by django.contrib.sitemaps views below.
sitemaps = {
    "main": MainSitemap,
    "materials": MaterialsSitemap
}
urlpatterns = [
    # System
    url(r'^health/?$', health_check, name="health-check"),
    # Authentication
    # Catching frontend login endpoints before social auth handles "login" prefix
    url(r'^login/(permissions|success)/?', portal_single_page_application),
    url('', include('social_django.urls', namespace='social')),
    url(r'^logout/?$', auth_views.LogoutView.as_view(success_url_allowed_hosts=settings.ALLOWED_REDIRECT_HOSTS)),
    # Admin interface
    url(r'^admin/', admin.site.urls),
    # API and other data
    url(r'^api/v1/', include(apipatterns)),
    url(r'^locales/(?P<locale>en|nl)/?$', get_localisation_strings)
]
# Frontend/crawler routes only exist for the edusources deployment.
if settings.PROJECT == "edusources":
    urlpatterns += [
        # For crawlers
        path('sitemap.xml', sitemap_views.index, {'sitemaps': sitemaps}, name="sitemap-index"),
        path('sitemap-<section>.xml', sitemap_views.sitemap, {'sitemaps': sitemaps},
             name="django.contrib.sitemaps.views.sitemap"),
        path('robots.txt', robots_txt)
    ]
    # Translated frontend patterns
    urlpatterns += i18n_patterns(
        url(_(r'^materialen/zoeken/?'), portal_single_page_application, name="portal-search"),
        url(_(r'^materialen/(?P<external_id>.+)/'), portal_material),
        url(r'^$', portal_single_page_application, name="portal-spa"),
        url(r'^.*/$', portal_single_page_application),
        prefix_default_language=False
    )
else:
    urlpatterns += [
        url(r'^$', health_check),
    ]
if settings.MODE == 'localhost':
    # These patterns are ignored in production, but are needed for localhost media and some static files
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
    urlpatterns += static("/images/", document_root=os.path.join(settings.PORTAL_BASE_DIR, "images"))
if settings.DEBUG:
    # Debug toolbar routes take precedence over everything else.
    import debug_toolbar
    urlpatterns = [
        path('__debug__/', include(debug_toolbar.urls)),
    ] + urlpatterns
# Custom 404 handler rendering the portal's not-found page.
handler404 = 'surf.apps.materials.views.portal_page_not_found'
|
<filename>bundle/sagemaker_rl_agent/lib/python3.6/site-packages/markov/gazebo_utils/model_updater.py
import time
import rospy
import markov.rollout_constants as const
from markov.log_handler.deepracer_exceptions import GenericRolloutException
from markov.domain_randomizations.constants import GazeboServiceName
from markov.rospy_wrappers import ServiceProxyWrapper
from markov.track_geom.constants import SET_MODEL_STATE
from markov.track_geom.utils import euler_to_quaternion
from markov.gazebo_tracker.trackers.get_model_state_tracker import GetModelStateTracker
from markov.gazebo_tracker.trackers.set_model_state_tracker import SetModelStateTracker
from std_msgs.msg import ColorRGBA
from std_srvs.srv import Empty, EmptyRequest
from geometry_msgs.msg import Pose
from gazebo_msgs.msg import ModelState
from gazebo_msgs.srv import SetModelState, GetModelProperties, GetModelPropertiesRequest
from deepracer_msgs.srv import (GetVisualNames, GetVisualNamesRequest,
GetVisuals, GetVisualsRequest,
SetVisualColors, SetVisualColorsRequest,
SetVisualTransparencies, SetVisualTransparenciesRequest,
SetVisualVisibles, SetVisualVisiblesRequest)
# Subset of GazeboServiceName member names that ModelUpdater.__init__ waits on
# (rospy.wait_for_service) before creating its service proxies.
GAZEBO_SERVICES = ["PAUSE_PHYSICS", "UNPAUSE_PHYSICS",
                   "GET_MODEL_PROPERTIES",
                   "GET_VISUAL_NAMES",
                   "GET_VISUALS",
                   "SET_VISUAL_COLORS",
                   "SET_VISUAL_TRANSPARENCIES",
                   "SET_VISUAL_VISIBLES"]
class ModelUpdater(object):
"""
ModelUpdater class
"""
_instance_ = None
@staticmethod
def get_instance():
"""Method for getting a reference to the Model Updater object"""
if ModelUpdater._instance_ is None:
ModelUpdater()
return ModelUpdater._instance_
    def __init__(self):
        """Initialize a ModelUpdater Object.
        Raises:
            GenericRolloutException: raise a GenericRolloutException if the object is no longer singleton.
        """
        # Enforce the singleton: a second construction is a programming error.
        if ModelUpdater._instance_ is not None:
            raise GenericRolloutException("Attempting to construct multiple ModelUpdater")
        # Wait for required services to be available
        rospy.wait_for_service(SET_MODEL_STATE)
        # Wait for gazebo plugin services to be available
        # (only the services named in GAZEBO_SERVICES are needed by this class).
        for service in GazeboServiceName:
            if service.name in GAZEBO_SERVICES:
                rospy.wait_for_service(service.value)
        # Gazebo service that allows us to position the car
        self._model_state_client = ServiceProxyWrapper(SET_MODEL_STATE, SetModelState)
        self._get_model_prop = ServiceProxyWrapper(GazeboServiceName.GET_MODEL_PROPERTIES.value,
                                                   GetModelProperties)
        self._get_visual_names = ServiceProxyWrapper(GazeboServiceName.GET_VISUAL_NAMES.value,
                                                     GetVisualNames)
        self._get_visuals = ServiceProxyWrapper(GazeboServiceName.GET_VISUALS.value, GetVisuals)
        self._set_visual_colors = ServiceProxyWrapper(GazeboServiceName.SET_VISUAL_COLORS.value,
                                                      SetVisualColors)
        self._set_visual_visibles = ServiceProxyWrapper(GazeboServiceName.SET_VISUAL_VISIBLES.value,
                                                        SetVisualVisibles)
        self._set_visual_transparencies = ServiceProxyWrapper(GazeboServiceName.SET_VISUAL_TRANSPARENCIES.value,
                                                              SetVisualTransparencies)
        self._pause_physics = ServiceProxyWrapper(GazeboServiceName.PAUSE_PHYSICS.value, Empty)
        self._unpause_physics = ServiceProxyWrapper(GazeboServiceName.UNPAUSE_PHYSICS.value, Empty)
        self._set_model_state_tracker = SetModelStateTracker.get_instance()
        self._get_model_state_tracker = GetModelStateTracker.get_instance()
        # there should be only one model updater instance
        ModelUpdater._instance_ = self
@property
def pause_physics_service(self):
"""return the gazebo service for pause physics.
Returns:
ServiceProxyWrapper: The pause physics gazebo service
"""
return self._pause_physics
@property
def unpause_physics_service(self):
"""return the gazebo service for unpause physics.
Returns:
ServiceProxyWrapper: The unpause physics gazebo service
"""
return self._unpause_physics
def update_model_color(self, model_name, car_color):
""" Update the model's color.
Args:
model_name (str): The model name for the race car we want to update color for.
e.g. racecar_0
car_color (str): The car color we want to update the visuals to.
e.g. Purple, Orange, White, Black, etc.
"""
visuals = self.get_model_visuals(model_name)
self.update_color(visuals=visuals,
car_color=car_color)
def update_color(self, visuals, car_color):
"""Update the model's color using it's visuals
Args:
visuals (Visuals): The visuals of the current model
car_color (str): The car color we want to update the visuals to.
e.g. Purple, Orange, White, Black, etc.
"""
link_names = []
visual_names = []
ambients, diffuses, speculars, emissives = [], [], [], []
for visual_name, link_name in zip(visuals.visual_names, visuals.link_names):
if "car_body_link" in visual_name:
visual_names.append(visual_name)
link_names.append(link_name)
ambient = ColorRGBA(const.COLOR_MAP[car_color].r * 0.1,
const.COLOR_MAP[car_color].g * 0.1,
const.COLOR_MAP[car_color].b * 0.1,
const.COLOR_MAP[car_color].a)
diffuse = ColorRGBA(const.COLOR_MAP[car_color].r * 0.35,
const.COLOR_MAP[car_color].g * 0.35,
const.COLOR_MAP[car_color].b * 0.35,
const.COLOR_MAP[car_color].a)
ambients.append(ambient)
diffuses.append(diffuse)
speculars.append(const.DEFAULT_COLOR)
emissives.append(const.DEFAULT_COLOR)
if len(visual_names) > 0:
req = SetVisualColorsRequest()
req.visual_names = visual_names
req.link_names = link_names
req.ambients = ambients
req.diffuses = diffuses
req.speculars = speculars
req.emissives = emissives
self._set_visual_colors(req)
def get_model_visuals(self, model_name):
"""Get the model visuals associated to the model name
Args:
model_name (str): The model name for the race car we want to hide visuals for.
e.g. racecar_0
Returns:
Visuals: The visuals of the current model.
"""
# Get all model's link names
body_names = self._get_model_prop(GetModelPropertiesRequest(model_name=model_name)) \
.body_names
link_names = ["%s::%s" % (model_name, b) for b in body_names]
res = self._get_visual_names(GetVisualNamesRequest(link_names=link_names))
get_visuals_req = GetVisualsRequest(link_names=res.link_names,
visual_names=res.visual_names)
visuals = self._get_visuals(get_visuals_req)
return visuals
def hide_visuals(self, visuals, ignore_keywords, retry_num=3):
"""
Set the transparencies to 1.0 for all links and visibles to False
to hide the model's visuals passed in.
Args:
visuals (Visuals): the visuals for the current model.
ignore_keywords (List): list of keywords name in visual which
we should not hidden its visual
retry_num (int): number of retries for hiding visuals.
"""
link_names = []
visual_names = []
for visual_name, link_name in zip(visuals.visual_names, visuals.link_names):
is_to_hide = True
for name in ignore_keywords:
if name in visual_name:
is_to_hide = False
if is_to_hide:
visual_names.append(visual_name)
link_names.append(link_name)
retry_count = 0
while not self._are_visuals_hidden(link_names, visual_names):
if retry_count >= retry_num:
raise GenericRolloutException("Hide Visuals Gazebo Services constantly failing. Something Wrong with gzserver.")
self._set_visible_false(link_names, visual_names)
self._set_transparent(link_names, visual_names)
retry_count += 1
time.sleep(2) # sleep 2 seconds until next retry
def _set_visible_false(self, link_names, visual_names, block=True):
    """Mark every given visual as not visible.

    Args:
        link_names ([str]): link names.
        visual_names ([str]): visual names.
        block (bool, optional): whether the service call blocks. Defaults to True.
    """
    request = SetVisualVisiblesRequest()
    request.link_names = link_names
    request.visual_names = visual_names
    # One False entry per link: hide everything that was passed in.
    request.visibles = [False for _ in link_names]
    request.block = block
    self._set_visual_visibles(request)
def _set_transparent(self, link_names, visual_names, block=True):
    """Make every given visual fully transparent.

    Args:
        link_names ([str]): link names.
        visual_names ([str]): visual names.
        block (bool, optional): whether the service call blocks. Defaults to True.
    """
    request = SetVisualTransparenciesRequest()
    request.link_names = link_names
    request.visual_names = visual_names
    # 1.0 means fully transparent for each passed-in link.
    request.transparencies = [1.0 for _ in link_names]
    request.block = block
    self._set_visual_transparencies(request)
def _are_visuals_hidden(self, link_names, visual_names):
    """Tell whether all the given visuals are currently hidden.

    Hidden means either every transparency is 1.0 or every visible flag
    is False.

    Args:
        link_names ([str]): link names.
        visual_names ([str]): visual names.

    Returns:
        bool: True when the visuals are all hidden.
    """
    visuals = self._get_visuals(GetVisualsRequest(link_names=link_names,
                                                  visual_names=visual_names))
    fully_transparent = int(sum(visuals.transparencies)) == len(link_names)
    none_visible = sum(visuals.visibles) == 0
    return fully_transparent or none_visible
def _construct_model_pose(self, model_position, yaw):
    """Build a Pose at the given x/y position and yaw (z fixed to 0.0)."""
    quaternion = euler_to_quaternion(yaw=yaw)
    pose = Pose()
    pose.position.x = model_position[0]
    pose.position.y = model_position[1]
    pose.position.z = 0.0
    pose.orientation.x = quaternion[0]
    pose.orientation.y = quaternion[1]
    pose.orientation.z = quaternion[2]
    pose.orientation.w = quaternion[3]
    return pose
def set_model_position(self, model_name, model_position, yaw, is_blocking=False):
    """Place the model at the given track position with the given heading."""
    pose = self._construct_model_pose(model_position, yaw)
    return self.set_model_pose(model_name, pose, is_blocking)
def set_model_pose(self, model_name, model_pose, is_blocking=False):
    """Set a model's pose with zero linear and angular velocity.

    Args:
        model_name (str): model whose state is updated.
        model_pose (Pose): target pose.
        is_blocking (bool): forwarded to set_model_state.

    Returns:
        ModelState: the model state that was sent to Gazebo.
    """
    state = ModelState()
    state.model_name = model_name
    state.pose = model_pose
    # Zero out both velocity vectors component by component.
    for axis in ("x", "y", "z"):
        setattr(state.twist.linear, axis, 0)
        setattr(state.twist.angular, axis, 0)
    return self.set_model_state(state, is_blocking)
def set_model_state(self, model_state, is_blocking=False):
    """Push a model state to Gazebo.

    Args:
        model_state (ModelState): state to apply.
        is_blocking (bool): when True, wait until the state has been
            applied and re-read before returning.

    Returns:
        ModelState: the state that was applied.
    """
    if not is_blocking:
        self._model_state_client(model_state)
        return model_state
    self._set_model_state_tracker.set_model_state(model_state, blocking=True)
    self._get_model_state_tracker.get_model_state(model_state.model_name, '', blocking=True)
    return model_state
def pause_physics(self):
    """Pause physics simulation in the current Gazebo environment."""
    self._pause_physics(EmptyRequest())
def unpause_physics(self):
    """
    Unpause (resume) the current gazebo environment physics.
    """
    self._unpause_physics(EmptyRequest())
|
"""
Ingress example for Kusto DB (from CSV file)
There must be a file called ".env" in this folder with the environment variables
(each is NAME_OF_VAR=value, one per line).
It must be an AAD application that is cleared by the engineering team for access to
the databases below and the variables needed are:
- TENANT_ID
- CLIENT_ID
- SECRET
To run you will need to pip install the following Python packages:
pandas==1.1.4
azure-kusto-data==1.0.3
azure-kusto-ingest==1.0.3
python-dotenv==0.12.0
Prerequisite: correct schema in ingress Kusto DB that matches contents of csv's dataframe
"""
import os
import pandas as pd
import time
import logging
from azure.kusto.data.exceptions import KustoServiceError
from azure.kusto.data.helpers import dataframe_from_result_table
from azure.kusto.data import KustoClient, KustoConnectionStringBuilder
from azure.kusto.ingest import (
IngestionProperties,
DataFormat,
KustoIngestClient
)
from dotenv import load_dotenv
# Configure root logging: INFO level with timestamped messages.
logger_format = '%(asctime)s:%(message)s'
logging.basicConfig(format=logger_format, level=logging.INFO, datefmt="%H:%M:%S")
# Load environment variables from a local .env file (Kusto AAD credentials).
load_dotenv()
# AAD tenant id - read more at
# https://docs.microsoft.com/en-us/onedrive/find-your-office-365-tenant-id
AUTHORITY_ID = os.getenv("TENANT_ID", "")
# AAD application (client) id and secret used for authentication.
CLIENT_ID = os.getenv("CLIENT_ID", "")
CLIENT_SECRET = os.getenv("SECRET", "")
# Kusto cluster endpoints: one for ingestion, one for querying.
CLUSTER_INGR = "https://ingest-<clusername>.<region>.kusto.windows.net"
CLUSTER_INGR_QUERY = "https://<clusername>.<region>.kusto.windows.net"
# Target database and table for ingestion.
DB_INGR = '<database-name>'
TABLENAME_INGR = '<table-name>'
def authenticate_to_kusto(cluster):
    """Return a KustoClient authenticated via AAD application key.

    Credentials come from the module-level environment configuration.
    """
    connection_string = KustoConnectionStringBuilder.with_aad_application_key_authentication(
        cluster, CLIENT_ID, CLIENT_SECRET, AUTHORITY_ID
    )
    # The chosen KustoConnectionStringBuilder determines the auth method.
    return KustoClient(connection_string)
def authenticate_to_kusto_ingress(cluster):
    """Return a KustoIngestClient authenticated via AAD application key.

    Credentials come from the module-level environment configuration.
    """
    connection_string = KustoConnectionStringBuilder.with_aad_application_key_authentication(
        cluster, CLIENT_ID, CLIENT_SECRET, AUTHORITY_ID
    )
    # The chosen KustoConnectionStringBuilder determines the auth method.
    return KustoIngestClient(connection_string)
def query_kusto(query, db, client):
    """Run a Kusto query and return the result as a pandas DataFrame.

    The query is attempted up to 4 times (one try plus 3 retries), sleeping
    10 seconds between attempts when the result is empty or an exception
    occurs.

    Args:
        query (str): KQL query text.
        db (str): database name.
        client (KustoClient): authenticated query client.

    Returns:
        pandas.DataFrame: the query result; empty on persistent failure.
    """
    dataframe = pd.DataFrame([])
    logging.info('Retry is set to 3 times.')
    for attempt in range(4):
        if attempt > 0:
            logging.info('Retry {}'.format(attempt))
        try:
            # Execute the query and convert the primary result table.
            response = client.execute(db, query)
            res = response.primary_results[0]
            if res:
                dataframe = dataframe_from_result_table(res)
            if dataframe.empty:
                # Empty result: wait and retry.
                time.sleep(10)
                continue
            return dataframe
        except Exception as exp:
            # Bug fix: "occurred" was misspelled ("occured") in the log.
            logging.error('Exception occurred: {}'.format(exp))
            # wait 10 seconds, then retry
            time.sleep(10)
            continue
    return dataframe
def get_last_ingress_date(client_ingr):
    """Fetch the newest PreciseTimeStamp from the ingress table.

    Example helper to avoid ingesting the same data twice; substitute your
    own timestamp column for 'PreciseTimeStamp' as needed.
    """
    last_row_query = """
    {}
    | order by PreciseTimeStamp desc
    | limit 1
    """.format(TABLENAME_INGR)
    last_row = query_kusto(last_row_query, DB_INGR, client_ingr)
    return last_row['PreciseTimeStamp'][0]
def ingress_kusto(dataframe_input, db, client):
    """Ingest a pandas DataFrame into the Kusto table.

    Args:
        dataframe_input (pandas.DataFrame): rows to ingest.
        db (str): target database name.
        client (KustoIngestClient): authenticated ingestion client.

    Returns:
        The ingestion response, or None when ingestion failed.
    """
    t0 = time.time()
    logging.info('Ingest started.')
    # Bug fix: honor the `db` argument (the original always used DB_INGR,
    # silently ignoring the parameter).
    ingestion_properties = IngestionProperties(database=db, table=TABLENAME_INGR, data_format=DataFormat.CSV)
    response = None
    try:
        response = client.ingest_from_dataframe(dataframe_input, ingestion_properties=ingestion_properties)
    except KustoServiceError as error:
        print("1. Error:", error)
        print("2. Is semantic error:", error.is_semantic_error())
        print("3. Has partial results:", error.has_partial_results())
        # Bug fix: this line was numbered "3." twice in the original.
        print("4. Result size:", len(error.get_partial_results()))
    except Exception as exp:
        print("Error {}", exp)
    finally:
        # Bug fix: the original returned from inside the try block, so this
        # timing log never ran on the success path.
        t1 = time.time()
        logging.info('Ingest took {:.04f} minutes.'.format((t1 - t0) / 60))
    return response
def main():
    """Authenticate, read the CSV, and ingest it into the Kusto table."""
    # Client for the ingestion endpoint.
    ingest_client = authenticate_to_kusto_ingress(CLUSTER_INGR)
    # Query client - used only to demonstrate fetching the last ingested
    # date, handy for de-duplicating incremental loads. Not otherwise used.
    query_client = authenticate_to_kusto(CLUSTER_INGR_QUERY)
    last_date = pd.to_datetime(get_last_ingress_date(query_client))
    logging.info('Last date in kusto db is {}'.format(last_date))
    # Read the CSV, parsing the first column as dates (becomes the index).
    dataframe_final = pd.read_csv('new_data.csv',
                                  header=0,
                                  parse_dates=[0])
    # Report sizes and drop rows containing NA/None before ingesting.
    print('Dataframe final size before dropna = {}'.format(dataframe_final.shape))
    dataframe_final.dropna(inplace=True)
    print('Dataframe final size after dropna = {}'.format(dataframe_final.shape))
    # Ingest into the Kusto db.
    resp = ingress_kusto(dataframe_final, DB_INGR, ingest_client)
# Script entry point.
if __name__ == '__main__':
    main()
|
<filename>omnicanvas/svg.py
def generate_graphic_svg(graphic, include_fill=None):
    """Build the stroke-related SVG style string for a graphic.

    stroke-width is omitted when it is the default 1, and stroke-dasharray
    is omitted for the solid line style "-". When include_fill is truthy,
    "fill:none;" is prepended.
    """
    line_width = graphic.line_width()
    line_style = graphic.line_style()
    width_rule = "stroke-width:%.1f;" % line_width
    dash_patterns = {
        "-": "1,0",
        "--": "%.1f,%.1f" % (10 * line_width, 5 * line_width),
        "..": "%.1f,%.1f" % (1 * line_width, 2 * line_width)
    }
    dash_rule = "stroke-dasharray:%s;" % dash_patterns[line_style]
    return "%sstroke:%s;%s%s" % (
        "fill:none;" if include_fill else "",
        graphic.line_color(),
        width_rule if line_width != 1 else "",
        dash_rule if line_style != "-" else ""
    )
def generate_rotation_svg(graphic):
    """Return an SVG transform attribute for the graphic's rotation.

    Empty string for the identity rotation (0, 0, 0). The rotation tuple is
    (cx, cy, angle); SVG wants "rotate(angle cx cy)".
    """
    rotation = graphic.rotation()
    if rotation == (0, 0, 0):
        return ""
    return ' transform="rotate(%.1f %.1f %.1f)"' % (rotation[2], rotation[0], rotation[1])
def generate_data_svg(graphic):
    """Return the graphic's data dict as SVG attributes ("" when empty)."""
    data = graphic.data()
    if not data:
        return ""
    return " " + " ".join('%s="%s"' % (str(key), str(data[key])) for key in data)
def generate_shape_svg(shape):
    """Build the fill style string for a shape plus its stroke styles.

    fill-opacity is omitted when the opacity is exactly 1.
    """
    shape_opacity = shape.opacity()
    opacity_rule = "fill-opacity:%.3f;" % shape_opacity if shape_opacity != 1 else ""
    return "fill:%s;%s%s" % (shape.fill_color(), opacity_rule, shape.graphic_svg())
def generate_rectangle_svg(rectangle):
    """Render a rectangle as an SVG <rect> element."""
    attributes = (
        rectangle.x(), rectangle.y(), rectangle.width(), rectangle.height(),
        rectangle.shape_svg(), rectangle.rotation_svg(), rectangle.data_svg()
    )
    return '<rect x="%.1f" y="%.1f" width="%.1f" height="%.1f" style="%s"%s%s />' % attributes
def generate_line_svg(line):
    """Render a line as an SVG <line> element."""
    attributes = (
        line.x1(), line.y1(), line.x2(), line.y2(),
        line.graphic_svg(), line.rotation_svg(), line.data_svg()
    )
    return '<line x1="%.1f" y1="%.1f" x2="%.1f" y2="%.1f" style="%s"%s%s />' % attributes
def generate_polygon_svg(polygon):
    """Render a polygon as an SVG <polygon> element."""
    points = ", ".join(
        "%.1f,%.1f" % (point[0], point[1])
        for point in polygon.coordinates(xy_pairs=True)
    )
    return '<polygon points="%s" style="%s"%s%s />' % (
        points,
        polygon.shape_svg(),
        polygon.rotation_svg(),
        polygon.data_svg(),
    )
def generate_oval_svg(oval):
    """Render an oval as an SVG <ellipse> element (radii = half extents)."""
    center_x, center_y = oval.center()
    return '<ellipse cx="%.1f" cy="%.1f" rx="%.1f" ry="%.1f" style="%s"%s%s />' % (
        center_x,
        center_y,
        oval.width() / 2,
        oval.height() / 2,
        oval.shape_svg(),
        oval.rotation_svg(),
        oval.data_svg()
    )
def generate_text_svg(text):
    """Render text as an SVG <text> element.

    NOTE(review): horizontal "left" maps to text-anchor "end" and "right"
    to "start" - this mirrors the original mapping exactly; confirm it is
    the intended anchoring convention.
    """
    anchor = {"left": "end", "center": "middle", "right": "start"}[text.horizontal_align()]
    baseline = {"top": "baseline", "center": "middle", "bottom": "hanging"}[text.vertical_align()]
    template = ('<text x="%.1f" y="%.1f" text-anchor="%s" alignment-baseline="%s"'
                ' style="font-size:%.1f;%s"%s%s>%s</text>')
    return template % (
        text.x(),
        text.y(),
        anchor,
        baseline,
        text.font_size(),
        text.shape_svg(),
        text.rotation_svg(),
        text.data_svg(),
        text.text()
    )
def generate_polyline_svg(polyline):
    """Render a polyline as an SVG <polyline> element (stroke only)."""
    points = ", ".join(
        "%.1f,%.1f" % (point[0], point[1])
        for point in polyline.coordinates(xy_pairs=True)
    )
    return '<polyline points="%s" style="%s"%s%s />' % (
        points,
        polyline.graphic_svg(include_fill=True),
        polyline.rotation_svg(),
        polyline.data_svg(),
    )
SVG_BASE = """<?xml version="1.0" encoding="UTF-8"?>
<!-- Created with OmniCanvas (omnicanvas.readthedocs.io) -->
<svg xmlns="http://www.w3.org/2000/svg" width="%i" height="%i">
%s
%s
</svg>"""
def generate_canvas_svg(canvas):
return SVG_BASE % (
canvas.width(),
canvas.height(),
('<rect x="0" y="0" width="%i" height="%i" style="fill:%s;stroke-width:0;" />' % (
canvas.width(), canvas.height(), canvas.background_color()
)) if canvas.background_color() else "",
"\n".join(
[graphic.to_svg() for graphic in canvas.graphics()]
)
)
|
<filename>limited/core/admin.py<gh_stars>0
# -*- coding: utf-8 -*-
from django.contrib import admin
from django import forms
from django.contrib.auth.admin import UserAdmin
from django.template.defaultfilters import filesizeformat
from limited.core.models import FileLib, Permission, Home, History, Link, LUser, Profile
from limited.core.utils import urlbilder
class AdminFileLib(admin.ModelAdmin):
    """
    File lib admin showing trash and cache sizes, with links to clear them.
    """
    list_display = ('__unicode__', 'get_cache', 'get_trash',)
    fieldsets = (
        ('Main', {
            'fields': ('name', 'description', 'path',)
        }),
        ('Advanced info', {
            'classes': ('wide',),
            'fields': ('dir_cache', 'dir_trash', 'cache',)
        }),
    )
    readonly_fields = ('dir_cache', 'dir_trash', 'get_cache', 'get_trash',)

    def get_cache(self, obj):
        # Human-readable cache size for the change list.
        return filesizeformat(obj.get_cache_size())
    get_cache.short_description = u'Cache size'

    def get_trash(self, obj):
        # Human-readable trash size for the change list.
        return filesizeformat(obj.get_trash_size())
    get_trash.short_description = u'Trash size'

    def dir_cache(self, obj):
        # Cache size plus a "clear" link for the detail view.
        cache_size = filesizeformat(obj.get_cache_size())
        clear_url = urlbilder(u'clear', obj.id, u'cache')
        return u'{0} / <a href="{1}">clear</a>'.format(cache_size, clear_url)
    dir_cache.short_description = 'Cache size'
    dir_cache.allow_tags = True

    def dir_trash(self, obj):
        # Trash size plus a "clear" link for the detail view.
        trash_size = filesizeformat(obj.get_trash_size())
        clear_url = urlbilder(u'clear', obj.id, u'trash')
        return u'{0} / <a href="{1}">clear</a>'.format(trash_size, clear_url)
    dir_trash.short_description = u'Trash size'
    dir_trash.allow_tags = True

admin.site.register(FileLib, AdminFileLib)
class AdminPermission(admin.ModelAdmin):
    """Admin for Permission rows: a flat list of the boolean flags."""
    ordering = ('id',)
    list_display = ('id', 'edit', 'move', 'create', 'delete', 'upload', 'http_get',)
    list_filter = ('edit', 'move', 'create', 'delete', 'upload', 'http_get',)

admin.site.register(Permission, AdminPermission)
class HomeForm(forms.ModelForm):
    """
    Represent the related Permission as a set of check boxes.

    If no Permission row matches the checked combination on save, one is
    created (get_or_create).
    """
    # Read-only display of the current permission id.
    perm_id = forms.CharField(widget=forms.TextInput(attrs={'readonly': 'readonly'}), required=False)
    # One check box per Permission boolean field.
    perm = forms.MultipleChoiceField(
        choices=[(i, i) for i in Permission.fields()],
        widget=forms.CheckboxSelectMultiple,
        required=False
    )

    def __init__(self, *args, **kwargs):
        super(HomeForm, self).__init__(*args, **kwargs)
        # Bug fix: dict.has_key() was removed in Python 3; the `in`
        # operator behaves identically and also works on Python 2.
        if 'instance' in kwargs:
            # Initialize from the existing instance.
            instance = kwargs['instance']
            self.initial['perm_id'] = instance.permission_id
            self.initial['perm'] = []
            permission = Permission.objects.get(id=instance.permission_id)
            # Pre-check every flag that is True on the stored Permission.
            for name in Permission.fields():
                if getattr(permission, name):
                    self.initial['perm'].append(name)

    def save(self, commit=True):
        model = super(HomeForm, self).save(commit=False)
        # Names that were checked in the form.
        checked = self.cleaned_data['perm']
        # True for checked permission names, False for all others.
        kwargs = {}
        for item in Permission.fields():
            kwargs[item] = item in checked
        model.permission = Permission.objects.get_or_create(**kwargs)[0]
        if commit:
            model.save()
        return model

    class Meta:
        model = Home
        exclude = ('permission',)
class AdminHome(admin.ModelAdmin):
    """Admin for Home rows; uses HomeForm for check-box permissions."""
    ordering = ('user',)
    list_display = ('user', 'lib', 'permission',)
    list_filter = ('user', 'lib',)
    form = HomeForm

admin.site.register(Home, AdminHome)
class AdminHistory(admin.ModelAdmin):
    """Read-mostly admin for History records."""
    list_display = ('user', 'lib', 'type', 'time',)
    list_filter = ('time', 'user', 'lib',)
    fieldsets = (
        ('Reference', {'fields': ('user', 'lib', 'type',)}),
        ('Info', {'fields': ('get_files', 'path', 'extra',)}),
    )
    readonly_fields = ('time', 'get_files')

    def get_files(self, obj):
        # obj.name is an iterable of names; show them comma-separated.
        return u", ".join(obj.name)

admin.site.register(History, AdminHistory)
class AdminLink(admin.ModelAdmin):
    """Admin for shared Link rows."""
    list_display = ('path', 'lib', 'hash', 'time', 'expires',)
    list_filter = ('time',)
    readonly_fields = ('time',)

admin.site.register(Link, AdminLink)
class AdminProfile(admin.ModelAdmin):
    """Admin for user Profiles; manual creation is disabled."""
    list_display = ('user', 'mail_notify', 'rss_token',)
    list_filter = ('mail_notify',)
    readonly_fields = ('user',)

    def has_add_permission(self, request):
        # Profiles are created elsewhere; never allow adding them here.
        return False

admin.site.register(Profile, AdminProfile)
class HomeInline(admin.TabularInline):
    """Inline editor for a user's Home rows (raw id widget for permission)."""
    model = Home
    raw_id_fields = ("permission",)
class AdminUser(UserAdmin):
    """
    Simple LUser admin with inline Home rows.
    """
    list_select_related = True
    list_display = ('username', 'email', 'first_name', 'last_name', 'is_staff',)
    fieldsets = (
        ('Main', {'fields': ('username', 'password')}),
        ('Personal info', {'fields': ('first_name', 'last_name', 'email')}),
    )
    inlines = [HomeInline, ]

    def get_formsets(self, request, obj=None):
        """Hide the inlines on the add page; show them when editing."""
        if obj is None:
            return []
        # Bug fix: the original passed obj=None to super even when editing,
        # which asked the parent for "add page" formsets instead of the
        # ones for the existing object.
        # NOTE(review): super(UserAdmin, self) deliberately(?) skips
        # UserAdmin itself - kept unchanged; confirm this is intended.
        return super(UserAdmin, self).get_formsets(request, obj=obj)

admin.site.register(LUser, AdminUser)
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
from collections import OrderedDict
import numpy as np
import os
import pandas
import posixpath
import warnings
from ast import literal_eval
from pyiron.base.settings.generic import Settings
from pyiron.base.generic.template import PyironObject
"""
GenericParameters class defines the typical input file with a key value structure plus an additional column for comments.
"""
# Module metadata.
__author__ = "<NAME>"
__copyright__ = (
    "Copyright 2020, Max-Planck-Institut für Eisenforschung GmbH - "
    "Computational Materials Design (CM) Department"
)
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "production"
__date__ = "Sep 1, 2017"
# Shared pyiron Settings instance used in this module.
s = Settings()
class GenericParameters(PyironObject):
    """
    Typical input file with a key/value structure plus an additional column
    for comments. Convenience class to easily create, read, and modify
    input files.

    Args:
        table_name (str): name of the input file inside the HDF5 file - optional
        input_file_name (str): name of the input file (if None default parameters are used)
        val_only (bool): input format consists of value (comments) only
        comment_char (str): separator that characterizes comment (e.g. "#" for python)
        separator_char (str): separator that characterizes the split between key and value - default=' '
        end_value_char (str): special character at the end of every line - default=''

    Attributes:

        .. attribute:: file_name

            file name of the input file

        .. attribute:: table_name

            name of the input table inside the HDF5 file

        .. attribute:: val_only

            boolean option to switch from a key value list to a value-only input file

        .. attribute:: comment_char

            separator that characterizes comment

        .. attribute:: separator_char

            separator that characterizes the split between key and value

        .. attribute:: multi_word_separator

            multi word separator to have multi word keys

        .. attribute:: end_value_char

            special character at the end of every line

        .. attribute:: replace_char_dict

            dictionary to replace certain character combinations
    """
def __init__(
    self,
    table_name=None,
    input_file_name=None,
    val_only=False,
    comment_char="#",
    separator_char=" ",
    end_value_char="",
):
    """Initialize a GenericParameters object.

    Args:
        table_name (str): name of the input table inside the HDF5 file - optional
        input_file_name (str): input file to read; when None, defaults are loaded
        val_only (bool): input format consists of values (and comments) only
        comment_char (str): character that starts a comment (e.g. "#")
        separator_char (str): separator between key and value - default ' '
        end_value_char (str): special character appended to every line - default ''
    """
    self.__name__ = "GenericParameters"
    self.__version__ = "0.1"
    # Backing fields for the public properties defined below.
    self._file_name = None
    self._table_name = None
    self._comment_char = None
    self._val_only = None
    self._separator_char = None
    self._multi_word_separator = None
    self._end_value_char = None
    self._replace_char_dict = None
    self._block_dict = None
    # How booleans are rendered when writing the input file.
    self._bool_dict = {True: "True", False: "False"}
    # Main storage: parallel lists under "Parameter", "Value", "Comment".
    self._dataset = OrderedDict()
    self._block_line_dict = {}
    # Assign through the property setters; read_input below relies on
    # these being set first.
    self.end_value_char = end_value_char
    self.file_name = input_file_name
    self.table_name = table_name
    self.val_only = val_only
    self.comment_char = comment_char
    self.separator_char = separator_char
    self.multi_word_separator = "___"
    self.read_only = False
    # Populate the dataset: defaults, or the given input file.
    if input_file_name is None:
        self.load_default()
    else:
        self.read_input(self.file_name)
@property
def file_name(self):
    """str: name of the input file backing this object."""
    return self._file_name

@file_name.setter
def file_name(self, new_file_name):
    """Set the input file name."""
    self._file_name = new_file_name
@property
def table_name(self):
    """str: name of the input table inside the HDF5 file."""
    return self._table_name

@table_name.setter
def table_name(self, new_table_name):
    """Set the HDF5 table name."""
    self._table_name = new_table_name
@property
def val_only(self):
    """bool: True for a value-only input file, False for key/value pairs."""
    return self._val_only

@val_only.setter
def val_only(self, val_only):
    """Switch between key/value and value-only input formats."""
    self._val_only = val_only
@property
def comment_char(self):
    """str: character that starts a comment."""
    return self._comment_char

@comment_char.setter
def comment_char(self, new_comment_char):
    """Set the comment character."""
    self._comment_char = new_comment_char
@property
def separator_char(self):
    """str: separator between key and value."""
    return self._separator_char

@separator_char.setter
def separator_char(self, new_separator_char):
    """Set the key/value separator."""
    self._separator_char = new_separator_char
@property
def multi_word_separator(self):
    """str: separator used to join multi-word keys."""
    return self._multi_word_separator

@multi_word_separator.setter
def multi_word_separator(self, new_multi_word_separator):
    """Set the multi-word key separator."""
    self._multi_word_separator = new_multi_word_separator
@property
def end_value_char(self):
    """str: special character appended at the end of every line."""
    return self._end_value_char

@end_value_char.setter
def end_value_char(self, new_end_value_char):
    """Set the end-of-line character."""
    self._end_value_char = new_end_value_char
@property
def replace_char_dict(self):
    """dict: replacement mapping for certain character combinations."""
    return self._replace_char_dict

@replace_char_dict.setter
def replace_char_dict(self, new_replace_char_dict):
    """Set the character-replacement dictionary."""
    self._replace_char_dict = new_replace_char_dict
def _read_only_check_dict(self, new_dict):
    """Warn when replacing the dataset would modify a read-only object."""
    if not self.read_only:
        return
    if new_dict != self._dataset:
        self._read_only_error()
@staticmethod
def _read_only_error():
    """Emit the standard warning for modifying finished-job input."""
    msg = "The input in GenericParameters changed, while the state of the job was already finished."
    warnings.warn(msg)
def load_string(self, input_str):
    """Overwrite the current parameter settings from a multi-line string.

    Args:
        input_str (str): multi line string
    """
    parsed = self._lines_to_dict(input_str.splitlines())
    self._read_only_check_dict(new_dict=parsed)
    self._dataset = parsed
def load_default(self):
    """Reset the dataset to an empty Parameter/Value/Comment table."""
    empty = OrderedDict([("Parameter", []), ("Value", []), ("Comment", [])])
    self._read_only_check_dict(new_dict=empty)
    self._dataset = empty
def keys(self):
    """Return the parameter names ([] when in value-only mode)."""
    return [] if self.val_only else self._dataset["Parameter"]
def read_input(self, file_name, ignore_trigger=None):
    """Read an input file and overwrite the current parameter settings.

    Args:
        file_name (str): absolute path to the input file
        ignore_trigger (str): lines starting with this string are dropped;
            when it appears mid-line, the rest of the line is cut off

    Raises:
        ValueError: if file_name does not exist.
    """
    Settings().logger.debug("file: %s %s", file_name, os.path.isfile(file_name))
    if not os.path.isfile(file_name):
        raise ValueError("file does not exist: " + file_name)
    with open(file_name, "r") as f:
        lines = f.readlines()
    new_lines = list(lines)
    if ignore_trigger is not None:
        del_ind = list()
        for i, line in enumerate(lines):
            line = line.strip()
            if len(line.split()) > 0:
                if ignore_trigger == line.strip()[0]:
                    # Whole line starts with the trigger: drop it entirely.
                    del_ind.append(i)
                elif ignore_trigger in line:
                    # Bug fix: the original truncated at a hard-coded "!"
                    # instead of the configured ignore_trigger.
                    lines[i] = line[: line.find(ignore_trigger)]
        lines = np.array(lines)
        new_lines = lines[np.setdiff1d(np.arange(len(lines)), del_ind)]
    new_dict = self._lines_to_dict(new_lines)
    self._read_only_check_dict(new_dict=new_dict)
    self._dataset = new_dict
def get_pandas(self):
    """Return the parameter table as a pandas.DataFrame (for display)."""
    return pandas.DataFrame(self._dataset)
def get(self, parameter_name, default_value=None):
    """Return the (parsed) value of a parameter.

    Values are parsed with ast.literal_eval where possible, otherwise the
    raw string is returned. Falls back to default_value when the parameter
    is missing and a default is given.

    Args:
        parameter_name (str): parameter key
        default_value (str): value to return when the parameter is not set

    Returns:
        str: value of the parameter

    Raises:
        NameError: when the parameter is missing and no default was given.
    """
    row = self._find_line(parameter_name)
    if row <= -1:
        if default_value is not None:
            return default_value
        raise NameError("parameter not found: " + parameter_name)
    raw = self._dataset["Value"][row]
    try:
        parsed = literal_eval(raw)
    except (ValueError, SyntaxError):
        parsed = raw
    # Guard kept from the original: if parsing somehow yielded a callable,
    # fall back to the raw string.
    if callable(parsed):
        parsed = raw
    return parsed
def get_attribute(self, attribute_name):
    """Return the value of a pyiron attribute stored in self._attributes.

    Args:
        attribute_name (str): attribute key

    Returns:
        The stored value, or None when no attributes exist or the key is
        not found.
    """
    if "_attributes" not in dir(self):
        return None
    matches = np.where(np.array(self._attributes["Parameter"]) == attribute_name)[0]
    # Bug fix: the original compared the whole index array with -1
    # (`if i_line > -1`), which misbehaves for zero or multiple matches;
    # test for a match explicitly and use the first one.
    if len(matches) > 0:
        return self._attributes["Value"][matches[0]]
    return None
def modify(self, separator=None, append_if_not_present=False, **modify_dict):
    """Modify the values (and optionally comments) of existing parameters.

    Called as modify(param1=val1, param2=val2, ...). A value may be a
    (value, comment) tuple to update the comment as well.

    Args:
        separator (str): appended to every key; useful when the parameter
            name contains special characters, e.g. modify(separator=":", par=val)
        append_if_not_present (bool): append unknown parameters instead of
            raising - default False (in practice use set(par=val))
        **modify_dict (dict): parameter names and their new values

    Raises:
        ValueError: for an unknown key when append_if_not_present is False.
    """
    if separator is not None:
        modify_dict = {key + separator: value for key, value in modify_dict.items()}
    for key, val in modify_dict.items():
        row = self._find_line(key)
        if row == -1:
            if not append_if_not_present:
                raise ValueError("key for modify not found " + key)
            self._append(**{key: val})
            continue
        if isinstance(val, tuple):
            # (value, comment) pair: update the comment first.
            val, comment = val
            if self.read_only and self._dataset["Comment"][row] != comment:
                self._read_only_error()
            self._dataset["Comment"][row] = comment
        if self.read_only and str(self._dataset["Value"][row]) != str(val):
            self._read_only_error()
        self._dataset["Value"][row] = str(val)
def set(self, separator=None, **set_dict):
    """Set parameters, creating any that do not exist yet.

    Args:
        separator (float/int/str): separator string - optional
        **set_dict (dict): parameter keys and the values to set
    """
    self.modify(separator=separator, append_if_not_present=True, **set_dict)
def set_value(self, line, val):
    """
    Set the value of a parameter in a specific line.

    When `line` is beyond the current table, the table is rebuilt with the
    new value appended at the end.

    Args:
        line (float/int/str): line number - starting with 0
        val (str/bytes): value to be set
    """
    if line < len(self._dataset["Value"]):
        if self.read_only and self._dataset["Value"][line] != val:
            self._read_only_error()
        self._dataset["Value"][line] = val
    elif line >= len(self._dataset["Value"]):
        # NOTE(review): this rebuild keeps existing values but replaces all
        # existing Parameter/Comment entries with "" - confirm that
        # discarding them here is intended.
        new_array = []
        new_comments = []
        new_params = []
        for el in self._dataset["Value"]:
            new_array.append(el)
            new_comments.append("")
            new_params.append("")
        new_array.append(val)
        new_comments.append("")
        new_params.append("")
        new_dict = OrderedDict()
        new_dict["Value"] = new_array
        new_dict["Comment"] = new_comments
        new_dict["Parameter"] = new_params
        self._read_only_check_dict(new_dict=new_dict)
        self._dataset = new_dict
    else:
        # NOTE(review): unreachable for numeric `line` - the branches above
        # cover all cases.
        raise ValueError("Wrong indexing")
def remove_keys(self, key_list):
    """Remove every row whose Parameter matches a key in key_list.

    Args:
        key_list (list): keys to be removed
    """
    if self.read_only and any(k in self._dataset["Parameter"] for k in key_list):
        self._read_only_error()
    for key in key_list:
        matches = np.where(np.array(self._dataset["Parameter"]) == key)[0]
        if len(matches) == 0:
            continue
        if matches[0] == -1:
            # np.where never yields -1; check kept for parity with the original.
            continue
        # Delete from the end so earlier indices stay valid.
        for row in matches[::-1]:
            self._delete_line(row)
def define_blocks(self, block_dict):
    """Define block sections within the parameters (must be ordered).

    Args:
        block_dict (dict): ordered mapping of block name to its keys

    Raises:
        AssertionError: when block_dict is not an OrderedDict.
    """
    if not isinstance(block_dict, OrderedDict):
        raise AssertionError()
    self._block_dict = block_dict
def to_hdf(self, hdf, group_name=None):
    """Write the parameter table to an HDF5 file.

    Args:
        hdf (ProjectHDFio): HDF5 group object
        group_name (str): optional subgroup to write into
    """
    if group_name:
        with hdf.open(group_name) as hdf_group:
            hdf_child = hdf_group.create_group(self.table_name)
    else:
        hdf_child = hdf.create_group(self.table_name)
    # Store type info alongside the raw dataset.
    self._type_to_hdf(hdf_child)
    hdf_child["data_dict"] = self._dataset
def from_hdf(self, hdf, group_name=None):
    """Restore the parameter table from an HDF5 file.

    Args:
        hdf (ProjectHDFio): HDF5 group object
        group_name (str): optional subgroup to read from
    """
    if group_name:
        with hdf.open(group_name) as hdf_group:
            data = hdf_group[self.table_name]
    else:
        data = hdf[self.table_name]
    # Plain dicts are used directly; otherwise read the nested "data_dict".
    self._dataset = data if isinstance(data, dict) else data._read("data_dict")
def get_string_lst(self):
    """Render the parameter table as the list of input-file lines.

    Returns:
        list: one string per line, each terminated by a newline.
    """
    tab_dict = self._dataset
    # assert(len(tab_dict['Value']) == len(tab_dict['Parameter']))
    if "Parameter" not in tab_dict:
        tab_dict["Parameter"] = ["" for _ in tab_dict["Value"]]
    string_lst = []
    if self.val_only:
        value_lst = tab_dict["Value"]
    else:
        # Re-read each value through self[...] so parsed values are used;
        # fall back to the raw strings when that fails.
        try:
            value_lst = [self[p] for p in tab_dict["Parameter"]]
        except ValueError:
            value_lst = tab_dict["Value"]
    for par, v, c in zip(tab_dict["Parameter"], value_lst, tab_dict["Comment"]):
        # special treatment for values that are bool or str
        if isinstance(v, bool):
            v_str = self._bool_dict[v]
        elif isinstance(
            v, str
        ):  # TODO: introduce variable for string symbol (" or ')
            v_str = v
        else:
            v_str = str(v)
        # Multi-word keys are stored joined; split them back for output.
        par = " ".join(par.split(self.multi_word_separator))
        if par == "Comment":
            string_lst.append(str(v) + self.end_value_char + "\n")
        elif c.strip() == "":
            # Line without a comment.
            if self.val_only:
                string_lst.append(v_str + self.end_value_char + "\n")
            else:
                string_lst.append(
                    par + self.separator_char + v_str + self.end_value_char + "\n"
                )
        else:
            # Line with a trailing comment.
            if self.val_only:
                string_lst.append(
                    v_str + self.end_value_char + " " + self.comment_char + c + "\n"
                )
            else:
                string_lst.append(
                    par
                    + self.separator_char
                    + v_str
                    + " "
                    + self.end_value_char
                    + self.comment_char
                    + c
                    + "\n"
                )
    return string_lst
def write_file(self, file_name, cwd=None):
    """Write the parameters to an input file.

    Args:
        file_name (str): file name, absolute when cwd is None, else relative
        cwd (str): directory prepended to file_name (default: None)
    """
    if cwd is not None:
        file_name = posixpath.join(cwd, file_name)
    with open(file_name, "w") as f:
        f.writelines(self.get_string_lst())
def __repr__(self):
"""
Human readable string representation
Returns:
str: pandas Dataframe structure as string
"""
return str(self.get_pandas())
def __setitem__(self, key, value):
"""
Set a value for the corresponding key
Args:
key (str): key to be set of modified
value (float/int/str): value to be set
"""
if isinstance(key, int):
if self.read_only and self._dataset["Value"][key] != value:
self._read_only_error()
self._dataset["Value"][key] = value
else:
self.set(**{key: value})
def set_dict(self, dictionary):
    """
    Set multiple parameters at once from a dictionary of key/value pairs.

    Args:
        dictionary (dict): dictionary of key value pairs
    """
    self.set(**dictionary)
def __getitem__(self, item):
    """
    Get the value for the corresponding key.

    Args:
        item (int, str): line index (int) or parameter name (str)

    Returns:
        str: value

    NOTE(review): an unknown string key silently falls through and
    returns None instead of raising KeyError - callers may rely on
    this, confirm before tightening.
    """
    if isinstance(item, int):
        return self._dataset["Value"][item]
    elif item in self._dataset["Parameter"]:
        return self.get(item)
def __delitem__(self, key):
    """
    Delete a single parameter from the object.

    Args:
        key (str): name of the parameter to remove
    """
    self.remove_keys([key])
def _get_block(self, block_name):
    """
    Internal helper function to extract all lines belonging to a named block.

    Args:
        block_name (str): block name (must be a key of self._block_dict)

    Returns:
        OrderedDict: column name -> list of entries for the lines whose
            parameter tag belongs to the requested block

    Raises:
        ValueError: if the block name is unknown
    """
    if block_name not in self._block_dict:
        raise ValueError("unknown block: " + block_name)
    keys = self._dataset["Parameter"]
    block_dict = OrderedDict()
    # start with one empty column per dataset column
    for key in self._dataset:
        block_dict[key] = []
    # copy every line whose tag is listed in the requested block
    for i, tag in enumerate(keys):
        if tag in self._block_dict[block_name]:
            for key in self._dataset:
                block_dict[key].append(self._dataset[key][i])
    return block_dict
def _get_attributes(self):
"""
Internal helper function to extract pyiron specific commands (start in comments with " @my_command")
Returns:
(dict): {"Parameter": list of tags, "Value": list of values}
"""
tags = self._dataset["Parameter"]
lst_tag, lst_val = [], []
for i, tag in enumerate(tags):
if tag not in ["Comment"]:
continue
c = self._dataset["Value"][i]
s_index = c.find(" @")
if s_index > -1:
tag, val = c[s_index:].split()[:2]
lst_tag.append(tag[1:])
lst_val.append(val)
self._attributes = {"Parameter": lst_tag, "Value": lst_val}
return self._attributes
def _remove_block(self, block_name):
    """
    Internal helper function to remove all lines of a named block.

    Args:
        block_name (str): block name

    Raises:
        ValueError: if the block name is unknown
    """
    if block_name not in self._block_dict:
        raise ValueError("unknown block to be removed")
    self.remove_keys(self._block_dict[block_name])
def _insert_block(self, block_dict, next_block=None):
    """
    Internal helper function to insert a block of lines.

    Args:
        block_dict (dict): column name -> list of entries to insert
        next_block (str): name of the block the new lines should precede;
            if None the lines are appended at the end - optional
    """
    if next_block is None:  # append
        for key in block_dict:
            self._dataset[key] += block_dict[key]
    else:
        # insert just before the first line of the following block
        for i, tag in enumerate(self._dataset["Parameter"]):
            if tag in self._block_dict[next_block]:
                self._insert(line_number=i, data_dict=block_dict)  # , shift=1)
                break
def _update_block(self, block_dict):
"""
Internal helper function to update a block by name
Args:
block_dict (dict): block dictionary
"""
tag_lst = block_dict["Parameter"]
val_lst = block_dict["Value"]
par_dict = {}
for t, v in zip(tag_lst, val_lst):
par_dict[t] = v
self.modify(**par_dict)
def _delete_line(self, line_number):
"""
Internal helper function to delete a single line
Args:
line_number (int): line number
"""
if self.read_only:
self._read_only_error()
for key, val in self._dataset.items():
if "numpy" in str(type(val)):
val = val.tolist()
del val[line_number]
self._dataset[key] = val
def _insert(self, line_number, data_dict, shift=0):
"""
Internal helper function to insert a single line by line number
Args:
line_number (int): line number
data_dict (dict): data dictionary
shift (int): shift line number - default=0
"""
if self.read_only:
self._read_only_error()
for key, val in data_dict.items():
lst = self._dataset[key]
val = np.array(val).tolist()
lst = np.array(lst).tolist()
self._dataset[key] = lst[: line_number - shift] + val + lst[line_number:]
def _refresh_block_line_hash_table(self):
    """
    Internal helper function to rebuild self._block_line_dict, the mapping
    from block name to the list of line indices currently belonging to it.
    """
    self._block_line_dict = {}
    for i_line, par in enumerate(self._dataset["Parameter"]):
        if par.strip() == "":
            continue
        for key, val in self._block_dict.items():
            # compare only the first word of a (possibly multi-word) tag
            par_single = par.split()[0].split(self.multi_word_separator)[0]
            if par_single in val:
                if key in self._block_line_dict:
                    self._block_line_dict[key].append(i_line)
                else:
                    self._block_line_dict[key] = [i_line]
                break
    # blocks without any line inherit the last line index of the preceding
    # block so that later insertions land in the right place
    i_line_old = 0
    for key in self._block_dict:
        if key in self._block_line_dict:
            i_line_old = np.max(self._block_line_dict[key])
        else:
            self._block_line_dict[key] = [i_line_old]
def _append_line_in_block(self, parameter_name, value):
"""
Internal helper function to append a line within a block
Args:
parameter_name (str): name of the parameter
value (str): value of the parameter
Returns:
bool: [True/False]
"""
for key, val in self._block_dict.items():
par_first = parameter_name.split()[0].split(self.multi_word_separator)[0]
if par_first in val:
i_last_block_line = max(self._block_line_dict[key])
self._insert(
line_number=i_last_block_line + 1,
data_dict={
"Parameter": [parameter_name],
"Value": [str(value)],
"Comment": [""],
},
)
return True
else:
s.logger.warning(
"Unknown parameter (does not exist in block_dict): {}".format(
parameter_name
)
)
return False
def _append(self, **qwargs):
    """
    Internal helper function to append new parameters to the dataset.

    Args:
        **qwargs (dict): parameter names mapped to values; a value may be
            a (value, comment) tuple

    Raises:
        ValueError: if a parameter of that name already exists
    """
    if self.read_only:
        self._read_only_error()
    for par, val in qwargs.items():
        if par in self._dataset["Parameter"]:
            raise ValueError("Parameter exists already: " + par)
        # if blocks are defined, try to insert the line inside its block
        if self._block_dict is not None:
            self._refresh_block_line_hash_table()
            if self._append_line_in_block(par, val):
                continue
        # normalise columns to plain lists before appending
        for col in self._dataset:
            self._dataset[col] = np.array(self._dataset[col]).tolist()
        comment = ""
        if isinstance(val, tuple):
            val, comment = val
        self._dataset["Parameter"].append(par)
        self._dataset["Value"].append(val)
        self._dataset["Comment"].append(comment)
def _is_multi_word_parameter(self, key):
"""
Internal helper function to check if a parameter included multiple words
Args:
key (str): parameter
Returns:
bool: [True/False]
"""
par_list = key.split(self.multi_word_separator)
return len(par_list) > 1
def _repr_html_(self):
"""
Internal helper function to represent the GenericParameters object within the Jupyter Framework
Returns:
HTML: Jupyter HTML object
"""
return self.get_pandas()._repr_html_()
def _lines_to_dict(self, lines):
    """
    Internal helper function to parse raw input-file lines into the
    dataset columns.

    Args:
        lines (list): list of raw text lines

    Returns:
        OrderedDict: {"Parameter": [...], "Value": [...], "Comment": [...]}
    """
    lst = OrderedDict()
    lst["Parameter"] = []
    lst["Value"] = []
    lst["Comment"] = []
    for line in lines:
        # print ("line: ", line)
        if self.replace_char_dict is not None:
            for key, val in self.replace_char_dict.items():
                line = line.replace(key, val)
        sep = line.split(self.comment_char)
        if len(line.strip()) > 0 and (
            line.strip()[0] == self.comment_char
        ):  # comment line
            # whole-line comments are stored under the "Comment" tag with
            # the text (minus the trailing newline) as value
            lst["Parameter"].append("Comment")
            lst["Value"].append(line[:-1])
            lst["Comment"].append("")
        elif not sep[0].strip() == "":
            sep[0] = sep[0].strip()
            if self.val_only:  # Value only entries
                val = sep[0]
                name = ""
            else:
                keypos = sep[0].find(self.separator_char)
                if keypos == -1:  # Key only entries
                    name = sep[0]
                    val = ""
                else:  # Entries with key and value
                    name = sep[0][0:keypos]
                    val = sep[0][keypos + len(self.separator_char) :]
            lst["Parameter"].append(name.strip())
            lst["Value"].append(val.strip())
            if len(sep) > 1:  # Handle comments
                lst["Comment"].append(sep[-1].strip())
            else:
                lst["Comment"].append("")
        else:  # Handle empty lines
            lst["Parameter"].append("")
            lst["Value"].append("")
            lst["Comment"].append("")
    return lst
def _type_to_hdf(self, hdf):
"""
Internal helper function to save type and version in hdf root
Args:
hdf (ProjectHDFio): HDF5 group object
"""
hdf["NAME"] = self.__name__
hdf["TYPE"] = str(type(self))
hdf["VERSION"] = self.__version__
hdf["OBJECT"] = "GenericParameters"
def _find_line(self, key_name):
"""
Internal helper function to find a line by key name
Args:
key_name (str): key name
Returns:
list: [line index, line]
"""
params = self._dataset["Parameter"]
if len(params) > 0:
i_line_lst = np.where(np.array(params) == key_name)[0]
else:
i_line_lst = []
if len(i_line_lst) == 0:
return -1
elif len(i_line_lst) == 1:
return i_line_lst[0]
else:
error_msg = list()
error_msg.append("Multiple occurrences of key_name: " + key_name + ". They are as follows")
for i in i_line_lst:
error_msg.append("dataset: {}, {}, {}".format(i,
self._dataset["Parameter"][i],
self._dataset["Value"][i]))
error_msg = "\n".join(error_msg)
raise ValueError(error_msg)
|
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@rules_python//python:pip.bzl", "pip_install")
# BUILD file injected into the downloaded pandoc release archives so the
# bundled binary can be referenced as a public filegroup target.
_PANDOC_BUILD_FILE = """
filegroup(
name = "pandoc",
srcs = ["bin/pandoc"],
visibility = ["//visibility:public"],
)"""
def gapic_generator_python():
    """Declares the external repositories needed by gapic-generator-python.

    Every repository is instantiated through _maybe, so a repository that
    the consuming WORKSPACE already defines is left untouched.
    """

    # Python dependencies resolved from the generator's requirements.txt
    _maybe(
        pip_install,
        name = "gapic_generator_python_pip_deps",
        requirements = "@gapic_generator_python//:requirements.txt",
    )

    _protobuf_version = "3.15.8"
    _protobuf_version_in_link = "v%s" % _protobuf_version
    _maybe(
        http_archive,
        name = "com_google_protobuf",
        urls = ["https://github.com/protocolbuffers/protobuf/archive/%s.zip" % _protobuf_version_in_link],
        strip_prefix = "protobuf-%s" % _protobuf_version,
    )

    # pinned to a commit rather than a release
    _maybe(
        http_archive,
        name = "bazel_skylib",
        strip_prefix = "bazel-skylib-2169ae1c374aab4a09aa90e65efe1a3aad4e279b",
        urls = ["https://github.com/bazelbuild/bazel-skylib/archive/2169ae1c374aab4a09aa90e65efe1a3aad4e279b.tar.gz"],
    )

    _maybe(
        http_archive,
        name = "com_github_grpc_grpc",
        strip_prefix = "grpc-1.36.4",
        urls = ["https://github.com/grpc/grpc/archive/v1.36.4.zip"],
    )

    # pandoc binaries (used for docs generation), one archive per host OS
    _maybe(
        http_archive,
        name = "pandoc_linux",
        build_file_content = _PANDOC_BUILD_FILE,
        strip_prefix = "pandoc-2.2.1",
        url = "https://github.com/jgm/pandoc/releases/download/2.2.1/pandoc-2.2.1-linux.tar.gz",
    )

    _maybe(
        http_archive,
        name = "pandoc_macOS",
        build_file_content = _PANDOC_BUILD_FILE,
        strip_prefix = "pandoc-2.2.1",
        url = "https://github.com/jgm/pandoc/releases/download/2.2.1/pandoc-2.2.1-macOS.zip",
    )

    _rules_gapic_version = "0.5.4"
    _maybe(
        http_archive,
        name = "rules_gapic",
        strip_prefix = "rules_gapic-%s" % _rules_gapic_version,
        urls = ["https://github.com/googleapis/rules_gapic/archive/v%s.tar.gz" % _rules_gapic_version],
    )

    _maybe(
        http_archive,
        name = "com_google_googleapis",
        strip_prefix = "googleapis-ffc531383747ebb702dad3db237ef5fdea796363",
        urls = ["https://github.com/googleapis/googleapis/archive/ffc531383747ebb702dad3db237ef5fdea796363.zip"],
    )
def gapic_generator_register_toolchains():
    """Registers the pandoc toolchains for both supported host platforms."""
    native.register_toolchains(
        "@gapic_generator_python//:pandoc_toolchain_linux",
        "@gapic_generator_python//:pandoc_toolchain_macOS",
    )
def _maybe(repo_rule, name, strip_repo_prefix = "", **kwargs):
    """Instantiates *repo_rule* under *name* unless it already exists.

    Args:
        repo_rule: repository rule to invoke (e.g. http_archive).
        name: repository name; names lacking strip_repo_prefix are skipped.
        strip_repo_prefix: prefix removed from name before instantiation.
        **kwargs: forwarded unchanged to repo_rule.
    """
    if not name.startswith(strip_repo_prefix):
        return
    repo_name = name[len(strip_repo_prefix):]
    # respect a repository of the same name already declared upstream
    if repo_name in native.existing_rules():
        return
    repo_rule(name = repo_name, **kwargs)
|
<reponame>HosodaMath/creative_to_svg<filename>Python/testLine.py
from mathematics.vector import Vector2
from geometry import line as li
from testBaseSVG import TestSVG
class Line2Test:
    def __init__(self, init_position1: Vector2, init_position2: Vector2) -> None:
        """
        Test fixture for line rendering.

        :param init_position1: coordinates of the first line endpoint
        :param init_position2: coordinates of the second line endpoint
        """
        self.line1 = init_position1
        self.line2 = init_position2

    def test_line1(self) -> None:
        """
        Test printing the coordinates of line1 and line2.

        :return: None
        """
        line = li.BaseLine2(self.line1, self.line2)
        print(line.drawline1())
        line.drawline2()

    def test_line2(self, init_size: Vector2) -> None:
        """
        Test the SVG-format output of a line (raw console output).

        :param init_size: Vector2 size of the SVG image
        :return: None
        """
        svg = TestSVG(init_size.get_x(), init_size.get_y())
        line = li.Line(self.line1, self.line2)
        line_tag = line.render_line1("#000000", 1.0)
        svg.testSVG2(line_tag)

    def test_line3(self, init_size: Vector2) -> None:
        """
        Test the SVG-format output of a line (written out as an SVG file).

        :param init_size: Vector2 size of the SVG image
        :return: None
        """
        svg = TestSVG(init_size.get_x(), init_size.get_y())
        line = li.Line(self.line1, self.line2)
        line_tag = line.render_line1("#000000", 1.0)
        svg.testSVG3("testLine3", line_tag)
def line_test1(init_position1: Vector2, init_position2: Vector2) -> None:
    """
    Print the coordinates of line1 and line2.

    :param init_position1: first endpoint
    :param init_position2: second endpoint
    :return: None
    """
    Line2Test(init_position1, init_position2).test_line1()
def line_test2(init_position1: Vector2, init_position2: Vector2, init_size: Vector2) -> None:
    """
    Exercise the raw (console) SVG-format output for a line.

    :param init_size: size of the SVG canvas
    :param init_position1: first endpoint
    :param init_position2: second endpoint
    :return: None
    """
    Line2Test(init_position1, init_position2).test_line2(init_size)
def line_test3(init_position1: Vector2, init_position2: Vector2, init_size: Vector2) -> None:
    """
    Exercise the SVG-file output for a line.

    :param init_size: size of the SVG canvas
    :param init_position1: first endpoint
    :param init_position2: second endpoint
    :return: None
    """
    Line2Test(init_position1, init_position2).test_line3(init_size)
if __name__ == '__main__':
    # canvas size shared by all tests
    width: float = 512
    height: float = 512
    # horizontal line across the middle, inset by a quarter on each side
    position1 = Vector2(width / 4, height / 2)
    position2 = Vector2(width - width / 4, height / 2)
    line_test1(position1, position2)
    size: Vector2 = Vector2(width, height)
    line_test2(position1, position2, size)
    line_test3(position1, position2, size)
|
from . import SpecValidator
# Validation schema for Azure deployments: supported OS images plus, for
# each server role, the allowed VM instance types and storage options.
AzureSpec = {
    # Marketplace image coordinates and default SSH user per supported OS
    'available_os': {
        'CentOS7': {
            'publisher': SpecValidator(type='string', default="OpenLogic"),
            'offer': SpecValidator(type='string', default="CentOS"),
            'sku': SpecValidator(type='string', default="7.7"),
            'ssh_user': SpecValidator(type='string', default='edbadm')
        },
        'CentOS8': {
            'publisher': SpecValidator(type='string', default="OpenLogic"),
            'offer': SpecValidator(type='string', default="CentOS"),
            'sku': SpecValidator(type='string', default="8_1"),
            'ssh_user': SpecValidator(type='string', default='edbadm')
        },
        'RedHat7': {
            'publisher': SpecValidator(type='string', default="RedHat"),
            'offer': SpecValidator(type='string', default="RHEL"),
            'sku': SpecValidator(type='string', default="7.8"),
            'ssh_user': SpecValidator(type='string', default='edbadm')
        },
        'RedHat8': {
            'publisher': SpecValidator(type='string', default="RedHat"),
            'offer': SpecValidator(type='string', default="RHEL"),
            'sku': SpecValidator(type='string', default="8.2"),
            'ssh_user': SpecValidator(type='string', default='edbadm')
        }
    },
    # database host: largest default instance plus optional data volumes
    'postgres_server': {
        'instance_type': SpecValidator(
            type='choice',
            choices=[
                'Standard_A1_v2', 'Standard_A2_v2', 'Standard_A4_v2',
                'Standard_A8_v2', 'Standard_A2m_v2', 'Standard_A4m_v2',
                'Standard_A8m_v2'
            ],
            default='Standard_A4_v2'
        ),
        'volume': {
            'storage_account_type': SpecValidator(
                type='choice',
                choices=['Premium_LRS', 'StandardSSD_LRS', 'Standard_LRS',
                         'UltraSSD_LRS'],
                default='Standard_LRS'
            )
        },
        'additional_volumes': {
            'count': SpecValidator(
                type='integer',
                min=0,
                max=5,
                default=2
            ),
            'storage_account_type': SpecValidator(
                type='choice',
                choices=['Premium_LRS', 'StandardSSD_LRS', 'Standard_LRS',
                         'UltraSSD_LRS'],
                default='StandardSSD_LRS'
            ),
            'size': SpecValidator(
                type='integer',
                min=10,
                max=16000,
                default=100
            )
        }
    },
    # Postgres Enterprise Manager host
    'pem_server': {
        'instance_type': SpecValidator(
            type='choice',
            choices=[
                'Standard_A1_v2', 'Standard_A2_v2', 'Standard_A4_v2',
                'Standard_A8_v2', 'Standard_A2m_v2', 'Standard_A4m_v2',
                'Standard_A8m_v2'
            ],
            default='Standard_A2_v2'
        ),
        'volume': {
            'storage_account_type': SpecValidator(
                type='choice',
                choices=['Premium_LRS', 'StandardSSD_LRS', 'Standard_LRS',
                         'UltraSSD_LRS'],
                default='Standard_LRS'
            )
        }
    },
    # connection pooler host
    'pooler_server': {
        'instance_type': SpecValidator(
            type='choice',
            choices=[
                'Standard_A1_v2', 'Standard_A2_v2', 'Standard_A4_v2',
                'Standard_A8_v2', 'Standard_A2m_v2', 'Standard_A4m_v2',
                'Standard_A8m_v2'
            ],
            default='Standard_A2_v2'
        ),
        'volume': {
            'storage_account_type': SpecValidator(
                type='choice',
                choices=['Premium_LRS', 'StandardSSD_LRS', 'Standard_LRS',
                         'UltraSSD_LRS'],
                default='Standard_LRS'
            )
        }
    },
    # backup (barman) host: one optional, larger backup volume
    'barman_server': {
        'instance_type': SpecValidator(
            type='choice',
            choices=[
                'Standard_A1_v2', 'Standard_A2_v2', 'Standard_A4_v2',
                'Standard_A8_v2', 'Standard_A2m_v2', 'Standard_A4m_v2',
                'Standard_A8m_v2'
            ],
            default='Standard_A2_v2'
        ),
        'volume': {
            'storage_account_type': SpecValidator(
                type='choice',
                choices=['Premium_LRS', 'StandardSSD_LRS', 'Standard_LRS',
                         'UltraSSD_LRS'],
                default='Standard_LRS'
            )
        },
        'additional_volumes': {
            'count': SpecValidator(
                type='integer',
                min=0,
                max=1,
                default=1
            ),
            'storage_account_type': SpecValidator(
                type='choice',
                choices=['Premium_LRS', 'StandardSSD_LRS', 'Standard_LRS',
                         'UltraSSD_LRS'],
                default='StandardSSD_LRS'
            ),
            'size': SpecValidator(
                type='integer',
                min=10,
                max=16000,
                default=300
            )
        }
    }
}
|
"""
OpenVINO DL Workbench
Endpoints to work with downloading of files
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from contextlib import closing
from flask import jsonify, request, send_file
from config.constants import LOG_FILE
from wb.extensions_factories.database import get_db_session_for_app
from wb.main.api_endpoints.v1 import V1_DOWNLOAD_API, V1_EXPORT_PROJECT_API
from wb.main.api_endpoints.utils import md5
from wb.main.models.download_configs_model import ModelDownloadConfigsModel
from wb.main.models.downloadable_artifacts_model import DownloadableArtifactsModel
from wb.main.enumerates import TargetTypeEnum
from wb.main.models.target_model import TargetModel
from wb.main.pipeline_creators.download_model_pipeline_creator import DownloadModelPipelineCreator
from wb.main.pipeline_creators.export_project_pipeline_creator import ExportProjectPipelineCreator
from wb.main.pipeline_creators.inference_report_export_pipeline_creator import InferenceReportExportPipelineCreator
from wb.main.pipeline_creators.project_report_export_pipeline_creator import ProjectReportExportPipelineCreator
from wb.main.utils.safe_runner import safe_run
@V1_DOWNLOAD_API.route('/model-archive/<int:model_id>')
@safe_run
def archive_model(model_id: int):
    """Start (or short-circuit) a pipeline that archives a model for download.

    If an archive already exists, its artifact id is returned immediately;
    otherwise a stale download config is removed and a fresh download
    pipeline is created and launched on the local target.
    """
    download_job: ModelDownloadConfigsModel = ModelDownloadConfigsModel.query.filter_by(model_id=model_id).first()
    if download_job:
        downloadable_artifact = download_job.shared_artifact
        exist, _ = downloadable_artifact.archive_exists()
        if exist:
            # archive already on disk - nothing to schedule
            return jsonify({
                'jobId': None,
                'message': 'archive already exists',
                'artifactId': downloadable_artifact.id
            })
        # stale config without an archive: clean it up before re-creating
        with closing(get_db_session_for_app()) as session:
            downloadable_artifact.delete_record(session)
            download_job.delete_record(session)
    tab_id = request.args.get('tabId')
    name = request.args.get('name')
    local_target_model = TargetModel.query.filter_by(target_type=TargetTypeEnum.local).first()
    pipeline_creator = DownloadModelPipelineCreator(local_target_model.id, tab_id, model_id, name)
    pipeline_creator.create()
    pipeline_creator.run_pipeline()
    download_model_job = pipeline_creator.first_job
    return jsonify(download_model_job.json())
@V1_DOWNLOAD_API.route('/check-sum/<int:model_id>')
@safe_run
def check_sum(model_id: int):
    """Return the MD5 hash of a model's download archive (404 if missing)."""
    download_job = ModelDownloadConfigsModel.query.filter_by(model_id=model_id).first()
    if not download_job:
        return 'Artifact for model id {} was not found on database'.format(model_id), 404
    downloadable_artifact = DownloadableArtifactsModel.query.filter_by(job_id=download_job.job_id).first()
    exist, archive_path = downloadable_artifact.archive_exists()
    if not exist:
        return 'Cannot find archive for model {}'.format(model_id), 404
    md5sum = md5(archive_path)
    return jsonify({'hash': md5sum})
@V1_DOWNLOAD_API.route('/test-log', methods=['POST'])
@safe_run
def test_log():
    """Send the server log file back as a downloadable attachment."""
    return send_file(LOG_FILE, as_attachment=True)
@V1_DOWNLOAD_API.route('/project-report/<int:project_id>', methods=['GET'])
@safe_run
def export_project_report(project_id: int):
    """Start a pipeline that exports the report of a project; return its id."""
    report_creator = ProjectReportExportPipelineCreator(
        tab_id=request.args.get('tabId'), project_id=project_id
    )
    created_pipeline = report_creator.create()
    report_creator.run_pipeline()
    return jsonify({'id': created_pipeline.id})
@V1_DOWNLOAD_API.route('/inference-report/<int:inference_id>', methods=['GET'])
@safe_run
def export_inference_report(inference_id: int):
    """Start a pipeline that exports the report of one inference; return its id."""
    report_creator = InferenceReportExportPipelineCreator(
        tab_id=request.args.get('tabId'), inference_id=inference_id
    )
    created_pipeline = report_creator.create()
    report_creator.run_pipeline()
    return jsonify({'id': created_pipeline.id})
@V1_EXPORT_PROJECT_API.route('/project/<int:project_id>/export', methods=['POST'])
@safe_run
def export_project(project_id: int):
    """Launch the export pipeline for a project; the POST body carries the
    export configuration under the 'data' key."""
    config = request.get_json()['data']
    config['projectId'] = project_id
    export_project_pipeline_creator = ExportProjectPipelineCreator(config)
    export_project_pipeline_creator.create()
    export_project_pipeline_creator.run_pipeline()
    export_project_job = export_project_pipeline_creator.first_job
    return jsonify(export_project_job.json())
|
<gh_stars>0
# import random
#
# x = []
# for j in range(0, 10000):
# num = random.randint(1, 2000000) # picking between 1-200000 numbers value into num variable
# x.append(num) #adding num value to array
# with open('originalList5.txt', 'w') as filehandle: #Writing to original list files, i did it 5 times
# for listitem in x:
# filehandle.write('%s\n' % listitem)
import time # required library for calculate time
# test=[] #these are my test list for insertion sort
# with open('sortedList.txt', 'r') as filehandle:
# for line in range(0,3):
# currentPlace = int(line[:-1])
# test.append(currentPlace)
# print(y)
# Load the unsorted benchmark numbers (one integer per line) produced by
# the commented-out generator above.
x = []
with open('originalList5.txt', 'r') as filehandle:  # read list file
    for line in filehandle:
        currentPlace = int(line[:-1])  # strip the trailing newline
        x.append(currentPlace)
def insertionSort(array):
    """Sort *array* in place with insertion sort and return it."""
    for pos in range(1, len(array)):
        current = array[pos]
        gap = pos
        # shift larger elements one slot right until the insertion
        # point for `current` is found
        while gap > 0 and array[gap - 1] > current:
            array[gap] = array[gap - 1]
            gap -= 1
        array[gap] = current
    return array
def mergeSort(array):
    """Sort *array* of comparable items in place via top-down merge sort.

    Lists of length <= 1 are already sorted and left untouched.  Returns
    None, like list.sort().
    """
    if len(array) <= 1:
        return
    mid = len(array) // 2
    left, right = array[:mid], array[mid:]
    # recursively sort each half
    mergeSort(left)
    mergeSort(right)
    li = ri = wi = 0
    # merge the two sorted halves back into `array`
    while li < len(left) and ri < len(right):
        if left[li] < right[ri]:
            array[wi] = left[li]
            li += 1
        else:
            # ties are taken from the right half first
            array[wi] = right[ri]
            ri += 1
        wi += 1
    # copy whichever half still has elements left
    while li < len(left):
        array[wi] = left[li]
        li += 1
        wi += 1
    while ri < len(right):
        array[wi] = right[ri]
        ri += 1
        wi += 1
start = time.time()  # timer start
# insertionSort(x) #sort function start
x.sort()  # built-in sort - the variant currently being timed
# mergeSort(x) #sort function start
stop = time.time()  # timer stop
print(x)
print(stop - start)  # elapsed wall-clock time for the chosen sort
# with open('sortedList5.txt', 'w') as filehandle: # write sorted list into txt file
#     for listitem in x:
#         filehandle.write('%s\n' % listitem)
# Mean wall-clock times (seconds) over 25 hand-recorded runs of each sort
# on the same input list.
avgInsertionSort = (
    6.108926773071289 + 6.030807256698608 + 6.008946084976196 + 6.280808687210083 + 6.062066555023193 + 5.9839372634887695 + 6.030825138092041 + 6.108946084976196 + 6.108946084976456 + 6.171425104141235 + 5.999558448791504 + 5.95268702507019 + 6.04643440246582 + 6.093322038650513 + 6.093322038650513 + 6.1870269775390625 + 6.171424865722656 + 6.030824899673462 + 5.968331575393677 + 6.202671766281128 + 5.937065362930298 + 5.968331575393677 + 5.921440839767456 + 6.030808925628662 + 6.1558146476745605) / 25
avgMergeSort = (
    0.10936880111694336 + 0.06251120567321777 + 0.07811641693115234 + 0.07811737060546875 + 0.06249284744262695 + 0.07810831069946289 + 0.07812070846557617 + 0.07812047004699707 + 0.07811999320983887 + 0.07811808586120605 + 0.07811856269836426 + 0.07121856269436213 + 0.07811808586120605 + 0.066491655349731785 + 0.07811713218688965 + 0.07811784744262695 + 0.07811665534973145 + 0.07811713218688965 + 0.062491655349731445 + 0.07812023162841797 + 0.07811975479125977 + 0.07811808586120605 + 0.07811951637268066 + 0.07813429832458496 + 0.07811808586120605) / 25
avgTimSort = (
    0.015623092651367188 + 0.0 + 0.015624523162841797 + 0.0 + 0.0 + 0.015622377395629883 + 0.0 + 0.015624761581420898 + 0.0 + 0.015624046325683594 + 0.0 + 0.0 + 0.01262951215300093 + 0.0 + 0.0 + 0.0 + 0.0 + 0.015619754791259766 + 0.0 + 0.015622854232788086 + 0.015624523162841797 + 0.0 + 0.01562952995300293 + 0.0 + 0.015623807907104492) / 25
print(avgInsertionSort, avgMergeSort, avgTimSort)  # average of 25 values
|
"""
The models implemented here are the architectures to use the original EmbraceNets and the proposed EmbraceNet +.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils.componets import WeightedSum, EmbraceNet
import numpy as np
class ModelOne(nn.Module):
    '''
    EmbraceNet: fuses per-modality feature vectors with a single
    EmbraceNet stage, optionally followed by a configurable linear head.
    '''
    def __init__(self, num_classes, input_sizes, embrace_size, docker_architecture, finalouts,
                 device, use_ll, ll_config, trainable_probs):
        super(ModelOne, self).__init__()
        self.NClasses = num_classes
        self.InputSize = len(input_sizes)  # number of modalities
        self.Device = device
        self.EmbNet = EmbraceNet(input_sizes, embrace_size, docker_architecture, self.Device)
        self.FinalOut = finalouts  # True: embrace outputs2, else outputs1
        self.UseLL = use_ll
        self.TrainableProbs = trainable_probs
        self.initProbabilities()
        # a head is mandatory when the embracement size differs from the
        # number of classes
        if use_ll or num_classes != embrace_size:
            self.UseLL = True
            self.LL = self.gen_ll(ll_config, embrace_size)

    def gen_ll(self, config, embrace_size):
        """Build the linear head from a config list: 'D' -> Dropout,
        'R' -> ReLU, int -> Linear(previous_width, int)."""
        layers = []
        inC = embrace_size
        for x in config:
            if x == 'D':
                layers += [nn.Dropout()]
            elif x == 'R':
                layers += [nn.ReLU()]
            else:
                layers += [nn.Linear(inC, x)]
                inC = x
        return nn.Sequential(*layers)

    def initProbabilities(self):
        """Initialise uniform modality-selection probabilities."""
        p = torch.ones(1, self.InputSize, dtype=torch.float)
        self.p = torch.div(p, torch.sum(p, dim=-1, keepdim=True)).to(self.Device)
        # NOTE(review): forward() uses self.p, not this Parameter, so
        # trainable_probs appears to have no effect here - confirm.
        self.P = nn.Parameter(self.p, requires_grad=self.TrainableProbs)

    def forward(self, outputs1, outputs2, available):
        """Fuse the chosen feature set.

        available: per-modality flags; 0.0 marks a modality as missing for
        the whole batch.  Returns (head_output, embracement) when a head
        is used, otherwise (embracement, None).
        """
        batch_size = outputs1[0].shape[0]
        availabilities = torch.ones(batch_size, self.InputSize, dtype=torch.float, device=self.Device)
        for i, av in enumerate(available):
            if av == 0.0:
                availabilities[:, i] = 0.0
        probabilities = torch.stack([self.p]*batch_size, dim=0).view(batch_size, self.InputSize)
        if self.FinalOut:
            out = self.EmbNet.forward(outputs2, availabilities, probabilities)
        else:
            out = self.EmbNet.forward(outputs1, availabilities, probabilities)
        if self.UseLL:
            outl = self.LL(out)
            return outl, out
        return out, None
class ModelNewFour(nn.Module):
    '''
    EmbraceNet +: integrates three EmbraceNets plus a naive concatenation
    and a weighted sum of the final-layer modality outputs.
    '''
    def __init__(self, num_classes, input_sizes, final_input_sizes,
                 embrace1_param, embrace2_param, embrace3_param, wsum_confg,
                 device, trainable_probs, useffinal, use_ws, use_ll, ll_configs):
        super(ModelNewFour, self).__init__()
        self.NClasses = num_classes
        self.InputSize = input_sizes  # number of modalities
        self.FinalInputSize = final_input_sizes  # inputs of the fusion EmbraceNet
        self.Device = device
        # EmbNet1 fuses intermediate features, EmbNet2 final features,
        # EmbNet3 fuses the four partial fusion results
        self.EmbNet1 = EmbraceNet(**embrace1_param)
        self.EmbNet2 = EmbraceNet(**embrace2_param)
        self.EmbNet3 = EmbraceNet(**embrace3_param)
        self.WeightedSum = WeightedSum(**wsum_confg)
        self.UseLL1 = use_ll[0]
        self.UseLL2 = use_ll[1]
        self.UseLL3 = use_ll[2]
        self.UseFinalsInFinal = useffinal
        self.UseWSum = use_ws
        self.TrainableProbs = trainable_probs
        self.initProbabilities()
        if self.UseLL1:
            self.LL1 = self.gen_ll(**ll_configs[0])
        if self.UseLL2:
            self.LL2 = self.gen_ll(**ll_configs[1])
        if self.UseLL3:
            self.LL3 = self.gen_ll(**ll_configs[2])

    def gen_ll(self, config, embrace_size):
        """Build a linear head from a config list: 'D' -> Dropout,
        'R' -> ReLU, int -> Linear(previous_width, int)."""
        layers = []
        inC = embrace_size
        for x in config:
            if x == 'D':
                layers += [nn.Dropout()]
            elif x == 'R':
                layers += [nn.ReLU()]
            else:
                layers += [nn.Linear(inC, x)]
                inC = x
        return nn.Sequential(*layers)

    def initProbabilities(self):
        """Initialise uniform selection probabilities for all three EmbraceNets."""
        p1 = torch.ones(1, self.InputSize, dtype=torch.float)
        p2 = torch.ones(1, self.InputSize, dtype=torch.float)
        p3 = torch.ones(1, self.FinalInputSize, dtype=torch.float)
        self.p1 = torch.div(p1, torch.sum(p1, dim=-1, keepdim=True)).to(self.Device)
        self.p2 = torch.div(p2, torch.sum(p2, dim=-1, keepdim=True)).to(self.Device)
        self.p3 = torch.div(p3, torch.sum(p3, dim=-1, keepdim=True)).to(self.Device)
        # NOTE(review): the Parameters wrap the UN-normalised p1/p2/p3 and
        # forward() uses the normalised self.p1/p2/p3 instead, so
        # trainable_probs appears to have no effect - confirm.
        self.P1 = nn.Parameter(p1, requires_grad=self.TrainableProbs)
        self.P2 = nn.Parameter(p2, requires_grad=self.TrainableProbs)
        self.P3 = nn.Parameter(p3, requires_grad=self.TrainableProbs)

    def forward(self, outputs1, outputs2, available):
        """Fuse intermediate (outputs1) and final (outputs2) modality
        features; returns (final_fusion, (out1, out2, wsout)).

        The availability matrix has InputSize + 4 columns: the first
        InputSize flag the modalities, the last 4 flag the fusion inputs
        of EmbNet3.  NOTE(review): the [:, :-4] / [:, 4:] slices only
        line up when InputSize == 4 - confirm.
        """
        batch_size = outputs1[0].shape[0]
        availabilities = torch.ones(batch_size, self.InputSize+4, dtype=torch.float, device=self.Device)
        for i, av in enumerate(available):
            if av == 0.0:
                availabilities[:, i] = 0.0
        probabilities1 = torch.stack([self.p1]*batch_size, dim=0).view(batch_size, self.InputSize)
        out1 = self.EmbNet1.forward(outputs1, availabilities[:, :-4], probabilities1)
        if self.UseLL1:
            out1 = self.LL1(out1)
        probabilities2 = torch.stack([self.p2]*batch_size, dim=0).view(batch_size, self.InputSize)
        out2 = self.EmbNet2.forward(outputs2, availabilities[:, :-4], probabilities2)
        if self.UseLL2:
            out2 = self.LL2(out2)
        # weighted sum and naive concatenation of the final features
        wsout = self.WeightedSum.forward(torch.stack(outputs2, dim=1), availabilities[:, :-4])
        concat = torch.cat(outputs2, dim=-1)
        probabilities3 = torch.stack([self.p3]*batch_size, dim=0).view(batch_size, self.FinalInputSize)
        # optionally disable the concatenation and/or weighted-sum inputs
        if not self.UseFinalsInFinal:
            availabilities[:, -1] = 0.0
        if not self.UseWSum:
            availabilities[:, -2] = 0.0
        out = self.EmbNet3.forward([out1, out2, wsout, concat], availabilities[:, 4:], probabilities3)
        if self.UseLL3:
            out = self.LL3(out)
        return out, (out1, out2, wsout)
class MergeClass():
    '''
    Wrapper around the trainable fusion model: runs the frozen modality
    networks and feeds their features to the merge model.
    '''
    def __init__(self, models={}, config={}, device=torch.device('cpu'),
                 labels={}, self_embeding=False, debug_mode=False):
        '''
        models : dictionary with the models already loaded and in eval mode
        config : dictionary with the parameters to define the merge module
        device : torch device
        labels : dictionary with the tags used
        self_embeding : boolean, if true then embed heuristic is used
        debug_mode : boolean, if true, show various final and average informations

        NOTE(review): mutable default arguments ({}) are shared between
        calls - harmless only if never mutated; confirm.
        '''
        self.Modalities = models
        self.MergeConfig = config
        self.Device = device
        self.MergeModel = self.get_model(self.MergeConfig)
        self.Classes = labels
        self.SelfEmbeddin = self_embeding

    def get_model(self, config):
        """Instantiate the merge model selected by config['type']."""
        type = config['type']
        if type == 1:
            return ModelOne(**config['parameters']).to(self.Device)
        elif type == 5:
            return ModelNewFour(**config['parameters']).to(self.Device)
        else:
            raise NameError('type {} is not supported yet'.format(type))
            # models 2, 3 and 4 was discarded

    def parameters(self):
        # only the merge model is trainable; modality nets stay frozen
        return self.MergeModel.parameters()

    def train(self):
        self.MergeModel.train()

    def eval(self):
        self.MergeModel.eval()

    def state_dict(self):
        return self.MergeModel.state_dict()

    def load_state_dict(self, dict):
        self.MergeModel.load_state_dict(dict)

    def forward(self, data):
        """Run all modality networks on *data* and fuse their features.

        Missing face/pose inputs (all-zero tensors) are flagged as
        unavailable and their slots are filled with the context features.
        """
        availables = [1.0] * 4  # body, context, face, pose
        fb, _, mb = self.Modalities['body'].forward(data['body'])
        fc, _, mc = self.Modalities['context'].forward(data['context'])
        middle_out = [mb[3], mc[3]]
        final_out = [fb, fc]
        if data['face'].sum().item() != 0.0:
            ff, mf = self.Modalities['face'].forward(data['face'])
            middle_out += [mf]
            final_out += [ff]
        else:
            # face missing: mark unavailable, reuse context features
            availables[2] = 0.0
            middle_out += [mc[3]]
            final_out += [fc]
        if data['joint'].sum().item() != 0.0:
            fs, ms = self.Modalities['pose'].forward((data['joint'], data['bone']), 0)
            ms = torch.cat((ms[0], ms[1]), dim=-1)
            middle_out += [ms]
            final_out += [fs]
        else:
            # pose missing: mark unavailable, reuse context features
            availables[3] = 0.0
            middle_out += [mc[3]]
            final_out += [fc]
        out, middle = self.MergeModel.forward(middle_out, final_out, availables)
        return out, middle
<reponame>oceanobservatories/ooi-instrument-agent<filename>ooi_instrument_agent/client.py<gh_stars>0
import logging
import six
import zmq.green as zmq
# Module-level logger for this client module.
log = logging.getLogger(__name__)
# One shared gevent-compatible ZMQ context for all client sockets.
context = zmq.Context()
# Fallback RPC timeout in seconds when the driver metadata provides none.
DEFAULT_TIMEOUT = 60
class TimeoutException(Exception):
    """Raised when the driver does not answer within the timeout interval."""

    # HTTP status code surfaced by the web layer for this error.
    status_code = 408

    def __init__(self, message=None):
        super(TimeoutException, self).__init__()
        self.message = message
class ParameterException(Exception):
    """Raised when one or more driver parameters fail validation."""

    # HTTP status code surfaced by the web layer for this error.
    status_code = 400

    def __init__(self, message=None):
        super(ParameterException, self).__init__()
        self.message = message
class ZmqDriverClient(object):
    """
    A class for performing RPC with a ZMQ-based driver process

    Utilizes zmq.green to provide a gevent-compatible version of zmq
    """
    def __init__(self, host, port):
        """
        :param host: Hostname or IP of the target driver
        :param port: Port number of the target driver
        """
        self.host = host
        self.port = port
        self._socket = None
        log.debug('Start %r', self)

    def _connect(self):
        """
        Lazily create and cache the REQ socket for this client.

        :return: Connected ZMQ REQ socket
        """
        if self._socket is None:
            log.debug('Connecting ZMQ client: %r', self)
            socket = context.socket(zmq.REQ)
            socket.connect('tcp://{host}:{port}'.format(host=self.host, port=self.port))
            log.debug('Connected: %r', self)
            self._socket = socket
        return self._socket

    def _command(self, command, *args, **kwargs):
        """
        Send one RPC request to the driver and wait for its reply.

        :param command: RPC Command to execute
        :param args: Positional arguments
        :param kwargs: Keyword arguments; a 'timeout' entry (milliseconds,
            default 1000) is consumed here and not forwarded to the driver
        :return: Response from driver
        :raises TimeoutException: if no response received before timeout milliseconds
        """
        socket = self._connect()
        log.debug('%r _command(%r %r %r)', self, command, args, kwargs)
        timeout = kwargs.pop('timeout', None)
        timeout = timeout if timeout is not None else 1000
        msg = {'cmd': command, 'args': args, 'kwargs': kwargs}
        socket.send_json(msg)
        events = socket.poll(timeout=timeout)
        if events:
            response = socket.recv_json()
            return response
        raise TimeoutException({'timeout': 'no response in timeout interval %d' % timeout})

    def __enter__(self):
        """Client as context manager"""
        return self

    def __exit__(self, *args, **kwargs):
        # Close and drop the cached socket so a reused client reconnects.
        if self._socket is not None:
            self._socket.close()
            self._socket = None

    def ping(self, *args, **kwargs):
        """Echo test against the driver process."""
        return self._command('process_echo', *args, **kwargs)

    def execute(self, command, *args, **kwargs):
        """Execute a resource command, resolving the per-command timeout
        from the driver metadata when the caller supplies none."""
        timeout = kwargs.pop('timeout', None)
        if timeout is None:
            state = self.get_state()
            timeout = _get_timeout(command, state)
        kwargs['timeout'] = timeout
        return self._command('execute_resource', command, *args, **kwargs)

    def init_params(self, *args, **kwargs):
        """Set the driver's initialization parameters."""
        return self._command('set_init_params', *args, **kwargs)

    def shutdown(self, *args, **kwargs):
        """Ask the driver process to stop."""
        return self._command('stop_driver_process', *args, **kwargs)

    def get_state(self, *args, **kwargs):
        """Fetch the driver's overall state (includes command/parameter metadata)."""
        return self._command('overall_state', *args, **kwargs)

    def get_resource_state(self, *args, **kwargs):
        return self._command('get_resource_state', *args, **kwargs)

    def get_resource(self, *args, **kwargs):
        return self._command('get_resource', *args, **kwargs)

    def set_resource(self, resource, *args, **kwargs):
        """Validate, coerce and set driver parameters.

        :param resource: dict of parameter name -> requested value
        :raises ParameterException: if any parameter fails validation
        """
        state = self.get_state()
        parameter_metadata = _get_parameters(state)
        # Fix: pop with a default so a caller omitting 'timeout' does not get
        # a KeyError (now consistent with execute()).
        timeout = kwargs.pop('timeout', None)
        if timeout is None:
            timeout = _get_timeout('DRIVER_EVENT_SET', state)
        kwargs['timeout'] = timeout
        resource = _validate_parameters(parameter_metadata, resource)
        return self._command('set_resource', resource, *args, **kwargs)

    def discover(self, *args, **kwargs):
        """Drive the instrument into its discovered/active protocol state."""
        return self.execute('DRIVER_EVENT_DISCOVER', *args, **kwargs)

    def set_log_level(self, *args, **kwargs):
        return self._command('set_log_level', *args, **kwargs)

    def __repr__(self):
        return 'ZmqDriverClient(%r, %r)' % (self.host, self.port)
def _get_timeout(command, state_response):
    """Return the timeout (in milliseconds) for *command* from an
    overall_state response, falling back to DEFAULT_TIMEOUT seconds."""
    metadata = state_response.get('value', {}).get('metadata', {})
    seconds = metadata.get('commands', {}).get(command, {}).get('timeout', DEFAULT_TIMEOUT)
    return seconds * 1000
def _get_parameters(state_response):
return state_response.get('value', {}).get('metadata', {}).get('parameters', {})
def _is_writable(parameter, parameter_metadata):
return parameter_metadata.get(parameter, {}).get('visibility') == 'READ_WRITE'
def _get_range_ptype(parameter, parameter_metadata):
prange = parameter_metadata.get(parameter, {}).get('range')
ptype = parameter_metadata.get(parameter, {}).get('value', {}).get('type')
return prange, ptype
def _coerce_type(value, ptype):
if ptype == 'string':
if isinstance(value, basestring):
return value
try:
return str(value)
except:
return None
if ptype == 'bool':
if isinstance(value, bool):
return value
elif isinstance(value, int):
if value == 0:
return False
if value == 1:
return True
elif isinstance(value, basestring):
if value in ['true', 'True']:
return True
if value in ['false', 'False']:
return False
if ptype == 'int':
if isinstance(value, bool):
return None
if isinstance(value, int):
return value
if isinstance(value, basestring):
try:
return int(value)
except ValueError:
return None
if ptype == 'float':
if isinstance(value, float):
return value
if isinstance(value, int):
return float(value)
if isinstance(value, basestring):
try:
return float(value)
except ValueError:
return None
def _validate_parameters(parameter_metadata, parameters):
    """Validate and coerce a dict of requested parameter values against the
    driver metadata.

    Collects every individual failure and raises a single ParameterException
    mapping parameter name -> reason; otherwise returns the coerced dict.
    """
    errors = {}
    validated = {}
    for name, requested in six.iteritems(parameters):
        if not _is_writable(name, parameter_metadata):
            errors[name] = 'Parameter(%s) not writeable' % name
            continue
        prange, ptype = _get_range_ptype(name, parameter_metadata)
        coerced = _coerce_type(requested, ptype)
        if coerced is None:
            errors[name] = 'Parameter(%s) Unable to coerce to %s (%s)' % (name, ptype, requested)
            continue
        if prange is not None:
            # A list range is [low, ..., high]; a dict range enumerates the
            # allowed values.
            if isinstance(prange, list):
                if coerced < prange[0] or coerced > prange[-1]:
                    errors[name] = 'Parameter(%s) outside valid range (%r) (%r)' % (name, prange, requested)
                    continue
            elif isinstance(prange, dict):
                if coerced not in prange.values():
                    errors[name] = 'Parameter(%s) not one of (%r) (%r)' % (name, prange.values(), requested)
                    continue
        validated[name] = coerced
    if errors:
        raise ParameterException(errors)
    return validated
|
<filename>monai/deploy/core/resource.py
# Copyright 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Union
from monai.deploy.exceptions import ItemAlreadyExistsError, UnknownTypeError, WrongValueError
from monai.deploy.utils.sizeutil import get_bytes
class Resource:
    """Class responsible for resource limits.

    Each resource limit defaults to None (its accessor then returns 0).
    Limits set by the CLI arguments (the runtime environment) take precedence:
    the @resource decorator may only fill in values that are still None, and
    attempting to override an already-set value raises ItemAlreadyExistsError.
    """

    def __init__(self, cpu: Optional[int] = None, memory: Optional[int] = None, gpu: Optional[int] = None):
        # None means "not set"; the properties below map it to 0.
        self._cpu = cpu
        self._memory = memory
        self._gpu = gpu

    @property
    def cpu(self) -> int:
        """Number of CPU cores required (0 when unset)."""
        return 0 if self._cpu is None else self._cpu

    @property
    def memory(self) -> int:
        """Amount of memory required, in bytes (0 when unset)."""
        return 0 if self._memory is None else self._memory

    @property
    def gpu(self) -> int:
        """Number of GPUs required (0 when unset)."""
        # TODO(gigony): check if the gpu limit can be distinguished between all gpus vs zero gpu.
        # https://github.com/NVIDIA/k8s-device-plugin/issues/61
        return 0 if self._gpu is None else self._gpu

    def set_resource_limits(
        self,
        cpu_limit: Optional[int] = None,
        memory_limit: Optional[Union[int, str]] = None,
        gpu_limit: Optional[int] = None,
    ):
        """Sets resource limits from the given values if each attribute is not None.

        Raises:
            ItemAlreadyExistsError: when a limit is already set by the runtime environment.
            WrongValueError: when a string memory size cannot be parsed.
        """
        if cpu_limit is not None:
            if self._cpu is None:
                self._cpu = cpu_limit
            else:
                raise ItemAlreadyExistsError(
                    f"'cpu' wouldn't be set to {cpu_limit} because it is already set to {self._cpu} by the runtime environment."
                )
        if gpu_limit is not None:
            if self._gpu is None:
                self._gpu = gpu_limit
            else:
                raise ItemAlreadyExistsError(
                    f"'gpu' wouldn't be set to {gpu_limit} because it is already set to {self._gpu} by the runtime environment."
                )
        # isinstance() instead of type() ==. A string memory size (e.g. "2Gi")
        # is parsed into bytes first.
        if isinstance(memory_limit, str):
            try:
                memory_bytes = get_bytes(memory_limit)
            except Exception as e:
                raise WrongValueError(
                    f"Memory size specified in the application (via @resource) is not valid: {e.args[0]}"
                )
            # Fix: respect an already-set value, consistent with the int branch.
            # Previously a string memory limit silently overrode the CLI value,
            # contradicting the documented precedence.
            if self._memory is None:
                self._memory = memory_bytes
            else:
                raise ItemAlreadyExistsError(
                    f"'memory' wouldn't be set to {memory_limit} because it is already set to {self._memory}"
                    " by the runtime environment."
                )
        elif isinstance(memory_limit, int):
            if self._memory is None:
                self._memory = memory_limit
            else:
                raise ItemAlreadyExistsError(
                    f"'memory' wouldn't be set to {memory_limit} because it is already set to {self._memory}"
                    " by the runtime environment."
                )

    def __str__(self):
        return "Resource(cpu={}, memory={}, gpu={})".format(self.cpu, self.memory, self.gpu)
def resource(
    cpu: Optional[int] = None,
    memory: Optional[Union[int, str]] = None,
    gpu: Optional[int] = None,
):
    """A decorator that adds an resource requirement to the application.

    Args:
        cpu (int): A number of CPU cores required.
        memory (int): A size of memory required (in bytes).
        gpu (int): A number of GPUs required.

    Returns:
        A decorator that adds an resource requirement to the application.
    """
    # Imported lazily to avoid a circular import with the application module.
    from .application import Application

    def decorator(cls):
        # Guard clause: only Application subclasses may carry this decorator.
        if not issubclass(cls, Application):
            raise UnknownTypeError("Use @resource decorator only for a subclass of Application!")
        builder = cls.__dict__.get("_builder")

        def new_builder(self: Application):
            # Run this (outer) decorator's work first so stacked decorators
            # take effect in source order.
            try:
                self.context.resource.set_resource_limits(cpu, memory, gpu)
            except ItemAlreadyExistsError as e:
                raise ItemAlreadyExistsError(f"In @resource decorator at {self.name}, {e.args[0]}")
            if builder:
                builder(self)  # chain to the previously registered builder
            return self

        cls._builder = new_builder
        return cls

    return decorator
|
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Definitions of the topologies for the example research projects.
"""
from typing import Tuple
import matplotlib.pyplot as plt
import numpy as np
coupling_maps = {}
"""A dictionary with coupling maps (lists of bonds) for different qubit configurations and topologies."""
qubit_coordinates = {}
"""A dictionary with coordinates lists positioning qubits in the plane for plotting configurations,
corresponding to those defined in the `coupling_maps` dictionary."""
h_z_patterns = {}
"""A dictionary with a pattern of the relative strength of h_z terms of a qubit configuration."""
def _create_ring_A(n_qubits: int, i_offset: int = 0) -> Tuple[list, list, list]:
"""Generate a ring topology of type A - indicating a ladder-like ordering of the qubits.
Args:
n_qubits: The number of qubits composing the ring.
i_offset: The index offset of the first qubit.
Returns:
A tuple with the coupling map, qubit coordinates in the plane (with the first qubit placed
at [1, 1]), and a 0-1 pattern array with alternating values for neighboring qubits.
"""
c_map = []
q_coordinates = []
h_z_pat = []
c_map.extend([[i_offset, i_offset + 1], [i_offset, i_offset + 2]])
for i in range(i_offset + 1, i_offset + n_qubits - 2):
c_map.append([i, i + 2])
c_map.append([i_offset + n_qubits - 2, i_offset + n_qubits - 1])
h_z_pat.append(0)
q_coordinates.append([1, 1])
h_z_ = 1
for i in range(1, n_qubits - 2, 2):
h_z_pat.extend([h_z_, h_z_])
q_coordinates.extend([[2 + int(i / 2), 0], [2 + int(i / 2), 2]])
h_z_ = 0 if h_z_ == 1 else 1
h_z_pat.append(h_z_)
q_coordinates.append([1 + int(n_qubits / 2), 1])
return c_map, q_coordinates, h_z_pat
# Add a 1D chain topology for an odd number of qubits (3 to 61 qubits).
# This chain topology entries have keys of the form 'N.chain.M' where N is the number of qubits,
# and M indicates that the middle qubit has h_z amplitude 0.
for n_qubits in range(3, 63, 2):
    c_map = [[i, i + 1] for i in range(n_qubits - 1)]
    # Parity of the driven (h_z = 0) qubits, chosen so the middle qubit is driven.
    driven_qubit_is_odd = 1 if (n_qubits - 1) % 4 != 0 else 0
    q_coordinates = [[0, i] for i in range(n_qubits)]
    h_z_pat = [0 if i % 2 == driven_qubit_is_odd else 1 for i in range(n_qubits)]
    s_key = f"{n_qubits}.chain.M"
    coupling_maps[s_key] = c_map
    qubit_coordinates[s_key] = q_coordinates
    h_z_patterns[s_key] = h_z_pat
# Add a 1D chain topology for all numbers of qubits (2 to 62 qubits).
# The chain topology entries have keys of the form 'N.chain.E' where N is the number of qubits,
# and E indicates that the left edge qubit (indexed 0) has h_z amplitude 0.
for n_qubits in range(2, 63):
    c_map = []
    q_coordinates = []
    h_z_pat = []
    # Nearest-neighbor bonds along the chain.
    for i in range(n_qubits - 1):
        c_map.append([i, i + 1])
    # Qubits laid out on one row; even-indexed qubits are driven (h_z = 0).
    for i in range(n_qubits):
        q_coordinates.append([0, i])
        h_z_pat.append(0 if i % 2 == 0 else 1)
    s_key = f"{n_qubits}.chain.E"
    coupling_maps[s_key] = c_map
    qubit_coordinates[s_key] = q_coordinates
    h_z_patterns[s_key] = h_z_pat
# We add ring topologies for 4 to 62 qubits, with keys in the form 'N.ring.A' where N is the number
# of qubits, and A is the mpo_ordering - indicating a ladder-like ordering of the qubits.
for n_qubits in range(4, 64, 2):
    ring_map, ring_coords, ring_pattern = _create_ring_A(n_qubits)
    ring_key = f"{n_qubits}.ring.A"
    coupling_maps[ring_key] = ring_map
    qubit_coordinates[ring_key] = ring_coords
    h_z_patterns[ring_key] = ring_pattern
# We add ring topologies for 4 to 62 qubits, with keys in the form 'N.ring.B' where N is the number of
# qubits, and B is the mpo_ordering - indicating qubits ordered to have one large jump at the last one.
for n_qubits in range(4, 64, 2):
    c_map = [[i, i + 1] for i in range(n_qubits - 1)]
    c_map.append([n_qubits - 1, 0])  # close the ring with the single long jump
    h_z_pat = [0 if i % 2 == 0 else 1 for i in range(n_qubits)]
    half = n_qubits // 2
    # Walk around the ring: start at [1, 1], go out along the bottom row,
    # turn at the far end, and come back along the top row.
    q_coordinates = [[1, 1]]
    q_coordinates += [[i, 0] for i in range(2, half + 1)]
    q_coordinates.append([half + 1, 1])
    q_coordinates += [[i, 2] for i in range(half, 1, -1)]
    s_key = f"{n_qubits}.ring.B"
    coupling_maps[s_key] = c_map
    qubit_coordinates[s_key] = q_coordinates
    h_z_patterns[s_key] = h_z_pat
# We add plaquette topologies for 6 to 62 qubits, with keys in the form 'N.plaquette.A' where N is
# the number of qubits, and A is the mpo_ordering - indicating a ladder-like ordering of the qubits.
for n_qubits in range(6, 64, 2):
    # Build an (n-2)-qubit ring starting at index 1, then attach one extra
    # qubit at each end of the ring.
    inner_map, inner_coords, inner_pattern = _create_ring_A(n_qubits - 2, 1)
    c_map = [[0, 1]] + inner_map + [[n_qubits - 2, n_qubits - 1]]
    last_coord = inner_coords[-1]
    q_coordinates = [[0, 1]] + inner_coords + [[last_coord[0] + 1, last_coord[1]]]
    # The two attached end qubits get h_z amplitude 0; the ring's pattern is inverted.
    h_z_pat = [0] + [1 - h_z for h_z in inner_pattern] + [0]
    s_key = f"{n_qubits}.plaquette.A"
    coupling_maps[s_key] = c_map
    qubit_coordinates[s_key] = q_coordinates
    h_z_patterns[s_key] = h_z_pat
# We add a few plaquette topologies, with keys in the form 'N.plaquette.B' where N is the number of
# qubits, and B is the mpo_ordering - indicating qubits ordered to have one large jump at the last one.
# 10-qubit plaquette: an 8-qubit ring (indices 1..9, closed by [9, 1]) with
# pendant qubits 0 and 6 attached at opposite corners.
coupling_maps["10.plaquette.B"] = [
    [0, 1],
    [1, 2],
    [2, 3],
    [3, 4],
    [4, 5],
    [5, 6],
    [5, 7],
    [7, 8],
    [8, 9],
    [9, 1],
]
qubit_coordinates["10.plaquette.B"] = [
    [0, 1],
    [1, 1],
    [2, 0],
    [3, 0],
    [4, 0],
    [5, 1],
    [6, 1],
    [4, 2],
    [3, 2],
    [2, 2],
]
h_z_patterns["10.plaquette.B"] = [0, 1, 0, 1, 0, 1, 0, 0, 1, 0]
# 12-qubit plaquette: same construction with a 10-qubit ring (1..11) and
# pendant qubits 0 and 7.
coupling_maps["12.plaquette.B"] = [
    [0, 1],
    [1, 2],
    [2, 3],
    [3, 4],
    [4, 5],
    [5, 6],
    [6, 7],
    [6, 8],
    [8, 9],
    [9, 10],
    [10, 11],
    [11, 1],
]
qubit_coordinates["12.plaquette.B"] = [
    [0, 1],
    [1, 1],
    [2, 0],
    [3, 0],
    [4, 0],
    [5, 0],
    [6, 1],
    [7, 1],
    [5, 2],
    [4, 2],
    [3, 2],
    [2, 2],
]
h_z_patterns["12.plaquette.B"] = [0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0]
# Below is the connectivity map and outline of the IBM Quantum Falcon devices
# (27 qubits; bonds are listed in both directions).
coupling_maps["27.falcon"] = [
    [0, 1],
    [1, 0],
    [1, 2],
    [1, 4],
    [2, 1],
    [2, 3],
    [3, 2],
    [3, 5],
    [4, 1],
    [4, 7],
    [5, 3],
    [5, 8],
    [6, 7],
    [7, 4],
    [7, 6],
    [7, 10],
    [8, 5],
    [8, 9],
    [8, 11],
    [9, 8],
    [10, 7],
    [10, 12],
    [11, 8],
    [11, 14],
    [12, 10],
    [12, 13],
    [12, 15],
    [13, 12],
    [13, 14],
    [14, 11],
    [14, 13],
    [14, 16],
    [15, 12],
    [15, 18],
    [16, 14],
    [16, 19],
    [17, 18],
    [18, 15],
    [18, 17],
    [18, 21],
    [19, 16],
    [19, 20],
    [19, 22],
    [20, 19],
    [21, 18],
    [21, 23],
    [22, 19],
    [22, 25],
    [23, 21],
    [23, 24],
    [24, 23],
    [24, 25],
    [25, 22],
    [25, 24],
    [25, 26],
    [26, 25],
]
qubit_coordinates["27.falcon"] = [
    [1, 0],
    [1, 1],
    [2, 1],
    [3, 1],
    [1, 2],
    [3, 2],
    [0, 3],
    [1, 3],
    [3, 3],
    [4, 3],
    [1, 4],
    [3, 4],
    [1, 5],
    [2, 5],
    [3, 5],
    [1, 6],
    [3, 6],
    [0, 7],
    [1, 7],
    [3, 7],
    [4, 7],
    [1, 8],
    [3, 8],
    [1, 9],
    [2, 9],
    [3, 9],
    [3, 10],
]
# No h_z modulation on the Falcon layout: all amplitudes uniform.
h_z_patterns["27.falcon"] = [0] * 27
def plot_topology(
    N: int,
    topology: str,
    s_coupling_map: str,
    b_save_figures: bool,
    s_file_prefix: str,
    b_transpose_plot=False,
    b_alternating_qubits=False,
):
    """Plot a qubit configuration using qiskit's coupling-map drawing.

    Args:
        N: Number of qubits in the configuration.
        topology: Topology family name ('chain', 'ring', 'plaquette', ...);
            used only to choose the figure size.
        s_coupling_map: Key into the module-level coupling_maps /
            qubit_coordinates / h_z_patterns dictionaries.
        b_save_figures: When True, save the figure as a .png file.
        s_file_prefix: Path prefix for the saved figure file.
        b_transpose_plot: When True, swap the x/y plane coordinates.
        b_alternating_qubits: When True, color qubits with nonzero h_z
            amplitude in red.
    """
    qubit_color = ["#648fff"] * N
    h_z_pattern = h_z_patterns[s_coupling_map]
    coupling_map = coupling_maps[s_coupling_map]
    qubit_coord = qubit_coordinates[s_coupling_map]
    if b_alternating_qubits:
        s_alternating = ".alternating"
        for i in np.nonzero(h_z_pattern)[0]:
            qubit_color[i] = "#ff6f64"
    else:
        s_alternating = ""
    # Rings and plaquettes are drawn tall; chains wide.
    if topology in ("plaquette", "ring"):
        figsize = (4, 7)
    else:
        figsize = (8, 2)
    if b_transpose_plot:
        figsize = (figsize[1], figsize[0])
        q_coord = [[ll[1], ll[0]] for ll in qubit_coord]
    else:
        q_coord = qubit_coord
    try:
        # Imported lazily so the module is usable without qiskit installed.
        from qiskit.visualization.gate_map import plot_coupling_map

        # Fix: the returned figure was bound to an unused local; the call
        # draws on the current matplotlib figure, which savefig() captures.
        plot_coupling_map(
            num_qubits=N,
            qubit_coordinates=q_coord,
            coupling_map=coupling_map,
            figsize=figsize,
            qubit_color=qubit_color,
        )
        if b_save_figures:
            plt.savefig(s_file_prefix + s_alternating + ".png")
        plt.draw()
        plt.pause(0.1)
        plt.show(block=False)
    except Exception as e:
        # Best effort: report and continue if qiskit or a GUI backend is missing.
        print(str(e))
|
<filename>test/compiler/test_enot_compiler.py
import os
import subprocess
import unittest
from os import listdir
from os.path import join
from mock import patch
import test
from enot.__main__ import create
from enot.compiler.enot import EnotCompiler
from enot.pac_cache.local_cache import LocalCache
from enot.packages.config.enot import EnotConfig
from enot.packages.package import Package
from enot.packages.package_builder import Builder
from enot.utils.erl_file_utils import parse_app_config
from enot.utils.file_utils import ensure_dir
from test.abs_test_class import TestClass, set_prebuild, set_git_url, set_git_tag, set_deps
def mock_fetch_package(dep: Package):
    """Test stub for LocalCache.fetch_package: load the dependency from the
    test's tmp directory instead of a remote repository."""
    tmp_path = join(os.getcwd(), test.get_test_dir('compile_tests'), 'tmp')
    dep.update_from_cache(join(tmp_path, dep.name))
class CompileTests(TestClass):
    """Integration tests for EnotCompiler: plain compilation, .app file
    generation from .app.src, parse-transform ordering, prebuild hooks and
    compile-time defines. Requires an Erlang toolchain on the host."""

    def __init__(self, method_name):
        super().__init__('compile_tests', method_name)

    @property
    def src_dir(self):
        # Erlang sources are written to <test_dir>/src.
        return join(self.test_dir, 'src')

    @property
    def ebin_dir(self):
        # Compiled .beam/.app files are emitted to <test_dir>/ebin.
        return join(self.test_dir, 'ebin')

    # Proper erlang file is compiled
    @patch.object(EnotCompiler, '_EnotCompiler__write_app_file')
    def test_proper_compilation(self, mock_compiler):
        mock_compiler.return_value = True
        ensure_dir(self.src_dir)
        with open(join(self.src_dir, 'proper.erl'), 'w') as w:
            w.write('''
-module(proper).
-export([test/0]).
test() -> do_smth(1).
do_smth(A) -> A + 1.
''')
        config = EnotConfig({'name': 'test'})
        package = Package(self.test_dir, config, None)
        compiler = EnotCompiler(package)
        self.assertEqual(True, compiler.compile())
        self.assertEqual(True, os.path.exists(join(self.ebin_dir, 'proper.beam')))

    # Erlang file with syntax error is not compiled
    @patch.object(EnotCompiler, '_EnotCompiler__write_app_file')
    def test_error_compilation(self, mock_compiler):
        mock_compiler.return_value = True
        ensure_dir(self.src_dir)
        with open(join(self.src_dir, 'improper.erl'), 'w') as w:
            w.write('''
-module(proper).
-export([test/0]).
test() -> syntax error here.
do_smth(A) -> A + 1.
''')
        config = EnotConfig({'name': 'test'})
        package = Package(self.test_dir, config, None)
        compiler = EnotCompiler(package)
        self.assertEqual(False, compiler.compile())
        self.assertEqual(False, os.path.exists(join(self.ebin_dir, 'improper.beam')))

    # application file is created from app.src file. Templates are filled.
    def test_write_app_file_from_src(self):
        ensure_dir(self.src_dir)
        with open(join(self.src_dir, 'proper.erl'), 'w') as w:
            w.write('''
-module(proper).
-export([test/0]).
test() -> do_smth(1).
do_smth(A) -> A + 1.
''')
        # app.src with jinja-style placeholders that the compiler must fill.
        with open(join(self.src_dir, 'proper.app.src'), 'w') as w:
            w.write('''
{application, proper,
[
{description, ""},
{vsn, "{{ app.vsn }}"},
{registered, []},
{modules, {{ modules }}},
{applications, {{ app.std_apps + app.apps }}},
{mod, {proper_app, []}},
{env, []}
]}.
''')
        with open(join(self.test_dir, 'enot_config.json'), 'w') as w:
            w.write('''{
\"name\":\"proper\",
\"app_vsn\":\"1.0.0\",
\"deps\": [{\"name\": \"test_dep\",
\"url\": \"http://github/comtihon/test_dep\",
\"tag\": \"test_vsn\"}]
}''')
        package = Package.from_path(self.test_dir)
        compiler = EnotCompiler(package)
        self.assertEqual(True, compiler.compile())
        self.assertEqual(True, os.path.exists(self.ebin_dir))
        ls = listdir(self.ebin_dir)
        self.assertEqual(True, 'proper.beam' in ls)
        self.assertEqual(True, 'proper.app' in ls)
        self.assertEqual(2, len(ls))
        # Verify the generated .app carries version and deps from the config.
        (name, vsn, deps, _) = parse_app_config(self.ebin_dir, '.app')
        self.assertEqual('proper', name)
        self.assertEqual('1.0.0', vsn)
        self.assertEqual(deps, ['kernel', 'stdlib', 'test_dep'])

    # if parse transform belongs to a project it will be built before module using it.
    @patch('enot.global_properties.ensure_conf_file')
    def test_build_parse_transform_first(self, mock_conf):
        mock_conf.return_value = self.conf_file
        create(self.tmp_dir, {'<name>': 'project'})
        project_dir = join(self.tmp_dir, 'project')
        project_src = join(project_dir, 'src')
        # Parse transform that injects an exported sum/2 into the target module.
        with open(join(project_src, 'p_trans.erl'), 'w') as w:
            w.write('''
-module(p_trans).
-export([parse_transform/2]).
-record(support, {add_fun = true,
export = true}).
parse_transform(AST, _Options) ->
do_parse([], AST, #support{}).
do_parse(AST, [], _) -> lists:reverse(AST);
do_parse(AST, [F = {function, N, _, _, _} | Others], Support = #support{add_fun = true}) ->
M = N - 1,
AddedFun =
{function, M, sum, 2,
[{clause, M,
[{var, M, 'A'}, {var, M, 'B'}],
[],
[{op, M, '+', {var, M, 'A'}, {var, M, 'B'}}]}]},
TurnedOff = Support#support{add_fun = false},
do_parse([F | [AddedFun | AST]], Others, TurnedOff);
do_parse(AST, [E = {attribute, N, export, _} | Others], Support = #support{export = true}) ->
Export = [E | AST],
Exported = {attribute, N + 1, export, [{sum, 2}]},
TurnedOff = Support#support{export = false},
do_parse([Exported | Export], Others, TurnedOff);
do_parse(AST, [H | Others], Support) ->
do_parse([H | AST], Others, Support).
''')
        # Module that can only compile if p_trans was built first.
        with open(join(project_src, 'a_module.erl'), 'w') as w:
            w.write('''
-module(a_module).
-compile([{parse_transform, p_trans}]).
-export([hello/0]).
hello() -> hello.
''')
        package = Package.from_path(project_dir)
        compiler = EnotCompiler(package)
        self.assertEqual(True, compiler.compile())
        self.assertEqual(True, os.path.exists(join(project_dir, 'ebin')))

    # if config has some prebuild steps - they should be run
    @patch('enot.global_properties.ensure_conf_file')
    def test_prebuild(self, mock_conf):
        mock_conf.return_value = self.conf_file
        create(self.tmp_dir, {'<name>': 'project'})
        project_dir = join(self.tmp_dir, 'project')
        test_file_path = join(project_dir, 'test_file')
        set_prebuild(project_dir, [{'shell': 'echo "test" > ' + test_file_path}])
        self.assertEqual(False, os.path.exists(test_file_path))
        package = Package.from_path(project_dir)
        compiler = EnotCompiler(package)
        self.assertEqual(True, compiler.compile())
        self.assertEqual(True, os.path.exists(test_file_path))
        with open(test_file_path, 'r') as file:
            self.assertEqual('test\n', file.read())

    # if prebuild step is disabled - no prebuild will run
    @patch('enot.global_properties.ensure_conf_file')
    def test_disable_prebuild(self, mock_conf):
        mock_conf.return_value = self.conf_file
        create(self.tmp_dir, {'<name>': 'project'})
        project_dir = join(self.tmp_dir, 'project')
        test_file_path = join(project_dir, 'test_file')
        set_prebuild(project_dir,
                     [{'shell': 'echo "test" > ' + test_file_path}],
                     disable_prebuild=True)
        self.assertEqual(False, os.path.exists(test_file_path))
        package = Package.from_path(project_dir)
        compiler = EnotCompiler(package)
        self.assertEqual(True, compiler.compile())
        self.assertEqual(False, os.path.exists(test_file_path))

    # if root prebuild is disabled and override conf is true - no prebuild will run
    @patch.object(LocalCache, 'fetch_package', side_effect=mock_fetch_package)
    @patch('enot.global_properties.ensure_conf_file')
    def test_override_disable_prebuild(self, mock_conf, _):
        mock_conf.return_value = self.conf_file
        create(self.tmp_dir, {'<name>': 'project'})
        project_dir = join(self.tmp_dir, 'project')
        set_prebuild(project_dir, [], disable_prebuild=True, override_conf=True)
        # root project has dep, which has some shell prebuild step
        set_deps(project_dir,
                 [
                     {'name': 'dep',
                      'url': 'https://github.com/comtihon/dep',
                      'tag': '1.0.0'}
                 ])
        create(self.tmp_dir, {'<name>': 'dep'})
        dep_path = join(self.tmp_dir, 'dep')
        set_git_url(dep_path, 'https://github/comtihon/dep')
        set_git_tag(dep_path, '1.0.0')
        test_file_path = join(project_dir, 'test_file')
        set_prebuild(dep_path, [{'shell': 'echo "test" > ' + test_file_path}])
        builder = Builder.init_from_path(project_dir)
        builder.populate()
        self.assertEqual(True, builder.build())
        self.assertEqual(False, os.path.exists(test_file_path))  # no dep's prebuild step was executed

    # File is compiled with defined var
    @patch.object(EnotCompiler, '_EnotCompiler__write_app_file')
    def test_defines_setting(self, mock_compiler):
        mock_compiler.return_value = True
        ensure_dir(self.src_dir)
        with open(join(self.src_dir, 'proper.erl'), 'w') as w:
            w.write('''
-module(proper).
-export([test/0]).
test() -> io:format("~p~n", [?TEST_DEFINE]).
''')
        config = EnotConfig({'name': 'test'})
        package = Package(self.test_dir, config, None)
        # Pass the macro on the compiler command line (-DTEST_DEFINE=test).
        compiler = EnotCompiler(package, 'TEST_DEFINE=test')
        self.assertEqual(True, compiler.compile())
        self.assertEqual(True, os.path.exists(join(self.ebin_dir, 'proper.beam')))
        # Run the compiled module and check the define reached the beam.
        p = subprocess.Popen(['erl', '-pa', 'ebin', '-run', 'proper', 'test', '-run', 'init', 'stop', '-noshell'],
                             stdout=subprocess.PIPE,
                             cwd=self.ebin_dir)
        self.assertEqual(0, p.wait(5000))
        self.assertEqual('test\n', p.stdout.read().decode('utf8'))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
<filename>examples/domain.py
# File: domain.py
# Author: <NAME>
# Created On: 23 Oct 2019
# Purpose:
# Defines a combinatorial domain.
# Notes:
#
from .alternative import Alternative
from random import randint
class Domain:
    """A combinatorial domain: a fixed number of attributes, each taking an
    integer value in 1..value[i]."""

    # Precond:
    #   attributes is an integer representing the number of attributes in the
    #   domain.
    #   value is a list of integers. Each entry represents the number of unique
    #   values the corresponding attribute can take.
    #   The length of values must equal or exceed the value of
    #   attributes. In the case of an excess all remaining values are
    #   ignored.
    #
    # Postcond:
    #   Builds a new Domain object with the specified parameters.
    def __init__(self, attributes, value):
        self.attributes = attributes
        self.value = value[0:self.attributes]
        # If value was shorter than requested, shrink to what is available.
        self.attributes = len(self.value)

    # Precond:
    #   None.
    #
    # Postcond:
    #   Returns the number of attributes.
    def length(self):
        return self.attributes

    # Precond:
    #   attribute is an integer representing an attribute.
    #
    # Postcond:
    #   Returns the number of values associated with the given attribute.
    #   Returns 0 if a bad attribute is given.
    def attr_length(self, attribute):
        if attribute < 0 or attribute >= self.attributes:
            return 0
        return self.value[attribute]

    # Precond:
    #   None.
    #
    # Postcond:
    #   Returns the number of values associated with the largest attribute
    #   (0 for an empty domain).
    def attr_length_largest(self):
        # max() with default handles the empty-domain case like the old loop.
        return max(self.value, default=0)

    # Precond:
    #   None.
    #
    # Postcond:
    #   Returns a random pair of alternatives, with a canonical ordering.
    def random_pair(self):
        alt1 = Alternative([randint(1,self.value[i]) for i in range(self.attributes)])
        alt2 = Alternative([randint(1,self.value[i]) for i in range(self.attributes)])
        while alt1 == alt2:
            alt2 = Alternative([randint(1,self.value[i]) for i in range(self.attributes)])
        # Canonical order: smaller alternative (lexicographically) first.
        for i in range(self.attributes):
            if alt1.value(i) < alt2.value(i):
                return (alt1,alt2)
            elif alt1.value(i) > alt2.value(i):
                return (alt2,alt1)
        return (alt1, alt2)

    # Precond:
    #   size is an integer indicating the number of pairs to return.
    #
    # Postcond:
    #   Returns a random set of pairs, with no two pairs repeating.
    #   NOTE: loops forever if size exceeds the number of distinct pairs.
    def random_pair_set(self, size):
        # Fix: the previous version pre-filled the list with `size` random
        # pairs and only de-duplicated pairs added afterwards -- so the loop
        # never ran and duplicates could be returned. Build incrementally and
        # check every candidate instead.
        result = []
        while len(result) < size:
            temp = self.random_pair()
            duplicate = any(p[0] == temp[0] and p[1] == temp[1] for p in result)
            if not duplicate:
                result.append(temp)
        return result

    # Precond:
    #   alt is a valid Alternative object for this domain, or None.
    #
    # Postcond:
    #   Iterator that yields each possible alternative in numerical order
    #   (least significant attribute first). If alt is specified then the
    #   iteration starts from that alternative.
    def each(self, alt=None):
        if alt is None:
            alt = Alternative([1 for i in range(self.attributes)])
        else:
            # Copy so the caller's alternative is not mutated.
            alt = Alternative([alt.value(i) for i in range(self.attributes)])
        if not self.is_highest(alt):
            while not self.is_highest(alt):
                yield alt
                alt = self.next_alternative(alt)
        yield alt

    # Precond:
    #   None.
    #
    # Postcond:
    #   Iterator that yields each possible unique pair of alternatives in
    #   numerical order (least significant attribute first).
    def each_pair(self):
        for alt1 in self.each():
            if not self.is_highest(alt1):
                for alt2 in self.each(self.next_alternative(alt1)):
                    yield (alt1,alt2)

    # Precond:
    #   alt is a valid Alternative object.
    #
    # Postcond:
    #   Returns true if alt has the highest possible numerical value
    #   (i.e. every attribute is at its maximum).
    def is_highest(self, alt):
        for i in range(self.attributes):
            if(alt.value(i) < self.value[i]):
                return False
        return True

    # Precond:
    #   alt is a valid Alternative object
    #
    # Postcond:
    #   Returns the next alternative in numerical order (least significant
    #   attribute first).
    def next_alternative(self, alt):
        n_alt = Alternative([alt.value(i) for i in range(self.attributes)])
        n_alt.set(0,n_alt.value(0)+1)
        # Ripple-carry: overflowing attributes reset to 1 and increment the next.
        for i in range(self.attributes-1):
            if n_alt.value(i) > self.value[i]:
                n_alt.set(i,1)
                n_alt.set(i+1,n_alt.value(i+1)+1)
            else:
                return n_alt
        return n_alt

    # Precond:
    #   line is a valid string.
    #
    # Postcond:
    #   Parses the string into a domain object.
    #   If parsing fails None is returned.
    #   See example_set_file_specification.md for details.
    @staticmethod
    def parse(line):
        line = line.strip().lower()
        if line[0] != 'd':
            return None
        contents = line.split(' ')
        attrs = int(contents[1])
        values = [int(x) for x in contents[2:]]
        if attrs <= len(values):
            return Domain(attrs,values)
        return None

    # Precond:
    #   None.
    #
    # Postcond:
    #   Returns the string representation of the domain.
    def __str__(self):
        str_vals = list(map(lambda x: str(x), self.value))
        return ' '.join(['D', str(self.attributes), ' '.join(str_vals)])
|
<gh_stars>0
#!/usr/bin/env python
"""Compute magnetic-field correlation lengths for every slice of a p3d run,
parallelized over slices with MPI, and write them to 'acorl.<dirname>.dat'."""
import os
import sys
sys.path.insert(0,os.environ['HOME']+'/P3D-PLASMA-PIC/p3dpy/')
import numpy as np
from mpi4py import MPI
from subs import create_object
from AnalysisFunctions import fcorr
#
# MPI INITIALIZATION
#
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
status = MPI.Status()
#
# CREATE P3D OBJECT & OPEN OUTPUT FILE
#
rc=create_object()
rc.vars2load(['bx','by','bz'])
ie=1/np.e   # 1/e threshold for the correlation-length estimate
kind = 'int'  # 'ie' -> 1/e crossing estimate, 'int' -> integral of the correlation
#
# DECIDE THE NUMBER OF SLICES (rank 0 absorbs the remainder)
#
extslc=rc.numslices%size
# FIX: integer division -- nt must be an int for range() and array shapes.
nt=(rc.numslices-extslc)//size
if rank==0:
    bs=0; fs=nt+extslc
else:
    bs=extslc+rank*nt; fs=bs+nt
#
# CREATE OUTPUT ARRAYS
#
if rank==0:
    # FIX: 'lyc' was misspelled 'lyz', causing a NameError when filling results.
    lxc=np.zeros(rc.numslices); lyc=np.zeros(rc.numslices)
    lc=np.zeros(rc.numslices); tt=np.zeros(rc.numslices)
# FIX: every rank (including 0) needs a local buffer for its own slices.
# Rows: [time, lx, ly, l_avg]; columns are indexed locally (i - bs).
snddata=np.zeros((4,fs-bs))
#
# MAIN LOOP
#
if rank == 0:
    t_start=MPI.Wtime()
comm.Barrier()
for i in range(bs,fs):
    rc.loadslice(i)
    rx,bxcor=fcorr(rc.bx,rc.bx,ax=0,dx=rc.dx)
    ry,bycor=fcorr(rc.by,rc.by,ax=1,dx=rc.dy)
    # FIX: index locally; the old code used the global slice index into a
    # local-width array (out of bounds for every rank > 0).
    j = i - bs
    snddata[0,j]=rc.time
    if kind == "ie":
        snddata[1,j]=rx[abs(bxcor-ie).argmin()]
        # FIX: was snddata[2,1] -- always overwrote column 1.
        snddata[2,j]=ry[abs(bycor-ie).argmin()]
    elif kind == "int":
        snddata[1,j]=np.sum(bxcor)*rc.dx
        # FIX: was snddata[2,1] -- always overwrote column 1.
        snddata[2,j]=np.sum(bycor)*rc.dy
    snddata[3,j] = 0.5*(snddata[1,j]+snddata[2,j])
    print('t,lxc,lyc,lc\t',i,snddata[0,j],snddata[1,j],snddata[2,j],snddata[3,j])
#
# PROC 0 COLLECTS DATA AND WRITES THE FILE
#
if rank > 0:
    # snddata rows: [tt, lxc, lyc, lc]
    comm.send(snddata, dest=0, tag=13)
else:
    # FIX: rank 0's own results were previously discarded; store them first.
    tt[bs:fs]=snddata[0]; lxc[bs:fs]=snddata[1]
    lyc[bs:fs]=snddata[2]; lc[bs:fs]=snddata[3]
    for src in range(1,comm.size):
        tbs=extslc+src*nt; tfs=extslc+(src+1)*nt
        rcvdata=comm.recv(source=src,tag=13,status=status)
        tt[tbs:tfs]=rcvdata[0]
        lxc[tbs:tfs]=rcvdata[1]
        lyc[tbs:tfs]=rcvdata[2]
        lc[tbs:tfs]=rcvdata[3]
#
comm.Barrier()
#
if rank == 0:
    print('Done Computing. Writing the file now')
    with open('acorl.'+rc.dirname+'.dat','w') as outf:
        print('#','t,\t tt,\t lxc,\t lyc,\t lc', file=outf)
        for i in range(rc.numslices):
            print(tt[i],tt[i]*2*np.pi/rc.lx,lxc[i],lyc[i],lc[i], file=outf)
    t_fin = MPI.Wtime()-t_start
    print('Total time taken %0.3f'%t_fin)
#
|
"""Plugin to read FSM file."""
# Copyright (c) 2017
# Authors: <NAME> <<EMAIL>>
# License: BSD 3 clause
from __future__ import absolute_import, print_function, division
import struct
from os.path import basename
import numpy as np
from six import string_types
from .. import formats
from ..core import Format
from ..core.util import Spectrum
def _block_info(data):
    """Retrieve the information of the next block.

    The header is exactly 6 bytes: an unsigned short (block ID) followed by
    a signed int (block size), both little-endian.

    Parameters
    ----------
    data : bytes, length=6
        Data containing the block information

    Returns
    -------
    block_id : int
        The block ID.
    block_size : int
        The size of the block.
    """
    expected = 6
    if len(data) != expected:
        raise ValueError(
            "'data' should be 6 bytes. Got {} instead.".format(len(data)))
    block_id, block_size = struct.unpack('<Hi', data)
    return block_id, block_size
def _decode_5100(data):
    """Read the block of data with ID 5100.

    The block starts with a 2-byte length-prefixed UTF-8 name, followed by a
    104-byte packed header of acquisition parameters.

    Parameters
    ----------
    data : bytes
        The 5100 block to decode.

    Returns
    -------
    meta : dict
        The extracted information.
    """
    (name_size,) = struct.unpack('<h', data[:2])
    name = data[2:name_size + 2].decode('utf8')
    header_format = '<ddddddddddiiihBhBhBhB'
    header_size = 104
    header = struct.unpack(
        header_format, data[name_size + 2:name_size + header_size + 2])
    # Field names in header order; None marks padding fields to discard.
    keys = ('x_delta', 'y_delta', 'z_delta', 'z_start', 'z_end',
            'z_4d_start', 'z_4d_end', 'x_init', 'y_init', 'z_init',
            'n_x', 'n_y', 'n_z', None, 'text1', None, 'text2',
            'resolution', 'text3', 'transmission', 'text4')
    meta = {'name': name}
    meta.update({k: v for k, v in zip(keys, header) if k is not None})
    return meta
def _decode_5104(data):
    """Read the block of data with ID 5104.
    Parameters
    ----------
    data : bytes
        The 5104 block to decode.
    Returns
    -------
    meta : dict
        The extracted information.
    """
    # The block is a sequence of tagged fields.  Two-byte tags announce the
    # field type: b'#u' introduces a length-prefixed UTF-8 string, b'$u' and
    # b',u' a little-endian signed short.  Any unrecognized byte is skipped
    # one at a time until the next known tag.
    text = []
    start_byte = 0
    while start_byte + 2 < len(data):
        tag = data[start_byte:start_byte + 2]
        if tag == b'#u':
            start_byte += 2
            # 2-byte length prefix, then the string payload.
            text_size = struct.unpack(
                '<h', data[start_byte:start_byte + 2])[0]
            start_byte += 2
            text.append(data[start_byte:start_byte + text_size].decode('utf8'))
            start_byte += text_size
            # Skip 6 trailing bytes of the field (presumably padding --
            # TODO confirm against the FSM format specification).
            start_byte += 6
        elif tag == b'$u':
            start_byte += 2
            text.append(struct.unpack(
                '<h', data[start_byte:start_byte + 2])[0])
            start_byte += 2
            start_byte += 6
        elif tag == b',u':
            start_byte += 2
            text.append(struct.unpack(
                '<h', data[start_byte:start_byte + 2])[0])
            start_byte += 2
        else:
            start_byte += 1
    # NOTE(review): the fixed indices below assume the instrument always
    # writes these fields in the same order and count -- verify against
    # sample files before changing this mapping.
    return {'analyst': text[0],
            'date': text[2],
            'image_name': text[4],
            'instrument_model': text[5],
            'instrument_serial_number': text[6],
            'instrument_software_version': text[7],
            'accumulations': text[9],
            'detector': text[11],
            'source': text[12],
            'beam_splitter': text[13],
            'apodization': text[15],
            'spectrum_type': text[16],
            'beam_type': text[17],
            'phase_correction': text[20],
            'ir_accessory': text[26],
            'igram_type': text[28],
            'scan_direction': text[29],
            'background_scans': text[32],
            'ir_laser_wave_number_unit': text[67]}
def _decode_5105(data):
    """Read the block of data with ID 5104's spectral payload (ID 5105).

    The data are stored as little-endian 32-bit floats.

    Parameters
    ----------
    data : bytes
        The 5105 block to be decoded

    Returns
    -------
    spectra : ndarray
        The decoded float32 values.
    """
    spectra = np.frombuffer(data, dtype=np.float32)
    return spectra
# Dispatch table mapping a block ID to the decoder used for its payload.
FUNC_DECODE = {5100: _decode_5100,
               5104: _decode_5104,
               5105: _decode_5105}
class FSM(Format):
    """Plugin to read FSM file which stores IR spectroscopic data.
    This file format is used by Perkin Elmer Spotlight IR instrument.
    Notes
    -----
    See :ref:`sphx_glr_auto_examples_reader_plot_read_fsm.py`.
    Examples
    --------
    >>> from specio import specread
    >>> from specio.datasets import load_fsm_path
    >>> spectra = specread(load_fsm_path())
    >>> spectra.wavelength
    array([ 4000.,  3998.,  3996., ...,   724.,   722.,   720.])
    >>> spectra.amplitudes # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE +SKIP
    array([[  38.65655136,   38.6666069 ,   38.64698792, ...,   39.89584732,
              29.76511383,   28.13317108],
           [  44.61751175,   44.51957703,   44.59909439, ...,   27.84810638,
              48.12566376,   44.58335876],
           [  45.4976387 ,   45.44074631,   45.43001556, ...,  104.77108002,
              83.30805206,   55.3244133 ],
           ...,
           [  81.6805954 ,   81.66387177,   81.57576752, ...,  114.77721405,
             125.94933319,  121.3031311 ],
           [  85.60238647,   85.57183075,   85.57678986, ...,  118.79945374,
             154.56201172,  135.4960022 ],
           [  87.86193085,   87.94794464,   88.03714752, ...,   59.7109108 ,
              76.84341431,  122.54582214]], dtype=float32)
    """
    def _can_read(self, request):
        # Cheap extension check first, then verify the magic bytes.
        if request.filename.lower().endswith(self.extensions):
            # the 4 first bytes of a fsm file corresponds to PEPE
            if request.firstbytes[:4] == b'PEPE':
                return True
        return False
    # -- reader
    class Reader(Format.Reader):
        @staticmethod
        def _read_fsm(fsm_file):
            """Read the fsm file.
            Parameters
            ----------
            fsm_file : file object
                The file object in bytes mode.
            Returns
            -------
            spectrum : Spectrum
                Return a Spectrum instance.
            """
            content = fsm_file.read()
            # File layout: 4-byte 'PEPE' signature, 40-byte description,
            # then a sequence of (6-byte header, payload) blocks.
            start_byte = 0
            n_bytes = 4
            signature = content[start_byte:start_byte + n_bytes]
            start_byte += n_bytes
            # the description is fixed to 40 bytes
            n_bytes = 40
            description = content[
                start_byte:start_byte + n_bytes].decode('utf8')
            meta = {'signature': signature,
                    'description': description}
            spectrum = []
            # start_byte/n_bytes track the block just consumed; advance past
            # it, read the next 6-byte header, then its payload.
            while start_byte + n_bytes < len(content):
                # read block info
                start_byte += n_bytes
                n_bytes = 6
                block_id, block_size = _block_info(
                    content[start_byte:start_byte + n_bytes])
                # read the upcoming block
                start_byte += n_bytes
                n_bytes = block_size
                # Dict-valued blocks are metadata; array-valued are spectra.
                data_extracted = FUNC_DECODE[block_id](
                    content[start_byte:start_byte + n_bytes])
                if isinstance(data_extracted, dict):
                    meta.update(data_extracted)
                else:
                    spectrum.append(data_extracted)
            spectrum = np.squeeze(spectrum)
            # we add a value such that we include the endpoint
            wavelength = np.arange(meta['z_start'],
                                   meta['z_end'] + meta['z_delta'],
                                   meta['z_delta'])
            if isinstance(fsm_file, string_types):
                meta['filename'] = basename(fsm_file)
            else:
                meta['filename'] = basename(fsm_file.name)
            return Spectrum(spectrum, wavelength, meta)
        def _open(self):
            self._fp = self.request.get_file()
            self._data = self._read_fsm(self._fp)
        def _close(self):
            # Close the reader.
            # Note that the request object will close self._fp
            pass
# Register the FSM plugin with the global format registry so that specio can
# discover it from the '.fsm' extension.
format = FSM('FSM',
             'FSM Perkin Elmer Spotlight IR instrument binary format',
             '.fsm')
formats.add_format(format)
|
<reponame>nizikawa-worms/WA-Errorlog-Parser<filename>errorlog.py
import logging
import argparse
import pathlib
import re
from range_key_dict import RangeKeyDict
from tabulate import tabulate
class Module:
    """A module (EXE/DLL) entry parsed from a WA ERRORLOG.TXT crash dump."""
    # WA 3.8.1
    # Section layout of WA.exe, keyed by [start, end) virtual-address ranges
    # relative to the preferred image base 0x00400000.
    WASegments = RangeKeyDict({
        (0x00400000, 0x00401000 - 1): '.HEADER',
        (0x00401000, 0x0061A000 - 1): '.text',
        (0x0061A000, 0x0061A744 - 1): '.idata',
        (0x0061A744, 0x00694000 - 1): '.rdata',
        (0x00694000, 0x006AFD00 - 1): '.data',
        (0x006AFD00, 0x006AFE60 - 1): '.idata',
        (0x006AFE60, 0x008C5000 - 1): '.data',
    })

    def __init__(self, path, base, size, info):
        """Record a module's load address range and its raw log text."""
        self.path = path
        self.name = path.split('\\')[-1]
        self.base = base
        self.size = size
        self.end = self.base + self.size
        self.iswa = '.exe' in self.name.lower()
        # Preferred load base: WA.exe links at 0x00400000, DLLs at 0x10000000.
        self.loadingbase = 0x00400000 if self.iswa else 0x10000000
        self.info = info

    def __str__(self):
        return f'Module: {self.name} [0x{self.base:08x} - 0x{self.end:08x}]\n{self.info}'

    def rebase(self, addr):
        """Translate an absolute address back to the module's preferred base.

        Returns (rebased_address, segment_name_or_None); the segment name is
        only resolved for WA.exe itself.
        """
        rebased = addr - self.base + self.loadingbase
        if not self.iswa:
            return rebased, None
        try:
            return rebased, Module.WASegments[rebased]
        except KeyError:
            # Address outside any known WA.exe section; was a bare `except:`
            # which would also have hidden programming errors.
            return rebased, ".heap"
class ErrorLog:
    """Parser for a WA ERRORLOG.TXT crash dump."""
    def __init__(self, file):
        """Load the log contents, normalizing CRLF line endings."""
        with open(file, 'r') as f:
            self.contents = f.read().replace('\r\n', '\n')
        self.path = pathlib.Path(file)
        self.modules = []
        # RangeKeyDict mapping (base, base+size) -> Module; built lazily by
        # parseModules(), which must run before rebase()/parseStack().
        self.memory = None

    def rebase(self, addrstr):
        """Format a hex address string, annotating it with module/segment
        information when the address lies inside a known module."""
        value = int(addrstr, 16)
        try:
            module = self.memory[value]
            rebased, segment = module.rebase(value)
            return f'0x{value:08x} [{module.name} 0x{rebased:08x}{segment if segment else ""}]'
        except KeyError:
            # Address not inside any loaded module; was a bare `except:`.
            return f'0x{value:08x}'

    def parseModules(self):
        """Extract every 'Module ...' paragraph into Module objects and build
        the address-range lookup table."""
        tmp = {}
        for match in re.finditer(r"Module(.|\n)*?\n\n", self.contents):
            data = match.group()
            splitlines = data.splitlines()
            path = splitlines[1]
            addrs = re.findall(r"Image Base: 0x([0-9a-fA-F]+).+Image Size: 0x([0-9a-fA-F]+)", data)
            base = int(addrs[0][0], 16)
            size = int(addrs[0][1], 16)
            module = Module(path, base, size, data)
            tmp[(base, base + size)] = module
            self.modules.append(module)
        self.memory = RangeKeyDict(tmp)

    def parseStack(self):
        """Print the stack dump with every word annotated via rebase()."""
        lines = []
        for match in re.finditer(r"0[xX]([0-9a-fA-F]+): ([0-9a-fA-F]+) ([0-9a-fA-F]+) ([0-9a-fA-F]+) ([0-9a-fA-F]+)(.*)", self.contents):
            matches = list(match.groups())
            stackaddr = int(matches[0], 16)
            out = [f'0x{stackaddr:08x}:']
            # Middle groups are the four stack words; last group is the rest
            # of the line (ASCII dump).
            for addr in matches[1:-1]:
                out += [self.rebase(addr)]
            out += [matches[-1]]
            lines.append(out)
        print(tabulate(lines))

    def parseInfo(self):
        """Print the header (everything before 'Stack:'), rewriting every hex
        address with module annotations."""
        data = self.contents[:self.contents.index("Stack:")]
        data = re.sub(r"0[xX]([0-9a-fA-F]+)", lambda m: self.rebase(m.group()), data)
        print(data)
if __name__ == "__main__":
    # CLI entry point: parse one ERRORLOG.TXT and print the annotated header,
    # annotated stack dump, and the raw module list.
    logging.basicConfig(format='[%(filename)s:%(lineno)d] %(message)s', datefmt='%Y-%m-%d:%H:%M:%S', level=logging.DEBUG)
    parser = argparse.ArgumentParser()
    parser.add_argument('file', help="path to ERRORLOG.TXT file")
    args = parser.parse_args()
    errlog = ErrorLog(args.file)
    # Module table must be parsed first: parseInfo/parseStack rebase through it.
    errlog.parseModules()
    errlog.parseInfo()
    errlog.parseStack()
    for module in errlog.modules:
        print(module)
|
import logging
from typing import IO, Any, Dict, Optional, Tuple, cast
import yaml
from sqlalchemy import create_engine, text
from sqlalchemy.engine import Engine
from panoramic.cli.config.storage import read_context, update_context
from panoramic.cli.errors import (
ConnectionCreateException,
ConnectionUrlNotAvailableFound,
ExecuteInvalidArgumentsException,
)
from panoramic.cli.print import echo_info
logger = logging.getLogger(__name__)
def setup_connection_command(
    url: Optional[str],
    dialect: Optional[str],
    no_test: bool,
) -> None:
    """CLI command. Create new connection."""
    # Exactly one of url/dialect must be supplied.
    if bool(url) == bool(dialect):
        raise ConnectionCreateException('Must specify either a URL or dialect, not both.')
    connection = {'url': url} if url else {'dialect': dialect}
    # Only URL-based connections can be probed up front.
    if url and not no_test:
        ok, error = Connection.test(connection)
        if not ok:
            raise ConnectionCreateException(error)
    Connection.save(connection)
    echo_info('Connection was successfully created!')
def show_connection_command() -> None:
    """CLI command. List all connections."""
    connection = Connection.get()
    if connection:
        echo_info(yaml.dump(connection))
        return
    echo_info(
        'No connection setup yet.\nUse "pano connection setup" to configure connection or edit pano.yaml file.'
    )
    exit(0)
def execute_command(
    query: Optional[str],
    file: Optional[IO],
    type: Optional[str],
    name: Optional[str],
) -> None:
    """CLI command. Execute a SQL query against the configured connection,
    optionally wrapping it in CREATE OR REPLACE <type> <name> AS ...

    (The original docstring said "Update specific connection", which was a
    copy-paste from another command.)
    """
    connection = Connection.get()
    if (query and file) or (not query and not file):
        raise ExecuteInvalidArgumentsException('Either query or file must be provided but not both.')
    if file:
        query = file.read().decode("utf-8")
    # NOTE(review): assumes the CLI defaults --type to 'raw' -- confirm in
    # the click option definitions.
    if (type != 'raw') and not name:
        raise ExecuteInvalidArgumentsException(
            'When --type is used, please also set the name of the view or table by providing --name option.'
        )
    if name and type == 'raw':
        # Fixed duplicated word ("the the") in the user-facing message.
        raise ExecuteInvalidArgumentsException(
            'When --name is used, please also set the type of the created object by providing --type argument.'
        )
    if type and name:
        # Materialize the query result as a view/table.
        query = f'CREATE OR REPLACE {type} {name} AS {query}'
    return Connection.execute(cast(str, query), connection)
def test_connection_command() -> None:
    """CLI command. Test connection by trying to connect to the database."""
    connection = Connection.get()
    ok, error = Connection.test(connection)
    if ok:
        # The original printed the literal '{name}... OK': the f-prefix was
        # missing and no `name` variable exists in this scope.
        echo_info('Connection... OK')
    else:
        echo_info(f'FAIL: {error}')
class Connection:
    """Helpers for storing, loading, testing and using the configured
    database connection (a dict holding either 'url' or 'dialect')."""

    @classmethod
    def get_url(cls, connection: Dict[str, Any]) -> str:
        """Gets connection string from physical data source connection"""
        try:
            return connection['url']
        except KeyError:
            raise ConnectionUrlNotAvailableFound()

    @staticmethod
    def save(data: Dict[str, Any]) -> None:
        """Save connection YAML."""
        update_context('connection', data)

    @staticmethod
    def get() -> Dict[str, Any]:
        """Load connection YAML."""
        return read_context('connection')

    @classmethod
    def get_connection_engine(cls, connection) -> Engine:
        """Build a SQLAlchemy engine from the stored connection URL."""
        return create_engine(cls.get_url(connection))

    @classmethod
    def get_dialect_name(cls, connection) -> str:
        """Return the configured dialect, deriving it from the URL if absent."""
        try:
            return connection['dialect']
        except KeyError:
            return create_engine(cls.get_url(connection)).dialect.name

    @classmethod
    def execute(cls, sql: str, connection) -> Any:
        """Execute ``sql`` against the configured database and return the result."""
        engine = cls.get_connection_engine(connection)
        # Renamed from `connection`: the original shadowed the config dict
        # parameter with the live DB connection inside the `with` block.
        with engine.connect() as db_connection:
            return db_connection.execute(text(sql))

    @classmethod
    def test(cls, connection) -> Tuple[bool, str]:
        """Try to connect; return (ok, error-message)."""
        engine = cls.get_connection_engine(connection)
        try:
            # This will try to connect to remote database
            with engine.connect():
                pass
        except Exception as e:
            return False, str(e)
        return True, ''
|
<filename>src/audiobook_split_ffmpeg/util.py
# Copyright 2018 <NAME> (MawKKe)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Various utilities for audiobook_split_ffmpeg
"""
import os
from collections import namedtuple
from .ffmpeg import ffprobe_read_chapters
# Helper type for collecting necessary information about chapter for processing.
# start/end are chapter timestamps; ch_num/ch_max drive filename enumeration.
WorkItem = namedtuple("WorkItem",
                      ["infile", "outfile", "start", "end", "ch_num", "ch_max", "ch_title"])
# Special characters interpreted specially by most crappy software
# (stripped from chapter titles before they become filenames)
_CHR_BLACKLIST = ["\\", "/", ":", "*", "?", "\"", "<", ">", "|", "\0"]
def _sanitize_string(original):
"""
Filter typical special letters from string
"""
if original is None:
return None
return ''.join(c for c in original if c not in _CHR_BLACKLIST)
def _validate_chapter(chap):
    """
    Checks that chapter is valid (i.e has valid length).
    Returns the chapter unchanged when valid, otherwise warns and returns None.
    """
    start, end = chap['start'], chap['end']
    if end > start:
        return chap
    msg = "WARNING: chapter {0} duration <= 0 (start: {1}, end: {2}), skipping..."
    print(msg.format(chap['id'], start, end))
    return None
def _get_title_maybe(chap):
    """
    Chapter to title (string) or None
    """
    try:
        tags = chap["tags"]
    except KeyError:
        return None
    return tags.get("title", None)
def compute_workitems(infile, outdir, enumerate_files=True, use_title_in_filenames=True):
    """
    Yield a WorkItem for every valid chapter of the input file.  The items
    can then be used for launching ffmpeg processes (see ffmpeg_split_chapter).

    Arguments:
        infile
            Path to an audio(book) file. Must contain chapter information in its metadata.
        outdir
            Path to a directory where chapter files will be written. Must exist already.
        enumerate_files
            Include chapter numbers in output filenames?
        use_title_in_filenames
            Include chapter titles in output filenames?
    """
    base_root, base_ext = os.path.splitext(os.path.basename(infile))
    if not base_root or not base_ext:
        raise RuntimeError("Unexpected input filename format - root part or extension is empty")
    # Drop the single leading dot that splitext leaves on the extension.
    base_ext = base_ext[1:] if base_ext.startswith(".") else base_ext
    # Chapter metadata from ffprobe; bail out if there is none.
    info = ffprobe_read_chapters(infile)
    if (info is None) or ("chapters" not in info) or (len(info["chapters"]) == 0):
        raise RuntimeError("Could not parse chapters")
    # Keep only chapters with a positive duration.
    chapters = [ch for ch in (_validate_chapter(c) for c in info["chapters"]) if ch]
    # Chapter ids are zero-based, hence the + 1.
    ch_max = max(ch['id'] + 1 for ch in chapters)
    # All chapter numbers are zero-padded to the same width.
    pad_width = len(str(ch_max))
    for chapter in chapters:
        clean_title = _sanitize_string(_get_title_maybe(chapter))
        number = chapter["id"] + 1
        # Prefer the sanitized chapter title for the filename stem, falling
        # back to the input file's own stem.
        stem = clean_title if (use_title_in_filenames and clean_title) else base_root
        out_name = "{title}.{ext}".format(title=stem, ext=base_ext)
        if enumerate_files:
            out_name = "{0} - {1}".format(str(number).zfill(pad_width), out_name)
        yield WorkItem(
            infile=infile,
            outfile=os.path.join(outdir, out_name),
            start=chapter["start_time"],
            end=chapter["end_time"],
            ch_num=number,
            ch_max=ch_max,
            ch_title=_get_title_maybe(chapter),
        )
|
import bson
import re
from collections import defaultdict
import numpy as np
import pandas as pd
from scipy import sparse as sp
from spacy.en import English
from sqlalchemy import create_engine
import psycopg2
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction import DictVectorizer
from sklearn.preprocessing import normalize, scale
from sklearn.cross_validation import train_test_split
"""
NLP functions (NER, lemmatization) for obtaining clean tokens
"""
class NLP():
    """
    NLP functions (NER, lemmatization) for obtaining clean tokens.
    """
    # Placeholder token substituted for each spaCy named-entity type.
    _NE_PLACEHOLDERS = {
        'PERSON': 'ne_person',      # human names
        'NORP': 'ne_group',         # national/religious/political groups
        'FAC': 'ne_facility',
        'ORG': 'ne_org',
        'GPE': 'ne_gpe_place',      # geopolitical places
        'LOC': 'ne_nat_place',      # natural places
        'PRODUCT': 'ne_product',
        'LAW': 'ne_law',
        'DATE': 'ne_date',
        'TIME': 'ne_time',
        'PERCENT': 'ne_percent',
        'MONEY': 'ne_money',
        'QUANTITY': 'ne_quant',
    }

    def __init__(self, num_chars, replace_ne=True):
        self.spacy = English()
        # only consider this many characters from beginning/end of documents
        self.num_chars = num_chars
        # replace named entities with generics
        self.replace_ne = replace_ne

    def preprocessor(self, token):
        """
        Normalizes emails, people, places, and organizations to generic
        placeholder tokens; all other tokens are lemmatized.
        """
        if self.replace_ne:
            # normalize emails
            if token.like_email:
                return 'ne_email'
            # ent_iob == 3 means the NER IOB tag is B (entity begins here)
            if token.ent_iob == 3:
                placeholder = self._NE_PLACEHOLDERS.get(token.ent_type_)
                if placeholder is not None:
                    return placeholder
            # normalize numbers that aren't time/money/quantity/etc
            if token.is_digit:
                return 'ne_number'
        # return lemma for regular words
        return token.lemma_

    def tokenizer(self, doc):
        """
        Tokenizes input string with preprocessing.  Long documents are
        truncated to the first and last num_chars characters.
        """
        try:
            if len(doc) > 2 * self.num_chars:
                spacy_doc = self.spacy(doc[:self.num_chars] + doc[-self.num_chars:])
            else:
                spacy_doc = self.spacy(doc)
            return [self.preprocessor(t) for t in spacy_doc
                    if (t.is_alpha or t.like_num or t.like_email)
                    and len(t) < 50 and len(t) > 1
                    and not (t.is_punct or t.is_space or t.is_stop)]
        except Exception as exc:
            print(doc)
            # The original `raise('...')` raised a plain string, which is a
            # TypeError in Python 3; raise a real exception and keep the cause.
            raise RuntimeError('Error: failed to tokenize a document') from exc

    def set_tokenizer(self, doc):
        """
        Set-based tokenizer, i.e. count and word order are destroyed.
        Only the first num_chars characters are considered.
        """
        try:
            if len(doc) > self.num_chars:
                spacy_doc = self.spacy(doc[:self.num_chars])
            else:
                spacy_doc = self.spacy(doc)
            return {self.preprocessor(t) for t in spacy_doc
                    if (t.is_alpha or t.like_num or t.like_email)
                    and len(t) > 1
                    and not (t.is_punct or t.is_space or t.is_stop)}
        except Exception as exc:
            print(doc)
            # Same string-raise bug as tokenizer(); see above.
            raise RuntimeError('Error: failed to tokenize a document') from exc
"""
Data transformation functions to go from database dump to text features
"""
class DataProcessor():
    """
    Data transformation functions to go from database dump to text features.
    """
    def __init__(self, text_key, label_key, num_chars=300, replace_ne=True):
        # Keys under which a document dict stores its text and its label.
        self.text_key = text_key
        self.label_key = label_key
        self.nlp = NLP(num_chars, replace_ne=replace_ne)
        # Document-class name -> integer label.
        # NOTE(review): indices 16 and 17 are unused -- confirm intentional.
        self.label_dict = {'email': 0, 'internal_memo': 1,
                           'boardroom_minutes': 2, 'annual_report': 3,
                           'public_relations': 4, 'general_correspondance': 5,
                           'media': 6, 'deposition': 7,
                           'scientific_article_unpublished': 8,
                           'scientific_article_published': 9,
                           'advertisement': 10, 'trade_association': 11,
                           'contract': 12, 'budget': 13,
                           'court_transcript': 14, 'general_report': 15,
                           'not_english': 18, 'misc': 19, 'blank': 20}
        self.inv_label_dict = {v: k for k, v in self.label_dict.items()}
        self.label_index_list = sorted(list(self.inv_label_dict.keys()))
        self.label_name_list = sorted(self.label_dict.keys(), key=lambda x: self.label_dict[x])

    def load_bson(self, bson_file):
        """
        Takes a bson and returns the corresponding list of dicts, the label of
        each doc (-1 when unlabeled/unknown), and per-label counts.
        """
        # 'rb' for read as binary; `with` closes the handle (original leaked it)
        with open(bson_file, 'rb') as f:
            docs = bson.decode_all(f.read())
        labels = np.zeros((len(docs), 1))
        counts = defaultdict(int)
        for i in range(len(docs)):
            try:
                labels[i] = self.label_dict[docs[i][self.label_key]]
                counts[docs[i][self.label_key]] += 1
            except KeyError:
                # Doc has no label field, or the label is not in label_dict.
                labels[i] = -1
                counts['unlabeled'] += 1
        return docs, labels, counts

    def write_to_db(self, docs, user, pw, host, db_name):
        """
        Writes the document dump (list of dicts) to a PostgreSQL table.
        """
        db = create_engine('postgres://%s:%s@%s/%s' %
                           (user, pw, host, db_name))
        # BUGFIX: original referenced undefined names `dbname` and `engine`
        # (NameError); the parameter is db_name and the engine is `db`.
        conn = psycopg2.connect(database=db_name, user=user)
        df = pd.DataFrame(docs)
        df['_id'] = df['_id'].map(str)
        df.to_sql('toxic_docs_table', db)
        conn.close()
        return

    def vectorize(self, docs, min_df=2, max_ngram=2):
        """
        Applies a TF-IDF transformer and count vectorizer to the corpus to
        build n-gram features for classification.
        """
        docs = [x[self.text_key] for x in docs]
        vectorizer = TfidfVectorizer(min_df=min_df,
                                     ngram_range=(1, max_ngram), tokenizer=self.nlp.tokenizer, sublinear_tf=True)
        return vectorizer, vectorizer.fit_transform(docs), vectorizer.get_feature_names()

    def get_feats(self, docs, key_list):
        """
        Retrieves document features (the keys in key_list, plus log-scaled
        num_pages/length) of the ToxicDocs collection.
        """
        feats = []
        for doc in docs:
            feats.append({k: v for k, v in doc.items() if k in key_list})
            try:
                feats[-1]['num_pages'] = 1 + np.log(feats[-1]['num_pages'])
            except KeyError:
                # Document has no num_pages field; skip that feature.
                pass
            feats[-1]['length'] = 1 + np.log(len(doc[self.text_key]))
        vectorizer = DictVectorizer()
        X_feats = vectorizer.fit_transform(feats)
        X_feats = normalize(X_feats, axis=0, norm='max')
        return X_feats

    def stack_feats(self, X, feats):
        """
        Stacks extra features onto given data matrix.
        """
        X = sp.hstack((X, feats))
        return X.tocsr()

    def split_data(self, y_all, X_all, split=0.7, seed=0):
        """
        Splits the labeled subset (label != -1) into train and test sets,
        returning the unlabeled rows and all original indices as well.
        """
        indices = np.arange(y_all.shape[0])
        X_unlab = X_all[(y_all == -1).flatten()]
        ind_unlab = indices[(y_all == -1).flatten()]
        y_valid = y_all[y_all != -1]
        X_valid = X_all[(y_all != -1).flatten()]
        ind_valid = indices[(y_all != -1).flatten()]
        X_train, X_test, y_train, y_test, ind_train, ind_test =\
            train_test_split(X_valid, y_valid, ind_valid,
                             train_size=split, random_state=seed)
        return y_train, X_train, ind_train, y_test, X_test, ind_test, X_unlab, ind_unlab

    def merge_classes(self, merge_list, y):
        """
        Merges pairs of classes given in the form [(a,b), (c,d), ...];
        labels b, d, ... are folded into a, c, ... and removed from the
        label bookkeeping.
        """
        y_merged = y.copy()
        for y1, y2 in merge_list:
            y_merged[y_merged == y2] = y1
            self.label_index_list.remove(y2)
            self.label_name_list.remove(self.inv_label_dict[y2])
            self.label_dict.pop(self.inv_label_dict[y2], None)
            self.inv_label_dict.pop(y2, None)
        return y_merged

    def ocr_quality(self, docs):
        """
        Returns a measure of OCR quality of each document: the fraction of
        tokens found in spaCy's vocabulary (-1 for empty documents).
        """
        ocr_quality = np.zeros(len(docs))
        for i, doc in enumerate(docs):
            spacy_doc = self.nlp.spacy(doc[self.text_key])
            num_tokens = 0
            num_valid = 0
            for token in spacy_doc:
                num_tokens += 1
                if not token.is_oov:
                    num_valid += 1
            if num_tokens == 0:
                ocr_quality[i] = -1
            else:
                ocr_quality[i] = num_valid / num_tokens
        return ocr_quality
|
<gh_stars>0
import os
import unittest
from abc import ABC
from shutil import rmtree
import numpy as np
import z5py
try:
import zarr
import numcodecs
zarr_version = zarr.__version__
except ImportError:
zarr = None
zarr_version = "0.0.0"
class ZarrTestMixin(ABC):
    """Interop tests: data written by zarr must be readable by z5py and
    vice versa.  Subclasses set `path` to select the storage format."""
    shape = (100, 100)
    chunks = (20, 20)

    def tearDown(self):
        try:
            rmtree(self.path)
        except OSError:
            pass

    def test_read_zarr_irregular(self):
        # Shapes that are not multiples of the chunk size.
        shape = (123, 97)
        chunks = (17, 32)
        data = np.random.rand(*shape)
        fz = zarr.open(self.path)
        fz.create_dataset('test', data=data, chunks=chunks)
        f = z5py.File(self.path)
        out = f['test'][:]
        self.assertEqual(data.shape, out.shape)
        self.assertTrue(np.allclose(data, out))

    def test_write_zarr_irregular(self):
        shape = (123, 97)
        chunks = (17, 32)
        data = np.random.rand(*shape)
        f = z5py.File(self.path)
        f.create_dataset('test', data=data, chunks=chunks)
        # BUGFIX: read back with zarr (the original opened z5py.File again,
        # so zarr's reader was never exercised; cf. test_write_zarr below).
        fz = zarr.open(self.path)
        out = fz['test'][:]
        self.assertEqual(data.shape, out.shape)
        self.assertTrue(np.allclose(data, out))

    def test_read_zarr(self):
        from z5py.dataset import Dataset
        dtypes = list(Dataset._dtype_dict.keys())
        zarr_compressors = {'blosc': numcodecs.Blosc(),
                            'zlib': numcodecs.Zlib(),
                            'raw': None,
                            'bzip2': numcodecs.BZ2()}
        # TODO lz4 compression is currently not compatible with zarr
        # 'lz4': numcodecs.LZ4()}
        # conda-forge version of numcodecs is not up-to-date
        # for python 3.5 and GZip is missing
        # that's why we need to check explicitly here to not fail the test
        if hasattr(numcodecs, 'GZip'):
            zarr_compressors.update({'gzip': numcodecs.GZip()})
        f_zarr = zarr.open(self.path, mode='a')
        f_z5 = z5py.File(self.path, mode='r')
        for dtype in dtypes:
            for compression in zarr_compressors:
                data = np.random.randint(0, 127, size=self.shape).astype(dtype)
                # write the data with zarr
                key = 'test_%s_%s' % (dtype, compression)
                ar = f_zarr.create_dataset(key,
                                           shape=self.shape,
                                           chunks=self.chunks,
                                           dtype=dtype,
                                           compressor=zarr_compressors[compression])
                ar[:] = data
                # read with z5py
                out = f_z5[key][:]
                self.assertEqual(data.shape, out.shape)
                self.assertTrue(np.allclose(data, out))

    def test_write_zarr(self):
        from z5py.dataset import Dataset
        dtypes = list(Dataset._dtype_dict.keys())
        compressions = Dataset.compressors_zarr if self.path.endswith('.zr') else Dataset.compressors_n5
        f_z5 = z5py.File(self.path, mode='a')
        f_zarr = zarr.open(self.path, mode='r')
        for dtype in dtypes:
            for compression in compressions:
                # lz4 compressions are not compatible
                if compression == 'lz4':
                    continue
                data = np.random.randint(0, 127, size=self.shape).astype(dtype)
                key = 'test_%s_%s' % (dtype, compression)
                # write with z5py
                f_z5.create_dataset(key, data=data, compression=compression,
                                    chunks=self.chunks)
                # read the data with zarr
                out = f_zarr[key][:]
                self.assertEqual(data.shape, out.shape)
                self.assertTrue(np.allclose(data, out))

    def test_attributes(self):
        f = zarr.open(self.path)
        test_attrs = {"a": "b", "1": 2, "x": ["y", "z"]}
        attrs = f.attrs
        for k, v in test_attrs.items():
            attrs[k] = v
        f = z5py.File(self.path)
        attrs = f.attrs
        for k, v in test_attrs.items():
            self.assertTrue(k in attrs)
            self.assertEqual(attrs[k], v)
@unittest.skipUnless(zarr, 'Requires zarr package')
class TestZarrZarr(ZarrTestMixin, unittest.TestCase):
    """Run the interop suite against the zarr (.zr) storage format."""
    path = 'f.zr'

    # custom fill-value is only supported in zarr format
    def test_fillvalue(self):
        test_values = [0, 10, 42, 255]
        zarr.open(self.path)
        for val in test_values:
            key = 'test_%i' % val
            zarr.open(os.path.join(self.path, key), shape=self.shape,
                      fill_value=val, dtype='<u1')
            out = z5py.File(self.path)[key][:]
            self.assertEqual(self.shape, out.shape)
            self.assertTrue(np.allclose(val, out))

    # BUGFIX: compare (major, minor) as a tuple.  The original compared only
    # the minor component, so e.g. zarr 3.0 would have been skipped.
    @unittest.skipIf(tuple(map(int, zarr_version.split(".")[:2])) < (2, 10),
                     "Need zarr >= 2.10 for support of nested storage")
    def test_zarr_nested(self):
        data = np.random.rand(128, 128)
        # NOTE(review): zarr groups may not implement the context-manager
        # protocol -- confirm `with zarr.open(...)` works on the tested version.
        with zarr.open(self.path, mode="a") as f:
            f.create_dataset("data", data=data, chunks=(16, 16), dimension_separator="/")
        with z5py.File(self.path, mode="r") as f:
            res = f["data"][:]
        self.assertTrue(np.allclose(data, res))
# Run the same interop suite against the N5 storage backend.
@unittest.skipUnless(zarr, 'Requires zarr package')
class TestZarrN5(ZarrTestMixin, unittest.TestCase):
    path = 'f.n5'
if __name__ == '__main__':
    unittest.main()
|
import math
from enum import Enum
import torch.nn as nn
import torch.nn.functional as F
from pytorch_pretrained_bert import BertModel
from pytorch_pretrained_bert.modeling import BertLayerNorm
import flint.span_util as span_util
import flint.torch_util as torch_util
import torch
def gelu(x):
    """Implementation of the gelu activation function.
    For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    """
    # Exact GELU via the Gaussian CDF: x * Phi(x).
    cdf = 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    return x * cdf
def swish(x):
    """Swish activation: x * sigmoid(x)."""
    gate = torch.sigmoid(x)
    return x * gate
# Name -> activation function lookup table.  torch.tanh replaces the
# deprecated nn.functional.tanh alias (identical behavior).
ACT2FN = {"gelu": gelu, "relu": F.relu, "swish": swish, 'tanh': torch.tanh}
def init_bert_weights(module):
    """Initialize the weights of a BERT submodule in place.

    Linear/Embedding weights and LayerNorm beta/gamma are drawn from
    N(0, 0.02); Linear biases are zeroed.
    """
    std = 0.02
    if isinstance(module, (nn.Linear, nn.Embedding)):
        # Slightly different from the TF version which uses truncated_normal for initialization
        # cf https://github.com/pytorch/pytorch/pull/5617
        module.weight.data.normal_(mean=0.0, std=std)
    elif isinstance(module, BertLayerNorm):
        module.beta.data.normal_(mean=0.0, std=std)
        module.gamma.data.normal_(mean=0.0, std=std)
    if isinstance(module, nn.Linear) and module.bias is not None:
        module.bias.data.zero_()
class BertPairMaxOutMatcher(nn.Module):
    """Matches two spans of a BERT-encoded sequence by max-pooling each span
    and classifying the concatenated pair representation."""
    class ForwardMode(Enum):
        # TRAIN returns a loss (labels required); EVAL returns raw logits.
        TRAIN = 0
        EVAL = 1
    def __init__(self, bert_encoder: BertModel, num_of_class, act_type="gelu", dropout_rate=None, num_of_out_layers=4,
                 use_sigmoid=False):
        super(BertPairMaxOutMatcher, self).__init__()
        self.bert_encoder = bert_encoder
        # Number of final BERT layers whose outputs are concatenated.
        self.num_of_out_layers = num_of_out_layers
        self.num_of_class = num_of_class
        # Pair features are [s1, s2, |s1-s2|, s1*s2], hence the factor 4.
        self.matching_hidden_dimension = \
            4 * self.num_of_out_layers * self.bert_encoder.config.hidden_size
        self.matching_intermediate_dimension = self.bert_encoder.config.intermediate_size
        if dropout_rate is None:
            dropout_rate = self.bert_encoder.config.hidden_dropout_prob
        self.matching_layer1 = nn.Linear(self.matching_hidden_dimension, self.matching_intermediate_dimension)
        self.matching_layer2 = nn.Linear(self.matching_intermediate_dimension, num_of_class)
        self.dropout = nn.Dropout(dropout_rate)
        self.activation = ACT2FN[act_type]
        # NOTE(review): match_layers bundles the same Linear modules as
        # above but is never used in forward() (which applies matching_layer1
        # with self.activation, not ReLU) -- confirm whether it is dead code.
        self.match_layers = nn.Sequential(*[self.matching_layer1, nn.ReLU(), self.dropout, self.matching_layer2])
        self.use_sigmoid = False
        # Sigmoid/BCE head is only valid for single-logit (binary) output.
        if self.num_of_class == 1 and use_sigmoid:
            self.use_sigmoid = use_sigmoid
        elif self.num_of_class != 1 and use_sigmoid:
            raise ValueError("Can not use sigmoid when number of labels is 1.")
    @staticmethod
    def span_maxpool(input_seq, span):  # [B, T, D]
        # Select the tokens inside each span, then max over the time axis.
        selected_seq, selected_length = span_util.span_select(input_seq, span)  # [B, T, D]
        maxout_r = torch_util.max_along_time(selected_seq, selected_length)
        return maxout_r
    def forward(self, seq, token_type_ids, attention_mask, s1_span, s2_span, mode, labels=None):
        # Encode and keep all layer outputs so the last N can be concatenated.
        encoded_layers, _ = self.bert_encoder(seq, token_type_ids, attention_mask,
                                              output_all_encoded_layers=True)
        selected_output_layers = encoded_layers[-self.num_of_out_layers:]  # [[B, T, D]] 0, 1, 2
        # context_length = att_mask.sum(dim=1)
        selected_output = torch.cat(selected_output_layers, dim=2)  # Concat at last layer.
        # Max-pooled representation of each span.
        s1_out = self.span_maxpool(selected_output, s1_span)
        s2_out = self.span_maxpool(selected_output, s2_span)
        s1_out = self.dropout(s1_out)
        s2_out = self.dropout(s2_out)
        # Standard sentence-pair feature vector: [a, b, |a-b|, a*b].
        paired_out = torch.cat([s1_out, s2_out, torch.abs(s1_out - s2_out), s1_out * s2_out], dim=1)
        paired_out_1 = self.dropout(self.activation(self.matching_layer1(paired_out)))
        logits = self.matching_layer2(paired_out_1)
        if mode == BertPairMaxOutMatcher.ForwardMode.TRAIN:
            if self.use_sigmoid:
                assert labels is not None
                loss_fn = nn.BCEWithLogitsLoss()
                loss = loss_fn(logits, labels.unsqueeze(-1).float())
                return loss
            else:
                assert labels is not None
                loss_fct = nn.CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_of_class), labels.view(-1))
                return loss
        else:
            return logits
class BertSupervisedVecClassifier(nn.Module):
    """Classifier over sentence vectors produced by a BERT content2vec model.

    Encodes two sequences independently, combines the vectors with the
    standard matching features [u; v; |u - v|; u * v], and classifies with a
    two-layer MLP.
    """

    class ForwardMode(Enum):
        TRAIN = 0
        EVAL = 1

    def __init__(self, bert_content2vec_model, num_of_class, dropout_rate=None):
        super(BertSupervisedVecClassifier, self).__init__()
        self.bert_content2vec_model = bert_content2vec_model
        self.num_of_class = num_of_class

        encoder_config = self.bert_content2vec_model.bert_encoder.config
        # Four matching features, each spanning the concatenated output layers.
        self.matching_hidden_dimension = \
            4 * self.bert_content2vec_model.num_of_out_layers * encoder_config.hidden_size
        self.matching_intermediate_dimension = encoder_config.intermediate_size
        if dropout_rate is None:
            dropout_rate = encoder_config.hidden_dropout_prob

        self.matching_layer1 = nn.Linear(self.matching_hidden_dimension, self.matching_intermediate_dimension)
        self.matching_layer2 = nn.Linear(self.matching_intermediate_dimension, self.num_of_class)
        self.dropout = nn.Dropout(dropout_rate)
        self.match_layers = nn.Sequential(*[self.matching_layer1, nn.ReLU(), self.dropout, self.matching_layer2])

    def forward(self, s1_seq, s1_mask, s2_seq, s2_mask, mode, labels=None):
        """Return the cross-entropy loss in TRAIN mode, otherwise the logits."""
        u = self.dropout(self.bert_content2vec_model(s1_seq, attention_mask=s1_mask))
        v = self.dropout(self.bert_content2vec_model(s2_seq, attention_mask=s2_mask))
        features = torch.cat([u, v, torch.abs(u - v), u * v], dim=1)
        logits = self.match_layers(features)

        if mode != BertSupervisedVecClassifier.ForwardMode.TRAIN:
            return logits
        assert labels is not None
        loss_fct = nn.CrossEntropyLoss()
        return loss_fct(logits.view(-1, self.num_of_class), labels.view(-1))
|
<reponame>ATestGroup233/mindspore
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""YoloV4 310 infer."""
import os
import argparse
import datetime
import time
import numpy as np
from pycocotools.coco import COCO
from src.logger import get_logger
from src.eval_utils import DetectionEngine
# Command-line interface for the 310-inference evaluation script.
parser = argparse.ArgumentParser('mindspore coco testing')

# dataset related
parser.add_argument('--per_batch_size', default=1, type=int, help='batch size for per gpu')

# logging related
parser.add_argument('--log_path', type=str, default='outputs/', help='checkpoint save location')

# detect related
parser.add_argument('--nms_thresh', type=float, default=0.5, help='threshold for NMS')
parser.add_argument('--ann_file', type=str, default='', help='path to annotation')
parser.add_argument('--eval_ignore_threshold', type=float, default=0.001, help='threshold to throw low quality boxes')
parser.add_argument('--img_id_file_path', type=str, default='', help='path of image dataset')
# Typo fix in help text: "floder" -> "folder".
parser.add_argument('--result_files', type=str, default='./result_Files', help='path to 310 infer result folder')

# parse_known_args tolerates unrelated argv entries (e.g. launcher flags).
args, _ = parser.parse_known_args()
class Redirct:
    """Minimal file-like sink that accumulates everything written to it.

    Used to capture text that would otherwise go to a stream; ``flush`` clears
    the buffer instead of forwarding it anywhere.
    """

    def __init__(self):
        self.content = ""

    def write(self, content):
        # Append rather than emit: callers read .content back afterwards.
        self.content = self.content + content

    def flush(self):
        # Intentionally discards the buffer (not a real flush-to-stream).
        self.content = ""
if __name__ == "__main__":
    start_time = time.time()

    # Log into a timestamped sub-directory so repeated runs don't collide.
    args.outputs_dir = os.path.join(args.log_path,
                                    datetime.datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S'))
    args.logger = get_logger(args.outputs_dir, 0)

    # init detection engine
    detection = DetectionEngine(args)
    coco = COCO(args.ann_file)
    result_path = args.result_files

    files = os.listdir(args.img_id_file_path)
    for file in files:
        # File names are bare numeric image ids (e.g. "000139.jpg" -> 139).
        img_ids_name = file.split('.')[0]
        img_id = int(np.squeeze(img_ids_name))
        imgIds = coco.getImgIds(imgIds=[img_id])
        # getImgIds returns a single-element list here, so the random index is
        # effectively always 0 — kept for behavioral compatibility.
        img = coco.loadImgs(imgIds[np.random.randint(0, len(imgIds))])[0]
        image_shape = ((img['width'], img['height']),)
        img_id = (np.squeeze(img_ids_name),)

        # The 310 inference step dumps three YOLO heads per image as raw
        # float32 .bin files (small/medium/big grid resolutions).
        result_path_0 = os.path.join(result_path, img_ids_name + "_0.bin")
        result_path_1 = os.path.join(result_path, img_ids_name + "_1.bin")
        result_path_2 = os.path.join(result_path, img_ids_name + "_2.bin")
        output_small = np.fromfile(result_path_0, dtype=np.float32).reshape(1, 19, 19, 3, 85)
        output_me = np.fromfile(result_path_1, dtype=np.float32).reshape(1, 38, 38, 3, 85)
        output_big = np.fromfile(result_path_2, dtype=np.float32).reshape(1, 76, 76, 3, 85)
        detection.detect([output_small, output_me, output_big], args.per_batch_size, image_shape, img_id)

    args.logger.info('Calculating mAP...')
    detection.do_nms_for_results()
    result_file_path = detection.write_result()
    args.logger.info('result file path: {}'.format(result_file_path))
    eval_result = detection.get_eval_result()
    cost_time = time.time() - start_time
    # Typo fix in log message: "reulst" -> "result".
    args.logger.info('\n=============coco eval result=========\n' + eval_result)
    args.logger.info('testing cost time {:.2f}h'.format(cost_time / 3600.))
|
<reponame>dkkim93/meta-mapg
import torch
import gym
import numpy as np
import torch.nn.functional as F
from gym_env import make_env
from meta.dice import get_dice_loss
from meta.linear_baseline import LinearFeatureBaseline
from torch.autograd import Variable
from torch.distributions import Categorical, Normal
from collections import OrderedDict
from misc.torch_utils import get_parameters, get_named_parameters
from misc.rl_utils import get_return
class Base(object):
    """Base class that has shared methods between a meta-agent and an opponent

    Args:
        log (dict): Dictionary that contains python logging
        tb_writer (SummeryWriter): Used for tensorboard logging
        args (argparse): Python argparse that contains arguments
        name (str): Specifies agent's name
        i_agent (int): Agent index among the agents in the shared environment
        rank (int): Used for thread-specific meta-agent for multiprocessing. Default: -1
    """
    def __init__(self, log, tb_writer, args, name, i_agent, rank):
        super(Base, self).__init__()

        self.log = log
        self.tb_writer = tb_writer
        self.args = args
        # Agent index is appended so log lines distinguish agents (e.g. "meta0").
        self.name = name + str(i_agent)
        self.i_agent = i_agent
        self.rank = rank

    def _set_dim(self):
        """Probe a throwaway environment instance for this agent's I/O sizes."""
        env = make_env(self.args)

        # Box -> continuous observation vector length; otherwise discrete
        # space size (observations are one-hot in that case — see act()).
        if isinstance(env.observation_space[self.i_agent], gym.spaces.Box):
            self.input_dim = env.observation_space[self.i_agent].shape[0]
        else:
            self.input_dim = env.observation_space[self.i_agent].n

        if isinstance(env.action_space[self.i_agent], gym.spaces.Box):
            self.output_dim = env.action_space[self.i_agent].shape[0]
        else:
            self.output_dim = env.action_space[self.i_agent].n
        env.close()

        self.log[self.args.log_name].info("[{}] Input dim: {}".format(
            self.name, self.input_dim))
        self.log[self.args.log_name].info("[{}] Output dim: {}".format(
            self.name, self.output_dim))

    def _set_action_type(self):
        """Record whether this agent's action space is discrete (Categorical)
        or continuous (Normal), and the dtype actions are cast to."""
        env = make_env(self.args)
        if isinstance(env.action_space[self.i_agent], gym.spaces.Discrete):
            self.is_discrete_action = True
            self.action_dtype = int
        else:
            self.is_discrete_action = False
            self.action_dtype = float
        env.close()

        self.log[self.args.log_name].info("[{}] Discrete action space: {}".format(
            self.name, self.is_discrete_action))

    def _set_linear_baseline(self):
        """Linear feature baseline used for variance reduction in inner_update."""
        self.linear_baseline = LinearFeatureBaseline(
            input_size=self.input_dim, args=self.args)

    def _get_value_loss(self, value, reward):
        """MSE between per-timestep value predictions and empirical returns."""
        value = torch.stack(value, dim=1)
        return_ = get_return(reward, self.args)
        assert value.shape == return_.shape
        return F.mse_loss(value, return_)

    def reset_lstm_state(self):
        """Zero the actor (and, for meta-agents, value) LSTM hidden states.

        Tabular policies carry no recurrent state, so they return early.
        NOTE(review): torch.autograd.Variable is a no-op wrapper in modern
        PyTorch; plain torch.zeros would suffice — confirm before changing.
        """
        if hasattr(self, 'is_tabular_policy'):
            return

        self.actor_hidden = (
            Variable(torch.zeros(self.args.traj_batch_size, self.args.n_hidden)),
            Variable(torch.zeros(self.args.traj_batch_size, self.args.n_hidden)))

        # Only meta-agents maintain a separate value network (used for the
        # outer-loop advantage; see act()).
        if "meta" in self.name:
            self.value_hidden = (
                Variable(torch.zeros(self.args.traj_batch_size, self.args.n_hidden)),
                Variable(torch.zeros(self.args.traj_batch_size, self.args.n_hidden)))

    def act(self, obs, actor):
        """Sample an action from the current policy.

        Returns a tuple of (action as numpy array cast to action_dtype,
        log-probability, entropy, value) — value is None for non-meta agents.
        """
        if isinstance(obs, np.ndarray):
            obs = torch.from_numpy(obs).float()

        # Compute action probability
        if hasattr(self, 'is_tabular_policy'):
            # Tabular policy: observation is one-hot; its argmax indexes a row
            # of the policy table.
            obs = torch.argmax(obs, dim=1)
            probs = F.softmax(actor[obs, :], dim=1)
        else:
            # An OrderedDict actor holds adapted (post-inner-update) parameters;
            # None means "use the network's own parameters".
            params = actor if isinstance(actor, OrderedDict) else None
            if self.is_discrete_action:
                probs, self.actor_hidden = self.actor((obs, self.actor_hidden), params=params)
                probs = probs.squeeze(-1)
            else:
                mu, scale, self.actor_hidden = self.actor((obs, self.actor_hidden), params=params)

        # Compute action, logprob, and entropy
        if self.is_discrete_action:
            distribution = Categorical(probs=probs)
        else:
            distribution = Normal(loc=mu, scale=scale)
        action = distribution.sample()
        logprob = distribution.log_prob(action)
        # For multi-dimensional continuous actions, sum per-dimension terms.
        if len(logprob.shape) == 2:
            logprob = torch.sum(logprob, dim=-1)
        entropy = distribution.entropy()
        if len(entropy.shape) == 2:
            entropy = torch.sum(entropy, dim=-1)

        # Compute value for advantage at outer loop
        if "meta" in self.name:
            value, self.value_hidden = self.value((obs, self.value_hidden), params=None)
            value = value.squeeze(-1)
        else:
            value = None

        return action.numpy().astype(self.action_dtype), logprob, entropy, value

    def inner_update(self, actor, memory, i_joint, is_train):
        """One inner-loop adaptation step; returns adapted parameters (phi).

        Returns None at the end of the chain horizon. ``create_graph=is_train``
        keeps the gradient graph so the outer loop can differentiate through
        the adaptation during training.
        """
        if i_joint == self.args.chain_horizon:
            return None

        obs, logprobs, _, _, rewards = memory.sample()

        # Compute value for baseline
        reward = rewards[self.i_agent]
        value = self.linear_baseline(obs, reward)

        # Compute DiCE loss
        actor_loss = get_dice_loss(logprobs, reward, value, self.args, self.i_agent, is_train)

        # Get adapted parameters
        actor_grad = torch.autograd.grad(actor_loss, get_parameters(actor), create_graph=is_train)

        if hasattr(self, 'is_tabular_policy'):
            # Tabular policy: a single parameter tensor, fixed step size 1.
            phi = actor - 1. * actor_grad[0]
        else:
            phi = OrderedDict()
            # Meta-agents use a learned per-step learning rate; opponents use
            # the fixed inner-loop learning rate.
            lr = self.dynamic_lr[i_joint] if "meta" in self.name else self.args.actor_lr_inner
            for (name, param), grad in zip(get_named_parameters(actor), actor_grad):
                phi[name] = param - lr * grad

        return phi

    # The following are implemented by the concrete meta-agent / opponent
    # subclasses.
    def _set_policy(self):
        raise NotImplementedError()

    def _set_dynamic_lr(self):
        raise NotImplementedError()

    def share_memory(self):
        raise NotImplementedError()

    def sync(self):
        raise NotImplementedError()

    def set_persona(self):
        raise NotImplementedError()

    def get_outer_loss(self, memories, iteration):
        raise NotImplementedError()
|
"""This submodule implements some convenience dictionaries for working with the periodic table.
Data was scraped from a combination of the periodictable Python package (on which matador used to
depend) and pymatgen.
"""
from typing import Dict
from dataclasses import dataclass
__all__ = ("PERIODIC_TABLE",)
@dataclass
class Element:
    """Static data for a single element of the periodic table."""

    # Element symbol (also the key into PERIODIC_TABLE).
    symbol: str
    # Atomic number; 0 denotes the bare-neutron pseudo-element "n".
    number: int
    # Standard atomic mass in unified atomic mass units (u).
    mass: float
    # Position in the IUPAC ordering convention used when writing chemical
    # formulae; -1 for the neutron pseudo-element.
    iupac_order: int


# Keyed by element symbol; data scraped from the `periodictable` package and
# pymatgen (see module docstring).
PERIODIC_TABLE: Dict[str, Element] = {
    "n": Element(symbol="n", number=0, mass=0, iupac_order=-1),
    "H": Element(symbol="H", number=1, mass=1.00794, iupac_order=92),
    "He": Element(symbol="He", number=2, mass=4.002602, iupac_order=5),
    "Li": Element(symbol="Li", number=3, mass=6.941, iupac_order=11),
    "Be": Element(symbol="Be", number=4, mass=9.012182, iupac_order=17),
    "B": Element(symbol="B", number=5, mass=10.811, iupac_order=81),
    "C": Element(symbol="C", number=6, mass=12.0107, iupac_order=86),
    "N": Element(symbol="N", number=7, mass=14.0067, iupac_order=91),
    "O": Element(symbol="O", number=8, mass=15.9994, iupac_order=97),
    "F": Element(symbol="F", number=9, mass=18.9984032, iupac_order=102),
    "Ne": Element(symbol="Ne", number=10, mass=20.1797, iupac_order=4),
    "Na": Element(symbol="Na", number=11, mass=22.98977, iupac_order=10),
    "Mg": Element(symbol="Mg", number=12, mass=24.305, iupac_order=16),
    "Al": Element(symbol="Al", number=13, mass=26.981538, iupac_order=80),
    "Si": Element(symbol="Si", number=14, mass=28.0855, iupac_order=85),
    "P": Element(symbol="P", number=15, mass=30.973761, iupac_order=90),
    "S": Element(symbol="S", number=16, mass=32.065, iupac_order=96),
    "Cl": Element(symbol="Cl", number=17, mass=35.453, iupac_order=101),
    "Ar": Element(symbol="Ar", number=18, mass=39.948, iupac_order=3),
    "K": Element(symbol="K", number=19, mass=39.0983, iupac_order=9),
    "Ca": Element(symbol="Ca", number=20, mass=40.078, iupac_order=15),
    "Sc": Element(symbol="Sc", number=21, mass=44.95591, iupac_order=49),
    "Ti": Element(symbol="Ti", number=22, mass=47.867, iupac_order=52),
    "V": Element(symbol="V", number=23, mass=50.9415, iupac_order=55),
    "Cr": Element(symbol="Cr", number=24, mass=51.9961, iupac_order=58),
    "Mn": Element(symbol="Mn", number=25, mass=54.938049, iupac_order=61),
    "Fe": Element(symbol="Fe", number=26, mass=55.845, iupac_order=64),
    "Co": Element(symbol="Co", number=27, mass=58.9332, iupac_order=67),
    "Ni": Element(symbol="Ni", number=28, mass=58.6934, iupac_order=70),
    "Cu": Element(symbol="Cu", number=29, mass=63.546, iupac_order=73),
    "Zn": Element(symbol="Zn", number=30, mass=65.409, iupac_order=76),
    "Ga": Element(symbol="Ga", number=31, mass=69.723, iupac_order=79),
    "Ge": Element(symbol="Ge", number=32, mass=72.64, iupac_order=84),
    "As": Element(symbol="As", number=33, mass=74.9216, iupac_order=89),
    "Se": Element(symbol="Se", number=34, mass=78.96, iupac_order=95),
    "Br": Element(symbol="Br", number=35, mass=79.904, iupac_order=100),
    "Kr": Element(symbol="Kr", number=36, mass=83.798, iupac_order=2),
    "Rb": Element(symbol="Rb", number=37, mass=85.4678, iupac_order=8),
    "Sr": Element(symbol="Sr", number=38, mass=87.62, iupac_order=14),
    "Y": Element(symbol="Y", number=39, mass=88.90585, iupac_order=48),
    "Zr": Element(symbol="Zr", number=40, mass=91.224, iupac_order=51),
    "Nb": Element(symbol="Nb", number=41, mass=92.90638, iupac_order=54),
    "Mo": Element(symbol="Mo", number=42, mass=95.94, iupac_order=57),
    "Tc": Element(symbol="Tc", number=43, mass=98, iupac_order=60),
    "Ru": Element(symbol="Ru", number=44, mass=101.07, iupac_order=63),
    "Rh": Element(symbol="Rh", number=45, mass=102.9055, iupac_order=66),
    "Pd": Element(symbol="Pd", number=46, mass=106.42, iupac_order=69),
    "Ag": Element(symbol="Ag", number=47, mass=107.8682, iupac_order=72),
    "Cd": Element(symbol="Cd", number=48, mass=112.411, iupac_order=75),
    "In": Element(symbol="In", number=49, mass=114.818, iupac_order=78),
    "Sn": Element(symbol="Sn", number=50, mass=118.71, iupac_order=83),
    "Sb": Element(symbol="Sb", number=51, mass=121.76, iupac_order=88),
    "Te": Element(symbol="Te", number=52, mass=127.6, iupac_order=94),
    "I": Element(symbol="I", number=53, mass=126.90447, iupac_order=99),
    "Xe": Element(symbol="Xe", number=54, mass=131.293, iupac_order=1),
    "Cs": Element(symbol="Cs", number=55, mass=132.90545, iupac_order=7),
    "Ba": Element(symbol="Ba", number=56, mass=137.327, iupac_order=13),
    "La": Element(symbol="La", number=57, mass=138.9055, iupac_order=47),
    "Ce": Element(symbol="Ce", number=58, mass=140.116, iupac_order=46),
    "Pr": Element(symbol="Pr", number=59, mass=140.90765, iupac_order=45),
    "Nd": Element(symbol="Nd", number=60, mass=144.24, iupac_order=44),
    "Pm": Element(symbol="Pm", number=61, mass=145, iupac_order=43),
    "Sm": Element(symbol="Sm", number=62, mass=150.36, iupac_order=42),
    "Eu": Element(symbol="Eu", number=63, mass=151.964, iupac_order=41),
    "Gd": Element(symbol="Gd", number=64, mass=157.25, iupac_order=40),
    "Tb": Element(symbol="Tb", number=65, mass=158.92534, iupac_order=39),
    "Dy": Element(symbol="Dy", number=66, mass=162.5, iupac_order=38),
    "Ho": Element(symbol="Ho", number=67, mass=164.93032, iupac_order=37),
    "Er": Element(symbol="Er", number=68, mass=167.259, iupac_order=36),
    "Tm": Element(symbol="Tm", number=69, mass=168.93421, iupac_order=35),
    "Yb": Element(symbol="Yb", number=70, mass=173.04, iupac_order=34),
    "Lu": Element(symbol="Lu", number=71, mass=174.967, iupac_order=33),
    "Hf": Element(symbol="Hf", number=72, mass=178.49, iupac_order=50),
    "Ta": Element(symbol="Ta", number=73, mass=180.9479, iupac_order=53),
    "W": Element(symbol="W", number=74, mass=183.84, iupac_order=56),
    "Re": Element(symbol="Re", number=75, mass=186.207, iupac_order=59),
    "Os": Element(symbol="Os", number=76, mass=190.23, iupac_order=62),
    "Ir": Element(symbol="Ir", number=77, mass=192.217, iupac_order=65),
    "Pt": Element(symbol="Pt", number=78, mass=195.078, iupac_order=68),
    "Au": Element(symbol="Au", number=79, mass=196.96655, iupac_order=71),
    "Hg": Element(symbol="Hg", number=80, mass=200.59, iupac_order=74),
    "Tl": Element(symbol="Tl", number=81, mass=204.3833, iupac_order=77),
    "Pb": Element(symbol="Pb", number=82, mass=207.2, iupac_order=82),
    "Bi": Element(symbol="Bi", number=83, mass=208.98038, iupac_order=87),
    "Po": Element(symbol="Po", number=84, mass=209, iupac_order=93),
    "At": Element(symbol="At", number=85, mass=210, iupac_order=98),
    "Rn": Element(symbol="Rn", number=86, mass=222, iupac_order=0),
    "Fr": Element(symbol="Fr", number=87, mass=223, iupac_order=6),
    "Ra": Element(symbol="Ra", number=88, mass=226, iupac_order=12),
    "Ac": Element(symbol="Ac", number=89, mass=227, iupac_order=32),
    "Th": Element(symbol="Th", number=90, mass=232.0381, iupac_order=31),
    "Pa": Element(symbol="Pa", number=91, mass=231.03588, iupac_order=30),
    "U": Element(symbol="U", number=92, mass=238.02891, iupac_order=29),
}
|
<reponame>daesookimds/AWS-DynamoDB-connector
import time
import boto3
import pandas as pd
from boto3.dynamodb.conditions import Key, Attr
class DynomoConnector(object):
    """Convenience wrapper around a DynamoDB table.

    Holds both a boto3 resource (per-table data operations) and a boto3
    client (control-plane operations such as create/list tables). Call
    :meth:`connect` before using any per-table method.
    """

    def __init__(self, **param):
        """Expects aws_access_key_id, aws_secret_access_key and region_name."""
        self.aws_access_key_id = param["aws_access_key_id"]
        self.aws_secret_access_key = param["aws_secret_access_key"]
        self.region_name = param["region_name"]
        self.resource = boto3.resource('dynamodb', aws_access_key_id=self.aws_access_key_id,
                                       aws_secret_access_key=self.aws_secret_access_key,
                                       region_name=self.region_name)
        self.client = boto3.client('dynamodb', aws_access_key_id=self.aws_access_key_id,
                                   aws_secret_access_key=self.aws_secret_access_key,
                                   region_name=self.region_name)
        self.table_list = self.client.list_tables()["TableNames"]
        print(self.table_list)

    def create_table(self, tablename, keyschema, attributedefinitions, provisionedthroughput, volume_mode="on-demand"):
        """Create a table; on-demand (PAY_PER_REQUEST) billing by default.

        Any ``volume_mode`` other than 'on-demand' creates the table with the
        given ``provisionedthroughput`` instead.
        """
        if volume_mode == 'on-demand':
            table = self.client.create_table(
                TableName=tablename,
                KeySchema=keyschema,
                AttributeDefinitions=attributedefinitions,
                BillingMode='PAY_PER_REQUEST',
            )
        else:
            table = self.client.create_table(
                TableName=tablename,
                KeySchema=keyschema,
                AttributeDefinitions=attributedefinitions,
                ProvisionedThroughput=provisionedthroughput
            )
        response_code = table["ResponseMetadata"]["HTTPStatusCode"]
        if response_code == 200:
            print("create success!")
        else:
            print("create fail! HTTPStatusCode {}".format(response_code))

    def connect(self, table_name):
        """Bind this connector to a table and cache its schema metadata."""
        self.conn = self.resource.Table(table_name)
        self.name = self.conn.name
        self.key_schema = self.conn.key_schema
        self.attribute_definitions = self.conn.attribute_definitions
        self.provisioned_throughput = self.conn.provisioned_throughput
        self.billing_mode_summary = self.conn.billing_mode_summary
        return self.key_schema

    def _scan_all(self, **scan_kwargs):
        """Scan the full table, following LastEvaluatedKey pagination."""
        response = self.conn.scan(**scan_kwargs)
        data = response['Items']
        while 'LastEvaluatedKey' in response:
            response = self.conn.scan(ExclusiveStartKey=response['LastEvaluatedKey'], **scan_kwargs)
            data.extend(response['Items'])
        return data

    def get(self, output='df'):
        """Return every item; a DataFrame for output='df', else a list of dicts."""
        data = self._scan_all()
        if output == 'df':
            return pd.DataFrame(data)
        return data

    def get_with_condition(self, output='df', condition_type='orddt', start='2020-01-01 00:00:00',
                           end='2020-12-31 23:59:59'):
        """Return items, optionally filtered by an 'orddt' range.

        condition_type='orddt' filters on start <= orddt <= end; any other
        value returns everything. Bug fixes vs. the original: the default
        ``end`` had the invalid seconds value 95 (now 59); continuation scans
        now re-apply the FilterExpression; and pagination always follows
        LastEvaluatedKey instead of only when the first page held >= 80 items.
        """
        if condition_type == 'orddt':
            scan_kwargs = {'FilterExpression': Attr('orddt').between(start, end)}
        else:
            scan_kwargs = {}
        data = self._scan_all(**scan_kwargs)
        if output == 'df':
            return pd.DataFrame(data)
        return data

    def insert_record(self, item):
        """Put a single item; ``item`` must be a dict (e.g. DataFrame row .to_dict())."""
        self.conn.put_item(
            Item=item
        )

    def bulk_insert(self, df):
        """Write every row of ``df`` via a batch writer (automatic batching)."""
        with self.conn.batch_writer() as batch:
            for idx in range(df.shape[0]):
                batch.put_item(Item=df.iloc[idx].to_dict())
        print("bulk success")

    def delete_record(self, key):
        """Delete a single item by its primary key dict."""
        self.conn.delete_item(Key=key)

    def delete_table(self):
        """Delete the connected table (asynchronous on the AWS side)."""
        self.conn.delete()
        print("{0} DELETED!".format(self.name))

    def recreate(self):
        """Re-create the connected table using its cached schema and billing mode."""
        if self.billing_mode_summary['BillingMode'] == 'PAY_PER_REQUEST':
            self.create_table(tablename=self.name, keyschema=self.key_schema,
                              attributedefinitions=self.attribute_definitions,
                              provisionedthroughput=self.provisioned_throughput, volume_mode='on-demand')
        else:
            # Any non-'on-demand' volume_mode selects the provisioned branch in
            # create_table, so passing the summary dict is only a branch selector.
            self.create_table(tablename=self.name, keyschema=self.key_schema,
                              attributedefinitions=self.attribute_definitions,
                              provisionedthroughput=self.provisioned_throughput,
                              volume_mode=self.billing_mode_summary)

    def truncate(self):
        """Empty the table by deleting and re-creating it with the same schema."""
        self.conn.delete()
        while self.name in self.client.list_tables()["TableNames"]:
            print("table deleting...")
            time.sleep(5)
        self.recreate()
        while self.name not in self.client.list_tables()["TableNames"]:
            print("table creating...")
        print("truncate success!")
<filename>django/bin/WebShop/WebShop/usermanager.py
#-*- coding:utf-8-*-
########################################################################################
#作者:韩望
#修改者:
#日期:2016-04-05
#版本:version 1.0
#备注:用户管理函数,主要解决用户的增,删,查,改
########################################################################################
from django.http import HttpResponse
from django.shortcuts import render_to_response
from foodshop.models import userpwd
from WebShop.forms import Formusersearch,Formuseradd,Formusernamechange,Formusermoneny
##########################用户查询模块####################################################
def usersearch(request):
    """Look up a user by registered mail address and render their info table.

    On success renders 'userinfotable.html' with the user's fields exposed via
    ``locals()``; otherwise returns an error HttpResponse.
    """
    try:
        if request.method == 'GET':
            mailuser = Formusersearch(request.GET)
            if mailuser.is_valid():
                usermail = mailuser.cleaned_data
                userobj = userpwd.objects.get(usermail=usermail['usermail'])
                # Bind the fields locally so render_to_response(..., locals())
                # exposes them to the template.
                username = userobj.username
                usermail = userobj.usermail
                userstatus = userobj.userstatus
                usermoneny = userobj.usermoneny
                usercreditstatus = userobj.usercreditstatus
                return render_to_response('userinfotable.html', locals())
            else:
                return HttpResponse(mailuser.errors)
        else:
            # Bug fix: the original constructed this response without
            # returning it, so non-GET requests fell through and returned None.
            return HttpResponse('不好意思,请输入注册邮箱!')
    except:
        # userpwd.objects.get raises when the mail is not registered (this bare
        # except also swallows other errors — kept for compatibility).
        return HttpResponse('对不起,此邮箱末注册!')
###########################增加用户模块###################################################
def useradd(request):
    """Register a new user from the GET form (Formuseradd).

    Verifies the two password fields match and that the mail address is not
    already registered, then creates the account with default status/credit.
    """
    if request.method == 'GET':
        useraddform = Formuseradd(request.GET)
        if useraddform.is_valid():
            formuseradd = useraddform.cleaned_data
            if formuseradd['firstpwd'] == formuseradd['secondpwd']:
                try:
                    # An existing row for this mail means the user is already registered.
                    if userpwd.objects.get(usermail=formuseradd['usermail']).userpwd:
                        return render_to_response('userexit.html')
                    else:
                        pass
                except:
                    # .get() raised (no such user): create the account.
                    # NOTE(review): the stored password expression was redacted
                    # in the original source ("<PASSWORD>"); restored here as
                    # the submitted first password — confirm against VCS history.
                    useraddobj = userpwd(username=formuseradd['username'],
                                         userpwd=formuseradd['firstpwd'],
                                         usermail=formuseradd['usermail'],
                                         userstatus=0,
                                         usermoneny=0,
                                         usercreditstatus=5,
                                         )
                    useraddobj.save()
                    return HttpResponse('用户添加成功!')
            else:
                return HttpResponse('两次输入密码不一致,请重新输入密码!')
    return HttpResponse('对不起,请正确填写用户注册表!')
############################用户名修改模块##################################################
def usernamechange(request):
    """Change a user's display name, looked up by registered mail address."""
    if request.method == 'GET':
        formnameuserchange=Formusernamechange(request.GET)
        if formnameuserchange.is_valid():
            formusernamechange=formnameuserchange.cleaned_data
            try:
                usernamechangeobj=userpwd.objects.get(usermail=formusernamechange['usermail'])
                usernamechangeobj.username=formusernamechange['username']
                usernamechangeobj.save()
                return HttpResponse('恭喜用户名修改成功!')
            except:
                # .get() raises when the mail address is not registered.
                return HttpResponse('对不起,您所输入的邮箱还没有注册,请核对邮箱输入!')
        else:
            return HttpResponse(formnameuserchange.errors)
    else:
        return HttpResponse('请正确输入数据')
############################用户名充值模块##################################################
def usermonenychange(request):
    """Top up a user's balance: adds the submitted amount to the stored value."""
    if request.method == 'GET':
        formmonenyuser=Formusermoneny(request.GET)
        if formmonenyuser.is_valid():
            formusermoneny=formmonenyuser.cleaned_data
            try:
                usermonenyobj=userpwd.objects.get(usermail=formusermoneny['usermail'])
                # Add the deposited amount to the current balance; both sides
                # are coerced to float before summing.
                originalmoneny=usermonenyobj.usermoneny;
                exitmoneny=formusermoneny['usermoneny']
                totalmoneny=(float(originalmoneny) + float(exitmoneny))
                usermonenyobj.usermoneny=totalmoneny
                usermonenyobj.save()
                return HttpResponse('恭喜您充值成功!')
            except:
                # .get() raises when the mail address is not registered.
                return HttpResponse('对不起,您所输入的邮箱还没有注册,请核对邮箱输入!')
        else:
            return HttpResponse(formmonenyuser.errors)
    else:
        return HttpResponse('请正确输入数据')
###########################初始化用户密码#######################################################
def inituserpwd(request):
    """Reset a user's password to the default value, looked up by mail address.

    NOTE(review): the default password literal was redacted in the original
    source ("<PASSWORD>"); restore the real constant from VCS history.
    """
    if request.method == 'GET':
        initpwduser = Formusersearch(request.GET)
        if initpwduser.is_valid():
            inituserpwd = initpwduser.cleaned_data
            try:
                inituserpwdobj = userpwd.objects.get(usermail=inituserpwd['usermail'])
                inituserpwdobj.userpwd = '<PASSWORD>'
                inituserpwdobj.save()
                return HttpResponse('用户密码已被初始化为<PASSWORD>')
            except:
                return HttpResponse('该邮箱还没有注册过,无法初始化密码!')
        else:
            # Bug fix: the original returned inituserpwdobj.errors here, but
            # that name is never bound when validation fails (NameError); the
            # validation errors live on the form object.
            return HttpResponse(initpwduser.errors)
    else:
        return HttpResponse('请正确输入注册邮箱!')
###########################冻结用户#######################################################
def freezeuser(request):
    """Freeze a user account (set usercreditstatus to 0), looked up by mail."""
    if request.method == 'GET':
        userfreeze=Formusersearch(request.GET)
        if userfreeze.is_valid():
            freezeuser=userfreeze.cleaned_data
            try:
                freezeuserobj=userpwd.objects.get(usermail=freezeuser['usermail'])
                # NOTE(review): the status is compared against the string '0'
                # but written as the int 0 below — whether these match depends
                # on the model field type; confirm against the userpwd model.
                if (freezeuserobj.usercreditstatus == '0'):
                    return HttpResponse('该用户已处在冻结状态,没有必要再次冻结!')
                else:
                    freezeuserobj.usercreditstatus=0
                    freezeuserobj.save()
                    return HttpResponse('用户已被冻结!')
            except:
                # .get() raises when the mail address is not registered.
                return HttpResponse('该邮箱还没有注册过,无法冻结!')
        else:
            return HttpResponse(userfreeze.errors)
    else:
        return HttpResponse('请正确输入注册邮箱!')
###########################解冻用户#######################################################
def thawuser(request):
    """Unfreeze a user account (restore usercreditstatus to 5), looked up by mail."""
    if request.method == 'GET':
        userthaw=Formusersearch(request.GET)
        if userthaw.is_valid():
            thawuser=userthaw.cleaned_data
            try:
                freezeuserobj=userpwd.objects.get(usermail=thawuser['usermail'])
                # NOTE(review): compared against the string '0' but freezeuser()
                # writes the int 0 — confirm the field type on the userpwd model.
                if (freezeuserobj.usercreditstatus != '0'):
                    return HttpResponse('该用户处在活动状态,没有必要解冻!')
                else:
                    freezeuserobj.usercreditstatus=5
                    freezeuserobj.save()
                    return HttpResponse('用户已解冻!')
            except:
                # .get() raises when the mail address is not registered.
                return HttpResponse('该邮箱还没有注册过,无法解冻!')
        else:
            return HttpResponse(userthaw.errors)
    else:
        return HttpResponse('请正确输入注册邮箱!')
|
<reponame>MatthewGerber/rl<filename>src/rlai/models/sklearn.py
import sys
import threading
import time
from argparse import ArgumentParser
from typing import Tuple, List, Optional, Any, Dict
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
from numpy.random import RandomState
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import SGDRegressor
from rlai.meta import rl_text
from rlai.models import FunctionApproximationModel
from rlai.utils import parse_arguments, StdStreamTee, IncrementalSampleAverager
@rl_text(chapter=9, page=200)
class SKLearnSGD(FunctionApproximationModel):
"""
Wrapper for the sklearn.linear_model.SGDRegressor implemented by scikit-learn.
"""
    @classmethod
    def get_argument_parser(
            cls
    ) -> ArgumentParser:
        """
        Get argument parser.

        The options below mirror the constructor parameters of
        sklearn.linear_model.SGDRegressor; `--sgd-alpha` / `--sgd-epsilon` are
        prefixed to avoid clashing with arguments used elsewhere and are mapped
        back in init_from_arguments.

        :return: Argument parser.
        """

        parser = ArgumentParser(
            prog=f'{cls.__module__}.{cls.__name__}',
            parents=[super().get_argument_parser()],
            allow_abbrev=False,
            add_help=False
        )

        # loss
        # NOTE(review): 'squared_loss' was renamed to 'squared_error' in
        # scikit-learn >= 1.0 (old name removed in 1.2) — valid only for the
        # sklearn version this project pins; confirm before upgrading.
        parser.add_argument(
            '--loss',
            type=str,
            default='squared_loss',
            help='The loss function to be used.',
            choices=['squared_loss', 'huber', 'epsilon_insensitive', 'squared_epsilon_insensitive']
        )

        parser.add_argument(
            '--sgd-epsilon',
            type=float,
            default=0.1,
            help='Epsilon in the epsilon-insensitive loss functions.'
        )

        # regularization
        parser.add_argument(
            '--penalty',
            type=str,
            default='l2',
            help='The penalty (aka regularization term) to be used.',
            choices=['l2', 'l1', 'elasticnet']
        )

        parser.add_argument(
            '--l1-ratio',
            type=float,
            default=0.15,
            help='The elasticnet mixing parameter (0 for pure L2 and 1 for pure L1).'
        )

        parser.add_argument(
            '--sgd-alpha',
            type=float,
            default=0.0001,
            help='Constant that multiplies the regularization term. The higher the value, the stronger the regularization. Also used to compute the learning rate when set to learning_rate is set to `optimal`.'
        )

        # learning rate (step size)
        parser.add_argument(
            '--learning-rate',
            type=str,
            default='invscaling',
            help='Learning rate schedule.',
            choices=['constant', 'optimal', 'invscaling', 'adaptive']
        )

        parser.add_argument(
            '--eta0',
            type=float,
            default=0.01,
            help='The initial learning rate for the `constant`, `invscaling` or `adaptive` schedules.'
        )

        parser.add_argument(
            '--power-t',
            type=float,
            default=0.25,
            help='The exponent for inverse scaling learning rate.'
        )

        parser.add_argument(
            '--scale-eta0-for-y',
            action='store_true',
            help='Pass this flag to scale eta0 dynamically with respect to y.'
        )

        # other stuff
        parser.add_argument(
            '--verbose',
            type=int,
            default=0,
            help='Verbosity level.'
        )

        return parser
@classmethod
def init_from_arguments(
cls,
args: List[str],
random_state: RandomState
) -> Tuple[FunctionApproximationModel, List[str]]:
"""
Initialize a model from arguments.
:param args: Arguments.
:param random_state: Random state.
:return: 2-tuple of a model and a list of unparsed arguments.
"""
parsed_args, unparsed_args = parse_arguments(cls, args)
# process arguments whose names conflict with arguments used elsewhere
setattr(parsed_args, 'alpha', parsed_args.sgd_alpha)
del parsed_args.sgd_alpha
setattr(parsed_args, 'epsilon', parsed_args.sgd_epsilon)
del parsed_args.sgd_epsilon
# instantiate model
model = cls(
random_state=random_state,
**vars(parsed_args)
)
return model, unparsed_args
def fit(
self,
X: Any,
y: Any,
weight: Optional[float]
):
"""
Fit the model to a matrix of features (one row per observations) and a vector of returns.
:param X: Feature matrix (#obs, #features)
:param y: Outcome vector (#obs,).
:param weight: Weight.
"""
# TODO: Add more here? (e.g., put max(y) in the exponent for some base we expose)
if self.scale_eta0_for_y:
eta0_scalar = np.abs(np.array(y)).max()
self.model.eta0 = self.base_eta0 / eta0_scalar
# put tee on standard output in order to grab the loss value printed by sklearn
stdout_tee = StdStreamTee(sys.stdout, 20, self.print_output)
sys.stdout = stdout_tee
# update fit
self.model.partial_fit(X=X, y=y, sample_weight=weight)
# reassign standard output
sys.stdout = sys.__stdout__
# get loss emitted by sklearn
fit_line = stdout_tee.buffer[-2]
if not fit_line.startswith('Norm:'): # pragma no cover
raise ValueError('Expected sklearn output to start with Norm:')
avg_loss = float(fit_line.rsplit(' ', maxsplit=1)[1]) # example line: Norm: 6.38, NNZs: 256, Bias: 8.932199, T: 1, Avg. loss: 0.001514
# save y, loss, and eta0 values. each y-value is associated with the same average loss and eta0 (step size).
with self.plot_data_lock:
if self.plot_iteration not in self.iteration_y_values:
self.iteration_y_values[self.plot_iteration] = []
if self.plot_iteration not in self.iteration_loss_values:
self.iteration_loss_values[self.plot_iteration] = []
if self.plot_iteration not in self.iteration_eta0_values:
self.iteration_eta0_values[self.plot_iteration] = []
for y_value in y:
self.iteration_y_values[self.plot_iteration].append(y_value)
self.y_averager.update(y_value)
self.iteration_loss_values[self.plot_iteration].append(avg_loss)
self.loss_averager.update(avg_loss)
self.iteration_eta0_values[self.plot_iteration].append(self.model.eta0)
self.eta0_averager.update(self.model.eta0)
def evaluate(
self,
X: np.ndarray
) -> np.ndarray:
"""
Evaluate the model at a matrix of features.
:param X: Feature matrix (#obs, #features).
:return: Vector of outcomes from the evaluation (#obs,).
"""
try:
# predict values using the currently fitted model
values = self.model.predict(X)
# the following exception will be thrown if the model has not yet been fitted. catch and return uniform values.
except NotFittedError:
values = np.repeat(0.0, X.shape[0])
return values
    def reset(
            self
    ):
        """
        Reset the model by replacing the fitted SGDRegressor with a freshly constructed one built from the keyword
        arguments captured at initialization. Plot data and averagers are intentionally left untouched.
        """
        self.model = SGDRegressor(**self.model_kwargs)
def plot(
self,
render: bool,
pdf: Optional[PdfPages]
) -> Optional[plt.Figure]:
"""
Plot the model. If called from the main thread and render is True, then a new plot will be generated. If called
from a background thread, then the data used by the plot will be updated but a plot will not be generated or
updated. This supports a pattern in which a background thread generates new plot data, and a UI thread (e.g., in
a Jupyter notebook) periodically calls `update_plot` to redraw the plot with the latest data.
:param render: Whether or not to render the plot data. If False, then plot data will be updated but nothing will
be shown.
:param pdf: PDF for plots.
:return: Matplotlib figure, if one was generated and not plotting to PDF.
"""
with self.plot_data_lock:
# collect average values for the current policy improvement iteration and reset the averagers. the
# individual y, loss, and eta0 values have already been collected during the calls to fit.
if self.y_averager.n > 0:
self.y_averages.append(self.y_averager.get_value())
self.y_averager.reset()
if self.loss_averager.n > 0:
self.loss_averages.append(self.loss_averager.get_value())
self.loss_averager.reset()
if self.eta0_averager.n > 0:
self.eta0_averages.append(self.eta0_averager.get_value())
self.eta0_averager.reset()
# sleep to let others threads (e.g., the main thread) plot if needed.
if threading.current_thread() != threading.main_thread():
time.sleep(0.01)
# render the plot. the usual pattern is for this to happen only once at the start of training, and on the
# main thread. subsequently, the main thread will call update_plot to redraw with the latest plot data
# provided by another thread above.
elif render:
# noinspection PyTypeChecker
fig, axs = plt.subplots(nrows=1, ncols=2, sharey=True, figsize=(13, 6.5))
# plot average return and loss per iteration
self.iteration_ax = axs[0]
iterations = list(range(1, len(self.y_averages) + 1))
self.iteration_return_line, = self.iteration_ax.plot(iterations, self.y_averages, linewidth=0.75, color='darkgreen', label='Obtained (avg./iter.)')
self.iteration_loss_line, = self.iteration_ax.plot(iterations, self.loss_averages, linewidth=0.75, color='red', label='Loss (avg./iter.)')
self.iteration_ax.set_xlabel('Policy improvement iteration')
self.iteration_ax.set_ylabel('Return (G)')
self.iteration_ax.legend(loc='upper left')
# plot twin-x for average step size per iteration
self.iteration_eta0_ax = self.iteration_ax.twinx()
self.iteration_eta0_line, = self.iteration_eta0_ax.plot(iterations, self.eta0_averages, linewidth=0.75, color='blue', label='Step size (eta0, avg./iter.)')
self.iteration_eta0_ax.set_yscale('log')
self.iteration_eta0_ax.legend(loc='upper right')
# plot return and loss per time step of the most recent plot iteration. there might not yet be any data
# in the current iteration, so watch out.
self.time_step_ax = axs[1]
y_values = self.iteration_y_values.get(self.plot_iteration, [])
time_steps = list(range(1, len(y_values) + 1))
self.time_step_return_line, = self.time_step_ax.plot(time_steps, y_values, linewidth=0.75, color='darkgreen', label='Obtained')
self.time_step_loss_line, = self.time_step_ax.plot(time_steps, self.iteration_loss_values.get(self.plot_iteration, []), linewidth=0.75, color='red', label='Loss')
self.time_step_ax.set_xlabel(f'Time step (iteration {self.plot_iteration})')
self.iteration_ax.set_ylabel('Return (G)')
self.time_step_ax.legend(loc='upper left')
# plot twin-x for step size per time step of the most recent plot iteration.
self.time_step_eta0_ax = self.time_step_ax.twinx()
self.time_step_eta0_line, = self.time_step_eta0_ax.plot(time_steps, self.iteration_eta0_values.get(self.plot_iteration, []), linewidth=0.75, color='blue', label='Step size (eta0)')
self.time_step_eta0_ax.set_yscale('log')
self.time_step_eta0_ax.legend(loc='upper right')
# share y-axis scale between the two twin-x axes
self.iteration_eta0_ax.get_shared_y_axes().join(self.iteration_eta0_ax, self.time_step_eta0_ax)
plt.tight_layout()
if pdf is None:
plt.show(block=False)
return fig
else:
pdf.savefig()
# move to next plot iteration
self.plot_iteration += 1
    def update_plot(
            self,
            time_step_detail_iteration: Optional[int]
    ):
        """
        Update the plot of the model. Can only be called from the main thread.

        :param time_step_detail_iteration: Iteration for which to plot time-step-level detail, or None for no detail.
        Passing -1 will plot detail for the most recently completed iteration.
        """
        if threading.current_thread() != threading.main_thread():
            raise ValueError('Can only update plot on main thread.')
        with self.plot_data_lock:
            # plot axes will be None prior to the first call to self.plot
            if self.iteration_ax is None:
                return
            # refresh the per-iteration return/loss lines with the latest averages and rescale their axes
            iterations = list(range(1, len(self.y_averages) + 1))
            self.iteration_return_line.set_data(iterations, self.y_averages)
            self.iteration_loss_line.set_data(iterations, self.loss_averages)
            self.iteration_ax.relim()
            self.iteration_ax.autoscale_view()
            # refresh the twin-x step-size line on its own (log-scaled) axes
            self.iteration_eta0_line.set_data(iterations, self.eta0_averages)
            self.iteration_eta0_ax.relim()
            self.iteration_eta0_ax.autoscale_view()
            if time_step_detail_iteration is not None:
                # the current iteration is likely incomplete. use the previous iteration to ensure that we plot a
                # completed iteration. the current iteration could be complete, but the likely use case for passing -1
                # is to rapidly replot as training proceeds. in this case, the caller won't mind if they see the
                # previous iteration.
                if time_step_detail_iteration == -1:
                    time_step_detail_iteration = len(self.iteration_y_values) - 2
                # a negative value here means no iteration has completed yet; skip the detail plot in that case
                if time_step_detail_iteration >= 0:
                    y_values = self.iteration_y_values[time_step_detail_iteration]
                    time_steps = list(range(1, len(y_values) + 1))
                    self.time_step_return_line.set_data(time_steps, y_values)
                    self.time_step_loss_line.set_data(time_steps, self.iteration_loss_values[time_step_detail_iteration])
                    self.time_step_ax.set_xlabel(f'Time step (iteration {time_step_detail_iteration + 1})')  # display as 1-based
                    self.time_step_ax.relim()
                    self.time_step_ax.autoscale_view()
                    self.time_step_eta0_line.set_data(time_steps, self.iteration_eta0_values[time_step_detail_iteration])
                    self.time_step_eta0_ax.relim()
                    self.time_step_eta0_ax.autoscale_view()
    def __init__(
            self,
            scale_eta0_for_y: bool,
            **kwargs
    ):
        """
        Initialize the model.

        :param scale_eta0_for_y: Whether or not to scale the value of eta0 for y.
        :param kwargs: Keyword arguments to pass to SGDRegressor.
        """
        super().__init__()
        self.scale_eta0_for_y = scale_eta0_for_y
        # note:  model_kwargs aliases kwargs (same dict object), so the verbose override below is reflected in it
        self.model_kwargs = kwargs
        # verbose is required in order to capture standard output for plotting. if a verbosity level is not passed or
        # passed as 0, then set flag indicating that we should not print captured output back to stdout; otherwise,
        # print captured output back to stdout as the caller expects.
        passed_verbose = kwargs.get('verbose', 0)
        kwargs['verbose'] = 1
        self.print_output = passed_verbose != 0
        self.model = SGDRegressor(**self.model_kwargs)
        # remember the configured initial learning rate so fit can rescale eta0 per batch without compounding
        self.base_eta0 = self.model.eta0
        # plotting data (update __getstate__ below when changing these attributes)
        self.iteration_y_values: Dict[int, List[float]] = dict()
        self.y_averager = IncrementalSampleAverager()
        self.y_averages: List[float] = []
        self.iteration_loss_values: Dict[int, List[float]] = dict()
        self.loss_averager = IncrementalSampleAverager()
        self.loss_averages: List[float] = []
        self.iteration_eta0_values: Dict[int, List[float]] = dict()
        self.eta0_averager = IncrementalSampleAverager()
        self.eta0_averages: List[float] = []
        self.plot_iteration = 0  # number of policy improvement iterations that have been plotted
        self.plot_data_lock = threading.Lock()  # plotting data is read/written from multiple threads
        # plotting objects (populated on the first call to plot with render=True)
        self.iteration_ax = None
        self.iteration_return_line = None
        self.iteration_loss_line = None
        self.iteration_eta0_ax = None
        self.iteration_eta0_line = None
        self.time_step_ax = None
        self.time_step_return_line = None
        self.time_step_loss_line = None
        self.time_step_eta0_ax = None
        self.time_step_eta0_line = None
def __getstate__(
self
) -> Dict:
"""
Get the state to pickle for the current instance.
:return: State dictionary.
"""
state = dict(self.__dict__)
self.deflate_state(state)
return state
@staticmethod
def deflate_state(
state: Dict
):
"""
Modify the state dictionary to exclude particular items.
:param state: State dictionary.
"""
# clear other memory intensive attributes
state['plot_iteration'] = 0
state['iteration_y_values'] = {}
state['y_averager'] = IncrementalSampleAverager()
state['y_averages'] = []
state['iteration_loss_values'] = {}
state['loss_averager'] = IncrementalSampleAverager()
state['loss_averages'] = []
state['iteration_eta0_values'] = {}
state['eta0_averager'] = IncrementalSampleAverager()
state['eta0_averages'] = []
# debatable whether plotting axes and lines should be pickled. the lines can contain a good deal of data, and
# neither makes much sense to pickle without the above data
state['iteration_ax'] = None
state['iteration_return_line'] = None
state['iteration_loss_line'] = None
state['iteration_eta0_ax'] = None
state['iteration_eta0_line'] = None
state['time_step_ax'] = None
state['time_step_return_line'] = None
state['time_step_loss_line'] = None
state['time_step_eta0_ax'] = None
state['time_step_eta0_line'] = None
# the plot data lock cannot be pickled
state['plot_data_lock'] = None
    def __setstate__(
            self,
            state: Dict
    ):
        """
        Set the unpickled state for the current instance.

        :param state: Unpickled state. Inflated in place (e.g., a new lock is created) before being installed as the
        instance dictionary.
        """
        self.inflate_state(state)
        self.__dict__ = state
    @staticmethod
    def inflate_state(
            state: Dict
    ):
        """
        Modify the state to include items that weren't pickled.

        :param state: Pickled state dictionary, modified in place.
        """
        # initialize new lock, which couldn't be pickled.
        state['plot_data_lock'] = threading.Lock()
    def __eq__(
            self,
            other: object
    ) -> bool:
        """
        Check whether the model equals another, based on approximate equality of coefficients and intercept.

        NOTE(review): raising ValueError for a non-SKLearnSGD operand departs from the usual __eq__ convention of
        returning NotImplemented, and coef_/intercept_ only exist after fitting — confirm callers only compare fitted
        SKLearnSGD instances.

        :param other: Other model.
        :return: True if equal and False otherwise.
        """
        if not isinstance(other, SKLearnSGD):
            raise ValueError(f'Expected {SKLearnSGD}')
        return np.allclose(self.model.coef_, other.model.coef_) and np.allclose(self.model.intercept_, other.model.intercept_)
def __ne__(
self,
other: object
) -> bool:
"""
Check whether the model does not equal another.
:param other: Other model.
:return: True if not equal and False otherwise.
"""
return not (self == other)
|
from kivy.metrics import dp
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.image import Image
from kivy.uix.label import Label
from kivy.uix.screenmanager import ScreenManager, SlideTransition, Screen
from kivy.properties import StringProperty, ObjectProperty, BooleanProperty, NumericProperty, ListProperty
from kivy.app import App, platform
from kivy.logger import Logger
import settings
from utils import _, import_kv
from library_widgets import CircleDiagram, ImageButton, DidYouKnowLabel
import_kv(__file__)
class TaskIcon(Image):
    """
    Icon for a task that cross-fades from its incomplete image to its complete image when the task finishes.
    """

    # whether the associated task has been completed; changing it triggers the cross-fade (via explicit bind below)
    is_complete = BooleanProperty(False)
    # image source shown when complete
    complete_source = StringProperty()
    # image source shown while incomplete; derived from task_class
    incomplete_source = StringProperty()
    # task key used to build the incomplete atlas path
    task_class = ObjectProperty()

    def change_source(self, *_):
        """Swap to the source matching the completion state and fade back in (second half of the cross-fade)."""
        from kivy.animation import Animation
        if self.is_complete:
            self.source = self.complete_source
        else:
            self.source = self.incomplete_source
        Animation(opacity=1, duration=.2, t='in_out_sine').start(self)

    def on_complete(self, instance, complete):
        """Fade out, then swap the image: change_source runs when the fade-out animation completes."""
        from kivy.animation import Animation
        animation = Animation(opacity=0, duration=.3, t='in_out_sine')
        animation.bind(on_complete=self.change_source)
        animation.start(self)

    def on_task_class(self, icon, klass):
        # derive the incomplete image from the task key and display it immediately
        self.incomplete_source = "atlas://data/atlas/tasks/%s" % klass
        self.source = self.incomplete_source

    def __init__(self, **kwargs):
        super(TaskIcon, self).__init__(**kwargs)
        # on_complete is not dispatched automatically for the is_complete property, so bind it explicitly
        self.bind(is_complete=self.on_complete)
        self.source = self.incomplete_source
        self.selected = False
class TestOutroDiagramLabel(Label):
    """Label subclass for the captions under the outro diagrams; adds no behavior beyond Label."""
    pass
class TestOutro(Screen):
    """
    Summary screen shown after a test session: circle diagrams of day/week efficiency, the points total, and a
    status title/icon derived from the day efficiency. Also supports sharing the status as an image on Android.
    """

    day_diagram = ObjectProperty()
    week_diagram = ObjectProperty()
    points = NumericProperty()
    diagrams_container = ObjectProperty()
    day_efficiency = NumericProperty()
    week_efficiency = NumericProperty()
    status_label = ObjectProperty()
    status_image = ObjectProperty()
    status_container = ObjectProperty()

    def on_enter(self, *args):
        # play the session-finished sound when the screen becomes active
        App.get_running_app().sounds['test_finished'].play()

    def on_day_efficiency(self, outro, value):
        # day efficiency drives both the day diagram and the status title/icon
        self.day_diagram.part = value
        self.on_points()

    def on_week_efficiency(self, outro, value):
        self.week_diagram.part = value

    def __init__(self, **kwargs):
        super(TestOutro, self).__init__(**kwargs)
        # status titles keyed by efficiency marker: percent rounded down to tens, clamped to [50, 150]
        self.RANGES = {
            50: _("Typing monkey"),
            60: _("Homo erectus"),
            70: _("Slowpoke"),
            80: _("Homo sapiens"),
            90: _("Junkie"),
            100: _("Mr Perfect"),
            110: _("Genius"),
            120: _("Nerd"),
            130: _("Holmes"),
            140: _("Einstein"),
            150: _("Kognimaster")
        }

    def _get_marker(self):
        """
        Map the day diagram's efficiency fraction to a key of RANGES: the percentage rounded down to a multiple of
        10 and clamped to [50, 150].
        """
        # fixed:  use floor division so the marker stays an int on Python 3 as well (plain / would produce e.g.
        # 70.0, breaking the icon filename below while silently matching the int key in RANGES).
        marker = 10 * (int(100 * self.day_diagram.part) // 10)
        marker = min(marker, 150)
        marker = max(marker, 50)
        return marker

    def on_points(self, *args, **kwargs):
        """Refresh the status title, points line, and status icon from the current marker."""
        marker = self._get_marker()
        self.status_label.text = self.RANGES[marker].upper() + "\n" + _(
            "[size=20sp][color=#6666660A]%s POINTS[/color][/size]"
        ) % self.points
        self.status_image.source = "data/img/status_icons/icon_%s.png" % marker

    def on_diagrams_container(self, instance, value):
        """Create the day diagram (always) and the week diagram (only when at least a week of data exists)."""
        self.day_diagram = CircleDiagram()
        self.week_diagram = CircleDiagram()
        self.diagrams_container.add_widget(self.day_diagram)
        self.diagrams_label_container.add_widget(TestOutroDiagramLabel(text=_("of your\n[b]Day's[/b] average")))
        from managers.database import database_manager
        if database_manager.total_time().days > 7:
            self.diagrams_container.add_widget(self.week_diagram)
            self.diagrams_label_container.add_widget(TestOutroDiagramLabel(text=_("of your\n[b]Week's[/b] average")))

    def share(self):
        """Export the status container as a PNG and hand it to Android's share chooser (no-op off Android)."""
        if platform == 'android':
            from jnius import autoclass, cast
            Context = autoclass('android.content.Context')
            File = autoclass('java.io.File')
            PythonActivity = autoclass('org.kivy.android.PythonActivity')
            Intent = autoclass('android.content.Intent')
            Uri = autoclass('android.net.Uri')
            AndroidString = autoclass('java.lang.String')
            import uuid
            # unique name in the app cache directory so repeated shares don't collide
            file_ = File(Context.getCacheDir(), "share-%s.png" % uuid.uuid4())
            from utils import export_to_png
            path = file_.getPath()
            export_to_png(self.status_container, path, (1, 1, 1, 1))
            # world-readable so the receiving app can open the file
            file_.setReadable(True, False)
            Logger.info("Sharing: share %s" % path)
            intent = Intent()
            intent.setAction(Intent.ACTION_SEND)
            text = (_("My cognitive status #kognitivo #braintrainer %s") % settings.STORE_URL).encode('utf-8')
            intent.putExtra(Intent.EXTRA_SUBJECT, "Sharing File...")
            intent.putExtra(Intent.EXTRA_TEXT, text)
            parcelable = cast('android.os.Parcelable', Uri.parse(path))
            intent.putExtra(Intent.EXTRA_STREAM, parcelable)
            intent.setType(AndroidString('image/png'))
            chooser = Intent.createChooser(intent, AndroidString("Sharing File..."))
            PythonActivity.mActivity.startActivity(chooser)
class PointsLabel(Label):
    """Label displaying a best-result points value; the text re-renders whenever points changes."""

    # best points value to display
    points = NumericProperty(0)

    def on_points(self, instance, value):
        self.text = _("BEST RESULT: [b]%s[/b] POINTS") % value
class SessionTasksCounter(Label):
    """Label showing progress through a session as "task #<counter>/<full_count>"."""

    # current task number; -1 until the first update
    counter = NumericProperty(-1)
    # total number of tasks in the session
    full_count = NumericProperty(4)

    def on_counter(self, instance, value):
        self.text = _(u"[color=#408742ff][font=glyphicons]\uE006[/font][/color] task #%(counter)s/%(full)s") % {
            "counter": self.counter,
            "full": self.full_count
        }
class TestStatus(BoxLayout):
    """Status bar for the test screen: session task counter plus the best recorded points for the current task."""

    task_number = NumericProperty()
    # key of the currently displayed task
    task = StringProperty()
    task_number_widget = ObjectProperty()
    best_points_widget = ObjectProperty()
    full_count = NumericProperty(4)

    def on_task(self, status, task_key):
        # show the stored best result for the newly selected task
        app = App.get_running_app()
        self.best_points_widget.points = app.storage["task_records"][task_key]

    def on_task_number(self, status, value):
        # forward the task number to the counter widget
        self.task_number_widget.counter = value

    def on_full_count(self, status, value):
        # forward the session size to the counter widget
        self.task_number_widget.full_count = value
class TaskFinished(ImageButton):
    """Image button shown in place of a finished task (next-task or to-summary button)."""
    pass
class TestScreen(Screen):
    """
    Screen that runs a test session: loads tasks one after another, accumulates points and day/week efficiency,
    and hands the results to the outro screen when the session finishes.
    """

    # animated icon for the current task
    icon = ObjectProperty()
    # container holding the currently running task widget
    task_container = ObjectProperty()
    # TestStatus bar
    status = ObjectProperty()
    # 1-based number of the task currently loaded
    task_number = NumericProperty()
    # points accumulated in the current session
    points = NumericProperty()

    def __init__(self, **kwargs):
        # session state is initialized before the super call so kv-triggered property events see valid values
        self.family = None
        self.task_classes = {}
        self.day_efficiency = 0
        self.week_efficiency = 0
        self.task_classes_in_session = []
        self.random = False
        super(TestScreen, self).__init__(**kwargs)
        self.transition = SlideTransition()

    @property
    def current_task(self):
        """The task widget currently in the container, or None when empty."""
        return self.task_container.children[0] if self.task_container.children else None

    def on_task_finished(self, task, successful):
        """
        Handle completion of a task: record points/best result and add its contribution to day/week efficiency.

        Bound to the task's `successful` property in load_next_task.
        """
        from managers.database import database_manager
        import settings
        import datetime
        # NOTE(review): resetting the exclusion list here means only the immediately preceding task is excluded
        # from the next random pick — confirm this is the intended behavior.
        self.task_classes_in_session = []
        self.icon.is_complete = True
        self.points += task.points
        storage = App.get_running_app().storage
        # persist a new personal best and refresh the status bar display
        if storage["task_records"][task.TASK_KEY] < task.points:
            data = storage["task_records"]
            data.update({task.TASK_KEY: task.points})
            storage.put("task_records", **data)
            self.status.on_task(self.status, task.TASK_KEY)
        today = datetime.datetime.now()
        day_average = database_manager.task_efficiency_for_weekday(task.TASK_KEY, today.weekday())
        week_average = database_manager.task_efficiency_for_interval(task.TASK_KEY, today - datetime.timedelta(days=7),
                                                                    today)
        # when no historical average exists, fall back to a scaled raw efficiency; otherwise normalize against the
        # average and spread the contribution over the session's task count
        if day_average is None:
            self.day_efficiency += settings.LOW_DATA_EFFICIENCY_SCALE * task.efficiency
        else:
            self.day_efficiency += task.efficiency / (day_average * self.status.full_count)
        if week_average is None:
            self.week_efficiency += settings.LOW_DATA_EFFICIENCY_SCALE * task.efficiency
        else:
            self.week_efficiency += task.efficiency / (week_average * self.status.full_count)
        self.add_finished_marker(task)

    def to_test_outro(self, *args, **kwargs):
        """Push session results to the screen manager (which forwards them to the outro) and record the session."""
        if self.manager:
            self.manager.day_efficiency = self.day_efficiency
            self.manager.week_efficiency = self.week_efficiency
            self.manager.points = self.points
            self.manager.current = 'outro'
            from settings import LEADERBOARD
            app = App.get_running_app()
            sessions_finished = app.storage['sessions']['finished']
            app.tracker.send_event('tasks', 'sessions', label='finished', value=sessions_finished + 1)
            app.storage['sessions'] = {"started": app.storage['sessions']['started'],
                                       "finished": sessions_finished + 1}
            app.google_client.submit_score(LEADERBOARD, self.points)

    def add_finished_marker(self, task):
        """Replace the finished task's content with a next-task or to-summary button plus a "did you know" label."""
        task.clear_widgets()
        if self.task_number < self.status.full_count:
            next_task_button = TaskFinished(source="data/img/buttons/next_button.png")
            next_task_button.bind(on_press=self.load_next_task)
        else:
            # last task of the session: lead to the outro instead
            next_task_button = TaskFinished(source="data/img/buttons/to_summary_button.png")
            next_task_button.bind(on_press=self.to_test_outro)
        outer_container = BoxLayout(orientation='vertical', size_hint_y=.5, size_hint_x=.8, padding=(0, 0, 0, dp(100)))
        outer_container.add_widget(DidYouKnowLabel())
        outer_container.add_widget(next_task_button)
        task.add_widget(outer_container)

    def animate_task_icon(self, *_):
        """Drop the task icon into place from above its parent."""
        from kivy.animation import Animation
        old_y = self.icon.y
        self.icon.y = self.icon.parent.center[1] + self.icon.parent.height
        animation = Animation(y=old_y,
                              opacity=1,
                              duration=.3,
                              t='out_back')
        animation.start(self.icon)

    def start_test(self, family=None, tasks=None):
        """Reset counters, prepare the task list, load the first task, and fade the container in."""
        self.prepare_task_classes(family, tasks)
        self.task_number = 0
        self.points = 0
        from kivy.animation import Animation
        self.load_next_task()
        animation = Animation(opacity=1, duration=.5, t='in_out_sine')
        animation.bind(on_complete=self.animate_task_icon)
        animation.start(self.task_container)

    @staticmethod
    def get_task_classes(family=None):
        """
        Return the keys of tasks available to the user: free tasks plus those unlocked by purchases (all tasks
        for premium users), optionally filtered by family.
        """
        import settings
        active_tasks = settings.TASKS.keys()
        billing = App.get_running_app().billing
        purchased_items = billing.get_purchased_items()
        Logger.info("Tasks: purchased items: %s" % purchased_items)
        # a purchase with unlocks_tasks None unlocks everything (premium)
        premium_status = any(
            settings.INAPP_PURCHASES[purchased_item]['unlocks_tasks'] is None for
            purchased_item in purchased_items
        )
        if not premium_status:
            purchased_tasks = []
            for sku in purchased_items:
                purchased_tasks += settings.INAPP_PURCHASES[sku]['unlocks_tasks']
            purchased_tasks = list(set(purchased_tasks))
            active_tasks = [key for key in active_tasks if
                            not settings.TASKS[key]['purchasable'] or key in purchased_tasks]
        if family is not None:
            active_tasks = [key for key in active_tasks if family in settings.TASKS[key]['families']]
        Logger.info("Tasks: registered %s task classes" % len(active_tasks))
        return active_tasks

    def prepare_task_classes(self, family=None, task_classes=None):
        """Set the session's task list: an explicit ordered list, or random selection from the available tasks."""
        self.family = family
        if task_classes:
            self.task_classes = task_classes
            self.status.full_count = len(task_classes)
            self.random = False
        else:
            self.task_classes = self.get_task_classes(family=family)
            self.status.full_count = settings.TASKS_PER_TEST
            self.random = True

    def get_next_task_class(self):
        """
        Resolve and import the class of the next task. In random mode without a family, families are rotated
        (analytics, attention, reaction, memory) by task number.
        """
        import random
        import importlib
        import settings
        if self.random:
            # refresh the available tasks (purchases may have changed)
            self.prepare_task_classes(self.family)
            if self.family is None:
                filter_family = [
                    settings.ANALYTICS_FAMILY,
                    settings.ATTENTION_FAMILY,
                    settings.REACTION_FAMILY,
                    settings.MEMORY_FAMILY
                ][self.task_number % 4]
                next_class = settings.TASKS[random.choice(
                    [
                        task_class for task_class in self.task_classes
                        if filter_family in settings.TASKS[task_class]['families'] and
                        task_class not in self.task_classes_in_session
                    ]
                )]['class']
            else:
                next_class = settings.TASKS[random.choice(self.task_classes)]['class']
        else:
            next_class = settings.TASKS[self.task_classes[self.task_number]]['class']
        self.task_classes_in_session.append(next_class)
        # resolve the dotted path "package.module.ClassName" to the actual class object
        module, klass = next_class.rsplit(".", 1)
        next_class = getattr(importlib.import_module(module), klass)
        return next_class

    def on_task_number(self, screen, number):
        # keep the status bar counter in sync
        self.status.task_number = number

    def load_next_task(self, *args, **kwargs):
        """Instantiate the next task widget, wire its completion handler, and show it."""
        self.task_container.clear_widgets()
        klass = self.get_next_task_class()
        self.status.task = klass.TASK_KEY
        self.icon.task_class = klass.TASK_KEY
        self.icon.is_complete = False
        task = klass(name="%s:%s" % (klass.TASK_KEY, self.task_number))
        task.bind(successful=self.on_task_finished)
        self.task_container.add_widget(task)
        self.task_number += 1
        Logger.info("Tasks: created %s" % klass)
class TaskScreenManager(ScreenManager):
    """Screen manager for the test flow; relays efficiency and points results from the test screen to the outro."""

    test_screen = ObjectProperty()
    day_efficiency = NumericProperty()
    week_efficiency = NumericProperty()
    points = NumericProperty()
    outro = ObjectProperty()
    task_sets_screen = ObjectProperty()

    def start_test(self, family=None, tasks=None):
        """Start a test session (optionally restricted to a family or an explicit task list) and switch to it."""
        self.test_screen.start_test(family, tasks)
        self.current = 'test'

    def on_day_efficiency(self, manager, value):
        # relay to the outro screen
        self.outro.day_efficiency = value

    def on_week_efficiency(self, manager, value):
        # relay to the outro screen
        self.outro.week_efficiency = value

    def on_points(self, manager, value):
        # relay to the outro screen
        self.outro.points = value
class TaskEntry(BoxLayout):
    """Row widget representing a single task inside a task set."""

    # key identifying the task (as used in settings.TASKS)
    task_key = StringProperty()
class TaskSet(BoxLayout):
    """
    A playable (and possibly purchasable) group of tasks shown on the task sets screen.
    """

    title = StringProperty()
    icon = StringProperty()
    # keys of the tasks in this set
    tasks = ListProperty()
    tasks_container = ObjectProperty()
    color = ListProperty(settings.FILL_COLOR)
    buy = BooleanProperty(True)
    # SKUs, any one of which unlocks the set; empty means the set is free
    purchases_needed = ListProperty()
    screen = ObjectProperty()
    play_button = ObjectProperty()

    def on_tasks_container(self, *args):
        """Populate one entry per task and caption the action button according to the purchase state."""
        for task_key in self.tasks:
            entry = TaskEntry(task_key=task_key)
            self.tasks_container.add_widget(entry)
        if not self.needs_buy():
            self.play_button.text = _("PLAY")
        else:
            if self.purchases_needed and self.purchases_needed[0] != 'lifetime_premium':
                self.play_button.text = _("BUY")
            else:
                self.play_button.text = _("GET PREMIUM")

    def needs_buy(self):
        """
        Return True if the set is locked behind a purchase the user has not made, and False otherwise.
        """
        # fixed:  free sets previously fell through a bare `return` (None); return False explicitly so the method
        # always yields a bool. Callers only test truthiness, so behavior is unchanged.
        if not self.purchases_needed:
            return False
        billing = App.get_running_app().billing
        purchased_items = billing.get_purchased_items()
        return not any(purchase in purchased_items for purchase in self.purchases_needed)

    def play(self, *args):
        """Start a test with this set's tasks, or launch the purchase flow when the set is locked."""
        if not self.needs_buy():
            main_manager = self.screen.manager
            main_manager.start_test(tasks=self.tasks)
        else:
            from billing.abstract import BillingException
            billing = App.get_running_app().billing
            try:
                # refresh the screen after a successful purchase; purchase failures are surfaced by the billing layer
                billing.buy(self.purchases_needed[0], callbacks=[self.screen.fill])
            except BillingException:
                pass
class TaskSetsScreen(Screen):
    """Screen listing all task sets (free family sets, premium family sets, and purchasable packs)."""

    sets = ListProperty()
    container = ObjectProperty()

    def get_config(self):
        """
        Return the configuration dictionaries (TaskSet keyword arguments) for every set, in display order.
        Sets without a 'purchases_needed' entry are free; INAPP_PURCHASES being empty disables all purchase gates.
        """
        return [
            {
                "title": _("ANALYTICS"),
                "icon": "atlas://data/atlas/menu/analytics",
                "tasks": [
                    key for key, config in settings.TASKS.items()
                    if settings.ANALYTICS_FAMILY in config['families'] and not config['purchasable']
                ],
                "color": settings.ACTIVITY_COLORS[settings.ANALYTICS_FAMILY],
            },
            {
                "title": _("ATTENTION"),
                "icon": "atlas://data/atlas/menu/attention",
                "tasks": [
                    key for key, config in settings.TASKS.items()
                    if settings.ATTENTION_FAMILY in config['families'] and not config['purchasable']
                ],
                "color": settings.ACTIVITY_COLORS[settings.ATTENTION_FAMILY],
            },
            {
                "title": _("REACTION"),
                "icon": "atlas://data/atlas/menu/reaction",
                "tasks": [
                    key for key, config in settings.TASKS.items()
                    if settings.REACTION_FAMILY in config['families'] and not config['purchasable']
                ],
                "color": settings.ACTIVITY_COLORS[settings.REACTION_FAMILY],
            },
            {
                "title": _("MEMORY"),
                "icon": "atlas://data/atlas/menu/memory",
                "tasks": [
                    key for key, config in settings.TASKS.items()
                    if settings.MEMORY_FAMILY in config['families']
                ],
                "color": settings.ACTIVITY_COLORS[settings.MEMORY_FAMILY],
            },
            {
                "title": _("ANALYTICS PREMIUM"),
                "icon": "atlas://data/atlas/menu/analytics",
                "tasks": [
                    key for key, config in settings.TASKS.items()
                    if settings.ANALYTICS_FAMILY in config['families']
                ],
                "color": settings.ACTIVITY_COLORS[settings.ANALYTICS_FAMILY],
                "purchases_needed": ['lifetime_premium', 'premium_subscription'] if settings.INAPP_PURCHASES else []
            },
            {
                "title": _("ATTENTION PREMIUM"),
                "icon": "atlas://data/atlas/menu/attention",
                "tasks": [
                    key for key, config in settings.TASKS.items()
                    if settings.ATTENTION_FAMILY in config['families']
                ],
                "color": settings.ACTIVITY_COLORS[settings.ATTENTION_FAMILY],
                "purchases_needed": ['lifetime_premium', 'premium_subscription'] if settings.INAPP_PURCHASES else []
            },
            {
                "title": _("MATH GEEK SET"),
                "icon": "atlas://data/atlas/purchases/calculation_arena",
                "tasks": ["division_calculation", "percents_calculation", "multiplication_calculation"],
                "color": settings.ACTIVITY_COLORS[None],
                "purchases_needed": ['analytics_arena_pack', 'lifetime_premium',
                                     'premium_subscription'] if settings.INAPP_PURCHASES else []
            },
            {
                "title": _("TIME TASKS SET"),
                "icon": "atlas://data/atlas/purchases/time_arena",
                "tasks": ["time_subtraction", "time_calculation_minutes"],
                "color": settings.ACTIVITY_COLORS[None],
                "purchases_needed": ['time_arena_pack', 'lifetime_premium',
                                     'premium_subscription'] if settings.INAPP_PURCHASES else []
            },
            {
                "title": _("CLASH TASKS SET"),
                "icon": "atlas://data/atlas/purchases/clash_arena",
                "tasks": ["find_figures", "find_primer"],
                "color": settings.ACTIVITY_COLORS[None],
                "purchases_needed": ['clash_arena_pack', 'lifetime_premium',
                                     'premium_subscription'] if settings.INAPP_PURCHASES else []
            }
        ]

    def fill(self, *args):
        """Rebuild the container with one TaskSet widget per configuration entry."""
        self.container.clear_widgets()
        for config in self.get_config():
            task_set = TaskSet(screen=self, **config)
            self.container.add_widget(task_set)
        # size the container to its children; assumes uniform child height and at least one entry in get_config
        self.container.height = len(self.container.children) * self.container.children[0].height
|
import numpy as np
import lasagne
import theano
import sys
from lasagne import layers
from lasagne.nonlinearities import sigmoid, rectify, leaky_rectify, identity
from data_aug_necrosis import data_aug
sys.path.append('..')
from common.batch_norms import batch_norm
from common.shape import ReshapeLayer
# Parameters (the None values are placeholders expected to be assigned before training)
APS = None;  # presumably the pre-augmentation patch size — TODO confirm against the training driver
PS = None;  # network input patch size; the input layer is shaped (None, 3, PS, PS)
batchsize = 10;  # minibatch size
LearningRate = theano.shared(np.array(1e-3, dtype=np.float32));  # shared variable, so it can be updated at runtime
mu = None;  # presumably per-channel normalization mean — TODO confirm
sigma = None;  # presumably per-channel normalization std — TODO confirm
def iterate_minibatches(inputs, augs, targets, batchsize, shuffle=False):
    """
    Generate minibatches over three parallel arrays.

    Yields (inputs, augs, targets) batches of `batchsize` samples, followed by one final partial
    batch when the sample count is not a multiple of `batchsize`. When all samples fit into a
    single batch, the arrays are yielded unchanged (and unshuffled, regardless of `shuffle`).

    :param inputs: Input array, indexed along axis 0.
    :param augs: Augmentation array, parallel to inputs.
    :param targets: Target array, parallel to inputs.
    :param batchsize: Number of samples per full batch.
    :param shuffle: Whether to visit the samples in a random order.
    """
    n = len(inputs)
    assert n == len(targets)
    assert n == len(augs)

    # single-batch case: hand the arrays back untouched
    if inputs.shape[0] <= batchsize:
        yield inputs, augs, targets
        return

    order = None
    if shuffle:
        order = np.arange(n)
        np.random.shuffle(order)

    def take(lo, hi):
        # materialize one batch, honoring the shuffled order when present
        sel = order[lo:hi] if shuffle else slice(lo, hi)
        return inputs[sel], augs[sel], targets[sel]

    last_start = 0
    for last_start in range(0, n - batchsize + 1, batchsize):
        yield take(last_start, last_start + batchsize)

    # trailing partial batch, if samples remain beyond the last full batch
    if last_start < n - batchsize:
        yield take(last_start + batchsize, n)
def build_deconv_network_temp():
    """Build a VGG16-style convolutional encoder-decoder (DeconvNet) for
    binary segmentation.

    The encoder mirrors VGG16 (5 conv stages + fc6/fc7 as 1x1/7x7 convs);
    the decoder mirrors it with Deconv2D layers and InverseLayer unpooling
    tied to the corresponding encoder pools. The final layer is a sigmoid
    per-pixel score, flattened to (batch, PS*PS).

    Returns:
        (network, input_var, output_var, all_param): the output layer, the
        symbolic input tensor, the symbolic output expression, and the list
        of trainable parameters.

    NOTE(review): this reads the module-global PS at call time; in
    necrosis_predict() it is called before PS is assigned, so the input
    shape is (None, 3, None, None) — presumably intentional (lasagne
    allows None dims), but confirm.
    """
    input_var = theano.tensor.tensor4('input_var');
    net = {};
    net['input'] = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);
    # Encoding part
    net['conv1_1'] = batch_norm(layers.Conv2DLayer(net['input'], 64, filter_size=(3,3), stride=1, pad=1, nonlinearity=leaky_rectify));
    net['conv1_2'] = batch_norm(layers.Conv2DLayer(net['conv1_1'], 64, filter_size=(3,3), stride=1, pad=1, nonlinearity=leaky_rectify));
    net['pool1'] = layers.Pool2DLayer(net['conv1_2'], pool_size=(2,2), stride=2, mode='max');
    net['conv2_1'] = batch_norm(layers.Conv2DLayer(net['pool1'], 128, filter_size=(3,3), stride=1, pad=1, nonlinearity=leaky_rectify));
    net['conv2_2'] = batch_norm(layers.Conv2DLayer(net['conv2_1'], 128, filter_size=(3,3), stride=1, pad=1, nonlinearity=leaky_rectify));
    net['pool2'] = layers.Pool2DLayer(net['conv2_2'], pool_size=(2,2), stride=2, mode='max');
    net['conv3_1'] = batch_norm(layers.Conv2DLayer(net['pool2'], 256, filter_size=(3,3), stride=1, pad=1, nonlinearity=leaky_rectify));
    net['conv3_2'] = batch_norm(layers.Conv2DLayer(net['conv3_1'], 256, filter_size=(3,3), stride=1, pad=1, nonlinearity=leaky_rectify));
    net['conv3_3'] = batch_norm(layers.Conv2DLayer(net['conv3_2'], 256, filter_size=(3,3), stride=1, pad=1, nonlinearity=leaky_rectify));
    net['pool3'] = layers.Pool2DLayer(net['conv3_3'], pool_size=(2,2), stride=2, mode='max');
    net['conv4_1'] = batch_norm(layers.Conv2DLayer(net['pool3'], 512, filter_size=(3,3), stride=1, pad=1, nonlinearity=leaky_rectify));
    net['conv4_2'] = batch_norm(layers.Conv2DLayer(net['conv4_1'], 512, filter_size=(3,3), stride=1, pad=1, nonlinearity=leaky_rectify));
    net['conv4_3'] = batch_norm(layers.Conv2DLayer(net['conv4_2'], 512, filter_size=(3,3), stride=1, pad=1, nonlinearity=leaky_rectify));
    net['pool4'] = layers.Pool2DLayer(net['conv4_3'], pool_size=(2,2), stride=2, mode='max');
    net['conv5_1'] = batch_norm(layers.Conv2DLayer(net['pool4'], 512, filter_size=(3,3), stride=1, pad=1, nonlinearity=leaky_rectify));
    net['conv5_2'] = batch_norm(layers.Conv2DLayer(net['conv5_1'], 512, filter_size=(3,3), stride=1, pad=1, nonlinearity=leaky_rectify));
    net['conv5_3'] = batch_norm(layers.Conv2DLayer(net['conv5_2'], 512, filter_size=(3,3), stride=1, pad=1, nonlinearity=leaky_rectify));
    net['pool5'] = layers.Pool2DLayer(net['conv5_3'], pool_size=(2,2), stride=2, mode='max');
    net['fc6'] = batch_norm(layers.Conv2DLayer(net['pool5'], 4096, filter_size=(7,7), stride=1, pad='same', nonlinearity=leaky_rectify));
    # fc7 is the encoding layer
    net['fc7'] = batch_norm(layers.Conv2DLayer(net['fc6'], 4096, filter_size=(1,1), stride=1, pad='same', nonlinearity=leaky_rectify));
    # Decoding part: each unpoolN is an InverseLayer tied to the matching
    # encoder pool, restoring that pool's pre-pooling spatial size.
    net['fc6_deconv'] = batch_norm(layers.Deconv2DLayer(net['fc7'], 512, filter_size=(7,7), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['unpool5'] = layers.InverseLayer(net['fc6_deconv'], net['pool5']);
    net['deconv5_1'] = batch_norm(layers.Deconv2DLayer(net['unpool5'], 512, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['deconv5_2'] = batch_norm(layers.Deconv2DLayer(net['deconv5_1'], 512, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['deconv5_3'] = batch_norm(layers.Deconv2DLayer(net['deconv5_2'], 512, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['unpool4'] = layers.InverseLayer(net['deconv5_3'], net['pool4']);
    net['deconv4_1'] = batch_norm(layers.Deconv2DLayer(net['unpool4'], 512, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['deconv4_2'] = batch_norm(layers.Deconv2DLayer(net['deconv4_1'], 512, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['deconv4_3'] = batch_norm(layers.Deconv2DLayer(net['deconv4_2'], 256, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['unpool3'] = layers.InverseLayer(net['deconv4_3'], net['pool3']);
    net['deconv3_1'] = batch_norm(layers.Deconv2DLayer(net['unpool3'], 256, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['deconv3_2'] = batch_norm(layers.Deconv2DLayer(net['deconv3_1'], 256, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['deconv3_3'] = batch_norm(layers.Deconv2DLayer(net['deconv3_2'], 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['unpool2'] = layers.InverseLayer(net['deconv3_3'], net['pool2']);
    net['deconv2_1'] = batch_norm(layers.Deconv2DLayer(net['unpool2'], 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['deconv2_2'] = batch_norm(layers.Deconv2DLayer(net['deconv2_1'], 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['unpool1'] = layers.InverseLayer(net['deconv2_2'], net['pool1']);
    net['deconv1_1'] = batch_norm(layers.Deconv2DLayer(net['unpool1'], 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['deconv1_2'] = batch_norm(layers.Deconv2DLayer(net['deconv1_1'], 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    # Segmentation layer: 1x1 conv + sigmoid gives a per-pixel probability.
    net['seg_score'] = layers.Deconv2DLayer(net['deconv1_2'], 1, filter_size=(1,1), stride=1, crop='same', nonlinearity=lasagne.nonlinearities.sigmoid);
    # Flatten to (batch, PS*PS) so squared-error against flattened masks works.
    network = ReshapeLayer(net['seg_score'], ([0], -1));
    output_var = lasagne.layers.get_output(network);
    all_param = lasagne.layers.get_all_params(network, trainable=True);
    return network, input_var, output_var, all_param;
def build_function(network, param_set, input_var, target_var):
    """Compile the Theano train/test functions for ``network``.

    Returns:
        (train_func, test_func): ``train_func(input, target) -> loss`` (with
        parameter updates applied); ``test_func(input, target) ->
        [loss, prediction]`` (deterministic forward pass).
    """
    pred_train = lasagne.layers.get_output(network, deterministic=False);
    pred_test = lasagne.layers.get_output(network, deterministic=True);
    train_loss = lasagne.objectives.squared_error(pred_train, target_var).mean();
    test_loss = lasagne.objectives.squared_error(pred_test, target_var).mean();
    # Nesterov-momentum SGD; LearningRate is a shared variable, so it can be
    # annealed later without recompiling these functions.
    grad_updates = lasagne.updates.nesterov_momentum(train_loss, param_set, learning_rate=LearningRate, momentum=0.985);
    train_func = theano.function([input_var, target_var], train_loss, updates=grad_updates);
    test_func = theano.function([input_var, target_var], [test_loss, pred_test]);
    print("finish building function");
    return train_func, test_func;
def test_all(X_test, y_test, loaded_mu, loaded_sigma, train_func, test_func):
    """Run the full evaluation pass over ``X_test``.

    Stores the normalization statistics in the module globals (the data
    augmentation code reads them from there) and delegates to ``exc_test``.

    Returns:
        (image_array, groundtruth_array, prediction_array)
    """
    global mu, sigma;
    mu = loaded_mu;
    sigma = loaded_sigma;
    # The augmentation channel is unused at test time; feed zeros.
    a_test = np.zeros(shape=(X_test.shape[0], 1), dtype=np.float32);
    return exc_test(test_func, X_test, a_test, y_test);
def exc_test(test_func, X_test, a_test, y_test):
    """Evaluate the network on every image, stitching corner crops together.

    Each APS x APS image is evaluated as four PS x PS corner crops
    (offsets 0 and APS-PS along each axis); overlapping predictions are
    averaged back into a full-size map via the weight arrays.

    Returns:
        (image_array, groundtruth_array, prediction_array), each of length
        X_test.shape[0] at APS x APS resolution.
    """
    print("Start testing...");
    # The two crop offsets per axis: top/left corner and bottom/right corner.
    i_draw_arr = [0, APS - PS];
    j_draw_arr = [0, APS - PS];
    total_error = 0;
    i_batch = 0;
    i_line = 0;  # running row offset into class_array across batches/crops
    image_array = np.empty(shape=(X_test.shape[0], 3, APS, APS), dtype=np.float32);
    prediction_array = np.empty(shape=(X_test.shape[0], APS, APS), dtype=np.float32);
    groundtruth_array = np.empty(shape=(X_test.shape[0], APS, APS), dtype=np.int32);
    # One row per (crop, image) pair; 4 crops per image.
    class_array = np.empty(shape=(len(i_draw_arr)*len(j_draw_arr)*X_test.shape[0], PS**2), dtype=np.float32);
    # NOTE(review): the confusion-matrix counters below are never updated
    # or returned — apparently leftover debug state.
    tr_pos = 0;
    fl_pos = 0;
    fl_neg = 0;
    tr_neg = 0;
    # Debug
    total_pixels = 0;
    begin_img_idx = 0;  # index of the first image of the current batch
    for batch in iterate_minibatches(X_test, a_test, y_test, batchsize, shuffle = False):
        #print begin_img_idx;
        input_real_org, aug_real_org, target_real_org = batch;
        # Accumulators for this batch; the weight arrays count how many crops
        # covered each pixel so overlaps can be averaged afterwards.
        image_patch = np.zeros(shape=(input_real_org.shape[0], 3, APS, APS), dtype=np.float32);
        prediction_patch = np.zeros(shape=(input_real_org.shape[0], APS, APS), dtype=np.float32);
        groundtruth_patch = np.zeros(shape=(input_real_org.shape[0], APS, APS), dtype=np.float32);
        weight_2d = np.zeros(shape=(input_real_org.shape[0], APS, APS), dtype=np.float32);
        mask_2d = np.ones(shape=(input_real_org.shape[0], PS, PS), dtype=np.float32);
        weight_3d = np.zeros(shape=(input_real_org.shape[0], 3, APS, APS), dtype=np.float32);
        mask_3d = np.ones(shape=(input_real_org.shape[0], 3, PS, PS), dtype=np.float32);
        temp_idx = 0;
        for i_draw in i_draw_arr:
            for j_draw in j_draw_arr:
                # Deterministic crop at (i_draw, j_draw); also normalizes
                # with the module-global mu/sigma.
                input_real, target_real = data_aug(X=input_real_org, Y=target_real_org, \
                        mu=mu, sigma=sigma, deterministic=True, \
                        idraw=i_draw, jdraw=j_draw, APS=APS, PS=PS);
                # Network consumes/produces flattened (batch, PS*PS) masks.
                target_real = target_real.reshape(target_real.shape[0], -1);
                error, prediction_real = test_func(input_real, target_real);
                class_res = from_pred_to_class(prediction_real);
                # NOTE(review): class_res_flattened is computed but unused.
                class_res_flattened = class_res.reshape(-1, 1);
                class_array[i_line:i_line+len(prediction_real)] = class_res;
                # Accumulate this crop's contribution into the full-size maps.
                image_patch[:, :,i_draw:i_draw+PS, j_draw:j_draw+PS] += input_real;
                prediction_patch[:, i_draw:i_draw+PS, j_draw:j_draw+PS] += np.reshape(prediction_real, (-1, PS, PS));
                groundtruth_patch[:, i_draw:i_draw+PS, j_draw:j_draw+PS] += np.reshape(target_real, (-1, PS, PS));
                weight_2d[:, i_draw:i_draw+PS, j_draw:j_draw+PS] += mask_2d;
                weight_3d[:, :, i_draw:i_draw+PS, j_draw:j_draw+PS] += mask_3d;
                total_error += error;
                i_batch += 1;
                i_line += len(prediction_real);
                temp_idx += 1;
        # Average overlapping crop contributions pixel-wise.
        image_patch = np.divide(image_patch, weight_3d);
        prediction_patch = np.divide(prediction_patch, weight_2d);
        groundtruth_patch = np.divide(groundtruth_patch, weight_2d);
        image_array[begin_img_idx:begin_img_idx+input_real_org.shape[0]] = image_patch;
        prediction_array[begin_img_idx:begin_img_idx+input_real_org.shape[0]] = prediction_patch;
        groundtruth_array[begin_img_idx:begin_img_idx+input_real_org.shape[0]] = groundtruth_patch;
        begin_img_idx += input_real_org.shape[0];
    return image_array, groundtruth_array, prediction_array;
def from_pred_to_class(pred):
    """Binarize sigmoid predictions with a 0.5 threshold.

    Args:
        pred: array of probabilities in [0, 1].

    Returns:
        int32 array of the same shape: 1 where pred >= 0.5, else 0.
    """
    # The comparison already allocates a fresh boolean array, so the
    # original explicit np.copy of the input was redundant.
    return (pred >= 0.5).astype(np.int32);
def necrosis_predict(X_test, Y_test, loaded_mu, loaded_sigma, param_values, loaded_APS, loaded_PS):
    """Restore a trained deconv network and run it over ``X_test``.

    Args:
        X_test, Y_test: input images and ground-truth masks.
        loaded_mu, loaded_sigma: normalization statistics from training.
        param_values: trained lasagne parameter values to load.
        loaded_APS, loaded_PS: augmented patch size / network patch size.

    Returns:
        (image_array, groundtruth_array, prediction_array) from test_all().

    NOTE(review): the network is built before the global PS is assigned,
    so the InputLayer shape is (None, 3, None, None); presumably relying
    on lasagne's support for unknown spatial dims — confirm.
    """
    # attach additional layers for classification
    network, input_var, output_var, all_param = build_deconv_network_temp();
    # load param values of the encoder
    global mu;
    global sigma;
    global APS;
    global PS;
    mu = loaded_mu;
    sigma = loaded_sigma;
    lasagne.layers.set_all_param_values(network, param_values);
    APS = loaded_APS;
    PS = loaded_PS;
    # build train function
    # you can change the set of params to training by switching between "latter_param" and "all_param" in the command below
    target_var = theano.tensor.imatrix('target_var');
    train_func, test_func = build_function(network, all_param, input_var, target_var);
    # run evaluation: test_all(X_test, y_test, loaded_mu, loaded_sigma, train_func, test_func)
    image_array, groundtruth_array, prediction_array = test_all(X_test, Y_test, mu, sigma, train_func, test_func);
    print("DONE!");
    return image_array, groundtruth_array, prediction_array;
|
from django.db import models
from django.utils.translation import ugettext as _
from .userAuthenticated import UserAuthenticated
from newspaper.models import Journalist, Section, SubSection, Classifield
from django.contrib.auth.models import Group, Permission
class Redator(UserAuthenticated):
    """Editor ("redator") user able to manage journalists, redators,
    news, sections, subsections and classifieds.

    Conventions:
    - ``registering*`` / ``edit*`` methods take a bound Django form and
      return True when it validated and was saved, False otherwise.
    - ``rem*`` / ``deleteNews`` methods return True when the row was
      deleted, False on any failure.
    """

    def _save_form(self, form):
        """Shared helper: save ``form`` if it validates; report success."""
        if form.is_valid():
            form.save()
            return True
        return False

    @staticmethod
    def _delete_by_id(model, pk):
        """Shared helper: delete ``model`` row ``pk``; False on any failure."""
        try:
            model.objects.get(id=pk).delete()
            return True
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; missing rows still just return False.
        except Exception:
            return False

    def registeringJournalist(self, form):
        """Create a Journalist and grant the news/manager permissions."""
        if not form.is_valid():
            return False
        form.save()
        username = form.cleaned_data['username']
        journalist = Journalist.objects.get(username=username)
        keep_news = Permission.objects.get(codename='keep_news')
        access_manager = Permission.objects.get(codename='access_manager')
        journalist.user_permissions.add(keep_news, access_manager)
        return True

    def registeringRedator(self, form):
        """Create a Redator and grant the full management permission set."""
        if not form.is_valid():
            return False
        form.save()
        username = form.cleaned_data['username']
        # Imported locally to avoid a circular import with newspaper.models.
        from newspaper.models import Redator
        redator = Redator.objects.get(username=username)
        codenames = ('keep_journalist', 'keep_redator', 'keep_classifield',
                     'keep_section', 'keep_subsection', 'access_manager')
        permissions = [Permission.objects.get(codename=c) for c in codenames]
        # 'delete_news' can exist more than once (Django's auto-generated
        # model permission plus the one declared in Meta below), so .get()
        # could raise MultipleObjectsReturned; take the first match.
        permissions.append(Permission.objects.filter(codename='delete_news')[0])
        redator.user_permissions.add(*permissions)
        return True

    def editJournalist(self, form):
        """Persist changes to a Journalist; True when the form is valid."""
        return self._save_form(form)

    def editRedator(self, form):
        """Persist changes to a Redator; True when the form is valid."""
        return self._save_form(form)

    def remJournalist(self, id):
        """Delete the Journalist with the given id."""
        return self._delete_by_id(Journalist, id)

    def remRedator(self, id):
        """Delete the Redator with the given id."""
        # Imported locally to avoid a circular import with newspaper.models.
        from newspaper.models import Redator
        return self._delete_by_id(Redator, id)

    def deleteNews(self, id_news):
        """Delete the News item with the given id."""
        # Imported locally to avoid a circular import with newspaper.models.
        from newspaper.models import News
        return self._delete_by_id(News, id_news)

    def registeringSection(self, form):
        """Create a Section; True when the form is valid."""
        return self._save_form(form)

    def editSection(self, form):
        """Persist changes to a Section; True when the form is valid."""
        return self._save_form(form)

    def remSection(self, id_section):
        """Delete the Section with the given id."""
        return self._delete_by_id(Section, id_section)

    def registeringSubSection(self, form):
        """Create a SubSection; True when the form is valid."""
        return self._save_form(form)

    def editSubSection(self, form):
        """Persist changes to a SubSection; True when the form is valid."""
        return self._save_form(form)

    def remSubSection(self, id_subsection):
        """Delete the SubSection with the given id."""
        return self._delete_by_id(SubSection, id_subsection)

    def registeringClassifield(self, form):
        """Create a Classifield; True when the form is valid."""
        return self._save_form(form)

    def remClassifield(self, id_classifield):
        """Delete the Classifield with the given id."""
        return self._delete_by_id(Classifield, id_classifield)

    def editClassifield(self, form):
        """Persist changes to a Classifield; True when the form is valid."""
        return self._save_form(form)

    def __unicode__(self):
        return self.username

    class Meta:
        verbose_name = _("Redator")
        verbose_name_plural = _("Redators")
        # NOTE(review): 'delete_news' likely duplicates Django's
        # auto-generated delete permission for the News model, which is why
        # registeringRedator() uses filter()[0] instead of get().
        permissions = (("keep_journalist", "Keep Journalist"),
                       ("keep_redator", "Keep Redator"),
                       ("keep_classifield", "Keep Classifield"),
                       ("keep_section", "Keep Section"),
                       ("keep_subsection", "Keep SubSection"),
                       ("delete_news", "Delete News"),
                       )
# -*- coding: utf-8 -*-
try:
from django.db.models import QuerySet, FieldDoesNotExist
from django.db.models.fields.related import RelatedField
except ImportError:
QuerySet = None
FieldDoesNotExist = None
class Preparer(object):
    """
    No-op preparation object: passes data through unchanged.

    It also defines the protocol that subclasses should implement to work
    with Restless.
    """
    def __init__(self):
        super(Preparer, self).__init__()

    def prepare(self, data):
        """
        Transform the data for output.

        The base implementation is the identity: it returns exactly what it
        was given.
        """
        return data
class FieldsPreparer(Preparer):
    """
    A more complex preparation object, this will return a given set of fields.

    This takes a ``fields`` parameter, which should be a dictionary of
    keys (fieldnames to expose to the user) & values (a dotted lookup path to
    the desired attribute/key on the object).

    Example::

        preparer = FieldsPreparer(fields={
            # ``user`` is the key the client will see.
            # ``author.pk`` is the dotted path lookup ``FieldsPreparer``
            # will traverse on the data to return a value.
            'user': 'author.pk',
        })
    """
    def __init__(self, fields):
        super(FieldsPreparer, self).__init__()
        # Mapping of output field name -> dotted lookup path, or a nested
        # FieldsPreparer for sub-objects / collections.
        self.fields = fields

    def prepare(self, data):
        """
        Handles transforming the provided data into the fielded data that should
        be exposed to the end user.

        Uses the ``lookup_data`` method to traverse dotted paths.

        Returns a dictionary of data as the response.
        """
        result = {}

        if not self.fields:
            # No fields configured: pass the data straight through.
            return data

        for fieldname, lookup in self.fields.items():
            # The lookup may itself be a FieldsPreparer for nested objects.
            if isinstance(lookup, FieldsPreparer):
                preparer = lookup
                sub_data = None
                if hasattr(data, fieldname):
                    sub_data = getattr(data, fieldname)
                elif hasattr(data, '__getitem__'):
                    # Generalization: previously only object attributes were
                    # checked, so dict-like parents silently dropped nested
                    # fields. Also support key access here.
                    try:
                        sub_data = data[fieldname]
                    except (KeyError, IndexError, TypeError):
                        sub_data = None
                if sub_data is not None:
                    # Special-case iterables: prepare each element.
                    if hasattr(sub_data, '__iter__'):
                        sub_result = []
                        for sd in sub_data:
                            sub_result.append(preparer.prepare(sd))
                    else:
                        sub_result = preparer.prepare(sub_data)
                    result[fieldname] = sub_result
            else:
                if lookup.endswith('?'):
                    # Trailing '?' marks the lookup as optional: a missing
                    # attribute is skipped instead of raising.
                    lookup = lookup[:-1]
                    try:
                        result[fieldname] = self.lookup_data(lookup, data)
                    except AttributeError:
                        pass
                else:
                    result[fieldname] = self.lookup_data(lookup, data)

        return result

    def lookup_data(self, lookup, data):
        """
        Given a lookup string, attempts to descend through nested data looking for
        the value.

        Can work with either dictionary-alikes or objects (or any combination of
        those).

        Lookups should be a string. If it is a dotted path, it will be split on
        ``.`` & it will traverse through to find the final value. If not, it will
        simply attempt to find either a key or attribute of that name & return it.

        Example::

            >>> data = {
            ...     'type': 'message',
            ...     'greeting': {
            ...         'en': 'hello',
            ...         'fr': 'bonjour',
            ...         'es': 'hola',
            ...     },
            ...     'person': Person(
            ...         name='daniel'
            ...     )
            ... }
            >>> lookup_data('type', data)
            'message'
            >>> lookup_data('greeting.en', data)
            'hello'
            >>> lookup_data('person.name', data)
            'daniel'
        """
        value = data
        parts = lookup.split('.')

        if not parts or not parts[0]:
            return value

        part = parts[0]
        remaining_lookup = '.'.join(parts[1:])

        if hasattr(data, 'keys') and data.keys and hasattr(data, '__getitem__') and data.__getitem__:
            # Dictionary enough for us.
            value = data[part]
        elif data is not None:
            # Assume it's an object.
            value = getattr(data, part)

        # If the looked-up value is a method or function, call it (with no
        # arguments) to obtain the actual value.
        if hasattr(value, '__call__') and value.__call__:
            value = value()

        if not remaining_lookup:
            return value

        # There's more to lookup, so dive in recursively.
        return self.lookup_data(remaining_lookup, value)
import json
import logging
import time
import unittest
import pytest
from tests.cook import mesos, util
@pytest.mark.multi_user
@unittest.skipUnless(util.multi_user_tests_enabled(), 'Requires using multi-user configuration '
                                                      '(e.g., BasicAuth) for Cook Scheduler')
@pytest.mark.timeout(util.DEFAULT_TEST_TIMEOUT_SECS)  # individual test timeout
class MultiUserCookTest(util.CookTest):
    """Integration tests for Cook's multi-user behavior: kill permissions,
    per-user usage accounting, quotas, submission rate limiting, and
    preemption."""

    @classmethod
    def setUpClass(cls):
        cls.cook_url = util.retrieve_cook_url()
        util.init_cook_session(cls.cook_url)

    def setUp(self):
        self.cook_url = type(self).cook_url
        self.mesos_url = util.retrieve_mesos_url()
        self.logger = logging.getLogger(__name__)
        self.user_factory = util.UserFactory(self)

    def test_job_delete_permission(self):
        """A user must not be able to kill another user's job."""
        user1, user2 = self.user_factory.new_users(2)
        with user1:
            job_uuid, resp = util.submit_job(self.cook_url, command='sleep 30')
        try:
            self.assertEqual(resp.status_code, 201, resp.text)
            with user2:
                resp = util.kill_jobs(self.cook_url, [job_uuid], expected_status_code=403)
                self.assertEqual(f'You are not authorized to kill the following jobs: {job_uuid}',
                                 resp.json()['error'])
            with user1:
                util.kill_jobs(self.cook_url, [job_uuid])
                job = util.wait_for_job(self.cook_url, job_uuid, 'completed')
                self.assertEqual('failed', job['state'])
        finally:
            with user1:
                util.kill_jobs(self.cook_url, [job_uuid], assert_response=False)

    def test_group_delete_permission(self):
        """A user must not be able to kill another user's job group."""
        user1, user2 = self.user_factory.new_users(2)
        with user1:
            group_spec = util.minimal_group()
            group_uuid = group_spec['uuid']
            job_uuid, resp = util.submit_job(self.cook_url, command='sleep 30', group=group_uuid)
        try:
            self.assertEqual(resp.status_code, 201, resp.text)
            with user2:
                util.kill_groups(self.cook_url, [group_uuid], expected_status_code=403)
            with user1:
                util.kill_groups(self.cook_url, [group_uuid])
                job = util.wait_for_job(self.cook_url, job_uuid, 'completed')
                self.assertEqual('failed', job['state'])
        finally:
            with user1:
                util.kill_jobs(self.cook_url, [job_uuid], assert_response=False)

    def test_multi_user_usage(self):
        """Per-user usage totals reflect each user's running jobs."""
        users = self.user_factory.new_users(6)
        job_resources = {'cpus': 0.1, 'mem': 123}
        all_job_uuids = []
        pools, _ = util.all_pools(self.cook_url)
        try:
            # Start jobs for several users (user i runs i jobs)
            for i, user in enumerate(users):
                with user:
                    for j in range(i):
                        job_uuid, resp = util.submit_job(self.cook_url, command='sleep 480',
                                                         max_retries=2, **job_resources)
                        self.assertEqual(resp.status_code, 201, resp.content)
                        all_job_uuids.append(job_uuid)
                        job = util.load_job(self.cook_url, job_uuid)
                        self.assertEqual(user.name, job['user'], job)
            # Don't query until the jobs are all running
            util.wait_for_jobs(self.cook_url, all_job_uuids, 'running')
            # Check the usage for each of our users
            for i, user in enumerate(users):
                with user:
                    # Get the current usage
                    resp = util.user_current_usage(self.cook_url, user=user.name)
                    self.assertEqual(resp.status_code, 200, resp.content)
                    usage_data = resp.json()
                    # Check that the response structure looks as expected
                    if pools:
                        self.assertEqual(list(usage_data.keys()), ['total_usage', 'pools'], usage_data)
                    else:
                        self.assertEqual(list(usage_data.keys()), ['total_usage'], usage_data)
                    self.assertEqual(len(usage_data['total_usage']), 4, usage_data)
                    # Check that each user's usage is as expected
                    self.assertEqual(usage_data['total_usage']['mem'], job_resources['mem'] * i, usage_data)
                    self.assertEqual(usage_data['total_usage']['cpus'], job_resources['cpus'] * i, usage_data)
                    self.assertEqual(usage_data['total_usage']['gpus'], 0, usage_data)
                    self.assertEqual(usage_data['total_usage']['jobs'], i, usage_data)
        finally:
            for job_uuid in all_job_uuids:
                job = util.load_job(self.cook_url, job_uuid)
                for instance in job['instances']:
                    if instance['status'] == 'failed':
                        mesos.dump_sandbox_files(util.session, instance, job)
            # Terminate all of the jobs
            if all_job_uuids:
                with self.user_factory.admin():
                    util.kill_jobs(self.cook_url, all_job_uuids, assert_response=False)

    def test_job_cpu_quota(self):
        """CPU quota: zero blocks submission, tiny quota caps job size,
        reset restores defaults, negative values are rejected."""
        admin = self.user_factory.admin()
        user = self.user_factory.new_user()
        all_job_uuids = []
        try:
            # User with no quota can't submit jobs
            with admin:
                resp = util.set_limit(self.cook_url, 'quota', user.name, cpus=0)
                self.assertEqual(resp.status_code, 201, resp.text)
            with user:
                _, resp = util.submit_job(self.cook_url)
                self.assertEqual(resp.status_code, 422, msg=resp.text)
            # User with tiny quota can't submit bigger jobs, but can submit tiny jobs
            with admin:
                resp = util.set_limit(self.cook_url, 'quota', user.name, cpus=0.25)
                self.assertEqual(resp.status_code, 201, resp.text)
            with user:
                _, resp = util.submit_job(self.cook_url, cpus=0.5)
                self.assertEqual(resp.status_code, 422, msg=resp.text)
                job_uuid, resp = util.submit_job(self.cook_url, cpus=0.25)
                self.assertEqual(resp.status_code, 201, msg=resp.text)
                all_job_uuids.append(job_uuid)
            # Reset user's quota back to default, then user can submit jobs again
            with admin:
                resp = util.reset_limit(self.cook_url, 'quota', user.name, reason=self.current_name())
                self.assertEqual(resp.status_code, 204, resp.text)
            with user:
                job_uuid, resp = util.submit_job(self.cook_url)
                self.assertEqual(resp.status_code, 201, msg=resp.text)
                all_job_uuids.append(job_uuid)
            # Can't set negative quota
            with admin:
                resp = util.set_limit(self.cook_url, 'quota', user.name, cpus=-4)
                self.assertEqual(resp.status_code, 400, resp.text)
        finally:
            with admin:
                util.kill_jobs(self.cook_url, all_job_uuids, assert_response=False)
                util.reset_limit(self.cook_url, 'quota', user.name, reason=self.current_name())

    def test_job_mem_quota(self):
        """Memory quota: zero blocks submission, tiny quota caps job size,
        reset restores defaults, negative values are rejected."""
        admin = self.user_factory.admin()
        user = self.user_factory.new_user()
        all_job_uuids = []
        try:
            # User with no quota can't submit jobs
            with admin:
                resp = util.set_limit(self.cook_url, 'quota', user.name, mem=0)
                self.assertEqual(resp.status_code, 201, resp.text)
            with user:
                _, resp = util.submit_job(self.cook_url)
                self.assertEqual(resp.status_code, 422, msg=resp.text)
            # User with tiny quota can't submit bigger jobs, but can submit tiny jobs
            with admin:
                resp = util.set_limit(self.cook_url, 'quota', user.name, mem=10)
                self.assertEqual(resp.status_code, 201, resp.text)
            with user:
                _, resp = util.submit_job(self.cook_url, mem=11)
                self.assertEqual(resp.status_code, 422, msg=resp.text)
                job_uuid, resp = util.submit_job(self.cook_url, mem=10)
                self.assertEqual(resp.status_code, 201, msg=resp.text)
                all_job_uuids.append(job_uuid)
            # Reset user's quota back to default, then user can submit jobs again
            with admin:
                resp = util.reset_limit(self.cook_url, 'quota', user.name, reason=self.current_name())
                self.assertEqual(resp.status_code, 204, resp.text)
            with user:
                job_uuid, resp = util.submit_job(self.cook_url)
                self.assertEqual(resp.status_code, 201, msg=resp.text)
                all_job_uuids.append(job_uuid)
            # Can't set negative quota
            with admin:
                resp = util.set_limit(self.cook_url, 'quota', user.name, mem=-128)
                self.assertEqual(resp.status_code, 400, resp.text)
        finally:
            with admin:
                util.kill_jobs(self.cook_url, all_job_uuids, assert_response=False)
                util.reset_limit(self.cook_url, 'quota', user.name, reason=self.current_name())

    def test_job_count_quota(self):
        """Job-count quota: zero blocks submission, reset restores defaults,
        negative values are rejected."""
        admin = self.user_factory.admin()
        user = self.user_factory.new_user()
        all_job_uuids = []
        try:
            # User with no quota can't submit jobs
            with admin:
                resp = util.set_limit(self.cook_url, 'quota', user.name, count=0)
                self.assertEqual(resp.status_code, 201, resp.text)
            with user:
                _, resp = util.submit_job(self.cook_url)
                self.assertEqual(resp.status_code, 422, msg=resp.text)
            # Reset user's quota back to default, then user can submit jobs again
            with admin:
                resp = util.reset_limit(self.cook_url, 'quota', user.name, reason=self.current_name())
                self.assertEqual(resp.status_code, 204, resp.text)
            with user:
                job_uuid, resp = util.submit_job(self.cook_url)
                self.assertEqual(resp.status_code, 201, msg=resp.text)
                all_job_uuids.append(job_uuid)
            # Can't set negative quota
            with admin:
                resp = util.set_limit(self.cook_url, 'quota', user.name, count=-1)
                self.assertEqual(resp.status_code, 400, resp.text)
        finally:
            with admin:
                util.kill_jobs(self.cook_url, all_job_uuids, assert_response=False)
                util.reset_limit(self.cook_url, 'quota', user.name, reason=self.current_name())

    def test_rate_limit_while_creating_job(self):
        """The submission rate limit must cut a user off and recover as the
        token bucket replenishes."""
        # Make sure the rate limit cuts a user off.
        if util.settings(self.cook_url)['rate-limit']['job-submission'] is None:
            # pytest.skip() raises by itself; the original `raise` was dead code.
            pytest.skip()
        user = self.user_factory.new_user()
        bucket_size = util.settings(self.cook_url)['rate-limit']['job-submission']['bucket-size']
        extra_size = replenishment_rate = util.settings(self.cook_url)['rate-limit']['job-submission']['tokens-replenished-per-minute']
        if extra_size < 100:
            extra_size = 100
        if bucket_size > 3000 or extra_size > 1000:
            pytest.skip()  # Don't run if we'd have to create a whole lot of jobs to run the test.
        with user:
            jobs_to_kill = []
            try:
                # First, empty most but not all of the token bucket.
                jobs1, resp1 = util.submit_jobs(self.cook_url, {}, bucket_size - 60)
                jobs_to_kill.extend(jobs1)
                self.assertEqual(resp1.status_code, 201)
                # Then submit extra_size + 60 more to drive the bucket well negative.
                jobs2, resp2 = util.submit_jobs(self.cook_url, {}, extra_size + 60)
                jobs_to_kill.extend(jobs2)
                self.assertEqual(resp2.status_code, 201)
                # And finally a request that gets cut off.
                jobs3, resp3 = util.submit_jobs(self.cook_url, {}, 10)
                self.assertEqual(resp3.status_code, 400)
                # The timestamp can change so we should only match on the prefix.
                expected_prefix = f'User {user.name} is inserting too quickly. Not allowed to insert for'
                self.assertEqual(resp3.json()['error'][:len(expected_prefix)], expected_prefix)
                # Earn back 70 seconds of tokens.
                time.sleep(70.0 * extra_size / replenishment_rate)
                jobs4, resp4 = util.submit_jobs(self.cook_url, {}, 10)
                jobs_to_kill.extend(jobs4)
                self.assertEqual(resp4.status_code, 201)
            finally:
                util.kill_jobs(self.cook_url, jobs_to_kill)

    def trigger_preemption(self, pool):
        """
        Triggers preemption on the provided pool (which can be None) by doing the following:

        1. Choose a user, X
        2. Lower X's cpu share to 0.1 and cpu quota to 1.0
        3. Submit a job, J1, from X with 1.0 cpu and priority 99 (fills the cpu quota)
        4. Wait for J1 to start running
        5. Submit a job, J2, from X with 0.1 cpu and priority 100
        6. Wait until J1 is preempted (to make room for J2)
        """
        admin = self.user_factory.admin()
        user = self.user_factory.new_user()
        all_job_uuids = []
        try:
            small_cpus = 0.1
            large_cpus = small_cpus * 10
            with admin:
                # Lower the user's cpu share and quota
                util.set_limit(self.cook_url, 'share', user.name, cpus=small_cpus, pool=pool)
                util.set_limit(self.cook_url, 'quota', user.name, cpus=large_cpus, pool=pool)
            with user:
                # Submit a large job that fills up the user's quota
                base_priority = 99
                command = 'sleep 600'
                uuid_large, _ = util.submit_job(self.cook_url, priority=base_priority,
                                                cpus=large_cpus, command=command, pool=pool)
                all_job_uuids.append(uuid_large)
                util.wait_for_running_instance(self.cook_url, uuid_large)
                # Submit a higher-priority job that should trigger preemption
                uuid_high_priority, _ = util.submit_job(self.cook_url, priority=base_priority + 1,
                                                        cpus=small_cpus, command=command,
                                                        name='higher_priority_job', pool=pool)
                all_job_uuids.append(uuid_high_priority)

                # Assert that the lower-priority job was preempted
                def low_priority_job():
                    """Reload J1, logging the user's running/waiting jobs for debugging."""
                    job = util.load_job(self.cook_url, uuid_large)
                    one_hour_in_millis = 60 * 60 * 1000
                    start = util.current_milli_time() - one_hour_in_millis
                    end = util.current_milli_time()
                    running = util.jobs(self.cook_url, user=user.name, state='running', start=start, end=end).json()
                    waiting = util.jobs(self.cook_url, user=user.name, state='waiting', start=start, end=end).json()
                    self.logger.info(f'Currently running jobs: {json.dumps(running, indent=2)}')
                    self.logger.info(f'Currently waiting jobs: {json.dumps(waiting, indent=2)}')
                    return job

                def job_was_preempted(job):
                    """True when any instance of ``job`` was killed by the rebalancer."""
                    for instance in job['instances']:
                        self.logger.debug(f'Checking if instance was preempted: {instance}')
                        if instance.get('reason_string') == 'Preempted by rebalancer':
                            return True
                    self.logger.info(f'Job has not been preempted: {job}')
                    return False

                max_wait_ms = util.settings(self.cook_url)['rebalancer']['interval-seconds'] * 1000 * 1.5
                self.logger.info(f'Waiting up to {max_wait_ms} milliseconds for preemption to happen')
                util.wait_until(low_priority_job, job_was_preempted, max_wait_ms=max_wait_ms, wait_interval_ms=5000)
        finally:
            with admin:
                util.kill_jobs(self.cook_url, all_job_uuids, assert_response=False)
                util.reset_limit(self.cook_url, 'share', user.name, reason=self.current_name(), pool=pool)
                util.reset_limit(self.cook_url, 'quota', user.name, reason=self.current_name(), pool=pool)

    @unittest.skipUnless(util.is_preemption_enabled(), 'Preemption is not enabled on the cluster')
    @pytest.mark.serial
    def test_preemption_basic(self):
        """Preemption works in the default (None) pool."""
        self.trigger_preemption(pool=None)

    @unittest.skipUnless(util.is_preemption_enabled(), 'Preemption is not enabled on the cluster')
    @unittest.skipUnless(util.are_pools_enabled(), 'Pools are not enabled on the cluster')
    @pytest.mark.serial
    def test_preemption_for_pools(self):
        """Preemption works in every active pool."""
        pools, _ = util.active_pools(self.cook_url)
        self.assertLess(0, len(pools))
        for pool in pools:
            self.logger.info(f'Triggering preemption for {pool}')
            self.trigger_preemption(pool=pool['name'])

    @unittest.skipUnless(util.are_pools_enabled(), "Requires pools")
    def test_user_total_usage(self):
        """Total usage aggregates one running job per active pool."""
        user = self.user_factory.new_user()
        with user:
            job_spec = {'cpus': 0.11, 'mem': 123, 'command': 'sleep 600'}
            pools, _ = util.active_pools(self.cook_url)
            job_uuids = []
            try:
                for pool in pools:
                    job_uuid, resp = util.submit_job(self.cook_url, pool=pool['name'], **job_spec)
                    self.assertEqual(201, resp.status_code, resp.text)
                    job_uuids.append(job_uuid)
                util.wait_for_jobs(self.cook_url, job_uuids, 'running')
                resp = util.user_current_usage(self.cook_url, user=user.name, group_breakdown='true')
                self.assertEqual(resp.status_code, 200, resp.content)
                usage_data = resp.json()
                total_usage = usage_data['total_usage']
                self.assertEqual(job_spec['mem'] * len(job_uuids), total_usage['mem'], total_usage)
                self.assertEqual(job_spec['cpus'] * len(job_uuids), total_usage['cpus'], total_usage)
                self.assertEqual(len(job_uuids), total_usage['jobs'], total_usage)
            finally:
                util.kill_jobs(self.cook_url, job_uuids)
|
"""Test duplicate behavior."""
import graphene
import pytest
import graphanno
from .test_objects import duplicated
from .utils import to_dict
@graphanno.graph_annotations
class Duplicated:
    """
    Wrapper for the Duplicate class. The name is purposely the same
    as the name of the model, this will cause the name clash
    """
    # Model whose attributes graphanno mirrors onto this ObjectType.
    __model__ = duplicated.Duplicated
    # Model attributes that must not be exposed in the GraphQL schema.
    __excluded_fields__ = ("to_exclude",)
@graphanno.graph_annotations
class DuplicateUser:
    """Wrapper for DuplicateUser."""
    # Model whose attributes graphanno mirrors onto this ObjectType.
    __model__ = duplicated.DuplicateUser
@graphanno.graph_annotations
class DuplicateUser2:
    """
    Second example, the annotations order is now different
    (parent class is annotated first).
    """
    # Model whose annotations are mirrored into this graphene type.
    __model__ = duplicated.DuplicateUser2
@graphanno.graph_annotations
class Duplicated2:
    """
    Wrapper for the Duplicate class. The name is purposely the same as
    the name of the model, this will cause the name clash.
    """
    # Model whose annotations are mirrored into this graphene type.
    __model__ = duplicated.Duplicated2
    # Fields that must never be exposed through the GraphQL schema.
    __excluded_fields__ = ("to_exclude",)
class Query(graphene.ObjectType):
    """Test GraphQL query."""

    user = graphene.Field(DuplicateUser)
    duplicate = graphene.Field(Duplicated)
    user2 = graphene.Field(DuplicateUser2)

    @staticmethod
    def resolve_duplicate(*_):
        """Return the Duplicated object instance"""
        # pylint: disable=attribute-defined-outside-init
        child = duplicated.Duplicated()
        child.name = "duplicated_child"
        return child

    @staticmethod
    def resolve_user(*_):
        """Return the DuplicateUser object instance"""
        # pylint: disable=attribute-defined-outside-init
        parent = duplicated.DuplicateUser()
        parent.duplicate = Query.resolve_duplicate()
        parent.name = "duplicated_parent"
        return parent

    @staticmethod
    def resolve_user2(*_):
        """Return the DuplicateUser2 object instance"""
        # pylint: disable=attribute-defined-outside-init
        parent = duplicated.DuplicateUser2()
        parent.duplicate = Query.resolve_duplicate()
        parent.name = "duplicated_parent"
        return parent
def test_schema():
    """Check the generated graphene types expose the expected fields."""
    # pylint: disable=no-member
    for annotated in (DuplicateUser, Duplicated, DuplicateUser2, Duplicated2):
        assert issubclass(annotated, graphene.ObjectType)
        assert isinstance(annotated.name, graphene.String)
    # Parent wrappers gain a Field for the nested duplicate object.
    for with_child in (DuplicateUser, DuplicateUser2):
        assert isinstance(with_child.duplicate, graphene.Field)
    # Excluded model fields must not surface on the wrappers.
    for excluded in (Duplicated, Duplicated2):
        assert not hasattr(excluded, "to_exclude")
def test_query():
    """Execute a query against the generated schema and verify the payload."""
    schema = graphene.Schema(query=Query)
    result = schema.execute(
        "{duplicate {name}, user {name, duplicate {name} }, "
        "user2 {name, duplicate {name} }}"
    )
    child = {"name": "duplicated_child"}
    parent = {
        "name": "duplicated_parent",
        "duplicate": child,
    }
    assert to_dict(result.data) == {
        "duplicate": child,
        "user": parent,
        "user2": parent,
    }
def test_name_clash():
    """
    If another class with the same name will be annotated,
    the exception will be raised.
    """
    # pylint: disable=redefined-outer-name,unused-variable
    with pytest.raises(graphanno.SchemaClashError) as excinfo:
        # Reuses the name "Duplicated" already registered at module import
        # time above; graphanno must refuse to register a second schema.
        @graphanno.graph_annotations
        class Duplicated:
            """Same name, different class"""
            value: int
    assert (
        excinfo.value.args[0] == 'The schema with name "Duplicated" already exists, '
        "and bases on another class:\n"
        " - Current: tests.test_duplicated.Duplicated\n"
        " - Existing: tests.test_objects.duplicated.Duplicated"
    )
def test_all_annotations_excluded():
    """All annotations will be removed from a duplicated object."""
    # pylint: disable=redefined-outer-name,unused-variable
    with pytest.raises(graphanno.NoAnnotationsError) as excinfo:
        # Excluding every model field leaves nothing to build a schema from.
        @graphanno.graph_annotations
        class _:
            """Exclude all fields"""
            __model__ = duplicated.Duplicated
            __excluded_fields__ = ("to_exclude", "name")
    assert excinfo.value.args[0] == "No included annotations for class _."
|
# Expected kafka-connect worker properties fixture (hostnames suggest it was
# rendered by an Ansible role under test -- confirm against the test that
# consumes it). Secrets are redacted to <PASSWORD> placeholders in this
# committed copy; comparisons must account for that.
ORIGINAL_SERVER_CONFIG="""
bootstrap.servers=ansiblebroker1.example.com:9092
config.storage.replication.factor=3
config.storage.topic=connect-cluster-configs
confluent.license.topic=_confluent-command
confluent.metadata.basic.auth.user.info=connect_worker:<PASSWORD>
confluent.metadata.bootstrap.server.urls=https://ansiblebroker1.example.com:8090
confluent.metadata.http.auth.credentials.provider=BASIC
confluent.monitoring.interceptor.topic=_confluent-monitoring
connector.client.config.override.policy=All
consumer.bootstrap.servers=ansiblebroker1.example.com:9092
consumer.confluent.monitoring.interceptor.bootstrap.servers=ansiblebroker1.example.com:9092
consumer.confluent.monitoring.interceptor.sasl.jaas.config=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required username="connect_worker" password="<PASSWORD>" metadataServerUrls="https://ansiblebroker1.example.com:8090";
consumer.confluent.monitoring.interceptor.sasl.login.callback.handler.class=io.confluent.kafka.clients.plugins.auth.token.TokenUserLoginCallbackHandler
consumer.confluent.monitoring.interceptor.sasl.mechanism=OAUTHBEARER
consumer.confluent.monitoring.interceptor.security.protocol=SASL_SSL
consumer.confluent.monitoring.interceptor.ssl.truststore.location=/var/ssl/private/kafka_connect.truststore.jks
consumer.confluent.monitoring.interceptor.ssl.truststore.password=<PASSWORD>
consumer.interceptor.classes=io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor
consumer.sasl.login.callback.handler.class=io.confluent.kafka.clients.plugins.auth.token.TokenUserLoginCallbackHandler
consumer.sasl.mechanism=OAUTHBEARER
consumer.security.protocol=SASL_SSL
consumer.ssl.truststore.location=/var/ssl/private/kafka_connect.truststore.jks
consumer.ssl.truststore.password=<PASSWORD>keystorestorepass
group.id=connect-cluster
internal.key.converter=org.apache.kafka.connect.json.JsonConverter
internal.key.converter.schemas.enable=false
internal.value.converter=org.apache.kafka.connect.json.JsonConverter
internal.value.converter.schemas.enable=false
key.converter=io.confluent.connect.avro.AvroConverter
key.converter.schema.registry.ssl.key.password=<PASSWORD>
key.converter.schema.registry.ssl.keystore.location=/var/ssl/private/kafka_connect.keystore.jks
key.converter.schema.registry.ssl.keystore.password=<PASSWORD>
key.converter.schema.registry.ssl.truststore.location=/var/ssl/private/kafka_connect.truststore.jks
key.converter.schema.registry.ssl.truststore.password=<PASSWORD>
key.converter.schema.registry.url=https://ansibleschemaregistry1.example.com:8081
listeners=https://0.0.0.0:8083
listeners.https.ssl.key.password=<PASSWORD>
listeners.https.ssl.keystore.location=/var/ssl/private/kafka_connect.keystore.jks
listeners.https.ssl.keystore.password=<PASSWORD>
listeners.https.ssl.truststore.location=/var/ssl/private/kafka_connect.truststore.jks
listeners.https.ssl.truststore.password=<PASSWORD>
offset.flush.interval.ms=10000
offset.storage.replication.factor=3
offset.storage.topic=connect-cluster-offsets
plugin.path=/usr/share/java
producer.bootstrap.servers=ansiblebroker1.example.com:9092
producer.confluent.monitoring.interceptor.bootstrap.servers=ansiblebroker1.example.com:9092
producer.confluent.monitoring.interceptor.sasl.jaas.config=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required username="connect_worker" password="<PASSWORD>" metadataServerUrls="https://ansiblebroker1.example.com:8090";
producer.confluent.monitoring.interceptor.sasl.login.callback.handler.class=io.confluent.kafka.clients.plugins.auth.token.TokenUserLoginCallbackHandler
producer.confluent.monitoring.interceptor.sasl.mechanism=OAUTHBEARER
producer.confluent.monitoring.interceptor.security.protocol=SASL_SSL
producer.confluent.monitoring.interceptor.ssl.truststore.location=/var/ssl/private/kafka_connect.truststore.jks
producer.confluent.monitoring.interceptor.ssl.truststore.password=<PASSWORD>
producer.interceptor.classes=io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor
producer.sasl.login.callback.handler.class=io.confluent.kafka.clients.plugins.auth.token.TokenUserLoginCallbackHandler
producer.sasl.mechanism=OAUTHBEARER
producer.security.protocol=SASL_SSL
producer.ssl.truststore.location=/var/ssl/private/kafka_connect.truststore.jks
producer.ssl.truststore.password=<PASSWORD>
public.key.path=/var/ssl/private/public.pem
rest.advertised.host.name=ansibleconnect1.example.com
rest.advertised.listener=https
rest.advertised.port=8083
rest.extension.classes=io.confluent.connect.security.ConnectSecurityExtension,io.confluent.connect.secretregistry.ConnectSecretRegistryExtension
rest.port=8083
rest.servlet.initializor.classes=io.confluent.common.security.jetty.initializer.InstallBearerOrBasicSecurityHandler
sasl.jaas.config=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required username="connect_worker" password="<PASSWORD>" metadataServerUrls="https://ansiblebroker1.example.com:8090";
sasl.login.callback.handler.class=io.confluent.kafka.clients.plugins.auth.token.TokenUserLoginCallbackHandler
sasl.mechanism=OAUTHBEARER
security.protocol=SASL_SSL
ssl.truststore.location=/var/ssl/private/kafka_connect.truststore.jks
ssl.truststore.password=<PASSWORD>
status.storage.replication.factor=3
status.storage.topic=connect-cluster-status
value.converter=io.confluent.connect.avro.AvroConverter
value.converter.schema.registry.ssl.key.password=<PASSWORD>
value.converter.schema.registry.ssl.keystore.location=/var/ssl/private/kafka_connect.keystore.jks
value.converter.schema.registry.ssl.keystore.password=<PASSWORD>
value.converter.schema.registry.ssl.truststore.location=/var/ssl/private/kafka_connect.truststore.jks
value.converter.schema.registry.ssl.truststore.password=<PASSWORD>
value.converter.schema.registry.url=https://ansibleschemaregistry1.example.com:8081""" # noqa
|
# DISCLAIMER: This is jank
from __future__ import print_function
import argparse
import csv
import json
import os
import zerorpc
from collections import defaultdict
from flask import Flask, flash, jsonify, redirect, render_template, url_for
from werkzeug.exceptions import NotFound
from poketrainer.poke_lvl_data import TCPM_VALS
from poketrainer.pokemon import Pokemon
class ReverseProxied(object):
    """WSGI middleware that honors X-Script-Name / X-Scheme headers.

    Lets the wrapped app generate correct URLs when it is served behind a
    reverse proxy that mounts it under a sub-path and/or terminates TLS.
    """

    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        prefix = environ.get('HTTP_X_SCRIPT_NAME', '')
        if prefix:
            environ['SCRIPT_NAME'] = prefix
            # Strip the mount prefix so routing sees the app-relative path.
            path = environ['PATH_INFO']
            if path.startswith(prefix):
                environ['PATH_INFO'] = path[len(prefix):]
        forwarded_scheme = environ.get('HTTP_X_SCHEME', '')
        if forwarded_scheme:
            environ['wsgi.url_scheme'] = forwarded_scheme
        return self.app(environ, start_response)
app = Flask(__name__, template_folder="web/templates", static_folder='web/static')
app.wsgi_app = ReverseProxied(app.wsgi_app)
# Fix: the committed secret key had been redacted (`<KEY>"`), leaving a syntax
# error. Load it from the environment instead of hard-coding a secret in source.
app.secret_key = os.environ.get("POKETRAINER_SECRET_KEY", "dev-secret-change-me")
app.debug = True

# Per-request display options, populated by set_columns_to_ignore()/status().
options = {}
# Map attack id -> move name, loaded once at import from the bundled TSV.
attacks = {}
with open("resources" + os.sep + "GAME_ATTACKS_v0_1.tsv") as tsv:
    reader = csv.DictReader(tsv, delimiter='\t')
    for row in reader:
        attacks[int(row["Num"])] = row["Move"]
def init_config(username):
    """Return the account entry matching *username* from config.json.

    Fix: the original indexed the result of ``filter(...)``, which is a lazy
    iterator on Python 3 and raises TypeError. A list comprehension restores
    the intended behavior (IndexError when no account matches, as before).
    """
    config_file = "config.json"
    config_data = {}
    if os.path.isfile(config_file):
        with open(config_file) as data:
            config_data.update(json.load(data))
    matches = [acct for acct in config_data.get('accounts', [])
               if acct.get('username') == username]
    return matches[0]
def set_columns_to_ignore(columns_to_ignore):
    """Populate the global ``options`` dict with per-column CSS snippets.

    Every ``ignore_*`` key is reset to the empty string, then the key for each
    requested column title is set to ``display: none;`` so templates can splice
    the value straight into a style attribute.

    Fix: the '#', 'move 1' and 'move 2' columns wrote ``ignore_id`` /
    ``ignore_move1`` / ``ignore_move2`` without those keys ever being
    initialized, so templates could hit a missing key whenever the columns were
    NOT ignored. All written keys are now initialized up front; the legacy
    ``ignore_#`` and ``ignore_moves`` keys are kept for backward compatibility.
    """
    hidden = 'display: none;'
    # Column title (lower-cased) -> options key toggled for that column.
    column_keys = {
        'recent': 'ignore_recent',
        '#': 'ignore_id',
        'name': 'ignore_name',
        'lvl': 'ignore_lvl',
        'score': 'ignore_score',
        'iv': 'ignore_IV',
        'cp': 'ignore_CP',
        'max cp': 'ignore_max_CP',
        'candies': 'ignore_candies',
        'candy needed': 'ignore_candy_needed',
        'dust needed': 'ignore_dust_needed',
        'power up': 'ignore_power_up',
        'stamina': 'ignore_stamina',
        'att iv': 'ignore_attkIV',
        'sta iv': 'ignore_staIV',
        'def iv': 'ignore_defIV',
        'move 1': 'ignore_move1',
        'move 2': 'ignore_move2',
        'transfer': 'ignore_transfer',
    }
    # Reset every key we may write, plus legacy keys templates may still read.
    for key in column_keys.values():
        options[key] = ''
    options['ignore_#'] = ''
    options['ignore_moves'] = ''
    for column in columns_to_ignore:
        key = column_keys.get(column.lower())
        if key is not None:
            options[key] = hidden
def get_api_rpc(username):
    """Connect to the zerorpc endpoint of the bot running as *username*.

    Ports are looked up in the ``.listeners`` registry file next to this
    script. Returns a connected client, or None when no bot is registered
    under that username.
    """
    registry = os.path.join(os.path.dirname(os.path.realpath(__file__)), ".listeners")
    with open(registry) as f:
        raw = f.read()
    listeners = json.loads(raw if raw else '{}')
    if username not in listeners:
        # FIXME Use logger instead of print statements!
        print("There is no bot running with username '%s'!" % username)
        return None
    client = zerorpc.Client()
    client.connect("tcp://127.0.0.1:%i" % int(listeners[username]))
    return client
@app.route("/favicon.ico")
def favicon():
    """Explicitly handle favicon.ico so it doesn't route to the status function."""
    # Raising (not returning) the exception is the canonical Flask way to
    # produce a 404; returning the instance relied on werkzeug accepting an
    # HTTPException as a WSGI app.
    raise NotFound()
@app.route("/<username>")
@app.route("/<username>/status")
def status(username):
    """Render the main status page: player stats, pokemon list, candy counts."""
    c = get_api_rpc(username)
    if c is None:
        return("There is no bot running with username '%s'!" % username)
    config = init_config(username)
    # Display options come from the account's config.json entry.
    options['SCORE_METHOD'] = config.get('POKEMON_CLEANUP', {}).get("SCORE_METHOD", "CP")
    options['IGNORE_COLUMNS'] = config.get("IGNORE_COLUMNS", [])
    set_columns_to_ignore(options['IGNORE_COLUMNS'])
    player_json = json.loads(c.get_player_info())
    # NOTE(review): assumes currencies[1] is the coin/stardust balance shown
    # in the header -- confirm the ordering in the RPC payload.
    currency = player_json['player_data']['currencies'][1]['amount']
    latlng = c.current_location()
    latlng = "%f,%f" % (latlng[0], latlng[1])
    items = json.loads(c.get_inventory())['inventory_items']
    pokemons_data = []
    candy = defaultdict(int)
    # Single pass over the inventory: collect pokemon, player stats and candy.
    for item in items:
        item = item['inventory_item_data']
        pokemon = item.get("pokemon_data", {})
        if "pokemon_id" in pokemon:
            pokemons_data.append(pokemon)
        if 'player_stats' in item:
            player = item['player_stats']
        if "candy" in item:
            # Family ids are zero-padded to match Pokemon.family_id keys.
            filled_family = str(item['candy']['family_id']).zfill(4)
            candy[filled_family] += item['candy'].get("candy", 0)
    # add candy back into pokemon json
    pokemons = []
    for pokemon in pokemons_data:
        # NOTE(review): 'player' is unbound (NameError) if the inventory had
        # no player_stats entry -- confirm the RPC always includes one.
        pkmn = Pokemon(pokemon, player['level'], options['SCORE_METHOD'])
        pkmn.candy = candy[pkmn.family_id]
        pkmn.set_max_cp(TCPM_VALS[int(player['level'] * 2 + 1)])
        pkmn.score = format(pkmn.score, '.2f').rstrip('0').rstrip('.') # makes the value more presentable to the user
        pokemons.append(pkmn)
    player['username'] = player_json['player_data']['username']
    player['level_xp'] = player.get('experience', 0) - player.get('prev_level_xp', 0)
    # hourly_exp only exists in the bot's data dump, not in the RPC payloads.
    with open('./data_dumps/' + str(username) + '.json') as json_data:
        d = json.load(json_data)['GET_PLAYER']['player_data']['hourly_exp']
        player['hourly_exp'] = d # Not showing up in inv or player data
    player['goal_xp'] = player.get('next_level_xp', 0) - player.get('prev_level_xp', 0)
    return render_template('status.html', pokemons=pokemons, player=player, currency="{:,d}".format(currency), candy=candy, latlng=latlng, attacks=attacks, username=username, options=options)
@app.route("/<username>/pokemon")
def pokemon(username):
    """Render the caught-pokemon page for a bot."""
    s = get_api_rpc(username)
    # Fix: 'pokemons' was unbound (NameError at render) when the RPC payload
    # failed to parse; default to an empty list instead.
    pokemons = []
    try:
        pokemons = json.loads(s.get_caught_pokemons())
    except ValueError:
        # FIXME Use logger instead of print statements!
        print("Not valid Json")
    return render_template('pokemon.html', pokemons=pokemons, username=username)
@app.route("/<username>/inventory")
def inventory(username):
    """Render the raw inventory JSON for a bot."""
    s = get_api_rpc(username)
    # Fix: 'inventory' was unbound (NameError at render) when the RPC payload
    # failed to parse; default to an empty dict instead.
    inventory = {}
    try:
        inventory = json.loads(s.get_inventory())
    except ValueError:
        # FIXME Use logger instead of print statements!
        print("Not valid Json")
    return render_template('inventory.html', inventory=json.dumps(inventory, indent=2), username=username)
@app.route("/<username>/transfer/<p_id>")
def transfer(username, p_id):
    """Release (transfer) one pokemon by id, then bounce back to the status page."""
    client = get_api_rpc(username)
    # Short-circuits exactly like the original: no client means no RPC call.
    released = bool(client) and client.release_pokemon_by_id(p_id) == 1
    flash("Released" if released else "Failed!")
    return redirect(url_for('status', username=username))
@app.route("/<username>/snipe/<latlng>")
def snipe(username, latlng):
    """Teleport-snipe a pokemon at "lat,lng" (or "lat lng") coordinates."""
    c = get_api_rpc(username)
    if c is None:
        # Fix: previously fell through and crashed with AttributeError below;
        # report the failure through the same JSON contract as the other paths.
        return jsonify(status=1, result="There is no bot running with username '%s'!" % username)
    try:
        # Accept either comma- or space-separated coordinate pairs.
        parts = latlng.split(',') if ',' in latlng else latlng.split(' ')
        lat = float(parts[0])
        lng = float(parts[1])
    except (ValueError, IndexError):
        # Fix: the bare `except:` also swallowed SystemExit/KeyboardInterrupt.
        return jsonify(status=1, result='Error parsing coordinates.')
    if c.snipe_pokemon(lat, lng):
        msg = "Sniped!"
        status = 0
    else:
        msg = "Failed sniping!"
        status = 1
    return jsonify(status=status, result=msg)
def init_web_config():
    """Resolve web server settings from defaults, web_config.json, then CLI flags.

    Precedence (lowest to highest): built-in defaults, values found in
    web_config.json, command-line arguments.
    """
    settings = {
        "hostname": "0.0.0.0",
        "port": 5000,
        "debug": True
    }
    config_file = "web_config.json"
    # If config file exists, load variables from json
    if os.path.isfile(config_file):
        with open(config_file) as data:
            settings.update(json.load(data))
    # CLI flags win over file/default values.
    parser = argparse.ArgumentParser()
    parser.add_argument("-s", "--hostname", help="Server hostname/IP", default=settings['hostname'])
    parser.add_argument("-p", "--port", help="Server TCP port number", type=int, default=settings['port'])
    parser.add_argument("-d", "--debug", help="Debug Mode", action='store_true', default=settings['debug'])
    return parser.parse_args()
def main():
    """Entry point: resolve config, then launch the Flask dev server (blocking)."""
    web_config = init_web_config()
    app.run(host=web_config.hostname, port=web_config.port, debug=web_config.debug)
if __name__ == "__main__":
    main()
|
from __future__ import absolute_import
import cPickle as pickle
import datetime
import json
import sys
from . import progress
from .backup import BackupFile, NewBackup
from .cache import Slot
from .config import argument_parser, configure, conf, log
from .site import Site
from .storage import Storage
def main(args):
    """Build the CLI parser, register every subcommand, and dispatch."""
    parser = argument_parser()
    subparsers = parser.add_subparsers()
    # Each command class registers itself with the subparser on construction;
    # registration order matches the original listing.
    for command_cls in (
        ExportCommand,
        ExportJsonCommand,
        ExportHistoryCommand,
        ExportObjectHistoryCommand,
        PrintObjectHistoryCommand,
        ConvertToJsonCommand,
        RestoreCommand,
        RestoreHistoryCommand,
        ResetCommand,
    ):
        command_cls(subparsers)
    parsed = parser.parse_args(args)
    configure("tool", redirect_stderr=False)
    progress.enable()
    parsed.func(parsed)
class Command(object):
    """Base class for subcommands: registers itself and its arguments.

    Subclasses define ``name``, ``help`` and optionally ``args`` (a list of
    {"name": ..., "action": ...} specs) and implement ``__call__(args)``.
    """

    args = []

    def __init__(self, subparsers):
        sub = subparsers.add_parser(self.name, help=self.help)
        for spec in self.args:
            sub.add_argument(spec["name"], action=spec["action"])
        # Dispatch: parse_args() exposes the command instance as .func.
        sub.set_defaults(func=self)
class JsonMixin(object):
    # Python 2 module: note the bare `print` statement below.
    def dump_backup_as_json(self, backup):
        """Load *backup* into a Slot and dump its cached data to stdout as JSON."""
        slot = Slot.load_backup(backup)
        # One top-level key (the slot key) mapping object keys to raw model data.
        values = { slot.key: { objkey: modeldata.get() for objkey, modeldata in slot.cachedata.iteritems() } }
        json.dump(values, sys.stdout, indent=True)
        print
class ForceMixin(object):
    """Mixin for destructive commands that require an explicit --force flag."""
    force_arg = dict(name="--force", action="store_true")
    def check_force(self, args):
        # Abort with exit code 1 unless the user passed --force.
        if not args.force:
            print >>sys.stderr, "You must specify --force if your heart is really in it."
            sys.exit(1)
class ExportCommand(Command):
    """Copy a site's cache backup from DynamoDB to stdout."""

    name = "export"
    help = "copy backup from DynamoDB to stdout"
    args = [
        dict(name="sitename", action="store"),
    ]

    def __call__(self, args):
        site_backup = Storage(Site(args.sitename)).get_cache_backup()
        # Exit non-zero when the site has no backup to export.
        if not site_backup:
            sys.exit(1)
        NewBackup(site_backup.load()).dump(sys.stdout)
class ExportJsonCommand(Command, JsonMixin):
    """Print a site's DynamoDB cache backup to stdout as JSON."""

    name = "export-json"
    help = "load backup from DynamoDB and print it to stdout as JSON"
    args = [
        dict(name="sitename", action="store"),
    ]

    def __call__(self, args):
        cache_backup = Storage(Site(args.sitename)).get_cache_backup()
        # Exit non-zero when the site has no backup to export.
        if not cache_backup:
            sys.exit(1)
        self.dump_backup_as_json(cache_backup)
class ExportRowMixin(object):
    # Python 2: the `print` statements emit the pickled payload followed by a
    # blank separator line; RestoreHistoryCommand skips it with file.read(2).
    def export(self, row):
        """Pickle one history row (objkey -> slots) to stdout."""
        values = { row.objkey: row.slots }
        print pickle.dumps(values)
        print
class ExportHistoryCommand(Command, ExportRowMixin):
    """Dump every history row of a site from DynamoDB to stdout (pickled)."""
    name = "export-history"
    help = "copy rows from DynamoDB to stdout as pickled"
    args = [
        dict(name="sitename", action="store"),
    ]
    def __call__(self, args):
        try:
            for row in Storage(Site(args.sitename)).iterate_rows():
                self.export(row)
        except KeyboardInterrupt:
            # Allow Ctrl-C to stop a long export cleanly (Python 2 prints).
            print >>sys.stderr
            print >>sys.stderr, "Interrupted"
class ObjectHistoryMixin(object):
    def get(self, args):
        """Fetch a single raw history row by object key (uses private Storage API)."""
        return Storage(Site(args.sitename))._get(args.objkey)
class ExportObjectHistoryCommand(Command, ObjectHistoryMixin, ExportRowMixin):
    """Dump one object's history row from DynamoDB to stdout (pickled)."""

    name = "export-object-history"
    help = "copy row from DynamoDB to stdout as pickled"
    args = [
        dict(name="sitename", action="store"),
        dict(name="objkey", action="store"),
    ]

    def __call__(self, args):
        matched_row = self.get(args)
        # Silently no-op (as before) when the object key does not exist.
        if matched_row:
            self.export(matched_row)
class PrintObjectHistoryCommand(Command, ObjectHistoryMixin):
    """Pretty-print one object's history row, newest slot first."""
    name = "print-object-history"
    help = "copy row from DynamoDB to stdout in human-readable format"
    args = [
        dict(name="sitename", action="store"),
        dict(name="objkey", action="store"),
    ]
    def __call__(self, args):
        row = self.get(args)
        if row:
            # Row iterates (slotkey, values) pairs; reverse sort = newest first.
            for slotkey, values in sorted(row, reverse=True):
                print "Slot:\t%s" % slotkey
                for k, v in sorted(values.iteritems()):
                    if isinstance(v, dict):
                        # Nested dicts get one indented line per entry; the
                        # trailing comma keeps the key on the same output line.
                        print " %s:" % k,
                        for vk, vv in sorted(v.iteritems()):
                            print "\t%s: %s" % (vk, vv)
                    else:
                        print " %s:\t%s" % (k, v)
                print
class ConvertToJsonCommand(Command, JsonMixin):
    """Convert a local backup file to JSON on stdout (no DynamoDB access)."""

    name = "convert-to-json"
    help = "load backup from FILE and print it to stdout as JSON"
    args = [
        dict(name="filename", action="store")
    ]

    def __call__(self, args):
        local_backup = BackupFile(args.filename)
        self.dump_backup_as_json(local_backup)
class RestoreCommand(Command, ForceMixin):
    """Restore a backup file into DynamoDB (requires --force)."""

    name = "restore"
    help = "load backup from FILE and store it to DynamoDB"
    args = [
        ForceMixin.force_arg,
        dict(name="filename", action="store"),
        dict(name="sitename", action="store"),
    ]

    def __call__(self, args):
        site = Site(args.sitename)
        storage = Storage(site)
        # NOTE: the backup is loaded before the --force gate (kept as-is).
        restored_slot = Slot.load_backup(BackupFile(args.filename))
        self.check_force(args)
        if restored_slot.is_active(site.current_datetime()):
            # A still-active slot becomes the live cache backup.
            storage.insert_cache_backup(site, restored_slot.make_backup())
        elif not restored_slot.store(storage):
            sys.exit(1)
class RestoreHistoryCommand(Command, ForceMixin):
    """Bulk-restore pickled history rows from a file into DynamoDB."""
    name = "restore-history"
    help = "read pickled rows from FILE and store them to DynamoDB"
    args = [
        ForceMixin.force_arg,
        dict(name="sitename", action="store"),
        dict(name="filename", action="store"),
    ]
    def __call__(self, args):
        storage = Storage(Site(args.sitename))
        counter = progress.Counter(interval=100)
        self.check_force(args)
        with open(args.filename) as file:
            try:
                while True:
                    # Each pickle.load consumes one exported row dict.
                    for storekey, slots in pickle.load(file).iteritems():
                        storage._replace(storekey, slots)
                        counter.increment()
                    # Skip the blank separator (two newlines) that
                    # ExportRowMixin writes between pickled records.
                    file.read(2)
            except EOFError:
                # Normal termination: ran off the end of the file.
                pass
        counter.done()
class ResetCommand(Command, ForceMixin):
    """Overwrite a site's cache backup with an empty slot (requires --force)."""

    name = "reset"
    help = "clear the cache backup in DynamoDB"
    args = [
        ForceMixin.force_arg,
        dict(name="sitename", action="store"),
    ]

    def __call__(self, args):
        site = Site(args.sitename)
        storage = Storage(site)
        empty_backup = Slot(site.current_datetime()).make_backup()
        self.check_force(args)
        storage.insert_cache_backup(site, empty_backup)
# Allow running as a script: forward CLI args (minus argv[0]) to main().
if __name__ == "__main__":
    main(sys.argv[1:])
|
# <gh_stars>0  (dataset artifact; commented out -- not valid Python)
import pandas as pd
import numpy as np
import torch
from torch import nn
from torch.autograd import Variable
from sklearn.ensemble import GradientBoostingRegressor
import joblib
import matplotlib.pyplot as plt
from sklearn import metrics
#### Build the LSTM model with torch ####
class lstm(nn.Module):
    """Stacked-LSTM + linear regression head for univariate sequences.

    NOTE(review): the lowercase class name is kept on purpose -- torch.load()
    on 'lstm_200.pt' below unpickles by this exact module-level name.
    """
    def __init__(self,input_size=73,hidden_size=4,output_size=1,num_layer=2): #see the nn.LSTM parameter docs for these arguments
        super(lstm,self).__init__()
        self.layer1 = nn.LSTM(input_size,hidden_size,num_layer) #first stage: num_layer stacked LSTM layers
        self.layer2 = nn.Linear(hidden_size,output_size) #second stage: linear projection
    # Forward pass: flatten (seq, batch) for the linear layer, then restore.
    def forward(self,x):
        x,_ = self.layer1(x)
        s,b,h = x.size()
        x = x.view(s*b,h)
        x = self.layer2(x)
        x = x.view(s,b,-1)
        return x
# Load the target series (a single column) and drop missing rows.
#data=pd.read_csv('/data/home/u20120778/test/data.csv',usecols=[5])
data=pd.read_csv('cddata.csv',usecols=[1])
data=data.dropna()
dataset = data.values # extract the raw values from the csv
dataset = dataset.astype('float32')
max_value = np.max(dataset) # series maximum
min_value = np.min(dataset) # series minimum
scalar = max_value - min_value # value range used for scaling
# Scale by the range. NOTE(review): the minimum is not subtracted first, so
# this is not a true min-max normalization unless min is 0 -- confirm intent.
dataset = list(map(lambda x: x / scalar, dataset))
def create_dataset(dataset, look_back=73):
    """Build sliding-window samples.

    X holds `look_back` consecutive values; Y holds the value immediately
    following each window.
    """
    sample_count = len(dataset) - look_back
    windows = [dataset[start:start + look_back] for start in range(sample_count)]
    targets = [dataset[start + look_back] for start in range(sample_count)]
    return np.array(windows), np.array(targets)
# Build the model inputs/outputs from the sliding windows.
data_X, data_Y = create_dataset(dataset)
# Train/test split: the last 73 samples are held out for testing.
train_size = int(len(data_X)-73)
test_size = len(data_X) - train_size
train_X = data_X[:train_size]
train_Y = data_Y[:train_size]
test_X = data_X[train_size:]
test_Y = data_Y[train_size:]
# Reshape for the LSTM: (seq, batch=1, features=73).
train_X = train_X.reshape(-1, 1, 73)
train_Y = train_Y.reshape(-1, 1, 1)
test_X = test_X.reshape(-1, 1, 73)
train_x = torch.from_numpy(train_X)
train_y = torch.from_numpy(train_Y)
test_x = torch.from_numpy(test_X)
# Flatten for the GBDT: (samples, 73) features and 1-D targets.
GBDT_X=train_X.reshape(-1,73)
GBDT_Y=train_Y.reshape(-1,1).ravel()
GBDT_test_X=test_X.reshape(-1,73)
GDBT_test_Y=test_Y.reshape(-1,1).ravel()
# LSTM prediction using the pre-trained checkpoint.
var_data = Variable(test_x)
new_m = torch.load('lstm_200.pt')
predict = new_m(var_data)
predict = predict.view(-1).data.numpy()
# GBDT prediction using the pre-trained model.
gbr = joblib.load('gbr.pkl')
y_gbr = gbr.predict(GBDT_test_X)
# NOTE(review): assumes gbr was trained with at least 3000 estimators; fewer
# leaves trailing zeros in test_score -- confirm against the training script.
test_score = np.zeros((3000,), dtype=np.float64)
y_pre=[]
for i, y_pred in enumerate(gbr.staged_predict(GBDT_test_X)):
    test_score[i] = gbr.loss_(GDBT_test_Y, y_pred)
    y_pre.append(y_pred)
# LSTM evaluation.
# NOTE(review): predictions (from the held-out tail) are compared against
# dataset[:73], i.e. the FIRST 73 points of the series -- this looks like a
# bug unless the data is periodic with period 73; confirm before trusting
# the reported metrics (same applies to the GBDT block below).
x1=torch.from_numpy(np.array(predict).reshape(-1,1,73))
x2=torch.from_numpy(np.array(dataset[:73*1]).reshape(-1,1,73))
criterion = nn.MSELoss()
loss = criterion(x1,x2)
print('LSTM-MSE:'+str(loss.item())) # MSE
print('LSTM-R2_SCORE:'+str(metrics.r2_score(dataset[:73*1], predict))) # r2_score
# GBDT evaluation (uses the final boosting stage's predictions).
x1=torch.from_numpy(np.array(y_pre[2999]).reshape(-1,1,73))
x2=torch.from_numpy(np.array(dataset[:73*1]).reshape(-1,1,73))
criterion = nn.MSELoss()
loss = criterion(x1,x2)
print('GBDT-MSE:'+str(loss.item())) # MSE
print('GBDT-R2_SCORE:'+str(metrics.r2_score(dataset[:73*1], y_pre[2999]))) # r2_score
# Plot real vs. predicted series.
plt.plot(dataset[:73*1], 'b', label='REAL')
plt.plot(predict, 'r', label='LSTM')
plt.plot(y_pre[2999], 'g', label='GBDT')
plt.legend(loc='best')
plt.show()
#plt.savefig('result.png') |
# scsr_api/scsr/models/test_behaviors.py  (dataset filename artifact, commented out)
import json
import unittest
from mongoengine.connection import _get_db
from scsr.models.elements import ElementAO, ElementDB, ElementReferenceDB, ElementReferenceAO, ElementGameMappingAO, ElementGameMappingDB
from scsr.models.behaviors import BehaviorAO, BehaviorDB
from application import ScsrAPP
from settings import MONGODB_HOST
class BehaviorModelTest(unittest.TestCase):
    @classmethod
    def init_elements(cls):
        """Cache all elements from the DB plus their AO (application-object) forms."""
        cls.elDB=ElementDB.objects # pylint: disable=no-member
        cls.elAO = [el.to_obj() for el in cls.elDB]
    @classmethod
    def setUpClass(cls):
        """One-time fixture: boot a test app, seed reference data, and
        precompute the element-index sets the tests assert against."""
        print("*"*130)
        print(" "*40+"Testing Behavior Model")
        print("*"*130)
        cls.db_name = 'scsr-api-test2'
        cls.app_factory = ScsrAPP(
            MONGODB_SETTINGS = {'DB': cls.db_name,
            'HOST': MONGODB_HOST},
            TESTING = True,
            WTF_CSRF_ENABLED = False,
            SECRET_KEY = 'mySecret!').APP
        cls.app = cls.app_factory.test_client()
        #initialize the database with the countries and languages
        from install_system import start_countries,start_language, start_games, start_genre, start_elements_ontology, start_elements_pigd
        start_countries()
        start_language()
        start_genre()
        start_games()
        start_elements_ontology()
        start_elements_pigd()
        # Element-index fixtures shared by the tests below.
        cls.els1=[1,3,5,7,9]
        cls.els2=[2,3,4,7,8,10,12,11]
        cls.els1_add=[6,13,18]
        cls.els1_rem1=[3]
        cls.els1_rem2=[5,9]
        cls.els1_add2=[20,24,32,42]
        # Expected results of the set algebra exercised in tests 04-09.
        cls.int=list(set(cls.els1) & set(cls.els2))
        cls.uni=list(set(cls.els1) | set(cls.els2))
        cls.xor=list(set(cls.els1) ^ set(cls.els2))
        cls.diff1=list(set(cls.els1) - set(cls.els2))
        cls.diff2=list(set(cls.els2) - set(cls.els1))
        # Expected element sets after the add/remove rounds in tests 10+.
        cls.newels1=list(set(set(cls.els1) | set(cls.els1_add)) - set(cls.els1_rem1))
        cls.newels2=list(set(cls.newels1) - set(cls.els1_rem2) | set(cls.els1_add2))
        cls.init_elements()
        #load the elements
    @classmethod
    def tearDownClass(cls):
        """Drop the whole test database so runs are isolated."""
        db = _get_db()
        db.client.drop_database(db)
    def test_01(self):
        """[summary]
        Create a behavior (store the external_id)
        Validation: Database accuses 1 behavior
        """
        print("test_01")
        beh1=BehaviorAO.create_behavior("LUDIC")
        assert beh1.external_id is not None
        assert BehaviorDB.objects.count() == 1 # pylint: disable=no-member
        for idx in self.els1:
            beh1.add(self.elAO[idx])
        beh1.save()
        assert len(beh1) == len(self.els1)
        # Stash the id on the class so later tests can reload this behavior.
        self.__class__.eid1=beh1.external_id
    def test_02(self):
        print("test_02")
        # NOTE(review): the string below is a plain no-op expression, not a
        # docstring, because it does not come first in the function body.
        """[summary]
        Create a second behavior, of the same type as the 1st (store the external_id)
        Validation: Database accuses 2 behavior
        """
        beh2=BehaviorAO.create_behavior("LUDIC")
        assert beh2.external_id is not None
        assert BehaviorDB.objects.count() == 2 # pylint: disable=no-member
        for idx in self.els2:
            beh2.add(self.elAO[idx])
        beh2.save()
        assert len(beh2) == len(self.els2)
        # Stash the id on the class so later tests can reload this behavior.
        self.__class__.eid2=beh2.external_id
        pass
    def test_03(self):
        print("test_03")
        # NOTE(review): misplaced docstring (no-op string), kept as-is.
        """[summary]
        Create a third behavior, of the different type as the 1st (store the external_id)
        Validation: Database accuses 3 behavior
        """
        behMec=BehaviorAO.create_behavior("MECHANICAL")
        assert behMec.external_id is not None
        assert BehaviorDB.objects.count() == 3 # pylint: disable=no-member
        for idx in self.els2:
            behMec.add(self.elAO[idx])
        behMec.save()
        assert len(behMec) == len(self.els2)
        # Stash the id on the class so later tests can reload this behavior.
        self.__class__.eidMech=behMec.external_id
def test_04(self):
print("test_04")
"""[summary]
And test
"""
beh1=BehaviorAO.get_behavior(self.eid1)
beh2=BehaviorAO.get_behavior(self.eid2)
beh3=beh1 & beh2
assert beh3.external_id == ""
assert len(beh3) == len(self.int)
assert beh3.behavior_type=="LUDIC"
for el in self.int:
assert self.elAO[el] in beh3
for el in beh3.elements:
assert ((el in beh2) and (el in beh1))
erro,msg=beh3.check_save()
assert erro is RuntimeError
self.assertRaises(RuntimeError, beh3.save)
def test_05(self):
print("test_05")
"""[summary]
Or test
"""
beh1=BehaviorAO.get_behavior(self.eid1)
beh2=BehaviorAO.get_behavior(self.eid2)
beh3=beh1 | beh2
assert beh3.external_id == ""
assert len(beh3) == len(self.uni)
assert beh3.behavior_type=="LUDIC"
for el in self.uni:
assert self.elAO[el] in beh3
for el in beh3.elements:
assert ((el in beh2) or (el in beh1))
erro,msg=beh3.check_save()
assert erro is RuntimeError
self.assertRaises(RuntimeError, beh3.save)
def test_06(self):
print("test_06")
"""[summary]
xor test
"""
beh1=BehaviorAO.get_behavior(self.eid1)
beh2=BehaviorAO.get_behavior(self.eid2)
beh3=beh1 ^ beh2
assert beh3.external_id == ""
assert len(beh3) == len(self.xor)
assert beh3.behavior_type=="LUDIC"
for el in self.xor:
assert self.elAO[el] in beh3
for el in beh3.elements:
assert ((el in beh2) and (not (el in beh1))) or ((el in beh1) and (not (el in beh2)))
erro,msg=beh3.check_save()
assert erro is RuntimeError
self.assertRaises(RuntimeError, beh3.save)
def test_07(self):
print("test_07")
"""[summary]
difference test
"""
beh1=BehaviorAO.get_behavior(self.eid1)
beh2=BehaviorAO.get_behavior(self.eid2)
beh3=beh1.difference(beh2)
assert beh3.external_id == ""
assert len(beh3) == len(self.diff1)
assert beh3.behavior_type=="LUDIC"
for el in self.diff1:
assert self.elAO[el] in beh3
for el in beh3.elements:
assert ((el in beh1) and (not (el in beh2)))
erro,msg=beh3.check_save()
assert erro is RuntimeError
self.assertRaises(RuntimeError, beh3.save)
def test_07_1(self):
print("test_07_1")
"""[summary]
difference test
"""
beh1=BehaviorAO.get_behavior(self.eid1)
beh2=BehaviorAO.get_behavior(self.eid2)
beh3=beh2.difference(beh1)
assert beh3.external_id == ""
assert len(beh3) == len(self.diff2)
assert beh3.behavior_type=="LUDIC"
for el in self.diff2:
assert self.elAO[el] in beh3
for el in beh3.elements:
assert ((el in beh2) and (not (el in beh1)))
erro,msg=beh3.check_save()
assert erro is RuntimeError
self.assertRaises(RuntimeError, beh3.save)
def test_08(self):
    """Intersection test: ``beh1.intersection(beh2)``.

    Every resulting element must be in both inputs.
    """
    # FIX: docstring moved before print() so it becomes __doc__.
    print("test_08")
    beh1 = BehaviorAO.get_behavior(self.eid1)
    beh2 = BehaviorAO.get_behavior(self.eid2)
    beh3 = beh1.intersection(beh2)
    assert beh3.external_id == ""
    assert len(beh3) == len(self.int)
    assert beh3.behavior_type == "LUDIC"
    for el in self.int:
        assert self.elAO[el] in beh3
    for el in beh3.elements:
        assert (el in beh2) and (el in beh1)
    err, msg = beh3.check_save()
    assert err is RuntimeError
    self.assertRaises(RuntimeError, beh3.save)
def test_09(self):
    """Union via the ``union`` method (same contract as ``|``)."""
    # FIX: docstring moved before print(); a duplicated second
    # docstring ("Or test") was removed.
    print("test_09")
    beh1 = BehaviorAO.get_behavior(self.eid1)
    beh2 = BehaviorAO.get_behavior(self.eid2)
    beh3 = beh1.union(beh2)
    assert beh3.external_id == ""
    assert len(beh3) == len(self.uni)
    assert beh3.behavior_type == "LUDIC"
    for el in self.uni:
        assert self.elAO[el] in beh3
    for el in beh3.elements:
        assert (el in beh2) or (el in beh1)
    err, msg = beh3.check_save()
    assert err is RuntimeError
    self.assertRaises(RuntimeError, beh3.save)
def test_10(self):
    """Retrieve the 1st behavior and add/remove elements to create a diff.

    Validation: the stored element set reflects the adds/removes and the
    diff history grows by exactly one entry.
    """
    # FIX: two docstrings placed after print() merged into one real one.
    print("test_10")
    beh1 = BehaviorAO.get_behavior(self.eid1)
    currdiffdata = beh1.diffdata
    for el in self.els1_add:
        beh1.add(self.elAO[el])
    for el in self.els1_rem1:
        beh1.discard(self.elAO[el])
    beh1.save()
    assert len(beh1.diffdata) == (len(currdiffdata) + 1)
    for el in self.els1_add:
        assert self.elAO[el] in beh1
    for el in self.els1_rem1:
        assert self.elAO[el] not in beh1
def test_10_1(self):
    """Second round of add/remove on the 1st behavior (new diff entry).

    Validation: the stored element set reflects the adds/removes and the
    diff history grows by exactly one more entry.
    """
    # FIX: two docstrings placed after print() merged into one real one.
    print("test_10_1")
    beh1 = BehaviorAO.get_behavior(self.eid1)
    currdiffdata = beh1.diffdata
    for el in self.els1_add2:
        beh1.add(self.elAO[el])
    for el in self.els1_rem2:
        beh1.discard(self.elAO[el])
    beh1.save()
    assert len(beh1.diffdata) == (len(currdiffdata) + 1)
    for el in self.els1_add2:
        assert self.elAO[el] in beh1
    for el in self.els1_rem2:
        assert self.elAO[el] not in beh1
def test_11(self):
    """Add two behaviors with ``+`` (multiset-style union).

    Quantification: elements unique to one side appear once in the
    result's element_count, shared elements appear twice, and the total
    count equals the sum of both sizes.
    """
    # FIX: three stacked docstrings placed after print() merged into one.
    print("test_11")
    beh1 = BehaviorAO.get_behavior(self.eid1)
    beh2 = BehaviorAO.get_behavior(self.eid2)
    beh3 = beh1 + beh2
    beh4 = beh1 ^ beh2
    beh5 = beh1 & beh2
    assert beh3.external_id == ""
    assert beh3.behavior_type == "LUDIC"
    for el in beh4.elements:
        assert beh3.element_count.count(el) == 1
    for el in beh5.elements:
        assert beh3.element_count.count(el) == 2
    assert len(beh3.element_count) == len(beh1) + len(beh2)
def test_12(self):
    """Mixing behaviors of different behavior types yields "composed"
    results, and in-place difference across types is rejected."""
    mech = BehaviorAO.get_behavior(self.eidMech)
    ludic = BehaviorAO.get_behavior(self.eid1)
    combined = (
        mech & ludic,
        mech | ludic,
        mech ^ ludic,
        mech.difference(ludic),
    )
    for result in combined:
        assert result.behavior_type == "composed"
    self.assertRaises(TypeError, mech.difference_update, ludic)
|
import appdaemon.plugins.hass.hassapi as hass
import datetime
#
# App which toggles entities for single/double/hold presses of Xiaomi buttons connected via deconz
#
# Args:
#
# id: id of the xiaomi button
# actor_single: actor to toggle on single click
# actor_double: actor to toggle on double click
# actor_hold: actor to dim on hold
#
# Release Notes
#
# Version 1.0:
# Initial Version
class DeconzXiaomiButton(hass.Hass):
    """Toggle/dim entities for single/double/hold presses of a Xiaomi
    button connected via deconz.

    Args (from apps.yaml):
        id: deconz id of the xiaomi button
        actor_single: entity to toggle on single click (optional)
        actor_double: entity to toggle on double click (optional)
        actor_hold: entity to dim while the button is held (optional)
    """

    def initialize(self):
        # Handles are kept so terminate() can clean everything up.
        self.listen_event_handle_list = []
        self.timer_handle_list = []
        self.actor_single = self.args.get("actor_single")
        self.actor_double = self.args.get("actor_double")
        self.actor_hold = self.args.get("actor_hold")
        self.id = self.args["id"]
        self.dimmer_timer_handle = None
        self.listen_event_handle_list.append(
            self.listen_event(self.event_detected, "deconz_event")
        )

    def _toggle(self, actor):
        """Toggle *actor* on/off, using a smooth transition for lights."""
        if self.get_state(actor) == "on":
            self.log("Turning {} off".format(actor))
            # Workaround for Yeelight: turn-off transitions are ignored, so
            # dim to 1% first and actually switch off two seconds later.
            # https://community.home-assistant.io/t/transition-for-turn-off-service-doesnt-work-for-yeelight-lightstrip/25333/4
            if actor.startswith("light"):
                self.call_service(
                    "light/turn_on",
                    entity_id=actor,
                    transition=1,
                    brightness_pct=1,
                )
                self.timer_handle_list.append(
                    self.run_in(self.turn_off_workaround, 2, entity_id=actor)
                )
            else:
                self.turn_off(actor)
        if self.get_state(actor) == "off":
            self.log("Turning {} on".format(actor))
            if actor.startswith("light"):
                self.call_service(
                    "light/turn_on",
                    entity_id=actor,
                    transition=1,
                    brightness_pct=100,
                )
            else:
                self.turn_on(actor)

    def event_detected(self, event_name, data, kwargs):
        """Dispatch deconz button events (1002/1004/1001/1003) for our id."""
        if data["id"] != self.id:
            return
        if data["event"] == 1002 and self.actor_single is not None:
            self.log("ButtonClicked: {}".format(data["id"]))
            self._toggle(self.actor_single)
        if data["event"] == 1004 and self.actor_double is not None:
            self.log("Double Button Click: {}".format(data["id"]))
            self.log("Toggling {}".format(self.actor_double))
            # FIX: the original branch checked actor_double's state but then
            # toggled actor_single -- toggle the double-click actor instead.
            self._toggle(self.actor_double)
        if data["event"] == 1001 and self.actor_hold is not None:
            self.log("Long Button Click: {}".format(data["id"]))
            self.log("Starting Dimmer")
            self.dimmer_timer_handle = self.run_every(
                self.dimmer_callback,
                datetime.datetime.now(),
                0.5,
                entity_id=self.actor_hold,
            )
            self.timer_handle_list.append(self.dimmer_timer_handle)
        if data["event"] == 1003 and self.actor_hold is not None:
            self.log("Button Release: {}".format(data["id"]))
            self.log("Stopping Dimmer")
            if self.dimmer_timer_handle is not None:
                self.cancel_timer(self.dimmer_timer_handle)

    def dimmer_callback(self, kwargs):
        """Dim the light up by 10%. If it would go above 100%, restart at 10%."""
        # The brightness attribute is 0-255; convert to a 0-1 fraction.
        brightness_pct_old = (
            int(
                self.get_state(self.actor_hold, attribute="all")["attributes"][
                    "brightness"
                ]
            )
            / 255
        )
        brightness_pct_new = brightness_pct_old + 0.1
        if brightness_pct_new > 1:
            brightness_pct_new = 0.1
        self.call_service(
            "light/turn_on",
            entity_id=kwargs["entity_id"],
            brightness_pct=brightness_pct_new * 100,
        )

    def turn_off_workaround(self, *args):
        """Second stage of the Yeelight workaround: actually switch off.

        FIX: the original always turned off actor_single, even when the
        workaround was triggered for another actor; the target entity is
        now taken from the run_in kwargs when available.
        """
        entity = self.actor_single
        if args and isinstance(args[0], dict):
            entity = args[0].get("entity_id", entity)
        self.call_service("light/turn_off", entity_id=entity)

    def terminate(self):
        # Clean up all registered callbacks and timers on app reload.
        for listen_event_handle in self.listen_event_handle_list:
            self.cancel_listen_event(listen_event_handle)
        for timer_handle in self.timer_handle_list:
            self.cancel_timer(timer_handle)
|
"""
Utility functions for building neural networks in theano.
Most of these are taken from http://deeplearning.net/tutorial/.
"""
from __future__ import print_function
import os
import sys
import timeit
import gzip
import pickle
import numpy as np
import theano
import theano.tensor as T
def load_data(dataset):
    """Load the MNIST dataset, downloading it first if necessary.

    :type dataset: string
    :param dataset: the path to the dataset (here MNIST ``mnist.pkl.gz``)
    :return: tuple ``(train_x, valid_x, test_x)`` of input matrices only
        (the target vectors are dropped at the end).
    """
    # Download the MNIST dataset if it is not present.
    data_dir, data_file = os.path.split(dataset)
    if data_dir == "" and not os.path.isfile(dataset):
        # Check if dataset is in the data directory.
        new_path = os.path.join(
            os.path.split(__file__)[0],
            "..",
            "data",
            dataset
        )
        if os.path.isfile(new_path) or data_file == 'mnist.pkl.gz':
            dataset = new_path

    if (not os.path.isfile(dataset)) and data_file == 'mnist.pkl.gz':
        from six.moves import urllib
        origin = (
            'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'
        )
        print('Downloading data from %s' % origin)
        urllib.request.urlretrieve(origin, dataset)

    print('... loading data')
    # Load the dataset. The pickle was written by Python 2, so Python 3
    # needs encoding='latin1'; Python 2's pickle.load has no ``encoding``
    # argument and raises TypeError.
    # FIX: the original used a bare ``except:``, which would also swallow
    # KeyboardInterrupt and real unpickling errors.
    with gzip.open(dataset, 'rb') as f:
        try:
            train_set, valid_set, test_set = pickle.load(f, encoding='latin1')
        except TypeError:
            train_set, valid_set, test_set = pickle.load(f)
    # train_set, valid_set, test_set format: tuple(input, target).
    # input is a 2-d numpy.ndarray (one example per row); target is a
    # 1-d numpy.ndarray with one label per row of input.
    # Keep only the input matrices.
    train_set, valid_set, test_set = train_set[0], valid_set[0], test_set[0]
    return train_set, valid_set, test_set
def split_img(img):
    """Split a batch of flattened MNIST images into two halves.

    Each row of *img* is one flattened example (e.g. 784 pixels); ``x``
    is the first half of every row (the upper 392 = 784/2 pixels) and
    ``y`` the latter half.

    FIX: the original used ``dim = img.shape[1]``, which made ``x`` the
    whole image and ``y`` an empty slice; the split point must be the
    middle of the row.

    :param img: 2-d numpy array, one flattened image per row
    :return: tuple ``(x, y)`` with the first and second half of each row
    """
    half = img.shape[1] // 2
    x = img[:, :half]
    y = img[:, half:]
    return x, y
class HiddenLayer(object):
    def __init__(self, input_tensor, n_in, n_out, W=None, b=None,
                 activation=T.nnet.relu):
        """
        Hidden layer of a neural network.
        Args:
            input_tensor: a tensor as the input to the layer
            n_in: the number of features of the input tensor
            n_out: the number of features we are mapping to
            W: weight matrix (created and initialized if not given)
            b: bias term (created as zeros if not given)
            activation: activation function; None yields a linear layer
        """
        self.input_tensor = input_tensor
        # `W` is initialized with `W_values` which is uniformely sampled
        # from sqrt(-6./(n_in+n_hidden)) and sqrt(6./(n_in+n_hidden))
        # for tanh activation function
        # the output of uniform if converted using asarray to dtype
        # theano.config.floatX so that the code is runable on GPU
        # Note : optimal initialization of weights is dependent on the
        # activation function used (among other things).
        # For example, results presented in [Xavier10] suggest that you
        # should use 4 times larger initial weights for sigmoid
        # compared to tanh
        # We have no info for other function, so we use the same as
        # tanh. This means that ReLU will be initialized by the same
        # values as for tanh.
        if W is None:
            W_values = np.asarray(
                np.random.uniform(
                    low=-np.sqrt(6. / (n_in + n_out)),
                    high=np.sqrt(6. / (n_in + n_out)),
                    size=(n_in, n_out)
                ),
                dtype=theano.config.floatX
            )
            if activation == theano.tensor.nnet.sigmoid:
                W_values *= 4
            W = theano.shared(value=W_values, name='W', borrow=True)
        if b is None:
            # Biases start at zero; no symmetry-breaking needed for them.
            b_values = np.zeros((n_out,), dtype=theano.config.floatX)
            b = theano.shared(value=b_values, name='b', borrow=True)
        self.W = W
        self.b = b
        # Affine transform; apply the activation only if one was given.
        lin_output = T.dot(input_tensor, self.W) + self.b
        self.output = (
            lin_output if activation is None
            else activation(lin_output)
        )
        # parameters of the model
        self.params = [self.W, self.b]
class EncoderMLP(object):
    def __init__(self, srng, x, n_in, n_hid, ldim):
        """
        Gaussian MLP that handles the encoding q_phi(z | x).
        Outputs a mean tensor mu and the diagonal entries of the
        covariance matrix, sigma2, then samples z with the
        reparameterization trick.
        Args:
            srng: theano shared random number generator
            x: input tensor that we feed into the network
            n_in: number of features of the input vector
            n_hid: number of hidden units
            ldim: latent dimension of the space; dictates size of mu and sigma2
        """
        # Layers: one shared tanh hidden layer, then two linear heads
        # (mean and log-variance).
        self.hidden_layer = HiddenLayer(x, n_in, n_hid, activation=T.tanh)
        self.mu_layer = HiddenLayer(self.hidden_layer.output, n_hid, ldim, activation=None)
        self.logsigma2_layer = HiddenLayer(self.hidden_layer.output, n_hid, ldim, activation=None)
        # Tensors: the log-variance head is exponentiated so sigma2 > 0.
        self.mu = self.mu_layer.output
        self.sigma2 = T.exp(self.logsigma2_layer.output)
        self.sigma = T.sqrt(self.sigma2)
        # Reparameterization trick: z = mu + sigma * eps, eps ~ N(0, I).
        self.eps = srng.normal(self.mu.shape)
        self.output = self.mu + self.sigma * self.eps
        # Parameters
        self.params = self.hidden_layer.params + self.mu_layer.params + self.logsigma2_layer.params
class FactoredGaussianEncoderMLP(object):
    def __init__(self, srng, input_tensor, n_in, n_hid, ldim):
        """
        Gaussian dual MLP for an encoding of the form
        q_phi(z | x, y) = q(z | x) q(z | y).

        Keeps mu and precision (lambda) for the x and y branches and
        samples z from their joint distribution. Precisions are used
        because the product of two diagonal Gaussians is easier to
        express with them than with variances.

        :param srng: theano shared random number generator
        :param input_tensor: tensor with (x, y) concatenated per row;
            the first 392 columns are x and the remaining columns y
            (assumes 784-pixel MNIST rows -- TODO confirm for other data)
        :param n_in: total input dimension; each branch gets n_in // 2
        :param n_hid: size of the hidden layers
        :param ldim: latent dimension of z
        """
        # Split the input into its two halves.
        # FIX: the original used input_tensor[:, :-392] for y which, for
        # 784 columns, selects the FIRST 392 columns again (same as x);
        # y must be the second half.
        x = input_tensor[:, :392]
        y = input_tensor[:, 392:]
        # X part of NN
        self.hidden_layer_x = HiddenLayer(x, n_in//2, n_hid, activation=T.tanh)
        self.mu_layer_x = HiddenLayer(self.hidden_layer_x.output, n_hid, ldim, activation=None)
        self.loglambda_layer_x = HiddenLayer(self.hidden_layer_x.output, n_hid, ldim, activation=None)
        self.mu_x = self.mu_layer_x.output
        self.lambda_x = T.exp(self.loglambda_layer_x.output)
        # Y part of NN
        self.hidden_layer_y = HiddenLayer(y, n_in//2, n_hid, activation=T.tanh)
        self.mu_layer_y = HiddenLayer(self.hidden_layer_y.output, n_hid, ldim, activation=None)
        self.loglambda_layer_y = HiddenLayer(self.hidden_layer_y.output, n_hid, ldim, activation=None)
        self.mu_y = self.mu_layer_y.output
        self.lambda_y = T.exp(self.loglambda_layer_y.output)
        # The distribution q(z | x, y) can be shown to be N(mu, lambda).
        # Diagonal covariance is assumed for q(z | x) and q(z | y), so the
        # precisions are vectors and:
        #   lambda = lambda_x + lambda_y
        #   mu = (lambda_x * mu_x + lambda_y * mu_y) / lambda
        self.lambda_xy = self.lambda_x + self.lambda_y
        self.mu_xy = (self.lambda_x * self.mu_x + self.lambda_y * self.mu_y) / self.lambda_xy
        # Sample z via the reparameterization trick.
        self.eps = srng.normal(self.mu_xy.shape)
        self.sigma_xy = T.sqrt(1.0 / self.lambda_xy)
        self.output = self.mu_xy + self.sigma_xy * self.eps
        # "Fantasize" z from the x branch alone (y masked out).
        self.eps_mask = srng.normal(self.mu_x.shape)
        self.sigma_x = T.sqrt(1.0 / self.lambda_x)
        self.output_mask = self.mu_x + self.sigma_x * self.eps_mask
        # Trainable parameters of both branches.
        self.params = self.hidden_layer_x.params + self.mu_layer_x.params + self.loglambda_layer_x.params
        self.params += self.hidden_layer_y.params + self.mu_layer_y.params + self.loglambda_layer_y.params
class DecoderMLP(object):
    def __init__(self, x, n_in, n_hid, n_out):
        """Decoder MLP producing the network's final (Bernoulli) output.

        Args:
            x: input tensor fed into the network
            n_in: number of features of the input vector
            n_hid: number of hidden units
            n_out: number of output units (Bernoulli parameters)
        """
        hidden = HiddenLayer(x, n_in, n_hid, activation=T.tanh)
        bern = HiddenLayer(hidden.output, n_hid, n_out, activation=T.nnet.sigmoid)
        self.hidden_layer = hidden
        self.bern_layer = bern
        self.params = hidden.params + bern.params
        self.bern = bern.output
# Helper functions with regard to cost functions
def kl_unit_normal(mu, var):
    """Per-example KL divergence D_KL(N(mu, var) || N(0, I)).

    Since the latent prior is unit normal, from the multivariate-normal
    KL formula (and AEVB paper p. 5):
        D_KL = 0.5 * (sum(var) + sum(mu**2) - sum(log(var)) - k)
    where the ``- k`` term is folded into ``sum(1 + log(var))`` below.
    See https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence#Kullback.E2.80.93Leibler_divergence_for_multivariate_normal_distributions
    """
    total_var = T.sum(var, axis=1)
    squared_mean = T.sum(T.sqr(mu), axis=1)
    log_term = T.sum(1 + T.log(var), axis=1)
    return 0.5 * (total_var + squared_mean - log_term)
|
# gh_stars: 1-10
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Hexagon testing infrastructure """
import tvm
import numpy
def ceildiv(o, d):
    # Ceiling division as a TVM expression: ceil(o/d) == floor((o+d-1)/d).
    return tvm.tir.floordiv(o + d - 1, d)
def get_packed_activation_layout(shape_nhwc, block_shape, packed_C=True):
    """Return the blocked layout shape for an NHWC activation.

    H and W are always tiled by the block shape; the channel dimension is
    tiled too when *packed_C* is True, otherwise it stays contiguous and
    only the spatial block dims are appended.
    """
    assert len(shape_nhwc) == 4
    batch, height, width, channels = shape_nhwc
    off_h, off_w, off_c = block_shape
    shape = [batch, ceildiv(height, off_h), ceildiv(width, off_w)]
    if packed_C:
        shape.append(ceildiv(channels, off_c))
        shape.extend(block_shape)
    else:
        shape.extend([off_h, off_w, channels])
    return shape
def get_block_shape():
    """Activation block shape (h, w, c) used by the packed layouts."""
    return (8, 8, 32)
def get_filter_block_shape():
    """Filter block shape (channel-outer, k, channel-inner)."""
    return (8, 32, 4)
def get_packed_filter_layout(out_channel, in_channel, kernel_h, kernel_w):
    """Return the 7-d packed filter layout shape for a conv2d filter."""
    cio, ki, cii = get_filter_block_shape()
    # Full input-channel tile is outer * inner.
    ci = cio * cii
    packed_out = int(ceildiv(out_channel, ki))
    packed_in = int(ceildiv(in_channel, ci))
    return (packed_out, packed_in, kernel_h, kernel_w, cio, ki, cii)
def build_and_run(inputs, func, target, target_host, *args, **kwargs):
    """Build the schedule produced by *func* and run it on *target*.

    *func* must return ``(schedule, placeholders, binds)``; the last
    placeholder describes the output buffer, which is allocated as zeros
    and returned as a numpy array.
    """
    schedule, placeholders, binds = func(*args, **kwargs)
    # FIX: the original rebound the parameter name ``func`` to the built
    # module, shadowing the schedule-producing callable.
    built = tvm.build(
        schedule, placeholders, target=tvm.target.Target(target, host=target_host), binds=binds
    )
    dev = tvm.device(target)
    tensors = [tvm.nd.array(tensor, dev) for tensor in inputs]
    # Output buffer: zeros with the shape/dtype of the last placeholder.
    out = placeholders[-1]
    tensors.append(
        tvm.nd.array(
            numpy.zeros([i.value for i in out.shape], dtype=out.dtype),
            dev,
        )
    )
    built(*tensors)
    return tensors[-1].asnumpy()
def get_conv2d_nhwc_shape(shape_nhwc, kernel_size, strides, padding, dilation, out_channels):
    """Compute the NHWC output shape of a conv2d.

    *padding* is (top, bottom, left, right); *dilation* is (dh, dw).
    """
    assert len(shape_nhwc) == 4
    batch, in_h, in_w, _ = shape_nhwc
    # Effective (dilated) kernel extent per spatial axis.
    dilated_kh = (kernel_size[0] - 1) * dilation[0] + 1
    dilated_kw = (kernel_size[1] - 1) * dilation[1] + 1
    out_h = (in_h - dilated_kh + padding[0] + padding[1]) // strides[0] + 1
    out_w = (in_w - dilated_kw + padding[2] + padding[3]) // strides[1] + 1
    return (batch, out_h, out_w, out_channels)
def verify_conv2d(output, ref_output, dtype):
    """Check a blocked conv2d *output* against an NHWC *ref_output*.

    Handles two blocked layouts: 7-d nhwc8h8w32c (channel packed) and
    6-d nhw8h8wc (spatial blocks only). The output is un-blocked back to
    NHWC, cropped to ref_output's shape, and compared with a tolerance
    chosen from *dtype*.

    Raises ValueError for dtypes without a defined tolerance.
    """
    # nhwc8h8w32c
    if len(output.shape) == 7:
        # nhwc8h8w32c -> nhwc
        output = output.transpose(0, 1, 4, 2, 5, 3, 6).reshape(
            output.shape[0],
            output.shape[1] * output.shape[4],
            output.shape[2] * output.shape[5],
            output.shape[3] * output.shape[6],
        )
    # nhwhwc
    else:
        # nhwhwc -> nhwc
        output = output.transpose(0, 1, 3, 2, 4, 5).reshape(
            output.shape[0],
            output.shape[1] * output.shape[3],
            output.shape[2] * output.shape[4],
            output.shape[5],
        )
    # Slice output to match ref_output shape:
    # e.g. 8x8 spatial with a 3x3 filter gives a 6x6 ref output, but the
    # blocked layout still produces 8x8 -- crop the padding away.
    output = output[
        0 : ref_output.shape[0] : 1,
        0 : ref_output.shape[1] : 1,
        0 : ref_output.shape[2] : 1,
        0 : ref_output.shape[3] : 1,
    ]
    if "int" in dtype:
        tol = {"atol": 0, "rtol": 0}
    elif dtype == "float32":
        tol = {"rtol": 1e-4, "atol": 2e-4}
    else:
        # FIX: previously ``tol`` was left unbound here, so any other
        # dtype crashed with a confusing NameError at the assert below.
        raise ValueError("verify_conv2d: unsupported dtype " + dtype)
    tvm.testing.assert_allclose(output, ref_output, **tol)
|
# filename: Test.py
from python_helper import ObjectHelper
from reinforcement_learning import hash as hashModule
from reinforcement_learning import Set, Dictionary, Tuple, List
def getWorksAsIntended():
    """Smoke-test hashModule.get() and the Set/Dictionary/Tuple/List
    wrappers: hashes must be order-independent, copies must compare
    equal, and nested/mixed collections must be hashable and sortable.
    """
    # arrange
    simpleSet = {1,2,3}
    anotherSimpleSet = {1,2,3}
    anotherSimpleSetInAnotherOrder = {1,3,2}
    differentSet = {4,5,6}
    bigSet = Set(simpleSet)
    anotherBigSet = Set(anotherSimpleSet)
    anotherBigSetInAnotherOrder = Set(anotherSimpleSetInAnotherOrder)
    differentBigSet = Set(differentSet)
    bigDictionary = Dictionary({
        1: simpleSet,
        2: bigSet,
        3: differentSet,
        4: anotherSimpleSetInAnotherOrder,
        5: anotherBigSetInAnotherOrder,
        6: differentBigSet
    })
    anotherBigDictionary = Dictionary({
        1: simpleSet,
        2: bigSet,
        3: differentSet,
        4: anotherSimpleSetInAnotherOrder,
        5: anotherBigSetInAnotherOrder,
        6: differentBigSet
    })
    # Same entries as bigDictionary, inserted in a different order --
    # used to check that hashing is insertion-order independent.
    anotherBigDictionaryInADifferentOrder = Dictionary({
        1: simpleSet,
        5: anotherBigSetInAnotherOrder,
        2: bigSet,
        3: differentSet,
        4: anotherSimpleSetInAnotherOrder,
        6: differentBigSet
    })
    # NOTE: a large block of commented-out alternative fixtures was
    # removed here (dead code).
    complexObject = List([
        Dictionary({
            f'[[{hash(1)},{hash(2)}],[{hash(3)},{hash(4)}]]': Dictionary({
                1: simpleSet,
                2: bigSet,
                3: differentSet,
                4: anotherSimpleSetInAnotherOrder,
                5: Tuple(tuple(t for t in anotherBigSetInAnotherOrder)),
                6: differentBigSet
            })
        })
    ])
    # assert
    assert hashModule.get(simpleSet) == hashModule.get(anotherSimpleSet), 'hashModule.get(simpleSet) == hashModule.get(anotherSimpleSet) sould be equals'
    assert hashModule.get(bigSet) == hashModule.get(anotherBigSet), 'hashModule.get(bigSet) == hashModule.get(anotherBigSet) sould be equals'
    assert hashModule.get(bigSet) == hashModule.get(anotherBigSetInAnotherOrder), 'hashModule.get(bigSet) == hashModule.get(anotherBigSetInAnotherOrder) sould be equals'
    assert hashModule.get(bigSet) == hashModule.get(bigSet.getCopy()), 'hashModule.get(bigSet) == hashModule.get(bigSet.getCopy()) sould be equals'
    assert bigSet == bigSet.getCopy(), 'bigSet == bigSet.getCopy() sould not be equals'
    assert not simpleSet == differentSet, 'simpleSet == differentSet sould not be equals'
    assert not bigSet == differentBigSet, 'bigSet == differentBigSet sould not be equals'
    assert ObjectHelper.isCollection(bigDictionary), 'ObjectHelper.isCollection(bigDictionary) should be a colection'
    assert ObjectHelper.isDictionary(bigDictionary), 'ObjectHelper.isDictionary(bigDictionary) should be a dictionary'
    assert bigDictionary == bigDictionary.getCopy(), 'bigDictionary == bigDictionary.getCopy() should be equals'
    assert not bigDictionary == anotherBigDictionary, 'bigDictionary == anotherBigDictionary should be equals'
    assert hashModule.get(bigDictionary) == hashModule.get(anotherBigDictionaryInADifferentOrder), 'hashModule.get(bigDictionary) == hashModule.get(anotherBigDictionaryInADifferentOrder) should be equals'
    assert bigDictionary > anotherBigDictionary or bigDictionary >= anotherBigDictionary or bigDictionary < anotherBigDictionary or bigDictionary <= anotherBigDictionary
    assert ObjectHelper.getSortedCollection([bigDictionary])
    assert tuple({1,2}) == tuple({1:3,2:4})
    assert complexObject == complexObject.getCopy(), 'complexObject == complexObject.getCopy() should be equals'
    print(hashModule.get(simpleSet))
    print(hashModule.get(anotherSimpleSet))
    print(hashModule.get(bigSet))
    print(hashModule.get(bigSet.getCopy()))
    print(hashModule.get(anotherBigSet))
    print(hashModule.get(anotherBigSetInAnotherOrder))
    print(hashModule.get(bigDictionary))
    print(hashModule.get(complexObject))
###- HashTest
getWorksAsIntended()
|
# repo: kafee23/Deep_Learning_AV (gh_stars: 0)
# -*- coding: utf-8 -*-
"""
"""
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.wrappers.scikit_learn import KerasRegressor, KerasClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from keras.constraints import maxnorm
class Deep_Models:
    """Configurable Keras MLP (classifier or regressor) wrapped in a
    scikit-learn Pipeline with input standardization.

    ``activation_fn`` lists the hidden-layer activations followed by the
    output-layer activation in the last slot.
    """

    def __init__(self, name):
        self.name = name
        self.model = Sequential()
        self.estimators = []
        # Defaults; normally overridden through update_parameters().
        self.hidden_layers = [100, 100, 100]   # units per hidden layer
        self.input_dm = 1                      # number of input features
        self.activation_fn = ['relu']
        self.k_fold = 10                       # folds for CV scoring (0 = skip)
        self.loss = 'binary_crossentropy'
        self.optimizer = 'adam'
        self.model_type = 'classifier'         # 'classifier' or 'regressor'
        self.no_of_output = 1
        self.metrics = ['accuracy']
        self.sample_weight = None
        self.dropout_spec = [0.2, 0.2, 0.2]    # dropout rate per hidden layer

    def get_name(self):
        """Return the model's display name."""
        return self.name

    def get_activation_fn(self, activation_fn, pos):
        """Return the activation for hidden layer *pos*.

        The last entry of *activation_fn* is reserved for the output
        layer; if fewer hidden activations are configured than there are
        hidden layers, the last hidden activation is reused.
        """
        if len(activation_fn) < 2:
            # FIX: with a single entry the original indexed [-2], which
            # raised IndexError -- use the only activation everywhere.
            return activation_fn[0]
        if len(activation_fn) - 2 < pos:
            return activation_fn[-2]
        return activation_fn[pos]

    def update_parameters(self, param):
        """Copy the hyper-parameter dict *param* onto the instance."""
        self.hidden_layers = param['hidden_layers']
        self.input_dm = param['input_dm']
        self.activation_fn = param['activation_fn']
        self.k_fold = param['k_fold']
        self.loss = param['loss']
        self.optimizer = param['optimizer']
        self.model_type = param['model_type']
        self.no_of_output = param['no_of_output']
        self.metrics = param['metrics']
        self.sample_weight = param['sample_weight']
        self.dropout_spec = param['dropout_spec']

    def update_parameters_class_Weight(self, param):
        """Like update_parameters(), plus the 'class_weight' entry."""
        # FIX: the original duplicated every assignment; delegate instead.
        self.update_parameters(param)
        self.class_weight = param['class_weight']

    def build_model(self):
        """Assemble and compile the Sequential model.

        FIX: the original used hidden_layers.index(hl) for the layer
        position, which always returns the FIRST match -- with the
        default [100, 100, 100] every layer was treated as layer 0
        (wrong activation and dropout indices).
        """
        for pos, units in enumerate(self.hidden_layers):
            if pos == 0:  # first hidden layer carries the input dimension
                self.model.add(Dense(units, input_dim=self.input_dm, kernel_initializer='normal', activation=self.activation_fn[0]))
            else:
                self.model.add(Dense(units, kernel_initializer='normal', activation=self.get_activation_fn(self.activation_fn, pos)))
            # One dropout rate per hidden layer (dropout_spec is parallel
            # to hidden_layers) -- TODO confirm first-layer dropout is wanted.
            self.model.add(Dropout(self.dropout_spec[pos]))
        self.model.add(Dense(self.no_of_output, kernel_initializer='normal', activation=self.activation_fn[-1]))
        self.model.compile(loss=self.loss, optimizer=self.optimizer, metrics=self.metrics)
        return self.model

    def tune_model_Dropout(self, dropout_rate=0.0, weight_constraint=0):
        """Variant of build_model() with a uniform, tunable dropout rate
        and a max-norm weight constraint on the first layer."""
        for pos, units in enumerate(self.hidden_layers):
            if pos == 0:
                self.model.add(Dense(units, input_dim=self.input_dm, kernel_initializer='normal', activation=self.activation_fn[0], kernel_constraint=maxnorm(weight_constraint)))
            else:
                self.model.add(Dense(units, kernel_initializer='normal', activation=self.get_activation_fn(self.activation_fn, pos)))
            self.model.add(Dropout(dropout_rate))
        self.model.add(Dense(self.no_of_output, kernel_initializer='normal', activation=self.activation_fn[-1]))
        self.model.compile(loss=self.loss, optimizer=self.optimizer, metrics=self.metrics)
        return self.model

    def build_estimator(self):
        """Build the (StandardScaler -> Keras wrapper) pipeline."""
        self.estimators.append(('standardize', StandardScaler()))
        if self.model_type == 'regressor':
            self.estimators.append(('mlp', KerasRegressor(build_fn=self.build_model, epochs=50, batch_size=5, verbose=0)))
        elif self.model_type == 'classifier':
            self.estimators.append(('mlp', KerasClassifier(build_fn=self.build_model, epochs=200, batch_size=5, verbose=0)))
        self.pipeline = Pipeline(self.estimators)

    def kfold_CVS(self, X, Y):
        """Print the 10-fold cross-validation score of the pipeline."""
        kfold = KFold(n_splits=10)
        results = cross_val_score(self.pipeline, X, Y, cv=kfold)
        print("Larger: %.2f (%.2f) MSE" % (results.mean(), results.std()))

    def DNN_Models(self, X, Y, **param):
        """Configure, cross-validate and fit; return the fitted pipeline."""
        self.update_parameters(param)
        self.build_estimator()
        if self.k_fold > 0:
            self.kfold_CVS(X, Y)
        print(self.sample_weight)
        self.pipeline.fit(X, Y, **{'mlp__sample_weight': self.sample_weight})
        return self.pipeline

    def DNN_Models_Class_Weight(self, X, Y, **param):
        """Same as DNN_Models() but forwards weights as class weights.

        NOTE(review): this passes self.sample_weight (not
        self.class_weight) as mlp__class_weight; preserved as-is, but it
        looks suspicious -- confirm against callers before changing.
        """
        self.update_parameters(param)
        self.build_estimator()
        if self.k_fold > 0:
            self.kfold_CVS(X, Y)
        print(self.sample_weight)
        self.pipeline.fit(X, Y, **{'mlp__class_weight': self.sample_weight})
        return self.pipeline
|
# gh_stars: 1-10
import os
import bpy
from bpy.app.handlers import persistent
# Add-on metadata consumed by Blender's add-on manager.
bl_info = {
    "name": "Auto FBX export",
    "author": "Flavelius",
    "description":"Automatically export fbx when saving file",
    "version": (0, 1),
    "blender": (2, 80, 0),
    "category":"Import-Export",
    "support":"COMMUNITY",
    "location": "View3D -> Properties -> AutoFBX"
}
def get_active_preset():
    """Return the preset name currently selected in the scene settings."""
    settings = bpy.context.scene.autofbx_settings
    return settings.preset
def path_to_presetname(path):
    """Return the preset name for *path*: its basename without extension."""
    base = os.path.basename(path)
    name, _ext = os.path.splitext(base)
    return name
def get_savepath():
    """Return the .fbx path next to the currently saved .blend file."""
    blend_path = bpy.path.abspath(bpy.data.filepath)
    root, _ext = os.path.splitext(blend_path)
    return root + '.fbx'
class PresetPropertyGroup(bpy.types.PropertyGroup):
    # One discovered FBX export preset: display key and its file path.
    key: bpy.props.StringProperty()
    path: bpy.props.StringProperty()
class AutoFBXSettings(bpy.types.PropertyGroup):
    # Per-scene add-on state (attached as Scene.autofbx_settings in register()).
    is_enabled: bpy.props.BoolProperty("Enabled")  # export on every save?
    preset: bpy.props.StringProperty(name="Preset")  # active preset name
    presets: bpy.props.CollectionProperty(type=PresetPropertyGroup)  # discovered presets

    def items_callback(self, context):
        # EnumProperty item list: one entry per discovered preset key.
        return [(item.key, item.key, '') for item in self.presets]

    def items_selected(self, context):
        # Mirror the enum selection into the plain string property.
        context.scene.autofbx_settings.preset = context.scene.autofbx_settings.enum_prop

    enum_prop: bpy.props.EnumProperty(items=items_callback, update=items_selected)

    def reload_presets(self):
        # Rescan Blender's FBX-export preset directory and rebuild the
        # collection; 'NONE' is always present as the "no preset" entry.
        preset_path = bpy.utils.preset_paths('operator/export_scene.fbx/')
        self.presets.clear()
        new_item = self.presets.add()
        new_item.key = "NONE"
        new_item.path = "NONE"
        if preset_path:
            listed_files = os.listdir(preset_path[0])
            for i in range(len(listed_files)):
                if listed_files[i].endswith('.py'):
                    preset_file = os.path.join(preset_path[0], listed_files[i])
                    new_item = self.presets.add()
                    new_item.key = path_to_presetname(preset_file)
                    new_item.path = preset_file
        # Keep the current selection if it still exists; otherwise fall
        # back to 'NONE'.
        for item in self.presets:
            if item.key == self.enum_prop:
                return
        self.enum_prop = 'NONE'
class EXPORT_SCENE_OT_autofbx_presetreloader(bpy.types.Operator):
    # Operator wrapper so the UI button can trigger a preset rescan.
    bl_idname = "export_scene.autofbx_reload"
    bl_label = "Reload AutoFBX presets"

    def execute(self, context):
        context.scene.autofbx_settings.reload_presets()
        return {'FINISHED'}
class EXPORT_SCENE_OT_autofbx(bpy.types.Operator):
    """Export the scene as FBX next to the .blend using the active preset."""
    bl_idname = "export_scene.autofbx"
    bl_label = "Auto FBX Save"

    def export_fbx(self, outpath, presetpath):
        # Throwaway attribute bag the preset lines assign onto.
        class Container():
            __slots__ = ('__dict__',)
        op = Container()
        # Evaluate the preset file (skipping its 3-line header) to collect
        # the export operator settings on ``op``.
        # NOTE(review): exec of the preset runs arbitrary code; presets
        # come from Blender's own preset directory (mirroring what Blender
        # itself does) -- never point this at untrusted files.
        # FIX: the file handle was never closed; use a with-block.
        with open(presetpath, 'r') as file:
            for line in file.readlines()[3::]:
                exec(line, globals(), locals())
        # Pass the collected settings (plus our output path) to the exporter.
        op.filepath = outpath
        kwargs = op.__dict__
        bpy.ops.export_scene.fbx(**kwargs)

    def preset_to_path(self, context, preset):
        """Map a preset name back to its file path (None if unknown)."""
        settings = context.scene.autofbx_settings
        for item in settings.presets:
            if item.key == preset:
                return item.path

    def execute(self, context):
        save_path = get_savepath()
        preset = get_active_preset()
        if not save_path:
            self.report({'ERROR'}, 'file not saved')
            return {'CANCELLED'}
        elif not preset or preset == 'NONE':
            self.report({'ERROR'}, 'No Preset')
            return {'CANCELLED'}
        else:
            self.report({'INFO'}, "Exporting "+save_path+" with preset: "+preset)
            preset_path = self.preset_to_path(context, preset)
            # Reuse save_path instead of recomputing get_savepath().
            self.export_fbx(save_path, preset_path)
            return {'FINISHED'}
class AutoFBXPanel(bpy.types.Panel):
    # Sidebar panel (View3D -> N-panel -> "Auto FBX") with the add-on controls.
    bl_idname = "OBJECT_PT_auto_fbx"
    bl_label = "Auto FBX"
    bl_category = "Auto FBX"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'

    def draw(self, context):
        settings = context.scene.autofbx_settings
        layout = self.layout
        row = layout.row()
        row.prop(settings, "is_enabled", text="Enabled")
        row = layout.row()
        row.label(text="Preset")
        # Dropdown labelled with the currently selected preset name.
        row.prop_menu_enum(settings, "enum_prop", text=settings.preset)
        row = layout.row(align=True)
        row.operator("export_scene.autofbx_reload", text="Reload Presets")
        row.operator("export_scene.autofbx", text="Export")
        row = layout.row()  # trailing spacer row
def preset_warning(self, context):
    # Popover body shown when saving without a preset; also disables the
    # add-on so the warning does not repeat on every subsequent save.
    self.layout.label(text="AutoFBX: Missing preset, disabling")
    context.scene.autofbx_settings.is_enabled = False
@persistent
def on_save(dummy):
    """save_post handler: auto-export to FBX each time the .blend file is saved."""
    settings = bpy.context.scene.autofbx_settings
    if settings is None:
        # Add-on property not attached (e.g. during registration) — nothing to do.
        return
    if not settings.is_enabled:
        return
    if settings.preset == 'NONE':
        # No preset chosen: show a warning popover instead of exporting.
        bpy.context.window_manager.popover(preset_warning)
        return
    bpy.ops.export_scene.autofbx()
@persistent
def on_load(dummy):
    """load_post handler: refresh the preset list when a .blend file is opened."""
    if bpy.context.scene.autofbx_settings:
        bpy.context.scene.autofbx_settings.reload_presets()
# All classes registered/unregistered by register()/unregister() below.
# PresetPropertyGroup and AutoFBXSettings are defined earlier in this file.
registerable_classes = [
    PresetPropertyGroup,
    AutoFBXSettings,
    AutoFBXPanel,
    EXPORT_SCENE_OT_autofbx,
    EXPORT_SCENE_OT_autofbx_presetreloader
]
def register():
    """Add-on entry point: register classes, attach settings, install handlers."""
    for item in registerable_classes:
        bpy.utils.register_class(item)
    # Per-scene settings pointer so each scene keeps its own AutoFBX state.
    bpy.types.Scene.autofbx_settings = bpy.props.PointerProperty(type=AutoFBXSettings)
    bpy.app.handlers.save_post.append(on_save)
    bpy.app.handlers.load_post.append(on_load)
def unregister():
    """Add-on exit point: mirror register() — remove classes, property and handlers."""
    # FIX: unregister in reverse registration order, the conventional Blender
    # practice, so classes are removed before the classes they depend on.
    for item in reversed(registerable_classes):
        bpy.utils.unregister_class(item)
    del bpy.types.Scene.autofbx_settings
    bpy.app.handlers.save_post.remove(on_save)
    bpy.app.handlers.load_post.remove(on_load)
|
# Copyright (c) <NAME>.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory.
from fcmaes.astro import MessFull, Messenger, Cassini2, Rosetta, Gtoc1, Cassini1
from fcmaes.optimizer import Optimizer, logger
from fcmaes.advretry import Store, retry, minimize
def messengerFullLoop():
    """Endlessly re-run the coordinated retry on the full Messenger problem
    using the C++ CMA-ES backend. Runs until interrupted."""
    while True:
        prob = MessFull()
        logger().info(prob.name + ' cmaes c++')
        minimize(
            prob.fun,
            bounds=prob.bounds,
            num_retries=40000,
            max_evaluations=50000,
            value_limit=10.0,
            logger=logger(),
            useCpp=True,
        )
def test_all(num_retries = 4000, num = 20):
    """Benchmark every GTOP problem with the C++ and Python CMA-ES retries,
    then with dual annealing and differential evolution."""
    # (factory, extra kwargs) pairs for the CMA-ES retry runs.
    retry_cases = [
        (Cassini1, {}),
        (Cassini2, {}),
        (Rosetta, {}),
        (Messenger, {}),
        (Gtoc1, {'value_limit': -1000000}),
        (MessFull, {'value_limit': 10.0}),
    ]
    # test C++ version
    for factory, extra in retry_cases:
        _test_problem(factory(), num_retries, num, useCpp=True, **extra)
    # test python version
    for factory, extra in retry_cases:
        _test_problem(factory(), num_retries, num, **extra)
    # scipy optimizers use a different Gtoc1 value limit.
    optimizer_cases = [
        (Cassini1, {}),
        (Cassini2, {}),
        (Rosetta, {}),
        (Messenger, {}),
        (Gtoc1, {'value_limit': -200000}),
        (MessFull, {}),
    ]
    # test dual annealing, then differential evolution
    for opt_name in ("dual_annealing", "differential_evolution"):
        for factory, extra in optimizer_cases:
            _test_optimizer(opt_name, factory(), num_retries, num, **extra)
def _test_problem(problem, num_retries = 4000, num = 20, value_limit = 12.0,
                  log = logger(), useCpp = False):
    """Run the coordinated retry `num` times on `problem`.

    Arguments:
        problem: a GTOP problem instance with `fun` and `bounds`.
        num_retries: retries per minimize() call.
        num: number of independent minimize() runs.
        value_limit: objective threshold passed through to the retry.
        log: logger (default is the module logger created at import time).
        useCpp: select the C++ CMA-ES backend instead of the Python one.
    """
    log.info(problem.name + ' cmaes ' + ('c++' if useCpp else 'python'))
    for _ in range(num):
        # Result intentionally discarded; progress/results go to the logger.
        minimize(problem.fun, bounds=problem.bounds, num_retries=num_retries,
                 useCpp=useCpp, logger=log, value_limit=value_limit)
def _test_optimizer(opt_name, problem, num_retries = 4000, num = 20, value_limit = 20.0,
                    log = logger()):
    """Run the retry `num` times on `problem` with a scipy-based optimizer.

    Arguments:
        opt_name: name of an Optimizer method, e.g. "dual_annealing".
        problem: a GTOP problem instance with `fun` and `bounds`.
        num_retries / num / value_limit: as in _test_problem.
        log: logger (default is the module logger created at import time).
    """
    log.info(problem.name + ' ' + opt_name)
    for _ in range(num):
        store = Store(problem.bounds, logger=log)
        optimizer = Optimizer(store, 0)
        # Look the optimizer method up by name so one helper covers them all.
        method = getattr(optimizer, opt_name)
        # Result intentionally discarded; progress/results go to the logger.
        retry(problem.fun, store, method, num_retries, value_limit=value_limit)
def main():
    """Entry point: run the full benchmark suite."""
    test_all()
    # Alternative single runs, kept for convenience:
    #_test_optimizer("dual_annealing", Cassini1(), 500, 1)
    #_test_problem(Cassini1(), 500, 1)
    #messengerFullLoop()

if __name__ == '__main__':
    main()
<gh_stars>0
#! python3
import datetime
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from color import GREEN, YELLOW, YELLOW_BRIGHT
def _get_text_bbox(text):
    '''
    Return the bounding box of a rendered text artist, in axes coordinates.

    Argument
    --------
    text (matplotlib.text.Text)

    Returns
    -------
    bbox (matplotlib.transforms.Bbox)
    '''
    renderer = plt.gcf().canvas.get_renderer()
    # FIX: Bbox.inverse_transformed() was deprecated in matplotlib 3.3 and
    # removed in 3.5; .transformed(transAxes.inverted()) is the supported
    # equivalent (display coords -> axes coords).
    bbox = text.get_window_extent(renderer=renderer) \
               .transformed(plt.gca().transAxes.inverted())  # in axes coordinates
    return bbox
class SolarPlot:
    '''
    Plot solar predictions and actual
    '''
    def __init__(self, verbose=True, debug=False):
        # Verbosity: `verbose` prints progress lines, `debug` additionally
        # dumps the raw forecast/actual series being plotted.
        self.verbose = verbose
        self.debug = debug

    def show_all(self):
        """Show every figure created so far (blocking)."""
        plt.show()

    def solar_power(self, time_view, tz, sun_times, local_capacity,
                    forecast, predicted_total_kwh,
                    predicted_current_power=None, predicted_current_kwh=None,
                    actual=None, actual_current_power=None, actual_current_kwh=None, actual_last_updated=None, actual_total_kwh=None):
        '''
        Plot the solar power forecast (and, for past/today views, the actuals),
        with sun-time markers and a summary table.

        Arguments
        ---------
        time_view (string) : 'past'/'today'/'future'
        tz
        sun_times
        local_capacity (float) : [kWp]
        forecast (dict) : {timedata (timezone aware) : value [kW]}
        actual (dict) : {timedata (timezone aware) : value [kW]}
        predicted_total_kwh
        predicted_current_power (float) : (optional) [kW]
        predicted_current_kwh (float) : (optional) [kWh]
        actual_current_power (float) : (optional) [kW]
        actual_current_kwh (float) : (optional) [kWh]
        actual_last_updated (datetime) : (optional)
        actual_total_kwh
        '''
        # Progress print
        if self.verbose:
            print('Plotting solar power... ', end='')
            if self.debug: print()
        # Add last value (only if it is later, otherwise line seems to go back)
        if time_view == 'today':
            # NOTE(review): assumes `actual` and `actual_last_updated` are
            # always supplied for the 'today' view — confirm with callers.
            if actual_last_updated > actual['time'][-1]:
                actual['time'].append(actual_last_updated)
                actual['value'].append(actual_current_power)
        #plt.style.use('dark_background')
        # Create figure
        cm = 1/2.54 # inch
        plt.figure(figsize=(35*cm,18*cm))
        # DEBUG input
        if self.debug:
            print(YELLOW + 'Prediction')
            for i, _ in enumerate(forecast['time']):
                print(YELLOW_BRIGHT + str(forecast['time'][i]) + ': %f' % (forecast['value'][i]))
            if time_view in ('past', 'today'):
                print(YELLOW + 'Actual')
                for i, _ in enumerate(actual['time']):
                    print(YELLOW_BRIGHT + str(actual['time'][i]) + ': %f' % (actual['value'][i]))
        # Plot predictions
        lines = plt.plot(forecast['time'], forecast['value'], linewidth = 1, label='Predicted')
        forecast_color = lines[0].get_color()
        # Plot actuals
        if time_view in ['past', 'today']:
            lines = plt.plot(actual['time'], actual['value'], linewidth = 1, label='Actual')
            actual_color = lines[0].get_color()
        # Plot settings
        plt.legend(loc='lower left', bbox_to_anchor=(-0.2, 0.0))
        plt.subplots_adjust(left=0.28, right=0.92, top=0.9, bottom=0.1)
        plt.suptitle('Solar Power Forecast', fontweight='bold', fontsize= 15)
        plt.ylabel('[kW]')
        plt.xlim(forecast['time'][0], forecast['time'][-1])
        plt.grid(which='major', alpha=0.5)
        plt.grid(which='minor', alpha=0.5)
        # Major ticks: one per day; minor ticks: every 2 hours, in tz.
        plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=1, tz=tz))
        plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%d/%m/%y', tz=tz))
        plt.gca().xaxis.set_minor_locator(mdates.HourLocator(interval=2, tz=tz))
        plt.gca().xaxis.set_minor_formatter(mdates.DateFormatter('%Hu', tz=tz))
        #--------------------------- Current values ---------------------------#
        if time_view == 'today':
            # Current time
            if predicted_current_power != None:
                datetime_now = datetime.datetime.now(tz=tz)
                plt.axvline(x=datetime_now, color='r', linestyle='dashed', linewidth=1, alpha=0.7)
                label = datetime_now.strftime('%Hu%M')
                trans = plt.gca().get_xaxis_transform()
                plt.text(datetime_now, 1.01, label, transform=trans,
                         horizontalalignment = 'center',
                         verticalalignment = 'bottom',
                         color='r')
            # Current predicted power
            if predicted_current_power != None:
                plt.axhline(y=predicted_current_power, color=forecast_color, linestyle='dashed', linewidth=1, alpha=0.7)
                plt.plot(datetime_now, predicted_current_power, 'o', color=forecast_color)
                trans = plt.gca().get_yaxis_transform()
                plt.text(1.01, predicted_current_power, '%.2f kW' % predicted_current_power, transform=trans,
                         horizontalalignment = 'left',
                         verticalalignment = 'center',
                         color=forecast_color)
            # Current actual power
            if actual_current_power != None:
                # NOTE(review): reuses `trans` from the predicted branch above;
                # if predicted_current_power is None but actual_current_power is
                # not, `trans` is undefined (NameError). Confirm callers always
                # pass both or neither.
                plt.axhline(y=actual_current_power, color=actual_color, linestyle='dashed', linewidth=1, alpha=0.7)
                plt.plot(actual_last_updated, actual_current_power, 'o', color=actual_color)
                plt.text(1.01, actual_current_power, '%.2f kW' % actual_current_power, transform=trans,
                         horizontalalignment = 'left',
                         verticalalignment = 'center',
                         color=actual_color)
        #----------------------------- Sun times ------------------------------#
        # - More top margin for labels
        forecast_data_range = max(forecast['value']) - min(forecast['value'])
        forecast_ymin = min(forecast['value']) - 0.05*forecast_data_range # default bottom margin
        forecast_ymax = max(forecast['value']) + 0.10*forecast_data_range # more top margin
        if time_view in ('past', 'today'):
            actual_data_range = max(actual['value']) - min(actual['value'])
            actual_ymin = min(actual['value']) - 0.05*actual_data_range # default bottom margin
            actual_ymax = max(actual['value']) + 0.10*actual_data_range # more top margin
            ymin = min(forecast_ymin, actual_ymin)
            ymax = max(forecast_ymax, actual_ymax)
        elif time_view == 'future':
            ymin = forecast_ymin
            ymax = forecast_ymax
        plt.ylim(ymin,ymax)
        # Secondary (top) x-axis used to place sun-time labels.
        plt.twiny()
        plt.xlim(forecast['time'][0], forecast['time'][-1])
        # NOTE(review): mutates the caller's sun_times dict in place.
        del sun_times['dawn']
        del sun_times['dusk']
        plot_sun_times = sun_times.copy()
        for old_key in sun_times:
            # Tick label: event name plus its local time, e.g. "sunrise\n06u12".
            new_key = old_key + '\n' + sun_times[old_key].strftime("%Hu%M")
            plot_sun_times[new_key] = sun_times[old_key]
        # NOTE(review): plot_sun_times keeps both the original and relabeled
        # keys, so each sun time produces two ticks — verify this is intended.
        plt.xticks(ticks=list(plot_sun_times.values()), labels=plot_sun_times.keys())
        plt.gca().tick_params(axis='x', direction='in',pad=-28)
        #------------------------------- Table --------------------------------#
        rows = []
        rows.append(['Local Capacity', '%.2f' % local_capacity, 'kWp'])
        rows.append(['','',''])
        # Predictions
        rows.append([r'$\bf{Elia\ Predictions}$','',''])
        elia_row = len(rows)-1
        rows.append(['Total production', '%.2f' % predicted_total_kwh, 'kWh'])
        if predicted_current_power != None:
            rows.append(['Current power', '%.2f' % predicted_current_power, 'kW'])
        if predicted_current_kwh != None:
            rows.append(['Current production', '%.2f' % predicted_current_kwh, 'kWh'])
        rows.append(['','',''])
        # Actuals
        if time_view in ('past', 'today'):
            rows.append([r'$\bf{SolarEdge\ Actuals}$','',''])
            solaredge_row = len(rows)-1
            if time_view == 'today':
                if actual_current_power != None:
                    rows.append(['Current power', '%.2f' % actual_current_power, 'kW'])
                if actual_current_kwh != None:
                    rows.append(['Current production', '%.2f' % actual_current_kwh, 'kWh'])
                if actual_last_updated != None:
                    rows.append(['Last updated', actual_last_updated.strftime("%Hu%M"), ''])
                    last_update_row = len(rows)-1
            if time_view == 'past':
                rows.append(['Total production', '%.2f' % actual_total_kwh, 'kWh'])
        nr_of_rows = len(rows)
        # Position (axes coordinates)
        x0 = -0.39
        y0 = 0.65
        width = 0.57
        height = 0.35
        t = plt.table(rows, edges='open', cellLoc='left', bbox=[x0, y0, width, height])
        t.auto_set_font_size(False)
        t.auto_set_column_width((0,1,3))
        # Format cells
        t[elia_row, 0].set_text_props(color=forecast_color)
        if time_view in ('past', 'today'):
            t[solaredge_row, 0].set_text_props(color=actual_color)
            if actual_last_updated != None:
                # De-emphasize the "Last updated" row.
                t[last_update_row, 0].set_text_props(color='grey')
                t[last_update_row, 1].set_text_props(color='grey')
                t[last_update_row, 0].set_fontsize(9)
                t[last_update_row, 1].set_fontsize(9)
        # Right align numbers
        for i in range(nr_of_rows):
            t[i,1].set_text_props(horizontalalignment = 'right')
        #----------------------------------------------------------------------#
        # Progress print
        if self.verbose:
            print(GREEN + 'Done')
        #plt.savefig('plot.png')
        #plt.show()

    def power_flow(self, component_power, component_status, connections):
        '''
        Plot power flow between site components (solar, battery, house, grid)

        Arguments
        ---------
        component_power (dict) : {name (string) : power (float) [kW]}
        component_status (dict) : {name (string) : status (string)}
        connections (list)
        '''
        # Progress print
        if self.verbose:
            print('Plotting power flow... ', end='')
            if self.debug: print()
        # NOTE(review): the arguments are not used yet — only the static
        # icon/arrow layout is drawn.
        # Import fontawesome keys
        import fontawesome as fa
        # Load fontawesome font
        from matplotlib.font_manager import FontProperties
        fp = FontProperties(fname=r'./font/Font Awesome 5 Free-Solid-900.otf')
        # Create figure
        cm = 1/2.54 # inch
        plt.figure(figsize=(15*cm,10*cm))
        # Plot icons (positions in axes coordinates)
        icon_properties = {'fontproperties': fp,
                           'ha': 'center',
                           'va': 'center',
                           "transform": plt.gca().transAxes}
        solar = plt.text(.20, .50, fa.icons['solar-panel'], fontsize=25, **icon_properties)
        home = plt.text(.50, .50, fa.icons['home'], fontsize=35, **icon_properties)
        grid = plt.text(.80, .50, fa.icons['plug'], fontsize=25, **icon_properties)
        battery = plt.text(.50, .20, fa.icons['car-battery'], fontsize=25, **icon_properties)
        # Icon BBoxes
        solar_bbox = _get_text_bbox(solar)
        home_bbox = _get_text_bbox(home)
        grid_bbox = _get_text_bbox(grid)
        battery_bbox = _get_text_bbox(battery)
        # Arrows between icons, inset by `margin` on each side.
        margin = 0.05
        solar_home = plt.arrow(x = solar_bbox.x1 + margin,
                               y = solar.get_position()[1],
                               dx = home_bbox.x0 - solar_bbox.x1 - 2*margin,
                               dy = 0,
                               width = 0.001,
                               head_width = 0.02,
                               length_includes_head = True,
                               fc = 'k',
                               transform=plt.gca().transAxes)
        home_grid = plt.arrow(x = home_bbox.x1 + margin,
                              y = home.get_position()[1],
                              dx = grid_bbox.x0 - home_bbox.x1 - 2*margin,
                              dy = 0,
                              width = 0.001,
                              head_width = 0.02,
                              length_includes_head = True,
                              fc = 'k',
                              transform=plt.gca().transAxes)
        home_battery = plt.arrow(x = home.get_position()[0],
                                 y = home_bbox.y0 - margin,
                                 dx = 0,
                                 dy = battery_bbox.y1 - home_bbox.y0 + 2*margin,
                                 width = 0.001,
                                 head_width = 0.02,
                                 length_includes_head = True,
                                 fc = 'k',
                                 transform=plt.gca().transAxes)
        # Plot settings
        plt.axis('off')
        # Progress print
        if self.verbose:
            print(GREEN + 'Done')
        #plt.savefig('plot.png')
        #plt.show()
|
import cv2
from utils import to_gray, has_alpha, blend_white, get_image_paths
import numpy as np
import glob
import sys
from timeit import default_timer as timer
def resize(patch):
    """Scale *patch* so its height becomes 120 px (15 * 8), keeping aspect ratio."""
    target_height = 15 * 8
    scale = target_height / (1.0 * patch.shape[0])
    return cv2.resize(patch, None, fx=scale, fy=scale)
def preproc(patch):
    """Canonicalize a patch for SIFT: resize to the standard height, then grayscale."""
    return to_gray(resize(patch))
def get_keypoints(cv2_sift, template):
    """Detect SIFT keypoints and descriptors on the preprocessed template image."""
    return cv2_sift.detectAndCompute(preproc(template), None)
def sift_pred(cv2_sift, bf, query_kp, query_des, patch,
              patch_kp=None, patch_des=None,
              template_img=None, draw_matches=False, ratio=0.6, fp=False):
    """Match template SIFT descriptors against a patch.

    Returns (score, num_good_matches), plus the drawn-matches image when
    draw_matches is True. `fp` switches to a false-positive scoring mode.
    """
    # Compute patch keypoints lazily unless the caller cached them.
    if patch_kp is None or patch_des is None:
        patch_kp, patch_des = get_keypoints(cv2_sift, patch)
    if patch_des is None:
        # No descriptors found in the patch -> nothing to match.
        match_list = []
    else:
        match_list = bf.knnMatch(query_des, patch_des, k=2)
    # knnMatch may return fewer than 2 neighbours; keep only full pairs.
    match_list = [m for m in match_list if len(m) == 2]
    # Apply ratio test
    good = []
    score = 0.0
    for m, n in match_list:
        if m.distance < ratio * n.distance:
            good.append([m])
            if not fp:
                # Reward matches whose best distance clearly beats the
                # runner-up; the denominator is clamped to avoid blow-up.
                score += n.distance / np.maximum(m.distance, 0.01)
        else:
            if fp:
                # False-positive mode: score matches that fail the ratio test,
                # weighted by how far above the ratio they land.
                score += np.sqrt((m.distance / n.distance - ratio))
    if draw_matches:
        template_img = resize(template_img.copy())
        # Flatten alpha channels onto white before drawing.
        if has_alpha(template_img):
            template_img = blend_white(template_img)
        if has_alpha(patch):
            patch = blend_white(patch)
        drawn_matches = cv2.drawMatchesKnn(template_img,
                                           query_kp,
                                           resize(patch),
                                           patch_kp,
                                           good, None, flags=2)
        return score, len(good), drawn_matches
    return score, len(good)
class SIFTModel(object):
    """SIFT-based template matcher: loads template images once, then scores or
    matches incoming patches against them."""

    def __init__(self, path_to_templates,
                 match_threshold=0.2, match_threshold_small=0.5):
        # `match_threshold_small` applies to templates with few (<= 40) keypoints,
        # which need a higher match ratio to be trusted.
        self.cv2_sift = cv2.xfeatures2d.SIFT_create()
        self.bf = cv2.BFMatcher()
        self.match_threshold = match_threshold
        self.match_threshold_small = match_threshold_small
        templates, keypoints = self.load_templates(path_to_templates)
        self.templates = templates
        self.keypoints = keypoints

    def load_templates(self, path_to_templates):
        """Load template PNGs (a single file or a directory of *.png) and
        precompute their SIFT keypoints; templates with <= 10 keypoints are
        ignored. Returns (file_names, [(keypoints, descriptors), ...])."""
        print("LOADING TEMPLATES...")
        if path_to_templates.endswith(".png"):
            files = [path_to_templates]
        else:
            files = glob.glob(path_to_templates + '/*.png')
        templates = []
        keypoints = []
        for file in files:
            # -1: load unchanged, keeping any alpha channel.
            img = cv2.imread(file, -1)
            kp, des = get_keypoints(self.cv2_sift, img)
            if len(kp) > 10:
                templates.append(file)
                keypoints.append((kp, des))
                print("adding {} as template with {} keypoints".format(file, len(kp)))
            else:
                print("ignoring {}".format(file))
        print("LOADED {} TEMPLATES".format(len(templates)))
        return templates, keypoints

    def score(self, img, fp=False):
        """Return (total_score, max_matches) of *img* across all templates.

        In normal mode only templates whose match ratio clears the per-size
        threshold contribute; in fp (false-positive) mode every template does.
        """
        patch_kp, patch_des = get_keypoints(self.cv2_sift, img)
        total_score = 0.0
        max_matches = 0
        for idx, (kp, des) in enumerate(self.keypoints):
            score, num_matches = sift_pred(self.cv2_sift, self.bf, kp, des,
                                           patch=img,
                                           patch_kp=patch_kp,
                                           patch_des=patch_des, fp=fp)
            if fp:
                total_score += score
                max_matches = max(max_matches, num_matches)
            else:
                # Small templates require a higher match ratio.
                if len(kp) > 40:
                    threshold = self.match_threshold
                else:
                    threshold = self.match_threshold_small
                match_ratio = (1.0 * num_matches) / len(kp)
                if match_ratio >= threshold:
                    total_score += score
                    max_matches = max(max_matches, num_matches)
        return total_score, max_matches

    def match(self, img, verbose=False, img_name=None):
        """Return (matched, high_match) where matched is True on the first
        template whose match ratio clears its threshold, and high_match is
        [best_ratio, num_matches, template_keypoints]."""
        patch_kp, patch_des = get_keypoints(self.cv2_sift, img)
        high_match = np.array([0, 0, 0])
        for idx, (kp, des) in enumerate(self.keypoints):
            _, num_matches = sift_pred(self.cv2_sift, self.bf, kp, des,
                                       patch=img,
                                       patch_kp=patch_kp,
                                       patch_des=patch_des)
            # Small templates require a higher match ratio.
            if len(kp) > 40:
                threshold = self.match_threshold
            else:
                threshold = self.match_threshold_small
            match_ratio = (1.0 * num_matches) / len(kp)
            if match_ratio > high_match[0]:
                high_match = [match_ratio, num_matches, len(kp)]
            if match_ratio >= threshold:
                if verbose:
                    print("{} matched on template {} with {}/{}={:.2f} "
                          "matches".format(img_name, self.templates[idx],
                                           num_matches, len(kp), match_ratio))
                # Early exit on the first matching template.
                return True, high_match
        if verbose:
            print("\tno match for {}! Highest score was {}".format(
                img_name, high_match))
        return False, high_match
if __name__ == '__main__':
    # Usage: python sift.py <template path or dir> <glob of images to match>
    template_path = sys.argv[1]
    glob_path = sys.argv[2]
    match_threshold = 0.05
    match_threshold_small = 1.0
    SIFT = SIFTModel(template_path, match_threshold=match_threshold,
                     match_threshold_small=match_threshold_small)
    ad_logo_paths = get_image_paths(glob_path)
    print("found {} files to match".format(len(ad_logo_paths)))
    scores = []
    t1 = timer()
    for ad_logo_path in ad_logo_paths:
        ad_logo = cv2.imread(ad_logo_path, -1)
        assert ad_logo is not None
        # Normalize 16-bit images down to 8-bit for SIFT.
        if ad_logo.dtype != np.uint8:
            assert ad_logo.dtype == np.uint16
            ad_logo = (ad_logo / 256).astype('uint8')
        try:
            match, (match_ratio, _, _) = SIFT.match(ad_logo, verbose=True,
                                                    img_name=ad_logo_path)
            if not match:
                match_ratio = 0
            scores.append(match_ratio)
        except Exception as e:
            # Best-effort batch run: report the failing image and continue.
            # NOTE(review): a failed image appends nothing to `scores`, so
            # scores/ad_logo_paths can get out of sync below — confirm intended.
            print("{} failed with {}".format(ad_logo_path, e))
    t2 = timer()
    print("evaluated {} images in {} seconds".format(len(ad_logo_paths), t2 - t1))
    scores = np.array(scores)
    ad_logo_paths = np.array(ad_logo_paths)
    # Summary: how many images matched, which ones, and the top-10 by ratio.
    print(np.sum(scores >= match_threshold))
    print(ad_logo_paths[scores >= match_threshold])
    topk = scores.argsort()[-10:][::-1]
    print(ad_logo_paths[topk])
|
<filename>bench/visualize.py
'''
Module containing Visualizers.
Classes embody Function Plots,
for visualization of data.
'''
import numpy, matplotlib.pyplot
class UnivariatePlot:
    '''
    Univariate Function Plots.

    Mixin-style: the concrete subclass is assumed to provide `function(x)`,
    `derivative(x)` and — when limits are not passed in — `begin`/`end`.
    '''
    # assumed to have a univariate function already defined
    def __init__(self, lowerlimit = None, upperlimit = None, stepsize = None, functiondepictor = None, derivativedepictor = None):
        '''
        Constructor
        : param lowerlimit : lower limit used to plot along univariate function axis
        : param upperlimit : upper limit used to plot along univariate function axis
        : param stepsize : step size used to plot along univariate function axis
        : param functiondepictor : depictor used to plot function
        : param derivativedepictor : depictor used to plot function derivative
        '''
        self.lowerlimit = lowerlimit if lowerlimit is not None else self.begin
        # FIX: the fallback must test `upperlimit` (was `lowerlimit`), otherwise
        # passing a lower limit alone left self.upperlimit as None.
        self.upperlimit = upperlimit if upperlimit is not None else self.end
        self.stepsize = stepsize if stepsize is not None else 0.1
        self.functiondepictor = functiondepictor if functiondepictor is not None else 'b^'
        self.derivativedepictor = derivativedepictor if derivativedepictor is not None else 'r^'

    def plot(self):
        '''
        Method to plot function and derivative values of univariate function
        '''
        matplotlib.pyplot.figure('Univariate Plot')
        points = numpy.arange(self.lowerlimit, self.upperlimit, self.stepsize)
        matplotlib.pyplot.plot(points, [self.function(point) for point in points], self.functiondepictor)
        matplotlib.pyplot.plot(points, [self.derivative(point) for point in points], self.derivativedepictor)
        matplotlib.pyplot.show()
class MultivariatePlot:
    '''
    Multivariate Function Plots.

    Mixin-style: the concrete subclass is assumed to provide `function(x)`,
    `derivative(x)` and — when limits are not passed in — a `univariatelist`
    of per-dimension descriptors with `begin`/`end`/`stepsize`.
    '''
    # assumed to have a multivariate function already defined
    def __init__(self, lowerlimits = None, upperlimits = None, stepsizes = None, functiondepictor = None, derivativedepictor = None):
        '''
        Constructor
        : param lowerlimits : lower limits used to plot along univariate function axes
        : param upperlimits : upper limits used to plot along univariate function axes
        : param stepsizes : step sizes used to plot along univariate function axes
        : param functiondepictor : depictor used to plot function
        : param derivativedepictor : depictor used to plot function derivative
        '''
        self.lowerlimits = lowerlimits if lowerlimits is not None else [univariate.begin for univariate in self.univariatelist]
        # FIX: the fallback must test `upperlimits` (was `lowerlimits`), otherwise
        # passing lower limits alone left self.upperlimits as None.
        self.upperlimits = upperlimits if upperlimits is not None else [univariate.end for univariate in self.univariatelist]
        self.stepsizes = stepsizes if stepsizes is not None else [univariate.stepsize for univariate in self.univariatelist]
        self.functiondepictor = functiondepictor if functiondepictor is not None else 'b^'
        self.derivativedepictor = derivativedepictor if derivativedepictor is not None else 'r^'

    def plot(self, point):
        '''
        Method to plot partial function and partial derivative values of multivariate function about a point
        : param point : point of evaluation in parameter space
        '''
        matplotlib.pyplot.figure('Multivariate Plot')
        for i in range(len(self.univariatelist)):
            # FIX: subplot indices are 1-based; plain `i` raised for i == 0.
            matplotlib.pyplot.subplot(len(self.univariatelist), 1, i + 1)
            points = numpy.arange(self.lowerlimits[i], self.upperlimits[i], self.stepsizes[i])
            # Vary only dimension i; keep the other coordinates of `point` fixed.
            deltas = list()
            for delta in points:
                deltas.append(point.copy())
                deltas[-1][i] = delta
            matplotlib.pyplot.plot(points, [self.function(delta) for delta in deltas], self.functiondepictor)
            matplotlib.pyplot.plot(points, [self.derivative(delta)[i] for delta in deltas], self.derivativedepictor)
        matplotlib.pyplot.show()

    def contourplot(self, point, dimensionx, dimensiony):
        '''
        Method to plot function values of a multivariate function
        : param point : point of evaluation in parameter space
        : param dimensionx : dimension to plot on x axis
        : param dimensiony : dimension to plot on y axis
        '''
        matplotlib.pyplot.figure('Multivariate Contour Plot')
        pointsx = numpy.arange(self.lowerlimits[dimensionx], self.upperlimits[dimensionx], self.stepsizes[dimensionx])
        pointsy = numpy.arange(self.lowerlimits[dimensiony], self.upperlimits[dimensiony], self.stepsizes[dimensiony])
        values = numpy.empty((len(pointsy), len(pointsx)), dtype = float)
        for i in range(len(pointsx)):
            for j in range(len(pointsy)):
                # Evaluate the function on a grid over the two chosen dimensions.
                z = point.copy()
                z[dimensionx][0] = pointsx[i]
                z[dimensiony][0] = pointsy[j]
                values[j][i] = self.function(z)
        CS = matplotlib.pyplot.contour(pointsx, pointsy, values)
        matplotlib.pyplot.clabel(CS, fontsize = 10)
        matplotlib.pyplot.show()
class HintonPlot:
    '''
    Hinton Matrix Plots
    '''
    def plot(self, matrix, size = None):
        '''
        Method to plot Hinton Diagram of matrix
        : param matrix : matrix to be plotted
        : param size : scaling of blocks in plot
        '''
        matplotlib.pyplot.figure('Hinton Plot')
        # FIX: Axes.set_axis_bgcolor was deprecated in matplotlib 2.0 and later
        # removed; set_facecolor is the supported replacement.
        matplotlib.pyplot.gca().set_facecolor('gray')
        size = size if size is not None else 5000 / max(matrix.shape)
        columns = list()
        rows = list()
        values = list()
        sizes = list()
        # Square areas are scaled relative to the largest absolute entry.
        maximum = numpy.amax(numpy.abs(matrix))
        for (row, column), value in numpy.ndenumerate(matrix):
            columns.append(column)
            rows.append(row)
            # White squares for positive entries, black for non-positive.
            if value > 0.0:
                values.append('white')
            else:
                values.append('black')
            sizes.append(size * numpy.abs(value) / maximum)
        matplotlib.pyplot.scatter(columns, rows, s = sizes, c = values, marker = 's')
        matplotlib.pyplot.show()
|
<gh_stars>100-1000
"""Optimizations that rewrite graph interfaces."""
import operator
from dataclasses import dataclass
from functools import lru_cache, reduce
from types import SimpleNamespace as NS
from ..info import About
from ..ir import Graph, Parameter
from ..operations import primitives as P
from ..utils import OrderedSet, WorkSet, tracer
from .dde import make_dead
def _noop(fn):
"""Mark a function to represent that the represented feature is absent."""
fn.noop = True
return fn
class GraphInterfaceRewriter:
    """Base class to rewrite a graph's interface.

    A graph's interface (parameter list and output type) can be rewritten if
    all of its uses are either:

    a. A direct call to the function, e.g. g(x)
    b. A branch of a switch statement that's called directly, e.g.
       switch(cond, g, h)(x)

    The run method applies the optimization and proceeds as follows:

    1. Identify "groups" of graphs that can be rewritten. If two graphs are
       different branches of the same switch statement then they must be
       rewritten identically and belong to the same group. A list of entries
       is produced at this point. Each graph is associated to one, but the
       first graph in a group gets a `calls` field in the entry that contains
       the map of the relevant call sites (others get an empty list).
    2. Filter the candidate graphs using the `filter` method, to eliminate
       those we do not wish to rewrite. Eliminating one graph in a group
       eliminates the whole group.
    3. Reorder the graphs using the `order_key` method. Some optimizations
       should be executed on parents first, or children first, for example.
    4. Rewrite the call sites using the `rewrite_call` method. The method
       may add or remove arguments as necessary, wrap or unwrap the call, etc.
    5. Rewrite the graphs using the `rewrite_graph` method, adding/removing
       parameters or changing other aspects of the interface.
    """

    # Debug relation name used when deriving new parameters from model nodes
    # (see `param`); subclasses set it (e.g. "llift").
    relation = None

    @classmethod
    def as_step(cls):
        """Return a Pipeline step that applies this optimization."""
        # NOTE(review): GraphInterfaceRewriterStep is defined elsewhere in the
        # package — not visible in this module chunk.
        return GraphInterfaceRewriterStep(rewriter=cls, name=cls.__name__)

    def __init__(self, manager, graphs=None):
        """Initialize a GraphInterfaceRewriter.

        Arguments:
            manager: The manager for the graphs.
            graphs: The set of graphs to process. If not provided, it defaults
                to all the non-root graphs in the manager.
        """
        self.manager = manager
        self.graphs = (
            manager.graphs - manager.roots if graphs is None else graphs
        )

    def param(self, graph, model):
        """Create a new parameter for the graph based on the model node.

        The abstract field of the model will be copied for the new
        parameter.
        """
        with About(model.debug, self.relation):
            # param = graph.add_parameter()
            param = Parameter(graph)
        param.abstract = model.abstract
        return param

    def call_sites(self, g):
        """Returns {call_site: eqv}.

        A call site C is either:

        * C = g(...)
          * eqv = {g}
        * C = switch(cond, g, g2)(...)
          * eqv = {g, g2}
        """
        # Direct calls: each maps to the singleton {g}.
        call_sites = {cs: {g} for cs in g.call_sites}
        for node, key in g.higher_order_sites:
            # Only switch branches (input 2 or 3) are supported higher-order uses.
            if not (node.is_apply(P.switch) and (key == 2 or key == 3)):
                return None
            # The other branch of the same switch (input 2 <-> 3).
            g2_node = node.inputs[5 - key]
            if not g2_node.is_constant_graph():
                return None
            g2 = g2_node.value
            uses = self.manager.uses[node]
            # The switch result must only ever be called (used in position 0).
            if not all(key2 == 0 for site, key2 in uses):
                return None
            call_sites.update({site: {g, g2} for site, _ in uses})
        return call_sites

    def _make_group(self, g, entries):
        """Build the group entry for `g` (and all graphs tied to it) in `entries`."""
        valid = True
        ws = WorkSet([g])
        # Set of graphs that must all be augmented with the same fvs
        eqv = OrderedSet([g])
        # Call sites that will have to be updated
        call_sites = {}
        for currg in ws:
            eqv.add(currg)
            new_results = self.call_sites(currg)
            if new_results is None:
                # One unsupported use invalidates the whole group.
                valid = False
                break
            for _, new_eqv in new_results.items():
                ws.queue_all(new_eqv)
            call_sites.update(new_results)
        if valid:
            for gg in eqv:
                entries[gg] = NS(graph=gg, calls=call_sites, eqv=eqv)
                # Only the first graph gets the call sites
                call_sites = {}

    def make_groups(self):
        """Group the graphs according to their uses.

        Returns {graph: entry}.

        Each resulting entry contains the following fields:

        * graph: The graph for this entry.
        * eqv: A set of graphs that must be rewritten identically.
        * calls: A {call_site: graphs} dict that maps a node to the
          set of graphs that may be called there. That set may not
          include the graph for this entry. Only one graph in the
          eqv set will have a non-empty dictionary here.
        """
        entries = {}
        for g in self.graphs:
            if g in entries:
                continue
            self._make_group(g, entries)
        return entries

    def run(self):
        """Run the rewriting optimization."""
        # 1. Group the graphs and generate corresponding entries.
        entries = self.make_groups()
        # 2. Filter the entries to remove invalid ones. If an entry is invalid,
        #    all entries corresponding to graphs in its eqv are also removed,
        #    because it's all or nothing for an eqv group.
        if not getattr(self.filter, "noop", False):
            new_entries = dict(entries)
            for g, entry in entries.items():
                if g in new_entries and not self.filter(entry, entries):
                    for gg in entry.eqv:
                        del new_entries[gg]
            entries = new_entries
        # 3. Sort the entries, if operations must be done in a specific order
        if getattr(self.order_key, "noop", False):
            tasks = entries.values()
        else:
            tasks = [entries[g] for g in sorted(entries, key=self.order_key)]
        chg = False
        # 4. Rewrite the call sites (before the graphs — see rewrite_graph doc).
        for entry in tasks:
            for node in entry.calls:
                chg |= self.rewrite_call(node, entry)
        # 5. Rewrite the graphs
        for entry in tasks:
            chg |= self.rewrite_graph(entry)
        return chg

    #######################
    # Methods to override #
    #######################

    @_noop
    def filter(self, entry, all_entries):
        """Return whether to rewrite this entry.

        It is allowed to add more data to the entry by setting additional
        fields into it.

        Arguments:
            entry: The entry to look at.
            all_entries: The complete dict of entries.
        """
        raise NotImplementedError("Override in subclass")

    @_noop
    def order_key(self, g):
        """Return a key to sort graphs.

        Graphs with a lower key will be processed first.

        Arguments:
            g: The graph to order.
        """
        raise NotImplementedError("Override in subclass")

    def rewrite_call(self, node, entry):
        """Rewrite the given call site.

        self.manager should be used to perform the rewriting, either using
        a transaction or directly.

        Arguments:
            node: A call site to rewrite.
            entry: An entry with the information needed to perform the rewrite.
                Note that entry.graph is not necessarily callable from this
                call site, but one or more of the graphs in entry.eqv are.

        Returns:
            True if any changes were made.
        """
        raise NotImplementedError("Override in subclass")

    def rewrite_graph(self, entry):
        """Rewrite the graph for this entry.

        The call sites are rewritten before the graphs.

        self.manager should be used to perform the rewriting, either using
        a transaction or directly. The parameters should be changed using
        the manager/transaction, not with `graph.add_parameter`.

        Arguments:
            entry: entry.graph is the graph to be rewritten.

        Returns:
            True if any changes were made.
        """
        raise NotImplementedError("Override in subclass")
class RemoveUnusedParameters(GraphInterfaceRewriter):
    """Optimization that strips parameter positions no graph in a group uses."""

    def filter(self, entry, all_entries):
        """Accept the entry when at least one parameter position is unused in
        every graph of the eqv group; record the keep-mask on the entry."""
        keep_flags = []
        for position in zip(*[g.parameters for g in entry.eqv]):
            # Keep the position if any graph in the group still reads it.
            keep_flags.append(any(self.manager.uses[p] for p in position))
        entry.keep = keep_flags
        # No rewrite needed when every position is kept.
        return not all(keep_flags)

    def rewrite_call(self, call, entry):
        """Rebuild the call site without the arguments that were dropped."""
        fn = call.inputs[0]
        kept_args = [
            arg for arg, keep in zip(call.inputs[1:], entry.keep) if keep
        ]
        replacement = call.graph.apply(fn, *kept_args)
        replacement.abstract = call.abstract
        self.manager.replace(call, replacement)
        return True

    def rewrite_graph(self, entry):
        """Shrink the graph's parameter list to the kept positions."""
        surviving = [
            p for p, keep in zip(entry.graph.parameters, entry.keep) if keep
        ]
        self.manager.set_parameters(entry.graph, surviving)
        return True
class LambdaLiftRewriter(GraphInterfaceRewriter):
    """Lambda lifting optimization.

    Graphs with free variables for which we can identify all calls will be
    modified to take these free variables as extra arguments.

    This is a destructive operation.
    """

    relation = "llift"

    def filter(self, entry, all_entries):
        """Only graphs that have free variables will be transformed.

        In order for the lambda lifting to work properly when a function F
        refers to a function G that cannot be lambda lifted but has free
        variables (in other words, the G is a free variable of F), G will have
        to be moved inside F's scope.

        We only do this if all uses of G are inside the scope of F. Otherwise
        we will not lambda lift F.
        """
        g = entry.graph
        # Graphs that are free variables of g and will not be lifted
        # themselves (they are not in all_entries).
        fvg = {
            g2
            for g2 in g.free_variables_total
            if isinstance(g2, Graph) and g2 not in all_entries
        }
        # Union of the free variables of every graph equivalent to g.
        all_fvs = reduce(
            operator.or_,
            [gg.free_variables_extended for gg in entry.eqv],
            OrderedSet(),
        )
        if all_fvs and all(
            all(user in g.scope for user in g2.graph_users) for g2 in fvg
        ):
            entry.fvs = all_fvs
            entry.scope = {*g.scope, *fvg}
            return True
        else:
            return False

    def order_key(self, g):
        """Order graphs so that children are processed before parents.

        Reverse the order so that children are processed before parents. This
        is important when substituting the new parameters for the free
        variables, because children that are lambda lifted must replace their
        uses first (otherwise the original fvs would be replaced by their
        parent's parameters, which is not what we want)
        """
        # Memoized in a per-instance dict rather than with
        # @lru_cache(maxsize=None) on the method: an lru_cache on a bound
        # method keys on `self`, so it keeps every rewriter instance (and
        # its manager) alive for the lifetime of the process (ruff B019).
        cache = self.__dict__.setdefault("_order_key_cache", {})
        if g not in cache:
            cache[g] = self.order_key(g.parent) - 1 if g.parent else 0
        return cache[g]

    def rewrite_call(self, node, entry):
        """For each closure, we add arguments to each call of the closure.

        The arguments that are added are the original free variables, or
        DEAD if none of the graphs that can be called at that site have that
        free variable.
        """
        fvs = [
            fv
            if any(fv in gg.free_variables_extended for gg in entry.calls[node])
            else make_dead(fv)
            for fv in entry.fvs
        ]
        new_node = node.graph.apply(*node.inputs, *fvs)
        new_node.abstract = node.abstract
        self.manager.replace(node, new_node)
        return True

    def rewrite_graph(self, entry):
        """Rewrite the graphs.

        New parameters are added for each free variable.

        Then, we redirect all free variables within scope to the new
        parameters, which means that they are not closures anymore.
        """
        mng = self.manager
        new_params = list(entry.graph.parameters)
        with mng.transact() as tr:
            # Redirect the fvs to the parameter (those in scope)
            for fv in entry.fvs:
                param = self.param(entry.graph, fv)
                new_params.append(param)
                if fv in entry.graph.free_variables_extended:
                    for node, idx in mng.uses[fv]:
                        if node.graph in entry.scope:
                            tr.set_edge(node, idx, param)
            tr.set_parameters(entry.graph, new_params)
        return True
##########################
# LAMBDA LIFTING EXAMPLE #
##########################
##################
# ORIGINAL GRAPH #
##################
# def f(x, y):
# def g(z):
# return x + z
# def h():
# return g(y)
# return h()
##########
# Step 1 #
##########
# def f(x, y):
# def g(z, _x): # <- Add parameter
# return x + z
# def h(_y, _x): # <- Add parameters
# return g(y)
# return h()
##########
# Step 2 #
##########
# def f(x, y):
# def g(z, _x):
# return x + z
# def h(_y, _x):
# return g(y, x) # <- Add argument to call
# return h(y, x) # <- Add arguments to call
##########
# Step 3 #
##########
# def f(x, y):
# def g(z, _x):
# return _x + z # <- Swap fv for parameter
# def h(_y, _x):
# return g(_y, _x) # <- Swap fvs for parameters
# return h(y, x)
@dataclass
class GraphInterfaceRewriterStep:
    """Implements optimizer interface for GraphInterfaceRewriter.

    Attributes:
        rewriter: A subclass of GraphInterfaceRewriter. It will be
            instantiated in the __call__ method.
        name: The name of the optimization.
    """

    rewriter: type
    name: str

    def __call__(self, resources):
        """Apply the rewriter on root."""
        manager = resources.opt_manager
        trace_args = {"opt": self, "node": None, "manager": manager, "profile": False}
        with tracer("opt", **trace_args) as tr:
            tr.set_results(success=False, **trace_args)
            # Collect dead nodes before rewriting so we don't process garbage.
            manager.gc()
            changed = self.rewriter(manager).run()
            if changed:
                tracer().emit_success(**trace_args, new_node=None)
            return {"changes": changed}
# Public API of this module.
__all__ = [
    "GraphInterfaceRewriter",
    "GraphInterfaceRewriterStep",
    "LambdaLiftRewriter",
    "RemoveUnusedParameters",
]
# ----------------------------------------------------------------------
# apps/downtimes.py
import dash_table
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import plotly.graph_objects as go
import numpy as np
import statistics
from plotly.subplots import make_subplots
from database_connection import connect, return_engine
# Connect to the database and load the two source tables into dataframes.
conn = connect()
sql = "select * from vehicle_data;"
df_vehicle_data = pd.read_sql_query(sql, conn)
sql = "select * from driving_data;"
fleet_data = pd.read_sql_query(sql, conn)
# NOTE(review): rebinding to None only drops our reference; it does not close
# the connection — presumably connect() returns a DB-API connection, so
# consider conn.close() here.
conn = None
# get data from csv files
#df_vehicle_data = pd.read_csv('vehicle_data.csv')
#fleet_data = pd.read_csv('driving_data.csv')
# Colour theme shared by all charts below.
colors = ['rgb(66,234,221)', 'rgb(7,130,130)', 'rgb(171,209,201)', 'rgb(151,179,208)', 'rgb(118,82,139)', 'rgb(173,239,209)', 'rgb(96,96,96)', 'rgb(214,65,97)']
df_vehicle_data = df_vehicle_data.round(decimals=2)
######## Convert maintenance score to maintenance status ############
# Bucket the numeric scheduled_maintenance score into three readable states:
# score >= 8 -> 'No need', 1 < score < 8 -> 'Soon', score <= 1 -> 'Need'.
df_maintenance_status = df_vehicle_data.copy()
conditions = [
    (df_vehicle_data['scheduled_maintenance'] >= 8),
    (df_vehicle_data['scheduled_maintenance'] < 8) & (df_vehicle_data['scheduled_maintenance'] > 1),
    (df_vehicle_data['scheduled_maintenance'] <= 1)]
choices = ['No need', 'Soon', 'Need']
df_maintenance_status['scheduled_maintenance'] = np.select(conditions, choices, default='null')
##### Convert accident_probability score to accident probability status ########
# Same bucketing for accident probability: <= 40 low, 40-90 mid, > 90 high.
df_accident_probability = df_vehicle_data.copy()
conditions = [
    (df_vehicle_data['accident_probability'] <= 40),
    (df_vehicle_data['accident_probability'] <= 90) & (df_vehicle_data['accident_probability'] > 40),
    (df_vehicle_data['accident_probability'] > 90)]
choices = ['Low Risk', 'Mid Risk', 'High Risk']
df_accident_probability['accident_probability'] = np.select(conditions, choices, default='null')
# PieCharts
############### Downtimes Overview graph ################
# Restrict the overview to vehicles that are currently out of service.
df_vehicle_status = df_vehicle_data.copy()
accepted_vehicle_status_array = ['accident', 'unused', 'maintenance', 'traffic jam']
df_vehicle_status = df_vehicle_status.loc[df_vehicle_data['vehicle_status'].isin(accepted_vehicle_status_array)]
# Count vehicles per status: one row per status with its amount.
# (Renamed from the typo'd `lables`; also dropped an unused
# `values = ...value_counts()` line that was overwritten before use.)
status_counts = df_vehicle_status.groupby(['vehicle_status'])['licence_plate'].count().reset_index()
status_counts.columns = (['vehicle_status', 'Amount'])
# Total number of down vehicles, shown in the middle of the donut.
text = len(df_vehicle_status)
pie1 = go.Figure(data=[go.Pie(labels=status_counts['vehicle_status'], values=status_counts['Amount'], hole=.3)])
pie1.update_traces(marker=dict(colors=colors))
pie1.update_layout(
    annotations=[dict(text=text, font_size=20, showarrow=False)]
)
############################## Need for Maintenance graph ###################################
# BUG FIX: labels were taken from .unique() (order of appearance) while values
# came from .value_counts() (sorted by count), so slices could be paired with
# the wrong counts.  Take labels and values from the same value_counts()
# result so they stay aligned.
maintenance_counts = df_maintenance_status['scheduled_maintenance'].value_counts()
pie2 = go.Figure(data=[go.Pie(labels=maintenance_counts.index, values=maintenance_counts.values, hole=.3)])
pie2.update_traces(marker=dict(colors=colors))
##################### Accident Probability graph #####################################
accident_counts = df_accident_probability['accident_probability'].value_counts()
pie3 = go.Figure(data=[go.Pie(labels=accident_counts.index, values=accident_counts.values, hole=.3)])
pie3.update_traces(marker=dict(colors=colors))
####################### Mapbox ###########################
######### Mapbox Accidents ##############
# Show only vehicles whose current status is 'accident' on the map.
df_vehicle_accidents = df_vehicle_data.copy()
only_accidents_array = ['accident']
df_vehicle_accidents = df_vehicle_accidents.loc[df_vehicle_accidents['vehicle_status'].isin(only_accidents_array)]
fleet_lat = df_vehicle_accidents.position_latitude
fleet_lon = df_vehicle_accidents.position_longitude
fleet_text = df_vehicle_accidents.licence_plate
mapbox_accidents = go.Figure(go.Scattermapbox(
    text=fleet_text,
    lat=fleet_lat,
    lon=fleet_lon,
    mode='markers',
    #hoverinfo='all',
    marker=go.scattermapbox.Marker(
        size=12,
        symbol='fire-station',
        color='rgb(242, 177, 172)'
    ),
))
mapbox_accidents.update_layout(
    margin=dict(l=0, r=0, t=0, b=0),
    autosize=True,
    hovermode='closest',
    mapbox=dict(
        # NOTE(review): '<KEY>' is a redacted Mapbox token placeholder;
        # a real access token must be supplied before deployment.
        accesstoken='<KEY>',
        bearing=0,
        center=dict(
            lat=40.92,
            lon=-91.07
        ),
        pitch=0,
        zoom=3,
        style='mapbox://styles/jakobschaal/ckcv9t67c097q1imzfqprsks9',
    ),
)
###### Mapbox total ########
####### Vehicle position data extraction ###################
# Whole-fleet map: every vehicle, regardless of status.
fleet_lat = df_vehicle_data.position_latitude
fleet_lon = df_vehicle_data.position_longitude
fleet_vid = df_vehicle_data.vid
fig = go.Figure(go.Scattermapbox(
    lat=fleet_lat,
    lon=fleet_lon,
    mode='markers',
    marker=go.scattermapbox.Marker(
        size=9
    ),
    text=fleet_vid,
))
fig.update_layout(
    margin=dict(l=0, r=0, t=0, b=0),
    autosize=True,
    hovermode='closest',
    mapbox=dict(
        # NOTE(review): '<KEY>' is a redacted Mapbox token placeholder.
        accesstoken='<KEY>',
        bearing=0,
        center=dict(
            lat=38.92,
            lon=-100.07
        ),
        pitch=0,
        zoom=5,
        style='mapbox://styles/jakobschaal/ckb1ekfv005681iqlj9tery0v',
    ),
)
###### Maintenance Calendar ########
######## sorting dataframe for maintenance calendar ############
df_vehicle_data = df_vehicle_data.sort_values(by='licence_plate', ascending=False)
##### oldest vehicle table #####
oldest_vehicle_data = df_vehicle_data.sort_values(by='vehicle_construction_year', ascending=False)
# Create figure with secondary y-axis
oldest_vehicle = make_subplots(specs=[[{"secondary_y": True}]])
# Fleet-wide mean construction year, drawn as a reference line over the bars.
x = statistics.mean(df_vehicle_data['vehicle_construction_year'])
df_vehicle_data['mean'] = x
# Add traces
oldest_vehicle.add_trace(
    go.Bar(x=oldest_vehicle_data['licence_plate'], y=oldest_vehicle_data['vehicle_construction_year'],
           name="construction year", marker=dict(color='rgb(7,130,130)')),
    secondary_y=False,
)
oldest_vehicle.add_trace(
    go.Scatter(x=oldest_vehicle_data['licence_plate'], y=df_vehicle_data['mean'], name="mean", marker=dict(color='rgb(66,234,221)')),
    secondary_y=True,
)
# Set x-axis title
oldest_vehicle.update_xaxes(title_text="Licence Plate")
# Set y-axes titles
oldest_vehicle.update_yaxes(title_text="Year", secondary_y=False)
# Pin both y-axes to the same fixed range so bars and mean line share a scale.
oldest_vehicle['layout']['yaxis'].update(range=[1999, 2020], dtick=5, autorange=False)
oldest_vehicle['layout']['yaxis2'].update(range=[1999, 2020], dtick=5, autorange=False)
##### driving distance table #####
# Accumulate total driven distance per vehicle id over all driving_data rows.
# (The accumulator was previously named `sum`, shadowing the builtin.)
distance_by_vid = {}
for index, row in fleet_data.iterrows():
    vid = int(row['vid'])
    distance_by_vid[vid] = distance_by_vid.get(vid, 0) + row['distance_total']
df_vehicle_data['distance'] = df_vehicle_data['vid'].map(distance_by_vid)
df_vehicle_data = df_vehicle_data.sort_values(by='distance', ascending=False)
# Create figure with secondary y-axis
distance = make_subplots(specs=[[{"secondary_y": True}]])
# Fleet-wide mean distance, drawn as a reference line over the bars.
x_distance = statistics.mean(df_vehicle_data['distance'])
df_vehicle_data['mean_distance'] = x_distance
# Add traces
distance.add_trace(
    go.Bar(x=df_vehicle_data['licence_plate'], y=df_vehicle_data['distance'], name="distance", marker=dict(color='rgb(7,130,130)')),
    secondary_y=False,
)
distance.add_trace(
    go.Scatter(x=df_vehicle_data['licence_plate'], y=df_vehicle_data['mean_distance'], name="mean", marker=dict(color='rgb(66,234,221)')),
    secondary_y=True,
)
# Set x-axis title
distance.update_xaxes(title_text="Licence Plate")
# Set y-axes titles
distance.update_yaxes(title_text="Distance", secondary_y=False)
# Pin both y-axes to the same fixed range so bars and mean line share a scale.
distance['layout']['yaxis'].update(range=[0, 20000], dtick=1000, autorange=False)
distance['layout']['yaxis2'].update(range=[0, 20000], dtick=1000, autorange=False)
#### view layout #####
# Top-level Dash layout: two tabs (Downtimes overview, Maintenance Calendar).
layout = html.Div(
    className='downtimes-content',
    children=[
        ###### Tab-Layout ############
        dcc.Tabs([
            # Downtimes Home
            dcc.Tab(label='Downtimes', children=[
                ################# Row 1: downtime pie + table, accidents map ####
                dbc.Row([
                    dbc.Col([
                        dbc.Row(
                            dbc.Col(
                                html.Div(
                                    html.H1('Vehicle Downtimes'),
                                ),
                            ),
                        ),
                        dbc.Row([
                            dbc.Col(dcc.Graph(figure=pie1, config={'responsive': True}), className='piechart'),
                            dbc.Col([
                                ################## Radiobuttons Downtimes ###################
                                dcc.Checklist(
                                    id='page-downtimes-radios-1',
                                    options=[{'label': i, 'value': i}
                                             for i in ['unused', 'accident', 'maintenance']],
                                    value=['unused', 'accident', 'maintenance']),
                                ################## Searchbox Downtimes ###################
                                ################## Table Downtimes #########################
                                dash_table.DataTable(
                                    id="downtime_table",
                                    filter_action='native',
                                    sort_action='native',
                                    style_table={
                                        'maxHeight': '',
                                        'maxWidth': '',
                                        'overflowY': ''
                                    },
                                    # Rows are filled in by a callback; start empty.
                                    data=[{}],
                                    # Iterating a DataFrame yields its column names,
                                    # so this builds one column spec per selected column.
                                    columns=[{'name': i, 'id': i} for i in
                                             df_vehicle_data.loc[:, ['licence_plate', 'vehicle_status']]],
                                    page_size=10,
                                    style_header={
                                        'backgroundColor': '#f1f1f1',
                                        'fontWeight': 'bold',
                                        'fontSize': 12,
                                        'fontFamily': 'Open Sans'
                                    },
                                    style_cell={
                                        'padding': '5px',
                                        'fontSize': 13,
                                        'fontFamily': 'sans-serif'
                                    },
                                    style_cell_conditional=[
                                    ]),
                            ]),
                        ]),
                    ], className='card-tab card', width=True),
                    ################## Map Accidents #########################
                    dbc.Col(html.Div([
                        html.Div([
                            html.Div(
                                html.H1('Accidents'), className='map-margin'
                            ),
                            html.Div(
                                dcc.Graph(figure=mapbox_accidents, config={'responsive': True},
                                          className='accidentsmap'),
                            ),
                        ]),
                    ]), className='card-tab card', width=True),
                ]),
                ################# Row 2: maintenance + accident probability ####
                dbc.Row([
                    dbc.Col([
                        dbc.Row(
                            dbc.Col(
                                html.Div(
                                    html.H1('Need for Maintenance'),
                                ),
                            ),
                        ),
                        dbc.Row([
                            dbc.Col(dcc.Graph(figure=pie2)),
                            dbc.Col([
                                ################## Radio-Buttons Maintenance ################
                                dcc.Checklist(
                                    id='page-downtimes-radios-2',
                                    options=[{'label': i, 'value': i}
                                             for i in ['Need', 'Soon', 'No need']],
                                    value=['Need', 'Soon', 'No need']),
                                ################## Searchbox Maintenance ###################
                                dash_table.DataTable(
                                    id='maintenance_table',
                                    data=[{}],
                                    filter_action='native',
                                    sort_action='native',
                                    columns=[{'name': i, 'id': i} for i in
                                             df_maintenance_status.loc[:, ['licence_plate', 'scheduled_maintenance']]],
                                    page_size=10,
                                    style_header={
                                        'backgroundColor': '#f1f1f1',
                                        'fontWeight': 'bold',
                                        'fontSize': 12,
                                        'fontFamily': 'Open Sans'
                                    },
                                    style_cell={
                                        'padding': '5px',
                                        'fontSize': 13,
                                        'fontFamily': 'sans-serif'
                                    },
                                    style_cell_conditional=[
                                    ]),
                            ]),
                        ]),
                    ], className='card-tab card', width=True),
                    dbc.Col([
                        dbc.Row(
                            dbc.Col(
                                html.Div(
                                    html.H1('Accident Probability'),
                                ),
                            ),
                        ),
                        dbc.Row([
                            dbc.Col(dcc.Graph(figure=pie3)),
                            dbc.Col([
                                ################## Searchbox Accidents ###################
                                dcc.Checklist(
                                    id='page-downtimes-radios-3',
                                    options=[{'label': i, 'value': i}
                                             for i in ['High Risk', 'Mid Risk', 'Low Risk']],
                                    value=['High Risk', 'Mid Risk', 'Low Risk']),
                                ################## Searchbox Accidents ###################
                                dash_table.DataTable(
                                    id='table-accident-probability',
                                    data=[{}],
                                    filter_action='native',
                                    sort_action='native',
                                    # columns=[{'id': c, 'name': c} for c in vehicle_data.columns],
                                    columns=[{'name': i, 'id': i} for i in
                                             df_accident_probability.loc[:, ['licence_plate', 'accident_probability']]],
                                    page_size=10,
                                    style_header={
                                        'backgroundColor': '#f1f1f1',
                                        'fontWeight': 'bold',
                                        'fontSize': 12,
                                        'fontFamily': 'Open Sans'
                                    },
                                    style_cell={
                                        'padding': '5px',
                                        'fontSize': 13,
                                        'fontFamily': 'sans-serif'
                                    },
                                    style_cell_conditional=[
                                    ]),
                            ]),
                        ]),
                    ], className='card-tab card', width=True),
                ]),
                ############# Row 3: oldest vehicles + longest distance #############
                dbc.Row([
                    dbc.Col([
                        dbc.Row(
                            dbc.Col(
                                html.Div(
                                    html.H1('Oldest Vehicles'),
                                ),
                            ),
                        ),
                        dcc.Graph(id='graph-oldest_vehicle', figure=oldest_vehicle)
                    ], className='card-tab card', width=True),
                    # Longest Distance table
                    dbc.Col([
                        dbc.Row(
                            dbc.Col(
                                html.Div(
                                    html.H1('Longest Distance'),
                                ),
                            ),
                        ),
                        dcc.Graph(id='graph-distance', figure=distance)
                    ], className='card-tab card', width=True),
                ]),
            ]),
            # Maintenance Calendar tab: heatmap filled in by a callback after a
            # licence plate is selected in the dropdown.
            dcc.Tab(label='Maintenance Calendar', children=[
                html.Div(children=[
                    dcc.Dropdown(id='heatmap-dropdown',
                                 options=[{'value': x, 'label': x} for x in df_vehicle_data['licence_plate']],
                                 #multi=True,
                                 #value='x',
                                 placeholder='Select license plate'),
                    dcc.Graph(id='heatmap',
                              #figure=maintenance_calendar(), config={'displayModeBar': False}
                              )
                ], className="maintenance-calender"
                )
            ]),
        ]),
    ])
# ----------------------------------------------------------------------
"""
Plot electric potential output from CHARMM PBEQ calculation.
These units are labeled as Angstrom for grid and unitCharge/Angstrom for potential.
Verify these with CHARMM configuration file.
By: <NAME>
Example: python plotpbeq.py -d /path/to/dir -i -1 -c X
"""
import os
import re
import sys
import pandas as pd
import numpy as np
import pickle
import matplotlib as mpl
import matplotlib.pyplot as plt
def get_data(flist):
    """
    Extract data to pandas dataframes from all files in flist.
    All coordinates should be same within all phi files in this directory.

    Parameters
    ----------
    flist - list of phi file names to read from CHARMM PBEQ output

    Returns
    -------
    df - dataframe with X Y Z coordinates (read once, from the first file),
         one raw potential column per file, and a 'PHI' column holding the
         per-grid-point mean over all files
    """
    # Coordinates are assumed identical across files, so read them only once.
    # sep uses a raw string: '\s+' is an invalid escape sequence and raises
    # a SyntaxWarning on Python >= 3.12.
    crds = pd.read_csv(flist[0], index_col=None, sep=r'\s+', header=None, usecols=[0, 1, 2])
    crds.columns = ["X", "Y", "Z"]
    templist = []
    for i, f in enumerate(flist):
        if i % 10 == 0:
            print(i)  # status check
        tempdf = pd.read_csv(f, index_col=None, sep=r'\s+', header=None, usecols=[3])
        templist.append(tempdf)
    data1 = pd.concat(templist, axis=1)
    # Average the potential over all frames (files) at each grid point.
    data1['PHI'] = data1.mean(axis=1)
    df = pd.concat([crds, data1], axis=1)
    return df
def plot_contour(abscissa, ordinate):
    """Contour-plot the module-global potential `phi` over one 2D plane.

    Uses module-level globals: phi, frame, coord, othr_coord.
    Saves the figure to '<coord>_<frame>.png'.
    https://matplotlib.org/examples/pylab_examples/griddata_demo.html
    """
    # mpl.mlab.griddata was deprecated in matplotlib 2.2 and removed in 3.0;
    # scipy.interpolate.griddata is the documented replacement.
    from scipy.interpolate import griddata

    fig = plt.figure()
    # define grid
    nlevels = 15  # number of contours to draw
    xi = np.linspace(int(min(abscissa)), int(max(abscissa)), 1000)
    yi = np.linspace(int(min(ordinate)), int(max(ordinate)), 1000)
    zi = griddata(
        (np.asarray(abscissa), np.asarray(ordinate)),
        np.asarray(phi),
        (xi[None, :], yi[:, None]),
        method='linear',
    )
    cs = plt.contour(xi, yi, zi, nlevels, linewidths=0.5, colors='k')
    # scipy fills points outside the convex hull with NaN (mlab returned a
    # masked array), so use nanmax for a NaN-safe colour range.
    zmax = np.nanmax(np.abs(zi))
    cs = plt.contourf(xi, yi, zi, nlevels, vmax=zmax, vmin=-zmax)
    plt.colorbar(label=r"$\Phi$ (e/$\AA$)")
    # Raw strings: '\A' is an invalid escape sequence in plain strings.
    plt.xlabel(r'{} ($\AA$)'.format(othr_coord))
    plt.ylabel(r'Z ($\AA$)')
    plt.title(r'Frame: {} at {} = 0 $\AA$'.format(frame, coord))
    plt.savefig('{}_{:03d}.png'.format(coord, frame))
    # plt.show()
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description='Plot electrostatic potential from CHARMM PBEQ calculation.')
    parser.add_argument("-d", "--directory", required=True,
                        help="Base directory containing .phi files.")
    parser.add_argument("-i", "--frame", required=True, type=int,
                        help="Specify frame number for which to plot potential. "
                             "To plot average of all frames, use -1")
    parser.add_argument("-p", "--readpickle", default=False,
                        help="Look for specified pickle read data from there. "
                             "Don't use this flag if reading from .phi files.")
    parser.add_argument("-c", "--coordinate", required=True,
                        help="Coordinate to view from. Upper case choice of X or Y. "
                             "E.g., if specified X, plot will display Y against Z.")
    args = parser.parse_args()
    basedir = args.directory
    frame = args.frame

    # Specify coordinate to view from; the other in-plane axis is plotted
    # against Z.
    if args.coordinate.upper() in {'X', 'Y'}:
        coord = args.coordinate.upper()
        if coord == 'X': othr_coord = 'Y'
        if coord == 'Y': othr_coord = 'X'
    else:
        sys.exit('Specify a valid coordinate. X Y')

    # Generate list of file names: every .phi file for frame == -1 (average),
    # otherwise the single '<frame>.phi' file.
    if frame == -1:
        fs = [x for x in os.listdir(basedir) if x.endswith(".phi")]  # find phi files
        fs = ['{}/{}'.format(basedir, f) for f in fs]  # add directory to filename
        # Sort numerically by the digits in the name (raw string: '\D' is an
        # invalid escape sequence on Python >= 3.12).
        fs = sorted(fs, key=lambda x: (int(re.sub(r'\D', '', x)), x))
    else:
        fs = "{}/{}.phi".format(basedir, frame)
        if not os.path.isfile(fs):
            sys.exit('ERROR. File not found: {}.'.format(fs))
        fs = [fs]
    print('\n'.join(fs))

    # Read in data from files (and cache as pickle) or reload from pickle.
    if not args.readpickle:
        df = get_data(fs)
        pickle.dump(df, open('numpyArrays.pickle', 'wb'))
    else:
        df = pickle.load(open(args.readpickle, 'rb'))

    # Extract data from the plane coord == 0 (X=0 or Y=0).
    df_2d = df.loc[df[coord] == 0]
    abscissa = df_2d[othr_coord]  # make this generalizable to read in either X or Y
    ordinate = df_2d['Z']
    phi = df_2d['PHI']
    # plot
    plot_contour(abscissa, ordinate)
# ----------------------------------------------------------------------
import numpy as np
import pandas as pd
import subprocess
import os
import classes
def get_sn_from_file(pathToSN, magFlag=False):
    """Reads photometric data of SN from file formatted as in SNPhotCC

    Keyword arguments:
    pathToSN -- path to file from which extract data.
    magFlag  -- passed through to the Supernova constructor.

    Returns:
    sn -- object of class Supernova.
    """
    # Thin wrapper: the Supernova constructor does all the parsing.
    return classes.Supernova(pathToSN, magFlag)
def get_fit_from_file(pathToFit, magFlag=False):
    """Reads photometric fit of SN from file formatted as in SNPhotCC

    Keyword arguments:
    pathToSN -- path to file from which to extract data.

    Returns:
    fit -- object of class SupernovaFit.
    """
    # A fit file has the same layout as a raw SN file, so reuse the reader.
    tmp = get_sn_from_file(pathToFit, magFlag)
    """
    Initializing SupernovaFit object
    """
    fit = classes.SupernovaFit(tmp, tmp.kern if hasattr(tmp, 'kern') else None)
    # Copy each band's light curve into the fit object.
    for b in tmp.lcsDict.keys():
        fit.set_lightcurve(b,
                           tmp.lcsDict[b].mjd,
                           tmp.lcsDict[b].flux,
                           tmp.lcsDict[b].fluxErr,
                           magFlag=magFlag
                           )
    del(tmp)
    # Epochs can only be aligned on peak if the r-band light curve is usable.
    if fit.r.badCurve == False:
        fit.shift_mjds()
    """
    Fixing shiftedMjd for not-peaked LCs
    """
    if (fit.peaked == False) and (fit.r.badCurve == False):
        """
        correcting using CC results
        """
        # Shift every band by the cross-correlation estimate of the peak epoch.
        for b in fit.lcsDict.keys():
            fit.lcsDict[b].shiftedMjd = [
                el + fit.ccMjdMaxFlux for el in fit.lcsDict[b].shiftedMjd
            ]
    return fit
def check_lc_from_file(fileDir):
    """Creates a `Supernova` objects from files in specified directory and checks for bad light curves.

    Keyword arguments:
    fileDir -- string, directory in which to look for files.
    """
    # NOTE(review): Python 2 style throughout (print statement, str pipe
    # output); shelling out to `ls` also assumes a POSIX environment.
    p = subprocess.Popen("ls *.DAT", shell=True, stdout=subprocess.PIPE,
                         cwd=fileDir)
    lsDir = p.stdout.read()
    lsDir = lsDir.split('\n')
    lsDir.sort()
    lsDir.remove('')
    for i in range(len(lsDir)):
        tmpSN = get_sn_from_file(fileDir+lsDir[i])
        # Report files whose r-band curve is unusable downstream.
        if tmpSN.r.badCurve:
            print "{:<} Has bad curve in r band - ".format(lsDir[i]) +\
                "THE FILE HAS TO BE DELETED"
def create_file(indexList, outFilePath):
    """Creates file contaning list of files 'DES_SN*_FIT.DAT' in directory train_data/DES_BLIND+HOSTZ_FIT/.

    Only IDs whose fit file actually exists (is openable) are written.

    Keyword arguments:
    indexList -- Python list, contains supernova IDs.
    outFilePath -- string, path where to create the output file.
    """
    # indexArr = np.loadtxt(indexFile, dtype=np.int)
    outFile = open(outFilePath, 'w')
    for i in indexList:
        filePath = 'train_data/DES_BLIND+HOSTZ_FIT/' + \
            'DES_SN{:0>6d}_FIT.DAT'.format(i)
        try:
            f = open(filePath, 'r')
        except IOError:
            # No fit file for this ID (or unreadable): skip it.
            continue
        # BUG FIX: the probe handle was previously never closed (leaked one
        # file descriptor per existing file).
        f.close()
        outFile.write(filePath + '\n')
    outFile.close()
def index_to_filename(indexList, inFileName, outFileName):
    """Filters a list of files using specified indexes.

    Keyword arguments:
    indexList -- Python list (or numpy index/mask array), indices to keep in the output.
    inFileName -- path to file containing list of files.
    outFileName -- path to output file.
    """
    inFile = open(inFileName, "r")
    lines = inFile.readlines()
    inFile.close()
    # BUG FIX: dtype was np.str, an alias deprecated in NumPy 1.20 and
    # removed in 1.24; the builtin str is the documented replacement.
    npLines = np.array(lines, dtype=str)
    outFileList = npLines[indexList]
    # Lines keep their own trailing '\n', so suppress savetxt's newline.
    np.savetxt(outFileName, outFileList, fmt='%s', newline='')
def rename_bad_r_lc_file(path):
    """Renames files of fitted lc with a bad lc in r band to extension '.BADrLC'.

    Keyword arguments:
    path -- string, path to directory in which to find files to checked.

    Returns:
    countBad -- number of files renamed.
    """
    # Normalise to a trailing separator so simple concatenation works below.
    if path[-1] != os.sep:
        path = path + os.sep
    # NOTE(review): Python 2 era code — on Python 3 p.stdout.read() returns
    # bytes and split('\n') would fail; also assumes a POSIX `ls`.
    p = subprocess.Popen("ls *FIT.DAT", shell=True, stdout=subprocess.PIPE,
                         cwd=path)
    lsList = p.stdout.read()
    lsList = lsList.split('\n')
    lsList.sort()
    lsList.remove('')
    countBad = 0
    for i in range(len(lsList)):
        tmpSN = get_sn_from_file(path+lsList[i])
        # Tag unusable fits so later passes skip them.
        if tmpSN.r.badCurve:
            os.rename(path+lsList[i], path+lsList[i]+'.BADrLC')
            countBad += 1
    return countBad
def extract_redshift_data(path, outFile):
    """Extract redshift from files and produces a CSV file, to be read from R to study redshift distribution.

    Keyword arguments:
    path -- where to find supernova files.
    outFile -- path to output file.

    Notes:
    The output CSV file will have 4 columns:
    SNID -- integer
    Redshift -- float, spectroscopic or photometric
    Training flag -- 1 for training 0 for test
    SN Type -- from 'SIMGEN_PUBLIC_DES.DUMP' file
    """
    if path[-1] != os.sep:
        path = path + os.sep
    # NOTE(review): Python 2 era code — bytes vs str pipe output on Python 3,
    # and DataFrame.convert_objects was removed from pandas long ago.
    p = subprocess.Popen("ls *.DAT", shell=True, stdout=subprocess.PIPE,
                         cwd=path)
    lsList = p.stdout.read()
    lsList = lsList.split('\n')
    lsList.sort()
    lsList.remove('')
    # Simulation dump provides the true generated type (GENTYPE) per CID.
    dump = pd.read_csv(
        'train_data/SIMGEN_PUBLIC_DES/SIMGEN_PUBLIC_DES.DUMP',
        sep=' ', skiprows=0, header=1, usecols=[1, 2],
        skipinitialspace=True, engine='c')
    dump = dump.convert_objects(convert_numeric=True, copy=False)
    snid = np.empty(len(lsList), dtype=np.int)
    redshift = np.empty(len(lsList), dtype=np.float)
    trainFlag = np.zeros(len(lsList), dtype=np.int)
    genType = np.zeros(len(lsList), dtype=np.int)
    for i in range(len(lsList)):
        tmpSN = get_sn_from_file(path+lsList[i])
        snid[i] = tmpSN.SNID
        # Prefer the spectroscopic redshift; fall back to host photo-z.
        redshift[i] = tmpSN.zSpec if (tmpSN.zSpec != None) else tmpSN.zPhotHost
        # Having a spectroscopic redshift marks the SN as a training object.
        trainFlag[i] = 1 if (tmpSN.zSpec != None) else 0
        genType[i] = dump['GENTYPE'][dump['CID'] == snid[i]]
    df = pd.DataFrame(
        data=zip(snid, redshift, trainFlag, genType),
        columns=['SNID', 'redshift', 'train_flag', 'genType'])
    df.to_csv(
        'products/'+outFile, sep=';', index=False,
        float_format='%5.4f', header=True)
def extract_training_set(path, fileName):
    """Creates files dividing supernovae in training and test sets.

    It creates also files list training set supernovae by type

    Keyword arguments:
    path -- where to find supernova light curves files

    Notes:
    Created files are saved in directory 'products/'. Their name are, so far, fixed.
    fileName.TEST
    fileName.TRAIN
    fileName.[SNType].TRAIN
    """
    if path[-1] != os.sep:
        path = path + os.sep
    badCount = 0
    # NOTE(review): Python 2 era code (print statement, str pipe output).
    p = subprocess.Popen("ls *.DAT", shell=True, stdout=subprocess.PIPE,
                         cwd=path)
    lsList = p.stdout.read()
    lsList = lsList.split('\n')
    lsList.sort()
    lsList.remove('')
    # if path.rfind('/') == len(path)-1:
    #     fileName = path.rpartition('/')[0].rpartition('/')[-1]
    # else:
    #     fileName = path.rpartition('/')[-1]
    # One catalogue file per category; each starts with the source path.
    # NOTE(review): outFileRej is created and closed but never written to.
    outFileTest = open('{:s}{:s}.TEST'.format(path, fileName), 'w')
    outFileTrain = open('{:s}{:s}.TRAIN'.format(path, fileName), 'w')
    outFileIa = open('{:s}{:s}.Ia.TRAIN'.format(path, fileName), 'w')
    outFileII = open('{:s}{:s}.II.TRAIN'.format(path, fileName), 'w')
    outFileIbc = open('{:s}{:s}.Ibc.TRAIN'.format(path, fileName), 'w')
    outFileIaPec = open('{:s}{:s}.IaPec.TRAIN'.format(path, fileName), 'w')
    outFileOther = open('{:s}{:s}.Other.TRAIN'.format(path, fileName), 'w')
    outFileRej = open('{:s}{:s}.Rej.TRAIN'.format(path, fileName), 'w')
    outFileTest.write('# {:s}\n'.format(path))
    outFileTrain.write('# {:s}\n'.format(path))
    outFileIa.write('# {:s}\n'.format(path))
    outFileII.write('# {:s}\n'.format(path))
    outFileIbc.write('# {:s}\n'.format(path))
    outFileIaPec.write('# {:s}\n'.format(path))
    outFileOther.write('# {:s}\n'.format(path))
    outFileRej.write('# {:s}\n'.format(path))
    for i in range(len(lsList)):
        tmpSN = get_sn_from_file(path+lsList[i])
        # Skip (and count) SNe with an unusable r-band light curve; the
        # running index written below is corrected by badCount.
        if tmpSN.r.badCurve:
            badCount += 1
            continue
        SNType = tmpSN.SNTypeInt
        # SNTypeInt == -9 means "type unknown" -> test set; anything else is
        # labelled and goes to the training set (then to a per-type file).
        if SNType != -9:
            outFileTrain.write(
                "{:0>5d} {:0>6d} {:>}\n".format(
                    i-badCount, tmpSN.SNID, path+lsList[i]
                )
            )
        else:
            outFileTest.write(
                "{:0>5d} {:0>6d} {:>}\n".format(
                    i-badCount, tmpSN.SNID, path+lsList[i]
                )
            )
            continue
        # Per-type training files.  Presumed SNPhotCC codes:
        # 1 = Ia, 21/22/23 = II, 3/32/33 = Ibc, 11 = peculiar Ia,
        # 66 = other, -1 treated as Ibc — TODO confirm against the
        # SNPhotCC documentation.
        if SNType == 1:
            outFileIa.write(
                "{:0>5d} {:0>6d} {:>} snIa\n".format(
                    i-badCount, tmpSN.SNID, path+lsList[i]
                )
            )
            continue
        if SNType == 21 or SNType == 22 or SNType == 23:
            outFileII.write(
                "{:0>5d} {:0>6d} {:>} snII\n".format(
                    i-badCount, tmpSN.SNID, path+lsList[i]
                )
            )
            continue
        if SNType == 3 or SNType == 32 or SNType == 33:
            outFileIbc.write(
                "{:0>5d} {:0>6d} {:>} snIbc\n".format(
                    i-badCount, tmpSN.SNID, path+lsList[i]
                )
            )
            continue
        if SNType == 11:
            outFileIaPec.write(
                "{:0>5d} {:0>6d} {:>} pec\n".format(
                    i-badCount, tmpSN.SNID, path+lsList[i]
                )
            )
            continue
        if SNType == 66:
            outFileOther.write(
                "{:0>5d} {:0>6d} {:>} other\n".format(
                    i-badCount, tmpSN.SNID, path+lsList[i]
                )
            )
            continue
        if SNType == -1:
            outFileIbc.write(
                "{:0>5d} {:0>6d} {:>} snIbc\n".format(
                    i-badCount, tmpSN.SNID, path+lsList[i]
                )
            )
            continue
        # Fallback for any unrecognised type code.
        outFileOther.write(
            "{:0>5d} {:0>6d} {:>} other\n".format(
                i-badCount, tmpSN.SNID, path+lsList[i]
            )
        )
    print badCount
    outFileTest.close()
    outFileTrain.close()
    outFileIa.close()
    outFileII.close()
    outFileIbc.close()
    outFileIaPec.close()
    outFileOther.close()
    outFileRej.close()
def rewrite_file(fileName):
    """Rewrites files produced after fit of old code version.

    It removes `#` at beginning of the first 10 lines, leaving as it is the
    first line.
    If necessary it adds `MJD_MAX_FLUX-CCF: 0.000`.
    Adds column `OBS` containing row names.

    --- DEPRECATED ---
    """
    # NOTE(review): Python 2 only (`file()` builtin, print statements).
    # Rewrites the file in place: read everything, then reopen for writing.
    inFile = file(fileName, 'r')
    lines = inFile.readlines()
    inFile.close()
    outFile = open(fileName, 'w')
    line = ''
    for i in range(len(lines)):
        # Line 0 (the title line) is kept verbatim.
        if i == 0:
            outFile.write(lines[i])
            continue
        # Header lines 1-9: strip the leading '# ', inserting a default
        # REDSHIFT_SPEC line at position 7 if it is missing.
        if i > 0 and i < 10:
            if i == 7 and ('REDSHIFT_SPEC:' not in lines[i]):
                line = 'REDSHIFT_SPEC: -9.0000 +- 9.0000\n'
                outFile.write(line)
            line = lines[i]
            line = line[2:]
            outFile.write(line)
            continue
        # Line 10: insert the MJD_MAX_FLUX-CCF default if absent.
        if i == 10:
            if lines[i] != '\n':
                outFile.write(lines[i])
            else:
                line = 'MJD_MAX_FLUX-CCF: 0.000\n'
                outFile.write(line)
                outFile.write(lines[i])
                print i, lines[i]
            continue
        # empty space between details and table
        if lines[i] == '\n' and i > 10:
            outFile.write(lines[i])
            print i, lines[i]
            continue
        if lines[i][0] == '#':
            outFile.write(lines[i])
            continue
        # Table header row: add the FIELD and OBS columns if missing.
        if 'MJD' in lines[i]:
            if 'FIELD' not in lines[i]:
                line = lines[i][0:15] + '  FIELD' + lines[i][15:]
            if 'OBS' not in lines[i]:
                line = lines[i]
                line = '  OBS  ' + line
            outFile.write(line)
            continue
        # Table separator row: widen it to match the added columns.
        if '----' in lines[i]:
            line = lines[i][0:15] + ' ------' + lines[i][15:]
            line = '---- ' + line
            outFile.write(line)
            continue
        # Data rows: add a NULL FIELD value and the OBS: row label.
        line = lines[i][0:15] + '  NULL ' + lines[i][15:]
        line = 'OBS: ' + line
        outFile.write(line)
    outFile.close()
# ----------------------------------------------------------------------
# gh_stars: 1-10
import torch
from model import AudioToPose, PoseDiscriminator
from transforms import BVHtoMocapData, MocapDataToExpMap, Pipeline, AudioToLogMelSpec
from pathlib import Path
from argparse import Namespace
import torch.nn as nn
import librosa
d = 1
class GesturePrediction:
    """Inference wrapper: generate a BVH gesture sequence from an audio file.

    Loads a trained AudioToPose generator from a training checkpoint and
    writes the predicted motion as BVH under <experiment>/infer/output/.
    Requires a CUDA device (tensors and models are moved with .cuda()).
    """

    def __init__(self, checkpoint):
        """Prepare transforms and output folders, and load the checkpoint.

        checkpoint -- path to a .pt file saved during training.
        """
        checkpoint = Path(checkpoint)
        # BVH -> mocap -> exponential-map pipeline; also used inverted to
        # turn predicted poses back into BVH text.
        self.mocap_pipeline = Pipeline([BVHtoMocapData, MocapDataToExpMap])
        self.mel_spec = AudioToLogMelSpec()
        # The generator is built lazily in apply(), once input shapes are known.
        self.generator = None
        self.losses = self.get_losses()
        self.discriminator = None
        self.last_audio_shape = None
        self.checkpoint = torch.load(checkpoint)
        # Directory layout: <experiment>/infer/{output,cache}/
        self.infer = checkpoint.parent.parent / 'infer/'
        self.infer.mkdir(exist_ok=True)
        self.output = self.infer / 'output/'
        self.output.mkdir(exist_ok=True)
        self.cache = self.infer / 'cache/'
        self.cache.mkdir(exist_ok=True)

    def apply(self, audio_file, bvh_file):
        """Predict a pose sequence for audio_file and save it as BVH.

        audio_file -- Path to the input audio (loaded mono).
        bvh_file -- Path to the reference motion; used for the target pose
            shape, the cache key and the reported L1 error.
        """
        # Preprocessed tensors are cached per BVH file to skip re-extraction.
        cache = self.cache / (bvh_file.name + '.pt')
        if cache.is_file():
            audio_encoding, real_pose = torch.load(cache)
        else:
            audio_encoding = torch.from_numpy(self.mel_spec.apply(librosa.load(audio_file, mono=True))).unsqueeze(
                0).cuda()
            real_pose = torch.from_numpy(self.mocap_pipeline.apply(bvh_file)).permute(1, 0).unsqueeze(0).cuda()
            torch.save((audio_encoding, real_pose), cache)
        # (Re)build the generator whenever the input spectrogram shape changes,
        # then restore its weights from the checkpoint.
        if self.generator is None or audio_encoding.shape[-2:] != self.last_audio_shape:
            self.generator = AudioToPose(input_shape=audio_encoding.shape[-2:], pose_shape=real_pose.shape[-2:],
                                         encoder_dim=d).cuda()
            self.generator.load_state_dict(self.checkpoint['model_state_dict']['generator'])
            self.generator.float()
            self.generator.eval()
            self.last_audio_shape = audio_encoding.shape[-2:]
        # self.discriminator = PoseDiscriminator(pose_shape=real_pose.shape[-2:]).cuda()
        # self.discriminator.load_state_dict(self.checkpoint['model_state_dict']['discriminator'])
        # self.discriminator.float()
        # self.discriminator.eval()
        pred_pose = self.generator(audio_encoding)
        # Report L1 distance between predicted and reference pose.
        print(self.losses.l1(pred_pose, real_pose))
        # Invert the mocap pipeline to obtain BVH text and write it out.
        bvh = self.mocap_pipeline.invert(pred_pose[0].permute(1, 0).detach().cpu().numpy())
        with open(self.output / (audio_file.name.split('.')[0] + '.bvh'), 'w+') as f:
            f.write(bvh)

    @staticmethod
    def get_losses():
        """
        Returns a namespace of required loss functions.
        """
        losses = Namespace()
        losses.l1 = nn.L1Loss()
        losses.mse = nn.MSELoss()
        return losses
# Pick the checkpoint matching the encoder dimensionality `d` (1D vs 2D U-Net).
if d == 1:
    #gen = GesturePrediction('E:/Users/Sarthak/Experiments/speech2gesture/20210929-211434-1d-unet-666343a/checkpoints/best.pt')
    gen = GesturePrediction('E:/Users/Sarthak/Experiments/speech2gesture/20211007-144056/checkpoints/best.pt')
else:
    gen = GesturePrediction('E:/Users/Sarthak/Experiments/speech2gesture/20210930-103450-2d-unet/checkpoints/best.pt')
# Hard-coded sample from the validation split.
audio = Path('E:/Users/Sarthak/Data/speech2gesture/raw_data/val/Audio/Recording_018.wav')
bvh = Path('E:/Users/Sarthak/Data/speech2gesture/raw_data/val/Motion/Recording_018.bvh')
gen.apply(audio, bvh) |
<reponame>TuringApproved/Turing_Neural_Networks
__author__ = "<NAME>"
"""In this file, a R-ANN is constructed from a sample Turing Machine.
Specifically, the TM decides if an input unary string is composed by
an even number of 1's.
A Generalized Shift is first created from the TM description.
Subsequently, an NDA simulating the GS dynamics is created.
Then, a R-ANN is constructed from the NDA.
Finally, the dynamics of the R-ANN is simulated from initial
conditions and visualized.
"""
import matplotlib.pyplot as plt
import numpy as np
from tnnpy import NeuralTM, GodelEncoder, CompactGodelEncoder, TMGeneralizedShift, NonlinearDynamicalAutomaton
# Turing Machine description (latex syntax for typesetting in plot)
tape_symbols = ["1", "2", "3", "4"]
states = ["w", "g"]
# Transition table: (state, read symbol) -> (new state, written symbol, move).
# All moves are "S" (stay); both states cycle through the tape symbols.
tm_descr = {
    ("w", "1"): ("w", "3", "S"),
    ("w", "2"): ("w", "4", "S"),
    ("w", "3"): ("w", "2", "S"),
    ("w", "4"): ("w", "1", "S"),
    ("g", "1"): ("g", "2", "S"),
    ("g", "2"): ("g", "3", "S"),
    ("g", "3"): ("g", "4", "S"),
    ("g", "4"): ("g", "1", "S"),
}
# create encoders for states and tape symbols
ge_q = GodelEncoder(states)
ge_s = GodelEncoder(tape_symbols)
# and from the simple encoders, create the actual encoders for the
# alpha and beta subsequences
ge_alpha = CompactGodelEncoder(ge_q, ge_s)
ge_beta = ge_s
# create Generalized Shift from machine description...
tm_gs = TMGeneralizedShift(states, tape_symbols, tm_descr)
# ...then NDA from the Generalized Shift and encoders...
nda = NonlinearDynamicalAutomaton(tm_gs, ge_alpha, ge_beta)
# ... and finally the R-ANN simulating the TM from the NDA
tm_nn = NeuralTM(nda)
# set initial conditions for the computation
init_state = ge_alpha.encode_sequence(["g", "1"])
init_tape = ge_beta.encode_sequence(list("1"))
def rand_cloud_run(xs, ys, n_iter):
init_conds = zip(xs, ys)
mean_acts = np.zeros((xs.size, n_iter))
for i, ic in enumerate(init_conds):
x = ic[0]
y = ic[1]
tm_nn.set_init_cond(x, y)
for j in range(n_iter):
MCL_acts = np.concatenate((tm_nn.MCLx.activation, tm_nn.MCLy.activation))
tm_nn.run_net()
all_acts = np.concatenate(
(
MCL_acts,
tm_nn.BSLbx.activation,
tm_nn.BSLby.activation,
tm_nn.LTL.activation,
)
)
mean_acts[i, j] = np.mean(all_acts)
return np.mean(mean_acts, axis=0), np.std(mean_acts, axis=0)
n_iter = 32
n_init_conds = 100
# Initial-condition clouds: x in [0, 0.5) encodes the "w" (walk) state,
# x in [0.5, 1) the "g" (gallop) state; y spans the tape encoding range.
w_rd_x = np.random.uniform(0.0, 0.5, size=n_init_conds)
w_rd_y = np.random.uniform(0, 0.25, size=n_init_conds)
walk_means, walk_std = rand_cloud_run(w_rd_x, w_rd_y, n_iter)
g_rd_x = np.random.uniform(0.5, 1, size=n_init_conds)
g_rd_y = np.random.uniform(0, 0.25, size=n_init_conds)
gall_means, gall_std = rand_cloud_run(g_rd_x, g_rd_y, n_iter)
# Plot syntetic ERPs: mean activation +/- one standard deviation per step.
plt.figure(figsize=[8, 4])
plt.style.use("ggplot")
plt.fill_between(
    range(n_iter), walk_means - walk_std, walk_means + walk_std, color="cornflowerblue"
)
plt.plot(range(n_iter), walk_means, label="Walk gait", color="lightblue", lw=2)
plt.fill_between(
    range(n_iter), gall_means - gall_std, gall_means + gall_std, color="lightpink"
)
plt.plot(range(n_iter), gall_means, label="Gallop gait", color="red", lw=2)
plt.ylim([0, 0.2])
plt.xlim([0, n_iter - 1])
plt.xlabel("Time step")
plt.ylabel("Mean network activation")
plt.legend()
plt.tight_layout()
plt.show()
<filename>bin/headlines.py<gh_stars>0
from mutation import *
# Fix RNG seeds so sampling below is reproducible across runs.
np.random.seed(1)
random.seed(1)
def parse_args():
    """Parse command-line options for the headline analysis driver."""
    import argparse
    arg_parser = argparse.ArgumentParser(description='Headline analysis')
    arg_parser.add_argument('model_name', type=str,
                            help='Type of language model (e.g., hmm, lstm)')
    arg_parser.add_argument('--namespace', type=str, default='headlines',
                            help='Model namespace')
    arg_parser.add_argument('--dim', type=int, default=256,
                            help='Embedding dimension')
    arg_parser.add_argument('--batch-size', type=int, default=1000,
                            help='Training minibatch size')
    arg_parser.add_argument('--n-epochs', type=int, default=20,
                            help='Number of training epochs')
    arg_parser.add_argument('--seed', type=int, default=1,
                            help='Random seed')
    arg_parser.add_argument('--checkpoint', type=str, default=None,
                            help='Model checkpoint')
    arg_parser.add_argument('--train', action='store_true',
                            help='Train model')
    arg_parser.add_argument('--train-split', action='store_true',
                            help='Train model on portion of data')
    arg_parser.add_argument('--test', action='store_true',
                            help='Test model')
    arg_parser.add_argument('--embed', action='store_true',
                            help='Analyze embeddings')
    arg_parser.add_argument('--semantics', action='store_true',
                            help='Analyze mutational semantic change')
    return arg_parser.parse_args()
def parse_meta(timestamp, headline):
    """Build the metadata record for a single headline row."""
    meta = {}
    meta['timestamp'] = timestamp
    meta['date'] = dparse(timestamp)
    # Timestamps start with a four-digit year.
    meta['year'] = int(timestamp[:4])
    meta['headline'] = headline
    return meta
def process(fnames):
    """Read headline CSV files into {token-tuple: [meta records]}.

    Each CSV row is `timestamp,headline`; the headline's whitespace-split
    tokens form the sequence key, and rows with identical token sequences
    accumulate their metadata under the same key.
    """
    seqs = {}
    for path in fnames:
        with open(path) as handle:
            handle.readline()  # Consume header.
            for row in handle:
                timestamp, headline = row.rstrip().split(',')
                key = tuple(headline.split())
                seqs.setdefault(key, []).append(parse_meta(timestamp, headline))
    return seqs
def split_seqs(seqs, split_method='random'):
    """Split sequences into train/validation sets by earliest date.

    A sequence whose oldest occurrence is on or after 2016-01-01 goes to
    validation; everything else goes to training.

    NOTE: `split_method` is currently unused — the split is always by date.
    """
    train_seqs, val_seqs = {}, {}
    new_cutoff = dparse('01-01-2016')
    tprint('Splitting seqs...')
    for seq, metas in seqs.items():
        # Pick validation set based on date.
        dates = [meta['date'] for meta in metas]
        if dates and min(dates) >= new_cutoff:
            val_seqs[seq] = metas
        else:
            train_seqs[seq] = metas
    tprint('{} train seqs, {} test seqs.'
           .format(len(train_seqs), len(val_seqs)))
    return train_seqs, val_seqs
def setup():
    """Load the headline dataset and build a 1-indexed word vocabulary."""
    fnames = [ 'data/headlines/abcnews-date-text.head1k.csv' ]
    seqs = process(fnames)
    # Index 0 is reserved (e.g. for padding), so word ids start at 1.
    words = sorted({ word for seq in seqs for word in seq })
    vocabulary = { word: position + 1 for position, word in enumerate(words) }
    return seqs, vocabulary
def interpret_clusters(adata):
    """Print the headline frequency table for each Louvain cluster."""
    for cluster in sorted(set(adata.obs['louvain'])):
        tprint('Cluster {}'.format(cluster))
        members = adata[adata.obs['louvain'] == cluster]
        # Most frequent headlines first.
        for headline, count in Counter(members.obs['headline']).most_common():
            tprint('\t\t{}: {}'.format(headline, count))
        tprint('')
def plot_umap(adata):
    """Compute a UMAP embedding and save plots colored by cluster/year/date.

    Scanpy writes the figures as '_louvain.png', '_year.png' and '_date.png'
    (prefixed with 'umap') into its configured figure directory.
    """
    sc.tl.umap(adata, min_dist=1.)
    sc.pl.umap(adata, color='louvain', save='_louvain.png')
    sc.pl.umap(adata, color='year', save='_year.png')
    sc.pl.umap(adata, color='date', save='_date.png')
def analyze_embedding(args, model, seqs, vocabulary):
    """Embed all sequences, cluster them, and plot/describe the clusters.

    Builds an AnnData matrix of mean token embeddings (one row per unique
    sequence), annotates it with per-sequence metadata, runs Louvain
    clustering, and writes UMAP plots plus a per-cluster frequency report.
    """
    seqs = embed_seqs(args, model, seqs, vocabulary,
                      use_cache=True)
    X, obs = [], {}
    obs['n_seq'] = []
    obs['seq'] = []
    for seq in seqs:
        # Use the first occurrence's embedding, averaged over tokens.
        meta = seqs[seq][0]
        X.append(meta['embedding'].mean(0))
        for key in meta:
            if key == 'embedding':
                continue
            if key not in obs:
                obs[key] = []
            # For each metadata field, keep the most common value across
            # all occurrences of this sequence.
            obs[key].append(Counter([
                meta[key] for meta in seqs[seq]
            ]).most_common(1)[0][0])
        obs['n_seq'].append(len(seqs[seq]))
        obs['seq'].append(str(seq))
    X = np.array(X)
    adata = AnnData(X)
    for key in obs:
        adata.obs[key] = obs[key]
    sc.pp.neighbors(adata, n_neighbors=15, use_rep='X')
    sc.tl.louvain(adata, resolution=1.)
    sc.set_figure_params(dpi_save=500)
    plot_umap(adata)
    interpret_clusters(adata)
def analyze_semantics(args, model, seq_to_mutate, vocabulary,
                      prob_cutoff=1e-4, n_most_probable=100, beta=1.,
                      plot_acquisition=False, verbose=False):
    """Score single-word substitutions of a headline by probability and
    semantic change.

    For every position, candidate words with model probability above
    `prob_cutoff` produce a mutated headline; each mutant is ranked by an
    acquisition score combining rank(semantic change) + beta*rank(prob),
    and the top modifications are printed.

    Raises ValueError for model types without semantics support.
    """
    seq_to_mutate = tuple(seq_to_mutate)
    seqs = { seq_to_mutate: [ {} ] }
    X_cat, lengths = featurize_seqs(seqs, vocabulary)
    if args.model_name == 'lstm':
        from lstm import _split_and_pad
    elif args.model_name == 'bilstm':
        from bilstm import _split_and_pad
    else:
        raise ValueError('No semantics support for model {}'
                         .format(args.model_name))
    X = _split_and_pad(X_cat, lengths, model.seq_len_,
                       model.vocab_size_, False)[0]
    y_pred = model.model_.predict(X, batch_size=2500)
    # Predictions include start/end positions; vocab rows include the
    # reserved special tokens — presumably pad/start/end/unk; confirm
    # against featurize_seqs.
    assert(y_pred.shape[0] == len(seq_to_mutate) + 2)
    assert(y_pred.shape[1] == len(vocabulary) + 3)
    # Collect per-(word, position) probabilities above the cutoff.
    word_pos_prob = {}
    for i in range(len(seq_to_mutate)):
        for word in vocabulary:
            word_idx = vocabulary[word]
            prob = y_pred[i + 1, word_idx]
            if prob < prob_cutoff:
                continue
            word_pos_prob[(word, i)] = prob
    prob_seqs = { seq_to_mutate: [ {} ] }
    seq_prob = {}
    for (word, pos), prob in word_pos_prob.items():
        mutable = list(seq_to_mutate)
        mutable[pos] = word
        prob_seqs[tuple(mutable)] = [ {} ]
        seq_prob[tuple(mutable)] = prob
    prob_seqs = embed_seqs(args, model, prob_seqs, vocabulary,
                           use_cache=False, verbose=verbose)
    base_embedding = prob_seqs[seq_to_mutate][0]['embedding']
    seq_change = {}
    for seq in prob_seqs:
        embedding = prob_seqs[seq][0]['embedding']
        # L1 distance between embedding vectors.
        seq_change[seq] = abs(base_embedding - embedding).sum()
    sorted_seqs = sorted(seq_prob.keys())
    headlines = np.array([ ' '.join(seq) for seq in sorted_seqs ])
    prob = np.array([ seq_prob[seq] for seq in sorted_seqs ])
    change = np.array([ seq_change[seq] for seq in sorted_seqs ])
    # Rank-based acquisition: high semantic change plus high probability.
    acquisition = ss.rankdata(change) + (beta * ss.rankdata(prob))
    if plot_acquisition:
        plt.figure()
        plt.scatter(np.log10(prob), change,
                    c=acquisition, cmap='viridis', alpha=0.3)
        plt.title(' '.join(seq_to_mutate))
        plt.xlabel('$ \log_{10}(p(x_i)) $')
        plt.ylabel('$ \Delta \Theta $')
        plt.savefig('figures/headline_acquisition.png', dpi=300)
        plt.close()
        # NOTE(review): exit() terminates the whole process after plotting —
        # confirm this is intended rather than an early `return`.
        exit()
    tprint('Original headline: ' + ' '.join(seq_to_mutate))
    tprint('Modifications:')
    for idx in np.argsort(-acquisition)[:n_most_probable]:
        tprint('{}: {} (change), {} (prob)'.format(
            headlines[idx], change[idx], prob[idx]
        ))
    tprint('Least change:')
    for idx in np.argsort(change)[:n_most_probable]:
        tprint('{}: {} (change), {} (prob)'.format(
            headlines[idx], change[idx], prob[idx]
        ))
    tprint('Most change:')
    for idx in np.argsort(-change)[:n_most_probable]:
        tprint('{}: {} (change), {} (prob)'.format(
            headlines[idx], change[idx], prob[idx]
        ))
if __name__ == '__main__':
    # Bug fix: removed a stray `breakpoint()` left over from debugging —
    # it dropped every run into pdb before doing any work.
    args = parse_args()
    seqs, vocabulary = setup()
    # +2: presumably start/end tokens and reserved vocab slots — confirm
    # against featurize_seqs in the mutation module.
    seq_len = max([ len(seq) for seq in seqs ]) + 2
    vocab_size = len(vocabulary) + 2
    model = get_model(args, seq_len, vocab_size)
    if args.checkpoint is not None:
        model.model_.load_weights(args.checkpoint)
        tprint('Model summary:')
        print(model.model_.summary())
    if args.train or args.train_split or args.test:
        train_test(args, model, seqs, vocabulary, split_seqs)
    if args.embed:
        # Embeddings require trained weights (freshly trained or loaded).
        if args.checkpoint is None and not args.train:
            raise ValueError('Model must be trained or loaded '
                             'from checkpoint.')
        no_embed = { 'hmm' }
        if args.model_name in no_embed:
            raise ValueError('Embeddings not available for models: {}'
                             .format(', '.join(no_embed)))
        analyze_embedding(args, model, seqs, vocabulary)
    if args.semantics:
        if args.checkpoint is None and not args.train:
            raise ValueError('Model must be trained or loaded '
                             'from checkpoint.')
        #random_sample = np.random.choice(
        #    [ ' '.join(seq) for seq in seqs ], 50
        #)
        #for headline in random_sample[12:]:
        #    tprint('')
        #    analyze_semantics(args, model, headline.split(' '),
        #                      vocabulary, n_most_probable=3,
        #                      prob_cutoff=0, beta=2.)
        random_sample = np.random.choice(
            [ ' '.join(seq) for seq in seqs ], 100000
        )
        for headline in random_sample[50000:]:
            tprint('')
            analyze_semantics(args, model, headline.split(' '),
                              vocabulary, n_most_probable=3,
                              prob_cutoff=1e-4, beta=1.)
|
import yaml
import json
import requests
import csv
import pandas as pd
from tempfile import NamedTemporaryFile
import shutil
# CURIE prefixes accepted as-is, without a Node Normalizer lookup.
allowedVocabs = ['ICD10', 'ICD10CM', 'ICD10CN', 'ICD10GM', 'ICD10PCS', 'ICD9CM',
                 'ICD9Proc', 'ICD9ProcCN', 'LOINC', 'RxNorm', 'RxNorm Extension', 'SNOMED']
def checkIdExits(ID):
    """Look up a CURIE in the SRI Node Normalizer service.

    Args:
        ID: a CURIE string (e.g. 'MONDO:0005148').

    Returns:
        (id_exists, labels): the normalizer's record for the ID (or None if
        unknown) and its primary label (or None). If the response contains
        several non-None entries, the last one wins.
    """
    node_normalizer_url = "https://nodenormalization-sri.renci.org/1.1/get_normalized_nodes"
    id_exists = None
    labels = None
    input_obj = {
        "curies": [ID]
    }
    # Bug fix: the Accept header previously read "applicaton/json" (typo);
    # also dropped an unused curl-command string that was never executed.
    resp = requests.post(node_normalizer_url, headers={
        "Content-Type": "application/json",
        "Accept": "application/json"
    }, json=input_obj)
    obj = resp.json()
    for key, value in obj.items():
        if value is not None:
            # NOTE(review): assumes the record always has id.label —
            # confirm against the Node Normalizer response schema.
            labels = value['id']['label']
            id_exists = value
    return id_exists, labels
def check_consistant(table):
    """Annotate each feature's identifiers with a label when resolvable.

    Mutates `table` in place: every identifier list is replaced by a list of
    {identifier: label} dicts — 'allowed' for whitelisted vocabularies,
    otherwise the Node Normalizer label. Identifiers the normalizer does not
    know are silently dropped.

    NOTE(review): returns the module-level global `document`, not `table` —
    this only works because the (commented-out) driver loads `document`
    globally before calling; it likely should return `table`. Confirm
    before changing, since callers dump the returned value to YAML.
    """
    #print(table)
    for feature_name, identifier_list in table.items():
        #print(table[feature_name])
        #print(table, ":", feature_name, ":", identifier_list)
        #identifier_df = pd.DataFrame(identifier_list)
        new_list = []
        for idx, identifier in enumerate(identifier_list):
            #print(identifier)
            item = str(identifier)
            # Vocabulary prefix is everything before the last ':'.
            vocab = item.rsplit(':', 1)[0]
            if vocab in allowedVocabs:
                new_list.append({identifier:'allowed'})
            else:
                exists, labels = checkIdExits(identifier)
                if exists:
                    #print(exists)
                    #print(labels)
                    new_list.append({identifier:labels})
        table[feature_name] = new_list
        print(table[feature_name])
    return document
def check_inconsistant(table):
    """Keep only the identifiers that are neither whitelisted nor resolvable.

    Mutates `table` in place: each feature's list is reduced to the
    identifiers whose vocabulary is not in `allowedVocabs` and which the
    Node Normalizer does not recognize (i.e. the inconsistent ones).

    NOTE(review): like check_consistant, this returns the module-level
    global `document` rather than `table` — verify before relying on the
    return value.
    """
    #print(table)
    for feature_name, identifier_list in table.items():
        #print(table[feature_name])
        new_list = []
        for idx, identifier in enumerate(identifier_list):
            #print(identifier)
            item = str(identifier)
            # Vocabulary prefix is everything before the last ':'.
            vocab = item.rsplit(':', 1)[0]
            if vocab in allowedVocabs:
                exists = "allowed"
            else:
                checkID, labels = checkIdExits(identifier)
                exists = checkID
            if not exists:
                print(identifier)
                new_list.append(identifier)
        table[feature_name] = new_list
        #print(table[feature_name])
    return document
def format_identifiers(document):
    """Remove features whose identifier list is empty.

    Mutates `document` ({table: {feature: [identifiers]}}) in place,
    printing each empty list before deleting its feature, and returns the
    same document object.
    """
    for table_name, feature_map in document.items():
        # Snapshot the keys so deletion during the scan is safe.
        empty_features = [name for name, ids in list(feature_map.items())
                          if ids == []]
        for name in empty_features:
            print(feature_map[name])
            del feature_map[name]
    return(document)
#--------extract consistant identifiers
#with open(r'../identifiers copy.yml') as file:
# document = yaml.full_load(file)
#print(document['patient'])
#d= check_consistant(document['patient'])
#d= check_consistant(document['visit'])
#with open(r'consistent_identifiers3.yml', 'w') as file:
# documents = yaml.dump(d, file)
#--------format 'consistent_identifiers_with_labels.yml' file to remove empty features
#with open(r'consistent_identifiers_with_labels.yml') as file:
# document = yaml.full_load(file)
#d = format_identifiers(document)
#print(d)
#with open(r'formatted_consistent_identifiers_with_labels.yml', 'w') as file:
# documents = yaml.dump(d, file)
#--------extract inconsistant identifiers
#with open(r'../identifiers.yml') as file:
# document = yaml.full_load(file)
#d = check_inconsistant(document['patient'])
#d= check_inconsistant(document['visit'])
#print(d)
#with open(r'inconsistent_identifiers.yml', 'w') as file:
# documents = yaml.dump(d, file)
|
"""
Utilities for exploring the anchor network within a model in order to
find the optimal procedure for steering the system to the different
anchors.
"""
import numpy as np
try:
import openmm.unit as unit
except ModuleNotFoundError:
import simtk.unit as unit
def find_edge_distance(model, src_anchor_index, dest_anchor_index):
    """
    Find the distance of the variables commonly shared between two
    anchors. The distance is assumed to be Euclidean.
    Parameters:
    -----------
    model : Model()
        The model object containing the relevant anchors.
    src_anchor_index : integer
        The index of the anchor within the model that will be the
        'source' - where the steered simulation will start from.
    dest_anchor_index : integer
        The index of the anchor within the model that will be the
        'destination' - where the steered simulation will go to.
    Returns:
    --------
    distance : float
        The distance (in nm) between the source and destination anchors.
    """
    # TODO: this may need to eventually be adapted to angular CVs
    src_vars = model.anchors[src_anchor_index].variables
    dest_vars = model.anchors[dest_anchor_index].variables
    # Sum squared differences over the variables both anchors define.
    total_sq = sum(
        (src_vars[name] - dest_vars[name]) ** 2
        for name in src_vars if name in dest_vars
    )
    return np.sqrt(total_sq)
def find_next_anchor_index(model, visited_anchor_dict):
    """
    Given a set of explored anchors, find a candidate for the next step
    in the pathfinding based on which will be the closest anchor (node)
    to reach.
    Parameters:
    -----------
    model : Model()
        The model object containing the relevant anchors.
    visited_anchor_dict : dict
        A dictionary whose keys are anchor indices and whose values are
        a distance to a starting anchor.
    Returns:
    --------
    prev_anchor_index : int
        For the next step, starting at this anchor will be the step that
        will explore the closest available node (anchor).
    next_anchor_index : int
        For the next step, this anchor will be the step that explores
        the closest available node (anchor).
    next_anchor_distance : float
        The total distance from one of the starting nodes (anchors) to
        the destination of this step.
    """
    best_prev = None
    best_next = None
    best_distance = 9e9  # sentinel larger than any realistic path length
    # Scan every milestone edge leaving the visited set.
    for visited_index in visited_anchor_dict:
        for milestone in model.anchors[visited_index].milestones:
            neighbor = milestone.neighbor_anchor_index
            # Skip bulk-state anchors and anchors already reached.
            if model.anchors[neighbor].bulkstate:
                continue
            if neighbor in visited_anchor_dict:
                continue
            candidate_distance = visited_anchor_dict[visited_index] \
                + find_edge_distance(model, visited_index, neighbor)
            if candidate_distance < best_distance:
                best_distance = candidate_distance
                best_prev = visited_index
                best_next = neighbor
    return best_prev, best_next, best_distance
def get_procedure(model, source_anchor_indices, destination_list):
    """
    Given the model and a list of starting anchors, a procedure
    will be provided that will allow every anchor to be reached
    from one of the starting anchors.
    Parameters:
    -----------
    model : Model()
        The model object containing the relevant anchors.
    source_anchor_indices : int
        Indices of anchors which have starting structures defined.
        The network will be explored starting from these nodes.
    destination_list : list
        A list of anchor indices that need to be reached by this
        procedure.
    Returns:
    --------
    procedure : list
        A list of tuples of integers, where each tuple is a step in a
        procedure of an SMD simulation from a source anchor to a
        destination anchor.
    """
    procedure = []
    # Start anchors are at distance zero from themselves.
    visited = {index: 0.0 for index in source_anchor_indices}
    remaining = destination_list[:]
    # Each step visits one new anchor, so len(anchors) bounds the loop.
    for _ in range(len(model.anchors)):
        prev_index, next_index, next_distance \
            = find_next_anchor_index(model, visited)
        if next_index is None:
            # No more paths found
            break
        visited[next_index] = next_distance
        procedure.append((prev_index, next_index))
        if next_index in remaining:
            remaining.remove(next_index)
        if len(remaining) == 0:
            break
    return procedure
def estimate_simulation_time(model, procedure, velocity):
    """
    Estimate the total amount of time will be needed to simulate enough
    SMD to fill all empty anchors.
    Parameters:
    -----------
    model : Model()
        The model object containing the relevant anchors.
    procedure : list
        The output from get_procedure - a list of tuples of anchor
        indices indicating the steps to take to reach each anchor with
        a series of SMD simulations.
    velocity : Quantity
        The speed (in velocity units) that the system will be moving
        between anchors due to guided restraints.
    Returns:
    --------
    total_time : Quantity
        The total time estimate that would be required to complete the
        provided procedure.
    """
    total_time = 0.0 * unit.nanoseconds
    # Each step contributes travel time = edge length / steering speed.
    for src_index, dest_index in procedure:
        edge_length = find_edge_distance(model, src_index, dest_index) \
            * unit.nanometers
        total_time += edge_length / velocity
    return total_time
<filename>client/pyre.py
# Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import os
import shutil
import sys
import time
import traceback
from dataclasses import dataclass
from typing import List, Optional
from . import (
analysis_directory,
buck,
commands,
log,
recently_used_configurations,
statistics,
)
from .commands import CommandParser, ExitCode, IncrementalStyle
from .exceptions import EnvironmentException
from .find_directories import find_project_root
from .version import __version__
LOG: logging.Logger = logging.getLogger(__name__)
@dataclass
class FailedOutsideLocalConfigurationException(Exception):
    """Raised by run_pyre when a command fails outside a local configuration,
    carrying enough context for _run_pyre_with_retry to offer a recently
    used local configuration and retry."""

    exit_code: ExitCode
    command: CommandParser
    exception_message: str
def _set_default_command(arguments: argparse.Namespace) -> None:
    """Default to `incremental` when watchman is installed, else `check`."""
    if shutil.which("watchman"):
        arguments.command = commands.Incremental.from_arguments
        arguments.nonblocking = False
        arguments.incremental_style = IncrementalStyle.FINE_GRAINED
        arguments.no_start = False
        return
    # Without watchman, incremental mode cannot track file changes.
    watchman_link = "https://facebook.github.io/watchman/docs/install"
    LOG.warning(
        "No watchman binary found. \n"
        "To enable pyre incremental, "
        "you can install watchman: {}".format(watchman_link)
    )
    LOG.warning("Defaulting to non-incremental check.")
    arguments.command = commands.Check.from_arguments
def _log_statistics(
    command: CommandParser,
    arguments: argparse.Namespace,
    start_time: float,
    client_exception_message: str,
    error_message: Optional[str],
    exit_code: int,
    should_log: bool = True,
) -> None:
    """Record a usage-statistics entry for a finished command, when enabled."""
    configuration = command.configuration
    # Logging requires the opt-in flag plus a configuration with a logger.
    if not (should_log and configuration and configuration.logger):
        return
    statistics.log(
        category=statistics.LoggerCategory.USAGE,
        arguments=arguments,
        configuration=configuration,
        integers={
            "exit_code": exit_code,
            "runtime": int((time.time() - start_time) * 1000),
        },
        normals={
            "root": configuration.local_root,
            "cwd": os.getcwd(),
            "client_version": __version__,
            "command": command.NAME,
            "client_exception": client_exception_message,
            "error_message": error_message,
        },
    )
def run_pyre(arguments: argparse.Namespace) -> ExitCode:
    """Instantiate and run the selected pyre command, mapping every outcome
    (success, known error classes, interrupts) to an ExitCode.

    Raises FailedOutsideLocalConfigurationException when the failure may be
    recoverable by rerunning in a recently used local configuration (handled
    by _run_pyre_with_retry). Always logs usage statistics in the finally
    block unless the retry path suppresses them.
    """
    start_time = time.time()
    command: Optional[CommandParser] = None
    client_exception_message = ""
    should_log_statistics = True
    # Having this as a fails-by-default helps flag unexpected exit
    # from exception flows.
    exit_code = ExitCode.FAILURE
    try:
        original_directory = os.getcwd()
        if arguments.version:
            try:
                # TODO(T64512953): Decouple configuration creation with command creation
                configuration = arguments.command(
                    arguments, original_directory
                ).configuration
                if configuration:
                    binary_version = configuration.get_binary_version()
                    if binary_version:
                        log.stdout.write(f"Binary version: {binary_version}\n")
            except Exception:
                # Best-effort: still print the client version below even if
                # the binary version cannot be determined.
                pass
            log.stdout.write(f"Client version: {__version__}\n")
            exit_code = ExitCode.SUCCESS
        else:
            command = arguments.command(arguments, original_directory)
            log.start_logging_to_directory(
                arguments.noninteractive, command.log_directory
            )
            exit_code = command.run().exit_code()
    except analysis_directory.NotWithinLocalConfigurationException as error:
        # `persistent` (the IDE server) degrades to a null server instead of
        # failing hard when no local configuration is found.
        if arguments.command == commands.Persistent.from_arguments:
            try:
                commands.Persistent.run_null_server(timeout=3600 * 12)
                exit_code = ExitCode.SUCCESS
            except Exception as error:
                client_exception_message = str(error)
                exit_code = ExitCode.FAILURE
            except KeyboardInterrupt:
                LOG.warning("Interrupted by user")
                exit_code = ExitCode.SUCCESS
        elif not command:
            client_exception_message = str(error)
            exit_code = ExitCode.FAILURE
        else:
            # Let the retry wrapper offer a recent local configuration;
            # skip statistics here so only the retried run logs.
            should_log_statistics = False
            raise FailedOutsideLocalConfigurationException(
                exit_code, command, str(error)
            )
    except (buck.BuckException, EnvironmentException) as error:
        if arguments.command == commands.Persistent.from_arguments:
            try:
                commands.Persistent.run_null_server(timeout=3600 * 12)
                exit_code = ExitCode.SUCCESS
            except Exception as error:
                client_exception_message = str(error)
                exit_code = ExitCode.FAILURE
            except KeyboardInterrupt:
                LOG.warning("Interrupted by user")
                exit_code = ExitCode.SUCCESS
        else:
            client_exception_message = str(error)
            exit_code = (
                ExitCode.BUCK_ERROR
                if isinstance(error, buck.BuckException)
                else ExitCode.FAILURE
            )
    except commands.ClientException as error:
        client_exception_message = str(error)
        exit_code = ExitCode.FAILURE
    except Exception:
        client_exception_message = traceback.format_exc()
        exit_code = ExitCode.FAILURE
    except KeyboardInterrupt:
        # KeyboardInterrupt derives from BaseException, so it is not
        # swallowed by the `except Exception` arm above.
        LOG.warning("Interrupted by user")
        LOG.debug(traceback.format_exc())
        exit_code = ExitCode.SUCCESS
    finally:
        if len(client_exception_message) > 0:
            LOG.error(client_exception_message)
        log.cleanup()
        if command:
            result = command.result()
            error_message = result.error if result else None
            command.cleanup()
            _log_statistics(
                command,
                arguments,
                start_time,
                client_exception_message,
                error_message,
                exit_code,
                should_log_statistics,
            )
    return exit_code
def _run_pyre_with_retry(arguments: argparse.Namespace) -> ExitCode:
    """Run pyre once; if it failed outside a local configuration, prompt the
    user with recently used local configurations and retry the command in
    the chosen one. Returns the final ExitCode."""
    try:
        return run_pyre(arguments)
    except FailedOutsideLocalConfigurationException as exception:
        command = exception.command
        exit_code = exception.exit_code
        client_exception_message = exception.exception_message
        configurations = recently_used_configurations.Cache(
            command._dot_pyre_directory
        ).get_all_items()
        if not configurations:
            # Nothing to offer: surface the original error.
            LOG.error(client_exception_message)
            return exit_code
        LOG.warning(
            f"Could not find a Pyre local configuration at `{command._original_directory}`."
        )
        local_root_for_rerun = recently_used_configurations.prompt_user_for_local_root(
            configurations
        )
        if not local_root_for_rerun:
            # User declined; keep the original failure code.
            return exit_code
        arguments.local_configuration = local_root_for_rerun
        LOG.warning(
            f"Rerunning the command in recent local configuration `{local_root_for_rerun}`."
        )
        try:
            # Retry once; a second configuration failure is terminal.
            return run_pyre(arguments)
        except FailedOutsideLocalConfigurationException:
            LOG.error(f"Failed to rerun command in `{local_root_for_rerun}`.")
            return ExitCode.FAILURE
# Need the default argument here since this is our entry point in setup.py
def main(argv: Optional[List[str]] = None) -> int:
    """Parse command-line arguments and run the selected pyre command.

    Args:
        argv: command-line arguments (excluding the program name).
            Defaults to ``sys.argv[1:]``, resolved at call time.

    Returns:
        The process exit code.
    """
    # Bug fix: the previous signature `argv: List[str] = sys.argv[1:]`
    # evaluated the default once at import time, so any later change to
    # sys.argv was silently ignored. Resolve the default at call time
    # instead; passing no argument (the setup.py entry point) still works.
    if argv is None:
        argv = sys.argv[1:]
    parser = argparse.ArgumentParser(
        allow_abbrev=False,
        formatter_class=argparse.RawTextHelpFormatter,
        epilog="environment variables:"
        "\n   `PYRE_BINARY` overrides the pyre binary used."
        "\n   `PYRE_VERSION_HASH` overrides the pyre version set in the "
        "configuration files.",
    )
    commands.Command.add_arguments(parser)
    # Subcommands.
    subcommand_names = ", ".join(
        [command.NAME for command in commands.COMMANDS if not command.HIDDEN]
    )
    parsed_commands = parser.add_subparsers(
        metavar="{}".format(subcommand_names),
        help="""
        The pyre command to run; defaults to `incremental`.
        Run `pyre command --help` for documentation on a specific command.
        """,
    )
    for command in commands.COMMANDS:
        command.add_subparser(parsed_commands)
    arguments = parser.parse_args(argv)
    log.initialize(arguments.noninteractive)
    # No subcommand given: pick incremental/check based on watchman.
    if not hasattr(arguments, "command"):
        _set_default_command(arguments)
    return _run_pyre_with_retry(arguments)
if __name__ == "__main__":
try:
os.getcwd()
except FileNotFoundError:
LOG.error(
"Pyre could not determine the current working directory. "
"Has it been removed?\nExiting."
)
sys.exit(ExitCode.FAILURE)
sys.exit(main(sys.argv[1:]))
|
'''
## Aliyun ROS HBR Construct Library
This module is part of the AliCloud ROS Cloud Development Kit (ROS CDK) project.
```python
# Example automatically generated from non-compiling source. May contain errors.
import * as HBR from '@alicloud/ros-cdk-hbr';
```
'''
import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
from ._jsii import *
import ros_cdk_core
class BackupClients(
    ros_cdk_core.Resource,
    metaclass=jsii.JSIIMeta,
    jsii_type="@alicloud/ros-cdk-hbr.BackupClients",
):
    '''A ROS resource type: ``ALIYUN::HBR::BackupClients``.'''
    # NOTE: jsii-generated binding — the decorators and jsii_type strings
    # are registration metadata and must not be edited by hand.

    def __init__(
        self,
        scope: ros_cdk_core.Construct,
        id: builtins.str,
        props: "BackupClientsProps",
        enable_resource_property_constraint: typing.Optional[builtins.bool] = None,
    ) -> None:
        '''Create a new ``ALIYUN::HBR::BackupClients``.
        Param scope - scope in which this resource is defined
        Param id - scoped id of the resource
        Param props - resource properties
        :param scope: -
        :param id: -
        :param props: -
        :param enable_resource_property_constraint: -
        '''
        # Construction is delegated to the jsii kernel.
        jsii.create(self.__class__, self, [scope, id, props, enable_resource_property_constraint])

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrClientIds")
    def attr_client_ids(self) -> ros_cdk_core.IResolvable:
        '''Attribute ClientIds: ID list of clients installed in instances.'''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrClientIds"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrInstanceIds")
    def attr_instance_ids(self) -> ros_cdk_core.IResolvable:
        '''Attribute InstanceIds: ID list of instances to install backup client.'''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrInstanceIds"))
@jsii.data_type(
    jsii_type="@alicloud/ros-cdk-hbr.BackupClientsProps",
    jsii_struct_bases=[],
    name_mapping={"instance_ids": "instanceIds"},
)
class BackupClientsProps:
    # NOTE: jsii-generated struct — field storage and equality are driven by
    # the _values dict; do not add instance state outside of it.
    def __init__(
        self,
        *,
        instance_ids: typing.Union[ros_cdk_core.IResolvable, typing.Sequence[typing.Union[builtins.str, ros_cdk_core.IResolvable]]],
    ) -> None:
        '''Properties for defining a ``ALIYUN::HBR::BackupClients``.
        :param instance_ids: Property instanceIds: ID list of instances to install backup client.
        '''
        self._values: typing.Dict[str, typing.Any] = {
            "instance_ids": instance_ids,
        }

    @builtins.property
    def instance_ids(
        self,
    ) -> typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[builtins.str, ros_cdk_core.IResolvable]]]:
        '''Property instanceIds: ID list of instances to install backup client.'''
        result = self._values.get("instance_ids")
        assert result is not None, "Required property 'instance_ids' is missing"
        return typing.cast(typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[builtins.str, ros_cdk_core.IResolvable]]], result)

    def __eq__(self, rhs: typing.Any) -> builtins.bool:
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs: typing.Any) -> builtins.bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        return "BackupClientsProps(%s)" % ", ".join(
            k + "=" + repr(v) for k, v in self._values.items()
        )
class DbAgent(
    ros_cdk_core.Resource,
    metaclass=jsii.JSIIMeta,
    jsii_type="@alicloud/ros-cdk-hbr.DbAgent",
):
    '''A ROS resource type: ``ALIYUN::HBR::DbAgent``.'''
    # NOTE: jsii-generated binding — decorators and jsii_type strings are
    # registration metadata and must not be edited by hand.

    def __init__(
        self,
        scope: ros_cdk_core.Construct,
        id: builtins.str,
        props: "DbAgentProps",
        enable_resource_property_constraint: typing.Optional[builtins.bool] = None,
    ) -> None:
        '''Create a new ``ALIYUN::HBR::DbAgent``.
        Param scope - scope in which this resource is defined
        Param id - scoped id of the resource
        Param props - resource properties
        :param scope: -
        :param id: -
        :param props: -
        :param enable_resource_property_constraint: -
        '''
        # Construction is delegated to the jsii kernel.
        jsii.create(self.__class__, self, [scope, id, props, enable_resource_property_constraint])

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrInstanceIds")
    def attr_instance_ids(self) -> ros_cdk_core.IResolvable:
        '''Attribute InstanceIds: Uni backup agent instance ids.'''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrInstanceIds"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrTaskId")
    def attr_task_id(self) -> ros_cdk_core.IResolvable:
        '''Attribute TaskId: Uni backup agent install task id.'''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrTaskId"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrUniBackupInstanceDetails")
    def attr_uni_backup_instance_details(self) -> ros_cdk_core.IResolvable:
        '''Attribute UniBackupInstanceDetails: Uni backup agent instance info details.'''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrUniBackupInstanceDetails"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrUniBackupInstances")
    def attr_uni_backup_instances(self) -> ros_cdk_core.IResolvable:
        '''Attribute UniBackupInstances: Uni backup agent instance info.'''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrUniBackupInstances"))
@jsii.data_type(
    jsii_type="@alicloud/ros-cdk-hbr.DbAgentProps",
    jsii_struct_bases=[],
    name_mapping={"instance_info": "instanceInfo"},
)
class DbAgentProps:
    def __init__(
        self,
        *,
        instance_info: typing.Union[ros_cdk_core.IResolvable, typing.Sequence[typing.Union[ros_cdk_core.IResolvable, "RosDbAgent.InstanceInfoProperty"]]],
    ) -> None:
        '''Properties for defining a ``ALIYUN::HBR::DbAgent``.

        :param instance_info: Property instanceInfo: Instance infos.
        '''
        # The single required property is kept in a plain mapping that the
        # accessor below reads back.
        self._values: typing.Dict[str, typing.Any] = dict(instance_info=instance_info)
    @builtins.property
    def instance_info(
        self,
    ) -> typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[ros_cdk_core.IResolvable, "RosDbAgent.InstanceInfoProperty"]]]:
        '''Property instanceInfo: Instance infos.'''
        value = self._values.get("instance_info")
        assert value is not None, "Required property 'instance_info' is missing"
        return typing.cast(typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[ros_cdk_core.IResolvable, "RosDbAgent.InstanceInfoProperty"]]], value)
    def __eq__(self, rhs: typing.Any) -> builtins.bool:
        # Equal only to another DbAgentProps holding the same values.
        if not isinstance(rhs, self.__class__):
            return False
        return rhs._values == self._values
    def __ne__(self, rhs: typing.Any) -> builtins.bool:
        return not (rhs == self)
    def __repr__(self) -> str:
        fields = ", ".join(f"{key}={value!r}" for key, value in self._values.items())
        return f"DbAgentProps({fields})"
class DbPlan(
    ros_cdk_core.Resource,
    metaclass=jsii.JSIIMeta,
    jsii_type="@alicloud/ros-cdk-hbr.DbPlan",
):
    '''A ROS resource type: ``ALIYUN::HBR::DbPlan``.

    A database (MYSQL / ORACLE / MSSQL) backup plan managed by Hybrid Backup
    Recovery.  See ``DbPlanProps`` for the configurable schedules.
    '''
    def __init__(
        self,
        scope: ros_cdk_core.Construct,
        id: builtins.str,
        props: "DbPlanProps",
        enable_resource_property_constraint: typing.Optional[builtins.bool] = None,
    ) -> None:
        '''Create a new ``ALIYUN::HBR::DbPlan``.

        :param scope: scope in which this resource is defined.
        :param id: scoped id of the resource.
        :param props: resource properties (see ``DbPlanProps``).
        :param enable_resource_property_constraint: optional flag forwarded to the underlying resource implementation.
        '''
        # Delegate construction to the jsii kernel, which backs this Python
        # class with its JavaScript implementation.
        jsii.create(self.__class__, self, [scope, id, props, enable_resource_property_constraint])
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrContinuousPlan")
    def attr_continuous_plan(self) -> ros_cdk_core.IResolvable:
        '''Attribute ContinuousPlan: Continuous backup plan schedule.
        Use { "type": "continuous" }.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrContinuousPlan"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrContinuousUuid")
    def attr_continuous_uuid(self) -> ros_cdk_core.IResolvable:
        '''Attribute ContinuousUuid: Uuid of continuous backup plan.'''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrContinuousUuid"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrCumulativePlan")
    def attr_cumulative_plan(self) -> ros_cdk_core.IResolvable:
        '''Attribute CumulativePlan: Cumulative plan schedule, only for mssql.
        More details see FullPlan.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrCumulativePlan"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrCumulativeUuid")
    def attr_cumulative_uuid(self) -> ros_cdk_core.IResolvable:
        '''Attribute CumulativeUuid: Uuid of cumulative plan.'''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrCumulativeUuid"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrDbPlanName")
    def attr_db_plan_name(self) -> ros_cdk_core.IResolvable:
        '''Attribute DbPlanName: Display name of the backup plan.'''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrDbPlanName"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrFullPlan")
    def attr_full_plan(self) -> ros_cdk_core.IResolvable:
        '''Attribute FullPlan: Full backup plan schedule.
        daily: {"type": "daily", "start": "00:00:00", "interval": 3}, weekly {"type":"weekly","start": "03:00:00","days": [1,2,3,4,5],"interval": 1}, days can be 0 - 6, 0 means Sunday, and interval can be 1 - 52.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrFullPlan"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrFullUuid")
    def attr_full_uuid(self) -> ros_cdk_core.IResolvable:
        '''Attribute FullUuid: Uuid of full backup plan.'''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrFullUuid"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrHostUuid")
    def attr_host_uuid(self) -> ros_cdk_core.IResolvable:
        '''Attribute HostUuid: Uuid of the host of the database instance.'''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrHostUuid"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrIncPlan")
    def attr_inc_plan(self) -> ros_cdk_core.IResolvable:
        '''Attribute IncPlan: Incremental backup plan schedule.
        Only for mysql and oracle. More details see FullPlan.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrIncPlan"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrIncUuid")
    def attr_inc_uuid(self) -> ros_cdk_core.IResolvable:
        '''Attribute IncUuid: Uuid of the incremental backup plan.'''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrIncUuid"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrInstanceUuid")
    def attr_instance_uuid(self) -> ros_cdk_core.IResolvable:
        '''Attribute InstanceUuid: Uuid of database instance.'''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrInstanceUuid"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrLogPlan")
    def attr_log_plan(self) -> ros_cdk_core.IResolvable:
        '''Attribute LogPlan: Log backup plan schedule. More details see FullPlan.'''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrLogPlan"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrLogUuid")
    def attr_log_uuid(self) -> ros_cdk_core.IResolvable:
        '''Attribute LogUuid: Uuid of the log backup plan.'''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrLogUuid"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrMaxRateLimit")
    def attr_max_rate_limit(self) -> ros_cdk_core.IResolvable:
        '''Attribute MaxRateLimit: Max rate limit for backup job.'''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrMaxRateLimit"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrMaxRetrySeconds")
    def attr_max_retry_seconds(self) -> ros_cdk_core.IResolvable:
        '''Attribute MaxRetrySeconds: Max retry seconds on network failure.'''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrMaxRetrySeconds"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrOptions")
    def attr_options(self) -> ros_cdk_core.IResolvable:
        '''Attribute Options: Backup options in json format, different for each type of database.
        For Oracle, use {"channels":4,"compression":"lzop","offline_backup":false,"archivelog_reserve_hours":24,"custom_commands":""}, "channels" means numbers of concurrent threads, "archivelog_reserve_hours" means how long before the archive log will be deleted after backup job completed, other parameters should use the default value. For Mysql, use {"channels":4,"compression":"lzop","del_binlog":false}, "del_binlog" means whether the binlog will be deleted after backup completed, only take effect for log or continuous backup. For SQL Server, use {"channels":4,"verify":false,"compression":"lzop","backup_new_databases":false}.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrOptions"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrPlanId")
    def attr_plan_id(self) -> ros_cdk_core.IResolvable:
        '''Attribute PlanId: Id of the backup plan.'''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrPlanId"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrSourceType")
    def attr_source_type(self) -> ros_cdk_core.IResolvable:
        '''Attribute SourceType: Database type, allowed value: MYSQL, ORACLE, MSSQL.'''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrSourceType"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrTarget")
    def attr_target(self) -> ros_cdk_core.IResolvable:
        '''Attribute Target: Target vault to backup.'''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrTarget"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrVaultId")
    def attr_vault_id(self) -> ros_cdk_core.IResolvable:
        '''Attribute VaultId: Vault ID to create backup plan, the backup data will be stored to the vault.'''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrVaultId"))
@jsii.data_type(
    jsii_type="@alicloud/ros-cdk-hbr.DbPlanProps",
    jsii_struct_bases=[],
    name_mapping={
        "db_plan_name": "dbPlanName",
        "host_uuid": "hostUuid",
        "source_type": "sourceType",
        "vault_id": "vaultId",
        "continuous_plan": "continuousPlan",
        "cumulative_plan": "cumulativePlan",
        "full_plan": "fullPlan",
        "inc_plan": "incPlan",
        "instance_uuid": "instanceUuid",
        "log_plan": "logPlan",
        "max_rate_limit": "maxRateLimit",
        "max_retry_seconds": "maxRetrySeconds",
        "options": "options",
        "source": "source",
    },
)
class DbPlanProps:
    def __init__(
        self,
        *,
        db_plan_name: typing.Union[builtins.str, ros_cdk_core.IResolvable],
        host_uuid: typing.Union[builtins.str, ros_cdk_core.IResolvable],
        source_type: typing.Union[builtins.str, ros_cdk_core.IResolvable],
        vault_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
        continuous_plan: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
        cumulative_plan: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
        full_plan: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
        inc_plan: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
        instance_uuid: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
        log_plan: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
        max_rate_limit: typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]] = None,
        max_retry_seconds: typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]] = None,
        options: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
        source: typing.Optional[typing.Union[ros_cdk_core.IResolvable, "RosDbPlan.SourceProperty"]] = None,
    ) -> None:
        '''Properties for defining a ``ALIYUN::HBR::DbPlan``.

        :param db_plan_name: Property dbPlanName: Display name of the backup plan.
        :param host_uuid: Property hostUuid: Uuid of the host of the database instance.
        :param source_type: Property sourceType: Database type, allowed value: MYSQL, ORACLE, MSSQL.
        :param vault_id: Property vaultId: Vault ID to create backup plan, the backup data will be stored to the vault.
        :param continuous_plan: Property continuousPlan: Continuous backup plan schedule. Use { "type": "continuous" }.
        :param cumulative_plan: Property cumulativePlan: Cumulative plan schedule, only for mssql. More details see FullPlan.
        :param full_plan: Property fullPlan: Full backup plan schedule. daily: {"type": "daily", "start": "00:00:00", "interval": 3}, weekly {"type":"weekly","start": "03:00:00","days": [1,2,3,4,5],"interval": 1}, days can be 0 - 6, 0 means Sunday, and interval can be 1 - 52.
        :param inc_plan: Property incPlan: Incremental backup plan schedule. Only for mysql and oracle. More details see FullPlan.
        :param instance_uuid: Property instanceUuid: Uuid of database instance.
        :param log_plan: Property logPlan: Log backup plan schedule. More details see FullPlan.
        :param max_rate_limit: Property maxRateLimit: Max rate limit for backup job.
        :param max_retry_seconds: Property maxRetrySeconds: Max retry seconds on network failure.
        :param options: Property options: Backup options in json format, different for each type of database. For Oracle, use {"channels":4,"compression":"lzop","offline_backup":false,"archivelog_reserve_hours":24,"custom_commands":""}, "channels" means numbers of concurrent threads, "archivelog_reserve_hours" means how long before the archive log will be deleted after backup job completed, other parameters should use the default value. For Mysql, use {"channels":4,"compression":"lzop","del_binlog":false}, "del_binlog" means whether the binlog will be deleted after backup completed, only take effect for log or continuous backup. For SQL Server, use {"channels":4,"verify":false,"compression":"lzop","backup_new_databases":false}.
        :param source: Property source: Which database instance or database will be backup.
        '''
        self._values: typing.Dict[str, typing.Any] = {
            "db_plan_name": db_plan_name,
            "host_uuid": host_uuid,
            "source_type": source_type,
            "vault_id": vault_id,
        }
        # Optional schedules/settings are recorded only when explicitly provided.
        if continuous_plan is not None:
            self._values["continuous_plan"] = continuous_plan
        if cumulative_plan is not None:
            self._values["cumulative_plan"] = cumulative_plan
        if full_plan is not None:
            self._values["full_plan"] = full_plan
        if inc_plan is not None:
            self._values["inc_plan"] = inc_plan
        if instance_uuid is not None:
            self._values["instance_uuid"] = instance_uuid
        if log_plan is not None:
            self._values["log_plan"] = log_plan
        if max_rate_limit is not None:
            self._values["max_rate_limit"] = max_rate_limit
        if max_retry_seconds is not None:
            self._values["max_retry_seconds"] = max_retry_seconds
        if options is not None:
            self._values["options"] = options
        if source is not None:
            self._values["source"] = source
    @builtins.property
    def db_plan_name(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''Property dbPlanName: Display name of the backup plan.'''
        result = self._values.get("db_plan_name")
        assert result is not None, "Required property 'db_plan_name' is missing"
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
    @builtins.property
    def host_uuid(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''Property hostUuid: Uuid of the host of the database instance.'''
        result = self._values.get("host_uuid")
        assert result is not None, "Required property 'host_uuid' is missing"
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
    @builtins.property
    def source_type(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''Property sourceType: Database type, allowed value: MYSQL, ORACLE, MSSQL.'''
        result = self._values.get("source_type")
        assert result is not None, "Required property 'source_type' is missing"
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
    @builtins.property
    def vault_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''Property vaultId: Vault ID to create backup plan, the backup data will be stored to the vault.'''
        result = self._values.get("vault_id")
        assert result is not None, "Required property 'vault_id' is missing"
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
    @builtins.property
    def continuous_plan(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''Property continuousPlan: Continuous backup plan schedule.
        Use { "type": "continuous" }.
        '''
        result = self._values.get("continuous_plan")
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)
    @builtins.property
    def cumulative_plan(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''Property cumulativePlan: Cumulative plan schedule, only for mssql.
        More details see FullPlan.
        '''
        result = self._values.get("cumulative_plan")
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)
    @builtins.property
    def full_plan(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''Property fullPlan: Full backup plan schedule.
        daily: {"type": "daily", "start": "00:00:00", "interval": 3}, weekly {"type":"weekly","start": "03:00:00","days": [1,2,3,4,5],"interval": 1}, days can be 0 - 6, 0 means Sunday, and interval can be 1 - 52.
        '''
        result = self._values.get("full_plan")
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)
    @builtins.property
    def inc_plan(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''Property incPlan: Incremental backup plan schedule.
        Only for mysql and oracle. More details see FullPlan.
        '''
        result = self._values.get("inc_plan")
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)
    @builtins.property
    def instance_uuid(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''Property instanceUuid: Uuid of database instance.'''
        result = self._values.get("instance_uuid")
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)
    @builtins.property
    def log_plan(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''Property logPlan: Log backup plan schedule. More details see FullPlan.'''
        result = self._values.get("log_plan")
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)
    @builtins.property
    def max_rate_limit(
        self,
    ) -> typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]]:
        '''Property maxRateLimit: Max rate limit for backup job.'''
        result = self._values.get("max_rate_limit")
        return typing.cast(typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]], result)
    @builtins.property
    def max_retry_seconds(
        self,
    ) -> typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]]:
        '''Property maxRetrySeconds: Max retry seconds on network failure.'''
        result = self._values.get("max_retry_seconds")
        return typing.cast(typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]], result)
    @builtins.property
    def options(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''Property options: Backup options in json format, different for each type of database.
        For Oracle, use {"channels":4,"compression":"lzop","offline_backup":false,"archivelog_reserve_hours":24,"custom_commands":""}, "channels" means numbers of concurrent threads, "archivelog_reserve_hours" means how long before the archive log will be deleted after backup job completed, other parameters should use the default value. For Mysql, use {"channels":4,"compression":"lzop","del_binlog":false}, "del_binlog" means whether the binlog will be deleted after backup completed, only take effect for log or continuous backup. For SQL Server, use {"channels":4,"verify":false,"compression":"lzop","backup_new_databases":false}.
        '''
        result = self._values.get("options")
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)
    @builtins.property
    def source(
        self,
    ) -> typing.Optional[typing.Union[ros_cdk_core.IResolvable, "RosDbPlan.SourceProperty"]]:
        '''Property source: Which database instance or database will be backup.'''
        result = self._values.get("source")
        return typing.cast(typing.Optional[typing.Union[ros_cdk_core.IResolvable, "RosDbPlan.SourceProperty"]], result)
    def __eq__(self, rhs: typing.Any) -> builtins.bool:
        return isinstance(rhs, self.__class__) and rhs._values == self._values
    def __ne__(self, rhs: typing.Any) -> builtins.bool:
        return not (rhs == self)
    def __repr__(self) -> str:
        return "DbPlanProps(%s)" % ", ".join(
            k + "=" + repr(v) for k, v in self._values.items()
        )
class DbVault(
    ros_cdk_core.Resource,
    metaclass=jsii.JSIIMeta,
    jsii_type="@alicloud/ros-cdk-hbr.DbVault",
):
    '''A ROS resource type: ``ALIYUN::HBR::DbVault``.

    A vault in which database backup data is stored; configured via
    ``DbVaultProps``.
    '''
    def __init__(
        self,
        scope: ros_cdk_core.Construct,
        id: builtins.str,
        props: "DbVaultProps",
        enable_resource_property_constraint: typing.Optional[builtins.bool] = None,
    ) -> None:
        '''Create a new ``ALIYUN::HBR::DbVault``.

        :param scope: scope in which this resource is defined.
        :param id: scoped id of the resource.
        :param props: resource properties (see ``DbVaultProps``).
        :param enable_resource_property_constraint: optional flag forwarded to the underlying resource implementation.
        '''
        # Delegate construction to the jsii kernel, which backs this Python
        # class with its JavaScript implementation.
        jsii.create(self.__class__, self, [scope, id, props, enable_resource_property_constraint])
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrDescription")
    def attr_description(self) -> ros_cdk_core.IResolvable:
        '''Attribute Description: Description of the vault.'''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrDescription"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrRetentionDays")
    def attr_retention_days(self) -> ros_cdk_core.IResolvable:
        '''Attribute RetentionDays: Data retention days of the vault.
        Data will be deleted when it's older than this time.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrRetentionDays"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrVaultId")
    def attr_vault_id(self) -> ros_cdk_core.IResolvable:
        '''Attribute VaultId: Vault ID.'''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrVaultId"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrVaultName")
    def attr_vault_name(self) -> ros_cdk_core.IResolvable:
        '''Attribute VaultName: Display name of the vault.'''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrVaultName"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrVaultRegionId")
    def attr_vault_region_id(self) -> ros_cdk_core.IResolvable:
        '''Attribute VaultRegionId: The region ID to create the vault.'''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrVaultRegionId"))
@jsii.data_type(
    jsii_type="@alicloud/ros-cdk-hbr.DbVaultProps",
    jsii_struct_bases=[],
    name_mapping={
        "retention_days": "retentionDays",
        "vault_name": "vaultName",
        "vault_region_id": "vaultRegionId",
        "description": "description",
    },
)
class DbVaultProps:
    def __init__(
        self,
        *,
        retention_days: typing.Union[jsii.Number, ros_cdk_core.IResolvable],
        vault_name: typing.Union[builtins.str, ros_cdk_core.IResolvable],
        vault_region_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
        description: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
    ) -> None:
        '''Properties for defining a ``ALIYUN::HBR::DbVault``.

        :param retention_days: Property retentionDays: Data retention days of the vault. Data will be deleted when it's older than this time.
        :param vault_name: Property vaultName: Display name of the vault.
        :param vault_region_id: Property vaultRegionId: The region ID to create the vault.
        :param description: Property description: Description of the vault.
        '''
        # Required values are always stored; the optional description is
        # recorded only when the caller supplied it.
        self._values: typing.Dict[str, typing.Any] = dict(
            retention_days=retention_days,
            vault_name=vault_name,
            vault_region_id=vault_region_id,
        )
        if description is not None:
            self._values["description"] = description
    @builtins.property
    def retention_days(self) -> typing.Union[jsii.Number, ros_cdk_core.IResolvable]:
        '''Property retentionDays: Data retention days of the vault.
        Data will be deleted when it's older than this time.
        '''
        value = self._values.get("retention_days")
        assert value is not None, "Required property 'retention_days' is missing"
        return typing.cast(typing.Union[jsii.Number, ros_cdk_core.IResolvable], value)
    @builtins.property
    def vault_name(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''Property vaultName: Display name of the vault.'''
        value = self._values.get("vault_name")
        assert value is not None, "Required property 'vault_name' is missing"
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], value)
    @builtins.property
    def vault_region_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''Property vaultRegionId: The region ID to create the vault.'''
        value = self._values.get("vault_region_id")
        assert value is not None, "Required property 'vault_region_id' is missing"
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], value)
    @builtins.property
    def description(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''Property description: Description of the vault.'''
        # Optional property: absent keys simply yield None.
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], self._values.get("description"))
    def __eq__(self, rhs: typing.Any) -> builtins.bool:
        # Equal only to another DbVaultProps holding the same values.
        if not isinstance(rhs, self.__class__):
            return False
        return rhs._values == self._values
    def __ne__(self, rhs: typing.Any) -> builtins.bool:
        return not (rhs == self)
    def __repr__(self) -> str:
        fields = ", ".join(f"{key}={value!r}" for key, value in self._values.items())
        return f"DbVaultProps({fields})"
class RestoreJob(
    ros_cdk_core.Resource,
    metaclass=jsii.JSIIMeta,
    jsii_type="@alicloud/ros-cdk-hbr.RestoreJob",
):
    '''A ROS resource type: ``ALIYUN::HBR::RestoreJob``.

    A job that restores a snapshot to a target instance/client; see
    ``RestoreJobProps`` for the required inputs.
    '''
    def __init__(
        self,
        scope: ros_cdk_core.Construct,
        id: builtins.str,
        props: "RestoreJobProps",
        enable_resource_property_constraint: typing.Optional[builtins.bool] = None,
    ) -> None:
        '''Create a new ``ALIYUN::HBR::RestoreJob``.

        :param scope: scope in which this resource is defined.
        :param id: scoped id of the resource.
        :param props: resource properties (see ``RestoreJobProps``).
        :param enable_resource_property_constraint: optional flag forwarded to the underlying resource implementation.
        '''
        # Delegate construction to the jsii kernel, which backs this Python
        # class with its JavaScript implementation.
        jsii.create(self.__class__, self, [scope, id, props, enable_resource_property_constraint])
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrErrorMessage")
    def attr_error_message(self) -> ros_cdk_core.IResolvable:
        '''Attribute ErrorMessage: Error message of restore job.'''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrErrorMessage"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrRestoreId")
    def attr_restore_id(self) -> ros_cdk_core.IResolvable:
        '''Attribute RestoreId: Restore job ID.'''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrRestoreId"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrRestoreType")
    def attr_restore_type(self) -> ros_cdk_core.IResolvable:
        '''Attribute RestoreType: Restore type.'''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrRestoreType"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrSourceType")
    def attr_source_type(self) -> ros_cdk_core.IResolvable:
        '''Attribute SourceType: Source type.'''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrSourceType"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrStatus")
    def attr_status(self) -> ros_cdk_core.IResolvable:
        '''Attribute Status: Restore job status.'''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrStatus"))
@jsii.data_type(
jsii_type="@alicloud/ros-cdk-hbr.RestoreJobProps",
jsii_struct_bases=[],
name_mapping={
"restore_type": "restoreType",
"snapshot_id": "snapshotId",
"source_client_id": "sourceClientId",
"source_instance_id": "sourceInstanceId",
"source_type": "sourceType",
"target_client_id": "targetClientId",
"target_instance_id": "targetInstanceId",
"target_path": "targetPath",
"vault_id": "vaultId",
},
)
class RestoreJobProps:
def __init__(
self,
*,
restore_type: typing.Union[builtins.str, ros_cdk_core.IResolvable],
snapshot_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
source_client_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
source_instance_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
source_type: typing.Union[builtins.str, ros_cdk_core.IResolvable],
target_client_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
target_instance_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
target_path: typing.Union[builtins.str, ros_cdk_core.IResolvable],
vault_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
) -> None:
'''Properties for defining a ``ALIYUN::HBR::RestoreJob``.
:param restore_type: Property restoreType: Restore type.
:param snapshot_id: Property snapshotId: Snapshot ID.
:param source_client_id: Property sourceClientId: Source client ID. It should be provided when SourceType=FILE.
:param source_instance_id: Property sourceInstanceId: Source instance ID. It should be provided when SourceType=ECS_FILE.
:param source_type: Property sourceType: Source type.
:param target_client_id: Property targetClientId: Target client ID. It should be provided when RestoreType=FILE.
:param target_instance_id: Property targetInstanceId: Target instance ID. It should be provided when RestoreType=ECS_FILE.
:param target_path: Property targetPath: Target path. For instance, "/".
:param vault_id: Property vaultId: Vault ID.
'''
self._values: typing.Dict[str, typing.Any] = {
"restore_type": restore_type,
"snapshot_id": snapshot_id,
"source_client_id": source_client_id,
"source_instance_id": source_instance_id,
"source_type": source_type,
"target_client_id": target_client_id,
"target_instance_id": target_instance_id,
"target_path": target_path,
"vault_id": vault_id,
}
@builtins.property
def restore_type(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''Property restoreType: Restore type.'''
result = self._values.get("restore_type")
assert result is not None, "Required property 'restore_type' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def snapshot_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''Property snapshotId: Snapshot ID.'''
result = self._values.get("snapshot_id")
assert result is not None, "Required property 'snapshot_id' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def source_client_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''Property sourceClientId: Source client ID.
It should be provided when SourceType=FILE.
'''
result = self._values.get("source_client_id")
assert result is not None, "Required property 'source_client_id' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def source_instance_id(
self,
) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''Property sourceInstanceId: Source instance ID.
It should be provided when SourceType=ECS_FILE.
'''
result = self._values.get("source_instance_id")
assert result is not None, "Required property 'source_instance_id' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def source_type(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''Property sourceType: Source type.'''
result = self._values.get("source_type")
assert result is not None, "Required property 'source_type' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def target_client_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''Property targetClientId: Target client ID.
It should be provided when RestoreType=FILE.
'''
result = self._values.get("target_client_id")
assert result is not None, "Required property 'target_client_id' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def target_instance_id(
self,
) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''Property targetInstanceId: Target instance ID.
It should be provided when RestoreType=ECS_FILE.
'''
result = self._values.get("target_instance_id")
assert result is not None, "Required property 'target_instance_id' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def target_path(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''Property targetPath: Target path.
For instance, "/".
'''
result = self._values.get("target_path")
assert result is not None, "Required property 'target_path' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def vault_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''Property vaultId: Vault ID.'''
result = self._values.get("vault_id")
assert result is not None, "Required property 'vault_id' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
    """Two props objects compare equal when they hold identical values."""
    if not isinstance(rhs, self.__class__):
        return False
    return self._values == rhs._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
    """Negation of equality; keeps ``rhs == self`` so ``rhs`` drives dispatch."""
    equal = rhs == self
    return not equal
def __repr__(self) -> str:
    """Render as ``RestoreJobProps(key=value, ...)`` for debugging."""
    rendered = [k + "=" + repr(v) for k, v in self._values.items()]
    return "RestoreJobProps(%s)" % ", ".join(rendered)
class RosBackupClients(
    ros_cdk_core.RosResource,
    metaclass=jsii.JSIIMeta,
    jsii_type="@alicloud/ros-cdk-hbr.RosBackupClients",
):
    '''A ROS template type: ``ALIYUN::HBR::BackupClients``.

    This class is a jsii proxy: every accessor below forwards to the jsii
    kernel (``jsii.get`` / ``jsii.set`` / ``jsii.invoke``), where the actual
    construct implementation lives.
    '''

    def __init__(
        self,
        scope: ros_cdk_core.Construct,
        id: builtins.str,
        props: "RosBackupClientsProps",
        enable_resource_property_constraint: builtins.bool,
    ) -> None:
        '''Create a new ``ALIYUN::HBR::BackupClients``.

        :param scope: - scope in which this resource is defined.
        :param id: - scoped id of the resource.
        :param props: - resource properties.
        :param enable_resource_property_constraint: -
        '''
        # Construction is delegated to the jsii kernel with the arguments in
        # positional order.
        jsii.create(self.__class__, self, [scope, id, props, enable_resource_property_constraint])

    @jsii.member(jsii_name="renderProperties")
    def _render_properties(
        self,
        props: typing.Mapping[builtins.str, typing.Any],
    ) -> typing.Mapping[builtins.str, typing.Any]:
        '''
        :param props: -
        '''
        return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.invoke(self, "renderProperties", [props]))

    @jsii.python.classproperty # type: ignore[misc]
    @jsii.member(jsii_name="ROS_RESOURCE_TYPE_NAME")
    def ROS_RESOURCE_TYPE_NAME(cls) -> builtins.str:
        '''The resource type name for this resource class.'''
        # Static (class-level) value, hence jsii.sget rather than jsii.get.
        return typing.cast(builtins.str, jsii.sget(cls, "ROS_RESOURCE_TYPE_NAME"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrClientIds")
    def attr_client_ids(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: ClientIds: ID list of clients installed in instances
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrClientIds"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrInstanceIds")
    def attr_instance_ids(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: InstanceIds: ID list of instances to install backup client
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrInstanceIds"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="rosProperties")
    def _ros_properties(self) -> typing.Mapping[builtins.str, typing.Any]:
        return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.get(self, "rosProperties"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="enableResourcePropertyConstraint")
    def enable_resource_property_constraint(self) -> builtins.bool:
        return typing.cast(builtins.bool, jsii.get(self, "enableResourcePropertyConstraint"))

    @enable_resource_property_constraint.setter
    def enable_resource_property_constraint(self, value: builtins.bool) -> None:
        jsii.set(self, "enableResourcePropertyConstraint", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="instanceIds")
    def instance_ids(
        self,
    ) -> typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[builtins.str, ros_cdk_core.IResolvable]]]:
        '''
        :Property: instanceIds: ID list of instances to install backup client
        '''
        return typing.cast(typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[builtins.str, ros_cdk_core.IResolvable]]], jsii.get(self, "instanceIds"))

    @instance_ids.setter
    def instance_ids(
        self,
        value: typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[builtins.str, ros_cdk_core.IResolvable]]],
    ) -> None:
        jsii.set(self, "instanceIds", value)
@jsii.data_type(
    jsii_type="@alicloud/ros-cdk-hbr.RosBackupClientsProps",
    jsii_struct_bases=[],
    name_mapping={"instance_ids": "instanceIds"},
)
class RosBackupClientsProps:
    '''Properties for defining a ``ALIYUN::HBR::BackupClients``.'''

    def __init__(
        self,
        *,
        instance_ids: typing.Union[ros_cdk_core.IResolvable, typing.Sequence[typing.Union[builtins.str, ros_cdk_core.IResolvable]]],
    ) -> None:
        '''Properties for defining a ``ALIYUN::HBR::BackupClients``.

        :param instance_ids:
        '''
        # All values are held in a single dict keyed by the snake_case names
        # declared in ``name_mapping`` above.
        self._values: typing.Dict[str, typing.Any] = {"instance_ids": instance_ids}

    @builtins.property
    def instance_ids(
        self,
    ) -> typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[builtins.str, ros_cdk_core.IResolvable]]]:
        '''
        :Property: instanceIds: ID list of instances to install backup client
        '''
        value = self._values.get("instance_ids")
        assert value is not None, "Required property 'instance_ids' is missing"
        return typing.cast(typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[builtins.str, ros_cdk_core.IResolvable]]], value)

    def __eq__(self, rhs: typing.Any) -> builtins.bool:
        if not isinstance(rhs, self.__class__):
            return False
        return self._values == rhs._values

    def __ne__(self, rhs: typing.Any) -> builtins.bool:
        equal = rhs == self
        return not equal

    def __repr__(self) -> str:
        rendered = [k + "=" + repr(v) for k, v in self._values.items()]
        return "RosBackupClientsProps(%s)" % ", ".join(rendered)
class RosDbAgent(
    ros_cdk_core.RosResource,
    metaclass=jsii.JSIIMeta,
    jsii_type="@alicloud/ros-cdk-hbr.RosDbAgent",
):
    '''A ROS template type: ``ALIYUN::HBR::DbAgent``.

    jsii proxy class: accessors forward to the jsii kernel, which holds the
    real construct implementation.
    '''

    def __init__(
        self,
        scope: ros_cdk_core.Construct,
        id: builtins.str,
        props: "RosDbAgentProps",
        enable_resource_property_constraint: builtins.bool,
    ) -> None:
        '''Create a new ``ALIYUN::HBR::DbAgent``.

        :param scope: - scope in which this resource is defined.
        :param id: - scoped id of the resource.
        :param props: - resource properties.
        :param enable_resource_property_constraint: -
        '''
        # Construction happens in the jsii kernel, not in Python.
        jsii.create(self.__class__, self, [scope, id, props, enable_resource_property_constraint])

    @jsii.member(jsii_name="renderProperties")
    def _render_properties(
        self,
        props: typing.Mapping[builtins.str, typing.Any],
    ) -> typing.Mapping[builtins.str, typing.Any]:
        '''
        :param props: -
        '''
        return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.invoke(self, "renderProperties", [props]))

    @jsii.python.classproperty # type: ignore[misc]
    @jsii.member(jsii_name="ROS_RESOURCE_TYPE_NAME")
    def ROS_RESOURCE_TYPE_NAME(cls) -> builtins.str:
        '''The resource type name for this resource class.'''
        return typing.cast(builtins.str, jsii.sget(cls, "ROS_RESOURCE_TYPE_NAME"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrInstanceIds")
    def attr_instance_ids(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: InstanceIds: Uni backup agent instance ids
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrInstanceIds"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrTaskId")
    def attr_task_id(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: TaskId: Uni backup agent install task id.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrTaskId"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrUniBackupInstanceDetails")
    def attr_uni_backup_instance_details(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: UniBackupInstanceDetails: Uni backup agent instance info details
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrUniBackupInstanceDetails"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrUniBackupInstances")
    def attr_uni_backup_instances(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: UniBackupInstances: Uni backup agent instance info
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrUniBackupInstances"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="rosProperties")
    def _ros_properties(self) -> typing.Mapping[builtins.str, typing.Any]:
        return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.get(self, "rosProperties"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="enableResourcePropertyConstraint")
    def enable_resource_property_constraint(self) -> builtins.bool:
        return typing.cast(builtins.bool, jsii.get(self, "enableResourcePropertyConstraint"))

    @enable_resource_property_constraint.setter
    def enable_resource_property_constraint(self, value: builtins.bool) -> None:
        jsii.set(self, "enableResourcePropertyConstraint", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="instanceInfo")
    def instance_info(
        self,
    ) -> typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[ros_cdk_core.IResolvable, "RosDbAgent.InstanceInfoProperty"]]]:
        '''
        :Property: instanceInfo: Instance infos
        '''
        return typing.cast(typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[ros_cdk_core.IResolvable, "RosDbAgent.InstanceInfoProperty"]]], jsii.get(self, "instanceInfo"))

    @instance_info.setter
    def instance_info(
        self,
        value: typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[ros_cdk_core.IResolvable, "RosDbAgent.InstanceInfoProperty"]]],
    ) -> None:
        jsii.set(self, "instanceInfo", value)

    @jsii.data_type(
        jsii_type="@alicloud/ros-cdk-hbr.RosDbAgent.InstanceInfoProperty",
        jsii_struct_bases=[],
        name_mapping={
            "instance_id": "instanceId",
            "source_type": "sourceType",
            "authentication_type": "authenticationType",
            "password": "password",
            "user_name": "userName",
        },
    )
    class InstanceInfoProperty:
        def __init__(
            self,
            *,
            instance_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
            source_type: typing.Union[builtins.str, ros_cdk_core.IResolvable],
            authentication_type: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
            password: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
            user_name: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
        ) -> None:
            '''
            :param instance_id:
            :param source_type:
            :param authentication_type:
            :param password:
            :param user_name:
            '''
            # Required keys are stored unconditionally; optional keys only
            # when given, so absent keys stay absent from ``_values``.
            self._values: typing.Dict[str, typing.Any] = {
                "instance_id": instance_id,
                "source_type": source_type,
            }
            if authentication_type is not None:
                self._values["authentication_type"] = authentication_type
            if password is not None:
                self._values["password"] = password
            if user_name is not None:
                self._values["user_name"] = user_name

        @builtins.property
        def instance_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
            '''
            :Property: instanceId: ECS instance id
            '''
            result = self._values.get("instance_id")
            assert result is not None, "Required property 'instance_id' is missing"
            return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)

        @builtins.property
        def source_type(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
            '''
            :Property: sourceType: Data source type, valid value: MYSQL, ORACLE, MSSQL
            '''
            result = self._values.get("source_type")
            assert result is not None, "Required property 'source_type' is missing"
            return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)

        @builtins.property
        def authentication_type(
            self,
        ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
            '''
            :Property: authenticationType: verification method, valid value: INSTANCE, ACCESS_KEY
            '''
            result = self._values.get("authentication_type")
            return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)

        @builtins.property
        def password(
            self,
        ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
            '''
            :Property: password: Database backup account password
            '''
            # NOTE(review): the generated docstring here was a scrubbed
            # "<PASSWORD>" placeholder; reworded to match the sibling
            # ``user_name`` wording — confirm against the upstream schema.
            result = self._values.get("password")
            return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)

        @builtins.property
        def user_name(
            self,
        ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
            '''
            :Property: userName: Database backup account username
            '''
            result = self._values.get("user_name")
            return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)

        def __eq__(self, rhs: typing.Any) -> builtins.bool:
            return isinstance(rhs, self.__class__) and rhs._values == self._values

        def __ne__(self, rhs: typing.Any) -> builtins.bool:
            return not (rhs == self)

        def __repr__(self) -> str:
            return "InstanceInfoProperty(%s)" % ", ".join(
                k + "=" + repr(v) for k, v in self._values.items()
            )
@jsii.data_type(
    jsii_type="@alicloud/ros-cdk-hbr.RosDbAgentProps",
    jsii_struct_bases=[],
    name_mapping={"instance_info": "instanceInfo"},
)
class RosDbAgentProps:
    '''Properties for defining a ``ALIYUN::HBR::DbAgent``.'''

    def __init__(
        self,
        *,
        instance_info: typing.Union[ros_cdk_core.IResolvable, typing.Sequence[typing.Union[ros_cdk_core.IResolvable, RosDbAgent.InstanceInfoProperty]]],
    ) -> None:
        '''Properties for defining a ``ALIYUN::HBR::DbAgent``.

        :param instance_info:
        '''
        # Single backing dict keyed by the snake_case names from
        # ``name_mapping`` above.
        self._values: typing.Dict[str, typing.Any] = {"instance_info": instance_info}

    @builtins.property
    def instance_info(
        self,
    ) -> typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[ros_cdk_core.IResolvable, RosDbAgent.InstanceInfoProperty]]]:
        '''
        :Property: instanceInfo: Instance infos
        '''
        value = self._values.get("instance_info")
        assert value is not None, "Required property 'instance_info' is missing"
        return typing.cast(typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[ros_cdk_core.IResolvable, RosDbAgent.InstanceInfoProperty]]], value)

    def __eq__(self, rhs: typing.Any) -> builtins.bool:
        if not isinstance(rhs, self.__class__):
            return False
        return self._values == rhs._values

    def __ne__(self, rhs: typing.Any) -> builtins.bool:
        equal = rhs == self
        return not equal

    def __repr__(self) -> str:
        rendered = [k + "=" + repr(v) for k, v in self._values.items()]
        return "RosDbAgentProps(%s)" % ", ".join(rendered)
class RosDbPlan(
    ros_cdk_core.RosResource,
    metaclass=jsii.JSIIMeta,
    jsii_type="@alicloud/ros-cdk-hbr.RosDbPlan",
):
    '''A ROS template type: ``ALIYUN::HBR::DbPlan``.

    jsii proxy class: ``attr_*`` accessors are read-only runtime attributes,
    the remaining properties are template inputs with setters. All of them
    forward to the jsii kernel.
    '''

    def __init__(
        self,
        scope: ros_cdk_core.Construct,
        id: builtins.str,
        props: "RosDbPlanProps",
        enable_resource_property_constraint: builtins.bool,
    ) -> None:
        '''Create a new ``ALIYUN::HBR::DbPlan``.

        :param scope: - scope in which this resource is defined.
        :param id: - scoped id of the resource.
        :param props: - resource properties.
        :param enable_resource_property_constraint: -
        '''
        # Construction happens in the jsii kernel, not in Python.
        jsii.create(self.__class__, self, [scope, id, props, enable_resource_property_constraint])

    @jsii.member(jsii_name="renderProperties")
    def _render_properties(
        self,
        props: typing.Mapping[builtins.str, typing.Any],
    ) -> typing.Mapping[builtins.str, typing.Any]:
        '''
        :param props: -
        '''
        return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.invoke(self, "renderProperties", [props]))

    @jsii.python.classproperty # type: ignore[misc]
    @jsii.member(jsii_name="ROS_RESOURCE_TYPE_NAME")
    def ROS_RESOURCE_TYPE_NAME(cls) -> builtins.str:
        '''The resource type name for this resource class.'''
        return typing.cast(builtins.str, jsii.sget(cls, "ROS_RESOURCE_TYPE_NAME"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrContinuousPlan")
    def attr_continuous_plan(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: ContinuousPlan: Continuous backup plan schedule. Use { "type": "continuous" }.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrContinuousPlan"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrContinuousUuid")
    def attr_continuous_uuid(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: ContinuousUuid: Uuid of continuous backup plan.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrContinuousUuid"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrCumulativePlan")
    def attr_cumulative_plan(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: CumulativePlan: Cumulative plan schedule, only for mssql. More details see FullPlan.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrCumulativePlan"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrCumulativeUuid")
    def attr_cumulative_uuid(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: CumulativeUuid: Uuid of cumulative plan.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrCumulativeUuid"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrDbPlanName")
    def attr_db_plan_name(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: DbPlanName: Display name of the backup plan.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrDbPlanName"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrFullPlan")
    def attr_full_plan(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: FullPlan: Full backup plan schedule. daily: {"type": "daily", "start": "00:00:00", "interval": 3}, weekly {"type":"weekly","start": "03:00:00","days": [1,2,3,4,5],"interval": 1}, days can be 0 - 6, 0 means Sunday, and interval can be 1 - 52.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrFullPlan"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrFullUuid")
    def attr_full_uuid(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: FullUuid: Uuid of full backup plan.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrFullUuid"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrHostUuid")
    def attr_host_uuid(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: HostUuid: Uuid of the host of the database instance.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrHostUuid"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrIncPlan")
    def attr_inc_plan(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: IncPlan: Incremental backup plan schedule. Only for mysql and oracle. More details see FullPlan.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrIncPlan"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrIncUuid")
    def attr_inc_uuid(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: IncUuid: Uuid of the incremental backup plan.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrIncUuid"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrInstanceUuid")
    def attr_instance_uuid(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: InstanceUuid: Uuid of database instance.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrInstanceUuid"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrLogPlan")
    def attr_log_plan(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: LogPlan: Log backup plan schedule.More details see FullPlan.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrLogPlan"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrLogUuid")
    def attr_log_uuid(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: LogUuid: Uuid of the log backup plan.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrLogUuid"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrMaxRateLimit")
    def attr_max_rate_limit(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: MaxRateLimit: Max rate limit for backup job,
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrMaxRateLimit"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrMaxRetrySeconds")
    def attr_max_retry_seconds(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: MaxRetrySeconds: Max retry seconds on network failure.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrMaxRetrySeconds"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrOptions")
    def attr_options(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: Options: Backup options in json format, different for each type of database. For Oracle, use {"channels":4,"compression":"lzop","offline_backup":false,"archivelog_reserve_hours":24,"custom_commands":""}, "channels" means numbers of concurrent threads, "archivelog_reserve_hours" means how long before the archive log will be deleted after backup job completed, other parameters should use the default value. For Mysql, use {"channels":4,"compression":"lzop","del_binlog":false}, "del_binlog" means whether the binlog will be deleted after backup completed, only take effect for log or continuous backup. For SQL Server, use {"channels":4,"verify":false,"compression":"lzop","backup_new_databases":false}.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrOptions"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrPlanId")
    def attr_plan_id(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: PlanId: Id of the backup plan.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrPlanId"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrSourceType")
    def attr_source_type(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: SourceType: Database type, allowed value: MYSQL, ORACLE, MSSQL
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrSourceType"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrTarget")
    def attr_target(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: Target: Target vault to backup.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrTarget"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrVaultId")
    def attr_vault_id(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: VaultId: Vault ID to create backup plan, the backup data will be stored to the vault.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrVaultId"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="rosProperties")
    def _ros_properties(self) -> typing.Mapping[builtins.str, typing.Any]:
        return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.get(self, "rosProperties"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="dbPlanName")
    def db_plan_name(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: dbPlanName: Display name of the backup plan.
        '''
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], jsii.get(self, "dbPlanName"))

    @db_plan_name.setter
    def db_plan_name(
        self,
        value: typing.Union[builtins.str, ros_cdk_core.IResolvable],
    ) -> None:
        jsii.set(self, "dbPlanName", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="enableResourcePropertyConstraint")
    def enable_resource_property_constraint(self) -> builtins.bool:
        return typing.cast(builtins.bool, jsii.get(self, "enableResourcePropertyConstraint"))

    @enable_resource_property_constraint.setter
    def enable_resource_property_constraint(self, value: builtins.bool) -> None:
        jsii.set(self, "enableResourcePropertyConstraint", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="hostUuid")
    def host_uuid(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: hostUuid: Uuid of the host of the database instance.
        '''
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], jsii.get(self, "hostUuid"))

    @host_uuid.setter
    def host_uuid(
        self,
        value: typing.Union[builtins.str, ros_cdk_core.IResolvable],
    ) -> None:
        jsii.set(self, "hostUuid", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="sourceType")
    def source_type(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: sourceType: Database type, allowed value: MYSQL, ORACLE, MSSQL
        '''
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], jsii.get(self, "sourceType"))

    @source_type.setter
    def source_type(
        self,
        value: typing.Union[builtins.str, ros_cdk_core.IResolvable],
    ) -> None:
        jsii.set(self, "sourceType", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="vaultId")
    def vault_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: vaultId: Vault ID to create backup plan, the backup data will be stored to the vault.
        '''
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], jsii.get(self, "vaultId"))

    @vault_id.setter
    def vault_id(
        self,
        value: typing.Union[builtins.str, ros_cdk_core.IResolvable],
    ) -> None:
        jsii.set(self, "vaultId", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="continuousPlan")
    def continuous_plan(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''
        :Property: continuousPlan: Continuous backup plan schedule. Use { "type": "continuous" }.
        '''
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], jsii.get(self, "continuousPlan"))

    @continuous_plan.setter
    def continuous_plan(
        self,
        value: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]],
    ) -> None:
        jsii.set(self, "continuousPlan", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="cumulativePlan")
    def cumulative_plan(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''
        :Property: cumulativePlan: Cumulative plan schedule, only for mssql. More details see FullPlan.
        '''
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], jsii.get(self, "cumulativePlan"))

    @cumulative_plan.setter
    def cumulative_plan(
        self,
        value: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]],
    ) -> None:
        jsii.set(self, "cumulativePlan", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="fullPlan")
    def full_plan(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''
        :Property: fullPlan: Full backup plan schedule. daily: {"type": "daily", "start": "00:00:00", "interval": 3}, weekly {"type":"weekly","start": "03:00:00","days": [1,2,3,4,5],"interval": 1}, days can be 0 - 6, 0 means Sunday, and interval can be 1 - 52.
        '''
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], jsii.get(self, "fullPlan"))

    @full_plan.setter
    def full_plan(
        self,
        value: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]],
    ) -> None:
        jsii.set(self, "fullPlan", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="incPlan")
    def inc_plan(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''
        :Property: incPlan: Incremental backup plan schedule. Only for mysql and oracle. More details see FullPlan.
        '''
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], jsii.get(self, "incPlan"))

    @inc_plan.setter
    def inc_plan(
        self,
        value: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]],
    ) -> None:
        jsii.set(self, "incPlan", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="instanceUuid")
    def instance_uuid(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''
        :Property: instanceUuid: Uuid of database instance.
        '''
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], jsii.get(self, "instanceUuid"))

    @instance_uuid.setter
    def instance_uuid(
        self,
        value: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]],
    ) -> None:
        jsii.set(self, "instanceUuid", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="logPlan")
    def log_plan(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''
        :Property: logPlan: Log backup plan schedule.More details see FullPlan.
        '''
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], jsii.get(self, "logPlan"))

    @log_plan.setter
    def log_plan(
        self,
        value: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]],
    ) -> None:
        jsii.set(self, "logPlan", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="maxRateLimit")
    def max_rate_limit(
        self,
    ) -> typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]]:
        '''
        :Property: maxRateLimit: Max rate limit for backup job,
        '''
        return typing.cast(typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]], jsii.get(self, "maxRateLimit"))

    @max_rate_limit.setter
    def max_rate_limit(
        self,
        value: typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]],
    ) -> None:
        jsii.set(self, "maxRateLimit", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="maxRetrySeconds")
    def max_retry_seconds(
        self,
    ) -> typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]]:
        '''
        :Property: maxRetrySeconds: Max retry seconds on network failure.
        '''
        return typing.cast(typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]], jsii.get(self, "maxRetrySeconds"))

    @max_retry_seconds.setter
    def max_retry_seconds(
        self,
        value: typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]],
    ) -> None:
        jsii.set(self, "maxRetrySeconds", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="options")
    def options(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''
        :Property: options: Backup options in json format, different for each type of database. For Oracle, use {"channels":4,"compression":"lzop","offline_backup":false,"archivelog_reserve_hours":24,"custom_commands":""}, "channels" means numbers of concurrent threads, "archivelog_reserve_hours" means how long before the archive log will be deleted after backup job completed, other parameters should use the default value. For Mysql, use {"channels":4,"compression":"lzop","del_binlog":false}, "del_binlog" means whether the binlog will be deleted after backup completed, only take effect for log or continuous backup. For SQL Server, use {"channels":4,"verify":false,"compression":"lzop","backup_new_databases":false}.
        '''
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], jsii.get(self, "options"))

    @options.setter
    def options(
        self,
        value: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]],
    ) -> None:
        jsii.set(self, "options", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="source")
    def source(
        self,
    ) -> typing.Optional[typing.Union[ros_cdk_core.IResolvable, "RosDbPlan.SourceProperty"]]:
        '''
        :Property: source: Which database instance or database will be backup.
        '''
        return typing.cast(typing.Optional[typing.Union[ros_cdk_core.IResolvable, "RosDbPlan.SourceProperty"]], jsii.get(self, "source"))

    @source.setter
    def source(
        self,
        value: typing.Optional[typing.Union[ros_cdk_core.IResolvable, "RosDbPlan.SourceProperty"]],
    ) -> None:
        jsii.set(self, "source", value)

    @jsii.data_type(
        jsii_type="@alicloud/ros-cdk-hbr.RosDbPlan.SourceProperty",
        jsii_struct_bases=[],
        name_mapping={"entries": "entries"},
    )
    class SourceProperty:
        def __init__(
            self,
            *,
            entries: typing.Optional[typing.Union[ros_cdk_core.IResolvable, typing.Sequence[typing.Union[builtins.str, ros_cdk_core.IResolvable]]]] = None,
        ) -> None:
            '''
            :param entries:
            '''
            # Only the optionally-supplied key is stored; an omitted
            # ``entries`` stays absent from ``_values``.
            self._values: typing.Dict[str, typing.Any] = {}
            if entries is not None:
                self._values["entries"] = entries

        @builtins.property
        def entries(
            self,
        ) -> typing.Optional[typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[builtins.str, ros_cdk_core.IResolvable]]]]:
            '''
            :Property: entries: Backup database instance or databases or tables. For Oracle, use ["oracle://${instanceName}", "oracle://${instanceName}/archivelog"], ${instanceName} is the name of the oracle database instance, which can be get from dbAgent resource. For Mysql, use "mysql://${instanceName}". For SQL Server, use ["mssql://${instanceName}/${databse1}", "mssql://${instanceName}/${databse2}"], ${databse} can be get from dbAgent resource(MSSQL)
            '''
            result = self._values.get("entries")
            return typing.cast(typing.Optional[typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[builtins.str, ros_cdk_core.IResolvable]]]], result)

        def __eq__(self, rhs: typing.Any) -> builtins.bool:
            return isinstance(rhs, self.__class__) and rhs._values == self._values

        def __ne__(self, rhs: typing.Any) -> builtins.bool:
            return not (rhs == self)

        def __repr__(self) -> str:
            return "SourceProperty(%s)" % ", ".join(
                k + "=" + repr(v) for k, v in self._values.items()
            )
# NOTE(review): jsii auto-generated binding code -- prefer regenerating from the
# source template over hand-editing.
@jsii.data_type(
    jsii_type="@alicloud/ros-cdk-hbr.RosDbPlanProps",
    jsii_struct_bases=[],
    name_mapping={
        "db_plan_name": "dbPlanName",
        "host_uuid": "hostUuid",
        "source_type": "sourceType",
        "vault_id": "vaultId",
        "continuous_plan": "continuousPlan",
        "cumulative_plan": "cumulativePlan",
        "full_plan": "fullPlan",
        "inc_plan": "incPlan",
        "instance_uuid": "instanceUuid",
        "log_plan": "logPlan",
        "max_rate_limit": "maxRateLimit",
        "max_retry_seconds": "maxRetrySeconds",
        "options": "options",
        "source": "source",
    },
)
class RosDbPlanProps:
    """Typed property bag (jsii struct) for ``ALIYUN::HBR::DbPlan``.

    Keyword-only constructor; values are held in ``self._values`` and
    exposed through read-only properties below.
    """

    def __init__(
        self,
        *,
        db_plan_name: typing.Union[builtins.str, ros_cdk_core.IResolvable],
        host_uuid: typing.Union[builtins.str, ros_cdk_core.IResolvable],
        source_type: typing.Union[builtins.str, ros_cdk_core.IResolvable],
        vault_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
        continuous_plan: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
        cumulative_plan: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
        full_plan: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
        inc_plan: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
        instance_uuid: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
        log_plan: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
        max_rate_limit: typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]] = None,
        max_retry_seconds: typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]] = None,
        options: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
        source: typing.Optional[typing.Union[ros_cdk_core.IResolvable, RosDbPlan.SourceProperty]] = None,
    ) -> None:
        '''Properties for defining a ``ALIYUN::HBR::DbPlan``.

        :param db_plan_name: display name of the backup plan.
        :param host_uuid: UUID of the host of the database instance.
        :param source_type: database type (MYSQL, ORACLE, MSSQL).
        :param vault_id: vault that stores the backup data.
        :param continuous_plan:
        :param cumulative_plan:
        :param full_plan:
        :param inc_plan:
        :param instance_uuid:
        :param log_plan:
        :param max_rate_limit:
        :param max_retry_seconds:
        :param options:
        :param source:
        '''
        # Required properties are stored unconditionally; optional ones are
        # stored only when provided, so absent keys are omitted entirely
        # (distinguishing "unset" from an explicit value).
        self._values: typing.Dict[str, typing.Any] = {
            "db_plan_name": db_plan_name,
            "host_uuid": host_uuid,
            "source_type": source_type,
            "vault_id": vault_id,
        }
        if continuous_plan is not None:
            self._values["continuous_plan"] = continuous_plan
        if cumulative_plan is not None:
            self._values["cumulative_plan"] = cumulative_plan
        if full_plan is not None:
            self._values["full_plan"] = full_plan
        if inc_plan is not None:
            self._values["inc_plan"] = inc_plan
        if instance_uuid is not None:
            self._values["instance_uuid"] = instance_uuid
        if log_plan is not None:
            self._values["log_plan"] = log_plan
        if max_rate_limit is not None:
            self._values["max_rate_limit"] = max_rate_limit
        if max_retry_seconds is not None:
            self._values["max_retry_seconds"] = max_retry_seconds
        if options is not None:
            self._values["options"] = options
        if source is not None:
            self._values["source"] = source

    @builtins.property
    def db_plan_name(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: dbPlanName: Display name of the backup plan.
        '''
        result = self._values.get("db_plan_name")
        # Required key: set unconditionally in __init__, so absence is a bug.
        assert result is not None, "Required property 'db_plan_name' is missing"
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)

    @builtins.property
    def host_uuid(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: hostUuid: Uuid of the host of the database instance.
        '''
        result = self._values.get("host_uuid")
        assert result is not None, "Required property 'host_uuid' is missing"
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)

    @builtins.property
    def source_type(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: sourceType: Database type, allowed value: MYSQL, ORACLE, MSSQL
        '''
        result = self._values.get("source_type")
        assert result is not None, "Required property 'source_type' is missing"
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)

    @builtins.property
    def vault_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: vaultId: Vault ID to create backup plan, the backup data will be stored to the vault.
        '''
        result = self._values.get("vault_id")
        assert result is not None, "Required property 'vault_id' is missing"
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)

    @builtins.property
    def continuous_plan(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''
        :Property: continuousPlan: Continuous backup plan schedule. Use { "type": "continuous" }.
        '''
        result = self._values.get("continuous_plan")
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)

    @builtins.property
    def cumulative_plan(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''
        :Property: cumulativePlan: Cumulative plan schedule, only for mssql. More details see FullPlan.
        '''
        result = self._values.get("cumulative_plan")
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)

    @builtins.property
    def full_plan(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''
        :Property: fullPlan: Full backup plan schedule. daily: {"type": "daily", "start": "00:00:00", "interval": 3}, weekly {"type":"weekly","start": "03:00:00","days": [1,2,3,4,5],"interval": 1}, days can be 0 - 6, 0 means Sunday, and interval can be 1 - 52.
        '''
        result = self._values.get("full_plan")
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)

    @builtins.property
    def inc_plan(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''
        :Property: incPlan: Incremental backup plan schedule. Only for mysql and oracle. More details see FullPlan.
        '''
        result = self._values.get("inc_plan")
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)

    @builtins.property
    def instance_uuid(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''
        :Property: instanceUuid: Uuid of database instance.
        '''
        result = self._values.get("instance_uuid")
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)

    @builtins.property
    def log_plan(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''
        :Property: logPlan: Log backup plan schedule.More details see FullPlan.
        '''
        result = self._values.get("log_plan")
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)

    @builtins.property
    def max_rate_limit(
        self,
    ) -> typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]]:
        '''
        :Property: maxRateLimit: Max rate limit for backup job,
        '''
        result = self._values.get("max_rate_limit")
        return typing.cast(typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]], result)

    @builtins.property
    def max_retry_seconds(
        self,
    ) -> typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]]:
        '''
        :Property: maxRetrySeconds: Max retry seconds on network failure.
        '''
        result = self._values.get("max_retry_seconds")
        return typing.cast(typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]], result)

    @builtins.property
    def options(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''
        :Property: options: Backup options in json format, different for each type of database. For Oracle, use {"channels":4,"compression":"lzop","offline_backup":false,"archivelog_reserve_hours":24,"custom_commands":""}, "channels" means numbers of concurrent theads, "archivelog_reserve_hours" means how long before the archive log will be deleted after backup job completed, other paramters should use the default vaule. For Mysql, use {"channels":4,"compression":"lzop","del_binlog":false}, "del_binlog" means whether the binlog will be deleted after backup completed, only take effect for log or continuous backup. For SQL Server, use {"channels":4,"verify":false,"compression":"lzop","backup_new_databases":false}.
        '''
        result = self._values.get("options")
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)

    @builtins.property
    def source(
        self,
    ) -> typing.Optional[typing.Union[ros_cdk_core.IResolvable, RosDbPlan.SourceProperty]]:
        '''
        :Property: source: Which database instance or database will be backup.
        '''
        result = self._values.get("source")
        return typing.cast(typing.Optional[typing.Union[ros_cdk_core.IResolvable, RosDbPlan.SourceProperty]], result)

    def __eq__(self, rhs: typing.Any) -> builtins.bool:
        # Struct equality: same class and identical stored values.
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs: typing.Any) -> builtins.bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        return "RosDbPlanProps(%s)" % ", ".join(
            k + "=" + repr(v) for k, v in self._values.items()
        )
# NOTE(review): jsii auto-generated binding code -- all attribute/property
# access delegates to the jsii runtime kernel; prefer regenerating over
# hand-editing.
class RosDbVault(
    ros_cdk_core.RosResource,
    metaclass=jsii.JSIIMeta,
    jsii_type="@alicloud/ros-cdk-hbr.RosDbVault",
):
    '''A ROS template type: ``ALIYUN::HBR::DbVault``.'''

    def __init__(
        self,
        scope: ros_cdk_core.Construct,
        id: builtins.str,
        props: "RosDbVaultProps",
        enable_resource_property_constraint: builtins.bool,
    ) -> None:
        '''Create a new ``ALIYUN::HBR::DbVault``.

        :param scope: - scope in which this resource is defined.
        :param id: - scoped id of the resource.
        :param props: - resource properties.
        :param enable_resource_property_constraint: -
        '''
        # Delegate construction to the jsii kernel (the real implementation
        # lives in the bundled JavaScript library).
        jsii.create(self.__class__, self, [scope, id, props, enable_resource_property_constraint])

    @jsii.member(jsii_name="renderProperties")
    def _render_properties(
        self,
        props: typing.Mapping[builtins.str, typing.Any],
    ) -> typing.Mapping[builtins.str, typing.Any]:
        '''
        :param props: -
        '''
        return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.invoke(self, "renderProperties", [props]))

    @jsii.python.classproperty # type: ignore[misc]
    @jsii.member(jsii_name="ROS_RESOURCE_TYPE_NAME")
    def ROS_RESOURCE_TYPE_NAME(cls) -> builtins.str:
        '''The resource type name for this resource class.'''
        return typing.cast(builtins.str, jsii.sget(cls, "ROS_RESOURCE_TYPE_NAME"))

    # --- read-only ROS attributes (resolved at deploy time) ---

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrDescription")
    def attr_description(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: Description: Description of the vault.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrDescription"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrRetentionDays")
    def attr_retention_days(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: RetentionDays: Data retention days of the vault. Data will be deleted when it's older than this time.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrRetentionDays"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrVaultId")
    def attr_vault_id(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: VaultId: Vault ID.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrVaultId"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrVaultName")
    def attr_vault_name(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: VaultName: Display name of the vault.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrVaultName"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrVaultRegionId")
    def attr_vault_region_id(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: VaultRegionId: The region ID to create the vault.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrVaultRegionId"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="rosProperties")
    def _ros_properties(self) -> typing.Mapping[builtins.str, typing.Any]:
        return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.get(self, "rosProperties"))

    # --- mutable resource properties (getter/setter pairs over jsii) ---

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="enableResourcePropertyConstraint")
    def enable_resource_property_constraint(self) -> builtins.bool:
        return typing.cast(builtins.bool, jsii.get(self, "enableResourcePropertyConstraint"))

    @enable_resource_property_constraint.setter
    def enable_resource_property_constraint(self, value: builtins.bool) -> None:
        jsii.set(self, "enableResourcePropertyConstraint", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="retentionDays")
    def retention_days(self) -> typing.Union[jsii.Number, ros_cdk_core.IResolvable]:
        '''
        :Property: retentionDays: Data retention days of the vault. Data will be deleted when it's older than this time.
        '''
        return typing.cast(typing.Union[jsii.Number, ros_cdk_core.IResolvable], jsii.get(self, "retentionDays"))

    @retention_days.setter
    def retention_days(
        self,
        value: typing.Union[jsii.Number, ros_cdk_core.IResolvable],
    ) -> None:
        jsii.set(self, "retentionDays", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="vaultName")
    def vault_name(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: vaultName: Display name of the vault.
        '''
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], jsii.get(self, "vaultName"))

    @vault_name.setter
    def vault_name(
        self,
        value: typing.Union[builtins.str, ros_cdk_core.IResolvable],
    ) -> None:
        jsii.set(self, "vaultName", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="vaultRegionId")
    def vault_region_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: vaultRegionId: The region ID to create the vault.
        '''
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], jsii.get(self, "vaultRegionId"))

    @vault_region_id.setter
    def vault_region_id(
        self,
        value: typing.Union[builtins.str, ros_cdk_core.IResolvable],
    ) -> None:
        jsii.set(self, "vaultRegionId", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="description")
    def description(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''
        :Property: description: Description of the vault.
        '''
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], jsii.get(self, "description"))

    @description.setter
    def description(
        self,
        value: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]],
    ) -> None:
        jsii.set(self, "description", value)
# NOTE(review): jsii auto-generated binding code -- prefer regenerating over
# hand-editing.
@jsii.data_type(
    jsii_type="@alicloud/ros-cdk-hbr.RosDbVaultProps",
    jsii_struct_bases=[],
    name_mapping={
        "retention_days": "retentionDays",
        "vault_name": "vaultName",
        "vault_region_id": "vaultRegionId",
        "description": "description",
    },
)
class RosDbVaultProps:
    """Typed property bag (jsii struct) for ``ALIYUN::HBR::DbVault``."""

    def __init__(
        self,
        *,
        retention_days: typing.Union[jsii.Number, ros_cdk_core.IResolvable],
        vault_name: typing.Union[builtins.str, ros_cdk_core.IResolvable],
        vault_region_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
        description: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
    ) -> None:
        '''Properties for defining a ``ALIYUN::HBR::DbVault``.

        :param retention_days: data retention days of the vault.
        :param vault_name: display name of the vault.
        :param vault_region_id: region ID to create the vault in.
        :param description: optional vault description.
        '''
        # Optional "description" is stored only when provided so the key is
        # omitted entirely when unset.
        self._values: typing.Dict[str, typing.Any] = {
            "retention_days": retention_days,
            "vault_name": vault_name,
            "vault_region_id": vault_region_id,
        }
        if description is not None:
            self._values["description"] = description

    @builtins.property
    def retention_days(self) -> typing.Union[jsii.Number, ros_cdk_core.IResolvable]:
        '''
        :Property: retentionDays: Data retention days of the vault. Data will be deleted when it's older than this time.
        '''
        result = self._values.get("retention_days")
        assert result is not None, "Required property 'retention_days' is missing"
        return typing.cast(typing.Union[jsii.Number, ros_cdk_core.IResolvable], result)

    @builtins.property
    def vault_name(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: vaultName: Display name of the vault.
        '''
        result = self._values.get("vault_name")
        assert result is not None, "Required property 'vault_name' is missing"
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)

    @builtins.property
    def vault_region_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: vaultRegionId: The region ID to create the vault.
        '''
        result = self._values.get("vault_region_id")
        assert result is not None, "Required property 'vault_region_id' is missing"
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)

    @builtins.property
    def description(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''
        :Property: description: Description of the vault.
        '''
        result = self._values.get("description")
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)

    def __eq__(self, rhs: typing.Any) -> builtins.bool:
        # Struct equality: same class and identical stored values.
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs: typing.Any) -> builtins.bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        return "RosDbVaultProps(%s)" % ", ".join(
            k + "=" + repr(v) for k, v in self._values.items()
        )
# NOTE(review): jsii auto-generated binding code -- all attribute/property
# access delegates to the jsii runtime kernel; prefer regenerating over
# hand-editing.
class RosRestoreJob(
    ros_cdk_core.RosResource,
    metaclass=jsii.JSIIMeta,
    jsii_type="@alicloud/ros-cdk-hbr.RosRestoreJob",
):
    '''A ROS template type: ``ALIYUN::HBR::RestoreJob``.'''

    def __init__(
        self,
        scope: ros_cdk_core.Construct,
        id: builtins.str,
        props: "RosRestoreJobProps",
        enable_resource_property_constraint: builtins.bool,
    ) -> None:
        '''Create a new ``ALIYUN::HBR::RestoreJob``.

        :param scope: - scope in which this resource is defined.
        :param id: - scoped id of the resource.
        :param props: - resource properties.
        :param enable_resource_property_constraint: -
        '''
        # Delegate construction to the jsii kernel (the real implementation
        # lives in the bundled JavaScript library).
        jsii.create(self.__class__, self, [scope, id, props, enable_resource_property_constraint])

    @jsii.member(jsii_name="renderProperties")
    def _render_properties(
        self,
        props: typing.Mapping[builtins.str, typing.Any],
    ) -> typing.Mapping[builtins.str, typing.Any]:
        '''
        :param props: -
        '''
        return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.invoke(self, "renderProperties", [props]))

    @jsii.python.classproperty # type: ignore[misc]
    @jsii.member(jsii_name="ROS_RESOURCE_TYPE_NAME")
    def ROS_RESOURCE_TYPE_NAME(cls) -> builtins.str:
        '''The resource type name for this resource class.'''
        return typing.cast(builtins.str, jsii.sget(cls, "ROS_RESOURCE_TYPE_NAME"))

    # --- read-only ROS attributes (resolved at deploy time) ---

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrErrorMessage")
    def attr_error_message(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: ErrorMessage: Error message of restore job
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrErrorMessage"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrRestoreId")
    def attr_restore_id(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: RestoreId: Restore job ID
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrRestoreId"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrRestoreType")
    def attr_restore_type(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: RestoreType: Restore type
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrRestoreType"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrSourceType")
    def attr_source_type(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: SourceType: Source type
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrSourceType"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrStatus")
    def attr_status(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: Status: Restore job status
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrStatus"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="rosProperties")
    def _ros_properties(self) -> typing.Mapping[builtins.str, typing.Any]:
        return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.get(self, "rosProperties"))

    # --- mutable resource properties (getter/setter pairs over jsii) ---

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="enableResourcePropertyConstraint")
    def enable_resource_property_constraint(self) -> builtins.bool:
        return typing.cast(builtins.bool, jsii.get(self, "enableResourcePropertyConstraint"))

    @enable_resource_property_constraint.setter
    def enable_resource_property_constraint(self, value: builtins.bool) -> None:
        jsii.set(self, "enableResourcePropertyConstraint", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="restoreType")
    def restore_type(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: restoreType: Restore type
        '''
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], jsii.get(self, "restoreType"))

    @restore_type.setter
    def restore_type(
        self,
        value: typing.Union[builtins.str, ros_cdk_core.IResolvable],
    ) -> None:
        jsii.set(self, "restoreType", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="snapshotId")
    def snapshot_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: snapshotId: Snapshot ID
        '''
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], jsii.get(self, "snapshotId"))

    @snapshot_id.setter
    def snapshot_id(
        self,
        value: typing.Union[builtins.str, ros_cdk_core.IResolvable],
    ) -> None:
        jsii.set(self, "snapshotId", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="sourceClientId")
    def source_client_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: sourceClientId: Source client ID. It should be provided when SourceType=FILE.
        '''
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], jsii.get(self, "sourceClientId"))

    @source_client_id.setter
    def source_client_id(
        self,
        value: typing.Union[builtins.str, ros_cdk_core.IResolvable],
    ) -> None:
        jsii.set(self, "sourceClientId", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="sourceInstanceId")
    def source_instance_id(
        self,
    ) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: sourceInstanceId: Source instance ID. It should be provided when SourceType=ECS_FILE.
        '''
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], jsii.get(self, "sourceInstanceId"))

    @source_instance_id.setter
    def source_instance_id(
        self,
        value: typing.Union[builtins.str, ros_cdk_core.IResolvable],
    ) -> None:
        jsii.set(self, "sourceInstanceId", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="sourceType")
    def source_type(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: sourceType: Source type
        '''
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], jsii.get(self, "sourceType"))

    @source_type.setter
    def source_type(
        self,
        value: typing.Union[builtins.str, ros_cdk_core.IResolvable],
    ) -> None:
        jsii.set(self, "sourceType", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="targetClientId")
    def target_client_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: targetClientId: Target client ID. It should be provided when RestoreType=FILE.
        '''
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], jsii.get(self, "targetClientId"))

    @target_client_id.setter
    def target_client_id(
        self,
        value: typing.Union[builtins.str, ros_cdk_core.IResolvable],
    ) -> None:
        jsii.set(self, "targetClientId", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="targetInstanceId")
    def target_instance_id(
        self,
    ) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: targetInstanceId: Target instance ID. It should be provided when RestoreType=ECS_FILE.
        '''
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], jsii.get(self, "targetInstanceId"))

    @target_instance_id.setter
    def target_instance_id(
        self,
        value: typing.Union[builtins.str, ros_cdk_core.IResolvable],
    ) -> None:
        jsii.set(self, "targetInstanceId", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="targetPath")
    def target_path(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: targetPath: Target path. For instance, "/".
        '''
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], jsii.get(self, "targetPath"))

    @target_path.setter
    def target_path(
        self,
        value: typing.Union[builtins.str, ros_cdk_core.IResolvable],
    ) -> None:
        jsii.set(self, "targetPath", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="vaultId")
    def vault_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: vaultId: Vault ID
        '''
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], jsii.get(self, "vaultId"))

    @vault_id.setter
    def vault_id(
        self,
        value: typing.Union[builtins.str, ros_cdk_core.IResolvable],
    ) -> None:
        jsii.set(self, "vaultId", value)
# NOTE(review): jsii auto-generated binding code -- prefer regenerating over
# hand-editing.
@jsii.data_type(
    jsii_type="@alicloud/ros-cdk-hbr.RosRestoreJobProps",
    jsii_struct_bases=[],
    name_mapping={
        "restore_type": "restoreType",
        "snapshot_id": "snapshotId",
        "source_client_id": "sourceClientId",
        "source_instance_id": "sourceInstanceId",
        "source_type": "sourceType",
        "target_client_id": "targetClientId",
        "target_instance_id": "targetInstanceId",
        "target_path": "targetPath",
        "vault_id": "vaultId",
    },
)
class RosRestoreJobProps:
    """Typed property bag (jsii struct) for ``ALIYUN::HBR::RestoreJob``.

    All nine properties are required (no optional branch in ``__init__``).
    """

    def __init__(
        self,
        *,
        restore_type: typing.Union[builtins.str, ros_cdk_core.IResolvable],
        snapshot_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
        source_client_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
        source_instance_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
        source_type: typing.Union[builtins.str, ros_cdk_core.IResolvable],
        target_client_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
        target_instance_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
        target_path: typing.Union[builtins.str, ros_cdk_core.IResolvable],
        vault_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
    ) -> None:
        '''Properties for defining a ``ALIYUN::HBR::RestoreJob``.

        :param restore_type:
        :param snapshot_id:
        :param source_client_id:
        :param source_instance_id:
        :param source_type:
        :param target_client_id:
        :param target_instance_id:
        :param target_path:
        :param vault_id:
        '''
        self._values: typing.Dict[str, typing.Any] = {
            "restore_type": restore_type,
            "snapshot_id": snapshot_id,
            "source_client_id": source_client_id,
            "source_instance_id": source_instance_id,
            "source_type": source_type,
            "target_client_id": target_client_id,
            "target_instance_id": target_instance_id,
            "target_path": target_path,
            "vault_id": vault_id,
        }

    @builtins.property
    def restore_type(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: restoreType: Restore type
        '''
        result = self._values.get("restore_type")
        # Required key: set unconditionally in __init__, so absence is a bug.
        assert result is not None, "Required property 'restore_type' is missing"
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)

    @builtins.property
    def snapshot_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: snapshotId: Snapshot ID
        '''
        result = self._values.get("snapshot_id")
        assert result is not None, "Required property 'snapshot_id' is missing"
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)

    @builtins.property
    def source_client_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: sourceClientId: Source client ID. It should be provided when SourceType=FILE.
        '''
        result = self._values.get("source_client_id")
        assert result is not None, "Required property 'source_client_id' is missing"
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)

    @builtins.property
    def source_instance_id(
        self,
    ) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: sourceInstanceId: Source instance ID. It should be provided when SourceType=ECS_FILE.
        '''
        result = self._values.get("source_instance_id")
        assert result is not None, "Required property 'source_instance_id' is missing"
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)

    @builtins.property
    def source_type(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: sourceType: Source type
        '''
        result = self._values.get("source_type")
        assert result is not None, "Required property 'source_type' is missing"
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)

    @builtins.property
    def target_client_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: targetClientId: Target client ID. It should be provided when RestoreType=FILE.
        '''
        result = self._values.get("target_client_id")
        assert result is not None, "Required property 'target_client_id' is missing"
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)

    @builtins.property
    def target_instance_id(
        self,
    ) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: targetInstanceId: Target instance ID. It should be provided when RestoreType=ECS_FILE.
        '''
        result = self._values.get("target_instance_id")
        assert result is not None, "Required property 'target_instance_id' is missing"
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)

    @builtins.property
    def target_path(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: targetPath: Target path. For instance, "/".
        '''
        result = self._values.get("target_path")
        assert result is not None, "Required property 'target_path' is missing"
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)

    @builtins.property
    def vault_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: vaultId: Vault ID
        '''
        result = self._values.get("vault_id")
        assert result is not None, "Required property 'vault_id' is missing"
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)

    def __eq__(self, rhs: typing.Any) -> builtins.bool:
        # Struct equality: same class and identical stored values.
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs: typing.Any) -> builtins.bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        return "RosRestoreJobProps(%s)" % ", ".join(
            k + "=" + repr(v) for k, v in self._values.items()
        )
# Explicit public API of this generated module.
__all__ = [
    "BackupClients",
    "BackupClientsProps",
    "DbAgent",
    "DbAgentProps",
    "DbPlan",
    "DbPlanProps",
    "DbVault",
    "DbVaultProps",
    "RestoreJob",
    "RestoreJobProps",
    "RosBackupClients",
    "RosBackupClientsProps",
    "RosDbAgent",
    "RosDbAgentProps",
    "RosDbPlan",
    "RosDbPlanProps",
    "RosDbVault",
    "RosDbVaultProps",
    "RosRestoreJob",
    "RosRestoreJobProps",
]

# Finalize jsii type publication for this module (standard jsii footer).
publication.publish()
|
"""Bilinear layer for NeuralPy"""
from torch.nn import Bilinear as _BiLinear
from neuralpy.utils import CustomLayer
class Bilinear(CustomLayer):
    """
    A bilinear layer is a function of two inputs x and y
    that is linear in each input separately.
    Simple bilinear functions on vectors are the
    dot product or the element-wise product.

    To learn more about Bilinear layers, please check PyTorch
    documentation at https://pytorch.org/docs/stable/nn.html#bilinear

    Supported Arguments:
        n_nodes: (Integer) Size of the output sample
        n1_features: (Integer) Size of the input sample1,
            no need for this argument layers except the initial layer.
        n2_features: (Integer) Size of the input sample2,
            no need for this argument layers except the initial layer
        bias: (Boolean) If true then uses the bias, Defaults to `true`
        name: (String) Name of the layer, if not provided then
            automatically calculates a unique name for the layer
    """

    def __init__(
        self, n_nodes, n1_features=None, n2_features=None, bias=True, name=None
    ):
        """
        __init__ method for bilinear layer

        Raises ValueError if any argument fails validation.

        Supported Arguments:
            n_nodes: (Integer) Size of the output sample
            n1_features: (Integer) Size of the input sample1,
                no need for this argument layers except the initial layer.
            n2_features: (Integer) Size of the input sample2,
                no need for this argument layers except the initial layer
            bias: (Boolean) If true then uses the bias, Defaults to `true`
            name: (String) Name of the layer, if not provided then
                automatically calculates a unique name for the layer
        """
        # Checking the n_nodes field: must be a positive integer.
        if not n_nodes or not isinstance(n_nodes, int) or n_nodes <= 0:
            raise ValueError("Please provide a valid n_nodes")
        # Checking the n1_features field, it is a optional field.
        # NOTE(review): this accepts 0 (>= 0) while n_nodes requires > 0 --
        # presumably intentional, but worth confirming upstream.
        if n1_features is not None and not (
            isinstance(n1_features, int) and n1_features >= 0
        ):
            raise ValueError("Please provide a valid n1_features")
        # Checking the n2_features field (same optional rule as n1_features)
        if n2_features is not None and not (
            isinstance(n2_features, int) and n2_features >= 0
        ):
            raise ValueError("Please provide a valid n2_features")
        # Checking the bias field, this is also optional, default to True
        if not isinstance(bias, bool):
            raise ValueError("Please provide a valid bias")
        super().__init__(_BiLinear, "Bilinear", layer_name=name)
        # Storing the data (name-mangled private attributes)
        self.__n_inputs = n1_features
        self.__n_inputs2 = n2_features
        self.__n_nodes = n_nodes
        self.__bias = bias

    def set_input_dim(self, prev_input_dim, prev_layer_type):
        """
        This method calculates the input shape for layer based on previous output
        layer.

        This method is used by the NeuralPy Models, for building the models.
        No need to call this method for using NeuralPy.
        """
        # Checking if n_inputs is there or not, not overwriting the n_input
        # field
        # NOTE(review): only the first input dim (__n_inputs) is inferred here;
        # __n_inputs2 stays None unless n2_features was passed explicitly --
        # verify callers always supply n2_features for non-initial layers.
        if not self.__n_inputs:
            layer_type = prev_layer_type.lower()
            if layer_type in ("dense", "batchnorm1d", "bilinear"):
                self.__n_inputs = prev_input_dim[0]
            else:
                raise ValueError(
                    "Unsupported previous layer, please provide your own input shape \
                    for the layer"
                )

    def get_layer(self):
        """
        This method returns the details as dict of the layer.

        This method is used by the NeuralPy Models, for building the models.
        No need to call this method for using NeuralPy.
        """
        # Returning all the details of the layer: output shape tuple plus the
        # keyword arguments forwarded to torch.nn.Bilinear.
        return self._get_layer_details(
            (self.__n_nodes,),
            {
                "in_features1": self.__n_inputs,
                "in_features2": self.__n_inputs2,
                "out_features": self.__n_nodes,
                "bias": self.__bias,
            },
        )
|
from django.http import HttpResponse,HttpResponseRedirect,HttpResponseNotFound
from django.http import QueryDict
import json
from django.contrib.auth.models import User, Group
from django.contrib import messages
from django.core import serializers
from sampledbapp.models import *
import django_rq
from datetime import datetime, timedelta
import sampledbapp.rq_workers
from django.core.files.storage import default_storage
from django.core.files.uploadhandler import InMemoryUploadedFile, TemporaryUploadedFile
import os
from io import BytesIO
import openpyxl
from django.conf import settings
from sampledbapp.utils import *
from sampledbapp import rq_workers
import urllib
import operator
#import unicode
import shutil
# Save a new or edited project
def save_project(request):
    """Create or update a Project from the posted form data.

    Returns JSON: {'success': True} on success, or
    {'failure': True, 'Error': {field: messages}} when model validation fails.
    """
    from django.core.exceptions import ValidationError  # local import: keeps file-level imports untouched
    form_data = QueryDict(request.POST['form_data'])
    if 'project_id' in form_data:
        # Edit path: update the existing project in place.
        project = Project.objects.get(id=form_data['project_id'])
        project.title = form_data['title']
        project.description = form_data['description']
        project.group = Group.objects.get(id=form_data['group_id'])
        project.p_code = form_data['p_code']
    else:
        # Create path: build a new, unsaved Project.
        project = Project(title=form_data['title'],
                          description=form_data['description'],
                          group=Group.objects.get(id=form_data['group_id']),
                          p_code=form_data['p_code'])
    try:
        project.full_clean()
    except ValidationError as e:
        # full_clean() raises a ValidationError carrying an error_dict;
        # iterating it yields (field, messages) pairs.  The previous broad
        # ``except Exception`` also caught unrelated errors and then failed
        # to unpack them as pairs.
        error_map = {}
        for field, error in e:
            error_map[field] = error
        data = {'failure': True, 'Error': error_map}
    else:
        project.save()
        data = {'success': True}
    return HttpResponse(json.dumps(data), content_type="application/json")
# Get the samples by project
def get_samples(request):
    """Return active and soft-deleted samples for a project/study as serialized JSON."""
    project_id = request.POST['project_id']
    study_title = request.POST['study_title']
    if project_id and study_title:
        project_qs = Project.objects.filter(pk=int(project_id))
        base = Sample.objects.filter(project__in=project_qs, study_title=study_title)
        samples = base.exclude(is_deleted=True).order_by('id')
        deleted_samples = base.filter(is_deleted=True).order_by('id')
    else:
        # Missing parameters: respond with empty lists rather than erroring.
        samples = []
        deleted_samples = []
    payload = {'success': True,
               'samples': serializers.serialize('json', samples),
               'deleted_samples': serializers.serialize('json', deleted_samples)}
    return HttpResponse(json.dumps(payload), content_type="application/json")
# Submit sample changes for staging
def submit_edit_samples(request):
    """Stage edited samples and queue a validation job; responds with the staging id."""
    project_id = request.POST['project_id']
    form_data = QueryDict(request.POST['form_data'])
    edited_pks = extract_edited_pks(request.POST.getlist('edited_pks'))
    # NOTE: the payload is deliberately double json-encoded;
    # convert_and_commit_json performs two loads to match.
    payload = {'edited_pks': edited_pks,
               'objects': extract_objects(edited_pks, form_data)}
    json_data = json.dumps(json.dumps(payload))
    staging_object = Staging.objects.create(status=1,
                                            user=request.user,
                                            json=json_data,
                                            project=Project.objects.get(pk=int(project_id)),
                                            type="UI submission")
    job = Job.objects.create(user=request.user,
                             job_type='check_staging_samples',
                             status=1,
                             text_to_show=json_data,
                             datetime_created=datetime.now())
    staging_object.job = job
    staging_object.save()
    # Validation runs asynchronously on the redis queue.
    rq_job = django_rq.enqueue(rq_workers.check_staging_samples, job.id, staging_object.id)
    job.rq_id = rq_job.id
    job.save()
    messages.add_message(request, messages.SUCCESS, "Sample changes submitted for validation")
    return HttpResponse(json.dumps({'success': True, 'staging_id': staging_object.id}),
                        content_type="application/json")
def extract_objects(edited_pks, form_data):
    """Build a {pk: field-dict} mapping for every edited sample row."""
    return {pk: extract_data(pk, form_data) for pk in edited_pks}
def extract_data(pk, form_data):
    """Pull the submitted values for one sample row out of the flat form data.

    The form posts each column as '<field>-<pk>'; returns {field: value} in
    the canonical column order.  Raises KeyError if a field is missing,
    matching the previous behaviour.
    """
    # Single source of truth for the sample columns instead of 18 copy-pasted
    # assignments.
    fields = (
        "study_title", "sample_id", "species", "sample_storage_type",
        "sample_matrix", "collection_protocol", "campus", "building",
        "room", "freezer_id", "shelf_id", "box_id", "parent_type",
        "parent_id", "consent_form_information", "tissue_bank_reference",
        "hazard_group", "hazard_description",
    )
    return {field: form_data[field + '-' + pk] for field in fields}
# Extract the PKs that are being edited.
def extract_edited_pks(submitted_edited_pks):
    """Decode the url-encoded 'edited_pks[]=...' payload into a list of pk strings."""
    # example "edited_pks%5B%5D=1&edited_pks%5B%5D=2"
    decoded = urllib.parse.unquote(submitted_edited_pks[0])
    return [entry.split('=')[1] for entry in decoded.split('&')]
# Get the staging object, create a job, and add it to the queue
def commit_staging_samples(request):
    """Queue a job that commits a validated staging object to the database."""
    staging = Staging.objects.get(pk=request.POST['staging_id'])
    job = Job.objects.create(user=request.user,
                             job_type='commit_staging_samples',
                             status=1,
                             text_to_show=staging.json)
    staging.job = job
    staging.save()
    # Commit happens asynchronously on the redis queue (an inline call path
    # exists in history for debugging).
    rq_job = django_rq.enqueue(rq_workers.commit_staging_samples, job.id, staging.id)
    job.rq_id = rq_job.id
    job.save()
    messages.add_message(request, messages.SUCCESS, "Pending changes job submitted")
    return HttpResponse(json.dumps({'success': True}), content_type="application/json")
# Take a staging object and commit it. This now actually happens in redis-queue workers.
def convert_and_commit_json(staging_object):
    """Apply the staged sample edits stored in staging_object.json to the database.

    Rows keyed 'new' are created; all others are updated in place.
    """
    # The payload is double-encoded JSON (see submit_edit_samples), hence two loads.
    json_objects = json.loads(json.loads(staging_object.json))
    for sample_id, sample_fields in json_objects['objects'].items():
        if sample_id == 'new':
            # Previously the created Sample was bound to the name
            # ``staging_object``, clobbering the function parameter; the
            # result was never used, so just create the row.
            Sample.objects.create(**sample_fields)
        else:
            update_sample_model(sample_id, sample_fields)
# Take the number of days and return the jobs
def get_user_jobs(request):
    """Return the requesting user's jobs from the last ``days`` days as JSON.

    POST params: ``days`` (default 7) and ``reverse`` (default 0).  When
    reverse is set, only the newest five jobs are returned, newest first.
    """
    days = request.POST.get("days", 7)
    time_threshold = datetime.now() - timedelta(days=int(days))
    recent = Job.objects.filter(user=request.user, datetime_created__gt=time_threshold)
    # POST values arrive as *strings*, so also accept "0": the previous
    # ``== 0`` comparison only matched the integer default, which made any
    # posted reverse value (including "0") take the reverse branch.
    if str(request.POST.get("reverse", 0)) == "0":
        jobs = recent.order_by('pk')
    else:
        jobs = recent.order_by('-pk')[:5]
    data = serializers.serialize('json', jobs)
    return HttpResponse(data, content_type="application/json")
# Get the pending commits
def get_pending_commits(request):
    """Serialize the user's staging objects that are not yet finished (status 1-4)."""
    pending = Staging.objects.filter(user=request.user, status__in=[1, 2, 3, 4])
    return HttpResponse(serializers.serialize('json', pending.order_by('pk')),
                        content_type="application/json")
# Get the pending commits
def clear_all_non_completed_staging_tasks(request):
    """Soft-delete (status 5) every staging object of the user that is not committed."""
    for staging in Staging.objects.filter(user=request.user, status__in=[1, 2, 3]):
        staging.status = 5
        staging.datetime_deleted = datetime.now()
        staging.save()
    messages.add_message(request, messages.SUCCESS, "All non-committed changes removed")
    return HttpResponse(json.dumps({'success': True}), content_type="application/json")
# Recommit changes from the view-commit page
def recommit_staging_samples(request):
    """Merge re-submitted form values into an existing staging object and
    re-run the validation check.

    Responds with {'failure': True} when the staging object does not belong
    to the requesting user, otherwise {'success': True}.
    """
    # 1. Update fields in staging json with new entries
    # 2. Schedule a new validation check job
    stagings = Staging.objects.filter(pk=request.POST['staging_id'],user=request.user)
    if len(stagings) == 0:
        # Either a bad id or someone else's staging object.
        data = {'failure':True}
        return HttpResponse(json.dumps(data), content_type = "application/json")
    form_data = QueryDict(request.POST['form_data'])
    edited_pks = extract_edited_pks(request.POST.getlist('edited_pks'))
    staging = stagings[0]
    # The stored payload is double json-encoded (see submit_edit_samples).
    json_data = json.loads(json.loads(staging.json))
    objects = extract_objects_for_recommit(json_data['objects'],edited_pks,form_data)
    json_data['objects'] = objects
    staging.json = json.dumps(json.dumps(json_data))
    staging.status = 1
    job = Job.objects.create(user=request.user,job_type='check_staging_samples',status=1,text_to_show=staging.json,datetime_created=datetime.now())
    staging.job = job
    staging.save()
    # NOTE(review): unlike submit_edit_samples, the check runs *inline* here
    # rather than via django_rq.enqueue, so rq_id is never set on the job —
    # confirm whether that is intentional.
    rq_workers.check_staging_samples(job.id,staging.id)
    job.save()
    data = {'success':True}
    return HttpResponse(json.dumps(data), content_type = "application/json")
# Loop over the changed edits and add them to the json
def extract_objects_for_recommit(objects, edited_pks, form_data):
    """Overlay the re-submitted form values onto the staged objects dict."""
    objects.update(
        (pk, extract_data_for_recommit(pk, objects[pk], form_data))
        for pk in edited_pks
    )
    return objects
def extract_data_for_recommit(pk, object, form_data):
    """Copy every '<field>-<pk>' form value whose pk matches into ``object``.

    Form keys are shaped '<field>-<pk>'; any other key (e.g. a csrf token
    with no dash) is skipped — the previous ``key.split("-")[1]`` raised
    IndexError on such keys.
    """
    for key, value in form_data.items():
        key_array = key.split("-")
        if len(key_array) < 2:
            continue
        sample_field_key = key_array[0]
        field_pk = key_array[1]
        if field_pk == pk:
            object[sample_field_key] = value
    return object
# File upload via ajax
def submit_file_upload(request):
    """Validate and store an uploaded .xlsx file, then queue a validation job."""
    project_id = request.POST['project_id']
    file_obj = request.FILES['file']
    folder_path = os.path.join(default_storage.location, 'user_uploads', project_id)
    # makedirs also creates the 'user_uploads' parent on a fresh deployment,
    # where the old os.mkdir() would raise FileNotFoundError.
    os.makedirs(folder_path, exist_ok=True)
    # Check the extension.  Use the *last* dot-separated part so names such
    # as 'data.v2.xlsx' are accepted, and extension-less names are rejected
    # instead of raising IndexError (the old code read index 1).
    filename_array = str.split(file_obj.name, ".")
    allowed_filetypes = ['xlsx']
    if len(filename_array) < 2 or filename_array[-1] not in allowed_filetypes:
        return HttpResponse(json.dumps({'failure':True,'error':'file type not allowed'}), content_type = "application/json")
    file_obj.name = build_and_check_file_name(folder_path, 0, file_obj.name)
    full_path = os.path.join(folder_path, file_obj.name)
    # NOTE(review): temporary_file_path() only exists on disk-backed uploads
    # (TemporaryUploadedFile); small in-memory uploads would fail here —
    # confirm FILE_UPLOAD_HANDLERS forces disk uploads.
    shutil.move(file_obj.temporary_file_path(), full_path)
    uploaded_file = File.objects.create(project=Project.objects.get(pk=project_id),
                                        user_uploaded=request.user,
                                        filename=file_obj.name,
                                        filepath=full_path,
                                        datetime_uploaded=datetime.now(),
                                        datetime_last_accessed=datetime.now(),
                                        type='import')
    staging_object = Staging.objects.create(status=1,
                                            user=request.user,
                                            project=Project.objects.get(pk=project_id),
                                            type='File upload',
                                            file=uploaded_file)
    job = Job.objects.create(user=request.user, job_type='validate_sample_file', status=1, datetime_created=datetime.now())
    staging_object.job = job
    staging_object.save()
    # Validation runs asynchronously on the redis queue.
    rq_job = django_rq.enqueue(rq_workers.validate_sample_file, job.id, staging_object.id, uploaded_file.id)
    job.rq_id = rq_job.id
    job.save()
    messages.add_message(request, messages.SUCCESS, "Sample changes submitted for validation")
    return HttpResponse(json.dumps({'success':True}), content_type = "application/json")
def get_files(request):
    """List uploaded files for every project in the user's groups, newest first."""
    user_groups = Group.objects.filter(user=request.user)
    files = File.objects.filter(
        project__in=Project.objects.filter(group__in=user_groups)).order_by('-pk')
    usernames = {}
    projectnames = {}
    for f in files:
        # Fall back to an italicised placeholder when the username is blank.
        usernames[f.pk] = f.user_uploaded.username if f.user_uploaded.username else "<i>unknown</i>"
        projectnames[f.pk] = f.project.title
    payload = {'success': True,
               'files': serializers.serialize('json', files),
               'usernames': usernames,
               'projectnames': projectnames}
    return HttpResponse(json.dumps(payload), content_type="application/json")
def delete_staging_object(request):
    """Delete one of the user's staging objects, removing any uploaded file with it."""
    matches = Staging.objects.filter(pk=request.POST['staging_id'], user=request.user)
    if len(matches) == 0:
        # Bad id or not owned by this user.
        return HttpResponse(json.dumps({'failure': True}), content_type="application/json")
    staging_object = matches[0]
    if staging_object.file:
        attached = staging_object.file
        os.remove(attached.filepath)
        attached.delete()
    staging_object.delete()
    messages.add_message(request, messages.SUCCESS, "Sample changes deleted")
    return HttpResponse(json.dumps({'success': True}), content_type="application/json")
def export_samples(request):
    """Queue an excel export of the selected samples for the current user."""
    job = Job.objects.create(user=request.user, job_type='export_to_excel',
                             status=1, datetime_created=datetime.now())
    sample_pks = json.loads(request.POST['sample_pks'])
    # Export happens asynchronously on the redis queue.
    rq_job = django_rq.enqueue(rq_workers.export_to_excel, job.id, request.user, sample_pks)
    job.rq_id = rq_job.id
    job.save()
    messages.add_message(request, messages.SUCCESS, "Sample scheduled for export")
    return HttpResponse(json.dumps({'success': True}), content_type="application/json")
# Ajax search samples
def search_samples(request):
    """Search samples by several id fields; auditors may see every sample."""
    if request.user.groups.filter(name='Auditors').exists():
        samples = Sample.objects.all()
    else:
        samples = Sample.objects.filter(
            project__in=Project.objects.filter(
                group__in=Group.objects.filter(user=request.user)))
    # Apply each non-empty search term as a case-insensitive substring filter.
    for field in ('sample_id', 'box_id', 'freezer_id', 'parent_id',
                  'tissue_bank_reference'):
        term = request.POST[field]
        if term != '':
            samples = samples.filter(**{field + '__icontains': term.strip()})
    samples = samples.order_by('pk')
    # Extra display data keyed by sample pk.
    sample_extras = {}
    for sample in samples:
        sample_extras[sample.pk] = {
            'project_title': sample.project.title if sample.project else "None",
            'last_edited_user': (sample.last_edited_user.username
                                 if sample.last_edited_user else "None"),
        }
    return HttpResponse(json.dumps({'success': True,
                                    'samples': serializers.serialize('json', samples),
                                    'sample_extras': sample_extras}),
                        content_type="application/json")
def get_project_studies(request):
    """Return the distinct study titles belonging to the given project."""
    project_id = request.POST['project_id']
    if not project_id:
        return HttpResponse(json.dumps({'failure': True}), content_type="application/json")
    study_titles = Sample.objects.filter(
        project__in=Project.objects.filter(pk=int(project_id))
    ).values('study_title').distinct()
    titles = [entry['study_title'] for entry in study_titles]
    return HttpResponse(json.dumps({'success': True, 'study_titles': titles}),
                        content_type="application/json")
def delete_samples(request):
    """Soft-delete the posted samples, recording the disposal route."""
    disposal_route = request.POST['disposal_route']
    for sample in Sample.objects.filter(pk__in=json.loads(request.POST['sample_pks'])):
        sample.is_deleted = True
        sample.delete_method = disposal_route
        sample.save()
    return HttpResponse(json.dumps({'success': True}), content_type="application/json")
def move_samples_to_project(request):
    """Re-assign the posted samples to another project."""
    target = Project.objects.get(pk=request.POST['project'])
    for sample in Sample.objects.filter(pk__in=json.loads(request.POST['sample_pks'])):
        sample.project = target
        sample.save()
    return HttpResponse(json.dumps({'success': True}), content_type="application/json")
<reponame>zhanqxun/cv_fish
import win32com
import win32com.client
# When the package is frozen (e.g. py2exe), __path__ is a plain string rather
# than a list; in that case register the extension module manually so
# 'win32com.adsi.adsi' still resolves.
if type(__path__)==type(''):
    # For freeze to work!
    import sys
    try:
        import adsi
        sys.modules['win32com.adsi.adsi'] = adsi
    except ImportError:
        pass
else:
    # See if we have a special directory for the binaries (for developers)
    win32com.__PackageSupportBuildPath__(__path__)
# Some helpers
# We want to _look_ like the ADSI module, but provide some additional
# helpers.
# Of specific note - most of the interfaces supported by ADSI
# derive from IDispatch - thus, you get the custome methods from the
# interface, as well as via IDispatch.
import pythoncom
from adsi import *
LCID = 0  # default locale id used for IDispatch invocations
# Cached PyIID-based type objects for quick type checks on COM pointers.
IDispatchType = pythoncom.TypeIIDs[pythoncom.IID_IDispatch]
# NOTE(review): the bare name 'adsi' is only bound by the frozen-import branch
# above (or py2 implicit relative import) — confirm it resolves here under py3.
IADsContainerType = pythoncom.TypeIIDs[adsi.IID_IADsContainer]
def _get_good_ret(ob,
                  # Named arguments used internally
                  resultCLSID = None):
    """Wrap *ob* in an ADSIDispatch when it exposes IDispatch; otherwise return it as-is."""
    assert resultCLSID is None, "Now have type info for ADSI objects - fix me!"
    # Objects without Invoke don't support IDispatch - hand them back untouched.
    if not hasattr(ob, "Invoke"):
        return ob
    import win32com.client.dynamic
    return win32com.client.dynamic.Dispatch(
        ob, "Dispatch wrapper around %r" % ob, ADSIDispatch)
class ADSIEnumerator:
    """Sequential enumerator over an ADSI container via the raw ADsEnumerate* API.

    Supports only strictly sequential, 0-based indexing (the old-style
    __getitem__ iteration protocol): each access must ask for last index + 1.
    """
    def __init__(self, ob):
        # Query the object for the container interface.
        self._cont_ = ob.QueryInterface(IID_IADsContainer)
        self._oleobj_ = ADsBuildEnumerator(self._cont_) # a PyIADsEnumVARIANT
        self.index = -1  # last index successfully returned
    def __getitem__(self, index):
        return self.__GetIndex(index)
    def __call__(self, index):
        return self.__GetIndex(index)
    def __GetIndex(self, index):
        if type(index)!=type(0): raise TypeError("Only integer indexes are supported for enumerators")
        if index != self.index + 1:
            # Index requested out of sequence.
            raise ValueError("You must index this object sequentially")
        self.index = index
        result = ADsEnumerateNext(self._oleobj_, 1)
        if len(result):
            return _get_good_ret(result[0])
        # Failed - reset for next time around.
        self.index = -1
        self._oleobj_ = ADsBuildEnumerator(self._cont_) # a PyIADsEnumVARIANT
        raise IndexError("list index out of range")
class ADSIDispatch(win32com.client.CDispatch):
    """CDispatch subclass that preserves ADSI semantics on wrapped return values."""
    def _wrap_dispatch_(self, ob, userName = None, returnCLSID = None, UnicodeToString=None):
        # Wrap a returned IDispatch pointer in another ADSIDispatch.
        assert UnicodeToString is None, "this is deprectated and will be removed"
        if not userName:
            # NOTE(review): "AD<PASSWORD>" looks like an automated redaction
            # artifact in this copy of the file; upstream pywin32 uses a plain
            # descriptive default name here — confirm against upstream before
            # relying on this string.
            userName = "AD<PASSWORD>"
        olerepr = win32com.client.dynamic.MakeOleRepr(ob, None, None)
        return ADSIDispatch(ob, olerepr, userName)
    def _NewEnum(self):
        # Prefer the ADSI container enumerator; fall back to the generic COM one.
        try:
            return ADSIEnumerator(self)
        except pythoncom.com_error:
            # doesnt support it - let our base try!
            return win32com.client.CDispatch._NewEnum(self)
    def __getattr__(self, attr):
        # Try the raw COM object first, then the dynamic-dispatch machinery.
        try:
            return getattr(self._oleobj_, attr)
        except AttributeError:
            return win32com.client.CDispatch.__getattr__(self, attr)
    def QueryInterface(self, iid):
        ret = self._oleobj_.QueryInterface(iid)
        return _get_good_ret(ret)
# We override the global methods to do the right thing.
_ADsGetObject = ADsGetObject # The one in the .pyd
def ADsGetObject(path, iid = pythoncom.IID_IDispatch):
    """Bind to *path* and wrap the result so IDispatch objects get ADSI semantics."""
    raw = _ADsGetObject(path, iid)
    return _get_good_ret(raw)
_ADsOpenObject = ADsOpenObject
def ADsOpenObject(path, username, password, reserved = 0, iid = pythoncom.IID_IDispatch):
    """Bind to *path* with explicit credentials, wrapping the result like ADsGetObject."""
    raw = _ADsOpenObject(path, username, password, reserved, iid)
    return _get_good_ret(raw)
|
<filename>change_detection/archive/models/smallunet.py
# <NAME>
# https://rcdaudt.github.io/
# <NAME>., <NAME>., & <NAME>. "Fully convolutional siamese networks for change detection". In 2018 25th IEEE International Conference on Image Processing (ICIP) (pp. 4063-4067). IEEE.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.padding import ReplicationPad2d
class SmallUnet(nn.Module):
    """EF segmentation network.

    A small early-fusion U-Net for change detection: the two input images are
    concatenated along the channel axis and passed through a 2-stage
    encoder/decoder with skip connections.  ``input_nbr`` must therefore equal
    the *total* channel count of the concatenated pair.  The output is
    per-pixel log-probabilities over ``label_nbr`` classes.
    """
    def __init__(self, input_nbr, label_nbr):
        # input_nbr: channels of the fused (concatenated) input
        # label_nbr: number of output classes
        super(SmallUnet, self).__init__()
        self.input_nbr = input_nbr
        # Encoder stage 1: input_nbr -> 16 channels.
        self.conv11 = nn.Conv2d(input_nbr, 16, kernel_size=3, padding=1)
        self.bn11 = nn.BatchNorm2d(16)
        self.do11 = nn.Dropout2d(p=0.2)
        self.conv12 = nn.Conv2d(16, 16, kernel_size=3, padding=1)
        self.bn12 = nn.BatchNorm2d(16)
        self.do12 = nn.Dropout2d(p=0.2)
        # Encoder stage 2: 16 -> 32 channels.
        self.conv21 = nn.Conv2d(16, 32, kernel_size=3, padding=1)
        self.bn21 = nn.BatchNorm2d(32)
        self.do21 = nn.Dropout2d(p=0.2)
        self.conv22 = nn.Conv2d(32, 32, kernel_size=3, padding=1)
        self.bn22 = nn.BatchNorm2d(32)
        self.do22 = nn.Dropout2d(p=0.2)
        # Decoder stage 2: upsample, concat with the stage-2 skip (32+32=64 in).
        self.upconv2 = nn.ConvTranspose2d(32, 32, kernel_size=3, padding=1, stride=2, output_padding=1)
        self.conv22d = nn.ConvTranspose2d(64, 32, kernel_size=3, padding=1)
        self.bn22d = nn.BatchNorm2d(32)
        self.do22d = nn.Dropout2d(p=0.2)
        self.conv21d = nn.ConvTranspose2d(32, 16, kernel_size=3, padding=1)
        self.bn21d = nn.BatchNorm2d(16)
        self.do21d = nn.Dropout2d(p=0.2)
        # Decoder stage 1: upsample, concat with the stage-1 skip (16+16=32 in).
        self.upconv1 = nn.ConvTranspose2d(16, 16, kernel_size=3, padding=1, stride=2, output_padding=1)
        self.conv12d = nn.ConvTranspose2d(32, 16, kernel_size=3, padding=1)
        self.bn12d = nn.BatchNorm2d(16)
        self.do12d = nn.Dropout2d(p=0.2)
        self.conv11d = nn.ConvTranspose2d(16, label_nbr, kernel_size=3, padding=1)
        self.sm = nn.LogSoftmax(dim=1)
    def forward(self, x1, x2):
        """Forward method."""
        # Early fusion: concatenate the two images along the channel axis.
        x = torch.cat((x1, x2), 1)
        # Stage 1
        x11 = self.do11(F.relu(self.bn11(self.conv11(x))))
        x12 = self.do12(F.relu(self.bn12(self.conv12(x11))))
        x1p = F.max_pool2d(x12, kernel_size=2, stride=2)
        # Stage 2
        x21 = self.do21(F.relu(self.bn21(self.conv21(x1p))))
        x22 = self.do22(F.relu(self.bn22(self.conv22(x21))))
        x2p = F.max_pool2d(x22, kernel_size=2, stride=2)
        # Stage 2d: pad the upsampled map to the skip's size before concat
        # (handles odd input sizes where pooling truncated a row/column).
        x2d = self.upconv2(x2p)
        pad2 = ReplicationPad2d((0, x22.size(3) - x2d.size(3), 0, x22.size(2) - x2d.size(2)))
        x2d = torch.cat((pad2(x2d), x22), 1)
        x22d = self.do22d(F.relu(self.bn22d(self.conv22d(x2d))))
        x21d = self.do21d(F.relu(self.bn21d(self.conv21d(x22d))))
        # Stage 1d
        x1d = self.upconv1(x21d)
        pad1 = ReplicationPad2d((0, x12.size(3) - x1d.size(3), 0, x12.size(2) - x1d.size(2)))
        x1d = torch.cat((pad1(x1d), x12), 1)
        x12d = self.do12d(F.relu(self.bn12d(self.conv12d(x1d))))
        x11d = self.conv11d(x12d)
        return self.sm(x11d)
|
<reponame>acady14/transitEval
__author__ = 'acady'
import googlemaps
import json
import time
import csv
import math
import numpy as np
import datetime as dt
import pickle
import random
from RouteClass import route
from GmapsConnection import *
from geometry import *
# Timestamped name (YYYYMMDD_HHMM-ish) used for all output files of this run.
query_name=str(dt.datetime.now().year)
query_name+=str(dt.datetime.now().month)
query_name+=str(dt.datetime.now().day)
query_name+=("_")
query_name+=(str(dt.datetime.now().hour))
query_name+=(str(dt.datetime.now().minute))
print(query_name)
########################################################################################################################
#################################////////////PARAMETER ENTRY\\\\\\\\\\\\\\##############################################
########################################################################################################################
# first, we need to read in our API key. If you need an API key, they are available from Google for free up to 2500
# queries per day
with open('transitEvalApiKey.txt','r') as f:
    apikey=f.read()
# Travel modes to query from the Google Maps directions API.
modeList=['transit']#,'bicycling']
#Time to use for current pull...COMPLETED TIMES:
timesList=(dt.datetime(2016,6,16,7),)
# Other data that is useful for the program to run...
#Polygon #2: this polygon encompasses 3 miles north of Expo line stations and 4 miles south
# (lat, lng) vertices of the study-area polygon.
polygon=[(34.036763,-118.534884),
         (34.057280,-118.500412),
         (34.079303,-118.440661),
         (34.067740,-118.403639),
         (34.029623,-118.388831),
         (33.967029,-118.394050),
         (33.965821,-118.456077)
         ]
#there was an issue with roads changing, especially in downtown santa monica!
# Expo line station coordinates (lat, lng)...
stationLocsll={"Downtown Santa Monica":(34.013666,-118.490970),
               "17th St-SMC":(34.023648,-118.479664),
               "26th-Bergamont Station":(34.027691,-118.468382),
               "Expo and Bundy":(34.031800,-118.453422),
               "Expo and Sepulveda":(34.035371,-118.434213),
               "Westwood and Rancho Park":(34.036517,-118.425091),
               "Palms Station":(34.029089,-118.403802),
               "Downtown Culver City":(34.027854,-118.388894),
               "La Cienega and Jefferson":(34.026359,-118.372171)
               }
# ...and their street addresses, used when the API needs an address string.
stationLocsadd={"Downtown Santa Monica":'Downtown Santa Monica, 402 Colorado Ave, Santa Monica, CA 90401',
                "17th St-SMC":'17th St/SMC Station, 1610 Colorado Avenue, Santa Monica, CA 90404',
                "26th-Bergamont Station":'Bergamot Station, 2525 Michigan Avenue, Santa Monica, CA 90404',
                "Expo and Bundy":'Expo/Bundy Station, Exposition Corridor Bike Path, Los Angeles, CA 90064',
                "Expo and Sepulveda":'Expo/Sepulveda Station - Expo Metro Line, 11295 Exposition Boulevard, Los Angeles, CA 90064',
                "Westwood and Rancho Park":'Westwood/Rancho Park LA Metro Expo Line station, 2600 Westwood Boulevard, Los Angeles, CA 90064',
                "Palms Station":'Palms Station, 10021 National Boulevard, Los Angeles, CA 90034',
                "Downtown Culver City":'Culver City, 8817 Washington Boulevard, Culver City, CA 90232',
                "La Cienega and Jefferson":'La Cienega / Jefferson Station'
                }
directionList=['to_station','from_station']
# Column headers for the CSV outputs; must line up with route.line_to_write().
header_row=[ 'driveFactor','origin id','route id','originAddress','direction','originLoc lat','originLoc lng','endAddress',
             'end lat','end lng','totDist', 'totTime', 'railStation', 'mode','time of day','exact time','numLines',
             'walkTime', 'walkDist','busTime','busDist','agencies',
             'fare','waitTime','1_stopName','1_stopLat','1_stopLng','1_line','2_stopName','2_stopLat',
             '2_stopLng','2_line','3_stopName','3_stopLat','3_stopLng','3_line'
             ]
polygonHeaderRow=header_row+['polypointlat','polypointlng','polyorder','polyid','exclude']
def calc_ave_indices(objList):
    """Group query metrics by mode/origin/station for later averaging.

    Returns {ID: {metric: [values...]}}.  NOTE(review): per the author's own
    comment this function is unfinished — the averaging loop at the bottom
    rebinds the local ``item`` without writing back into ``aveIndices``, so
    the returned dict still holds the raw value lists.
    """
    #this doesn't quite work yet...need to experiment more with it.
    aveIndices = dict()
    i = 0  # NOTE(review): never used
    for query in objList:
        mode = query.mode
        origin = query.originID
        time = query.timeOfDay  # NOTE(review): never used in the ID below
        ID=mode+"_"+str(origin)+query.railStation
        try:
            # Existing group: append this query's metrics.
            aveIndices[ID]['driveFactors'].append(query.driveFactor)
            aveIndices[ID]['totTime'].append(query.totTime)
            aveIndices[ID]['walkDistance'].append(query.walkDist)
            aveIndices[ID]['walkTime'].append(query.walkTime)
            #aveIndices[ID]['stations'].append(query.railStation)
            aveIndices[ID]['transfers'].append(query.numLines)
            try:
                # First transit stop's (lat, lng), when present.
                aveIndices[ID]['stops'].append([query.stops[0][1][0],query.stops[0][1][1]])
            except (TypeError, IndexError):
                aveIndices[ID]['stops'].append([None,None])
        except:
            # First sighting of this ID: create the group, then append.
            # (A bare except is used as the KeyError path; it also hides any
            # other error raised above.)
            aveIndices[ID]={'driveFactors':[],'totTime':[],'walkDistance':[],'walkTime':[],'stations':[],
                            'transfers':[],'stops':[]}
            aveIndices[ID]['driveFactors'].append(query.driveFactor)
            aveIndices[ID]['totTime'].append(query.totTime)
            aveIndices[ID]['walkDistance'].append(query.walkDist)
            aveIndices[ID]['walkTime'].append(query.walkTime)
            #aveIndices[ID]['stations'].append(query.railStation)
            aveIndices[ID]['transfers'].append(query.numLines)
            try:
                aveIndices[ID]['stops'].append([query.stops[0][1][0],query.stops[0][1][1]])
            except (TypeError, IndexError):
                aveIndices[ID]['stops'].append([None,None])
    # Experimental reduction pass — see the NOTE in the docstring: ``item`` is
    # a local rebinding, so none of this mutates aveIndices.
    for ID in aveIndices:
        for item in aveIndices[ID].values():
            try:
                item=np.mean(item)
            except:
                print(item)
            try:
                if len(set(item))>1:
                    if item[0]==item[1] or item[0]==item[2]:
                        item=item[0]
                    elif item[1]==item[2]:
                        item=item[1]
                    else:
                        item=item[1]
            except:
                pass
            print(item,2)
    return aveIndices
def calc_min_indices(objList):
    """Find, for each (mode, origin, time-of-day) combination, the fastest query.

    :param objList: sequence of query objects with mode, originID, timeOfDay
        and totTime attributes
    :return: {UID: [index, totTime]} where index points into objList
    """
    minIndices = dict()
    i = 0
    for query in objList:
        mode = query.mode
        origin = query.originID
        time = query.timeOfDay
        UID = mode + "_" + str(origin) + "_" + str(time)
        try:
            # Keep only the fastest query seen so far for this UID.
            if query.totTime < minIndices[UID][1]:
                minIndices[UID] = [i, query.totTime]
        except KeyError:
            # First query for this UID.
            minIndices[UID] = [i, query.totTime]
        i += 1
    # Bug fix: this previously ended with a bare ``return`` and always
    # produced None, which made write_raw_datafiles crash on .values().
    return minIndices
def read_picklefiles(filename, base_dir='C:\\Users\\acady\\Documents\\courses\\y2Q3\\ClientPA\\queries\\'):
    """Load a pickled list of query objects named <filename>.p.

    :param filename: file stem without the '.p' suffix
    :param base_dir: directory prefix (including trailing separator); defaults
        to the historical hard-coded location so existing calls keep working.
    """
    # The with-statement closes the file; the old explicit close() was redundant.
    with open(base_dir + filename + '.p', 'rb') as picklefile:
        return pickle.load(picklefile)
def write_raw_datafiles(objectList,which='all'):
    """Write the collected query objects out as CSV and/or pickle files.

    :param objectList: list of route/query objects exposing line_to_write()
    :param which: 'all', 'minave' (cleaned minimum CSV), 'raw' (full CSV),
        or 'pickle'
    """
    if which=='all' or which == 'minave':
        try:
            # NOTE(review): if calc_min_indices returns None (it historically
            # ended with a bare return), the .values() call below will raise.
            minIndices=calc_min_indices(objectList)
            aveIndices=calc_ave_indices(objectList)
            #print(aveIndices)
        except NameError:
            print("error!") #placeholder function...this block is really here to catch exceptions and direct to a previous
            minIndices={}
            # query stored in a pickle file.
            # read_picklefiles('201657_204') #read in a previous pickle file
        with open(query_name+'_clean_min.csv','w') as outfile:
            writer = csv.writer(outfile, lineterminator='\n')
            writer.writerow(header_row)
            for row in minIndices.values():
                try:
                    writer.writerow(objectList[row[0]].line_to_write())
                except:
                    # Best-effort: skip rows that fail to serialize.
                    continue
            outfile.close()  # NOTE(review): redundant inside the with-block
        print(str(len(minIndices)),'cleaned queries successfully written out')
    if which == 'all' or which=="raw":
        #now write out the raw routes to a csv file...
        with open(query_name+'.csv','w') as outfile:
            writer=csv.writer(outfile, lineterminator='\n')
            writer.writerow(header_row)
            for row in objectList:
                try:
                    writer.writerow(row.line_to_write())
                except:
                    # Best-effort: skip rows that fail to serialize.
                    pass
        print(str(len(objectList))+' raw queries written')
    if which == 'all' or which =='pickle':
        # and finally, write out to a pickle file. This might not work...
        with open(query_name+'.p','wb') as picklefile:
            pickle.dump(objectList,picklefile)
        print(str(len(objectList))+' queries written to pickle file')
from __future__ import annotations

import logging
import os
import shutil
from distutils.dir_util import copy_tree
from functools import cached_property, lru_cache
from typing import List, Tuple

import git
from ogr.abstract import GitProject

from packit.config import PackageConfig
from packit.constants import DG_PR_COMMENT_KEY_SG_PR, DG_PR_COMMENT_KEY_SG_COMMIT
from packit.downstream_checks import get_check_by_name
from packit.local_project import LocalProject
from packit.utils import get_rev_list_kwargs, FedPKG, run_command
from packit.watcher import SourceGitCheckHelper
logger = logging.getLogger(__name__)
class Transformator:
    """
    Describes a relation between a source-git and a dist-git repository.
    Allows multiple actions and transformation operations of the repos:
    creating archives, patches and SRPMs, syncing content and opening
    dist-git pull requests.
    """

    def __init__(
        self,
        package_config: PackageConfig,
        sourcegit: LocalProject,
        distgit: LocalProject,
        version: str | None = None,
        fas_username: str | None = None,
    ) -> None:
        """
        :param package_config: configuration of the package we work with
        :param sourcegit: local checkout of the source-git repository
        :param distgit: local checkout of the dist-git repository
        :param version: package version; read from the specfile when not set
        :param fas_username: FAS login used for authenticated fedpkg calls
        """
        self.package_config = package_config
        self.sourcegit = sourcegit
        self.distgit = distgit
        self._version = version
        # Options forwarded to git's rev-list/iter_commits
        # (e.g. first-parent traversal); configurable via package metadata.
        self.rev_list_option_args = get_rev_list_kwargs(
            self.package_config.metadata.get("rev_list_option", ["first_parent"])
        )
        self.fas_username = fas_username
        self.upstream_name = self.package_config.metadata["upstream_name"]
        # Fall back to the upstream name when no package name is configured.
        self.package_name = (
            self.package_config.metadata["package_name"] or self.upstream_name
        )

    # NOTE(review): lru_cache on instance methods keeps every instance alive
    # for the cache's lifetime (flake8-bugbear B019); once Python >= 3.8 is
    # guaranteed, functools.cached_property is the cleaner replacement.
    @property
    @lru_cache()
    def source_specfile_path(self) -> str:
        """Absolute path to the specfile inside the source-git checkout."""
        return os.path.join(
            self.sourcegit.working_dir, self.package_config.specfile_path
        )

    @property
    @lru_cache()
    def dist_specfile_path(self) -> str:
        """Absolute path to the specfile inside the dist-git checkout."""
        return os.path.join(
            self.distgit.working_dir, f"{self.package_name}.spec"
        )

    @property
    @lru_cache()
    def archive(self) -> str:
        """
        Path of the archive generated from the source-git.
        The archive is created on first access and cached afterwards.
        """
        return self.create_archive()

    @property
    @lru_cache()
    def version(self) -> str:
        """Package version: the explicit one, or the one from the specfile."""
        return self._version or self.version_from_specfile

    @property
    @lru_cache()
    def version_from_specfile(self) -> str:
        """
        Version extracted from the specfile via rpmspec.
        """
        version_raw = run_command(
            cmd=[
                "rpmspec",
                "-q",
                "--qf",
                "'%{version}\\n'",
                "--srpm",
                self.source_specfile_path,
            ],
            output=True,
            fail=True,
        )
        # The query format wraps the version in single quotes; strip them
        # together with the trailing newline.
        version = version_raw.strip("'\\\n")
        return version

    @property
    @lru_cache()
    def fedpkg(self) -> FedPKG:
        """
        Instance of the FedPKG class (wrapper on top of the fedpkg command.)
        """
        return FedPKG(
            fas_username=self.fas_username,
            repo_path=self.distgit.git_url,
            directory=self.distgit.working_dir,
        )

    def clean(self) -> None:
        """
        Clean the temporary dir. (Not implemented yet.)
        """
        pass

    def create_archive(
        self, path: str = None, name="{project}-{version}.tar.gz"
    ) -> str:
        """
        Create archive from the provided git repo. The archive needs to be able to be used to build
        the project using rpmbuild command. The expectation is that the directory within the archive
        should be named {project}-{version}.

        :param path: target path of the archive; defaults to the dist-git dir
        :param name: file-name template for the archive
        :return: str, path to the archive
        """
        archive_name = name.format(project=self.upstream_name, version=self.version)
        archive_path = path or os.path.join(self.distgit.working_dir, archive_name)
        # Keep the synced files out of the tarball via export-ignore.
        self.add_exclude_redhat_to_gitattributes()
        with open(archive_path, "wb") as fp:
            self.sourcegit.git_repo.archive(
                fp,
                prefix=f"./{self.upstream_name}-{self.version}/",
                worktree_attributes=True,
            )
        logger.info(f"Archive created: {archive_path}")
        return archive_path

    def add_exclude_redhat_to_gitattributes(self) -> None:
        """
        Mark every synced file/dir as 'export-ignore' in .gitattributes so
        `git archive` leaves it out of the generated tarball.
        TODO: We need to use upstream release archive directly
        """
        logger.debug("Adding 'redhat/ export-ignore' to .gitattributes")
        gitattributes_path = os.path.join(self.sourcegit.working_dir, ".gitattributes")
        with open(gitattributes_path, "a") as gitattributes_file:
            for file in self.package_config.synced_files:
                file_in_working_dir = os.path.join(self.sourcegit.working_dir, file)
                if os.path.isdir(file_in_working_dir):
                    # Directories need the trailing slash in the pattern.
                    gitattributes_file.writelines([f"{file}/ export-ignore\n"])
                elif os.path.isfile(file_in_working_dir):
                    gitattributes_file.writelines([f"{file} export-ignore\n"])

    @lru_cache()
    def create_srpm(self) -> str:
        """
        Build an SRPM from the dist-git specfile and the generated archive.

        :return: path to the created SRPM
        """
        logger.debug("Start creating of the SRPM.")
        archive = self.create_archive()
        logger.debug(f"Using archive: {archive}")
        output = run_command(
            cmd=[
                "rpmbuild",
                "-bs",
                f"{self.dist_specfile_path}",
                "--define",
                f"_sourcedir {self.distgit.working_dir}",
                "--define",
                f"_specdir {self.distgit.working_dir}",
                "--define",
                # fixed typo: the rpm macro is _builddir, not _buildir
                f"_builddir {self.distgit.working_dir}",
                "--define",
                f"_srcrpmdir {self.distgit.working_dir}",
                "--define",
                f"_rpmdir {self.distgit.working_dir}",
            ],
            fail=True,
            output=True,
        )
        # rpmbuild reports "Wrote: <path>"; use strip() (not rstrip()) so the
        # space after the colon does not become part of the returned path.
        srpm_path = output.split(":")[1].strip()
        logger.info(f"SRPM created: {srpm_path}")
        return srpm_path

    @lru_cache()
    def get_commits_to_upstream(
        self, upstream: str, add_usptream_head_commit=False
    ) -> List[git.Commit]:
        """
        Return the list of commits from current branch to upstream rev/tag.

        :param upstream: str -- git branch or tag
        :param add_usptream_head_commit: [sic] also prepend the upstream head
            commit itself to the returned list
        :return: list of commits (last commit on the current branch.).
        """
        if upstream in self.sourcegit.git_repo.tags:
            upstream_ref = upstream
        else:
            upstream_ref = f"origin/{upstream}"
            if upstream_ref not in self.sourcegit.git_repo.refs:
                raise Exception(
                    f"Neither upstream branch {upstream_ref} nor tag {upstream} found."
                )
        commits = list(
            self.sourcegit.git_repo.iter_commits(
                rev=f"{upstream_ref}..{self.sourcegit._branch}",
                reverse=True,
                **self.rev_list_option_args,
            )
        )
        if add_usptream_head_commit:
            commits.insert(0, self.sourcegit.git_repo.refs[f"{upstream_ref}"].commit)
        logger.debug(
            f"Delta ({upstream_ref}..{self.sourcegit._branch}): {len(commits)}"
        )
        return commits

    def create_patches(self, upstream: str = None) -> List[Tuple[str, str]]:
        """
        Create patches from downstream commits.

        :param upstream: str -- git branch or tag
        :return: [(patch_name, msg)] list of created patches (tuple of the file name and commit msg)
        """
        upstream = upstream or self.version_from_specfile
        commits = self.get_commits_to_upstream(upstream, add_usptream_head_commit=True)
        patch_list = []
        # commits[0] is the upstream head; diff each downstream commit
        # against its predecessor in the list.
        for i, commit in enumerate(commits[1:]):
            parent = commits[i]
            patch_name = f"{i + 1:04d}-{commit.hexsha}.patch"
            patch_path = os.path.join(self.distgit.working_dir, patch_name)
            patch_msg = f"{commit.summary}\nAuthor: {commit.author.name} <{commit.author.email}>"
            logger.debug(f"PATCH: {patch_name}\n{patch_msg}")
            diff = run_command(
                cmd=[
                    "git",
                    "diff",
                    "--patch",
                    parent.hexsha,
                    commit.hexsha,
                    "--",
                    ".",
                    # The pathspec must NOT carry literal quotes: git runs
                    # without a shell here, so the quotes would become part of
                    # the pathspec and silently disable the exclude.
                    ":(exclude)redhat",
                ],
                cwd=self.sourcegit.working_dir,
                output=True,
            )
            with open(patch_path, mode="w") as patch_file:
                patch_file.write(diff)
            patch_list.append((patch_name, patch_msg))
        return patch_list

    def add_patches_to_specfile(self, patch_list: List[Tuple[str, str]] = None) -> None:
        """
        Add the given list of (patch_name, msg) to the specfile.

        :param patch_list: [(patch_name, msg)] if None, the patches will be generated
        """
        if patch_list is None:
            patch_list = self.create_patches()
        if not patch_list:
            return
        # Reuse the cached property instead of rebuilding the path.
        specfile_path = self.dist_specfile_path
        with open(file=specfile_path, mode="r+") as spec_file:
            # Find the position right after the last SourceN line --
            # that is where the PatchNNNN entries belong.
            last_source_position = None
            line = spec_file.readline()
            while line:
                if line.startswith("Source"):
                    last_source_position = spec_file.tell()
                line = spec_file.readline()
            if not last_source_position:
                raise Exception("Cannot find a place for patches in the specfile.")
            spec_file.seek(last_source_position)
            rest_of_the_file = spec_file.read()
            spec_file.seek(last_source_position)
            spec_file.write("\n\n# PATCHES FROM SOURCE GIT:\n")
            for i, (patch, msg) in enumerate(patch_list):
                # Each patch entry is preceded by its commit message,
                # commented out line by line.
                commented_msg = "\n# " + "\n# ".join(msg.split("\n")) + "\n"
                spec_file.write(commented_msg)
                spec_file.write(f"Patch{i + 1:04d}: {patch}\n")
            spec_file.write(rest_of_the_file)
        logger.info(
            f"Patches ({len(patch_list)}) added to the specfile ({specfile_path})"
        )
        # NOTE(review): this writes the *source-git* index although the change
        # happened in dist-git -- looks suspicious, please confirm intent.
        self.sourcegit.git_repo.index.write()

    def copy_synced_content_to_distgit_directory(self, synced_files: List[str]) -> None:
        """
        Copy files from source-git to destination directory.
        Directories are merged into dist-git; single files are copied with
        metadata preserved.
        """
        for file in synced_files:
            file_in_working_dir = os.path.join(self.sourcegit.working_dir, file)
            logger.debug(f"Copying '{file_in_working_dir}' to distgit.")
            if os.path.isdir(file_in_working_dir):
                # NOTE(review): distutils is deprecated (removed in 3.12);
                # shutil.copytree(..., dirs_exist_ok=True) is the 3.8+ substitute.
                copy_tree(src=file_in_working_dir, dst=self.distgit.working_dir)
            elif os.path.isfile(file_in_working_dir):
                shutil.copy2(
                    file_in_working_dir, os.path.join(self.distgit.working_dir, file)
                )

    def upload_archive_to_lookaside_cache(self, keytab: str) -> None:
        """
        Upload the archive to the lookaside cache using fedpkg.
        (If not exists, the archive will be created.)

        :param keytab: path to the keytab used to obtain the kerberos ticket
        """
        logger.info("Uploading the archive to lookaside cache.")
        self.fedpkg.init_ticket(keytab)
        # Best effort: do not fail the whole sync when the upload fails.
        self.fedpkg.new_sources(sources=self.archive, fail=False)

    def commit_distgit(self, title: str, msg: str) -> None:
        """
        Stage all changes in dist-git and create a signed-off commit.

        :param title: first line of the commit message
        :param msg: body of the commit message
        """
        main_msg = f"[source-git] {title}"
        self.distgit.git_repo.git.add("-A")
        self.distgit.git_repo.index.write()
        # TODO: implement signing properly: we need to create a cert for the bot,
        # distribute it to the container, prepare git config and then we can
        # start signing
        # TODO: make -s configurable
        self.distgit.git_repo.git.commit("-s", "-m", main_msg, "-m", msg)

    def reset_checks(
        self, full_name: str, pr_id: int, github_token: str, pagure_user_token: str
    ) -> None:
        """
        Before syncing a new change downstream, we need to reset status of checks
        for all the configured tests
        and wait for testing systems to get us the new ones.
        """
        sg = SourceGitCheckHelper(github_token, pagure_user_token)
        for check_dict in self.package_config.metadata["checks"]:
            check = get_check_by_name(check_dict["name"])
            sg.set_init_check(full_name, pr_id, check)

    def update_or_create_dist_git_pr(
        self,
        project: GitProject,
        pr_id: int,
        pr_url: str,
        top_commit: str,
        title: str,
        source_ref: str,
        pagure_fork_token: str,
        pagure_package_token: str,
    ) -> None:
        """
        Comment on the dist-git PR matching the source-git PR, or create a
        new dist-git PR when none matches.

        :param project: dist-git project
        :param pr_id: ID of the source-git pull request
        :param pr_url: URL of the source-git pull request
        :param top_commit: hexsha of the top commit of the source-git PR
        :param title: title for the dist-git PR
        :param source_ref: branch in the fork carrying the changes
        :param pagure_fork_token: API token for the package's fork
        :param pagure_package_token: API token for the package itself
        """
        # Sadly, pagure does not support editing initial comments of a PR via the API
        # https://pagure.io/pagure/issue/4111
        # Short-term solution: keep adding comments
        # and get updated info about sg PR ID and commit desc
        for pr in project.get_pr_list():
            sg_pr_id_match = project.search_in_pr(
                pr_id=pr.id,
                filter_regex=DG_PR_COMMENT_KEY_SG_PR + r":\s*(\d+)",
                reverse=True,
                description=True,
            )
            if not sg_pr_id_match:
                logger.debug(f"No automation comment found in dist-git PR: {pr.id}.")
                continue
            sg_pr_id = sg_pr_id_match[1]
            if sg_pr_id != str(pr_id):
                logger.debug(
                    f"Dist-git PR `{pr.id}` does not match " f"source-git PR `{pr_id}`."
                )
                continue
            commit_match = project.search_in_pr(
                pr_id=pr.id,
                # Commit hashes are hex strings, so the former `\d+` could
                # never match a typical sha; match word characters instead.
                filter_regex=DG_PR_COMMENT_KEY_SG_COMMIT + r":\s*(\w+)",
                reverse=True,
                description=True,
            )
            if not commit_match:
                logger.debug(
                    f"Dist-git PR `{pr.id}` does not contain top-commit of the "
                    f"source-git PR `{pr_id}`."
                )
                continue
            logger.debug("Adding a new comment with update to existing PR.")
            msg = (
                f"New changes were pushed to the upstream pull request\n\n"
                f"[{DG_PR_COMMENT_KEY_SG_PR}: {pr_id}]({pr_url})\n"
                f"{DG_PR_COMMENT_KEY_SG_COMMIT}: {top_commit}"
            )
            # FIXME: consider storing the data above as a git note of the top commit
            project.change_token(pagure_package_token)
            project.pr_comment(pr.id, msg)
            logger.info(f"new comment added on PR {pr.id} ({pr.url})")
            break
        else:
            logger.debug("Matching dist-git PR not found => creating a new one.")
            msg = (
                f"This pull request contains changes from upstream "
                f"and is meant to integrate them into Fedora\n\n"
                f"[{DG_PR_COMMENT_KEY_SG_PR}: {pr_id}]({pr_url})\n"
                f"{DG_PR_COMMENT_KEY_SG_COMMIT}: {top_commit}"
            )
            # This pagure call requires token from the package's FORK
            project_fork = project.get_fork()
            project_fork.change_token(pagure_fork_token)
            dist_git_pr_id = project_fork.pr_create(
                title=f"[source-git] {title}",
                body=msg,
                source_branch=source_ref,
                target_branch="master",
            ).id
            logger.info(f"PR created: {dist_git_pr_id}")

    def push_to_distgit_fork(self, project_fork, branch_name):
        """
        Push ``branch_name`` to the dist-git fork, force-pushing when the
        branch already exists there.
        """
        if "origin-fork" not in [
            remote.name for remote in self.distgit.git_repo.remotes
        ]:
            self.distgit.git_repo.create_remote(
                name="origin-fork", url=project_fork.get_git_urls()["ssh"]
            )
        # I suggest to comment this one while testing when the push is not needed
        self.distgit.git_repo.remote("origin-fork").push(
            refspec=branch_name, force=branch_name in project_fork.get_branches()
        )

    def __enter__(self) -> Transformator:
        return self

    def __exit__(self, *args) -> None:
        self.clean()
|
<reponame>nakamura196/neural-neighbors
import numpy as np
import faiss
import time
import json
def main():
    """Benchmark brute-force vs faiss (flat and IVF) nearest-neighbour search."""
    # Each line of data.json is a JSON array holding one vector.
    with open('data.json', 'r') as f:
        xb = np.array([np.array(json.loads(l.strip())).astype('float32') for l in f])
    # Jitter the first coordinate so no two vectors are exact duplicates.
    for i in range(len(xb)):
        xb[i][0] += np.random.random()
    nq = 1
    xq = np.copy(xb[:nq])
    nb, d = xb.shape
    n_candidates = 10
    # Search (brute force): vectorized distances instead of a per-row
    # Python loop over np.linalg.norm (same values, one C-level pass).
    s = time.time()
    result_d, result_i = [], []
    for q in xq:
        dist = np.linalg.norm(xb - q, axis=1)
        # stable sort preserves the original tie-breaking order of sorted()
        idx = np.argsort(dist, kind='stable')[:n_candidates]
        result_d.append(dist[idx])
        result_i.append(idx)
    result_d, result_i = np.array(result_d), np.array(result_i)
    print('Average query time (brute force): {:.2f} [ms]'.format((time.time() - s) * 1000 / nq))
    # Index (faiss)
    s = time.time()
    index = faiss.IndexFlatL2(d)
    index.add(xb)
    print('Index time (faiss): {:.2f} [ms]'.format((time.time() - s) * 1000))
    # Search (faiss)
    s = time.time()
    result_d1, result_i1 = index.search(xq, n_candidates)
    print('Average query time (faiss): {:.2f} [ms]'.format((time.time() - s) * 1000 / nq))
    # Evaluate (faiss) against the brute-force ground truth
    evaluate(result_i, result_i1)
    # Index (faiss (quantize))
    s = time.time()
    nlist = 100
    quantizer = faiss.IndexFlatL2(d)
    index = faiss.IndexIVFFlat(quantizer, d, nlist, faiss.METRIC_L2)
    index.train(xb)
    index.add(xb)
    print('Index time (faiss / quantize): {:.2f} [ms]'.format((time.time() - s) * 1000))
    # Search (faiss (quantize nprobe=1))
    s = time.time()
    index.nprobe = 1
    result_d2, result_i2 = index.search(xq, n_candidates)
    print('Average query time (faiss / quantize nprobe=1): {:.2f} [ms]'.format((time.time() - s) * 1000 / nq))
    # Evaluate (faiss (quantize nprobe=1))
    evaluate(result_i, result_i2)
    # Search (faiss (quantize nprobe=10)): more cells probed -> better recall
    s = time.time()
    index.nprobe = 10
    result_d3, result_i3 = index.search(xq, n_candidates)
    print('Average query time (faiss / quantize nprobe=10): {:.2f} [ms]'.format((time.time() - s) * 1000 / nq))
    # Evaluate (faiss (quantize nprobe=10))
    evaluate(result_i, result_i3)
def evaluate(arr1, arr2):
    """Print recall@1 and top-k recall of ``arr2`` against reference ``arr1``.

    Both arguments are (num_queries, k) integer index matrices; a hit is an
    index appearing in the corresponding rows of both matrices (rows are
    assumed duplicate-free).
    """
    n_queries, k = arr1.shape[0], arr1.shape[1]
    # Fraction of queries whose very first (nearest) result agrees.
    top_1 = (arr1[:, 0] == arr2[:, 0]).sum() / n_queries
    # Indices occurring at least twice in a concatenated row were retrieved
    # by both methods.
    overlap = 0
    for row in np.concatenate([arr1, arr2], axis=1):
        _, occurrences = np.unique(row, return_counts=True)
        overlap += (occurrences >= 2).sum()
    top_k = overlap / n_queries / k
    print('recall@1: {:.2f}, top {} recall: {:.2f}'.format(top_1, k, top_k))
# Entry point guard: run the benchmark only when executed as a script.
if __name__ == "__main__":
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.