input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
ActionError('Unable to update GroupAtom due to GAIN_PAIR action: Unknown atom type produced from set "{0}".'.format(self.atomType))
#Add a lone pair to a group atom with none
if not self.lonePairs:
self.lonePairs = [1,2,3,4] #set to a wildcard of any number greater than 0
#Add a lone pair to a group atom that already has at least one lone pair
else:
for x in self.lonePairs:
lonePairs.append(x + pair)
# Set the new lone electron pair count
self.lonePairs = lonePairs
# Set the new atom types, removing any duplicates
self.atomType = list(set(atomType))
def __losePair(self, pair):
    """
    Update the atom group as a result of applying a LOSE_PAIR action,
    where `pair` specifies the number of lone electron pairs to remove.

    Raises ``ActionError`` if any candidate atom type cannot lose a lone
    pair, or if removing `pair` pairs would make a lone-pair count negative.
    """
    lonePairs = []
    atomType = []
    # Collect every atom type reachable by removing a lone pair from any of
    # the current candidate atom types.
    # NOTE(review): assumes `decrementLonePair` is a list of AtomType
    # objects -- confirm against the AtomType definition.
    for atom in self.atomType:
        atomType.extend(atom.decrementLonePair)
    # If any candidate atom type has no valid "after losing a pair" type,
    # this action is impossible for the group.
    if any([len(atom.decrementLonePair) == 0 for atom in self.atomType]):
        raise ActionError('Unable to update GroupAtom due to LOSE_PAIR action: Unknown atom type produced from set "{0}".'.format(self.atomType))
    # An empty lonePairs list acts as a wildcard (any count allowed).
    if not self.lonePairs:
        self.lonePairs = [0,1,2,3] #set to a wildcard of any number fewer than 4
    else:
        for x in self.lonePairs:
            if x - pair < 0:
                raise ActionError('Unable to update GroupAtom due to LOSE_PAIR action: Invalid lone electron pairs set "{0}".'.format(self.lonePairs))
            lonePairs.append(x - pair)
        # Set the new lone electron pair count
        self.lonePairs = lonePairs
    # Set the new atom types, removing any duplicates
    self.atomType = list(set(atomType))
def applyAction(self, action):
    """
    Update the atom group as a result of applying `action`, a tuple
    containing the name of the reaction recipe action along with any
    required parameters. The available actions can be found
    :ref:`here <reaction-recipe-actions>`.

    Raises ``ActionError`` if the action name is not recognized.
    """
    # Normalize the action name once and dispatch on it consistently.
    # (Previously GAIN_PAIR/LOSE_PAIR recomputed action[0].upper() instead
    # of reusing `act` like every other branch.)
    act = action[0].upper()
    if act == 'CHANGE_BOND':
        self.__changeBond(action[2])
    elif act == 'FORM_BOND':
        self.__formBond(action[2])
    elif act == 'BREAK_BOND':
        self.__breakBond(action[2])
    elif act == 'GAIN_RADICAL':
        self.__gainRadical(action[2])
    elif act == 'LOSE_RADICAL':
        self.__loseRadical(action[2])
    elif act == 'GAIN_PAIR':
        self.__gainPair(action[2])
    elif act == 'LOSE_PAIR':
        self.__losePair(action[2])
    else:
        raise ActionError('Unable to update GroupAtom: Invalid action {0}".'.format(action))
def equivalent(self, other):
    """
    Returns ``True`` if `other` is equivalent to `self` or ``False`` if not,
    where `other` can be either an :class:`Atom` or an :class:`GroupAtom`
    object. When comparing two :class:`GroupAtom` objects, this function
    respects wildcards, e.g. ``R!H`` is equivalent to ``C``.
    """
    cython.declare(group=GroupAtom)
    if not isinstance(other, GroupAtom):
        # Let the equivalent method of other handle it
        # We expect self to be an Atom object, but can't test for it here
        # because that would create an import cycle
        return other.equivalent(self)
    group=other
    cython.declare(atomType1=AtomType, atomtype2=AtomType, radical1=cython.short, radical2=cython.short,
                   lp1=cython.short, lp2=cython.short, charge1=cython.short, charge2=cython.short)
    # Compare two atom groups for equivalence
    # Each atom type in self must have an equivalent in other (and vice versa)
    for atomType1 in self.atomType:
        for atomType2 in group.atomType:
            if atomType1.equivalent(atomType2): break
        else:
            # No atom type in `group` matched this one -> not equivalent.
            return False
    for atomType1 in group.atomType:
        for atomType2 in self.atomType:
            if atomType1.equivalent(atomType2): break
        else:
            return False
    # Each free radical electron state in self must have an equivalent in other (and vice versa)
    for radical1 in self.radicalElectrons:
        if group.radicalElectrons:  # Only check if the list is non-empty. An empty list indicates a wildcard.
            for radical2 in group.radicalElectrons:
                if radical1 == radical2: break
            else:
                return False
    for radical1 in group.radicalElectrons:
        if self.radicalElectrons:
            for radical2 in self.radicalElectrons:
                if radical1 == radical2: break
            else:
                return False
    # NOTE(review): unlike radicals and charges, lone pairs are only checked
    # in the self -> group direction here -- confirm whether the reverse
    # check was intentionally omitted.
    for lp1 in self.lonePairs:
        if group.lonePairs:
            for lp2 in group.lonePairs:
                if lp1 == lp2: break
            else:
                return False
    #Each charge in self must have an equivalent in other (and vice versa)
    for charge1 in self.charge:
        if group.charge:
            for charge2 in group.charge:
                if charge1 == charge2: break
            else:
                return False
    for charge1 in group.charge:
        if self.charge:
            for charge2 in self.charge:
                if charge1 == charge2: break
            else:
                return False
    # Otherwise the two atom groups are equivalent
    return True
def isSpecificCaseOf(self, other):
    """
    Returns ``True`` if `other` is the same as `self` or is a more
    specific case of `self`. Returns ``False`` if some of `self` is not
    included in `other` or they are mutually exclusive.
    """
    cython.declare(group=GroupAtom)
    if not isinstance(other, GroupAtom):
        # Let the isSpecificCaseOf method of other handle it
        # We expect self to be an Atom object, but can't test for it here
        # because that would create an import cycle
        return other.isSpecificCaseOf(self)
    group=other
    cython.declare(atomType1=AtomType, atomtype2=AtomType, radical1=cython.short, radical2=cython.short,
                   lp1=cython.short, lp2=cython.short, charge1=cython.short, charge2=cython.short)
    # Compare two atom groups for equivalence
    # Each atom type in self must have an equivalent in other (and vice versa)
    for atomType1 in self.atomType: # all these must match
        for atomType2 in group.atomType: # can match any of these
            if atomType1.isSpecificCaseOf(atomType2): break
        else:
            return False
    # Each free radical electron state in self must have an equivalent in other (and vice versa)
    if self.radicalElectrons:
        for radical1 in self.radicalElectrons:
            if group.radicalElectrons:
                for radical2 in group.radicalElectrons:
                    if radical1 == radical2: break
                else:
                    return False
    else:
        # self is a wildcard: it can only be a specific case of another wildcard.
        if group.radicalElectrons: return False
    if self.lonePairs:
        for lp1 in self.lonePairs:
            if group.lonePairs:
                for lp2 in group.lonePairs:
                    if lp1 == lp2: break
                else:
                    return False
    else:
        if group.lonePairs: return False
    #Each charge in self must have an equivalent in other
    if self.charge:
        for charge1 in self.charge:
            if group.charge:
                for charge2 in group.charge:
                    if charge1 == charge2: break
                else:
                    return False
    else:
        if group.charge: return False
    # Otherwise self is in fact a specific case of other
    return True
def isOxygen(self):
    """
    Return ``True`` if the atom represents an oxygen atom or ``False`` if
    not.
    """
    # Every candidate atom type must be the generic 'O' type or one of its
    # specific subtypes for the group atom to be considered oxygen.
    oxygenTypes = [atomTypes['O']] + atomTypes['O'].specific
    return all(aType in oxygenTypes for aType in self.atomType)
def isSulfur(self):
    """
    Return ``True`` if the atom represents a sulfur atom or ``False`` if
    not.
    """
    # Every candidate atom type must be the generic 'S' type or one of its
    # specific subtypes for the group atom to be considered sulfur.
    sulfurTypes = [atomTypes['S']] + atomTypes['S'].specific
    return all(aType in sulfurTypes for aType in self.atomType)
def hasWildcards(self):
    """
    Return ``True`` if the atom has wildcards in any of the attributes:
    atomtype, electronpairs, lone pairs, charge, and bond order. Returns
    ``False`` if no attribute has wildcards.
    """
    # More than one candidate atom type is a wildcard.
    if len(self.atomType) > 1:
        return True
    # Anything other than exactly one radical-electron count is a wildcard
    # (an empty list means "any").
    if len(self.radicalElectrons) != 1:
        return True
    # Multiple lone-pair counts are a wildcard.
    if len(self.lonePairs) > 1:
        return True
    # Finally, any bond with more than one possible order is a wildcard.
    return any(len(bond.order) > 1 for bond in self.bonds.values())
def countBonds(self, wildcards = False):
    """
    Returns: list of the number of bonds currently on the :class:GroupAtom,
    as ``[single, allDouble, rDouble, oDouble, sDouble, triple, benzene]``.

    If the argument `wildcards` is turned off then any bonds with multiple
    options for bond orders will not be counted.
    """
    #count up number of bonds
    single = 0; rDouble = 0; oDouble = 0; sDouble = 0; triple = 0; benzene = 0
    # Use items() instead of the Python-2-only iteritems() so this works on
    # both Python 2 and Python 3.
    for atom2, bond12 in self.bonds.items():
        if not wildcards and len(bond12.order) > 1:
            # Ambiguous bond order: skip unless wildcards are requested.
            continue
        # Count numbers of each higher-order bond type
        if bond12.isSingle(wildcards = True):
            single += 1
        if bond12.isDouble(wildcards = True):
            if atom2.isOxygen():
                oDouble += 1
            elif atom2.isSulfur():
                sDouble += 1
            else:
                # rDouble is for double bonds NOT to oxygen or Sulfur
                rDouble += 1
        if bond12.isTriple(wildcards = True): triple += 1
        if bond12.isBenzene(wildcards = True): benzene += 1
    allDouble = rDouble + oDouble + sDouble
    return [single, allDouble, rDouble, oDouble, sDouble, triple, benzene]
def makeSampleAtom(self):
"""
Returns: a class :Atom: object analagous to the GroupAtom
This makes a sample, so it takes the first element when there are multiple options inside of
self.atomtype, self.radicalElectrons, self.lonePairs, and self.charge
"""
#Use the first atomtype to determine element, even if there is more than one atomtype
atomtype = self.atomType[0]
element = None
defaultLonePairs={'H': 0,
'D': 0,
'T': 0,
'He':1,
'C': 0,
'O': 2,
'N': 1,
'Si':0,
'S': 2,
'Ne':4,
'Cl':3,
'Ar':4,
}
for elementLabel in allElements:
if atomtype is atomTypes[elementLabel] or atomtype in atomTypes[elementLabel].specific:
element = elementLabel
break
else:
#For types that correspond to more than one type of element, pick the first that appears in specific
for subtype in atomtype.specific:
if subtype.label in allElements:
element = subtype.label
break
#dummy defaultAtom to get default values
defaultAtom = mol.Atom()
newAtom = mol.Atom(element = element,
radicalElectrons = self.radicalElectrons[0] if self.radicalElectrons else defaultAtom.radicalElectrons,
charge = self.charge[0] if self.charge else defaultAtom.charge,
lonePairs = self.lonePairs[0] if self.lonePairs else defaultAtom.lonePairs,
label = self.label if self.label else | |
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This is all roughly based on the Makefile system used by the Linux
# kernel, but is a non-recursive make -- we put the entire dependency
# graph in front of make and let it figure it out.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level Makefile. This means that all
# variables in .mk-files clobber one another. Be careful to use :=
# where appropriate for immediate evaluation, and similarly to watch
# that you're not relying on a variable value to last between different
# .mk files.
#
# TODO
# Global settings and utility functions are currently stuffed in the toplevel Makefile.
# It may make sense to generate some .mk files on the side to keep the files readable.
from __future__ import print_function
import os
import re
import subprocess
import gyp
import gyp.common
import gyp.xcode_emulation
from gyp.common import GetEnvironFallback
# Default expansions for gyp variables; the $(...) values are deferred to
# make at build time, while the %(...)s values are expanded by Python.
generator_default_variables = {
    'EXECUTABLE_PREFIX': '',
    'EXECUTABLE_SUFFIX': '',
    'STATIC_LIB_PREFIX': 'lib',
    'SHARED_LIB_PREFIX': 'lib',
    'STATIC_LIB_SUFFIX': '.a',
    'INTERMEDIATE_DIR': '$(obj).$(TOOLSET)/$(TARGET)/geni',
    'SHARED_INTERMEDIATE_DIR': '$(obj)/gen',
    'PRODUCT_DIR': '$(builddir)',
    'RULE_INPUT_ROOT': '%(INPUT_ROOT)s', # This gets expanded by Python.
    'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s', # This gets expanded by Python.
    'RULE_INPUT_PATH': '$(abspath $<)',
    'RULE_INPUT_EXT': '$(suffix $<)',
    'RULE_INPUT_NAME': '$(notdir $<)',
    'CONFIGURATION_NAME': '$(BUILDTYPE)',
}
# Make supports multiple toolsets
generator_supports_multiple_toolsets = True
# Request sorted dependencies in the order from dependents to dependencies.
generator_wants_sorted_dependencies = False
# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
# NOTE(review): left as None here; presumably populated elsewhere when a
# generator filelist is in use -- confirm against CalculateGeneratorInputInfo.
generator_filelist_paths = None
# Makefile command snippets, selected per target OS and spliced verbatim into
# the generated Makefile. The string contents (including their '#' lines) are
# make syntax, not Python.
LINK_COMMANDS_LINUX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
# We support two kinds of shared objects (.so):
# 1) shared_library, which is just bundling together many dependent libraries
# into a link line.
# 2) loadable_module, which is generating a module intended for dlopen().
#
# They differ only slightly:
# In the former case, we want to package all dependent code into the .so.
# In the latter case, we want to package just the API exposed by the
# outermost module.
# This means shared_library uses --whole-archive, while loadable_module doesn't.
# (Note that --whole-archive is incompatible with the --start-group used in
# normal linking.)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
"""
# macOS uses libtool for static archives and -bundle for loadable modules.
LINK_COMMANDS_MAC = """\
quiet_cmd_alink = LIBTOOL-STATIC $@
cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -bundle $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
# Android adds separate *_host variants for the host toolset.
LINK_COMMANDS_ANDROID = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
quiet_cmd_link_host = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
cmd_link_host = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
quiet_cmd_solink_module_host = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module_host = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
# AIX ar needs -X32_64 to handle both 32- and 64-bit objects.
LINK_COMMANDS_AIX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) -X32_64 crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) -X32_64 crs $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
# z/OS links shared objects with -Wl,DLL rather than -shared.
LINK_COMMANDS_OS390 = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS) -Wl,DLL
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS) -Wl,DLL
"""
# Extra mac-only rules: Objective-C(++) compilation, precompiled headers,
# and helpers delegated to the generated gyp-mac-tool script.
SHARED_HEADER_MAC_COMMANDS = """
quiet_cmd_objc = CXX($(TOOLSET)) $@
cmd_objc = $(CC.$(TOOLSET)) $(GYP_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_objcxx = CXX($(TOOLSET)) $@
cmd_objcxx = $(CXX.$(TOOLSET)) $(GYP_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# Commands for precompiled header files.
quiet_cmd_pch_c = CXX($(TOOLSET)) $@
cmd_pch_c = $(CC.$(TOOLSET)) $(GYP_PCH_CFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_cc = CXX($(TOOLSET)) $@
cmd_pch_cc = $(CC.$(TOOLSET)) $(GYP_PCH_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_m = CXX($(TOOLSET)) $@
cmd_pch_m = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_pch_mm = CXX($(TOOLSET)) $@
cmd_pch_mm = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# gyp-mac-tool is written next to the root Makefile by gyp.
# Use $(4) for the command, since $(2) and $(3) are used as flag by do_cmd
# already.
quiet_cmd_mac_tool = MACTOOL $(4) $<
cmd_mac_tool = ./gyp-mac-tool $(4) $< "$@"
quiet_cmd_mac_package_framework = PACKAGE FRAMEWORK $@
cmd_mac_package_framework = ./gyp-mac-tool package-framework "$@" $(4)
quiet_cmd_infoplist = INFOPLIST $@
cmd_infoplist = $(CC.$(TOOLSET)) -E -P -Wno-trigraphs -x c $(INFOPLIST_DEFINES) "$<" -o "$@"
"""
# Appended to the very end of the generated root Makefile.
SHARED_FOOTER = """\
# "all" is a concatenation of the "all" targets from all the included
# sub-makefiles. This is just here to clarify.
all:
# Add in dependency-tracking rules. $(all_deps) is the list of every single
# target in our tree. Only consider the ones with .d (dependency) info:
d_files := $(wildcard $(foreach f,$(all_deps),$(depsdir)/$(f).d))
ifneq ($(d_files),)
include $(d_files)
endif
"""
def WriteAutoRegenerationRule(params, root_makefile, makefile_name, build_files, Sourceify):
    """Write the target to regenerate the Makefile."""
    options = params['options']
    # Re-express every input .gyp file relative to the top-level directory.
    build_files_args = []
    for build_file in params['build_files_arg']:
        build_files_args.append(
            gyp.common.RelativePath(build_file, options.toplevel_dir))
    gyp_binary = gyp.common.FixIfRelativePath(params['gyp_binary'],
                                              options.toplevel_dir)
    # Relative gyp binaries must be invoked with an explicit './' prefix.
    if not gyp_binary.startswith(os.sep):
        gyp_binary = os.path.join('.', gyp_binary)
    regen_command = gyp.common.EncodePOSIXShellList(
        [gyp_binary, '-fmake'] + gyp.RegenerateFlags(options) + build_files_args)
    # Emit a rule that reruns gyp whenever any of the source .gyp files change.
    rule_text = (
        "quiet_cmd_regen_makefile = ACTION Regenerating $@\n"
        "cmd_regen_makefile = cd $(srcdir); %(cmd)s\n"
        "%(makefile_name)s: %(deps)s\n"
        "\t$(call do_cmd,regen_makefile)\n\n"
    ) % {
        'makefile_name': makefile_name,
        'deps': ' '.join(map(Sourceify, build_files)),
        'cmd': regen_command,
    }
    root_makefile.write(rule_text)
def PerformBuild(_, configurations, params):
    """Run `make` once per configuration, honoring a non-default top dir."""
    options = params['options']
    for config_name in configurations:
        arguments = ['make']
        # Only pass -C when building outside the current directory.
        if options.toplevel_dir and options.toplevel_dir != '.':
            arguments.extend(['-C', options.toplevel_dir])
        arguments.append('BUILDTYPE=' + config_name)
        print('Building [%s]: %s' % (config_name, arguments))
        subprocess.check_call(arguments)
def GenerateOutput(target_list, target_dicts, data, params):
from gyp.MakefileWriter import MakefileWriter, Sourceify, WriteRootHeaderSuffixRules, SHARED_HEADER
options = params['options']
flavor = gyp.common.GetFlavor(params)
generator_flags = params.get('generator_flags', {})
builddir_name = generator_flags.get('output_dir', 'out')
android_ndk_version = generator_flags.get('android_ndk_version', None)
default_target = generator_flags.get('default_target', 'all')
def CalculateMakefilePath(build_file_arg, base_name):
"""Determine where to write a Makefile for a given gyp file."""
# Paths in gyp files are relative to the .gyp file, but we want
# paths relative to the source root for the master makefile. Grab
# the path of the .gyp file as the base to relativize against.
# E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
base_makefile_path = gyp.common.RelativePath(os.path.dirname(build_file_arg), options.depth)
# We write the file in the base_makefile_path directory.
output_makefile = os.path.join(options.depth, base_makefile_path, base_name)
if options.generator_output:
output_makefile = os.path.join(options.depth, options.generator_output, base_makefile_path, base_name)
base_makefile_path = gyp.common.RelativePath(os.path.dirname(build_file_arg), options.toplevel_dir)
return base_makefile_path, output_makefile
# TODO: search for the first non-'Default' target. This can go
# away when we add verification that all targets have the
# necessary configurations.
default_configuration = None
toolsets = set([target_dicts[target]['toolset'] for target in target_list])
for target in target_list:
spec = target_dicts[target]
if spec['default_configuration'] != 'Default':
default_configuration = spec['default_configuration']
break
if not default_configuration:
default_configuration = 'Default'
srcdir = '.'
makefile_name = 'Makefile' + options.suffix
makefile_path = os.path.join(options.toplevel_dir, makefile_name)
if options.generator_output:
makefile_path = os.path.join(options.toplevel_dir, options.generator_output, makefile_name)
srcdir = gyp.common.RelativePath(srcdir, options.generator_output)
Sourceify.srcdir_prefix = '$(srcdir)/'
flock_command = 'flock'
copy_archive_arguments = '-af'
makedep_arguments | |
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import pytest
from tests import unittest
from botocore import regions
from botocore.exceptions import NoRegionError, EndpointVariantError
class TestEndpointResolver(unittest.TestCase):
    def _template(self):
        """Return the synthetic endpoint data shared by every test.

        Mirrors the structure of botocore's endpoints.json: a list of
        partitions, each with partition-level defaults (including fips and
        dualstack variants), a region map, and per-service endpoint data.
        """
        return {
            'partitions': [
                # Primary 'aws'-style partition: exercises defaults, variant
                # resolution, deprecated endpoints and partition endpoints.
                {
                    'partition': 'aws',
                    'dnsSuffix': 'amazonaws.com',
                    'regionRegex': r'^(us|eu)\-\w+$',
                    'defaults': {
                        'hostname': '{service}.{region}.{dnsSuffix}',
                        'variants': [
                            {
                                'hostname': '{service}-fips.{region}.{dnsSuffix}',
                                'tags': ['fips']
                            },
                            {
                                'dnsSuffix': 'api.aws',
                                'hostname': '{service}.{region}.{dnsSuffix}',
                                'tags': ['dualstack']
                            },
                            {
                                'dnsSuffix': 'api.aws',
                                'hostname': '{service}-fips.{region}.{dnsSuffix}',
                                'tags': ['dualstack', 'fips']
                            },
                        ]
                    },
                    'regions': {
                        'us-foo': {'regionName': 'a'},
                        'us-bar': {'regionName': 'b'},
                        'eu-baz': {'regionName': 'd'}
                    },
                    'services': {
                        'ec2': {
                            'defaults': {
                                'protocols': ['http', 'https'],
                                'variants': [
                                    {
                                        'dnsSuffix': 'api.aws',
                                        'hostname': 'api.ec2.{region}.{dnsSuffix}',
                                        'tags': ['dualstack']
                                    }
                                ]
                            },
                            'endpoints': {
                                'us-foo': {
                                    'hostname': 'ec2.us-foo.amazonaws.com',
                                    'variants': [
                                        {
                                            'dnsSuffix': 'api.aws',
                                            'hostname': 'ec2.foo.{dnsSuffix}',
                                            'tags': ['dualstack']
                                        },
                                        {
                                            'hostname': 'ec2-fips.foo.amazonaws.com',
                                            'tags': ['fips']
                                        },
                                        {
                                            'hostname': 'ec2-fips.foo.api.aws',
                                            'tags': ['fips', 'dualstack']
                                        },
                                    ]
                                },
                                'us-bar': {},
                                'us-dep': {
                                    'deprecated': True,
                                },
                                'us-fizz': {
                                    'credentialScope': {
                                        'region': 'us-fizz'
                                    },
                                    'hostname': 'ec2.us-fizz.amazonaws.com',
                                    'variants': [
                                        {
                                            'hostname': 'ec2.fizz.api.aws',
                                            'tags': ['dualstack']
                                        }
                                    ]
                                },
                                'eu-baz': {},
                                'd': {}
                            },
                        },
                        's3': {
                            'defaults': {
                                'sslCommonName': '{service}.{region}.{dnsSuffix}',
                                'variants': [
                                    {
                                        'hostname': 's3.dualstack.{region}.{dnsSuffix}',
                                        'tags': ['dualstack']
                                    },
                                    {
                                        'hostname': 's3-fips.{region}.{dnsSuffix}',
                                        'tags': ['fips']
                                    },
                                    {
                                        'hostname': 's3-fips.dualstack.{region}.{dnsSuffix}',
                                        'tags': ['dualstack', 'fips']
                                    }
                                ]
                            },
                            'endpoints': {
                                'us-foo': {
                                    'sslCommonName': '{region}.{service}.{dnsSuffix}'
                                },
                                'us-bar': {},
                                'us-fizz': {
                                    'hostname': 's3.api.us-fizz.amazonaws.com',
                                    'variants': [
                                        {
                                            'tags': ['dualstack']
                                        },
                                        {
                                            'tags': ['fips']
                                        }
                                    ]
                                },
                                'eu-baz': {'hostname': 'foo'}
                            }
                        },
                        'not-regionalized': {
                            'isRegionalized': False,
                            'partitionEndpoint': 'aws',
                            'endpoints': {
                                'aws': {'hostname': 'not-regionalized'},
                                'us-foo': {},
                                'eu-baz': {}
                            }
                        },
                        'non-partition': {
                            'partitionEndpoint': 'aws',
                            'endpoints': {
                                'aws': {'hostname': 'host'},
                                'us-foo': {}
                            }
                        },
                        'merge': {
                            'defaults': {
                                'signatureVersions': ['v2'],
                                'protocols': ['http']
                            },
                            'endpoints': {
                                'us-foo': {'signatureVersions': ['v4']},
                                'us-bar': {'protocols': ['https']}
                            }
                        }
                    }
                },
                # Second partition: distinct dnsSuffix/regionRegex and a
                # partition-level default key ('foo') for merge tests.
                {
                    'partition': 'foo',
                    'dnsSuffix': 'foo.com',
                    'regionRegex': r'^(foo)\-\w+$',
                    'defaults': {
                        'hostname': '{service}.{region}.{dnsSuffix}',
                        'protocols': ['http'],
                        'foo': 'bar',
                    },
                    'regions': {
                        'foo-1': {'regionName': '1'},
                        'foo-2': {'regionName': '2'},
                        'foo-3': {'regionName': '3'}
                    },
                    'services': {
                        'ec2': {
                            'endpoints': {
                                'foo-1': {
                                    'foo': 'baz'
                                },
                                'foo-2': {},
                                'foo-3': {}
                            }
                        }
                    }
                },
                # Isolated partition with no variants defined; used to test
                # that variant requests raise EndpointVariantError.
                {
                    'partition': 'aws-iso',
                    'dnsSuffix': 'amazonaws.com',
                    'defaults': {
                        'hostname': '{service}.{region}.{dnsSuffix}',
                        'protocols': ['http'],
                    },
                    'regions': {
                        'foo-1': {'regionName': '1'},
                        'foo-2': {'regionName': '2'},
                        'foo-3': {'regionName': '3'}
                    },
                    'services': {
                        'ec2': {
                            'endpoints': {
                                'foo-1': {
                                    'foo': 'baz'
                                },
                                'foo-2': {},
                                'foo-3': {}
                            }
                        }
                    }
                }
            ]
        }
    # Input validation and basic listing behavior.
    def test_ensures_region_is_not_none(self):
        # construct_endpoint requires a region (or a partition endpoint).
        with self.assertRaises(NoRegionError):
            resolver = regions.EndpointResolver(self._template())
            resolver.construct_endpoint('foo', None)
    def test_ensures_required_keys_present(self):
        # An endpoint document without 'partitions' is rejected.
        with self.assertRaises(ValueError):
            regions.EndpointResolver({})
    def test_returns_empty_list_when_listing_for_different_partition(self):
        resolver = regions.EndpointResolver(self._template())
        self.assertEqual([], resolver.get_available_endpoints('ec2', 'bar'))
    def test_returns_empty_list_when_no_service_found(self):
        resolver = regions.EndpointResolver(self._template())
        self.assertEqual([], resolver.get_available_endpoints('what?'))
    # Endpoint-name listing, with and without non-regional entries.
    def test_gets_endpoint_names(self):
        resolver = regions.EndpointResolver(self._template())
        result = resolver.get_available_endpoints(
            'ec2', allow_non_regional=True)
        # 'd' and 'us-dep' only appear when non-regional entries are allowed.
        self.assertEqual(['d', 'eu-baz', 'us-bar', 'us-dep',
                          'us-fizz', 'us-foo'],
                         sorted(result))
    def test_gets_endpoint_names_for_partition(self):
        resolver = regions.EndpointResolver(self._template())
        result = resolver.get_available_endpoints(
            'ec2', allow_non_regional=True, partition_name='foo')
        self.assertEqual(['foo-1', 'foo-2', 'foo-3'], sorted(result))
    def test_list_regional_endpoints_only(self):
        resolver = regions.EndpointResolver(self._template())
        result = resolver.get_available_endpoints(
            'ec2', allow_non_regional=False)
        self.assertEqual(['eu-baz', 'us-bar', 'us-foo'], sorted(result))
    # Basic construct_endpoint resolution: exact, partition, and regex matches.
    def test_returns_none_when_no_match(self):
        resolver = regions.EndpointResolver(self._template())
        self.assertIsNone(resolver.construct_endpoint('foo', 'baz'))
    def test_constructs_regionalized_endpoints_for_exact_matches(self):
        resolver = regions.EndpointResolver(self._template())
        result = resolver.construct_endpoint('not-regionalized', 'eu-baz')
        self.assertEqual(
            'not-regionalized.eu-baz.amazonaws.com', result['hostname']
        )
        self.assertEqual('aws', result['partition'])
        self.assertEqual('eu-baz', result['endpointName'])
    def test_constructs_partition_endpoints_for_real_partition_region(self):
        # A known region of a non-regionalized service resolves to the
        # partition-wide endpoint.
        resolver = regions.EndpointResolver(self._template())
        result = resolver.construct_endpoint('not-regionalized', 'us-bar')
        self.assertEqual('not-regionalized', result['hostname'])
        self.assertEqual('aws', result['partition'])
        self.assertEqual('aws', result['endpointName'])
    def test_constructs_partition_endpoints_for_regex_match(self):
        # Unknown region matching the partition's regionRegex still resolves.
        resolver = regions.EndpointResolver(self._template())
        result = resolver.construct_endpoint('not-regionalized', 'us-abc')
        self.assertEqual('not-regionalized', result['hostname'])
    def test_constructs_endpoints_for_regionalized_regex_match(self):
        resolver = regions.EndpointResolver(self._template())
        result = resolver.construct_endpoint('s3', 'us-abc')
        self.assertEqual('s3.us-abc.amazonaws.com', result['hostname'])
    def test_constructs_endpoints_for_unknown_service_but_known_region(self):
        resolver = regions.EndpointResolver(self._template())
        result = resolver.construct_endpoint('unknown', 'us-foo')
        self.assertEqual('unknown.us-foo.amazonaws.com', result['hostname'])
    # Key merging between partition defaults, service defaults and endpoints.
    def test_merges_service_keys(self):
        resolver = regions.EndpointResolver(self._template())
        us_foo = resolver.construct_endpoint('merge', 'us-foo')
        us_bar = resolver.construct_endpoint('merge', 'us-bar')
        # Endpoint-level keys win; service defaults fill in the rest.
        self.assertEqual(['http'], us_foo['protocols'])
        self.assertEqual(['v4'], us_foo['signatureVersions'])
        self.assertEqual(['https'], us_bar['protocols'])
        self.assertEqual(['v2'], us_bar['signatureVersions'])
    def test_merges_partition_default_keys_with_no_overwrite(self):
        resolver = regions.EndpointResolver(self._template())
        resolved = resolver.construct_endpoint('ec2', 'foo-1')
        self.assertEqual('baz', resolved['foo'])
        self.assertEqual(['http'], resolved['protocols'])
    def test_merges_partition_default_keys_with_overwrite(self):
        resolver = regions.EndpointResolver(self._template())
        resolved = resolver.construct_endpoint('ec2', 'foo-2')
        # Endpoint has no 'foo' key, so the partition default applies.
        self.assertEqual('bar', resolved['foo'])
        self.assertEqual(['http'], resolved['protocols'])
    # Hostname/sslCommonName passthrough and partition-endpoint fallback.
    def test_gives_hostname_and_common_name_unaltered(self):
        resolver = regions.EndpointResolver(self._template())
        result = resolver.construct_endpoint('s3', 'eu-baz')
        self.assertEqual('s3.eu-baz.amazonaws.com', result['sslCommonName'])
        # An explicit endpoint hostname is used verbatim, not templated.
        self.assertEqual('foo', result['hostname'])
    def tests_uses_partition_endpoint_when_no_region_provided(self):
        resolver = regions.EndpointResolver(self._template())
        result = resolver.construct_endpoint('not-regionalized')
        self.assertEqual('not-regionalized', result['hostname'])
        self.assertEqual('aws', result['endpointName'])
    def test_returns_dns_suffix_if_available(self):
        resolver = regions.EndpointResolver(self._template())
        result = resolver.construct_endpoint('not-regionalized')
        self.assertEqual(result['dnsSuffix'], 'amazonaws.com')
def test_construct_dualstack_from_endpoint_variant(self):
resolver = regions.EndpointResolver(self._template())
result = resolver.construct_endpoint('ec2', 'us-foo',
use_dualstack_endpoint=True)
self.assertEqual(result['hostname'], 'ec2.foo.api.aws')
self.assertEqual(result['dnsSuffix'], 'api.aws')
def test_construct_dualstack_endpoint_from_service_default_variant(self):
resolver = regions.EndpointResolver(self._template())
result = resolver.construct_endpoint('ec2', 'us-bar',
use_dualstack_endpoint=True)
self.assertEqual(result['hostname'], 'api.ec2.us-bar.api.aws')
self.assertEqual(result['dnsSuffix'], 'api.aws')
def test_construct_dualstack_endpoint_from_partition_default_variant(self):
resolver = regions.EndpointResolver(self._template())
result = resolver.construct_endpoint('dynamodb', 'us-bar',
use_dualstack_endpoint=True)
self.assertEqual(result['hostname'], 'dynamodb.us-bar.api.aws')
self.assertEqual(result['dnsSuffix'], 'api.aws')
def test_constructs_dualstack_endpoint_no_hostname_in_variant(self):
resolver = regions.EndpointResolver(self._template())
result = resolver.construct_endpoint('s3', 'us-fizz',
use_dualstack_endpoint=True)
self.assertEqual('s3.dualstack.us-fizz.api.aws',
result['hostname'])
def test_constructs_endpoint_dualstack_no_variant_dns_suffix(self):
resolver = regions.EndpointResolver(self._template())
result = resolver.construct_endpoint('s3', 'us-bar',
use_dualstack_endpoint=True)
self.assertEqual('s3.dualstack.us-bar.api.aws',
result['hostname'])
    def test_construct_dualstack_endpoint_iso_partition_raise_exception(self):
        """Dualstack is unsupported in iso partitions and must raise."""
        with self.assertRaises(EndpointVariantError):
            resolver = regions.EndpointResolver(self._template())
            resolver.construct_endpoint('foo', 'foo-1', 'aws-iso',
                                        use_dualstack_endpoint=True)
    def test_get_partition_dns_suffix_no_tags(self):
        """Without variant tags the base partition dnsSuffix is returned."""
        resolver = regions.EndpointResolver(self._template())
        self.assertEqual(resolver.get_partition_dns_suffix('aws'),
                         'amazonaws.com')
    def test_get_partition_dualstack_dns_suffix(self):
        """The dualstack variant tag selects the dualstack dnsSuffix."""
        resolver = regions.EndpointResolver(self._template())
        self.assertEqual(resolver.get_partition_dns_suffix(
            'aws', ['dualstack']), 'api.aws')
    def test_get_partition_dualstack_dns_suffix_does_not_exist(self):
        """An unknown partition yields None instead of raising."""
        resolver = regions.EndpointResolver(self._template())
        self.assertIsNone(resolver.get_partition_dns_suffix(
            'foo', ['dualstack']))
    def test_get_available_fips_endpoints(self):
        """Only regions that define a fips variant are listed."""
        resolver = regions.EndpointResolver(self._template())
        fips_endpoints = resolver.get_available_endpoints(
            'ec2', endpoint_variant_tags=['fips'])
        self.assertEqual(fips_endpoints, ['us-foo'])
    def test_get_available_dualstack_endpoints(self):
        """Only regions that define a dualstack variant are listed."""
        resolver = regions.EndpointResolver(self._template())
        dualstack_endpoints = resolver.get_available_endpoints(
            'ec2', endpoint_variant_tags=['dualstack'])
        self.assertEqual(dualstack_endpoints, ['us-foo'])
    def test_get_available_fips_and_dualstack_endpoints(self):
        """Regions must carry both tags to satisfy a combined query."""
        resolver = regions.EndpointResolver(self._template())
        fips_and_dualstack_endpoints = resolver.get_available_endpoints(
            'ec2', endpoint_variant_tags=['fips', 'dualstack'])
        self.assertEqual(fips_and_dualstack_endpoints, ['us-foo'])
    def test_get_available_fips_endpoints_none(self):
        """An unknown partition yields an empty fips endpoint list."""
        resolver = regions.EndpointResolver(self._template())
        fips_endpoints = resolver.get_available_endpoints(
            'ec2', 'foo', endpoint_variant_tags=['fips'])
        self.assertEqual(fips_endpoints, [])
    def test_get_available_dualstack_endpoints_none(self):
        """An unknown partition yields an empty dualstack endpoint list."""
        resolver = regions.EndpointResolver(self._template())
        dualstack_endpoints = resolver.get_available_endpoints(
            'ec2', 'foo', endpoint_variant_tags=['dualstack'])
        self.assertEqual(dualstack_endpoints, [])
    def test_get_available_fips_and_dualstack_endpoints_none(self):
        """An unknown partition yields an empty list for combined tags."""
        resolver = regions.EndpointResolver(self._template())
        fips_and_dualstack_endpoints = resolver.get_available_endpoints(
            'ec2', 'foo', endpoint_variant_tags=['fips', 'dualstack'])
        self.assertEqual(fips_and_dualstack_endpoints, [])
    def test_construct_deprecated_endpoint_raises_warning(self):
        """A deprecated endpoint still resolves but logs a warning."""
        resolver = regions.EndpointResolver(self._template())
        with self.assertLogs('botocore.regions', level='WARNING') as log:
            result = resolver.construct_endpoint('ec2', 'us-dep',
                                                 use_fips_endpoint=True,
                                                 )
        self.assertIn('deprecated endpoint', log.output[0])
        self.assertEqual(result['hostname'], 'ec2-fips.us-dep.amazonaws.com')
        self.assertEqual(result['dnsSuffix'], 'amazonaws.com')
    def test_construct_fips_from_endpoint_variant(self):
        """FIPS hostname comes from the endpoint-level variant."""
        resolver = regions.EndpointResolver(self._template())
        result = resolver.construct_endpoint('ec2', 'us-foo',
                                             use_fips_endpoint=True)
        self.assertEqual(result['hostname'], 'ec2-fips.foo.amazonaws.com')
        self.assertEqual(result['dnsSuffix'], 'amazonaws.com')
    def test_construct_fips_endpoint_from_service_default_variant(self):
        """FIPS hostname falls back to the service default variant."""
        resolver = regions.EndpointResolver(self._template())
        result = resolver.construct_endpoint('ec2', 'us-bar',
                                             use_fips_endpoint=True)
        self.assertEqual(result['hostname'], 'ec2-fips.us-bar.amazonaws.com')
        self.assertEqual(result['dnsSuffix'], 'amazonaws.com')
    def test_construct_fips_endpoint_from_partition_default_variant(self):
        """FIPS hostname falls back to the partition default variant."""
        resolver = regions.EndpointResolver(self._template())
        result = resolver.construct_endpoint('dynamodb', 'us-bar',
                                             use_fips_endpoint=True)
        self.assertEqual(result['hostname'],
                         'dynamodb-fips.us-bar.amazonaws.com')
        self.assertEqual(result['dnsSuffix'], 'amazonaws.com')
    def test_constructs_fips_endpoint_no_hostname_in_variant(self):
        """A variant without a hostname uses the fips template."""
        resolver = regions.EndpointResolver(self._template())
        result = resolver.construct_endpoint('s3', 'us-fizz',
                                             use_fips_endpoint=True)
        self.assertEqual('s3-fips.us-fizz.amazonaws.com', result['hostname'])
    def test_construct_dualstack_and_fips_from_endpoint_variant(self):
        """Combined dualstack+fips hostname from the endpoint variant."""
        resolver = regions.EndpointResolver(self._template())
        result = resolver.construct_endpoint('ec2', 'us-foo',
                                             use_dualstack_endpoint=True,
                                             use_fips_endpoint=True)
        self.assertEqual(result['hostname'], 'ec2-fips.foo.api.aws')
        self.assertEqual(result['dnsSuffix'], 'api.aws')
    def test_construct_dualstack_and_fips_endpoint_from_service_default_variant(self):
        """Combined dualstack+fips hostname from the service default."""
        resolver = regions.EndpointResolver(self._template())
        result = resolver.construct_endpoint('ec2', 'us-bar',
                                             use_dualstack_endpoint=True,
                                             use_fips_endpoint=True)
        self.assertEqual(result['hostname'], 'ec2-fips.us-bar.api.aws')
        self.assertEqual(result['dnsSuffix'], 'api.aws')
    def test_construct_dualstack_and_fips_endpoint_from_partition_default_variant(self):
        """Combined dualstack+fips hostname from the partition default."""
        resolver = regions.EndpointResolver(self._template())
        result = resolver.construct_endpoint('dynamodb', 'us-bar',
                                             use_dualstack_endpoint=True,
                                             use_fips_endpoint=True)
        self.assertEqual(result['hostname'], 'dynamodb-fips.us-bar.api.aws')
        self.assertEqual(result['dnsSuffix'], 'api.aws')
    def test_constructs_dualstack_and_fips_endpoint_no_hostname_in_variant(self):
        """Combined variant without a hostname uses the default template."""
        resolver = regions.EndpointResolver(self._template())
        result = resolver.construct_endpoint('s3', 'us-fizz',
                                             use_dualstack_endpoint=True,
                                             use_fips_endpoint=True)
        self.assertEqual('s3-fips.dualstack.us-fizz.api.aws',
                         result['hostname'])
    def test_construct_fips_endpoint_no_variant_raise_exception(self):
        """Requesting fips in a partition with no variant must raise."""
        with self.assertRaises(EndpointVariantError):
            resolver = regions.EndpointResolver(self._template())
            resolver.construct_endpoint('ec2', 'foo-1', 'foo',
                                        use_fips_endpoint=True)
    def test_construct_dualstack_endpoint_no_variant_raise_exception(self):
        """Requesting dualstack in a partition with no variant must raise."""
        with self.assertRaises(EndpointVariantError):
            resolver = regions.EndpointResolver(self._template())
            resolver.construct_endpoint('ec2', 'foo-1', 'foo',
                                        use_dualstack_endpoint=True)
    def test_construct_dualstack_and_fips_endpoint_no_variant_raise_exception(self):
        """Requesting both variants in a bare partition must raise."""
        with self.assertRaises(EndpointVariantError):
            resolver = regions.EndpointResolver(self._template())
            resolver.construct_endpoint('ec2', 'foo-1', 'foo',
                                        use_dualstack_endpoint=True,
                                        use_fips_endpoint=True)
def _variant_test_definitions():
return [
{
"service": "default-pattern-service",
"region": "us-west-2",
"fips": False,
"dualstack": False,
"endpoint": "default-pattern-service.us-west-2.amazonaws.com"
},
{
"service": "default-pattern-service",
"region": "us-west-2",
"fips": True,
"dualstack": False,
"endpoint": "default-pattern-service-fips.us-west-2.amazonaws.com"
},
{
"service": "default-pattern-service",
"region": "af-south-1",
"fips": False,
"dualstack": False,
"endpoint": "default-pattern-service.af-south-1.amazonaws.com"
},
{
"service": "default-pattern-service",
"region": "af-south-1",
"fips": True,
"dualstack": False,
"endpoint": "default-pattern-service-fips.af-south-1.amazonaws.com"
},
{
"service": "global-service",
"region": "aws-global",
"fips": False,
"dualstack": False,
"endpoint": "global-service.amazonaws.com"
},
{
"service": "global-service",
"region": "aws-global",
"fips": True,
"dualstack": False,
"endpoint": "global-service-fips.amazonaws.com"
},
{
"service": "global-service",
"region": "foo",
"fips": False,
"dualstack": False,
"endpoint": "global-service.amazonaws.com"
},
{
"service": "global-service",
"region": "foo",
"fips": True,
"dualstack": False,
"endpoint": "global-service-fips.amazonaws.com"
},
{
"service": "override-variant-service",
"region": "us-west-2",
"fips": False,
"dualstack": False,
"endpoint": "override-variant-service.us-west-2.amazonaws.com"
},
{
"service": "override-variant-service",
"region": "us-west-2",
"fips": True,
"dualstack": False,
"endpoint": "fips.override-variant-service.us-west-2.new.dns.suffix"
},
{
"service": "override-variant-service",
"region": "af-south-1",
"fips": False,
"dualstack": False,
"endpoint": "override-variant-service.af-south-1.amazonaws.com"
},
{
"service": "override-variant-service",
"region": "af-south-1",
"fips": True,
"dualstack": False,
"endpoint": "fips.override-variant-service.af-south-1.new.dns.suffix"
},
{
"service": "override-variant-dns-suffix-service",
"region": "us-west-2",
"fips": False,
"dualstack": False,
"endpoint": "override-variant-dns-suffix-service.us-west-2.amazonaws.com"
},
{
"service": "override-variant-dns-suffix-service",
"region": "us-west-2",
"fips": True,
"dualstack": False,
"endpoint": "override-variant-dns-suffix-service-fips.us-west-2.new.dns.suffix"
},
{
"service": "override-variant-dns-suffix-service",
"region": "af-south-1",
"fips": False,
"dualstack": False,
"endpoint": "override-variant-dns-suffix-service.af-south-1.amazonaws.com"
},
{
"service": "override-variant-dns-suffix-service",
"region": "af-south-1",
"fips": True,
"dualstack": False,
"endpoint": "override-variant-dns-suffix-service-fips.af-south-1.new.dns.suffix"
},
{
"service": "override-variant-hostname-service",
"region": "us-west-2",
"fips": False,
"dualstack": False,
"endpoint": "override-variant-hostname-service.us-west-2.amazonaws.com"
},
{
"service": "override-variant-hostname-service",
"region": "us-west-2",
"fips": True,
"dualstack": False,
"endpoint": "fips.override-variant-hostname-service.us-west-2.amazonaws.com"
},
{
"service": "override-variant-hostname-service",
"region": "af-south-1",
"fips": False,
"dualstack": False,
"endpoint": "override-variant-hostname-service.af-south-1.amazonaws.com"
},
{
"service": "override-variant-hostname-service",
"region": "af-south-1",
"fips": True,
"dualstack": False,
"endpoint": "fips.override-variant-hostname-service.af-south-1.amazonaws.com"
},
{
"service": "override-endpoint-variant-service",
"region": "us-west-2",
"fips": False,
"dualstack": False,
"endpoint": "override-endpoint-variant-service.us-west-2.amazonaws.com"
},
{
"service": "override-endpoint-variant-service",
"region": "us-west-2",
"fips": True,
"dualstack": False,
"endpoint": "fips.override-endpoint-variant-service.us-west-2.amazonaws.com"
},
| |
<gh_stars>100-1000
# Copyright 2012 SINA Corporation
# Copyright 2014 Cisco Systems, Inc.
# All Rights Reserved.
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Sample configuration generator
Tool for generating a sample configuration file. See
../doc/source/cli/generator.rst for details.
.. versionadded:: 1.4
"""
import collections
import copy
import json
import logging
import operator
import sys
try:
# For oslo.config[rst-generator]
from docutils import core as docutils_core
from docutils.parsers.rst import nodes as docutils_nodes
from docutils.parsers.rst import roles as docutils_roles
import rst2txt
from sphinx import roles as sphinx_roles
except ImportError:
import textwrap
rst2txt = None
try:
# For Python 3.8 and later
import importlib.metadata as importlib_metadata
except ImportError:
# For everyone else
import importlib_metadata
import yaml
from oslo_config import cfg
from oslo_i18n import _message
import stevedore.named # noqa
# Module-level logger for the generator.
LOG = logging.getLogger(__name__)
# Group names conventionally written upper-case in sample config files.
UPPER_CASE_GROUP_NAMES = ['DEFAULT']
# CLI options understood by the sample-config generator itself.
_generator_opts = [
    cfg.StrOpt(
        'output-file',
        help='Path of the file to write to. Defaults to stdout.'),
    cfg.IntOpt(
        'wrap-width',
        default=70,
        min=0,
        help='The maximum length of help lines.'),
    cfg.MultiStrOpt(
        'namespace',
        required=True,
        help='Option namespace under "oslo.config.opts" in which to query '
        'for options.'),
    cfg.BoolOpt(
        'minimal',
        default=False,
        help='Generate a minimal required configuration.'),
    cfg.BoolOpt(
        'summarize',
        default=False,
        help='Only output summaries of help text to config files. Retain '
        'longer help text for Sphinx documents.'),
    cfg.StrOpt(
        'format',
        help='Desired format for the output.',
        default='ini',
        choices=[
            ('ini', 'The only format that can be used directly with '
             'oslo.config.'),
            ('json', 'Intended for third-party tools that want to write '
             'config files based on the sample config data.'),
            ('yaml', 'Same as json'),
            ('rst', 'Can be used to dump the text given to Sphinx when '
             'building documentation using the Sphinx extension. '
             'Useful for debugging,')
        ],
        # 'format' shadows a builtin-ish name, so store it as 'format_'.
        dest='format_'),
]
def register_cli_opts(conf):
    """Register the formatter's CLI options with a ConfigOpts instance.

    Note, this must be done before the ConfigOpts instance is called to parse
    the configuration.

    :param conf: a ConfigOpts instance
    :raises: DuplicateOptError, ArgsAlreadyParsedError
    """
    conf.register_cli_opts(_generator_opts)
def _format_defaults(opt):
"Return a list of formatted default values."
if isinstance(opt, cfg.MultiStrOpt):
if opt.sample_default is not None:
defaults = opt.sample_default
elif not opt.default:
defaults = ['']
else:
defaults = opt.default
else:
if opt.sample_default is not None:
default_str = str(opt.sample_default)
elif opt.default is None:
default_str = '<None>'
elif isinstance(opt, (cfg.StrOpt, cfg.IntOpt, cfg.FloatOpt, cfg.IPOpt,
cfg.PortOpt, cfg.HostnameOpt, cfg.HostAddressOpt,
cfg.URIOpt, cfg.Opt)):
default_str = str(opt.default)
elif isinstance(opt, cfg.BoolOpt):
default_str = str(opt.default).lower()
elif isinstance(opt, (cfg.ListOpt, cfg._ConfigFileOpt,
cfg._ConfigDirOpt)):
default_str = ','.join(str(d) for d in opt.default)
elif isinstance(opt, cfg.DictOpt):
sorted_items = sorted(opt.default.items(),
key=operator.itemgetter(0))
default_str = ','.join(['%s:%s' % i for i in sorted_items])
else:
LOG.warning('Unknown option type: %s', repr(opt))
default_str = str(opt.default)
defaults = [default_str]
results = []
for default_str in defaults:
if not isinstance(default_str, str):
default_str = str(default_str)
if default_str.strip() != default_str:
default_str = '"%s"' % default_str
results.append(default_str)
return results
_TYPE_NAMES = {
str: 'string value',
int: 'integer value',
float: 'floating point value',
}
def _format_type_name(opt_type):
"""Format the type name to use in describing an option"""
try:
return opt_type.type_name
except AttributeError: # nosec
pass
try:
return _TYPE_NAMES[opt_type]
except KeyError: # nosec
pass
return 'unknown value'
class _OptFormatter:
"""Format configuration option descriptions to a file."""
    def __init__(self, conf, output_file=None):
        """Construct an OptFormatter object.

        :param conf: The config object from _generator_opts
        :param output_file: a writeable file object
        """
        self.output_file = output_file or sys.stdout
        # wrap-width of 0 disables wrapping; substitute a huge width.
        self.wrap_width = conf.wrap_width or 998  # arbitrary line length
        self.minimal = conf.minimal
        self.summarize = conf.summarize
        if rst2txt:
            # rst2txt imported successfully at module load, so help text
            # will be rendered as reStructuredText; register the roles
            # that Sphinx would normally provide so parsing succeeds.
            # register the default Sphinx roles
            for rolename, nodeclass in sphinx_roles.generic_docroles.items():
                generic = docutils_roles.GenericRole(rolename, nodeclass)
                docutils_roles.register_local_role(rolename, generic)
            for rolename, func in sphinx_roles.specific_docroles.items():
                docutils_roles.register_local_role(rolename, func)
            # plus our custom roles
            for rolename in ('oslo.config:option', 'oslo.config:group'):
                generic = docutils_roles.GenericRole(rolename,
                                                     docutils_nodes.strong)
                docutils_roles.register_local_role(rolename, generic)
    def _format_help(self, help_text):
        """Format the help for a group or option to the output file.

        :param help_text: The text of the help string
        :returns: a list of '#'-prefixed comment strings, ready to be
            passed to writelines()
        """
        if rst2txt:
            # Render the reST help through docutils into plain text.
            help_text = docutils_core.publish_string(
                source=help_text,
                writer=rst2txt.Writer(),
                settings_overrides={'wrap_width': self.wrap_width}
            ).decode()
            lines = ''
            for line in help_text.splitlines():
                # Blank lines become a bare '#' so paragraphs stay visible.
                lines += '# ' + line + '\n' if line else '#\n'
            lines = [lines]
        elif self.wrap_width > 0:
            # No reST support: wrap each line manually with textwrap
            # (imported in the ImportError fallback at the top of the file).
            wrapped = ""
            for line in help_text.splitlines():
                text = "\n".join(textwrap.wrap(line, self.wrap_width,
                                               initial_indent='# ',
                                               subsequent_indent='# ',
                                               break_long_words=False,
                                               replace_whitespace=False))
                wrapped += "#" if text == "" else text
                wrapped += "\n"
            lines = [wrapped]
        else:
            # Wrapping disabled: emit the help as a single comment line.
            lines = ['# ' + help_text + '\n']
        return lines
def _get_choice_text(self, choice):
if choice is None:
return '<None>'
elif choice == '':
return "''"
return str(choice)
def format_group(self, group_or_groupname):
"""Format the description of a group header to the output file
:param group_or_groupname: a cfg.OptGroup instance or a name of group
:returns: a formatted group description string
"""
if isinstance(group_or_groupname, cfg.OptGroup):
group = group_or_groupname
lines = ['[%s]\n' % group.name]
if group.help:
lines += self._format_help(group.help)
else:
groupname = group_or_groupname
lines = ['[%s]\n' % groupname]
self.writelines(lines)
def format(self, opt, group_name):
"""Format a description of an option to the output file.
:param opt: a cfg.Opt instance
:param group_name: name of the group to which the opt is assigned
:returns: a formatted opt description string
"""
if not opt.help:
LOG.warning('"%s" is missing a help string', opt.dest)
opt_type = _format_type_name(opt.type)
opt_prefix = ''
if (opt.deprecated_for_removal and
not opt.help.startswith('DEPRECATED')):
opt_prefix = 'DEPRECATED: '
if opt.help:
# an empty line signifies a new paragraph. We only want the
# summary line
if self.summarize:
_split = opt.help.split('\n\n')
opt_help = _split[0].rstrip(':').rstrip('.')
if len(_split) > 1:
opt_help += '. For more information, refer to the '
opt_help += 'documentation.'
else:
opt_help = opt.help
help_text = '%s%s (%s)' % (opt_prefix, opt_help, opt_type)
else:
help_text = '(%s)' % opt_type
lines = self._format_help(help_text)
if getattr(opt.type, 'min', None) is not None:
lines.append('# Minimum value: {}\n'.format(opt.type.min))
if getattr(opt.type, 'max', None) is not None:
lines.append('# Maximum value: {}\n'.format(opt.type.max))
if getattr(opt.type, 'choices', None):
lines.append('# Possible values:\n')
for choice in opt.type.choices:
help_text = '%s - %s' % (
self._get_choice_text(choice),
opt.type.choices[choice] or '<No description provided>')
lines.extend(self._format_help(help_text))
try:
if opt.mutable:
lines.append(
'# Note: This option can be changed without restarting.\n'
)
except AttributeError as err:
# NOTE(dhellmann): keystoneauth defines its own Opt class,
# and neutron (at least) returns instances of those
# classes instead of oslo_config Opt instances. The new
# mutable attribute is the first property where the API
# isn't supported in the external class, so we can use
# this failure to emit a warning. See
# https://bugs.launchpad.net/keystoneauth/+bug/1548433 for
# more details.
import warnings
if not isinstance(opt, cfg.Opt):
warnings.warn(
'Incompatible option class for %s (%r): %s' %
(opt.dest, opt.__class__, err),
)
else:
warnings.warn('Failed to fully format sample for %s: %s' %
(opt.dest, err))
for d in opt.deprecated_opts:
# NOTE(bnemec): opt names with a - are not valid in a config file,
# but it is possible to add a DeprecatedOpt with a - name. We
# want to ignore those as they won't work anyway.
if d.name and '-' not in d.name:
lines.append('# Deprecated group/name - [%s]/%s\n' %
(d.group or group_name, d.name or opt.dest))
if opt.deprecated_for_removal:
if opt.deprecated_since:
lines.append(
'# This option is deprecated for removal since %s.\n' % (
opt.deprecated_since))
else:
lines.append(
'# This option is deprecated for removal.\n')
lines.append(
'# Its value may be silently ignored in the future.\n')
if opt.deprecated_reason:
lines.extend(
self._format_help('Reason: ' + opt.deprecated_reason))
if opt.advanced:
lines.append(
'# Advanced Option: intended for advanced users and not used\n'
'# by the majority of users, and might have a significant\n'
'# effect on stability and/or performance.\n'
)
if opt.sample_default:
lines.append(
'#\n'
'# This option has a sample default set, which means that\n'
'# its actual default value may vary from the one documented\n'
'# below.\n'
)
if hasattr(opt.type, 'format_defaults'):
defaults = opt.type.format_defaults(opt.default,
opt.sample_default)
else:
LOG.debug(
"The type for option %(name)s which is %(type)s is not a "
"subclass of types.ConfigType and doesn't provide a "
"'format_defaults' method. A default formatter is not "
"available so the | |
then the unit of the
returned number will be the product of the units in self.unit
and unit. For example, if the flux units are counts/s, and
unit=u.angstrom, then the integrated flux will have units
counts*Angstrom/s.
Finally, if unit is None, then the units of the returned
number will be the product of self.unit and the units of the
wavelength axis of the spectrum (ie. self.wave.unit).
The result of the integration is returned as an astropy
Quantity, which holds the integrated value and its physical
units. The units of the returned number can be determined
from the .unit attribute of the return value. Alternatively
the returned value can be converted to another unit, using the
to() method of astropy quantities.
Parameters
----------
lmin : float
The minimum wavelength of the range to be integrated,
or None (the default), to select the minimum wavelength
of the first pixel of the spectrum. If this is below the
minimum wavelength of the spectrum, the integration
behaves as though the flux in the first pixel extended
down to that wavelength.
If the unit argument is None, lmin is a pixel index, and
the wavelength of the center of this pixel is used as the
lower wavelength of the integration.
lmax : float
The maximum wavelength of the range to be integrated,
or None (the default), to select the maximum wavelength
of the last pixel of the spectrum. If this is above the
maximum wavelength of the spectrum, the integration
behaves as though the flux in the last pixel extended
up to that wavelength.
If the unit argument is None, lmax is a pixel index, and
the wavelength of the center of this pixel is used as the
upper wavelength of the integration.
unit : `astropy.units.Unit`
The wavelength units of lmin and lmax, or None to indicate
that lmin and lmax are pixel indexes.
Returns
-------
out : `astropy.units.Quantity`, `astropy.units.Quantity`
The result of the integration and its error, expressed as
a floating point number with accompanying units. The integrated
value and its physical units can be extracted using the .value and
.unit attributes of the returned quantity. The value can also be
converted to different units, using the .to() method of the
returned objected.
"""
# Get the index of the first pixel within the wavelength range,
# and the minimum wavelength of the integration.
if lmin is None:
i1 = 0
lmin = self.wave.coord(-0.5, unit=unit)
else:
if unit is None:
l1 = lmin
lmin = self.wave.coord(max(-0.5, l1))
else:
l1 = self.wave.pixel(lmin, False, unit)
i1 = max(0, int(l1))
# Get the index of the last pixel within the wavelength range, plus
# 1, and the maximum wavelength of the integration.
if lmax is None:
i2 = self.shape[0]
lmax = self.wave.coord(i2 - 0.5, unit=unit)
else:
if unit is None:
l2 = lmax
lmax = self.wave.coord(min(self.shape[0] - 0.5, l2))
else:
l2 = self.wave.pixel(lmax, False, unit)
i2 = min(self.shape[0], int(l2) + 1)
# Get the lower wavelength of each pixel, including one extra
# pixel at the end of the range.
d = self.wave.coord(-0.5 + np.arange(i1, i2 + 1), unit=unit)
# Change the wavelengths of the first and last pixels to
# truncate or extend those pixels to the starting and ending
# wavelengths of the spectrum.
d[0] = lmin
d[-1] = lmax
if unit is None:
unit = self.wave.unit
# Get the data of the subspectrum covered by the integration.
data = self.data[i1:i2]
# If the spectrum has been calibrated, the flux units will be
# per angstrom, per nm, per um etc. If these wavelength units
# don't match the units of the wavelength axis of the
# integration, then although the results will be correct, they
# will have inconvenient units. In such cases attempt to
# convert the units of the wavelength axis to match the flux
# units.
if unit in self.unit.bases: # The wavelength units already agree.
out_unit = self.unit * unit
else:
try:
# Attempt to determine the wavelength units of the flux density
wunit = (set(self.unit.bases) &
set([u.pm, u.angstrom, u.nm, u.um])).pop()
# Scale the wavelength axis to have the same wavelength units.
d *= unit.to(wunit)
# Get the final units of the integration.
out_unit = self.unit * wunit
# If the wavelength units of the flux weren't recognized,
# simply return the units unchanged.
except Exception:
out_unit = self.unit * unit
# Integrate the spectrum by multiplying the value of each pixel
# by the difference in wavelength from the start of that pixel to
# the start of the next pixel.
flux = (data * np.diff(d)).sum() * out_unit
if self.var is None:
err_flux = np.inf
else:
err_flux = np.sqrt((self.var[i1:i2] * np.diff(d)**2).sum())
return (flux, err_flux * out_unit)
    def poly_fit(self, deg, weight=True, maxiter=0,
                 nsig=(-3.0, 3.0), verbose=False):
        """Perform polynomial fit on normalized spectrum and returns polynomial
        coefficients.

        Parameters
        ----------
        deg : int
            Polynomial degree.
        weight : bool
            If weight is True, the weight is computed as the inverse of
            variance.
        maxiter : int
            Maximum allowed iterations (0)
        nsig : (float,float)
            The low and high rejection factor in std units (-3.0,3.0)

        Returns
        -------
        out : ndarray, shape.
            Polynomial coefficients ordered from low to high.
        """
        if self.shape[0] <= deg + 1:
            raise ValueError('Too few points to perform polynomial fit')
        # Without variances there is nothing to weight by.
        if self._var is None:
            weight = False
        if weight:
            # 1/sigma weighting; masked variances fill with inf, giving
            # those points zero weight.
            vec_weight = 1.0 / np.sqrt(np.abs(self.var.filled(np.inf)))
        else:
            vec_weight = None
        if self._mask is np.ma.nomask:
            d = self._data
            w = self.wave.coord()
        else:
            # Drop masked pixels from the data, wavelengths and weights.
            mask = ~self._mask
            d = self._data[mask]
            w = self.wave.coord()[mask]
            if weight:
                vec_weight = vec_weight[mask]
        # normalize w
        w0 = np.min(w)
        dw = np.max(w) - w0
        w = (w - w0) / dw
        p = np.polynomial.polynomial.polyfit(w, d, deg, w=vec_weight)
        if maxiter > 0:
            # Iterative sigma-clipping: reject outliers and refit.
            err = d - np.polynomial.polynomial.polyval(w, p)
            sig = np.std(err)
            n_p = len(d)
            for it in range(maxiter):
                # NOTE(review): the window mixes a signed lower bound with
                # an absolute-value upper bound, and from the second
                # iteration on ``ind`` indexes the previous *subset* of
                # ``err`` while being applied to the full ``w``/``d``
                # arrays — confirm both against the intended clipping.
                ind = np.where((err >= nsig[0] * sig) &
                               (np.abs(err) <= nsig[1] * sig))
                if len(ind[0]) == n_p:
                    # No further points rejected: converged.
                    break
                if len(ind[0]) <= deg + 1:
                    raise ValueError('Too few points to perform '
                                     'polynomial fit')
                if vec_weight is not None:
                    vec_weight = vec_weight[ind]
                p = np.polynomial.polynomial.polyfit(w[ind], d[ind],
                                                     deg, w=vec_weight)
                err = d[ind] - np.polynomial.polynomial.polyval(w[ind], p)
                sig = np.std(err)
                n_p = len(ind[0])
                if verbose:
                    self._logger.info('Number of iteration: %d Std: %10.4e '
                                      'Np: %d Frac: %4.2f', it + 1, sig, n_p,
                                      100. * n_p / self.shape[0])
        return p
def poly_val(self, z):
"""Update in place the spectrum data from polynomial coefficients.
Uses `numpy.poly1d`.
Parameters
----------
z : array
The polynomial coefficients, in increasing powers:
data = z0 + z1(lbda-min(lbda))/(max(lbda)-min(lbda)) + ...
+ zn ((lbda-min(lbda))/(max(lbda)-min(lbda)))**n
"""
l = self.wave.coord()
w0 = np.min(l)
dw = np.max(l) - w0
w = (l - w0) / dw
self._data = np.polynomial.polynomial.polyval(w, z)
if self._mask is not np.ma.nomask:
self._mask = ~(np.isfinite(self._data))
self._var = None
def poly_spec(self, deg, weight=True, maxiter=0,
nsig=(-3.0, 3.0), verbose=False):
"""Return a spectrum containing a polynomial fit.
Parameters
----------
deg : int
Polynomial degree.
weight : bool
If weight is True, the weight is computed as the inverse of
variance.
maxiter : int
Maximum allowed iterations (0)
nsig : (float,float)
The low and high rejection factor in std units (-3.0,3.0)
Returns
-------
out : Spectrum
"""
z = self.poly_fit(deg, weight, maxiter, nsig, verbose)
res = self.clone()
res.poly_val(z)
return res
def abmag_band(self, lbda, dlbda):
"""Compute AB magnitude corresponding to the wavelength band.
Parameters
----------
lbda : float
Mean wavelength in Angstrom.
dlbda : float
Width of the wavelength band in Angstrom.
Returns
-------
out : float, float
Magnitude value and its error
"""
vflux, err_flux = self.mean(lmin=lbda - dlbda / 2.0,
lmax=lbda + dlbda / 2.0,
weight=None, unit=u.angstrom)
if vflux == 0:
return (99, 0)
else:
unit = u.Unit('erg.s-1.cm-2.Angstrom-1')
vflux2 = (vflux * self.unit).to(unit)
err_flux2 = (err_flux * self.unit).to(unit)
return flux2mag(vflux2.value, err_flux2.value, lbda)
def abmag_filter_name(self, name):
"""Compute AB magnitude using the filter name.
Parameters
----------
name : | |
value for media image."""
if self._albumart_url:
return super().media_image_hash
if self._albumart:
return hashlib.md5(self._albumart).hexdigest()[:5]
return None
    @property
    def source(self):
        """Name of the current input source."""
        # Backing attribute is maintained by state handlers elsewhere
        # in this class (not visible in this section).
        return self._source
    @property
    def source_list(self):
        """List of available input sources."""
        # Presumably populated from configuration at setup — confirm.
        return self._source_list
    @property
    def shuffle(self):
        """Boolean if shuffle is enabled."""
        return self._shuffle
    @property
    def repeat(self):
        """Return current repeat mode."""
        return self._repeat
    @property
    def sound_mode(self):
        """Name of the current sound mode."""
        return self._soundmode
    @property
    def sound_mode_list(self):
        """List of available sound modes."""
        return self._soundmode_list
    @property
    def is_master(self):
        """Return true if this player is a multiroom group master."""
        return self._isGroupMaster
async def async_get_media_image(self):
"""Fetch media image of current playing image."""
if self._albumart_url:
return await super().async_get_media_image()
if self._albumart:
return (self._albumart, "image/jpeg")
return None, None
    async def async_turn_on(self):
        """Turn the media player on."""
        # Delegates to the user-configured script; no-op when unset.
        if self._power_on_script:
            await self._power_on_script.async_run(context=self._context)
    async def async_turn_off(self):
        """Turn the media player off."""
        # Delegates to the user-configured script; no-op when unset.
        if self._power_off_script:
            await self._power_off_script.async_run(context=self._context)
async def async_volume_up(self):
"""Volume up the media player."""
if self._vol_up_script:
await self._vol_up_script.async_run(context=self._context)
else:
newvolume = min(self._volume + 0.05, 1)
self._volume = newvolume
await self.async_set_volume_level(newvolume)
async def async_volume_down(self):
"""Volume down media player."""
if self._vol_down_script:
await self._vol_down_script.async_run(context=self._context)
else:
newvolume = max(self._volume - 0.05, 0)
self._volume = newvolume
await self.async_set_volume_level(newvolume)
async def async_set_volume_level(self, volume):
"""Set volume level."""
if self._vol_down_script or self._vol_up_script:
return
if self._vol_script:
await self._vol_script.async_run(
{"volume": volume * (self._maxvolume - self._minvolume) + self._minvolume}, context=self._context
)
self._volume = volume
    async def async_mute_volume(self, mute):
        """Mute or unmute via the configured scripts.

        :param mute: True to mute, False to unmute. Each direction is a
            no-op when its script is not configured.
        """
        if mute:
            if self._vol_mute_script:
                await self._vol_mute_script.async_run(context=self._context)
        else:
            if self._vol_unmute_script:
                await self._vol_unmute_script.async_run(context=self._context)
async def async_media_play_pause(self):
"""Simulate play pause media player."""
if self._state == STATE_PLAYING:
await self.async_media_pause()
else:
await self.async_media_play()
    async def async_media_play(self):
        """Send play command."""
        if self._play_script:
            await self._play_script.async_run(context=self._context)
        # NOTE(review): state is updated optimistically, even when no
        # play script is configured — confirm this is intended.
        self._state = STATE_PLAYING
    async def async_media_pause(self):
        """Send media pause command to media player."""
        if self._pause_script:
            await self._pause_script.async_run(context=self._context)
        # Optimistic state update (see async_media_play).
        self._state = STATE_PAUSED
    async def async_media_stop(self):
        """Send media stop command to media player."""
        if self._stop_script:
            await self._stop_script.async_run(context=self._context)
        # Optimistic state update (see async_media_play).
        self._state = STATE_IDLE
    async def async_media_next_track(self):
        """Send next track command."""
        # No-op when the script is not configured.
        if self._next_script:
            await self._next_script.async_run(context=self._context)
    async def async_media_previous_track(self):
        """Send the previous track command."""
        # No-op when the script is not configured.
        if self._previous_script:
            await self._previous_script.async_run(context=self._context)
async def async_media_seek(self, position):
    """Send seek command."""
    if not self._seek_script:
        return
    await self._seek_script.async_run(
        {"position": position}, context=self._context
    )
async def async_select_source(self, source):
    """Select input source."""
    if not self._select_source_script:
        return
    await self._select_source_script.async_run(
        {"source": source}, context=self._context
    )
async def async_select_sound_mode(self, sound_mode):
    """Select sound mode."""
    if not self._select_soundmode_script:
        return
    await self._select_soundmode_script.async_run(
        {"soundmode": sound_mode}, context=self._context
    )
async def async_set_shuffle(self, shuffle):
    """Enable/disable shuffle mode."""
    if not self._shuffle_script:
        return
    await self._shuffle_script.async_run(
        {"shuffle": shuffle}, context=self._context
    )
async def async_set_repeat(self, repeat):
    """Set repeat mode."""
    if not self._repeat_script:
        return
    await self._repeat_script.async_run(
        {"repeat": repeat}, context=self._context
    )
async def async_play_media(self, media_type, media_id, **kwargs):
    """Send the play_media command to the media player."""
    # Extra kwargs (enqueue/announce) are accepted but not forwarded.
    if not self._play_media_script:
        return
    await self._play_media_script.async_run(
        {"content_type": media_type, "content_id": media_id},
        context=self._context,
    )
# Multiroom
async def async_added_to_hass(self):
    """Run when the entity is added to hass: register it and subscribe."""
    await super().async_added_to_hass()
    # Register in the integration-wide entity list so multiroom group
    # lookups (see `refresh_group`) can find this player.
    self.hass.data[DOMAIN].entities.append(self)
    # Start listening on the configured MQTT state topics.
    await self._subscribe_topics()
async def async_will_remove_from_hass(self):
    """Unsubscribe when removed."""
    # Tear down the MQTT subscriptions created in `_subscribe_topics`.
    # NOTE(review): the entity is not removed from
    # hass.data[DOMAIN].entities here, although `async_added_to_hass`
    # appended it — possible stale reference after removal; confirm.
    self._sub_state = await subscription.async_unsubscribe_topics(
        self.hass, self._sub_state
    )
async def async_join(self, client_entities):
    """Join the given client entities into this player's multiroom group."""
    if not self._join_script:
        return
    client_ids = [entity.multiroom_id for entity in client_entities]
    _LOGGER.debug("Join script: %s", client_ids)
    await self._join_script.async_run(
        {"master_id": self.multiroom_id, "client_ids": client_ids},
        context=self._context,
    )
async def async_unjoin(self):
    """Remove this player from its multiroom group."""
    if not self._unjoin_script:
        return
    await self._unjoin_script.async_run(
        {"client_id": self.multiroom_id}, context=self._context
    )
@property
def multiroom_id(self):
    """Return this player's multiroom identifier.

    NOTE(review): the original docstring said "ip address of the device";
    the value is whatever was configured as the multiroom id (possibly the
    device IP) — confirm against the config schema.
    """
    return self._multiroomid
@property
def musiccast_group(self):
    """Return the list of entities in the group (this entity first,
    as assembled by `refresh_group`)."""
    return self._multiroom_group
def refresh_group(self):
    """Refresh the entities that are part of the group."""
    _LOGGER.debug("Refreshing group data for entity: %s", self.entity_id)
    # Re-resolve the configured group ids against every registered player.
    all_players = self.hass.data[DOMAIN].entities
    members = [
        player
        for player in all_players
        if player.multiroom_id in self._multiroom_groupIds
    ]
    self._multiroom_group = [self] + members
    if MQTTMediaPlayer:
        self.schedule_update_ha_state(False)
@property
def extra_state_attributes(self):
    """Return entity specific state attributes."""
    group_members = [member.entity_id for member in self._multiroom_group]
    return {
        ATTR_MQTTMULTIROOM_GROUP: group_members,
        ATTR_MINVOLUME: self._minvolume,
        ATTR_MAXVOLUME: self._maxvolume,
    }
async def _subscribe_topics(self):
"""(Re)Subscribe to topics."""
topics = {}
@callback
@log_messages(self.hass, self.entity_id)
def power_received(msg):
"""Handle new received MQTT message."""
payload = self._templates[POWER](msg.payload)
self._power = payload
if MQTTMediaPlayer:
self.schedule_update_ha_state(True)
if self._topic[POWER_TOPIC] is not None:
topics[POWER_TOPIC] = {
"topic": self._topic[POWER_TOPIC],
"msg_callback": power_received,
"qos": self._config[CONF_QOS],
}
@callback
@log_messages(self.hass, self.entity_id)
def playerstatus_received(msg):
"""Handle new received MQTT message."""
payload = self._templates[PLAYERSTATUS](msg.payload)
self._mqtt_player_state = payload
if MQTTMediaPlayer:
self.schedule_update_ha_state(True)
if self._topic[PLAYERSTATUS_TOPIC] is not None:
topics[PLAYERSTATUS_TOPIC] = {
"topic": self._topic[PLAYERSTATUS_TOPIC],
"msg_callback": playerstatus_received,
"qos": self._config[CONF_QOS],
}
@callback
@log_messages(self.hass, self.entity_id)
def source_received(msg):
"""Handle new received MQTT message."""
payload = self._templates[SOURCE](msg.payload)
self._source = payload
if MQTTMediaPlayer:
self.schedule_update_ha_state(False)
if self._topic[SOURCE_TOPIC] is not None:
topics[SOURCE_TOPIC] = {
"topic": self._topic[SOURCE_TOPIC],
"msg_callback": source_received,
"qos": self._config[CONF_QOS],
}
@callback
@log_messages(self.hass, self.entity_id)
def sourcelist_received(msg):
"""Handle new received MQTT message."""
payload = self._templates[SOURCELIST](msg.payload)
if(isinstance(payload, str)):
self._source_list = json.loads(payload)
if(isinstance(payload, list)):
self._source_list = payload
if MQTTMediaPlayer:
self.schedule_update_ha_state(False)
if self._topic[SOURCELIST_TOPIC] is not None:
topics[SOURCELIST_TOPIC] = {
"topic": self._topic[SOURCELIST_TOPIC],
"msg_callback": sourcelist_received,
"qos": self._config[CONF_QOS],
}
@callback
@log_messages(self.hass, self.entity_id)
def soundmode_received(msg):
"""Handle new received MQTT message."""
payload = self._templates[SOUNDMODE](msg.payload)
self._soundmode = payload
if MQTTMediaPlayer:
self.schedule_update_ha_state(False)
if self._topic[SOUNDMODE_TOPIC] is not None:
topics[SOUNDMODE_TOPIC] = {
"topic": self._topic[SOUNDMODE_TOPIC],
"msg_callback": soundmode_received,
"qos": self._config[CONF_QOS],
}
@callback
@log_messages(self.hass, self.entity_id)
def soundmodelist_received(msg):
"""Handle new received MQTT message."""
payload = self._templates[SOUNDMODELIST](msg.payload)
if(isinstance(payload, str)):
self._soundmode_list = json.loads(payload)
if(isinstance(payload, list)):
self._soundmode_list = payload
if MQTTMediaPlayer:
self.schedule_update_ha_state(False)
if self._topic[SOUNDMODELIST_TOPIC] is not None:
topics[SOUNDMODELIST_TOPIC] = {
"topic": self._topic[SOUNDMODELIST_TOPIC],
"msg_callback": soundmodelist_received,
"qos": self._config[CONF_QOS],
}
@callback
@log_messages(self.hass, self.entity_id)
def shuffle_received(msg):
"""Handle new received MQTT message."""
payload = self._templates[SHUFFLE](msg.payload)
self._shuffle = payload
if MQTTMediaPlayer:
self.schedule_update_ha_state(False)
if self._topic[SHUFFLE_TOPIC] is not None:
topics[SHUFFLE_TOPIC] = {
"topic": self._topic[SHUFFLE_TOPIC],
"msg_callback": shuffle_received,
"qos": self._config[CONF_QOS],
}
@callback
@log_messages(self.hass, self.entity_id)
def repeat_received(msg):
"""Handle new received MQTT message."""
payload = self._templates[REPEAT](msg.payload)
self._repeat = payload
if MQTTMediaPlayer:
self.schedule_update_ha_state(False)
if self._topic[REPEAT_TOPIC] is not None:
topics[REPEAT_TOPIC] = {
"topic": self._topic[REPEAT_TOPIC],
"msg_callback": repeat_received,
"qos": self._config[CONF_QOS],
}
@callback
@log_messages(self.hass, self.entity_id)
def volume_received(msg):
"""Handle new received MQTT message."""
payload = self._templates[VOL](msg.payload)
if isinstance(payload, int):
self._volume = int(payload)
if isinstance(payload, str):
try:
self._volume = float(payload)
except:
pass
self._volume = (self._volume - self._minvolume) / (self._maxvolume - self._minvolume)
if MQTTMediaPlayer:
self.schedule_update_ha_state(False)
if self._topic[VOL_TOPIC] is not None:
topics[VOL_TOPIC] = {
"topic": self._topic[VOL_TOPIC],
"msg_callback": volume_received,
"qos": self._config[CONF_QOS],
}
@callback
@log_messages(self.hass, self.entity_id)
def minvolume_received(msg):
"""Handle new received MQTT message."""
payload = self._templates[MINVOL](msg.payload)
if isinstance(payload, int):
self._minvolume = int(payload)
if isinstance(payload, str):
try:
self._minvolume = float(payload)
except:
pass
if MQTTMediaPlayer:
self.schedule_update_ha_state(False)
if self._topic[MINVOL_TOPIC] is not None:
topics[MINVOL_TOPIC] = {
"topic": self._topic[MINVOL_TOPIC],
"msg_callback": minvolume_received,
"qos": self._config[CONF_QOS],
}
@callback
@log_messages(self.hass, self.entity_id)
def maxvolume_received(msg):
"""Handle new received MQTT message."""
payload = self._templates[MAXVOL](msg.payload)
if isinstance(payload, int):
self._maxvolume = int(payload)
if isinstance(payload, str):
try:
self._maxvolume = float(payload)
except:
pass
if MQTTMediaPlayer:
self.schedule_update_ha_state(False)
if self._topic[MAXVOL_TOPIC] is not None:
topics[MAXVOL_TOPIC] = {
"topic": self._topic[MAXVOL_TOPIC],
"msg_callback": maxvolume_received,
"qos": self._config[CONF_QOS],
}
@callback
@log_messages(self.hass, self.entity_id)
def mute_received(msg):
"""Handle new received MQTT message."""
payload = self._templates[MUTE](msg.payload)
self._mute = payload
if MQTTMediaPlayer:
self.schedule_update_ha_state(False)
if self._topic[MUTE_TOPIC] is not None:
topics[MUTE_TOPIC] = {
"topic": self._topic[MUTE_TOPIC],
"msg_callback": mute_received,
"qos": self._config[CONF_QOS],
}
@callback
@log_messages(self.hass, self.entity_id)
def title_received(msg):
"""Handle new received MQTT message."""
payload = self._templates[MEDIA_TITLE](msg.payload)
self._track_name = payload
if MQTTMediaPlayer:
self.schedule_update_ha_state(False)
if self._topic[MEDIA_TITLE_TOPIC] is not None:
topics[MEDIA_TITLE_TOPIC] = {
"topic": self._topic[MEDIA_TITLE_TOPIC],
"msg_callback": title_received,
"qos": self._config[CONF_QOS],
}
@callback
@log_messages(self.hass, self.entity_id)
def artist_received(msg):
"""Handle new received MQTT message."""
payload = self._templates[MEDIA_ARTIST](msg.payload)
self._track_artist = payload
if MQTTMediaPlayer:
self.schedule_update_ha_state(False)
if self._topic[MEDIA_ARTIST_TOPIC] is not None:
topics[MEDIA_ARTIST_TOPIC] = {
"topic": self._topic[MEDIA_ARTIST_TOPIC],
"msg_callback": artist_received,
"qos": self._config[CONF_QOS],
}
@callback
@log_messages(self.hass, self.entity_id)
def album_received(msg):
"""Handle new received MQTT message."""
payload = self._templates[MEDIA_ALBUM](msg.payload)
self._track_album_name = payload
if MQTTMediaPlayer:
self.schedule_update_ha_state(False)
if self._topic[MEDIA_ALBUM_TOPIC] is not None:
topics[MEDIA_ALBUM_TOPIC] = {
"topic": self._topic[MEDIA_ALBUM_TOPIC],
"msg_callback": album_received,
"qos": self._config[CONF_QOS],
}
@callback
@log_messages(self.hass, self.entity_id)
def position_received(msg):
"""Handle new received MQTT message."""
lastUpdate = util.dt.utcnow()
payload = self._templates[MEDIA_POSITION](msg.payload)
if isinstance(payload, int):
self._media_position = payload
if isinstance(payload, str):
try:
self._media_position = int(payload)
except:
pass
self._media_position_last_update = lastUpdate
if MQTTMediaPlayer:
self.schedule_update_ha_state(False)
if self._topic[MEDIA_POSITION_TOPIC] is not None:
topics[MEDIA_POSITION_TOPIC] = {
"topic": self._topic[MEDIA_POSITION_TOPIC],
"msg_callback": position_received,
"qos": self._config[CONF_QOS],
}
@callback
@log_messages(self.hass, self.entity_id)
def duration_received(msg):
"""Handle new received MQTT message."""
payload = self._templates[MEDIA_DURATION](msg.payload)
if isinstance(payload, int):
self._media_duration = payload
if isinstance(payload, str):
try:
self._media_duration = int(payload)
except:
pass
if MQTTMediaPlayer:
self.schedule_update_ha_state(False)
if self._topic[MEDIA_DURATION_TOPIC] is not None:
topics[MEDIA_DURATION_TOPIC] = {
"topic": self._topic[MEDIA_DURATION_TOPIC],
"msg_callback": duration_received,
"qos": self._config[CONF_QOS],
}
@callback
@log_messages(self.hass, self.entity_id)
def albumart_received(msg):
"""Handle new received MQTT message."""
payload = self._templates[ALBUMART](msg.payload)
self._albumart = base64.b64decode(payload.replace("\n", ""))
| |
self.init = init
self.in_ch = None
self.out_ch = out_ch
self.epsilon = epsilon
self.stride1 = stride1
self.stride2 = stride2
self.optimizer = optimizer
self.momentum = momentum
self.kernel_shape1 = kernel_shape1
self.kernel_shape2 = kernel_shape2
self.act_fn = Affine(slope=1, intercept=0) if act_fn is None else act_fn
self._init_params()
def _init_params(self):
    """Create the module's component layers (all except `conv2`)."""
    self._dv = {}
    # First convolution in the main path, followed by its batchnorm.
    self.conv1 = Conv2D(
        out_ch=self.out_ch,
        kernel_shape=self.kernel_shape1,
        stride=self.stride1,
        act_fn=self.act_fn,
        pad="same",
        init=self.init,
        optimizer=self.optimizer,
    )
    # we can't initialize `conv2` without X's dimensions; see `forward`
    # for further details
    self.batchnorm1 = BatchNorm2D(epsilon=self.epsilon, momentum=self.momentum)
    self.batchnorm2 = BatchNorm2D(epsilon=self.epsilon, momentum=self.momentum)
    self.add3 = Add(self.act_fn)
def _init_conv2(self):
    """Build `conv2` once the input channel count (`in_ch`) is known."""
    # Identity activation here: the module-level `act_fn` is applied by
    # `add3` after the skip addition.
    self.conv2 = Conv2D(
        out_ch=self.in_ch,
        kernel_shape=self.kernel_shape2,
        stride=self.stride2,
        pad="same",
        act_fn=Affine(slope=1, intercept=0),
        init=self.init,
        optimizer=self.optimizer,
    )
@property
def parameters(self):
    """A dictionary of the module parameters."""
    components = {
        "add3": self.add3.parameters,
        "conv1": self.conv1.parameters,
        "conv2": self.conv2.parameters,
        "batchnorm1": self.batchnorm1.parameters,
        "batchnorm2": self.batchnorm2.parameters,
    }
    return {"components": components}
@property
def hyperparameters(self):
    """A dictionary of the module hyperparameters."""
    hp = {
        "layer": "SkipConnectionIdentityModule",
        "init": self.init,
        "in_ch": self.in_ch,
        "out_ch": self.out_ch,
        "epsilon": self.epsilon,
        "stride1": self.stride1,
        "stride2": self.stride2,
        "momentum": self.momentum,
        "optimizer": self.optimizer,
        "act_fn": str(self.act_fn),
        "kernel_shape1": self.kernel_shape1,
        "kernel_shape2": self.kernel_shape2,
        "component_ids": ["conv1", "batchnorm1", "conv2", "batchnorm2", "add3"],
    }
    hp["components"] = {
        "add3": self.add3.hyperparameters,
        "conv1": self.conv1.hyperparameters,
        "conv2": self.conv2.hyperparameters,
        "batchnorm1": self.batchnorm1.hyperparameters,
        "batchnorm2": self.batchnorm2.hyperparameters,
    }
    return hp
@property
def derived_variables(self):
    """A dictionary of intermediate values computed during the
    forward/backward passes."""
    # Defaults are None until `forward` populates `_dv`.
    dv = dict.fromkeys(
        ("conv1_out", "conv2_out", "batchnorm1_out", "batchnorm2_out")
    )
    dv["components"] = {
        "add3": self.add3.derived_variables,
        "conv1": self.conv1.derived_variables,
        "conv2": self.conv2.derived_variables,
        "batchnorm1": self.batchnorm1.derived_variables,
        "batchnorm2": self.batchnorm2.derived_variables,
    }
    dv.update(self._dv)
    return dv
@property
def gradients(self):
    """A dictionary of the accumulated module parameter gradients."""
    components = {
        "add3": self.add3.gradients,
        "conv1": self.conv1.gradients,
        "conv2": self.conv2.gradients,
        "batchnorm1": self.batchnorm1.gradients,
        "batchnorm2": self.batchnorm2.gradients,
    }
    return {"components": components}
def forward(self, X, retain_derived=True):
    """
    Compute the module output for input volume `X`.

    Parameters
    ----------
    X : :py:class:`ndarray <numpy.ndarray>` of shape (n_ex, in_rows, in_cols, in_ch)
        Input volume of `n_ex` examples, each of dimension
        (`in_rows`, `in_cols`, `in_ch`).
    retain_derived : bool
        Whether to retain the variables calculated during the forward pass
        for later use during backprop. If False, the layer will not be
        expected to backprop wrt. this input. Default is True.

    Returns
    -------
    Y : :py:class:`ndarray <numpy.ndarray>` of shape (n_ex, out_rows, out_cols, out_ch)
        The module output volume.
    """
    # `conv2` is built lazily: its output channel count must equal the
    # input's channel count so the skip addition is well-defined.
    if not hasattr(self, "conv2"):
        self.in_ch = X.shape[3]
        self._init_conv2()

    out1 = self.conv1.forward(X, retain_derived)
    bn1 = self.batchnorm1.forward(out1, retain_derived)
    out2 = self.conv2.forward(bn1, retain_derived)
    bn2 = self.batchnorm2.forward(out2, retain_derived)
    Y = self.add3.forward([X, bn2], retain_derived)

    if retain_derived:
        self._dv["conv1_out"] = out1
        self._dv["conv2_out"] = out2
        self._dv["batchnorm1_out"] = bn1
        self._dv["batchnorm2_out"] = bn2
    return Y
def backward(self, dLdY, retain_grads=True):
    """
    Compute the gradient of the loss with respect to the module parameters.

    Parameters
    ----------
    dLdY : :py:class:`ndarray <numpy.ndarray>` of shape (`n_ex, out_rows, out_cols, out_ch`) or list of arrays
        The gradient(s) of the loss with respect to the module output(s).
    retain_grads : bool
        Whether to include the intermediate parameter gradients computed
        during the backward pass in the final parameter update. Default is
        True.

    Returns
    -------
    dX : :py:class:`ndarray <numpy.ndarray>` of shape (n_ex, in_rows, in_cols, in_ch)
        The gradient of the loss with respect to the module input volume.
    """
    # Walk the main path in reverse; `add3` also yields the gradient that
    # flowed straight through the skip connection.
    dX, d_bn2 = self.add3.backward(dLdY, retain_grads)
    d_conv2 = self.batchnorm2.backward(d_bn2, retain_grads)
    d_bn1 = self.conv2.backward(d_conv2, retain_grads)
    d_conv1 = self.batchnorm1.backward(d_bn1, retain_grads)
    # Accumulate the main-path input gradient onto the skip-path gradient.
    dX += self.conv1.backward(d_conv1, retain_grads)

    self._dv["dLdAdd3_X"] = dX
    self._dv["dLdBn2"] = d_bn2
    self._dv["dLdBn1"] = d_bn1
    self._dv["dLdConv2"] = d_conv2
    self._dv["dLdConv1"] = d_conv1
    return dX
class SkipConnectionConvModule(ModuleBase):
def __init__(
    self,
    out_ch1,
    out_ch2,
    kernel_shape1,
    kernel_shape2,
    kernel_shape_skip,
    pad1=0,
    pad2=0,
    stride1=1,
    stride2=1,
    act_fn=None,
    epsilon=1e-5,
    momentum=0.9,
    stride_skip=1,
    optimizer=None,
    init="glorot_uniform",
):
    """
    A ResNet-like "convolution" shortcut module.

    Notes
    -----
    Unlike :class:`SkipConnectionIdentityModule`, the shortcut path here
    contains its own `conv2d_skip` and `batchnorm_skip` layers, which allow
    the module to adjust the dimensions of `X` to match the output of the
    main set of convolutions.

    .. code-block:: text

        X -> Conv2D -> Act_fn -> BatchNorm2D -> Conv2D -> BatchNorm2D -> + -> Act_fn
        \_____________________ Conv2D -> Batchnorm2D __________________/

    References
    ----------
    .. [1] He et al. (2015). "Deep residual learning for image
       recognition." https://arxiv.org/pdf/1512.03385.pdf

    Parameters
    ----------
    out_ch1 : int
        Number of filters/kernels in the first convolutional layer.
    out_ch2 : int
        Number of filters/kernels in the second convolutional layer.
    kernel_shape1 : 2-tuple
        Dimension of a single 2D filter/kernel in the first convolutional
        layer.
    kernel_shape2 : 2-tuple
        Dimension of a single 2D filter/kernel in the second convolutional
        layer.
    kernel_shape_skip : 2-tuple
        Dimension of a single 2D filter/kernel in the "skip" convolutional
        layer.
    pad1 : int, tuple, or 'same'
        Rows/columns of 0's to pad the input to the first convolutional
        layer with. Default is 0.
    pad2 : int, tuple, or 'same'
        Rows/columns of 0's to pad the input to the second convolutional
        layer with. Default is 0.
    stride1 : int
        Stride/hop of the kernels in the first convolutional layer.
        Default is 1.
    stride2 : int
        Stride/hop of the kernels in the second convolutional layer.
        Default is 1.
    act_fn : :doc:`Activation <numpy_ml.neural_nets.activations>` object or None
        The activation function for computing ``Y[t]``. If None, use the
        identity :math:`f(x) = x` by default. Default is None.
    epsilon : float
        A small smoothing constant used during
        :class:`~numpy_ml.neural_nets.layers.BatchNorm2D` computation to
        avoid divide-by-zero errors. Default is 1e-5.
    momentum : float
        The momentum term for the running mean/running std calculations in
        the :class:`~numpy_ml.neural_nets.layers.BatchNorm2D` layers. The
        closer this is to 1, the less weight is given to the mean/std of
        the current batch (i.e., higher smoothing). Default is 0.9.
    stride_skip : int
        Stride/hop of the kernels in the "skip" convolutional layer.
        Default is 1.
    optimizer : str or :doc:`Optimizer <numpy_ml.neural_nets.optimizers>` object
        The optimization strategy to use when performing gradient updates
        within the :class:`update` method. If None, use the
        :class:`~numpy_ml.neural_nets.optimizers.SGD` optimizer with
        default parameters. Default is None.
    init : str
        The weight initialization strategy. Valid entries are
        {'glorot_normal', 'glorot_uniform', 'he_normal', 'he_uniform'}.
    """
    super().__init__()

    # shared hyperparameters
    self.init = init
    self.optimizer = optimizer
    self.epsilon = epsilon
    self.momentum = momentum
    self.act_fn = Affine(slope=1, intercept=0) if act_fn is None else act_fn

    # main-path configuration
    self.out_ch1 = out_ch1
    self.out_ch2 = out_ch2
    self.pad1 = pad1
    self.pad2 = pad2
    self.stride1 = stride1
    self.stride2 = stride2
    self.kernel_shape1 = kernel_shape1
    self.kernel_shape2 = kernel_shape2

    # skip-path configuration
    self.stride_skip = stride_skip
    self.kernel_shape_skip = kernel_shape_skip

    # unknown until the first forward pass
    self.in_ch = None

    self._init_params()
def _init_params(self, X=None):
    """Create every component layer except `conv_skip`."""
    self._dv = {}
    self.conv1 = Conv2D(
        out_ch=self.out_ch1,
        kernel_shape=self.kernel_shape1,
        stride=self.stride1,
        pad=self.pad1,
        act_fn=self.act_fn,
        init=self.init,
        optimizer=self.optimizer,
    )
    # conv2 gets an identity activation; the shared `act_fn` is applied
    # after the skip addition inside `add3`.
    self.conv2 = Conv2D(
        out_ch=self.out_ch2,
        kernel_shape=self.kernel_shape2,
        stride=self.stride2,
        pad=self.pad2,
        act_fn=Affine(slope=1, intercept=0),
        init=self.init,
        optimizer=self.optimizer,
    )
    # we can't initialize `conv_skip` without X's dimensions; see `forward`
    # for further details
    self.batchnorm1 = BatchNorm2D(epsilon=self.epsilon, momentum=self.momentum)
    self.batchnorm2 = BatchNorm2D(epsilon=self.epsilon, momentum=self.momentum)
    self.batchnorm_skip = BatchNorm2D(epsilon=self.epsilon, momentum=self.momentum)
    self.add3 = Add(self.act_fn)
def _calc_skip_padding(self, X):
    # Compute the padding for the skip-path convolution so that its output
    # spatial dims match the output of the main conv1 -> conv2 stack.
    #
    # NOTE(review): this normalizes `pad1`/`pad2` from int/2-tuple into
    # 4-tuples *in place*, so it assumes it runs only once. A string pad
    # such as "same" (allowed by the class docstring) would fall through
    # both isinstance checks and break the unpacking below — TODO confirm
    # callers never pass "same" here.
    pads = []
    for p in [self.pad1, self.pad2]:
        if isinstance(p, int):
            pads.append((p, p, p, p))
        elif isinstance(p, tuple) and len(p) == 2:
            pads.append((p[0], p[0], p[1], p[1]))
    self.pad1, self.pad2 = pads

    # compute the dimensions of the convolution1 output
    s1 = self.stride1
    fr1, fc1 = self.kernel_shape1
    _, in_rows, in_cols, _ = X.shape
    pr11, pr12, pc11, pc12 = self.pad1

    out_rows1 = np.floor(1 + (in_rows + pr11 + pr12 - fr1) / s1).astype(int)
    out_cols1 = np.floor(1 + (in_cols + pc11 + pc12 - fc1) / s1).astype(int)

    # compute the dimensions of the convolution2 output
    s2 = self.stride2
    fr2, fc2 = self.kernel_shape2
    pr21, pr22, pc21, pc22 = self.pad2

    out_rows2 = np.floor(1 + (out_rows1 + pr21 + pr22 - fr2) / s2).astype(int)
    out_cols2 = np.floor(1 + (out_cols1 + pc21 + pc22 - fc2) / s2).astype(int)

    # finally, compute the appropriate padding dims for the skip convolution
    desired_dims = (out_rows2, out_cols2)
    self.pad_skip = calc_pad_dims_2D(
        X.shape,
        desired_dims,
        stride=self.stride_skip,
        kernel_shape=self.kernel_shape_skip,
    )
def _init_conv_skip(self, X):
    """Build the skip-path convolution once X's dimensions are known."""
    self._calc_skip_padding(X)
    self.conv_skip = Conv2D(
        out_ch=self.out_ch2,
        kernel_shape=self.kernel_shape_skip,
        stride=self.stride_skip,
        pad=self.pad_skip,
        act_fn=Affine(slope=1, intercept=0),
        init=self.init,
        optimizer=self.optimizer,
    )
@property
def parameters(self):
    """A dictionary of the module parameters."""
    # `conv_skip` is created lazily, so it may not exist yet.
    skip = getattr(self, "conv_skip", None)
    return {
        "components": {
            "add3": self.add3.parameters,
            "conv1": self.conv1.parameters,
            "conv2": self.conv2.parameters,
            "conv_skip": None if skip is None else skip.parameters,
            "batchnorm1": self.batchnorm1.parameters,
            "batchnorm2": self.batchnorm2.parameters,
            "batchnorm_skip": self.batchnorm_skip.parameters,
        }
    }
@property
def hyperparameters(self):
"""A dictionary of the module hyperparameters."""
return {
"layer": "SkipConnectionConvModule",
"init": self.init,
"pad1": self.pad1,
"pad2": self.pad2,
"in_ch": self.in_ch,
"out_ch1": self.out_ch1,
"out_ch2": self.out_ch2,
"epsilon": self.epsilon,
"stride1": self.stride1,
"stride2": self.stride2,
"momentum": self.momentum,
"act_fn": str(self.act_fn),
"stride_skip": self.stride_skip,
"kernel_shape1": self.kernel_shape1,
"kernel_shape2": self.kernel_shape2,
"kernel_shape_skip": self.kernel_shape_skip,
| |
#!/usr/bin/env python3
#
# Copyright (c) 2004-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import contextlib
import errno
import fcntl
import logging
import os
import shutil
import stat
import struct
import tempfile
import time
import typing
from pathlib import Path
from typing import BinaryIO, Iterator, Optional, Tuple
from facebook.eden.overlay.ttypes import OverlayDir, OverlayEntry
class InvalidOverlayFile(Exception):
    """Raised when an overlay file exists but cannot be parsed
    (zero-sized, truncated, bad type magic, or unsupported version)."""

    pass
class NoSuchOverlayFile(Exception):
    """Raised when the requested inode has no data in the overlay."""

    def __init__(self, inode_number: int) -> None:
        message = f"inode {inode_number} is not materialized in the overlay"
        super().__init__(message)
        # Keep the inode number around so callers can report or retry.
        self.inode_number = inode_number
class InodeLookupError(Exception):
    """Raised when a path cannot be resolved to an inode; mirrors OSError
    by exposing the failure reason as an errno code."""

    def __init__(self, msg: str, errnum: int) -> None:
        self.errno = errnum
        super().__init__(msg)
class OverlayHeader:
    """The fixed-size (64-byte) header at the start of every overlay file.

    Layout (big-endian): a 4-byte type magic ("OVDR" for directories,
    "OVFL" for regular files), a 4-byte version, three (seconds,
    nanoseconds) timestamp pairs for atime/ctime/mtime, and 8 padding
    bytes.
    """

    LENGTH = 64
    VERSION_1 = 1

    TYPE_DIR = b"OVDR"
    TYPE_FILE = b"OVFL"

    STRUCT_FORMAT = ">4sIQQQQQQ8s"

    @classmethod
    def parse(cls, data: bytes, type: Optional[bytes] = None) -> "OverlayHeader":
        """Parse a serialized header, raising InvalidOverlayFile on any
        malformed input.

        The `type` parameter is currently unused; it is kept for
        backward compatibility with existing callers.
        """
        # A 0-length file is somewhat common on unclean reboot,
        # so use a separate exception message for this case.
        if len(data) == 0:
            raise InvalidOverlayFile("zero-sized overlay file")
        if len(data) < cls.LENGTH:
            # BUG FIX: this message previously lacked the f-prefix and
            # printed the literal text "length={len(data)}".
            raise InvalidOverlayFile(
                f"overlay file is too short to contain a header: "
                f"length={len(data)}"
            )
        (
            header_id,
            version,
            atime_sec,
            atime_nsec,
            ctime_sec,
            ctime_nsec,
            mtime_sec,
            mtime_nsec,
            padding,
        ) = struct.unpack(cls.STRUCT_FORMAT, data[: cls.LENGTH])

        if header_id not in (cls.TYPE_DIR, cls.TYPE_FILE):
            # BUG FIX: this branch previously raised the unrelated
            # "too short" message; report the actual problem (bad magic).
            raise InvalidOverlayFile(
                f"overlay file has an unknown type identifier: {header_id!r}"
            )
        if version != cls.VERSION_1:
            raise InvalidOverlayFile(f"unsupported overlay file version {version}")

        return OverlayHeader(
            header_id,
            version,
            atime_sec,
            atime_nsec,
            ctime_sec,
            ctime_nsec,
            mtime_sec,
            mtime_nsec,
        )

    def __init__(
        self,
        type: bytes,
        version: int,
        atime_sec: int = 0,
        atime_nsec: int = 0,
        ctime_sec: int = 0,
        ctime_nsec: int = 0,
        mtime_sec: int = 0,
        mtime_nsec: int = 0,
        padding: bytes = b"\0\0\0\0\0\0\0\0",
    ) -> None:
        self.type = type
        self.version = version
        self.atime_sec = atime_sec
        self.atime_nsec = atime_nsec
        self.ctime_sec = ctime_sec
        self.ctime_nsec = ctime_nsec
        self.mtime_sec = mtime_sec
        self.mtime_nsec = mtime_nsec
        self.padding = padding

    # The atime/ctime/mtime properties expose each (sec, nsec) pair as a
    # single float, like os.stat()'s st_*time fields.

    @property
    def atime(self) -> float:
        return self.atime_sec + (self.atime_nsec / 1000000000.0)

    @atime.setter
    def atime(self, value: float) -> None:
        self.atime_sec = int(value)
        self.atime_nsec = int((value - self.atime_sec) * 1000000000)

    @property
    def ctime(self) -> float:
        return self.ctime_sec + (self.ctime_nsec / 1000000000.0)

    @ctime.setter
    def ctime(self, value: float) -> None:
        self.ctime_sec = int(value)
        self.ctime_nsec = int((value - self.ctime_sec) * 1000000000)

    @property
    def mtime(self) -> float:
        return self.mtime_sec + (self.mtime_nsec / 1000000000.0)

    @mtime.setter
    def mtime(self, value: float) -> None:
        self.mtime_sec = int(value)
        self.mtime_nsec = int((value - self.mtime_sec) * 1000000000)

    def serialize(self) -> bytes:
        """Return the 64-byte on-disk representation of this header."""
        return struct.pack(
            self.STRUCT_FORMAT,
            self.type,
            self.version,
            self.atime_sec,
            self.atime_nsec,
            self.ctime_sec,
            self.ctime_nsec,
            self.mtime_sec,
            self.mtime_nsec,
            self.padding,
        )
class Overlay:
ROOT_INODE_NUMBER = 1
NEXT_INODE_NUMBER_PATH = "next-inode-number"
def __init__(self, path: str) -> None:
    """Create an accessor for the overlay directory rooted at `path`."""
    # `path` is expected to contain the "info" lock file and the 256
    # two-hex-digit shard subdirectories used by `get_path`.
    self.path = path
@contextlib.contextmanager
def try_lock(self) -> Iterator[bool]:
    """Try to take an exclusive advisory lock on the overlay's "info" file.

    Yields True while the lock is held, False if the overlay directory is
    missing or another process (e.g. a running edenfs daemon) already
    holds the lock. The lock is dropped when the context exits.
    """
    info_path = os.path.join(self.path, "info")
    try:
        lock_file = open(info_path, "rb")
    except OSError:
        # Info file missing/unreadable: report failure instead of raising.
        yield False
        return

    try:
        # LOCK_NB makes this non-blocking; a held lock raises OSError.
        fcntl.flock(lock_file.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
        yield True
    except OSError:
        yield False
    finally:
        # Release the lock once the yield returns
        lock_file.close()
def get_path(self, inode_number: int) -> str:
    """Return the overlay file path for an inode.

    Files are sharded into 256 subdirectories keyed by the low byte of
    the inode number, rendered as two hex digits.
    """
    shard = "{:02x}".format(inode_number % 256)
    return os.path.join(self.path, shard, str(inode_number))
def open_overlay_file(self, inode_number: int) -> BinaryIO:
    """Open the raw overlay file for `inode_number` in binary mode.

    Raises NoSuchOverlayFile if the inode is not materialized; other
    OS errors propagate unchanged.
    """
    path = self.get_path(inode_number)
    try:
        return typing.cast(BinaryIO, open(path, "rb"))
    except OSError as ex:
        if ex.errno != errno.ENOENT:
            raise
        raise NoSuchOverlayFile(inode_number)
def read_header(self, f: BinaryIO) -> "OverlayHeader":
    """Read and parse the 64-byte header at f's current position."""
    return OverlayHeader.parse(f.read(OverlayHeader.LENGTH))
def check_header(
    self, f: BinaryIO, inode_number: int, expected_type: bytes
) -> "OverlayHeader":
    """Read and parse the header from `f`, verifying its type magic."""
    header = OverlayHeader.parse(f.read(OverlayHeader.LENGTH))
    if header.type == expected_type:
        return header
    raise InvalidOverlayFile(
        f"unexpected type for inode {inode_number} in overlay: "
        f"expected {expected_type!r} but found {header.type!r}"
    )
def read_dir_inode(self, inode_number: int) -> "OverlayDir":
    """Return just the parsed directory contents, discarding the header."""
    _header, dir_data = self.read_dir_inode_tuple(inode_number)
    return dir_data
def read_dir_inode_tuple(
    self, inode_number: int
) -> Tuple["OverlayHeader", "OverlayDir"]:
    """Return (header, parsed contents) for a directory inode.

    Raises InvalidOverlayFile if the inode is not a directory.
    """
    with self.open_overlay_file(inode_number) as f:
        header = self.check_header(f, inode_number, OverlayHeader.TYPE_DIR)
        body = f.read()
    return (header, self.parse_dir_inode_data(body))
def parse_dir_inode_data(self, data: bytes) -> "OverlayDir":
    """Deserialize thrift-compact-encoded directory contents."""
    from thrift.protocol import TCompactProtocol
    from thrift.util import Serializer

    # Start from an empty entries dict: deserialize() leaves fields that
    # are absent from the serialized data untouched.
    result = OverlayDir(entries={})
    factory = TCompactProtocol.TCompactProtocolFactory()
    Serializer.deserialize(factory, data, result)
    return result
def open_file_inode(self, inode_number: int) -> BinaryIO:
    """Open a file inode, discarding the header information."""
    _header, f = self.open_file_inode_tuple(inode_number)
    return f
def open_file_inode_tuple(
    self, inode_number: int
) -> Tuple[OverlayHeader, BinaryIO]:
    """Open the overlay file for the specified inode number.

    Returns the header information and a file object opened to the start of the
    file inode contents.

    Raises InvalidOverlayFile if the inode is not a regular file.
    """
    f = self.open_overlay_file(inode_number)
    try:
        header = self.check_header(f, inode_number, OverlayHeader.TYPE_FILE)
    except Exception:
        # Don't leak the open file descriptor if the header check fails;
        # close it before re-raising.
        f.close()
        raise
    return (header, f)
def lookup_path(self, path: Path) -> Optional[int]:
    """
    Lookup a path in the overlay.

    Returns the inode number corresponding to the path, if the path is
    materialized.
    - If an inode number is found for this path, returns the inode number.
    - If one of the parent directories is not materialized, returns None.
      Without checking the source control data we cannot tell if this
      logical path exists or not.
    - If this path or one of its parent directories does not exist, throws
      an InodeLookupError.

    May throw other exceptions on error.
    """
    assert not path.is_absolute()
    if not path.parts:
        return self.ROOT_INODE_NUMBER

    parent_inode_number = self.ROOT_INODE_NUMBER
    index = 0
    while True:
        parent_dir = self.read_dir_inode(parent_inode_number)
        desired = path.parts[index]
        index += 1

        # BUG FIX: the previous implementation scanned entries with a
        # for/break loop and left the *last* examined entry bound when no
        # name matched, so a lookup miss in a non-empty directory silently
        # resolved to an unrelated sibling entry instead of raising
        # ENOENT. `entries` is a dict, so look the name up directly.
        entries = parent_dir.entries or {}
        entry: Optional[OverlayEntry] = entries.get(desired)
        if entry is None:
            raise InodeLookupError(f"{path} does not exist", errno.ENOENT)

        if index >= len(path.parts):
            return entry.inodeNumber

        # Every non-final component must be a directory.
        if entry.mode is None or stat.S_IFMT(entry.mode) != stat.S_IFDIR:
            non_dir_path = os.path.sep.join(path.parts[:index])
            raise InodeLookupError(
                f"error looking up {path}: {non_dir_path} is not a directory",
                errno.ENOTDIR,
            )
        if entry.hash:
            # This directory along the chain is not materialized
            return None
        parent_inode_number = entry.inodeNumber
def extract_file(
    self, inode_number: int, output_path: Path, mode: int, remove: bool = False
) -> None:
    """Copy the specified file inode out of the overlay.

    `mode` supplies the file type bits (the overlay header stores only the
    type, not permissions). If remove=True the data for this inode will be
    removed from the overlay after it has been extracted.
    """
    with self.open_overlay_file(inode_number) as inf:
        header = self.read_header(inf)
        if header.type != OverlayHeader.TYPE_FILE:
            raise Exception(
                f"expected inode {inode_number} to be a regular file; "
                f"found unexpected type {header.type!r}"
            )

        output_path.parent.mkdir(parents=True, exist_ok=True)
        file_type = stat.S_IFMT(mode)
        if file_type == stat.S_IFLNK:
            contents = inf.read()
            os.symlink(contents, bytes(output_path))
        elif file_type == stat.S_IFREG:
            with output_path.open("wb") as outf:
                shutil.copyfileobj(inf, outf)  # type: ignore
                # Note: the file permissions bits are now stored in the
                # inode table rather than the overlay. The mode bits in the
                # overlay reflect the correct file type only. Always
                # extract orphan inodes with permissions 0o600 (read+write
                # to owner only).
                os.fchmod(outf.fileno(), 0o600)
        else:
            # We don't copy out sockets, fifos, or other unusual file
            # types. These shouldn't have any actual file contents anyway.
            logging.debug(
                f"skipping inode {inode_number} at {output_path} with "
                f"unsupported file type {file_type:#o}"
            )

    # BUG FIX: the overlay data was previously unlinked unconditionally,
    # so even remove=False calls destroyed the overlay contents,
    # contradicting the docstring. Only delete when asked to.
    if remove:
        path = Path(self.get_path(inode_number))
        path.unlink()
def extract_dir(
self, inode_number: int, output_path: Path, remove: bool = False
) -> None:
"""Recursively copy the specified directory inode out of the overlay.
All of its materialized children will be copied out. Children that still have
the same contents as a committed source control object will not be copied out.
If remove=True the data for the extracted inodes will be removed from the
overlay after they have been extracted.
"""
data = self.read_dir_inode(inode_number)
for name, entry in data.entries.items():
overlay_path = Path(self.get_path(entry.inodeNumber))
if not overlay_path.exists():
# Skip children that do not exist in the overlay.
# Note that we explicitly check for existence of the child even if
# entry.hash is set (i.e., if the inode is not materialized):
#
# - Non-materialized directories can have data in the overlay if they
# contain allocated inode numbers. We still recurse into the
# directory in this case. This makes sure we remove the overlay files
# when remove=True, and also ensures that we will find any
# materialized file data inside this subdirectory if Eden crashed in
# the middle of trying to materialize a file but before it marked the
# parent directories materialized.
# - Even for files we can have the same race on crash: eden may have
# crashed while | |
points in the experiment
rate : array_like
the recorded firing rate corresponding to the stimulus, must have dimensions of (T,)
filter_dims : tuple
a tuple defining the dimensions for the spatiotemporal filter. e.g., (n,n,tau) for a 2D stimulus or
or (n,tau) for a bars stimulus. tau must be less than T, the length of the experiment, and N (the
stimulus dimensionality) must equal the product of all but the last item in the tuple.
minibatch_size : int, optional
the size of each minibatch, in samples. defaults to a value such that the number of minibatches is
roughly equal to :math:`0.1 * sqrt(T)`
frac_train : float, optional
number between 0 and 1, gives the fraction of minibatches used for training (default: 0.8)
num_subunits : int, optional
number of subunits to use (default: 1), if a initial W is given, this parameter is unused
num_tents : int, optional
number of tent basis functions to use for parameterizing nonlinearities (default: 30)
sigmasq : float, optional
the size / scale of each tent basis function (default: 0.2)
tent_type : string
the type of tent basis function to use (default: 'gaussian')
final_nonlinearity : string
a function from the `nonlinearities` module
Other Parameters
----------------
spikes : array_like
an array of spike counts (same dimensions as the rate)
\\*\\*kwargs : keyword arguments
if given arguments with the keys `W` or `f`, then those values are used to initialize the filter
or nonlinearity parameters, respectively.
"""
# initialize the model object
if num_temporal_bases is None:
NeuralEncodingModel.__init__(self, 'lnln', stim, spkcounts,
filter_dims, minibatch_size,
frac_train=frac_train)
else:
assert num_temporal_bases < filter_dims[
-1], "Number of temporal basis functions must be less than the number of temporal dimensions"
# defaults
tmax = kwargs['tmax'] if 'tmax' in kwargs else 0.5
bias = kwargs[
'temporal_bias'] if 'temporal_bias' in kwargs else 0.2
# make raised cosine basis
self.temporal_basis = np.flipud(
tentbasis.make_rcos_basis
(np.linspace(0, tmax, filter_dims[-1]), num_temporal_bases,
bias=bias)[1])
# build the reduced model
NeuralEncodingModel.__init__(self, 'lnln', stim, spkcounts,
filter_dims, minibatch_size,
frac_train=frac_train,
temporal_basis=self.temporal_basis)
# default # of subunits
self.num_subunits = kwargs['W'].shape[
0] if 'W' in kwargs else num_subunits
# initialize tent basis functions
tent_span = (-6, 6) # suitable for z-scored input
self.tents = tentbasis.Gaussian(tent_span, num_tents, sigmasq=sigmasq)
# initialize parameter dictionary
self.theta_init = dict()
self.theta_init['W'] = np.zeros(
(self.num_subunits,) + (self.stim_dim, self.tau_filt))
self.theta_init['f'] = np.zeros(
(self.num_subunits, self.tents.num_params))
# initialize filter parameters
if 'W' in kwargs:
# check if we need to project onto the temporal basis
if kwargs['W'].shape[-1] != self.tau_filt:
if kwargs['W'].shape[-1] == self.tau:
kwargs['W'] = kwargs['W'].dot(self.temporal_basis)
elif kwargs['W'].shape[-1] < self.tau:
temp = kwargs['W'].shape[-1]
kwargs['W'] = kwargs['W'].dot(self.temporal_basis[:temp, :])
# ensure dimensions are consistent
assert self.theta_init['W'].shape == kwargs['W'].shape, \
"Shape of the filters (`W` keyword argument) are inconsistent with the given filter dimensions."
# normalize each of the given filters
for idx, w in enumerate(kwargs['W']):
self.theta_init['W'][idx] = utilities.nrm(w)
else:
# multiple subunits: random initialization
if self.num_subunits > 1:
for idx in range(self.num_subunits):
self.theta_init['W'][idx] = utilities.nrm(0.1 * np.random.randn(self.stim_dim, self.tau_filt))
# single subunit: initialize with the STA
else:
self.theta_init['W'][0] = utilities.nrm(self.sta).reshape(-1, self.sta.shape[-1])
# initialize nonlinearity parameters
if 'f' in kwargs:
# ensure dimensions are consistent
assert self.theta_init['f'].shape == kwargs['f'].shape, \
"Shape of the nonlinearity parameters (`f` keyword argument) are inconsistent with the number of tent basis functions."
self.theta_init['f'] = kwargs['f']
else:
# initialize each subunit nonlinearity to be linear
for idx in range(self.num_subunits):
ts = self.tents.tent_span
nonlin_init = np.linspace(ts[0], ts[1], 1000)
self.theta_init['f'][idx] = self.tents.fit(
nonlin_init,
nonlin_init)
# initialize regularizers
self.regularizers = {'W': list(), 'f': list()}
# final nonlinearity
self.final_nonlin_function = getattr(
nonlinearities,
final_nonlinearity)
def f_df(self, W, f, data, param_gradient=None):
"""
Evaluate the negative log-likelihood objective and gradient for the LNLN model class
Examples
--------
>>> f, df = f_df(self, W, f, data)
Parameters
----------
W : array_like
A numpy array containing parameter values for the first layer linear filters in the LNLN model
f : array_like
A numpy array containing parameter values for the first layer nonlinearity in the LNLN model
data : dict
Dictionary containing two keys: `stim` and `rate`, each of which is a numpy array.
param_gradient : string (optional, default=None)
A string indicating which parameters to compute the gradient for, either `W` or `f`
Returns
-------
obj_value : float
The negative log-likelihood objective value. Lower values indicate a better fit to the data.
obj_gradient : array_like
Contains the gradient (as a numpy array) with respect to the parameters given by `param_gradient`
"""
# f.shape is (K,P)
# W.shape is (K,N,tau)
k, n, tau = W.shape
m = (data['rate'].size - tau + 1)
# estimate firing rate and get model response
u, z, zgrad, zhess, drdz, dr2dz2, r = self.rate(
{'W': W, 'f': f}, data['stim'])
# objective in bits (poisson log-likelihood)
obj_value = np.mean(r - data['rate'] * np.log(r))
# factor in front of the gradient (poisson log-likelihood)
grad_factor = (1 - data['rate'] / r) * drdz # dims: (M)
# compute gradient
if param_gradient == 'W':
nonlin_proj = np.sum(f[:, np.newaxis, :] * zgrad, axis=2) # dims: (K, M)
weighted_proj = grad_factor[np.newaxis, :] * nonlin_proj # dims: (K, M)
obj_gradient = np.tensordot(weighted_proj, data['stim'], ([1], [1])) / float(m)
elif param_gradient == 'f':
obj_gradient = np.tensordot(grad_factor, z, ([0], [1])) / float(m)
elif param_gradient == 'both':
nonlin_proj = np.sum(
f[:, np.newaxis, :] * zgrad, axis=2) # dims: (K, M)
weighted_proj = grad_factor[
np.newaxis,
:] * nonlin_proj # dims: (K, M)
dW = np.tensordot(weighted_proj, data['stim'], ([1], [1])) / float(m)
df = np.tensordot(grad_factor, z, ([0], [1])) / float(m)
obj_gradient = {'W': dW, 'f': df}
else:
obj_gradient = None
return obj_value, obj_gradient
def noisy_oracle(self, key, theta_other):
if key is 'W':
def f_df_wrapper(theta):
ind = np.random.choice(list(self.indices['train']), size=1)
return self.f_df(theta, theta_other, self.data[ind], param_gradient='W')
elif key is 'f':
def f_df_wrapper(theta):
ind = np.random.choice(list(self.indices['train']), size=1)
return self.f_df(theta_other, theta, self.data[ind], param_gradient='f')
else:
raise ValueError('Incorrect key ' + key)
return f_df_wrapper
def fit(self, num_alt=2, max_iter=20, num_likelihood_steps=50, disp=2, check_grad=None, callback=None):
"""
Runs an optimization algorithm to learn the parameters of the model given training data and regularizers
Parameters
----------
num_alt : int, optional
The number of times to alternate between optimizing nonlinearities and optimizing filters. Default: 2
max_iter : int, optional
The maximum number of steps to take during each leg of the alternating minimization. Default: 25
num_likelihood_steps : int, optional
The number of steps to take when optimizing the data likelihood term (using SFO)
disp : int, optional
How much information to display during optimization. (Default: 2)
check_grad : string, optional
If 'f' or 'W', then the gradient of the log-likelihood objective with respect to that parameter is checked
against a numerical estimate.
callback : function
A callback function that gets called each iteration with the current parameters and a dictionary of other information
Notes
-----
See the `proxalgs` module for more information on the optimization algorithm
"""
# grab the initial parameters
theta_current = {
'W': self.theta_init['W'].copy(),
'f': self.theta_init['f'].copy()
}
# get list of training data
train_data = [self.data[idx] for idx in self.indices['train']]
# data generator
def datagen():
while True:
yield np.random.choice(train_data, 1)[0]
# store train/test results during optimization
self.convergence = defaultdict(list)
def update_results():
if disp > 0:
tmp_results = self.print_test_results(theta_current)
for k in ('train', 'test'):
self.convergence[k].append(tmp_results[k])
# runs the optimization procedure for one set of parameters (a single
# leg of the alternating minimization)
def optimize_param(f_df_wrapper, param_key, check_grad, cur_iter):
# initialize the SFO instance
loglikelihood_optimizer = SFO(
f_df_wrapper,
theta_current[param_key],
train_data,
display=0)
# check gradient
if check_grad == param_key:
loglikelihood_optimizer.check_grad()
# initialize the optimizer object
opt = Optimizer('sfo', optimizer=loglikelihood_optimizer, num_steps=num_likelihood_steps)
# add regularization terms
[opt.add_regularizer(reg) for reg in self.regularizers[param_key]]
# run the optimization procedure
t0 = perf_counter()
opt.minimize(
theta_current[param_key],
max_iter=max_iter,
disp=disp,
callback=callback)
t1 = perf_counter() - t0
print('Finished optimizing ' + param_key + '. Elapsed time: ' + tp.humantime(t1))
return opt.theta
# print results based on the initial parameters
print('\n')
tp.banner('Initial parameters')
update_results()
try:
# alternating optimization: switch between optimizing nonlinearities,
# and optimizing filters
for alt_iter in range(num_alt):
# Fit filters
print('\n')
tp.banner('Fitting filters')
| |
>>> p, q = Bools('p q')
>>> Xor(p, q)
Xor(p, q)
>>> simplify(Xor(p, q))
Not(p) == q
"""
ctx = _get_ctx(_ctx_from_ast_arg_list([a, b], ctx))
s = BoolSort(ctx)
a = s.cast(a)
b = s.cast(b)
return BoolRef(Z3_mk_xor(ctx.ref(), a.as_ast(), b.as_ast()), ctx)
def Not(a, ctx=None):
    """Create a Z3 not expression or probe.
    >>> p = Bool('p')
    >>> Not(Not(p))
    Not(Not(p))
    >>> simplify(Not(Not(p)))
    p
    """
    ctx = _get_ctx(_ctx_from_ast_arg_list([a], ctx))
    if is_probe(a):
        # Not is also used to build probes
        return Probe(Z3_probe_not(ctx.ref(), a.probe), ctx)
    # Otherwise coerce the argument into a Boolean expression and negate it.
    expr = BoolSort(ctx).cast(a)
    return BoolRef(Z3_mk_not(ctx.ref(), expr.as_ast()), ctx)
def mk_not(a):
    """Negate `a`, collapsing a double negation: Not(Not(p)) becomes p."""
    return a.arg(0) if is_not(a) else Not(a)
def _has_probe(args):
    """Return `True` if one of the elements of the given collection is a Z3 probe."""
    return any(is_probe(arg) for arg in args)
def And(*args):
    """Create a Z3 and-expression or and-probe.
    >>> p, q, r = Bools('p q r')
    >>> And(p, q, r)
    And(p, q, r)
    >>> P = BoolVector('p', 5)
    >>> And(P)
    And(p__0, p__1, p__2, p__3, p__4)
    """
    # A trailing Context argument selects the context explicitly; a single
    # AstVector argument is expanded into its elements.
    ctx = None
    if args and isinstance(args[-1], Context):
        ctx = args[-1]
        args = args[:-1]
    elif len(args) == 1 and isinstance(args[0], AstVector):
        ctx = args[0].ctx
        args = [a for a in args[0]]
    args = _get_args(args)
    ctx = _get_ctx(_ctx_from_ast_arg_list(args, ctx))
    if z3_debug():
        _z3_assert(ctx is not None, "At least one of the arguments must be a Z3 expression or probe")
    if _has_probe(args):
        return _probe_and(args, ctx)
    # Plain expression case: coerce every argument to this context's Bool sort.
    args = _coerce_expr_list(args, ctx)
    _args, sz = _to_ast_array(args)
    return BoolRef(Z3_mk_and(ctx.ref(), sz, _args), ctx)
def Or(*args):
    """Create a Z3 or-expression or or-probe.
    >>> p, q, r = Bools('p q r')
    >>> Or(p, q, r)
    Or(p, q, r)
    >>> P = BoolVector('p', 5)
    >>> Or(P)
    Or(p__0, p__1, p__2, p__3, p__4)
    """
    # A trailing Context argument selects the context explicitly; a single
    # AstVector argument is expanded into its elements.
    ctx = None
    if args and isinstance(args[-1], Context):
        ctx = args[-1]
        args = args[:-1]
    elif len(args) == 1 and isinstance(args[0], AstVector):
        ctx = args[0].ctx
        args = [a for a in args[0]]
    args = _get_args(args)
    ctx = _get_ctx(_ctx_from_ast_arg_list(args, ctx))
    if z3_debug():
        _z3_assert(ctx is not None, "At least one of the arguments must be a Z3 expression or probe")
    if _has_probe(args):
        return _probe_or(args, ctx)
    # Plain expression case: coerce every argument to this context's Bool sort.
    args = _coerce_expr_list(args, ctx)
    _args, sz = _to_ast_array(args)
    return BoolRef(Z3_mk_or(ctx.ref(), sz, _args), ctx)
#########################################
#
# Patterns
#
#########################################
class PatternRef(ExprRef):
    """Patterns are hints for quantifier instantiation.
    """
    def as_ast(self):
        # Patterns have their own C handle type; convert to a generic AST.
        return Z3_pattern_to_ast(self.ctx_ref(), self.ast)
    def get_id(self):
        # Unique identifier of the underlying AST node.
        return Z3_get_ast_id(self.ctx_ref(), self.as_ast())
def is_pattern(a):
    """Return `True` if `a` is a Z3 pattern (hint for quantifier instantiation).
    >>> f = Function('f', IntSort(), IntSort())
    >>> x = Int('x')
    >>> q = ForAll(x, f(x) == 0, patterns = [ f(x) ])
    >>> q
    ForAll(x, f(x) == 0)
    >>> q.num_patterns()
    1
    >>> is_pattern(q.pattern(0))
    True
    >>> q.pattern(0)
    f(Var(0))
    """
    return isinstance(a, PatternRef)
def MultiPattern(*args):
    """Create a Z3 multi-pattern using the given expressions `*args`
    >>> f = Function('f', IntSort(), IntSort())
    >>> g = Function('g', IntSort(), IntSort())
    >>> x = Int('x')
    >>> q = ForAll(x, f(x) != g(x), patterns = [ MultiPattern(f(x), g(x)) ])
    >>> q
    ForAll(x, f(x) != g(x))
    >>> q.num_patterns()
    1
    >>> is_pattern(q.pattern(0))
    True
    >>> q.pattern(0)
    MultiPattern(f(Var(0)), g(Var(0)))
    """
    if z3_debug():
        _z3_assert(len(args) > 0, "At least one argument expected")
        _z3_assert(all(is_expr(a) for a in args), "Z3 expressions expected")
    # All expressions must live in the same context; take it from the first.
    ctx = args[0].ctx
    asts, num = _to_ast_array(args)
    return PatternRef(Z3_mk_pattern(ctx.ref(), num, asts), ctx)
def _to_pattern(arg):
    # Pass existing patterns through; wrap bare expressions in a singleton
    # multi-pattern.
    return arg if is_pattern(arg) else MultiPattern(arg)
#########################################
#
# Quantifiers
#
#########################################
class QuantifierRef(BoolRef):
    """Universally and Existentially quantified formulas."""
    def as_ast(self):
        # A quantifier handle is already a plain AST; return it directly.
        return self.ast
    def get_id(self):
        # Unique identifier of the underlying AST node.
        return Z3_get_ast_id(self.ctx_ref(), self.as_ast())
    def sort(self):
        """Return the Boolean sort or sort of Lambda."""
        if self.is_lambda():
            return _sort(self.ctx, self.as_ast())
        return BoolSort(self.ctx)
    def is_forall(self):
        """Return `True` if `self` is a universal quantifier.
        >>> f = Function('f', IntSort(), IntSort())
        >>> x = Int('x')
        >>> q = ForAll(x, f(x) == 0)
        >>> q.is_forall()
        True
        >>> q = Exists(x, f(x) != 0)
        >>> q.is_forall()
        False
        """
        return Z3_is_quantifier_forall(self.ctx_ref(), self.ast)
    def is_exists(self):
        """Return `True` if `self` is an existential quantifier.
        >>> f = Function('f', IntSort(), IntSort())
        >>> x = Int('x')
        >>> q = ForAll(x, f(x) == 0)
        >>> q.is_exists()
        False
        >>> q = Exists(x, f(x) != 0)
        >>> q.is_exists()
        True
        """
        return Z3_is_quantifier_exists(self.ctx_ref(), self.ast)
    def is_lambda(self):
        """Return `True` if `self` is a lambda expression.
        >>> f = Function('f', IntSort(), IntSort())
        >>> x = Int('x')
        >>> q = Lambda(x, f(x))
        >>> q.is_lambda()
        True
        >>> q = Exists(x, f(x) != 0)
        >>> q.is_lambda()
        False
        """
        return Z3_is_lambda(self.ctx_ref(), self.ast)
    def __getitem__(self, arg):
        """Return the Z3 expression `self[arg]`.
        """
        # Only lambda expressions support array-style selection.
        if z3_debug():
            _z3_assert(self.is_lambda(), "quantifier should be a lambda expression")
        arg = self.sort().domain().cast(arg)
        return _to_expr_ref(Z3_mk_select(self.ctx_ref(), self.as_ast(), arg.as_ast()), self.ctx)
    def weight(self):
        """Return the weight annotation of `self`.
        >>> f = Function('f', IntSort(), IntSort())
        >>> x = Int('x')
        >>> q = ForAll(x, f(x) == 0)
        >>> q.weight()
        1
        >>> q = ForAll(x, f(x) == 0, weight=10)
        >>> q.weight()
        10
        """
        return int(Z3_get_quantifier_weight(self.ctx_ref(), self.ast))
    def num_patterns(self):
        """Return the number of patterns (i.e., quantifier instantiation hints) in `self`.
        >>> f = Function('f', IntSort(), IntSort())
        >>> g = Function('g', IntSort(), IntSort())
        >>> x = Int('x')
        >>> q = ForAll(x, f(x) != g(x), patterns = [ f(x), g(x) ])
        >>> q.num_patterns()
        2
        """
        return int(Z3_get_quantifier_num_patterns(self.ctx_ref(), self.ast))
    def pattern(self, idx):
        """Return a pattern (i.e., quantifier instantiation hints) in `self`.
        >>> f = Function('f', IntSort(), IntSort())
        >>> g = Function('g', IntSort(), IntSort())
        >>> x = Int('x')
        >>> q = ForAll(x, f(x) != g(x), patterns = [ f(x), g(x) ])
        >>> q.num_patterns()
        2
        >>> q.pattern(0)
        f(Var(0))
        >>> q.pattern(1)
        g(Var(0))
        """
        if z3_debug():
            _z3_assert(idx < self.num_patterns(), "Invalid pattern idx")
        return PatternRef(Z3_get_quantifier_pattern_ast(self.ctx_ref(), self.ast, idx), self.ctx)
    def num_no_patterns(self):
        """Return the number of no-patterns."""
        return Z3_get_quantifier_num_no_patterns(self.ctx_ref(), self.ast)
    def no_pattern(self, idx):
        """Return a no-pattern."""
        if z3_debug():
            _z3_assert(idx < self.num_no_patterns(), "Invalid no-pattern idx")
        return _to_expr_ref(Z3_get_quantifier_no_pattern_ast(self.ctx_ref(), self.ast, idx), self.ctx)
    def body(self):
        """Return the expression being quantified.
        >>> f = Function('f', IntSort(), IntSort())
        >>> x = Int('x')
        >>> q = ForAll(x, f(x) == 0)
        >>> q.body()
        f(Var(0)) == 0
        """
        return _to_expr_ref(Z3_get_quantifier_body(self.ctx_ref(), self.ast), self.ctx)
    def num_vars(self):
        """Return the number of variables bounded by this quantifier.
        >>> f = Function('f', IntSort(), IntSort(), IntSort())
        >>> x = Int('x')
        >>> y = Int('y')
        >>> q = ForAll([x, y], f(x, y) >= x)
        >>> q.num_vars()
        2
        """
        return int(Z3_get_quantifier_num_bound(self.ctx_ref(), self.ast))
    def var_name(self, idx):
        """Return a string representing a name used when displaying the quantifier.
        >>> f = Function('f', IntSort(), IntSort(), IntSort())
        >>> x = Int('x')
        >>> y = Int('y')
        >>> q = ForAll([x, y], f(x, y) >= x)
        >>> q.var_name(0)
        'x'
        >>> q.var_name(1)
        'y'
        """
        if z3_debug():
            _z3_assert(idx < self.num_vars(), "Invalid variable idx")
        return _symbol2py(self.ctx, Z3_get_quantifier_bound_name(self.ctx_ref(), self.ast, idx))
    def var_sort(self, idx):
        """Return the sort of a bound variable.
        >>> f = Function('f', IntSort(), RealSort(), IntSort())
        >>> x = Int('x')
        >>> y = Real('y')
        >>> q = ForAll([x, y], f(x, y) >= x)
        >>> q.var_sort(0)
        Int
        >>> q.var_sort(1)
        Real
        """
        if z3_debug():
            _z3_assert(idx < self.num_vars(), "Invalid variable idx")
        return _to_sort_ref(Z3_get_quantifier_bound_sort(self.ctx_ref(), self.ast, idx), self.ctx)
    def children(self):
        """Return a list containing a single element self.body()
        >>> f = Function('f', IntSort(), IntSort())
        >>> x = Int('x')
        >>> q = ForAll(x, f(x) == 0)
        >>> q.children()
        [f(Var(0)) == 0]
        """
        return [ self.body() ]
def is_quantifier(a):
    """Return `True` if `a` is a Z3 quantifier.
    >>> f = Function('f', IntSort(), IntSort())
    >>> x = Int('x')
    >>> q = ForAll(x, f(x) == 0)
    >>> is_quantifier(q)
    True
    >>> is_quantifier(f(x))
    False
    """
    # Lambda expressions are also QuantifierRef instances, so they count too.
    return isinstance(a, QuantifierRef)
def _mk_quantifier(is_forall, vs, body, weight=1, qid="", skid="", patterns=[], no_patterns=[]):
if z3_debug():
_z3_assert(is_bool(body) or is_app(vs) or (len(vs) > 0 and is_app(vs[0])), "Z3 expression expected")
_z3_assert(is_const(vs) or (len(vs) > 0 and all([ is_const(v) for v in vs])), "Invalid bounded variable(s)")
_z3_assert(all([is_pattern(a) or is_expr(a) for a in patterns]), "Z3 patterns expected")
_z3_assert(all([is_expr(p) for p in no_patterns]), "no patterns are Z3 expressions")
if is_app(vs):
ctx = vs.ctx
vs = [vs]
else:
ctx = vs[0].ctx
if not is_expr(body):
body = BoolVal(body, ctx)
num_vars = len(vs)
if num_vars == 0:
return body
_vs = (Ast * num_vars)()
for i in range(num_vars):
## TODO: Check if is constant
_vs[i] = vs[i].as_ast()
patterns = [ _to_pattern(p) for p in patterns ]
num_pats = len(patterns)
| |
<filename>PyNite/FEModel3D.py
from numpy import array, matrix, zeros, empty, delete, insert, matmul, divide, add, subtract
from numpy import nanmax, seterr, shape
from numpy.linalg import solve
from scipy.sparse.linalg import spsolve
from scipy.sparse import csc_matrix
from math import isclose
from Node3D import Node3D
from Spring3D import Spring3D
from Member3D import Member3D
from Quad3D import Quad3D
from Plate3D import Plate3D
from LoadCombo import LoadCombo
# %%
class FEModel3D():
'''
A class representing a 3D finite element model.
'''
#%%
    def __init__(self):
        '''
        Initializes a new 3D finite element model.
        '''
        self.Nodes = []    # A list of the structure's nodes
        self.auxNodes = [] # A list of the structure's auxiliary nodes
        self.Springs = []  # A list of the structure's springs
        self.Members = []  # A list of the structure's members
        self.Quads = []    # A list of the structure's quadrilaterals
        self.Plates = []   # A list of the structure's rectangular plates
        self.__D = {}      # A dictionary of the structure's nodal displacements by load combination
        self.LoadCombos = {} # A dictionary of the structure's load combinations
#%%
def AddNode(self, Name, X, Y, Z):
'''
Adds a new node to the model.
Parameters
----------
Name : string
A unique user-defined name for the node.
X : number
The global X-coordinate of the node.
Y : number
The global Y-coordinate of the node.
Z : number
The global Z-coordinate of the node.
'''
# Create a new node
newNode = Node3D(Name, X, Y, Z)
# Add the new node to the list
self.Nodes.append(newNode)
#%%
def AddAuxNode(self, Name, X, Y, Z):
'''
Adds a new auxiliary node to the model.
Parameters
----------
Name : string
A unique user-defined name for the node.
X : number
The global X-coordinate of the node.
Y : number
The global Y-coordinate of the node.
Z : number
The global Z-coordinate of the node.
'''
# Create a new node
newNode = Node3D(Name, X, Y, Z)
# Add the new node to the list
self.auxNodes.append(newNode)
#%%
def AddSpring(self, Name, iNode, jNode, ks, tension_only=False, comp_only=False):
'''
Adds a new spring to the model.
Parameters
----------
Name : string
A unique user-defined name for the member.
iNode : string
The name of the i-node (start node).
jNode : string
The name of the j-node (end node).
ks : number
The spring constant (force/displacement).
tension_only : bool, optional
Indicates if the member is tension-only. Default is False.
comp_only : bool, optional
Indicates if the member is compression-only. Default is False.
'''
# Create a new spring
newSpring = Spring3D(Name, self.GetNode(iNode), self.GetNode(jNode), ks,
self.LoadCombos, tension_only=tension_only, comp_only=comp_only)
# Add the new member to the list
self.Springs.append(newSpring)
#%%
def AddMember(self, Name, iNode, jNode, E, G, Iy, Iz, J, A, auxNode=None,
tension_only=False, comp_only=False):
'''
Adds a new member to the model.
Parameters
----------
Name : string
A unique user-defined name for the member.
iNode : string
The name of the i-node (start node).
jNode : string
The name of the j-node (end node).
E : number
The modulus of elasticity of the member.
G : number
The shear modulus of the member.
Iy : number
The moment of inertia of the member about its local y-axis.
Iz : number
The moment of inertia of the member about its local z-axis.
J : number
The polar moment of inertia of the member.
A : number
The cross-sectional area of the member.
auxNode : string, optional
The name of the auxialary node used to define the local z-axis.
The default is for the program to define the axis instead of
using an auxiliary node.
tension_only : bool, optional
Indicates if the member is tension-only. Default is False.
comp_only : bool, optional
Indicates if the member is compression-only. Default is False.
'''
# Create a new member
if auxNode == None:
newMember = Member3D(Name, self.GetNode(iNode),
self.GetNode(jNode), E, G, Iy, Iz, J, A,
LoadCombos=self.LoadCombos, tension_only=tension_only, comp_only=comp_only)
else:
newMember = Member3D(Name, self.GetNode(iNode),
self.GetNode(jNode), E, G, Iy, Iz, J, A, self.GetAuxNode(auxNode),
self.LoadCombos, tension_only=tension_only, comp_only=comp_only)
# Add the new member to the list
self.Members.append(newMember)
#%%
    def AddPlate(self, Name, iNode, jNode, mNode, nNode, t, E, nu):
        '''
        Adds a new plate to the model.
        Plates will be deprecated in a future version. Quadrilaterals are more
        versatile and will replace them.
        Parameters
        ----------
        Name : string
            A unique user-defined name for the plate.
        iNode : string
            The name of the i-node (1st node defined in clockwise order).
        jNode : string
            The name of the j-node (2nd node defined in clockwise order).
        mNode : string
            The name of the m-node (3rd node defined in clockwise order).
        nNode : string
            The name of the n-node (4th node defined in clockwise order).
        t : number
            The thickness of the plate.
        E : number
            The modulus of elasticity of the plate.
        nu : number
            Poisson's ratio for the plate.
        '''
        # Create a new member
        newPlate = Plate3D(Name, self.GetNode(iNode), self.GetNode(jNode), self.GetNode(mNode), self.GetNode(nNode), t, E, nu)
        # Add the new member to the list
        self.Plates.append(newPlate)
#%%
    def AddQuad(self, Name, iNode, jNode, mNode, nNode, t, E, nu):
        '''
        Adds a new quadrilateral to the model.
        Quadrilaterals are similar to plates, except they do not have to be
        rectangular. Plates will be deprecated in a future version. Note that
        quadrilateral nodes are defined in counter-clockwise order instead of
        the clockwise order that plates have used up to this point.
        Parameters
        ----------
        Name : string
            A unique user-defined name for the quadrilateral.
        iNode : string
            The name of the i-node (1st node defined in counter-clockwise order).
        jNode : string
            The name of the j-node (2nd node defined in counter-clockwise order).
        mNode : string
            The name of the m-node (3rd node defined in counter-clockwise order).
        nNode : string
            The name of the n-node (4th node defined in counter-clockwise order).
        t : number
            The thickness of the quadrilateral.
        E : number
            The modulus of elasticity of the quadrilateral.
        nu : number
            Poisson's ratio for the quadrilateral.
        '''
        # Create a new member
        newQuad = Quad3D(Name, self.GetNode(iNode), self.GetNode(jNode), self.GetNode(mNode), self.GetNode(nNode), t, E, nu)
        # Add the new member to the list
        self.Quads.append(newQuad)
#%%
    def RemoveNode(self, Node):
        '''
        Removes a node from the model. All nodal loads associated with the
        node and members attached to the node will also be removed.
        Parameters
        ----------
        Node : string
            The name of the node to be removed.
        '''
        # Remove the node. Nodal loads are stored within the node, so they
        # will be deleted automatically when the node is deleted.
        self.Nodes.remove(self.GetNode(Node))
        # Find any members attached to the node and remove them
        # NOTE(review): springs attached to this node are NOT removed here,
        # which may leave springs referencing a deleted node -- confirm intent.
        self.Members = [member for member in self.Members if member.iNode.Name != Node and member.jNode.Name != Node]
#%%
def RemoveSpring(self, Spring):
'''
Removes a spring from the model.
Parameters
----------
Spring : string
The name of the spring to be removed.
'''
# Remove the spring.
self.Springs.remove(self.GetSpring(Spring))
#%%
def RemoveMember(self, Member):
'''
Removes a member from the model. All member loads associated with the
member will also be removed.
Parameters
----------
Member : string
The name of the member to be removed.
'''
# Remove the member. Member loads are stored within the member, so they
# will be deleted automatically when the member is deleted.
self.Members.remove(self.GetMember(Member))
#%%
def DefineSupport(self, Node, SupportDX=False, SupportDY=False, SupportDZ=False, SupportRX=False, SupportRY=False, SupportRZ=False):
'''
Defines the support conditions at a node.
Nodes will default to fully unsupported unless specified otherwise.
Parameters
----------
Node : string
The name of the node where the support is being defined
SupportDX : number
Indicates whether the node is supported against translation in the global X-direction.
SupportDY : number
Indicates whether the node is supported against translation in the global Y-direction.
SupportDZ : number
Indicates whether the node is supported against translation in the global Z-direction.
SupportRX : number
Indicates whether the node is supported against | |
right_psdf, left_index=True, right_index=True, how='right').sort_index()
A B
1 2.0 x
2 NaN y
>>> ps.merge(left_psdf, right_psdf, left_index=True, right_index=True, how='outer').sort_index()
A B
0 1.0 None
1 2.0 x
2 NaN y
Notes
-----
As described in #263, joining string columns currently returns None for missing values
instead of NaN.
"""
return obj.merge(
right,
how=how,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
suffixes=suffixes,
)
def merge_asof(
left: Union[DataFrame, Series],
right: Union[DataFrame, Series],
on: Optional[Name] = None,
left_on: Optional[Name] = None,
right_on: Optional[Name] = None,
left_index: bool = False,
right_index: bool = False,
by: Optional[Union[Name, List[Name]]] = None,
left_by: Optional[Union[Name, List[Name]]] = None,
right_by: Optional[Union[Name, List[Name]]] = None,
suffixes: Tuple[str, str] = ("_x", "_y"),
tolerance: Optional[Any] = None,
allow_exact_matches: bool = True,
direction: str = "backward",
) -> DataFrame:
"""
Perform an asof merge.
This is similar to a left-join except that we match on nearest
key rather than equal keys.
For each row in the left DataFrame:
- A "backward" search selects the last row in the right DataFrame whose
'on' key is less than or equal to the left's key.
- A "forward" search selects the first row in the right DataFrame whose
'on' key is greater than or equal to the left's key.
- A "nearest" search selects the row in the right DataFrame whose 'on'
key is closest in absolute distance to the left's key.
Optionally match on equivalent keys with 'by' before searching with 'on'.
.. versionadded:: 3.3.0
Parameters
----------
left : DataFrame or named Series
right : DataFrame or named Series
on : label
Field name to join on. Must be found in both DataFrames.
The data MUST be ordered. Furthermore this must be a numeric column,
such as datetimelike, integer, or float. On or left_on/right_on
must be given.
left_on : label
Field name to join on in left DataFrame.
right_on : label
Field name to join on in right DataFrame.
left_index : bool
Use the index of the left DataFrame as the join key.
right_index : bool
Use the index of the right DataFrame as the join key.
by : column name or list of column names
Match on these columns before performing merge operation.
left_by : column name
Field names to match on in the left DataFrame.
right_by : column name
Field names to match on in the right DataFrame.
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and right
side, respectively.
tolerance : int or Timedelta, optional, default None
Select asof tolerance within this range; must be compatible
with the merge index.
allow_exact_matches : bool, default True
- If True, allow matching with the same 'on' value
(i.e. less-than-or-equal-to / greater-than-or-equal-to)
- If False, don't match the same 'on' value
(i.e., strictly less-than / strictly greater-than).
direction : 'backward' (default), 'forward', or 'nearest'
Whether to search for prior, subsequent, or closest matches.
Returns
-------
merged : DataFrame
See Also
--------
merge : Merge with a database-style join.
merge_ordered : Merge with optional filling/interpolation.
Examples
--------
>>> left = ps.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
>>> left
a left_val
0 1 a
1 5 b
2 10 c
>>> right = ps.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
>>> right
a right_val
0 1 1
1 2 2
2 3 3
3 6 6
4 7 7
>>> ps.merge_asof(left, right, on="a").sort_values("a").reset_index(drop=True)
a left_val right_val
0 1 a 1
1 5 b 3
2 10 c 7
>>> ps.merge_asof(
... left,
... right,
... on="a",
... allow_exact_matches=False
... ).sort_values("a").reset_index(drop=True)
a left_val right_val
0 1 a NaN
1 5 b 3.0
2 10 c 7.0
>>> ps.merge_asof(
... left,
... right,
... on="a",
... direction="forward"
... ).sort_values("a").reset_index(drop=True)
a left_val right_val
0 1 a 1.0
1 5 b 6.0
2 10 c NaN
>>> ps.merge_asof(
... left,
... right,
... on="a",
... direction="nearest"
... ).sort_values("a").reset_index(drop=True)
a left_val right_val
0 1 a 1
1 5 b 6
2 10 c 7
We can use indexed DataFrames as well.
>>> left = ps.DataFrame({"left_val": ["a", "b", "c"]}, index=[1, 5, 10])
>>> left
left_val
1 a
5 b
10 c
>>> right = ps.DataFrame({"right_val": [1, 2, 3, 6, 7]}, index=[1, 2, 3, 6, 7])
>>> right
right_val
1 1
2 2
3 3
6 6
7 7
>>> ps.merge_asof(left, right, left_index=True, right_index=True).sort_index()
left_val right_val
1 a 1
5 b 3
10 c 7
Here is a real-world times-series example
>>> quotes = ps.DataFrame(
... {
... "time": [
... pd.Timestamp("2016-05-25 13:30:00.023"),
... pd.Timestamp("2016-05-25 13:30:00.023"),
... pd.Timestamp("2016-05-25 13:30:00.030"),
... pd.Timestamp("2016-05-25 13:30:00.041"),
... pd.Timestamp("2016-05-25 13:30:00.048"),
... pd.Timestamp("2016-05-25 13:30:00.049"),
... pd.Timestamp("2016-05-25 13:30:00.072"),
... pd.Timestamp("2016-05-25 13:30:00.075")
... ],
... "ticker": [
... "GOOG",
... "MSFT",
... "MSFT",
... "MSFT",
... "GOOG",
... "AAPL",
... "GOOG",
... "MSFT"
... ],
... "bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01],
... "ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03]
... }
... )
>>> quotes
time ticker bid ask
0 2016-05-25 13:30:00.023 GOOG 720.50 720.93
1 2016-05-25 13:30:00.023 MSFT 51.95 51.96
2 2016-05-25 13:30:00.030 MSFT 51.97 51.98
3 2016-05-25 13:30:00.041 MSFT 51.99 52.00
4 2016-05-25 13:30:00.048 GOOG 720.50 720.93
5 2016-05-25 13:30:00.049 AAPL 97.99 98.01
6 2016-05-25 13:30:00.072 GOOG 720.50 720.88
7 2016-05-25 13:30:00.075 MSFT 52.01 52.03
>>> trades = ps.DataFrame(
... {
... "time": [
... pd.Timestamp("2016-05-25 13:30:00.023"),
... pd.Timestamp("2016-05-25 13:30:00.038"),
... pd.Timestamp("2016-05-25 13:30:00.048"),
... pd.Timestamp("2016-05-25 13:30:00.048"),
... pd.Timestamp("2016-05-25 13:30:00.048")
... ],
... "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
... "price": [51.95, 51.95, 720.77, 720.92, 98.0],
... "quantity": [75, 155, 100, 100, 100]
... }
... )
>>> trades
time ticker price quantity
0 2016-05-25 13:30:00.023 MSFT 51.95 75
1 2016-05-25 13:30:00.038 MSFT 51.95 155
2 2016-05-25 13:30:00.048 GOOG 720.77 100
3 2016-05-25 13:30:00.048 GOOG 720.92 100
4 2016-05-25 13:30:00.048 AAPL 98.00 100
By default we are taking the asof of the quotes
>>> ps.merge_asof(
... trades, quotes, on="time", by="ticker"
... ).sort_values(["time", "ticker", "price"]).reset_index(drop=True)
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96
1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98
2 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
3 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93
4 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93
We only asof within 2ms between the quote time and the trade time
>>> ps.merge_asof(
... trades,
... quotes,
... on="time",
... by="ticker",
... tolerance=F.expr("INTERVAL 2 MILLISECONDS") # pd.Timedelta("2ms")
... ).sort_values(["time", "ticker", "price"]).reset_index(drop=True)
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96
1 2016-05-25 13:30:00.038 MSFT 51.95 155 NaN NaN
2 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
3 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93
4 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93
We only asof within 10ms between the quote time and the trade time
and we exclude exact matches on time. However *prior* data will
propagate forward
>>> ps.merge_asof(
... trades,
... quotes,
... on="time",
... by="ticker",
... tolerance=F.expr("INTERVAL 10 MILLISECONDS"), # pd.Timedelta("10ms")
... allow_exact_matches=False
... ).sort_values(["time", "ticker", "price"]).reset_index(drop=True)
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 NaN NaN
1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98
2 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
3 2016-05-25 13:30:00.048 GOOG 720.77 100 NaN NaN
4 2016-05-25 13:30:00.048 GOOG 720.92 100 NaN NaN
"""
def to_list(os: Optional[Union[Name, List[Name]]]) -> List[Label]:
if os is None:
return []
elif is_name_like_tuple(os):
return [cast(Label, os)]
elif is_name_like_value(os):
return [(os,)]
else:
return [o if is_name_like_tuple(o) else (o,) for o in os]
if isinstance(left, Series):
left = left.to_frame()
if isinstance(right, Series):
right = right.to_frame()
if on:
if left_on or right_on:
raise ValueError(
'Can only pass argument "on" OR "left_on" and "right_on", '
"not a combination of both."
)
left_as_of_names = list(map(left._internal.spark_column_name_for, to_list(on)))
right_as_of_names = list(map(right._internal.spark_column_name_for, to_list(on)))
else:
if left_index:
if isinstance(left.index, MultiIndex):
raise ValueError("left can only have one index")
left_as_of_names | |
# Source repository: frank1010111/pyCRM
import numpy as np
from numpy import ndarray
from numba import njit
import pandas as pd
import pickle
from scipy import optimize
from typing import Optional, Tuple, Union
from joblib import Parallel, delayed
@njit
def q_primary(
    production: ndarray, time: ndarray, gain_producer: ndarray, tau_producer: ndarray
) -> ndarray:
    """Primary production contribution from exponential (Arps b=0) decline.

    Args
    ----------
    production : ndarray
        Production, size: Number of time steps; only the first entry is used
        as the initial rate
    time : ndarray
        Producing times to forecast, size: Number of time steps
    gain_producer : ndarray
        Arps q_i factor
    tau_producer : ndarray
        Arps time constant

    Returns
    ----------
    q_hat : ndarray
        Calculated production, size: Number of time steps
    """
    # Exponential decline from the initial rate, scaled by the producer gain.
    decline = np.exp(-time / tau_producer)
    return production[0] * gain_producer * decline
@njit
def q_CRM_perpair(
    injection: ndarray, time: ndarray, gains: ndarray, taus: ndarray
) -> ndarray:
    """Calculates per injector-producer pair production for all injectors on one producer
    using CRM model

    Args
    ----------
    injection : ndarray
        Injected fluid, shape: (Number of time steps, Number of injectors)
    time : ndarray
        Producing times to forecast, size: Number of time steps
    gains : ndarray
        Connectivities between each injector and the producer,
        size: Number of injectors
    taus : ndarray
        Time constants between each injector and the producer,
        size: Number of injectors

    Returns
    ----------
    q_hat : ndarray
        Calculated production, size: Number of time steps
    """
    n = len(time)
    q_hat = np.zeros(n)
    # conv_injected[k, j]: injection from injector j convolved with the
    # exponential response up to time step k.
    conv_injected = np.zeros((n, injection.shape[1]))

    # Compute convolved injection rates
    for j in range(injection.shape[1]):
        # First time step: fraction of the initial injection that arrives
        # within the first interval (NOTE(review): assumes len(time) >= 2).
        conv_injected[0, j] += (1 - np.exp((time[0] - time[1]) / taus[j])) * injection[
            0, j
        ]
        for k in range(1, n):
            # Sum contributions of all earlier injection steps m <= k:
            # (1 - exp(dt_m/tau)) is the fraction delivered during step m,
            # exp((t_m - t_k)/tau) decays it from step m to step k.
            for m in range(1, k + 1):
                time_decay = (1 - np.exp((time[m - 1] - time[m]) / taus[j])) * np.exp(
                    (time[m] - time[k]) / taus[j]
                )
                conv_injected[k, j] += time_decay * injection[m, j]

    # Calculate waterflood rates: weight each injector's convolved signal by
    # its connectivity gain and sum over injectors.
    for k in range(n):
        for j in range(injection.shape[1]):
            q_hat[k] += gains[j] * conv_injected[k, j]
    return q_hat
@njit
def q_CRM_perproducer(
    injection: ndarray, time: ndarray, gain: ndarray, tau: float
) -> ndarray:
    """Waterflood production for one producer using the CRMp model, which
    shares a single time constant across all of the producer's injectors.

    Args
    ----------
    injection : ndarray
        Injected fluid, shape: (Number of time steps, Number of injectors)
    time : ndarray
        Producing times to forecast, size: Number of time steps
    gain : ndarray
        Connectivities between each injector and the producer,
        size: Number of injectors
    tau : float
        Time constant shared by all injector-producer pairs

    Returns
    ----------
    q_hat : ndarray
        Calculated production, size: Number of time steps
    """
    # Broadcast the scalar tau to one entry per injector, then delegate to
    # the per-pair kernel.
    shared_taus = tau * np.ones(injection.shape[1])
    return q_CRM_perpair(injection, time, gain, shared_taus)
def random_weights(
    n_i: int, n_j: int, axis: int = 0, seed: Optional[int] = None
) -> ndarray:
    """Generate a random producer-injector gain matrix normalized along *axis*.

    Args
    ----
    n_i : int
        number of rows
    n_j : int
        number of columns
    axis : int, default is 0
        axis along which the weights sum to one
    seed : int, default is None
        seed for the random generator (None gives nondeterministic output)

    Returns
    -------
    gains_guess: ndarray
        shape (n_i, n_j), entries in [0, 1] summing to 1 along *axis*
    """
    generator = np.random.default_rng(seed)
    # Draw integers on a range proportional to the normalization axis length,
    # then divide by the axis sums so each slice sums to one.
    upper = 10 * (n_i if axis == 0 else n_j)
    raw = generator.integers(0, upper, (n_i, n_j))
    return raw / raw.sum(axis, keepdims=True)
class CRM:
"""A Capacitance Resistance Model history matcher
CRM uses a physics-inspired mass balance approach to explain production for \
waterfloods. It treants each injector-producer well pair as a system \
with mass input, output, and pressure related to the mass balance. \
Several versions exist.
Args
----------
primary : bool
Whether to model primary production (strongly recommended)
tau_selection : str
How many tau values to select
- If 'per-pair', fit tau for each producer-injector pair
- If 'per-producer', fit tau for each producer (CRMp model)
constraints : str
How to constrain the gains
* If 'up-to one' (default), let gains vary from 0 (no connection) to 1 \
(all injection goes to producer)
* If 'positive', require each gain to be positive \
(It is unlikely to go negative in real life)
* If 'sum-to-one', require the gains for each injector to sum to one \
(all production accounted for)
* If 'sum-to-one injector' (not implemented), require each injector's \
gains to sum to one (all injection accounted for)
Examples
----------
crm = CRM(True, "per-pair", "up-to one")
References
----------
"A State-of-the-Art Literature Review on Capacitance Resistance Models for
Reservoir Characterization and Performance Forecasting" - Holanda et al., 2018.
"""
def __init__(
self,
primary: bool = True,
tau_selection: str = "per-pair",
constraints: str = "positive",
):
if type(primary) != bool:
raise TypeError("primary must be a boolean")
self.primary = primary
if constraints not in (
"positive",
"up-to one",
"sum-to-one",
"sum-to-one injector",
):
raise ValueError("Invalid constraints")
self.constraints = constraints
self.tau_selection = tau_selection
if tau_selection == "per-pair":
self.q_CRM = q_CRM_perpair
elif tau_selection == "per-producer":
self.q_CRM = q_CRM_perproducer
else:
raise ValueError(
"tau_selection must be one of"
+ '("per-pair","per-producer")'
+ f", not {tau_selection}"
)
def fit(
self,
production: ndarray,
injection: ndarray,
time: ndarray,
initial_guess: ndarray = None,
num_cores: int = 1,
random: bool = False,
**kwargs,
):
"""Build a CRM model from the production and injection data (production, injection)
Args
----------
production : ndarray
production rates for each time period,
shape: (n_time, n_producers)
injection : ndarray
injection rates for each time period,
shape: (n_time, n_injectors)
time : ndarray
relative time for each rate measurement, starting from 0,
shape: (n_time)
initial_guess : ndarray
initial guesses for gains, taus, primary production
contribution, of
shape: (len(guess), n_producers)
num_cores (int): number of cores to run fitting procedure on, defaults to 1
random : bool
whether to randomly initialize the gains
**kwargs:
keyword arguments to pass to scipy.optimize fitting routine
Returns
----------
self: trained model
"""
self.production = production
self.injection = injection
self.time = time
if production.shape[0] != injection.shape[0]:
raise ValueError(
"production and injection do not have the same number of time steps"
)
if production.shape[0] != time.shape[0]:
raise ValueError(
"production and time do not have the same number of timesteps"
)
if not initial_guess:
initial_guess = self._get_initial_guess(random=random)
bounds, constraints = self._get_bounds()
num_cores = kwargs.pop("num_cores", 1)
def fit_well(production, x0):
# residual is an L2 norm
def residual(x, production):
return sum(
(production - self._calculate_qhat(x, production, injection, time))
** 2
)
result = optimize.minimize(
residual,
x0,
bounds=bounds,
constraints=constraints,
args=(production,),
**kwargs,
)
return result
production_perwell = [x for x in self.production.T]
if num_cores == 1:
results = map(fit_well, production_perwell, initial_guess)
else:
results = Parallel(n_jobs=num_cores)(
delayed(fit_well)(p) for p, x0 in zip(production_perwell, initial_guess)
)
opts_perwell = [self._split_opts(r["x"]) for r in results]
gains_perwell, tau_perwell, gains_producer, tau_producer = map(
list, zip(*opts_perwell)
)
self.gains = np.vstack(gains_perwell)
self.tau = np.vstack(tau_perwell)
self.gains_producer = np.array(gains_producer)
self.tau_producer = np.array(tau_producer)
return self
def predict(self, injection=None, time=None, connections={}):
"""Predict production for a trained model.
If the injection and time are not provided, this will use the training values
Args
----------
injection : ndarray
The injection rates to input to the system, shape (n_time, n_inj)
time : ndarray
The timesteps to predict
connections : dict
if present, the gains, tau, gains_producer, tau_producer
matrices
Returns
----------
q_hat :ndarray
The predicted values, shape (n_time, n_producers)
"""
gains = connections["gains"] if "gains" in connections else self.gains
tau = connections["tau"] if "tau" in connections else self.tau
gains_producer = (
connections["gains_producer"]
if "gains_producer" in connections
else self.gains_producer
)
tau_producer = (
connections["tau_producer"]
if "tau_producer" in connections
else self.tau_producer
)
production = self.production
n_producers = production.shape[1]
if int(injection is None) + int(time is None) == 1:
raise TypeError("predict() takes 1 or 3 arguments, 2 given")
if injection is None:
injection = self.injection
if time is None:
time = self.time
if time.shape[0] != injection.shape[0]:
raise ValueError("injection and time need same number of steps")
q_hat = np.zeros((len(time), n_producers))
for i in range(n_producers):
q_hat[:, i] += q_primary(
production[:, i], time, gains_producer[i], tau_producer[i]
)
q_hat[:, i] += self.q_CRM(injection, time, gains[i, :], tau[i])
return q_hat
def set_rates(self, production=None, injection=None, time=None):
"""Sets production and injection rates and time"""
if production is not None:
self.production = production
if injection is not None:
self.injection = injection
if time is not None:
self.time = time
def set_connections(
self, gains=None, tau=None, gains_producer=None, tau_producer=None
):
"""Sets waterflood properties"""
if gains is not None:
self.gains = | |
from flask import (
abort,
current_app,
flash,
redirect,
render_template,
request,
session,
url_for,
)
from flask_login import current_user, login_required
from notifications_python_client.errors import HTTPError
from notifications_utils.field import Field
from notifications_utils.formatters import formatted_list
from app import (
billing_api_client,
current_service,
email_branding_client,
inbound_number_client,
organisations_client,
service_api_client,
user_api_client,
zendesk_client,
)
from app.main import main
from app.main.forms import (
BrandingOptionsEmail,
ConfirmPasswordForm,
FreeSMSAllowance,
InternationalSMSForm,
LetterBranding,
LinkOrganisationsForm,
OrganisationTypeForm,
RenameServiceForm,
RequestToGoLiveForm,
ServiceBasicViewForm,
ServiceContactLinkForm,
ServiceEditInboundNumberForm,
ServiceInboundNumberForm,
ServiceLetterContactBlockForm,
ServiceReplyToEmailForm,
ServiceSetBranding,
ServiceSmsSenderForm,
ServiceSwitchLettersForm,
SMSPrefixForm,
branding_options_dict,
)
from app.utils import (
AgreementInfo,
email_safe,
get_cdn_domain,
user_has_permissions,
user_is_platform_admin,
)
@main.route("/services/<service_id>/service-settings")
@login_required
@user_has_permissions('manage_service', 'manage_api_keys')
def service_settings(service_id):
    """Render the main settings page for a service.

    Aggregates branding, inbound number, reply-to addresses, letter contact
    blocks, SMS senders and the free SMS fragment limit from the various API
    clients into a single template context.
    """
    letter_branding_organisations = email_branding_client.get_letter_email_branding()
    # Services without an organisation get None rather than a KeyError.
    organisation = organisations_client.get_service_organisation(service_id).get('name', None)
    if current_service['email_branding']:
        email_branding = email_branding_client.get_email_branding(current_service['email_branding'])['email_branding']
    else:
        email_branding = None
    inbound_number = inbound_number_client.get_inbound_sms_number_for_service(service_id)
    # Empty string when the service has no inbound number assigned.
    disp_inbound_number = inbound_number['data'].get('number', '')
    reply_to_email_addresses = service_api_client.get_reply_to_email_addresses(service_id)
    reply_to_email_address_count = len(reply_to_email_addresses)
    # The first address flagged is_default, or "Not set" if there is none.
    default_reply_to_email_address = next(
        (x['email_address'] for x in reply_to_email_addresses if x['is_default']), "Not set"
    )
    letter_contact_details = service_api_client.get_letter_contacts(service_id)
    letter_contact_details_count = len(letter_contact_details)
    # Field(..., html='escape') HTML-escapes the user-supplied contact block.
    default_letter_contact_block = next(
        (Field(x['contact_block'], html='escape') for x in letter_contact_details if x['is_default']), "Not set"
    )
    sms_senders = service_api_client.get_sms_senders(service_id)
    sms_sender_count = len(sms_senders)
    default_sms_sender = next(
        (Field(x['sms_sender'], html='escape') for x in sms_senders if x['is_default']), "None"
    )
    free_sms_fragment_limit = billing_api_client.get_free_sms_fragment_limit_for_year(service_id)
    return render_template(
        'views/service-settings.html',
        email_branding=email_branding,
        # '001' is used as the fallback DVLA organisation code.
        letter_branding=letter_branding_organisations.get(
            current_service.get('dvla_organisation', '001')
        ),
        can_receive_inbound=('inbound_sms' in current_service['permissions']),
        inbound_number=disp_inbound_number,
        default_reply_to_email_address=default_reply_to_email_address,
        reply_to_email_address_count=reply_to_email_address_count,
        default_letter_contact_block=default_letter_contact_block,
        letter_contact_details_count=letter_contact_details_count,
        default_sms_sender=default_sms_sender,
        sms_sender_count=sms_sender_count,
        free_sms_fragment_limit=free_sms_fragment_limit,
        prefix_sms=current_service['prefix_sms'],
        organisation=organisation,
    )
@main.route("/services/<service_id>/service-settings/name", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def service_name_change(service_id):
    """Show and process the 'rename service' form.

    A successful submission stashes the new name in the session and sends
    the user on to the password-confirmation step.
    """
    form = RenameServiceForm()
    if request.method == 'GET':
        # Pre-fill with the current name on first load.
        form.name.data = current_service['name']
    if form.validate_on_submit():
        new_name = form.name.data
        if new_name == current_service['name']:
            # Nothing changed: skip straight back to settings.
            return redirect(url_for('.service_settings', service_id=service_id))
        is_unique = service_api_client.is_service_name_unique(service_id, new_name, email_safe(new_name))
        if not is_unique:
            form.name.errors.append("This service name is already in use")
            return render_template('views/service-settings/name.html', form=form)
        session['service_name_change'] = new_name
        return redirect(url_for('.service_name_change_confirm', service_id=service_id))
    return render_template(
        'views/service-settings/name.html',
        form=form,
    )
@main.route("/services/<service_id>/service-settings/name/confirm", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def service_name_change_confirm(service_id):
    """Re-check the user's password, then apply the rename stashed in the
    session by ``service_name_change``.
    """
    # Validate password for form
    def _check_password(pwd):
        return user_api_client.verify_password(current_user.id, pwd)

    form = ConfirmPasswordForm(_check_password)

    if form.validate_on_submit():
        try:
            service_api_client.update_service(
                current_service['id'],
                name=session['service_name_change'],
                email_from=email_safe(session['service_name_change'])
            )
        except HTTPError as e:
            # The API signals a name collision as a 400 whose 'name' errors
            # include this exact message; anything else is re-raised.
            error_msg = "Duplicate service name '{}'".format(session['service_name_change'])
            if e.status_code == 400 and error_msg in e.message['name']:
                # Redirect the user back to the change service name screen
                flash('This service name is already in use', 'error')
                return redirect(url_for('main.service_name_change', service_id=service_id))
            else:
                raise e
        else:
            # Success: drop the pending name so a stale value cannot be
            # re-applied later.
            session.pop('service_name_change')
            return redirect(url_for('.service_settings', service_id=service_id))
    return render_template(
        'views/service-settings/confirm.html',
        heading='Change your service name',
        form=form)
@main.route("/services/<service_id>/service-settings/request-to-go-live")
@login_required
@user_has_permissions('manage_service')
def request_to_go_live(service_id):
    """Show the go-live checklist, flagging which prerequisites are met."""
    # More than one user with manage_service counts as having a team.
    has_team_members = user_api_client.get_count_of_users_with_permission(
        service_id, 'manage_service'
    ) > 1
    has_templates = service_api_client.count_service_templates(service_id) > 0
    has_email_templates = service_api_client.count_service_templates(service_id, template_type='email') > 0
    has_email_reply_to_address = bool(
        service_api_client.get_reply_to_email_addresses(service_id)
    )
    return render_template(
        'views/service-settings/request-to-go-live.html',
        has_team_members=has_team_members,
        has_templates=has_templates,
        has_email_templates=has_email_templates,
        has_email_reply_to_address=has_email_reply_to_address,
    )
@main.route("/services/<service_id>/service-settings/submit-request-to-go-live", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def submit_request_to_go_live(service_id):
    """Collect go-live details and raise a Zendesk ticket for the team."""
    form = RequestToGoLiveForm()
    if form.validate_on_submit():
        zendesk_client.create_ticket(
            subject='Request to go live - {}'.format(current_service['name']),
            # The ticket body is a fixed plain-text report of the answers.
            message=(
                'Service: {}\n'
                '{}\n'
                '\n---'
                '\nOrganisation type: {}'
                '\nAgreement signed: {}'
                '\nChannel: {}\nStart date: {}\nStart volume: {}'
                '\nPeak volume: {}'
                '\nFeatures: {}'
            ).format(
                current_service['name'],
                # Absolute dashboard link so the ticket works outside the app.
                url_for('main.service_dashboard', service_id=current_service['id'], _external=True),
                current_service['organisation_type'],
                AgreementInfo.from_current_user().as_human_readable,
                # Comma-joined list of the channels ticked on the form.
                formatted_list(filter(None, (
                    'email' if form.channel_email.data else None,
                    'text messages' if form.channel_sms.data else None,
                    'letters' if form.channel_letter.data else None,
                )), before_each='', after_each=''),
                form.start_date.data,
                form.start_volume.data,
                form.peak_volume.data,
                formatted_list(filter(None, (
                    'one off' if form.method_one_off.data else None,
                    'file upload' if form.method_upload.data else None,
                    'API' if form.method_api.data else None,
                )), before_each='', after_each='')
            ),
            ticket_type=zendesk_client.TYPE_QUESTION,
            user_email=current_user.email_address,
            user_name=current_user.name
        )
        flash('Thanks for your request to go live. We’ll get back to you within one working day.', 'default')
        return redirect(url_for('.service_settings', service_id=service_id))
    return render_template('views/service-settings/submit-request-to-go-live.html', form=form)
@main.route("/services/<service_id>/service-settings/switch-live")
@login_required
@user_is_platform_admin
def service_switch_live(service_id):
    """Toggle a service between trial ('restricted') and live mode."""
    currently_restricted = current_service['restricted']
    service_api_client.update_service(
        current_service['id'],
        # TODO This limit should be set depending on the agreement signed by
        # with Notify.
        message_limit=250000 if currently_restricted else 50,
        restricted=not currently_restricted
    )
    return redirect(url_for('.service_settings', service_id=service_id))
@main.route("/services/<service_id>/service-settings/research-mode")
@login_required
@user_is_platform_admin
def service_switch_research_mode(service_id):
    """Flip the research-mode flag for a service."""
    new_value = not current_service['research_mode']
    service_api_client.update_service_with_properties(
        service_id,
        {"research_mode": new_value}
    )
    return redirect(url_for('.service_settings', service_id=service_id))
def switch_service_permissions(service_id, permission, sms_sender=None):
    """Toggle a single permission for the current service."""
    currently_enabled = permission in current_service['permissions']
    force_service_permission(
        service_id,
        permission,
        on=not currently_enabled,
        sms_sender=sms_sender
    )
def force_service_permission(service_id, permission, on=False, sms_sender=None):
    """Explicitly switch one permission on or off for a service."""
    existing = set(current_service['permissions'])
    target = {permission}
    # Union adds the permission; set difference removes it.
    new_permissions = existing | target if on else existing - target
    update_service_permissions(
        service_id,
        new_permissions,
        sms_sender=sms_sender
    )
def update_service_permissions(service_id, permissions, sms_sender=None):
    """Persist a new permission set (and optionally an SMS sender)."""
    # Keep the in-memory service in sync with what we send to the API.
    current_service['permissions'] = list(permissions)
    payload = {'permissions': current_service['permissions']}
    if sms_sender:
        payload['sms_sender'] = sms_sender
    service_api_client.update_service_with_properties(service_id, payload)
@main.route("/services/<service_id>/service-settings/can-send-email")
@login_required
@user_is_platform_admin
def service_switch_can_send_email(service_id):
    """Toggle the 'email' permission for a service."""
    switch_service_permissions(service_id, 'email')
    settings_url = url_for('.service_settings', service_id=service_id)
    return redirect(settings_url)
@main.route("/services/<service_id>/service-settings/can-send-sms")
@login_required
@user_is_platform_admin
def service_switch_can_send_sms(service_id):
    """Toggle the 'sms' permission for a service."""
    switch_service_permissions(service_id, 'sms')
    settings_url = url_for('.service_settings', service_id=service_id)
    return redirect(settings_url)
@main.route("/services/<service_id>/service-settings/email-auth")
@login_required
@user_is_platform_admin
def service_switch_email_auth(service_id):
    """Toggle the 'email_auth' permission for a service."""
    switch_service_permissions(service_id, 'email_auth')
    settings_url = url_for('.service_settings', service_id=service_id)
    return redirect(settings_url)
@main.route("/services/<service_id>/service-settings/can-send-precompiled-letter")
@login_required
@user_is_platform_admin
def service_switch_can_send_precompiled_letter(service_id):
    """Toggle the 'precompiled_letter' permission for a service."""
    switch_service_permissions(service_id, 'precompiled_letter')
    settings_url = url_for('.service_settings', service_id=service_id)
    return redirect(settings_url)
@main.route("/services/<service_id>/service-settings/can-upload-document", methods=['GET', 'POST'])
@login_required
@user_is_platform_admin
def service_switch_can_upload_document(service_id):
    """Toggle the 'upload_document' permission, first collecting a contact
    link when one is needed.
    """
    form = ServiceContactLinkForm()
    has_permission = 'upload_document' in current_service['permissions']
    # Turning the permission off, or on when the service already has a
    # contact_link, needs no extra input from the user.
    if has_permission or current_service.get('contact_link'):
        switch_service_permissions(service_id, 'upload_document')
        return redirect(url_for('.service_settings', service_id=service_id))
    if form.validate_on_submit():
        # Store the contact link, then flip the permission on.
        service_api_client.update_service(
            current_service['id'],
            contact_link=form.url.data
        )
        switch_service_permissions(service_id, 'upload_document')
        return redirect(url_for('.service_settings', service_id=service_id))
    return render_template('views/service-settings/contact_link.html', form=form)
@main.route("/services/<service_id>/service-settings/archive", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def archive_service(service_id):
    """Archive a service, asking for confirmation on GET."""
    if request.method != 'POST':
        # GET: re-render the settings page with a confirmation banner.
        flash('There\'s no way to reverse this! Are you sure you want to archive this service?', 'delete')
        return service_settings(service_id)
    service_api_client.archive_service(service_id)
    return redirect(url_for('.service_settings', service_id=service_id))
@main.route("/services/<service_id>/service-settings/suspend", methods=["GET", "POST"])
@login_required
@user_has_permissions('manage_service')
def suspend_service(service_id):
    """Suspend a service (revoking API keys), asking for confirmation on GET."""
    if request.method != 'POST':
        flash("This will suspend the service and revoke all api keys. Are you sure you want to suspend this service?",
              'suspend')
        return service_settings(service_id)
    service_api_client.suspend_service(service_id)
    return redirect(url_for('.service_settings', service_id=service_id))
@main.route("/services/<service_id>/service-settings/resume", methods=["GET", "POST"])
@login_required
@user_has_permissions('manage_service')
def resume_service(service_id):
    """Resume a suspended service, asking for confirmation on GET."""
    if request.method != 'POST':
        flash("This will resume the service. New api key are required for this service to use the API.", 'resume')
        return service_settings(service_id)
    service_api_client.resume_service(service_id)
    return redirect(url_for('.service_settings', service_id=service_id))
@main.route("/services/<service_id>/service-settings/contact-link", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def service_set_contact_link(service_id):
    """Show and process the form for a service's contact link."""
    form = ServiceContactLinkForm()
    if request.method == 'GET':
        # Pre-fill with the existing link, if any.
        form.url.data = current_service.get('contact_link')
    if form.validate_on_submit():
        new_link = form.url.data
        service_api_client.update_service(
            current_service['id'],
            contact_link=new_link
        )
        return redirect(url_for('.service_settings', service_id=current_service['id']))
    return render_template('views/service-settings/contact_link.html', form=form)
@main.route("/services/<service_id>/service-settings/set-email", methods=['GET'])
@login_required
@user_has_permissions('manage_service')
def service_set_email(service_id):
    """Render the static email-settings overview page."""
    return render_template('views/service-settings/set-email.html')
@main.route("/services/<service_id>/service-settings/set-reply-to-email", methods=['GET'])
@login_required
@user_has_permissions('manage_service')
def service_set_reply_to_email(service_id):
    """Legacy URL: forward to the reply-to management page."""
    target = url_for('.service_email_reply_to', service_id=service_id)
    return redirect(target)
@main.route("/services/<service_id>/service-settings/email-reply-to", methods=['GET'])
@login_required
@user_has_permissions('manage_service', 'manage_api_keys')
def service_email_reply_to(service_id):
    """List the reply-to email addresses configured for a service."""
    addresses = service_api_client.get_reply_to_email_addresses(service_id)
    return render_template(
        'views/service-settings/email_reply_to.html',
        reply_to_email_addresses=addresses)
@main.route("/services/<service_id>/service-settings/email-reply-to/add", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def service_add_email_reply_to(service_id):
    """Add a reply-to email address; the first one is always the default."""
    form = ServiceReplyToEmailForm()
    existing = service_api_client.get_reply_to_email_addresses(service_id)
    first_email_address = len(existing) == 0
    if form.validate_on_submit():
        service_api_client.add_reply_to_email_address(
            current_service['id'],
            email_address=form.email_address.data,
            # The very first address must be the default; otherwise honor
            # the checkbox on the form.
            is_default=first_email_address or form.is_default.data
        )
        return redirect(url_for('.service_email_reply_to', service_id=service_id))
    return render_template(
        'views/service-settings/email-reply-to/add.html',
        form=form,
        first_email_address=first_email_address)
@main.route(
    "/services/<service_id>/service-settings/email-reply-to/<reply_to_email_id>/edit",
    methods=['GET', 'POST'],
    endpoint="service_edit_email_reply_to"
)
@main.route(
    "/services/<service_id>/service-settings/email-reply-to/<reply_to_email_id>/delete",
    methods=['GET'],
    endpoint="service_confirm_delete_email_reply_to"
)
@login_required
@user_has_permissions('manage_service')
def service_edit_email_reply_to(service_id, reply_to_email_id):
    """Edit a reply-to address; also renders the delete-confirmation view."""
    form = ServiceReplyToEmailForm()
    reply_to_email_address = service_api_client.get_reply_to_email_address(service_id, reply_to_email_id)
    if request.method == 'GET':
        # Seed the form with the stored address.
        form.email_address.data = reply_to_email_address['email_address']
        form.is_default.data = reply_to_email_address['is_default']
    if form.validate_on_submit():
        service_api_client.update_reply_to_email_address(
            current_service['id'],
            reply_to_email_id=reply_to_email_id,
            email_address=form.email_address.data,
            # The current default cannot be demoted here; it stays the default.
            is_default=True if reply_to_email_address['is_default'] else form.is_default.data,
        )
        return redirect(url_for('.service_email_reply_to', service_id=service_id))
    return render_template(
        'views/service-settings/email-reply-to/edit.html',
        form=form,
        reply_to_email_address_id=reply_to_email_id,
        confirm_delete=(request.endpoint == "main.service_confirm_delete_email_reply_to"),
    )
@main.route("/services/<service_id>/service-settings/email-reply-to/<reply_to_email_id>/delete", methods=['POST'])
@login_required
@user_has_permissions('manage_service')
def service_delete_email_reply_to(service_id, reply_to_email_id):
    """Delete a reply-to address and return to the reply-to list."""
    service_api_client.delete_reply_to_email_address(
        service_id=current_service['id'],
        reply_to_email_id=reply_to_email_id,
    )
    return redirect(url_for('.service_email_reply_to', service_id=service_id))
@main.route("/services/<service_id>/service-settings/set-inbound-number", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def service_set_inbound_number(service_id):
    """Assign an available inbound SMS number to the service."""
    available_inbound_numbers = inbound_number_client.get_available_inbound_sms_numbers()
    service_has_inbound_number = inbound_number_client.get_inbound_sms_number_for_service(service_id)['data'] != {}
    form = ServiceInboundNumberForm(
        inbound_number_choices=[
            (number['id'], number['number'])
            for number in available_inbound_numbers['data']
        ]
    )
    if form.validate_on_submit():
        # The inbound number is also registered as the default SMS sender.
        service_api_client.add_sms_sender(
            current_service['id'],
            sms_sender=form.inbound_number.data,
            is_default=True,
            inbound_number_id=form.inbound_number.data,
        )
        switch_service_permissions(current_service['id'], 'inbound_sms')
        return redirect(url_for('.service_settings', service_id=service_id))
    return render_template(
        'views/service-settings/set-inbound-number.html',
        form=form,
        no_available_numbers=available_inbound_numbers['data'] == [],
        service_has_inbound_number=service_has_inbound_number,
    )
@main.route("/services/<service_id>/service-settings/set-sms", methods=['GET'])
@login_required
@user_has_permissions('manage_service')
def service_set_sms(service_id):
    """Render the static SMS settings page."""
    return render_template('views/service-settings/set-sms.html')
@main.route("/services/<service_id>/service-settings/sms-prefix", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def service_set_sms_prefix(service_id):
    """Toggle whether outgoing text messages are prefixed with the service name."""
    form = SMSPrefixForm(enabled=('on' if current_service['prefix_sms'] else 'off'))
    # The label includes the service's own name, so it is set dynamically.
    form.enabled.label.text = 'Start all text messages with ‘{}:’'.format(current_service['name'])
    if form.validate_on_submit():
        service_api_client.update_service(
            current_service['id'],
            prefix_sms=(form.enabled.data == 'on'),
        )
        return redirect(url_for('.service_settings', service_id=service_id))
    return render_template('views/service-settings/sms-prefix.html', form=form)
@main.route("/services/<service_id>/service-settings/set-international-sms", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def service_set_international_sms(service_id):
    """Switch the 'international_sms' permission on or off for the service."""
    form = InternationalSMSForm(
        enabled='on' if 'international_sms' in current_service['permissions'] else 'off'
    )
    if form.validate_on_submit():
        force_service_permission(
            service_id,
            'international_sms',
            on=(form.enabled.data == 'on'),
        )
        return redirect(url_for(".service_settings", service_id=service_id))
    return render_template(
        'views/service-settings/set-international-sms.html',
        form=form,
    )
@main.route("/services/<service_id>/service-settings/set-inbound-sms", methods=['GET'])
@login_required
@user_has_permissions('manage_service')
def service_set_inbound_sms(service_id):
    """Show the inbound SMS settings page with the service's inbound number."""
    inbound = inbound_number_client.get_inbound_sms_number_for_service(service_id)
    return render_template(
        'views/service-settings/set-inbound-sms.html',
        # Empty string when the service has no inbound number assigned.
        inbound_number=inbound['data'].get('number', ''),
    )
@main.route("/services/<service_id>/service-settings/set-letters", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def service_set_letters(service_id):
    """Switch the 'letter' permission on or off for the service."""
    form = ServiceSwitchLettersForm(
        enabled='on' if 'letter' in current_service['permissions'] else 'off'
    )
    if form.validate_on_submit():
        force_service_permission(
            service_id,
            'letter',
            on=(form.enabled.data == 'on'),
        )
        return redirect(url_for(".service_settings", service_id=service_id))
    return render_template(
        'views/service-settings/set-letters.html',
        form=form,
    )
@main.route("/services/<service_id>/service-settings/set-auth-type", methods=['GET'])
@login_required
@user_has_permissions('manage_service')
def service_set_auth_type(service_id):
    """Render the static auth-type settings page."""
    return render_template('views/service-settings/set-auth-type.html')
@main.route("/services/<service_id>/service-settings/set-basic-view", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service', 'send_messages')
def service_set_basic_view(service_id):
    """Switch the 'caseworking' (basic view) permission on or off.

    Users previewing the basic view may reach this page, so the preview flag
    is cleared first; the actual toggle still requires 'manage_service'.
    """
    if current_user.previewing_basic_view:
        session.pop('basic', None)
    if not current_user.has_permissions('manage_service'):
        abort(403)
    # BUGFIX: the form was initialised with a bare bool, but every sibling
    # switch form uses 'on'/'off' and the submit handler compares == 'on'.
    form = ServiceBasicViewForm(
        enabled='on' if 'caseworking' in current_service['permissions'] else 'off'
    )
    if form.validate_on_submit():
        force_service_permission(
            service_id,
            'caseworking',
            on=(form.enabled.data == 'on'),
        )
        return redirect(
            url_for('.service_settings', service_id=service_id)
        )
    return render_template(
        'views/service-settings/set-basic-view.html',
        form=form,
    )
@main.route("/services/<service_id>/preview-basic-view")
@login_required
@user_has_permissions('manage_service')
def preview_basic_view(service_id):
    """Flag the session to preview the basic view, then go to the dashboard."""
    session['basic'] = True
    return redirect(url_for('.service_dashboard', service_id=service_id))
@main.route("/services/<service_id>/service-settings/letter-contacts", methods=['GET'])
@login_required
@user_has_permissions('manage_service', 'manage_api_keys')
def service_letter_contact_details(service_id):
    """List the letter contact blocks configured for the service."""
    return render_template(
        'views/service-settings/letter-contact-details.html',
        letter_contact_details=service_api_client.get_letter_contacts(service_id),
    )
@main.route("/services/<service_id>/service-settings/letter-contact/add", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def service_add_letter_contact(service_id):
    """Add a letter contact block; the first block added becomes the default."""
    form = ServiceLetterContactBlockForm()
    first_contact_block = len(service_api_client.get_letter_contacts(service_id)) == 0
    if form.validate_on_submit():
        service_api_client.add_letter_contact(
            current_service['id'],
            # Strip carriage returns from pasted text; store None when empty.
            contact_block=form.letter_contact_block.data.replace('\r', '') or None,
            is_default=first_contact_block if first_contact_block else form.is_default.data
        )
        from_template = request.args.get('from_template')
        if from_template:
            # Came here from the template sender picker; go back there.
            return redirect(
                url_for('.set_template_sender', service_id=service_id, template_id=from_template)
            )
        return redirect(url_for('.service_letter_contact_details', service_id=service_id))
    return render_template(
        'views/service-settings/letter-contact/add.html',
        form=form,
        first_contact_block=first_contact_block,
    )
@main.route("/services/<service_id>/service-settings/letter-contact/<letter_contact_id>/edit", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def service_edit_letter_contact(service_id, letter_contact_id):
    """Edit an existing letter contact block."""
    letter_contact_block = service_api_client.get_letter_contact(service_id, letter_contact_id)
    form = ServiceLetterContactBlockForm(letter_contact_block=letter_contact_block['contact_block'])
    if request.method == 'GET':
        # Seed the default flag from the stored block.
        form.is_default.data = letter_contact_block['is_default']
    if form.validate_on_submit():
        service_api_client.update_letter_contact(
            current_service['id'],
            letter_contact_id=letter_contact_id,
            contact_block=form.letter_contact_block.data.replace('\r', '') or None,
            # The current default block cannot be demoted here.
            is_default=True if letter_contact_block['is_default'] else form.is_default.data
        )
        return redirect(url_for('.service_letter_contact_details', service_id=service_id))
    return render_template(
        'views/service-settings/letter-contact/edit.html',
        form=form,
        letter_contact_id=letter_contact_block['id'],
    )
@main.route("/services/<service_id>/service-settings/sms-sender", methods=['GET'])
@login_required
@user_has_permissions('manage_service', 'manage_api_keys')
def service_sms_senders(service_id):
    """List the service's SMS senders, annotating each with a usage hint."""
    def attach_hint(sender):
        # Builds a parenthesised hint like "(default and receives replies)".
        hints = []
        if sender['is_default']:
            hints.append("default")
        if sender['inbound_number_id']:
            hints.append("receives replies")
        if hints:
            sender['hint'] = "(" + " and ".join(hints) + ")"

    sms_senders = service_api_client.get_sms_senders(service_id)
    for sender in sms_senders:
        attach_hint(sender)
    return render_template(
        'views/service-settings/sms-senders.html',
        sms_senders=sms_senders
    )
@main.route("/services/<service_id>/service-settings/sms-sender/add", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def service_add_sms_sender(service_id):
    """Add an SMS sender; the first sender added becomes the default."""
    form = ServiceSmsSenderForm()
    first_sms_sender = len(service_api_client.get_sms_senders(service_id)) == 0
    if form.validate_on_submit():
        service_api_client.add_sms_sender(
            current_service['id'],
            # Strip carriage returns; store None when the field is empty.
            sms_sender=form.sms_sender.data.replace('\r', '') or None,
            is_default=first_sms_sender if first_sms_sender else form.is_default.data
        )
        return redirect(url_for('.service_sms_senders', service_id=service_id))
    return render_template(
        'views/service-settings/sms-sender/add.html',
        form=form,
        first_sms_sender=first_sms_sender,
    )
@main.route(
"/services/<service_id>/service-settings/sms-sender/<sms_sender_id>/edit",
methods=['GET', 'POST'],
endpoint="service_edit_sms_sender"
)
@main.route(
"/services/<service_id>/service-settings/sms-sender/<sms_sender_id>/delete",
methods=['GET'],
endpoint="service_confirm_delete_sms_sender"
)
@login_required
@user_has_permissions('manage_service')
def service_edit_sms_sender(service_id, sms_sender_id):
sms_sender = service_api_client.get_sms_sender(service_id, sms_sender_id)
is_inbound_number = sms_sender['inbound_number_id']
if is_inbound_number:
form = ServiceEditInboundNumberForm(is_default=sms_sender['is_default'])
else:
form = | |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import time
import sys
import random
import math
import tensorflow as tf
import numpy as np
import importlib
import argparse
import facenet
import lfw
import tensorflow.contrib.slim as slim
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
import models
def main(args):
    """Train a face-recognition network with a combined softmax-style loss.

    Builds the input pipeline, inference graph and training op, optionally
    restores a pretrained model, then runs the train/checkpoint/evaluate
    loop for `args.max_nrof_epochs` epochs.

    Returns:
        The directory the trained model checkpoints are saved to.
    """
    network = importlib.import_module(args.model_def)

    np.random.seed(seed=args.seed)
    random.seed(args.seed)

    # Load the training dataset, dropping classes with too few images.
    train_set = facenet.get_dataset(args.data_dir)
    if args.filter_min_nrof_images_per_class > 0:
        train_set = clean_dataset(train_set, args.filter_min_nrof_images_per_class)
    nrof_classes = len(train_set)

    pretrained_model = None
    if args.pretrained_model:
        pretrained_model = os.path.expanduser(args.pretrained_model)
        print('Pre-trained model: %s' % pretrained_model)

    # Load the LFW pairs used for periodic validation.
    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        lfw_paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)

    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)

        image_list, label_list = facenet.get_image_paths_and_labels(train_set)
        assert len(image_list) > 0, 'The dataset should not be empty'

        # Queue of shuffled indices into image_list/label_list; one dequeue
        # yields enough indices for a whole epoch.
        labels = ops.convert_to_tensor(label_list, dtype=tf.int32)
        range_size = array_ops.shape(labels)[0]
        with tf.device('/cpu:0'):
            index_queue = tf.train.range_input_producer(range_size, num_epochs=None,
                                                        shuffle=True, seed=None, capacity=32)
        index_dequeue_op = index_queue.dequeue_many(args.batch_size*args.epoch_size, 'index_dequeue')

        learning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')
        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
        image_paths_placeholder = tf.placeholder(tf.string, shape=(None, 1), name='image_paths')
        labels_placeholder = tf.placeholder(tf.int64, shape=(None, 1), name='labels')

        input_queue = data_flow_ops.FIFOQueue(capacity=1000000,
                                              dtypes=[tf.string, tf.int64],
                                              shapes=[(1,), (1,)],
                                              shared_name=None, name=None)
        enqueue_op = input_queue.enqueue_many([image_paths_placeholder, labels_placeholder], name='enqueue_op')

        # Several preprocessing pipelines feed the same batch-join queue.
        nrof_preprocess_threads = args.nrof_preprocess_threads
        images_and_labels = []
        for _ in range(nrof_preprocess_threads):
            images_and_labels = distorted_inputs(images_and_labels, input_queue, args)

        image_batch, label_batch = tf.train.batch_join(
            images_and_labels, batch_size=args.batch_size,
            enqueue_many=True,
            capacity=4 * nrof_preprocess_threads * args.batch_size,
            allow_smaller_final_batch=True)
        print(image_batch)
        # NOTE(review): original comment says the prefetch queue is "not
        # working properly yet" — kept as-is.
        batch_queue = slim.prefetch_queue.prefetch_queue(
            [image_batch, label_batch], dynamic_pad=True, capacity=4)
        image_batch, label_batch = batch_queue.dequeue()

        image_batch = tf.identity(image_batch, 'image_batch')
        image_batch = tf.identity(image_batch, 'input')
        label_batch = tf.identity(label_batch, 'label_batch')

        print('Total number of classes: %d' % nrof_classes)
        print('Total number of examples: %d' % len(image_list))
        print('Building training graph')

        # Build the inference graph; embeddings are L2-normalised prelogits.
        prelogits, _ = network.inference(image_batch, args.keep_probability,
            phase_train=phase_train_placeholder, bottleneck_layer_size=args.embedding_size,
            weight_decay=args.weight_decay, reuse=None)
        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')
        print(embeddings)

        nrof_classes = len(train_set)
        weights = tf.get_variable('softmax_weights', shape=(args.embedding_size, nrof_classes), dtype=tf.float32,
            initializer=tf.variance_scaling_initializer(),
            regularizer=slim.l2_regularizer(args.weight_decay), trainable=True)
        weights = tf.nn.l2_normalize(weights, 0, name='norm_weights')
        if args.keep_probability < 1.0:
            # BUGFIX: the original passed the not-yet-defined name
            # `scaled_prelogits` into slim.dropout, raising a NameError
            # whenever keep_probability < 1.0. Apply dropout to `prelogits`.
            # TODO(review): the result is not consumed downstream — confirm
            # whether it was meant to feed the loss.
            scaled_prelogits = slim.dropout(prelogits, args.keep_probability,
                is_training=phase_train_placeholder, scope='Dropout')
        logits = facenet.combined_loss(embeddings, label_batch, nrof_classes, weights, scale_factor=args.l2_constrained_scale_factor, m1=args.m1, m2=args.m2)

        # Penalise prelogit norms that stray from the target scale factor.
        if args.norm_loss_factor > 0.0:
            norm_loss = args.norm_loss_factor*tf.reduce_mean(tf.pow(tf.norm(prelogits, axis=1)-args.l2_constrained_scale_factor, 2))
            tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, norm_loss)

        learning_rate = tf.train.exponential_decay(learning_rate_placeholder, global_step,
            args.learning_rate_decay_epochs*args.epoch_size, args.learning_rate_decay_factor, staircase=True)
        tf.summary.scalar('learning_rate', learning_rate)

        # Average cross-entropy loss across the batch.
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=label_batch, logits=logits, name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
        tf.add_to_collection('losses', cross_entropy_mean)

        # Total loss = cross entropy + all regularisation terms.
        regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([cross_entropy_mean] + regularization_losses, name='total_loss')

        train_op = facenet.train(total_loss, global_step, args.optimizer,
            learning_rate, args.moving_average_decay, tf.trainable_variables(), args.num_gpus, args.log_histograms)

        # When fine-tuning, skip restoring the classifier ('Logits') variables.
        if args.finetune:
            print("finetune model")
            all_vars = tf.trainable_variables()
            vars_to_restore = [v for v in all_vars if not v.name.startswith('Logits')]
        else:
            vars_to_restore = tf.trainable_variables()
        saver = tf.train.Saver(vars_to_restore, max_to_keep=40)

        summary_op = tf.summary.merge_all()

        # Create timestamped model and log directories.
        subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
        log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
        if not os.path.isdir(log_dir):
            os.makedirs(log_dir)
        model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
        if not os.path.isdir(model_dir):
            os.makedirs(model_dir)
        print('Model directory: %s' % model_dir)
        print('Log directory: %s' % log_dir)

        # Record the run's arguments for reproducibility.
        facenet.write_arguments_to_file(args, os.path.join(model_dir, 'arguments.txt'))

        # Start running operations on the graph.
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True, log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        save_graph_def(sess, model_dir)
        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord, sess=sess)

        with sess.as_default():
            if pretrained_model:
                print('Restoring pretrained model: %s' % pretrained_model)
                saver.restore(sess, pretrained_model)

            # Training and validation loop.
            print('Running training')
            epoch = 0
            while epoch < args.max_nrof_epochs:
                step = sess.run(global_step, feed_dict=None)
                epoch = step // args.epoch_size
                train(args, sess, epoch, image_list, label_list, index_dequeue_op, enqueue_op, image_paths_placeholder, labels_placeholder,
                    learning_rate_placeholder, phase_train_placeholder, global_step,
                    total_loss, train_op, summary_op, summary_writer, regularization_losses, args.learning_rate_schedule_file, learning_rate, log_dir)
                # Checkpoint after every epoch.
                save_variables_and_metagraph(sess, saver, summary_writer, model_dir, subdir, step)
                # Periodic LFW evaluation.
                if args.lfw_dir:
                    evaluate(sess, lfw_paths, actual_issame, args.lfw_batch_size, args.lfw_nrof_folds, log_dir, step, summary_writer)
    return model_dir
def distorted_inputs(images_and_labels, input_queue, args):
    """Dequeue filenames/labels, decode and augment each image, and append
    the resulting [images, label] pair to `images_and_labels`.

    Returns the (mutated) `images_and_labels` accumulator.
    """
    filenames, label = input_queue.dequeue()
    images = []
    for filename in tf.unstack(filenames):
        contents = tf.read_file(filename)
        image = tf.image.decode_image(contents)
        # Resize so the shorter side is 10% larger than the final crop size.
        smallest_side = int(args.image_size * 1.10)
        image = aspect_preserving_resize(image, smallest_side)
        if args.random_rotate:
            image = tf.py_func(facenet.random_rotate_image, [image], tf.uint8)
        if args.random_crop:
            image = tf.random_crop(image, [args.image_size, args.image_size, 3])
        else:
            image = tf.image.resize_image_with_crop_or_pad(image, args.image_size, args.image_size)
        if args.random_flip:
            image = tf.image.random_flip_left_right(image)
        image.set_shape((args.image_size, args.image_size, 3))
        # Normalise each image to zero mean / unit variance.
        images.append(tf.image.per_image_standardization(image))
    images_and_labels.append([images, label])
    return images_and_labels
def inputs(input_queue, args):
    """Dequeue filenames/labels and build undistorted (evaluation) inputs.

    Returns a list containing one [images, label] pair, matching the shape
    of the accumulator produced by distorted_inputs().
    """
    filenames, label = input_queue.dequeue()
    images = []
    for filename in tf.unstack(filenames):
        file_contents = tf.read_file(filename)
        image = tf.image.decode_image(file_contents)
        image = tf.image.resize_image_with_crop_or_pad(image, args.image_size, args.image_size)
        image.set_shape((args.image_size, args.image_size, 3))
        image = tf.image.per_image_standardization(image)
        images.append(image)
    # BUGFIX: `images_and_labels` was referenced here without ever being
    # defined, so this function always raised NameError. Build and return
    # the accumulator locally instead.
    images_and_labels = [[images, label]]
    return images_and_labels
def _add_loss_summaries(total_loss, scope=None):
    """Attach moving-average tracking and scalar summaries to all losses.

    Args:
        total_loss: total loss tensor from loss().
        scope: optional scope used to filter the 'losses' collection.

    Returns:
        The op that updates the loss moving averages.
    """
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses', scope)
    loss_averages_op = loss_averages.apply(losses + [total_loss])

    # Summarise each raw loss alongside its smoothed (averaged) version,
    # which keeps the original loss name for the averaged curve.
    for loss in losses + [total_loss]:
        tf.summary.scalar(loss.op.name + ' (raw)', loss)
        tf.summary.scalar(loss.op.name, loss_averages.average(loss))
    return loss_averages_op
def find_threshold(var, percentile):
    """Return the value below which `percentile` percent of `var` falls.

    Estimated by linear interpolation of the empirical CDF built from a
    100-bin histogram of the data.
    """
    counts, edges = np.histogram(var, 100)
    cdf = np.float32(np.cumsum(counts)) / np.sum(counts)
    centers = (edges[:-1] + edges[1:]) / 2
    return np.interp(percentile * 0.01, cdf, centers)
def clean_dataset(dataset, min_nrof_images_per_class):
    """Remove, in place, every class that has fewer than
    `min_nrof_images_per_class` images, and return the same list object."""
    dataset[:] = [
        cls for cls in dataset
        if len(cls.image_paths) >= min_nrof_images_per_class
    ]
    return dataset
def train(args, sess, epoch, image_list, label_list, index_dequeue_op, enqueue_op, image_paths_placeholder, labels_placeholder,
      learning_rate_placeholder, phase_train_placeholder, global_step,
      loss, train_op, summary_op, summary_writer, regularization_losses, learning_rate_schedule_file, learning_rate, log_dir):
    """Run one epoch of training and return the final global step.

    Enqueues one epoch's worth of shuffled image paths/labels, then runs
    `args.epoch_size` training steps, printing progress every 10 steps and
    writing summaries plus a text log entry every 100 steps.
    """
    batch_number = 0
    # A non-positive learning rate means "read it from the schedule file".
    if args.learning_rate > 0.0:
        lr = args.learning_rate
    else:
        lr = facenet.get_learning_rate_from_file(learning_rate_schedule_file, epoch)
    # Select this epoch's examples via the pre-shuffled index queue.
    index_epoch = sess.run(index_dequeue_op)
    label_epoch = np.array(label_list)[index_epoch]
    image_epoch = np.array(image_list)[index_epoch]
    # Enqueue one epoch of image paths and labels, shaped (N, 1) as the
    # placeholders expect.
    labels_array = np.expand_dims(np.array(label_epoch), 1)
    image_paths_array = np.expand_dims(np.array(image_epoch), 1)
    sess.run(enqueue_op, {image_paths_placeholder: image_paths_array, labels_placeholder: labels_array})
    # Training loop
    train_time = 0
    while batch_number < args.epoch_size:
        start_time = time.time()
        feed_dict = {learning_rate_placeholder: lr, phase_train_placeholder: True}
        if (batch_number % 100 == 0):
            # Every 100 batches also evaluate the summary op and append to
            # the on-disk training log.
            err, _, step, reg_loss, summary_str, updated_lr = sess.run([loss, train_op, global_step, regularization_losses, summary_op, learning_rate], feed_dict=feed_dict)
            summary_writer.add_summary(summary_str, global_step=step)
            with open(os.path.join(log_dir, 'training_log.txt'), 'at') as f:
                f.write('Epoch: [%d][%d/%d]\tLoss %2.3f\tRegLoss %2.3f\tlr %.4f\n' %
                    (epoch, batch_number+1, args.epoch_size, err, np.sum(reg_loss), updated_lr))
        else:
            err, _, step, reg_loss, updated_lr = sess.run([loss, train_op, global_step, regularization_losses, learning_rate], feed_dict=feed_dict)
        duration = time.time() - start_time
        if (batch_number % 10 == 0):
            print('Epoch: [%d][%d/%d]\tTime %.3f\tImages/sec %d\tLoss %2.3f\tRegLoss %2.3f\tlr %.4f' %
                (epoch, batch_number+1, args.epoch_size, duration, args.batch_size/duration, err, np.sum(reg_loss), updated_lr))
        batch_number += 1
        train_time += duration
    # Add the epoch's wall-clock time to the summaries.
    # NOTE(review): `step` is unbound if args.epoch_size == 0 — this assumes
    # at least one batch per epoch.
    summary = tf.Summary()
    #pylint: disable=maybe-no-member
    summary.value.add(tag='time/total', simple_value=train_time)
    summary_writer.add_summary(summary, step)
    return step
def evaluate(sess, paths, actual_issame, batch_size, nrof_folds, log_dir, step, summary_writer):
start_time = time.time()
images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
image_size = images_placeholder.get_shape()[1]
embedding_size = embeddings.get_shape()[1]
# Run forward pass to calculate embeddings
print('Runnning forward pass on LFW images')
batch_size = batch_size
nrof_images = len(paths)
assert nrof_images % batch_size == 0, 'The number of LFW images must be an integer multiple of the LFW batch size'
nrof_batches = int(math.ceil(1.0*nrof_images / batch_size))
emb_array = np.zeros((nrof_images, embedding_size))
for i in range(nrof_batches):
start_index = i*batch_size
end_index = min((i+1)*batch_size, nrof_images)
paths_batch = paths[start_index:end_index]
images = facenet.load_data(paths_batch, False, False, image_size)
feed_dict = { images_placeholder:images, phase_train_placeholder:False }
emb_array[start_index:end_index,:] = sess.run(embeddings, feed_dict=feed_dict)
#assert np.array_equal(lab_array, np.arange(nrof_images))==True, 'Wrong labels used for evaluation, possibly caused | |
Split Stock Move lines into production lot which specified split by quantity.
@param cr: the database cursor
@param uid: the user id
@param ids: ids of stock move object to be splited
@param split_by_qty : specify split by qty
@param prefix : specify prefix of production lot
@param with_lot : if true, prodcution lot will assign for split line otherwise not.
@param context: context arguments
@return: Splited move lines
"""
if context is None:
context = {}
if quantity <= 0:
raise osv.except_osv(_('Warning!'), _('Please provide proper quantity.'))
res = []
for move in self.browse(cr, uid, ids, context=context):
if split_by_qty <= 0 or quantity == 0:
return res
uos_qty = split_by_qty / move.product_qty * move.product_uos_qty
quantity_rest = quantity % split_by_qty
uos_qty_rest = split_by_qty / move.product_qty * move.product_uos_qty
update_val = {
'product_qty': split_by_qty,
'product_uos_qty': uos_qty,
}
for idx in range(int(quantity//split_by_qty)):
if not idx and move.product_qty<=quantity:
current_move = move.id
else:
current_move = self.copy(cr, uid, move.id, {'state': move.state})
res.append(current_move)
if with_lot:
update_val['prodlot_id'] = self._create_lot(cr, uid, [current_move], move.product_id.id)
self.write(cr, uid, [current_move], update_val)
if quantity_rest > 0:
idx = int(quantity//split_by_qty)
update_val['product_qty'] = quantity_rest
update_val['product_uos_qty'] = uos_qty_rest
if not idx and move.product_qty<=quantity:
current_move = move.id
else:
current_move = self.copy(cr, uid, move.id, {'state': move.state})
res.append(current_move)
if with_lot:
update_val['prodlot_id'] = self._create_lot(cr, uid, [current_move], move.product_id.id)
self.write(cr, uid, [current_move], update_val)
return res
def action_consume(self, cr, uid, ids, quantity, location_id=False, context=None):
    """ Consume the given quantity (expressed in the move UoM) from the moves.

    When only part of a move is consumed, the consumed part is split off
    into a copy of the move and the remainder stays on the original move;
    all consumed moves are then set to done.

    @param cr: the database cursor
    @param uid: the user id
    @param ids: ids of stock move object to be consumed
    @param quantity : specify consume quantity
    @param location_id : specify source location
    @param context: context arguments
    @return: Consumed lines
    """
    if context is None:
        context = {}
    if quantity <= 0:
        raise osv.except_osv(_('Warning!'), _('Please provide proper quantity.'))
    res = []
    for move in self.browse(cr, uid, ids, context=context):
        move_qty = move.product_qty
        if move_qty <= 0:
            raise osv.except_osv(_('Error!'), _('Cannot consume a move with negative or zero quantity.'))
        quantity_rest = move.product_qty - quantity
        uos_qty_rest = quantity_rest / move_qty * move.product_uos_qty
        if quantity_rest <= 0:
            # Consuming at least the whole move: clamp and take everything.
            quantity_rest = 0
            uos_qty_rest = 0
            quantity = move.product_qty
        # UoS quantity corresponding to the consumed quantity.
        # BUGFIX: previously only computed inside the quantity_rest <= 0
        # branch, which raised a NameError below whenever a small positive
        # rest rounded to zero in float_compare.
        uos_qty = quantity / move_qty * move.product_uos_qty
        if float_compare(quantity_rest, 0, precision_rounding=move.product_id.uom_id.rounding):
            # Partial consumption: split the consumed part into a new move
            # and keep the rest on the original move.
            default_val = {
                'product_qty': quantity,
                'product_uos_qty': uos_qty,
                'state': move.state,
                'location_id': location_id or move.location_id.id,
            }
            current_move = self.copy(cr, uid, move.id, default_val)
            res += [current_move]
            update_val = {}
            update_val['product_qty'] = quantity_rest
            update_val['product_uos_qty'] = uos_qty_rest
            self.write(cr, uid, [move.id], update_val)
        else:
            # Full consumption (rest is zero within rounding): update in place.
            quantity_rest = quantity
            uos_qty_rest = uos_qty
            res += [move.id]
            update_val = {
                'product_qty': quantity_rest,
                'product_uos_qty': uos_qty_rest,
                'location_id': location_id or move.location_id.id,
            }
            self.write(cr, uid, [move.id], update_val)
    self.action_done(cr, uid, res, context=context)
    return res
# FIXME: needs refactoring, this code is partially duplicated in stock_picking.do_partial()!
    def do_partial(self, cr, uid, ids, partial_datas, context=None):
        """ Makes partial pickings and moves done.
        @param partial_datas: Dictionary containing details of partial picking
                          like partner_id, delivery_date, delivery
                          moves with product_id, product_qty, uom
        @return: list of ids of the moves that were fully processed (set to done)
        """
        res = {}  # NOTE(review): rebound to cr.fetchall() below; this initial dict is never used
        picking_obj = self.pool.get('stock.picking')
        product_obj = self.pool.get('product.product')
        currency_obj = self.pool.get('res.currency')
        uom_obj = self.pool.get('product.uom')
        wf_service = netsvc.LocalService("workflow")
        if context is None:
            context = {}
        # Sort each move into one of three buckets by comparing the move
        # quantity with the quantity entered in the partial-picking wizard.
        complete, too_many, too_few = [], [], []
        move_product_qty = {}   # move id -> quantity processed in the wizard
        prodlot_ids = {}        # move id -> production lot chosen in the wizard
        for move in self.browse(cr, uid, ids, context=context):
            if move.state in ('done', 'cancel'):
                continue
            partial_data = partial_datas.get('move%s'%(move.id), False)
            # NOTE(review): assert is stripped under python -O; wizard data is assumed present
            assert partial_data, _('Missing partial picking data for move #%s.') % (move.id)
            product_qty = partial_data.get('product_qty',0.0)
            move_product_qty[move.id] = product_qty
            product_uom = partial_data.get('product_uom',False)
            product_price = partial_data.get('product_price',0.0)
            product_currency = partial_data.get('product_currency',False)
            prodlot_ids[move.id] = partial_data.get('prodlot_id')
            if move.product_qty == product_qty:
                complete.append(move)
            elif move.product_qty > product_qty:
                too_few.append(move)
            else:
                too_many.append(move)
            # Average price computation: for incoming moves of average-costed
            # products, fold the received quantity into the product's standard
            # price (converted to the company currency and the product's UoM).
            if (move.picking_id.type == 'in') and (move.product_id.cost_method == 'average'):
                product = product_obj.browse(cr, uid, move.product_id.id)
                move_currency_id = move.company_id.currency_id.id
                # NOTE(review): mutates the caller's context dict for price_get below
                context['currency_id'] = move_currency_id
                qty = uom_obj._compute_qty(cr, uid, product_uom, product_qty, product.uom_id.id)
                if qty > 0:
                    new_price = currency_obj.compute(cr, uid, product_currency,
                            move_currency_id, product_price, round=False)
                    new_price = uom_obj._compute_price(cr, uid, product_uom, new_price,
                            product.uom_id.id)
                    if product.qty_available <= 0:
                        new_std_price = new_price
                    else:
                        # Get the standard price
                        amount_unit = product.price_get('standard_price', context=context)[product.id]
                        # Weighted average of the existing stock and the received quantity.
                        new_std_price = ((amount_unit * product.qty_available)\
                            + (new_price * qty))/(product.qty_available + qty)
                    product_obj.write(cr, uid, [product.id],{'standard_price': new_std_price})
                    # Record the values that were chosen in the wizard, so they can be
                    # used for inventory valuation if real-time valuation is enabled.
                    self.write(cr, uid, [move.id],
                            {'price_unit': product_price,
                             'price_currency_id': product_currency,
                            })
        # Partially processed moves: split off a new, already-assigned move for
        # the processed part and decrease the original move by that quantity.
        for move in too_few:
            product_qty = move_product_qty[move.id]
            if product_qty != 0:
                defaults = {
                    'product_qty' : product_qty,
                    'product_uos_qty': product_qty,
                    'picking_id' : move.picking_id.id,
                    'state': 'assigned',
                    'move_dest_id': False,
                    'price_unit': move.price_unit,
                }
                prodlot_id = prodlot_ids[move.id]
                if prodlot_id:
                    defaults.update(prodlot_id=prodlot_id)
                new_move = self.copy(cr, uid, move.id, defaults)
                complete.append(self.browse(cr, uid, new_move))
            # The remainder loses its lot/tracking info (it stays on the split move).
            self.write(cr, uid, [move.id],
                    {
                        'product_qty': move.product_qty - product_qty,
                        'product_uos_qty': move.product_qty - product_qty,
                        'prodlot_id': False,
                        'tracking_id': False,
                    })
        # Over-processed moves are confirmed at their original quantity only.
        for move in too_many:
            self.write(cr, uid, [move.id],
                    {
                        'product_qty': move.product_qty,
                        'product_uos_qty': move.product_qty,
                    })
            complete.append(move)
        # Finish every fully processed move, then close the picking once all
        # of its moves are done.
        for move in complete:
            if prodlot_ids.get(move.id):
                self.write(cr, uid, [move.id],{'prodlot_id': prodlot_ids.get(move.id)})
            self.action_done(cr, uid, [move.id], context=context)
            if move.picking_id.id :
                # TOCHECK : Done picking if all moves are done
                cr.execute("""
                    SELECT move.id FROM stock_picking pick
                    RIGHT JOIN stock_move move ON move.picking_id = pick.id AND move.state = %s
                    WHERE pick.id = %s""",
                    ('done', move.picking_id.id))
                res = cr.fetchall()
                if len(res) == len(move.picking_id.move_lines):
                    picking_obj.action_move(cr, uid, [move.picking_id.id])
                    wf_service.trg_validate(uid, 'stock.picking', move.picking_id.id, 'button_done', cr)
        return [move.id for move in complete]
stock_move()
class stock_inventory(osv.osv):
_name = "stock.inventory"
_description = "Inventory"
_columns = {
'name': fields.char('Inventory Reference', size=64, required=True, readonly=True, states={'draft': [('readonly', False)]}),
'date': fields.datetime('Creation Date', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'date_done': fields.datetime('Date done'),
'inventory_line_id': fields.one2many('stock.inventory.line', 'inventory_id', 'Inventories', readonly=True, states={'draft': [('readonly', False)]}),
'move_ids': fields.many2many('stock.move', 'stock_inventory_move_rel', 'inventory_id', 'move_id', 'Created Moves'),
'state': fields.selection( (('draft', 'Draft'), ('cancel','Cancelled'), ('confirm','Confirmed'), ('done', 'Done')), 'Status', readonly=True, select=True),
'company_id': fields.many2one('res.company', 'Company', required=True, select=True, readonly=True, states={'draft':[('readonly',False)]}),
}
_defaults = {
'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'state': 'draft',
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.inventory', context=c)
}
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
default = default.copy()
default.update({'move_ids': [], 'date_done': False})
return super(stock_inventory, self).copy(cr, uid, id, default, context=context)
def _inventory_line_hook(self, cr, uid, inventory_line, move_vals):
""" Creates a stock move from an inventory line
@param inventory_line:
@param move_vals:
@return:
"""
return self.pool.get('stock.move').create(cr, uid, move_vals)
def action_done(self, cr, uid, ids, context=None):
""" Finish the inventory
@return: True
"""
if context is None:
context = {}
move_obj = self.pool.get('stock.move')
for inv in self.browse(cr, uid, ids, context=context):
move_obj.action_done(cr, uid, [x.id for x in inv.move_ids], context=context)
self.write(cr, uid, [inv.id], {'state':'done', 'date_done': time.strftime('%Y-%m-%d %H:%M:%S')}, context=context)
return True
    def action_confirm(self, cr, uid, ids, context=None):
        """ Confirm the inventory and create the correcting stock moves.

        For each inventory line, compares the counted quantity with the
        theoretical stock at the line's location and creates a move that
        transfers the difference to/from the product's inventory-loss location.
        @return: True
        """
        if context is None:
            context = {}
        # to perform the correct inventory corrections we need analyze stock location by
        # location, never recursively, so we use a special context
        product_context = dict(context, compute_child=False)
        location_obj = self.pool.get('stock.location')
        for inv in self.browse(cr, uid, ids, context=context):
            move_ids = []
            for line in inv.inventory_line_id:
                pid = line.product_id.id
                # NOTE: product_context is mutated on every iteration; only
                # these keys change between lines.
                product_context.update(uom=line.product_uom.id, to_date=inv.date, date=inv.date, prodlot_id=line.prod_lot_id.id)
                # Theoretical quantity in stock at the line's location.
                amount = location_obj._product_get(cr, uid, line.location_id.id, [pid], product_context)[pid]
                # Positive change = surplus counted, negative = shortage.
                change = line.product_qty - amount
                lot_id = line.prod_lot_id.id
                if change:
                    location_id = line.product_id.property_stock_inventory.id
                    value = {
                        'name': _('INV:') + (line.inventory_id.name or ''),
                        'product_id': line.product_id.id,
                        'product_uom': line.product_uom.id,
                        'prodlot_id': lot_id,
                        'date': inv.date,
                    }
                    if change > 0:
                        # Surplus: move from the inventory-loss location into stock.
                        value.update( {
                            'product_qty': change,
                            'location_id': location_id,
                            'location_dest_id': line.location_id.id,
                        })
                    else:
                        # Shortage: move from stock to the inventory-loss location.
                        value.update( {
                            'product_qty': -change,
                            'location_id': line.location_id.id,
                            'location_dest_id': location_id,
                        })
                    move_ids.append(self._inventory_line_hook(cr, uid, line, value))
            # (6, 0, ids) replaces the inventory's move list with the new moves.
            self.write(cr, uid, [inv.id], {'state': 'confirm', 'move_ids': [(6, 0, move_ids)]})
            self.pool.get('stock.move').action_confirm(cr, uid, move_ids, context=context)
        return True
def action_cancel_draft(self, cr, uid, ids, context=None):
""" Cancels the stock move and change inventory state to draft.
@return: True
"""
for inv in self.browse(cr, uid, ids, context=context):
self.pool.get('stock.move').action_cancel(cr, uid, [x.id for x in inv.move_ids], context=context)
self.write(cr, uid, [inv.id], {'state':'draft'}, context=context)
return True
def action_cancel_inventory(self, cr, uid, ids, context=None):
""" Cancels both stock move and inventory
@return: True
"""
move_obj = self.pool.get('stock.move')
account_move_obj = self.pool.get('account.move')
for inv in self.browse(cr, uid, ids, context=context):
move_obj.action_cancel(cr, uid, [x.id for x in inv.move_ids], context=context)
for move in inv.move_ids:
account_move_ids = account_move_obj.search(cr, uid, [('name', '=', move.name)])
if account_move_ids:
account_move_data_l = account_move_obj.read(cr, uid, account_move_ids, ['state'], context=context)
for account_move in account_move_data_l:
if account_move['state'] == 'posted':
raise osv.except_osv(_('User Error!'),
_('In order to cancel this inventory, | |
in the tree.
for atom in ring:
atoms = {'*': atom}
entry = ring_database.descend_tree(molecule, atoms)
matched_ring_entries.append(entry)
if matched_ring_entries is []:
raise KeyError('Node not found in database.')
# Decide which group to keep
is_partial_match = True
complete_matched_groups = [entry for entry in matched_ring_entries
if not is_ring_partial_matched(ring, entry.item)]
if complete_matched_groups:
is_partial_match = False
matched_ring_entries = complete_matched_groups
depth_list = [len(ring_database.ancestors(entry)) for entry in matched_ring_entries]
most_specific_match_indices = [i for i, x in enumerate(depth_list) if x == max(depth_list)]
most_specific_matched_entries = [matched_ring_entries[idx] for idx in most_specific_match_indices]
if len(set(most_specific_matched_entries)) != 1:
logging.debug('More than one type of node was found to be most specific for this ring.')
logging.debug('This is either due to a database error in the ring or polycyclic groups, '
'or a partial match between the group and the full ring.')
logging.debug(most_specific_matched_entries)
# Condense the number of most specific groups down to one
most_specific_matched_entry = matched_ring_entries[most_specific_match_indices[0]]
node = most_specific_matched_entry
if node is None:
raise DatabaseError('Unable to determine thermo parameters for {0}: no data for {1} or '
'any of its ancestors.'.format(molecule, most_specific_match_indices[0]))
while node is not None and node.data is None:
# do average of its children
success, averaged_solute_data = self._average_children_solute(node)
if success:
node.data = averaged_solute_data
else:
node = node.parent
data = node.data
comment = node.label
while isinstance(data, str) and data is not None:
for entry in ring_database.entries.values():
if entry.label == data:
data = entry.data
comment = entry.label
node = entry
break
data.comment = '{0}({1})'.format(ring_database.label, comment)
if solute_data is None:
return data, node, is_partial_match
else:
return add_solute_data(solute_data, data, group_additivity=True, verbose=True), node, is_partial_match
# By setting verbose=True, we turn on the comments of ring correction to pass the unittest.
# Typically this comment is very short and also very helpful to check if the ring correction is calculated correctly.
def _average_children_solute(self, node):
"""
Use children's solute data to guess solute data of parent `node`
that doesn't have solute data built-in in tree yet.
For `node` has children that have solute data, return success flag
`True` and the average solute data.
For `node` whose children that all have no solute data, return flag
`False` and None for the solute data.
"""
if not node.children:
if node.data is None:
return False, None
else:
return True, node.data
else:
children_solute_data_list = []
for child in node.children:
if child.data is None:
success, child_solute_data_average = self._average_children_solute(child)
if success:
children_solute_data_list.append(child_solute_data_average)
else:
children_solute_data_list.append(child.data)
if children_solute_data_list:
return True, average_solute_data(children_solute_data_list)
else:
return False, None
def _add_group_solute_data(self, solute_data, database, molecule, atom):
"""
Determine the group additivity solute data for the atom `atom`
in the structure `structure`, and add it to the existing solute data
`solute_data`.
"""
node0 = database.descend_tree(molecule, atom, None)
if node0 is None:
raise KeyError('Node not found in database.')
# It's possible (and allowed) that items in the tree may not be in the
# library, in which case we need to fall up the tree until we find an
# ancestor that has an entry in the library
node = node0
while node is not None and node.data is None:
node = node.parent
if node is None:
raise KeyError('Node has no parent with data in database.')
data = node.data
comment = node.label
while isinstance(data, str) and data is not None:
for entry in database.entries.values():
if entry.label == data:
data = entry.data
comment = entry.label
break
data.comment = '{0}({1})'.format(database.label, comment)
# This code prints the hierarchy of the found node; useful for debugging
# result = ''
# while node is not None:
# result = ' -> ' + node + result
# node = database.tree.parent[node]
# print result[4:]
if solute_data is None:
return data
else:
return add_solute_data(solute_data, data, group_additivity=True)
def _remove_group_solute_data(self, solute_data, database, molecule, atom):
"""
Based on the _add_group_solute_data method. Just replace the last line with 'return remove_solute_data()'.
Determine the group additivity solute data for the atom `atom` in the structure `structure`,
and REMOVE it from the existing solute data `solute_data`.
"""
node0 = database.descend_tree(molecule, atom, None)
if node0 is None:
raise KeyError('Node not found in database.')
# It's possible (and allowed) that items in the tree may not be in the
# library, in which case we need to fall up the tree until we find an
# ancestor that has an entry in the library
node = node0
while node is not None and node.data is None:
node = node.parent
if node is None:
raise KeyError('Node has no parent with data in database.')
data = node.data
comment = node.label
while isinstance(data, str) and data is not None:
for entry in database.entries.values():
if entry.label == data:
data = entry.data
comment = entry.label
break
data.comment = '{0}({1})'.format(database.label, comment)
if solute_data is None:
return data
else:
return remove_solute_data(solute_data, data, True)
def calc_h(self, solute_data, solvent_data):
"""
Returns the enthalpy of solvation, at 298K, in J/mol
"""
# Use Mintz parameters for solvents. Multiply by 1000 to go from kJ->J to maintain consistency
delH = 1000 * ((solute_data.S * solvent_data.s_h) +
(solute_data.B * solvent_data.b_h) +
(solute_data.E * solvent_data.e_h) +
(solute_data.L * solvent_data.l_h) +
(solute_data.A * solvent_data.a_h) + solvent_data.c_h)
return delH
def calc_g(self, solute_data, solvent_data):
"""
Returns the Gibbs free energy of solvation, at 298K, in J/mol
"""
# Use Abraham parameters for solvents to get log K
logK = ((solute_data.S * solvent_data.s_g) +
(solute_data.B * solvent_data.b_g) +
(solute_data.E * solvent_data.e_g) +
(solute_data.L * solvent_data.l_g) +
(solute_data.A * solvent_data.a_g) + solvent_data.c_g)
# Convert to delG with units of J/mol
delG = -8.314 * 298 * 2.303 * logK
return delG
def calc_s(self, delG, delH):
"""
Returns the entropy of solvation, at 298K, in J/mol/K
"""
delS = (delH - delG) / 298
return delS
def get_solvation_correction(self, solute_data, solvent_data):
"""
Given a solute_data and solvent_data object, calculates the enthalpy, entropy,
and Gibbs free energy of solvation at 298 K. Returns a SolvationCorrection
object
"""
correction = SolvationCorrection(0.0, 0.0, 0.0)
correction.enthalpy = self.calc_h(solute_data, solvent_data)
correction.gibbs = self.calc_g(solute_data, solvent_data)
correction.entropy = self.calc_s(correction.gibbs, correction.enthalpy)
return correction
def get_Kfactor(self, solute_data, solvent_data, T):
"""
Given solute_data, solvent_data, and temperature, calculates K-factor T
if the solvent's name_in_coolprop is not None. K-factor = y_solute / x_solute.
If the temperature is above the critical temperature of the solvent, it raises InpurError.
If the solvent's name_in_coolprop is None, it raises DatabaseError
"""
if solvent_data.name_in_coolprop is not None:
Tc = solvent_data.get_solvent_critical_temperature()
if T < Tc:
kfactor_parameters = self.get_Kfactor_parameters(solute_data, solvent_data)
A = kfactor_parameters.lower_T[0]
B = kfactor_parameters.lower_T[1]
C = kfactor_parameters.lower_T[2]
D = kfactor_parameters.higher_T
T_transition = kfactor_parameters.T_transition
solvent_name = solvent_data.name_in_coolprop
rho_c = PropsSI('rhomolar_critical', solvent_name) # critical density of the solvent in mol/m^3
rho_l = PropsSI('Dmolar', 'T', T, 'Q', 0, solvent_name) # saturated liquid phase density of the solvent, in mol/m^3
if T < T_transition:
Kfactor = math.exp((A + B * (1 - T / Tc) ** 0.355 + C * math.exp(1 - T / Tc) * (T / Tc) ** 0.59) / (T / Tc))
else:
Kfactor = math.exp(D * (rho_l / rho_c -1) / (T / Tc))
else:
raise InputError("The input temperature {0} K cannot be greater than "
"or equal to the critical temperature, {1} K".format(T, Tc))
else:
raise DatabaseError("K-factor calculation or temperature-dependent solvation free energy calculation "
"is not available for the solvent whose `name_in_coolprop` is None")
return Kfactor
def get_T_dep_solvation_energy(self, solute_data, solvent_data, T):
"""
Given solute_data, solvent_data, and temperature, calculates the Gibbs free energy of
solvation at T if the solvent's name_in_coolprop is not None.
"""
Kfactor = self.get_Kfactor(solute_data, solvent_data, T)
rho_g = PropsSI('Dmolar', 'T', T, 'Q', 1, solvent_data.name_in_coolprop) # saturated gas phase density of the solvent, in mol/m^3
rho_l = PropsSI('Dmolar', 'T', T, 'Q', 0, solvent_data.name_in_coolprop) # saturated liquid phase density of the solvent, in mol/m^3
delG = constants.R * T * math.log(Kfactor * rho_g / (rho_l)) # in J/mol
return delG
def get_Kfactor_parameters(self, solute_data, solvent_data, T_trans_factor=0.75):
"""
Given solute_data and solvent_data object, if name_in_coolprop is not None for the solvent,
it finds the fitted K-factor parameters for the solvent-solute pair based on the enthalpy
| |
= cobra_model.metabolites.get_by_id('10fthf_c')
gly = cobra_model.metabolites.get_by_id('gly_c')
co2 = cobra_model.metabolites.get_by_id('co2_c')
glu = cobra_model.metabolites.get_by_id('glu_DASH_L_c')
gln = cobra_model.metabolites.get_by_id('gln_DASH_L_c')
asp = cobra_model.metabolites.get_by_id('asp_DASH_L_c')
fum = cobra_model.metabolites.get_by_id('fum_c')
#make GTPSYN (irreversible)
rxn_mets = {};
rxn_mets[r5p] = -1;
rxn_mets[fthf] = -1;
rxn_mets[gly] = -1;
rxn_mets[co2] = -1;
rxn_mets[fthf] = -1;
rxn_mets[gln] = -1;
rxn_mets[gln] = -1;
rxn_mets[asp] = -1;
rxn_mets[gtp] = 1;
rxn_mets[glu] = 1;
rxn_mets[glu] = 1;
rxn_mets[fum] = 1;
rxn = Reaction('GTPSYN');
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
cobra_model.repair();
#expand the model to include VPMATr_reverse and VPMATr:
#get metabolites not in the model
met_row = {}
met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014','3mob_c');
mob3 = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
mob3.charge = met_row['charge']
#get metabolites in the model
val = cobra_model.metabolites.get_by_id('val_DASH_L_c')
ala = cobra_model.metabolites.get_by_id('ala_DASH_L_c')
pyr = cobra_model.metabolites.get_by_id('pyr_c')
#make VPMATr_reverse (irreversible)
rxn_mets = {};
rxn_mets[val] = -1;
rxn_mets[pyr] = -1;
rxn_mets[mob3] = 1;
rxn_mets[ala] = 1;
rxn = Reaction('VPMATr_reverse');
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
cobra_model.repair();
#make VPMATr (irreversible)
rxn_mets = {};
rxn_mets[mob3] = -1;
rxn_mets[ala] = -1;
rxn_mets[val] = 1;
rxn_mets[pyr] = 1;
rxn = Reaction('VPMATr');
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
cobra_model.repair();
#expand the model to include COASYN:
#get metabolites not in the model
met_row = {}
met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014','coa_c');
coa = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
coa.charge = met_row['charge']
#get metabolites in the model
cys = cobra_model.metabolites.get_by_id('cys_DASH_L_c')
mlthf = cobra_model.metabolites.get_by_id('mlthf_c')
#make COASYN (irreversible)
rxn_mets = {};
rxn_mets[atp] = -1;
rxn_mets[mlthf] = -1;
rxn_mets[mob3] = -1;
rxn_mets[asp] = -1;
rxn_mets[cys] = -1;
rxn_mets[coa] = 1;
rxn_mets[co2] = 1;
rxn_mets[co2] = 1;
rxn = Reaction('COASYN');
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
cobra_model.repair();
#expand the model to include FADSYN:
#get metabolites not in the model
met_row = {}
met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014','fad_c');
fad = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
fad.charge = met_row['charge']
#get metabolites in the model
ru5p = cobra_model.metabolites.get_by_id('ru5p_DASH_D_c')
#make FADSYN (irreversible)
rxn_mets = {};
rxn_mets[gtp] = -1;
rxn_mets[ru5p] = -1;
rxn_mets[ru5p] = -1;
rxn_mets[atp] = -1;
rxn_mets[fad] = 1;
rxn_mets[co2] = 1;
rxn_mets[co2] = 1;
rxn_mets[co2] = 1;
rxn = Reaction('FADSYN');
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
cobra_model.repair();
#expand the model to include CBMKr and CBMKr_reverse:
#get metabolites not in the model
met_row = {}
met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014','cbp_c');
cbp = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
cbp.charge = met_row['charge']
#make CBMKr (irreversible)
rxn_mets = {};
rxn_mets[co2] = -1;
rxn_mets[cbp] = 1;
rxn = Reaction('CBMKr');
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
cobra_model.repair();
#make CBMKr_reverse (irreversible)
rxn_mets = {};
rxn_mets[cbp] = -1;
rxn_mets[co2] = 1;
rxn = Reaction('CBMKr_reverse');
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
cobra_model.repair();
#expand the model to include UTPSYN:
#get metabolites not in the model
met_row = {}
met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014','utp_c');
utp = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
utp.charge = met_row['charge']
#make UTPSYN (irreversible)
rxn_mets = {};
rxn_mets[r5p] = -1;
rxn_mets[cbp] = -1;
rxn_mets[asp] = -1;
rxn_mets[utp] = 1;
rxn_mets[co2] = 1;
rxn = Reaction('UTPSYN');
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
cobra_model.repair();
# update selected reactions to account for coa_c
cobra_model.reactions.get_by_id("ArgSYN").add_metabolites({coa:1});
cobra_model.reactions.get_by_id("CS").add_metabolites({coa:1});
cobra_model.reactions.get_by_id("LeuSYN").add_metabolites({coa:1});
cobra_model.reactions.get_by_id("PDH").add_metabolites({coa:-1});
cobra_model.reactions.get_by_id("PTAr_ACKr_ACS").add_metabolites({coa:1});
cobra_model.reactions.get_by_id("PTAr_ACKr_ACS_reverse").add_metabolites({coa:-1});
cobra_model.reactions.get_by_id("SERAT_CYSS").add_metabolites({coa:1});
cobra_model.reactions.get_by_id("THRD_GLYAT").add_metabolites({coa:-1});
cobra_model.reactions.get_by_id("MALS").add_metabolites({coa:1});
# update selected mappings to account for coa_c
for rxn,row in enumerate(atomMappingReactions):
if row['rxn_id'] == 'ArgSYN':
atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[-1,-1,-1,-1,-1]
atomMappingReactions[rxn]['products_stoichiometry_tracked']=[1,1,1,1,1]
atomMappingReactions[rxn]['reactants_ids_tracked']=['glu_DASH_L_c','co2_c','gln_DASH_L_c','asp_DASH_L_c','accoa_c']
atomMappingReactions[rxn]['products_ids_tracked']=['arg_DASH_L_c','akg_c','fum_c','ac_c','coa_c']
atomMappingReactions[rxn]['reactants_mapping']=['abcde','f','ghijk','lmno','ABCDEFGHIJKLMNOPQRSTUpq']
atomMappingReactions[rxn]['products_mapping']=['abcdef','ghijk','lmno','pq','ABCDEFGHIJKLMNOPQRSTU']
atomMappingReactions[rxn]['reactants_elements_tracked']=[]
atomMappingReactions[rxn]['products_elements_tracked']=[]
atomMappingReactions[rxn]['reactants_positions_tracked']=[]
atomMappingReactions[rxn]['products_positions_tracked']=[]
for cnt,mapping in enumerate(atomMappingReactions[rxn]['reactants_mapping']):
positions = []
elements = []
for pos,element in enumerate(mapping):
positions.append(pos);
elements.append('C');
atomMappingReactions[rxn]['reactants_elements_tracked'].append(elements)
atomMappingReactions[rxn]['reactants_positions_tracked'].append(positions)
for cnt,mapping in enumerate(atomMappingReactions[rxn]['products_mapping']):
positions = []
elements = []
for pos,element in enumerate(mapping):
positions.append(pos);
elements.append('C');
atomMappingReactions[rxn]['products_elements_tracked'].append(elements)
atomMappingReactions[rxn]['products_positions_tracked'].append(positions)
elif row['rxn_id'] == 'CS':
atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[-1,-1]
atomMappingReactions[rxn]['products_stoichiometry_tracked']=[1,1]
atomMappingReactions[rxn]['reactants_ids_tracked']=['oaa_c','accoa_c']
atomMappingReactions[rxn]['products_ids_tracked']=['cit_c','coa_c']
atomMappingReactions[rxn]['reactants_mapping']=['abcd','ABCDEFGHIJKLMNOPQRSTUef']
atomMappingReactions[rxn]['products_mapping']=['dcbfea','ABCDEFGHIJKLMNOPQRSTU']
atomMappingReactions[rxn]['reactants_elements_tracked']=[]
atomMappingReactions[rxn]['products_elements_tracked']=[]
atomMappingReactions[rxn]['reactants_positions_tracked']=[]
atomMappingReactions[rxn]['products_positions_tracked']=[]
for cnt,mapping in enumerate(atomMappingReactions[rxn]['reactants_mapping']):
positions = []
elements = []
for pos,element in enumerate(mapping):
positions.append(pos);
elements.append('C');
atomMappingReactions[rxn]['reactants_elements_tracked'].append(elements)
atomMappingReactions[rxn]['reactants_positions_tracked'].append(positions)
for cnt,mapping in enumerate(atomMappingReactions[rxn]['products_mapping']):
positions = []
elements = []
for pos,element in enumerate(mapping):
positions.append(pos);
elements.append('C');
atomMappingReactions[rxn]['products_elements_tracked'].append(elements)
atomMappingReactions[rxn]['products_positions_tracked'].append(positions)
elif row['rxn_id'] == 'LeuSYN':
atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[-1,-1,-1,-1]
atomMappingReactions[rxn]['products_stoichiometry_tracked']=[1,1,1,1,1]
atomMappingReactions[rxn]['reactants_ids_tracked']=['accoa_c','pyr_c','pyr_c','glu_DASH_L_c']
atomMappingReactions[rxn]['products_ids_tracked']=['leu_DASH_L_c','co2_c','co2_c','akg_c','coa_c']
atomMappingReactions[rxn]['reactants_mapping']=['ABCDEFGHIJKLMNOPQRSTUab','cde','fgh','ijklm']
atomMappingReactions[rxn]['products_mapping']=['abdghe','c','f','ijklm','ABCDEFGHIJKLMNOPQRSTU']
atomMappingReactions[rxn]['reactants_elements_tracked']=[]
atomMappingReactions[rxn]['products_elements_tracked']=[]
atomMappingReactions[rxn]['reactants_positions_tracked']=[]
atomMappingReactions[rxn]['products_positions_tracked']=[]
for cnt,mapping in enumerate(atomMappingReactions[rxn]['reactants_mapping']):
positions = []
elements = []
for pos,element in enumerate(mapping):
positions.append(pos);
elements.append('C');
atomMappingReactions[rxn]['reactants_elements_tracked'].append(elements)
atomMappingReactions[rxn]['reactants_positions_tracked'].append(positions)
for cnt,mapping in enumerate(atomMappingReactions[rxn]['products_mapping']):
positions = []
elements = []
for pos,element in enumerate(mapping):
positions.append(pos);
elements.append('C');
atomMappingReactions[rxn]['products_elements_tracked'].append(elements)
atomMappingReactions[rxn]['products_positions_tracked'].append(positions)
elif row['rxn_id'] == 'PDH':
atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[-1,-1]
atomMappingReactions[rxn]['products_stoichiometry_tracked']=[1,1]
atomMappingReactions[rxn]['reactants_ids_tracked']=['pyr_c','coa_c']
atomMappingReactions[rxn]['products_ids_tracked']=['accoa_c','co2_c']
atomMappingReactions[rxn]['reactants_mapping']=['abc','ABCDEFGHIJKLMNOPQRSTU']
atomMappingReactions[rxn]['products_mapping']=['ABCDEFGHIJKLMNOPQRSTUbc','a']
atomMappingReactions[rxn]['reactants_elements_tracked']=[]
atomMappingReactions[rxn]['products_elements_tracked']=[]
atomMappingReactions[rxn]['reactants_positions_tracked']=[]
atomMappingReactions[rxn]['products_positions_tracked']=[]
for cnt,mapping in enumerate(atomMappingReactions[rxn]['reactants_mapping']):
positions = []
elements = []
for pos,element in enumerate(mapping):
positions.append(pos);
elements.append('C');
atomMappingReactions[rxn]['reactants_elements_tracked'].append(elements)
atomMappingReactions[rxn]['reactants_positions_tracked'].append(positions)
for cnt,mapping in enumerate(atomMappingReactions[rxn]['products_mapping']):
positions = []
elements = []
for pos,element in enumerate(mapping):
positions.append(pos);
elements.append('C');
atomMappingReactions[rxn]['products_elements_tracked'].append(elements)
atomMappingReactions[rxn]['products_positions_tracked'].append(positions)
elif row['rxn_id'] == 'PTAr_ACKr_ACS':
atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[-1]
atomMappingReactions[rxn]['products_stoichiometry_tracked']=[1,1]
atomMappingReactions[rxn]['reactants_ids_tracked']=['accoa_c']
atomMappingReactions[rxn]['products_ids_tracked']=['ac_c','coa_c']
atomMappingReactions[rxn]['reactants_mapping']=['ABCDEFGHIJKLMNOPQRSTUab']
atomMappingReactions[rxn]['products_mapping']=['ab','ABCDEFGHIJKLMNOPQRSTU']
atomMappingReactions[rxn]['reactants_elements_tracked']=[]
atomMappingReactions[rxn]['products_elements_tracked']=[]
atomMappingReactions[rxn]['reactants_positions_tracked']=[]
atomMappingReactions[rxn]['products_positions_tracked']=[]
for cnt,mapping in enumerate(atomMappingReactions[rxn]['reactants_mapping']):
positions = []
elements = []
for pos,element in enumerate(mapping):
positions.append(pos);
elements.append('C');
atomMappingReactions[rxn]['reactants_elements_tracked'].append(elements)
atomMappingReactions[rxn]['reactants_positions_tracked'].append(positions)
for cnt,mapping in enumerate(atomMappingReactions[rxn]['products_mapping']):
positions = []
elements = []
for pos,element in enumerate(mapping):
positions.append(pos);
elements.append('C');
atomMappingReactions[rxn]['products_elements_tracked'].append(elements)
atomMappingReactions[rxn]['products_positions_tracked'].append(positions)
elif row['rxn_id'] == 'PTAr_ACKr_ACS_reverse':
atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[-1,-1]
atomMappingReactions[rxn]['products_stoichiometry_tracked']=[1]
atomMappingReactions[rxn]['reactants_ids_tracked']=['ac_c','coa_c']
atomMappingReactions[rxn]['products_ids_tracked']=['accoa_c']
atomMappingReactions[rxn]['reactants_mapping']=['ab','ABCDEFGHIJKLMNOPQRSTU']
atomMappingReactions[rxn]['products_mapping']=['ABCDEFGHIJKLMNOPQRSTUab']
atomMappingReactions[rxn]['reactants_elements_tracked']=[]
atomMappingReactions[rxn]['products_elements_tracked']=[]
atomMappingReactions[rxn]['reactants_positions_tracked']=[]
atomMappingReactions[rxn]['products_positions_tracked']=[]
for cnt,mapping in enumerate(atomMappingReactions[rxn]['reactants_mapping']):
positions = []
elements = []
for pos,element in enumerate(mapping):
positions.append(pos);
elements.append('C');
atomMappingReactions[rxn]['reactants_elements_tracked'].append(elements)
atomMappingReactions[rxn]['reactants_positions_tracked'].append(positions)
for cnt,mapping in enumerate(atomMappingReactions[rxn]['products_mapping']):
positions = []
elements = []
for pos,element in enumerate(mapping):
positions.append(pos);
elements.append('C');
atomMappingReactions[rxn]['products_elements_tracked'].append(elements)
atomMappingReactions[rxn]['products_positions_tracked'].append(positions)
elif row['rxn_id'] == 'SERAT_CYSS':
atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[-1,-1]
atomMappingReactions[rxn]['products_stoichiometry_tracked']=[1,1,1]
atomMappingReactions[rxn]['reactants_ids_tracked']=['ser_DASH_L_c','accoa_c']
atomMappingReactions[rxn]['products_ids_tracked']=['cys_DASH_L_c','ac_c','coa_c']
atomMappingReactions[rxn]['reactants_mapping']=['abc','ABCDEFGHIJKLMNOPQRSTUde']
atomMappingReactions[rxn]['products_mapping']=['abc','de','ABCDEFGHIJKLMNOPQRSTU']
atomMappingReactions[rxn]['reactants_elements_tracked']=[]
atomMappingReactions[rxn]['products_elements_tracked']=[]
atomMappingReactions[rxn]['reactants_positions_tracked']=[]
atomMappingReactions[rxn]['products_positions_tracked']=[]
for cnt,mapping in enumerate(atomMappingReactions[rxn]['reactants_mapping']):
positions = []
elements = []
for pos,element in enumerate(mapping):
positions.append(pos);
elements.append('C');
atomMappingReactions[rxn]['reactants_elements_tracked'].append(elements)
atomMappingReactions[rxn]['reactants_positions_tracked'].append(positions)
for cnt,mapping in enumerate(atomMappingReactions[rxn]['products_mapping']):
positions = []
elements = []
for pos,element in enumerate(mapping):
positions.append(pos);
elements.append('C');
atomMappingReactions[rxn]['products_elements_tracked'].append(elements)
atomMappingReactions[rxn]['products_positions_tracked'].append(positions)
elif row['rxn_id'] == 'THRD_GLYAT':
atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[-1,-1]
atomMappingReactions[rxn]['products_stoichiometry_tracked']=[1,1]
atomMappingReactions[rxn]['reactants_ids_tracked']=['thr_DASH_L_c','coa_c']
atomMappingReactions[rxn]['products_ids_tracked']=['gly_c','accoa_c']
atomMappingReactions[rxn]['reactants_mapping']=['abcd','ABCDEFGHIJKLMNOPQRSTU']
atomMappingReactions[rxn]['products_mapping']=['ab','ABCDEFGHIJKLMNOPQRSTUcd']
atomMappingReactions[rxn]['reactants_elements_tracked']=[]
atomMappingReactions[rxn]['products_elements_tracked']=[]
atomMappingReactions[rxn]['reactants_positions_tracked']=[]
atomMappingReactions[rxn]['products_positions_tracked']=[]
for cnt,mapping in enumerate(atomMappingReactions[rxn]['reactants_mapping']):
positions = []
elements = []
for pos,element in enumerate(mapping):
positions.append(pos);
elements.append('C');
atomMappingReactions[rxn]['reactants_elements_tracked'].append(elements)
atomMappingReactions[rxn]['reactants_positions_tracked'].append(positions)
for cnt,mapping in enumerate(atomMappingReactions[rxn]['products_mapping']):
positions = []
elements = []
for pos,element in enumerate(mapping):
positions.append(pos);
elements.append('C');
atomMappingReactions[rxn]['products_elements_tracked'].append(elements)
atomMappingReactions[rxn]['products_positions_tracked'].append(positions)
elif row['rxn_id'] == 'MALS':
atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[-1,-1]
atomMappingReactions[rxn]['products_stoichiometry_tracked']=[1,1]
atomMappingReactions[rxn]['reactants_ids_tracked']=['accoa_c','glx_c']
atomMappingReactions[rxn]['products_ids_tracked']=['mal_DASH_L_c','coa_c']
atomMappingReactions[rxn]['reactants_mapping']=['ABCDEFGHIJKLMNOPQRSTUab','cd']
atomMappingReactions[rxn]['products_mapping']=['cdba','ABCDEFGHIJKLMNOPQRSTU']
atomMappingReactions[rxn]['reactants_elements_tracked']=[]
atomMappingReactions[rxn]['products_elements_tracked']=[]
atomMappingReactions[rxn]['reactants_positions_tracked']=[]
atomMappingReactions[rxn]['products_positions_tracked']=[]
for cnt,mapping in enumerate(atomMappingReactions[rxn]['reactants_mapping']):
positions = []
elements = []
for pos,element in enumerate(mapping):
positions.append(pos);
elements.append('C');
atomMappingReactions[rxn]['reactants_elements_tracked'].append(elements)
atomMappingReactions[rxn]['reactants_positions_tracked'].append(positions)
for cnt,mapping in enumerate(atomMappingReactions[rxn]['products_mapping']):
positions = []
elements = []
for pos,element in enumerate(mapping):
positions.append(pos);
elements.append('C');
atomMappingReactions[rxn]['products_elements_tracked'].append(elements)
atomMappingReactions[rxn]['products_positions_tracked'].append(positions)
# update BOF
met_row = {}
met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014','adp_c');
adp = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
adp.charge = met_row['charge']
cobra_model.reactions.get_by_id("Ec_Biomass_INCA").add_metabolites({coa:2.51,
atp:-53.95,gtp:-0.20912,fad:-0.000223,utp:-0.1401});
# write the model to a temporary file
save_json_model(cobra_model,'data/cobra_model_tmp.json')
# add the model information to the database
io = stage02_isotopomer_io()
dataStage02IsotopomerModelRxns_data = [];
dataStage02IsotopomerModelMets_data = [];
dataStage02IsotopomerModels_data,\
dataStage02IsotopomerModelRxns_data,\
dataStage02IsotopomerModelMets_data = io._parse_model_json(model_id_O, date_I, 'data/cobra_model_tmp.json')
io.add_data_stage02_isotopomer_modelMetabolites(dataStage02IsotopomerModelMets_data);
io.add_data_stage02_isotopomer_modelReactions(dataStage02IsotopomerModelRxns_data);
io.add_data_stage02_isotopomer_models(dataStage02IsotopomerModels_data);
#add atomMappingReactions to the database
io.add_data_stage02_isotopomer_atomMappingReactions(atomMappingReactions);
# expand atomMappingReactions
imm = stage02_isotopomer_metaboliteMapping()
irm = stage02_isotopomer_reactionMapping()
mappingUtilities = stage02_isotopomer_mappingUtilities()
# make atomMappingMetabolites
mappingUtilities.make_missingMetaboliteMappings(experiment_id_I,model_id_I=[model_id_O],
mapping_id_rxns_I=[mapping_id_O],
mapping_id_mets_I=[],#mapping_id_mets_I=[mapping_id_I],
mapping_id_new_I=mapping_id_O);
# update symmetric metabolites
imm.get_metaboliteMapping(mapping_id_O,'succ_c')
imm.make_symmetric()
imm.update_metaboliteMapping()
imm.clear_metaboliteMapping()
imm.get_metaboliteMapping(mapping_id_O,'fum_c')
imm.make_symmetric()
imm.update_metaboliteMapping()
imm.clear_metaboliteMapping()
imm.get_metaboliteMapping(mapping_id_O,'26dap_DASH_M_c')
imm.make_symmetric()
imm.update_metaboliteMapping()
imm.clear_metaboliteMapping()
## update _elements and _positions-_tracked
#irm.get_reactionMapping(mapping_id_O,'ArgSYN')
#irm.checkAndCorrect_elementsAndPositions();
#irm.update_reactionMapping()
#irm.clear_reactionMapping()
#irm.get_reactionMapping(mapping_id_O,'CS')
#irm.checkAndCorrect_elementsAndPositions();
#irm.update_reactionMapping()
#irm.clear_reactionMapping()
#irm.get_reactionMapping(mapping_id_O,'LeuSYN')
#irm.checkAndCorrect_elementsAndPositions();
#irm.update_reactionMapping()
#irm.clear_reactionMapping()
#irm.get_reactionMapping(mapping_id_O,'PDH')
#irm.checkAndCorrect_elementsAndPositions();
#irm.update_reactionMapping()
#irm.clear_reactionMapping()
#irm.get_reactionMapping(mapping_id_O,'PTAr_ACKr_ACS')
#irm.checkAndCorrect_elementsAndPositions();
#irm.update_reactionMapping()
#irm.clear_reactionMapping()
#irm.get_reactionMapping(mapping_id_O,'PTAr_ACKr_ACS_reverse')
#irm.checkAndCorrect_elementsAndPositions();
#irm.update_reactionMapping()
#irm.clear_reactionMapping()
#irm.get_reactionMapping(mapping_id_O,'SERAT_CYSS')
#irm.checkAndCorrect_elementsAndPositions();
#irm.update_reactionMapping()
#irm.clear_reactionMapping()
#irm.get_reactionMapping(mapping_id_O,'THRD_GLYAT')
#irm.checkAndCorrect_elementsAndPositions();
#irm.update_reactionMapping()
#irm.clear_reactionMapping()
#irm.get_reactionMapping(mapping_id_O,'MALS')
#irm.checkAndCorrect_elementsAndPositions();
#irm.update_reactionMapping()
#irm.clear_reactionMapping()
#make default base metabolites
imm.get_metaboliteMapping(mapping_id_O,'asp_DASH_L_c')
imm.make_defaultBaseMetabolites()
imm.update_metaboliteMapping()
imm.clear_metaboliteMapping()
imm.get_metaboliteMapping(mapping_id_O,'cys_DASH_L_c')
imm.make_defaultBaseMetabolites()
imm.update_metaboliteMapping()
imm.clear_metaboliteMapping()
imm.get_metaboliteMapping(mapping_id_O,'ru5p_DASH_D_c')
imm.make_defaultBaseMetabolites()
imm.update_metaboliteMapping()
imm.clear_metaboliteMapping()
#add in PRS to the network?
#if not, substitute r5p_c for prpp_c
#substitute co2_c for for_c
#substitute phe_DASH_L_c for phpyr_c
#ATPSYN
irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'ATPSYN',
[{'r5p_c':'C'},{'10fthf_c':'C'},{'gly_c':'C'},{'co2_c':'C'},{'10fthf_c':'C'}],
[],
[],
'atp_c',
[],
[])
irm.add_productMapping(['atp_c'])
irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'ATPSYN',
[{'gln_DASH_L_c':'C'}],
[],
[],
'glu_DASH_L_c',
[],
[])
irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'ATPSYN',
[{'asp_DASH_L_c':'C'}],
[],
[],
'fum_c',
[],
[])
irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'ATPSYN',
[{'asp_DASH_L_c':'C'}],
[],
[],
'fum_c',
[],
[])
irm.add_reactionMapping()
irm.clear_reactionMapping()
#GTPSYN
irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'GTPSYN',
[{'r5p_c':'C'},{'10fthf_c':'C'},{'gly_c':'C'},{'co2_c':'C'},{'10fthf_c':'C'}],
[],
[],
'gtp_c',
[],
[])
irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'GTPSYN',
[{'gln_DASH_L_c':'C'}],
[],
[],
'glu_DASH_L_c',
[],
[])
irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'GTPSYN',
[{'gln_DASH_L_c':'C'}],
[],
[],
'glu_DASH_L_c',
[],
[])
irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'GTPSYN',
[{'asp_DASH_L_c':'C'}],
[],
[],
'fum_c',
[],
[])
irm.add_productMapping(['gtp_c'])
irm.add_reactionMapping()
irm.clear_reactionMapping()
#VPAMTr_reverse
irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'VPAMTr_reverse',
[{'val_DASH_L_c':'C'}],
[],
[],
'3mob_c',
[],
[])
irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'VPAMTr_reverse',
[{'pyr_c':'C'}],
[],
[],
'ala_DASH_L_c',
[],
[])
irm.add_productMapping(['3mob_c'])
irm.add_reactionMapping()
irm.clear_reactionMapping()
#VPAMTr
irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'VPAMTr',
[{'3mob_c':'C'}],
[],
[],
'val_DASH_L_c',
[],
[])
irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'VPAMTr',
[{'ala_DASH_L_c':'C'}],
[],
[],
'pyr_c',
[],
[])
irm.add_reactionMapping()
irm.clear_reactionMapping()
#COASYN
irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'COASYN',
[{'atp_c':'C'},{'mlthf_c':'C'},{'3mob_c':'C'},{'asp_DASH_L_c':'C'},{'cys_DASH_L_c':'C'}],
[{'asp_DASH_L_c':3},{'cys_DASH_L_c':4}],
[{'co2_c':0},{'co2_c':0}],
'coa_c',
[{'co2_c':'C'},{'co2_c':'C'}],
['co2_c','co2_c'])
#reverse product mapping for 3mob_c in database!
irm.update_productMapping(['coa_c'])
irm.add_reactionMapping()
irm.clear_reactionMapping()
#ACCOA_psuedo
irm.make_trackedBinaryReaction('full04','140407_iDM2014','accoa_c_base_met_ids',
[{'coa_c':'C'},{'ac_c':'C'}],
'accoa_c')
irm.update_productMapping(['accoa_c'])
irm.clear_reactionMapping()
#FADSYN
irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'FADSYN',
[{'gtp_c':'C'},{'ru5p_DASH_D_c':'C'},{'ru5p_DASH_D_c':'C'},{'atp_c':'C'}],
[{'gtp_c':0},{'ru5p_DASH_D_c':1},{'ru5p_DASH_D_c':2}],
[{'10fthf_c':0},{'co2_c':0},{'co2_c':0}],
'fad_c',
[{'10fthf_c':'C'},{'co2_c':'C'},{'co2_c':'C'}],
['co2_c','co2_c','co2_c'])
irm.add_productMapping(['fad_c'])
irm.add_reactionMapping()
irm.clear_reactionMapping()
#CBMKr
irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'CBMKr',
[{'co2_c':'C'}],
[],
[],
'cbp_c',
[],
[])
irm.add_productMapping(['cbp_c'])
irm.add_reactionMapping()
irm.clear_reactionMapping()
#CBMKr_reverse
irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'CBMKr_reverse',
[{'cbp_c':'C'}],
[],
[],
'co2_c',
[],
[])
irm.add_reactionMapping()
irm.clear_reactionMapping()
#UTPSYN
irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'UTPSYN',
[{'r5p_c':'C'},{'cbp_c':'C'},{'asp_DASH_L_c':'C'}],
[{'asp_DASH_L_c':2}],
[{'co2_c':0}],
'utp_c',
[{'co2_c':'C'}],
['co2_c'])
irm.add_productMapping(['utp_c'])
irm.add_reactionMapping()
irm.clear_reactionMapping()
#ecoli_RL2013 modifications (TODO)
def expand_ecoliRL2013_01(self,experiment_id_I,model_id_I,mapping_id_I,date_I,model_id_O,mapping_id_O):
| |
on
the attribute dictionary - this saves us ton of safety checks later on.
"""
# initialize state
d = self._attributes_t_init ()
# call to update and the args/kwargs handling seems to be part of the
# dict interface conventions *shrug*
# we use similar mechanism to initialize attribs here:
for arg in args :
if arg == None:
# be resiliant to empty initialization
pass
elif isinstance (arg, dict):
d['extensible'] = True # it is just being extended ;)
d['camelcasing'] = True # default for dict inits
for key in list(arg.keys()):
us_key = self._attributes_t_underscore(key)
self._attributes_i_set(us_key, arg[key], force=True, flow=self._UP)
else:
raise se.BadParameter("initialization expects dictionary")
for key in list(kwargs.keys ()) :
self.set_attribute (key, kwargs[key])
# make iterable
d['_iterpos'] = 0
self.list_attributes ()
# --------------------------------------------------------------------------
#
# Internal interface tools.
#
# These tools are only for internal use, and should never be called from
# outside of this module.
#
# Naming: _attributes_t_*
#
@rus.takes ('Attributes',
rus.optional (str))
@rus.returns (dict)
def _attributes_t_init (self, key=None) :
"""
This internal function is not to be used by the consumer of this API.
The _attributes_t_init method initializes the interface's internal data
structures. We always need the attribute dict, and the extensible flag.
Everything else can be added on the fly. The method will not overwrite
any settings -- initialization occurs only once!
If a key is given, the existence of this key is checked. An exception
is raised if the key does not exist.
The internal data are stored as property on the _AttributesBase class.
Storing them as property on *this* class would obviously result in
recursion...
"""
d = {}
try :
d = _AttributesBase.__getattribute__ (self, '_d')
except :
# need to initialize -- any exceptions in the code below should fall through
d['attributes'] = {}
d['extensible'] = True
d['private'] = True
d['camelcasing'] = False
d['getter'] = None
d['setter'] = None
d['lister'] = None
d['caller'] = None
d['recursion'] = False
d['_iterpos'] = 0
_AttributesBase.__setattr__ (self, '_d', d)
# check if we know about the given attribute
if key :
if key not in d['attributes'] :
raise se.DoesNotExist ("attribute key is invalid: %s" % (key))
# all is well
return d
# --------------------------------------------------------------------------
#
@rus.takes ('Attributes',
str)
@rus.returns (str)
def _attributes_t_keycheck (self, key) :
"""
This internal function is not to be used by the consumer of this API.
For the given key, check if the key name is valid, and/or if it is
aliased.
If the does not yet exist, the validity check is performed, and allows
to limit dynamically added attribute names (for 'extensible' sets).
If the key does exist, the alias check triggers a deprecation warning,
and returns the aliased key for transparent operation.
"""
# make sure interface is ready to use
d = self._attributes_t_init ()
# perform name validity checks if key is new
if not key in d['attributes'] :
# FIXME: we actually don't have any tests, yet. We should allow to
# configure such via, say, _attributes_add_check (callable (key))
pass
# if key is known, check for aliasing
else:
# check if we know about the given attribute
if d['attributes'][key]['mode'] == ALIAS :
alias = d['attributes'][key]['alias']
print("attribute '%s' is deprecated - use '%s'" % (key, alias))
key = alias
return key
# --------------------------------------------------------------------------
#
    @rus.takes ('Attributes',
                str,
                rus.anything)
    @rus.returns (rus.nothing)
    def _attributes_t_call_cb (self, key, val) :
        """
        This internal function is not to be used by the consumer of this API.

        It triggers the invocation of all callbacks registered for the given
        attribute, passing (self, key, val) to each.  Callbacks returning
        False (or nothing at all) are unregistered after their invocation.
        A callback that raises will propagate its exception to the caller
        (after the recursion shield is lowered) and is *not* removed.
        """
        # make sure interface is ready to use; raises if 'key' is unknown
        d = self._attributes_t_init (key)
        # avoid recursion: do nothing if a callback for this key is already
        # running further up the stack
        if d['attributes'][key]['recursion'] :
            return
        callbacks = d['attributes'][key]['callbacks']
        # iterate over a copy of the callback list, so that remove does not
        # screw up the iteration
        for cb in list (callbacks) :
            call = cb
            # got the callable - call it!
            # raise and lower recursion shield as needed
            ret = False
            try :
                d['attributes'][key]['recursion'] = True
                ret = call (self, key, val)
            finally :
                d['attributes'][key]['recursion'] = False
            # remove callbacks which returned 'False' (or None).
            # NOTE(review): the old comment claimed callbacks raising an
            # exception are removed too, but an exception escapes via the
            # 'finally' above before this removal check runs.
            if not ret :
                callbacks.remove (cb)
# --------------------------------------------------------------------------
#
@rus.takes ('Attributes',
str,
rus.anything)
@rus.returns (rus.nothing)
def _attributes_t_call_setter (self, key, val) :
"""
This internal function is not to be used by the consumer of this API.
It triggers the setter callbacks, to signal that the attribute value
has just been set and should be propagated as needed.
"""
# make sure interface is ready to use.
d = self._attributes_t_init (key)
# avoid recursion
if d['attributes'][key]['recursion'] :
return
# no callbacks for private keys
if key[0] == '_' and d['private'] :
return
# key_setter overwrites results from all_setter
all_setter = d['setter']
key_setter = d['attributes'][key]['setter']
# Get the value via the attribute setter. The setter will not call
# attrib setters or callbacks, due to the recursion guard.
# Set the value via the native setter (to the backend),
# always raise and lower the recursion shield
#
# If both are present, we can ignore *one* exception. If one
# is present, exceptions are not ignored.
#
# always raise and lower the recursion shield.
can_ignore = 0
if all_setter and key_setter : can_ignore = 1
if all_setter :
try :
d['attributes'][key]['recursion'] = True
all_setter (key, val)
except Exception as e :
# ignoring failures from setter
pass
except Exception as e :
can_ignore -= 1
if not can_ignore : raise e
finally :
d['attributes'][key]['recursion'] = False
if key_setter :
try :
d['attributes'][key]['recursion'] = True
key_setter (val)
except:
can_ignore -= 1
if not can_ignore : raise
finally :
d['attributes'][key]['recursion'] = False
# --------------------------------------------------------------------------
#
    @rus.takes ('Attributes',
                str)
    @rus.returns (rus.nothing)
    def _attributes_t_call_getter (self, key) :
        """
        This internal function is not to be used by the consumer of this API.

        It triggers the getter callbacks, to signal that the attribute value
        is about to be accessed and should be updated as needed.  Both the
        global getter and the per-key getter are invoked (per-key last, so
        its value wins).  If both are registered, one getter failure is
        tolerated; if only one is registered, its failure propagates.
        """
        # make sure interface is ready to use; raises if 'key' is unknown
        d = self._attributes_t_init (key)
        # avoid recursion
        if d['attributes'][key]['recursion'] :
            return
        # no callbacks for private keys
        if key[0] == '_' and d['private'] :
            return
        # key getter overwrites results from all_getter
        all_getter = d['getter']
        key_getter = d['attributes'][key]['getter']
        # # Note that attributes have a time-to-live (ttl). If a _attributes_i_get
        # # operation is attempted within 'time-of-last-update + ttl', the operation
        # # is not triggering backend getter hooks, to avoid trashing (hooks are
        # # expected to be costly). The force flag set to True will request to call
        # # registered getter hooks even if ttl is not yet expired.
        #
        # # For example, job.wait() will update the plugin level state to 'Done',
        # # but the cached job.state attribute will remain 'New' as the plugin does
        # # not push the state change upward
        #
        # age = self._attributes_t_get_age (key)
        # ttl = d['attributes'][key]['ttl']
        #
        # if age < ttl :
        #     return
        # get the value from the native getter (from the backend), and
        # get it via the attribute getter. The getter will not call
        # attrib setters or callbacks, due to the recursion guard.
        #
        # If both are present, we can ignore *one* exception. If one
        # is present, exceptions are not ignored.
        #
        # always raise and lower the recursion shield.
        retries = 1
        if all_getter and key_getter : retries = 2
        if all_getter :
            try :
                d['attributes'][key]['recursion'] = True
                val=all_getter (key)
                d['attributes'][key]['value'] = val
            except Exception:
                # tolerated only if another getter remains to be tried
                retries -= 1
                if not retries : raise
            finally :
                d['attributes'][key]['recursion'] = False
        if key_getter :
            try :
                d['attributes'][key]['recursion'] = True
                val=key_getter ()
                d['attributes'][key]['value'] = val
            except Exception:
                retries -= 1
                if not retries : raise
            finally :
                d['attributes'][key]['recursion'] = False
# | |
# pylint: disable=E1101, dangerous-default-value
"""
Classes to handle alignments in the SAM format.
Reader -> Sam -> Writer
"""
import sys
try:
from collections import OrderedDict
except ImportError: #python 2.6 or 3.6+
if sys.version_info >= (3,6):
OrderedDict = dict
else:
from ordereddict import OrderedDict
import os
from itertools import groupby
from subprocess import Popen, PIPE
from io import TextIOWrapper
import re
from six import PY3, string_types
try:
from multiprocessing.dummy.connection import Connection
except ImportError: #python2
from _multiprocessing import Connection
__version__ = '0.1.4.0'
class DefaultOrderedDict(OrderedDict):
    """ An OrderedDict that, like collections.defaultdict, creates missing
    values on first access via the ``default`` factory, while preserving
    insertion order.

    ``default`` is a zero-argument callable (e.g. ``list``); ``items`` is an
    optional iterable of (key, value) pairs used to seed the dict.
    """
    # BUGFIX: the default was 'items=[]' -- a mutable default argument shared
    # across all instances.  It was only ever read here, but an empty tuple
    # removes the hazard entirely without changing behavior.
    def __init__(self, default, items=()):
        super(DefaultOrderedDict, self).__init__(items)
        self._default = default

    def __missing__(self, key):
        # create, store, and return the default value for an absent key
        self[key] = value = self._default()
        return value
class GenomicOrder(object):
    """ Mixin providing genomic sort order.

    Instances compare by reference name first, then by position; subclasses
    must supply ``rname`` and ``pos`` attributes.
    """
    def __gt__(self, other):
        if self.rname == other.rname:
            return self.pos > other.pos
        return self.rname > other.rname

    def __lt__(self, other):
        if self.rname == other.rname:
            return self.pos < other.pos
        return self.rname < other.rname

    def __eq__(self, other):
        return (self.rname, self.pos) == (other.rname, other.pos)
class Reader(object):
    """ Read SAM/BAM format file as an iterable.

    ``f`` may be an open SAM text handle (including ``<stdin>``), a BAM file
    handle (decoded by piping through ``samtools view``), or a
    multiprocessing Connection.  ``regions`` (a UCSC-style region string) is
    honored for BAM input only; ``kind`` ('sam'/'bam') overrides detection by
    file extension.
    """
    def __init__(self, f, regions=False, kind=None, samtools_path="samtools"):
        ext = None
        self.samtools_path = samtools_path
        self.spool = None # use this to catch alignment during reader scraping
        self.type = 'sam'
        try:
            self._f_name = f.name
            _, ext = os.path.splitext(f.name)
            if f.name == '<stdin>': # stdin stream
                self._sam_init(f)
            # NOTE(review): the parentheses look misplaced -- '(ext is not
            # None and ext.lower()) == ".bam"' compares the result of the
            # 'and' -- but since that result is ext.lower() whenever ext is
            # set (and False otherwise), the comparison behaves as intended.
            elif (ext is not None and ext.lower()) == '.bam' or (kind is not None and kind.lower() == 'bam'):
                self._bam_init(f, regions)
                self.type = 'bam'
            elif (ext is not None and ext.lower()) == '.sam' or (kind is not None and kind.lower() == 'sam'):
                self._sam_init(f)
            else:
                # unknown extension: assume plain SAM text
                self._sam_init(f)
            if (regions and (ext is not None and ext.lower() != '.bam') and kind is None) or (regions and kind is not None and kind.lower() != 'bam'):
                self.__exit__()
                raise ValueError("Region support requires bam file.")
        except AttributeError:
            # object without a .name attribute: a pipe Connection or an
            # anonymous stream
            self._f_name = None
            if isinstance(f, Connection):
                self._pipe_init(f)
            else:
                self._sam_init(f)
    def _pipe_init(self, f):
        """ Initialize from a multiprocessing Connection: drain '@' header
        lines, park the first alignment line in self.spool, and iterate the
        remainder via f.recv(). """
        header = []
        for line in iter(f.recv, ''):
            if line[0] == '@':
                header.append(line.rstrip('\n\r'))
            else:
                self.spool = line
                break
        self.header_as_dict(header)
        self.f = iter(f.recv, '')
        self._conn = 'pipe'
    def _sam_init(self, f):
        """ Initialize from a SAM text stream: collect '@' header lines and
        park the first alignment line in self.spool. """
        header = []
        self.f = f
        for line in self.f:
            if line[0] == '@':
                header.append(line.rstrip('\n\r'))
            else:
                self.spool = line
                break
        self.header_as_dict(header)
        self._conn = 'file'
    def _bam_init(self, f, regions):
        """ Initialize from a BAM file by spawning ``samtools view``.
        Requires samtools on PATH; creates a .bai index on demand when a
        region query is requested. """
        pline = [self.samtools_path, 'view', '-H', f.name]
        try:
            p = Popen(pline, bufsize=-1, stdout=PIPE,
                      stderr=PIPE)
        except OSError:
            raise OSError('Samtools must be installed for BAM file support!\n')
        self.header_as_dict([line.decode('utf-8').rstrip('\n\r') for line in p.stdout])
        p.wait()
        if regions:
            try:
                # probe for an existing BAM index
                open(''.join([f.name, '.bai']))
            except EnvironmentError:
                sys.stderr.write("BAM index not found. Attempting to index file.\n")
                index_p = Popen([self.samtools_path, 'index', f.name], stdout=PIPE, stderr=PIPE)
                _, err = index_p.communicate()
                if index_p.returncode > 0 or re.search("fail", str(err)):
                    raise OSError("Indexing failed. Is the BAM file sorted?\n")
                else:
                    sys.stderr.write("Index created successfully.\n")
            pline = [self.samtools_path, 'view', f.name, regions]
        else:
            pline = [self.samtools_path, 'view', f.name]
        self.p = Popen(pline, bufsize=-1, stdout=PIPE,
                       stderr=PIPE)
        if PY3:
            self.f = TextIOWrapper(self.p.stdout)
        else:
            self.f = self.p.stdout
        self._conn = 'proc'
    def next(self):
        """ Returns the next :class:`.Sam` object """
        # NOTE(review): the try/except StopIteration below is a no-op
        # re-raise; kept as-is.
        try:
            if self.spool: # this will be the first alignment in a SAM file or stream
                line = self.spool.rstrip('\n\r')
                self.spool = None
            else:
                line = next(self.f).rstrip('\n\r')
            if line == '':
                raise StopIteration
            # split the 11 mandatory SAM fields from the optional tag fields
            fields = line.split('\t')
            required = fields[:11]
            tags = fields[11:]
            return Sam(*required, tags=tags)
        except StopIteration:
            raise StopIteration
    def __next__(self):
        return self.next()
    def __iter__(self):
        return self
    def __len__(self):
        """ Returns the number of reads in an indexed BAM file.
        Not implemented for SAM files. """
        if self.type != 'bam':
            raise NotImplementedError("len(Reader) is only implemented for BAM files.")
        elif self.type == 'bam':
            # bam_read_count is defined elsewhere in this module
            return sum(bam_read_count(self._f_name, self.samtools_path))
    def subsample(self, n):
        """ Returns an iterator that draws every nth read from
        the input file. Returns :class:`.Sam`. """
        for i, line in enumerate(self.f):
            if i % n == 0:
                fields = line.split('\t')
                required = fields[:11]
                tags = fields[11:]
                yield Sam(*required, tags=tags)
    def header_as_dict(self, header):
        """ Parse the header list and return a nested dictionary. """
        # result shape: {'@SQ': {'SN:name': ['LN:len', ...], ...}, ...}
        self.header = DefaultOrderedDict(OrderedDict)
        for line in header:
            line = line.split('\t')
            key, fields = (line[0], line[1:])
            try:
                self.header[key][fields[0]] = fields[1:]
            except IndexError:
                # record with a tag but no further fields
                self.header[key][fields[0]] = ['']
    @property
    def seqs(self):
        """ Return just the sequence names from the @SQ library as a generator. """
        for key in self.header['@SQ'].keys():
            yield key.split(':')[1]
    def tile_genome(self, width):
        """ Return a generator of UCSC-style regions tiling ``width``. """
        # tile_region is defined elsewhere in this module
        assert isinstance(width, int)
        for k, v in self.header['@SQ'].items():
            rname = k.split(':')[1]
            seqlength = v[0].split(':')[1]
            for region in tile_region(rname, 1, int(seqlength), width):
                yield region
    def close(self):
        """ Release the underlying file/process via __exit__. """
        self.__exit__()
    def __enter__(self):
        return self
    def __exit__(self, *args):
        if self._conn == 'file':
            self.f.close()
        if self._conn == 'proc':
            # close the decoded stream and stop the samtools child process
            self.f.close()
            self.p.terminate()
class Writer(object):
    """ Write SAM/BAM format file from :class:`.Sam` objects.

    Only SAM text output is supported.  A header is written unless the
    handle is being appended to a non-empty file.
    """
    def __init__(self, f, header=None):
        # f: writable file handle; header: nested dict as produced by
        # Reader.header_as_dict (optional)
        try:
            _, ext = os.path.splitext(f.name)
            if ext == '.bam':
                # Why not just pipe to samtools?
                raise NotImplementedError("Bam writing support is not implemented.\n")
        except AttributeError: # pipe?
            pass
        self.file = f
        if self.file.mode == 'a' and self.file.tell() == 0:
            # We're appending to an empty file. Assume we need a header.
            self._merge_header(header)
            self._header_dict_write()
        elif self.file.mode == 'a' and self.file.tell() > 0:
            # appending to existing content: a header must already be there
            if header:
                raise NotImplementedError("Updating headers on existing SAM files is not supported.\n")
        else:
            self._merge_header(header)
            self._header_dict_write()
    def _merge_header(self, header):
        """ Build self.header from the given nested dict, or a minimal
        default '@HD' header when none is supplied. """
        self.header = DefaultOrderedDict(OrderedDict)
        if not header:
            self.header['@HD']['VN:1.0'] = ['SO:unknown']
        else:
            for key, values in header.items():
                for k, v in values.items():
                    self.header[key][k] = v
    def _header_dict_write(self):
        """ Serialize self.header to the output file, one tab-joined header
        line per record. """
        for key, value in self.header.items():
            for k, v in value.items():
                tags = '\t'.join(v)
                self.file.write('{key}\t{k}\t{tags}\n'.format(**locals()))
    def write(self, sam):
        """ Write the string representation of the ``sam`` :class:`.Sam` object. """
        self.file.write(str(sam))
    def close(self):
        """ Close the underlying file via __exit__. """
        self.__exit__()
    def __enter__(self):
        return self
    def __exit__(self, *args):
        self.file.close()
class Sam(GenomicOrder):
""" Object representation of a SAM entry. """
# https://github.com/samtools/hts-specs/blob/da805be01e2ceaaa69fdde9f33c5377bf9ee6369/SAMv1.tex#L383
# operations that consume the reference
_cigar_ref = set(('M', 'D', 'N', '=', 'X', 'EQ'))
# operations that consume the query
_cigar_query = set(('M', 'I', 'S', '=', 'X', 'EQ'))
# operations that do not represent an alignment
_cigar_no_align = set(('H', 'P'))
_valid_cigar = _cigar_ref | _cigar_query | _cigar_no_align
# operations that can be represented as aligned to the reference
_cigar_align = _cigar_ref & _cigar_query
# operations that only consume the reference
_cigar_ref_only = _cigar_ref - _cigar_align
# operations that only consume the query
_cigar_query_only = _cigar_query - _cigar_align
def __init__(self, qname='', flag=4, rname='*', pos=0, mapq=255, cigar='*', rnext='*', pnext=0, tlen=0, seq='*', qual='*', tags=[]):
self.qname = qname
self.flag = int(flag)
self.rname = rname
self.pos = int(pos)
self.mapq = int(mapq)
self.cigar = cigar
self.rnext = rnext
self.pnext = int(pnext)
self.tlen = int(tlen)
self.seq = seq
self.qual = qual
self._tags = tags
self._cache = dict()
    def __str__(self):
        """ Returns the string representation of a SAM entry. Corresponds to one line
        in the on-disk format of a SAM file (including the trailing newline). """
        # self.tags (a parsed-tag mapping defined elsewhere in the class)
        # takes precedence; fall back to the raw tag strings when it is empty
        if self.tags:
            tag_fields = '\t'.join([encode_tag(tag, self.tags[tag]) for tag in sorted(self.tags.keys())])
        else:
            tag_fields = '\t'.join(self._tags)
        return '{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\t{8}\t{9}\t{10}\t{11}\n'.format(self.qname,
                                                                                       str(self.flag),
                                                                                       self.rname,
                                                                                       str(self.pos),
                                                                                       str(self.mapq),
                                                                                       self.cigar,
                                                                                       self.rnext,
                                                                                       str(self.pnext),
                                                                                       str(self.tlen),
                                                                                       self.seq,
                                                                                       self.qual,
                                                                                       tag_fields)
def __repr__(self):
return "Sam({0}:{1}:{2})".format(self.rname, self.pos, self.qname)
def __len__(self):
""" Returns the length of the portion of ``self.seq`` aligned to the reference. Unaligned reads will
have len() == 0. Insertions (I) and soft-clipped portions (S) will not contribute to the aligned length.
>>> x = Sam(cigar='8M2I4M1D3M4S')
>>> len(x)
16
"""
return sum(c[0] for c in self.cigars if c[1] in self._cigar_ref)
    def __getitem__(self, tag):
        """ Retrieves the value of the SAM tag named "tag". The
        data type of the tag is interpreted as the proper Python object type.
        Raises KeyError for an unknown tag.

        >>> x = Sam(tags=['NM:i:0', 'ZZ:Z:xyz'])
        >>> x['NM']
        0
        >>> x['ZZ']
        'xyz'
        """
        # delegate to the parsed-tag mapping (the 'tags' property is defined
        # elsewhere in the class)
        return self.tags[tag]
    def __setitem__(self, tag, data):
        """ Stores the SAM tag named "tag" with the value "data". The
        data type of the tag is interpreted from the Python object type.

        >>> x = Sam(tags=[])
        >>> x['NM'] = 0
        >>> x['NM']
        0
        """
        # store into the parsed-tag mapping (the 'tags' property is defined
        # elsewhere in the class); serialization back to SAM happens in __str__
        self.tags[tag] = data
def index_of(self, pos):
""" Return the relative index within the alignment from a genomic position 'pos' """
i = pos - self.pos
if i >= 0:
return i
else:
raise IndexError("Position {0:n} not in {1}.".format(pos, self.qname))
def get(self, key, default_value):
try:
return self[key]
except KeyError:
return default_value
def cigar_split(self):
# https://github.com/brentp/bwa-meth
if self.cigar == "*":
yield (0, None)
return
cig_iter = groupby(self.cigar, lambda c: c.isdigit())
for _, n in cig_iter:
op = int("".join(n)), | |
""" ResourceManagementIHEPDB
ResourceManagementIHEPDB for IHEPDIRAC.
"""
from datetime import datetime
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Base.DB import DB
from IHEPDIRAC.ResourceStatusSystem.Utilities import MySQLWrapper
__RCSID__ = '$Id: $'
class ResourceManagementIHEPDB( object ):
"""
Class that defines the tables for the ResourceManagementIHEPDB on a python dictionary.
"""
# Written PrimaryKey as list on purpose !!
_tablesDB = {}
_tablesDB[ 'StorageCache' ] = { 'Fields' :
{
'SE' : 'VARCHAR(64) NOT NULL',
'Occupied' : 'BIGINT UNSIGNED NOT NULL DEFAULT 0',
'Free' : 'BIGINT UNSIGNED NOT NULL DEFAULT 0',
'Usage' : 'DOUBLE NOT NULL DEFAULT 0.0',
'LastCheckTime' : 'DATETIME NOT NULL'
},
'PrimaryKey' : [ 'SE' ]
}
_tablesDB[ 'JobCache' ] = { 'Fields' :
{
'Site' : 'VARCHAR(64) NOT NULL',
'MaskStatus' : 'VARCHAR(32) NOT NULL',
'Efficiency' : 'DOUBLE NOT NULL DEFAULT 0',
'Running' : 'INTEGER NOT NULL DEFAULT 0',
'Waiting' : 'INTEGER NOT NULL DEFAULT 0',
'Done' : 'INTEGER NOT NULL DEFAULT 0',
'Failed' : 'INTEGER NOT NULL DEFAULT 0',
'Completed' : 'INTEGER NOT NULL DEFAULT 0',
'Stalled' : 'INTEGER NOT NULL DEFAULT 0 ',
'Status' : 'VARCHAR(16) NOT NULL',
'LastCheckTime' : 'DATETIME NOT NULL'
},
'PrimaryKey' : [ 'Site' ]
}
_tablesDB[ 'WorkNodeCache' ] = { 'Fields' :
{
'Host' : 'VARCHAR(128) NOT NULL',
'Site' : 'VARCHAR(32) NOT NULL',
'Done' : 'INTEGER NOT NULL DEFAULT 0',
'Failed' : 'INTEGER NOT NULL DEFAULT 0',
'Efficiency' : 'DOUBLE NOT NULL DEFAULT 0.0',
'LastCheckTime' : 'DATETIME NOT NULL'
},
'PrimaryKey' : [ 'Host' ]
}
_tablesDB[ 'SAMResult' ] = { 'Fields' :
{
'ElementName' : 'VARCHAR(64) NOT NULL',
'TestType' : 'VARCHAR(16) NOT NULL',
'ElementType' : 'VARCHAR(16) NOT NULL',
'Status' : 'VARCHAR(8) NOT NULL',
'Log' : 'MEDIUMTEXT NOT NULL',
'JobID' : 'INTEGER NOT NULL',
'SubmissionTime' : 'DATETIME NOT NULL',
# 'CompletionTime' : 'DATETIME NOT NULL DEFAULT "0000-00-00"',
'CompletionTime' : 'DATETIME NOT NULL',
'ApplicationTime' : 'DOUBLE NOT NULL DEFAULT 0',
'LastCheckTime' : 'DATETIME NOT NULL'
},
'PrimaryKey' : [ 'ElementName' , 'TestType' ]
}
_tablesDB[ 'ResourceSAMStatus' ] = { 'Fields' :
{
'VO' : 'VARCHAR(32) NOT NULL',
'ElementName' : 'VARCHAR(64) NOT NULL',
'ElementType' : 'VARCHAR(16) NOT NULL',
'Tests' : 'VARCHAR(256) NOT NULL DEFAULT ""',
'Status' : 'VARCHAR(8) NOT NULL DEFAULT ""',
'LastCheckTime' : 'DATETIME NOT NULL',
},
'PrimaryKey' : [ 'VO', 'ElementName' ]
}
_tablesDB[ 'SiteSAMStatus' ] = { 'Fields' :
{
'VO' : 'VARCHAR(32) NOT NULL',
'Site' : 'VARCHAR(32) NOT NULL',
'SiteType' : 'VARCHAR(8) NOT NULL',
'Status' : 'VARCHAR(8) NOT NULL DEFAULT ""',
'CEStatus' : 'VARCHAR(8) NOT NULL DEFAULT ""',
'SEStatus' : 'VARCHAR(8) NOT NULL DEFAULT ""',
'LastCheckTime' : 'DATETIME NOT NULL',
},
'PrimaryKey' : [ 'VO', 'Site' ]
}
_tablesLike = {}
_tablesLike[ 'SAMResultWithID' ] = { 'Fields' :
{
'ID' : 'BIGINT UNSIGNED AUTO_INCREMENT NOT NULL',
'ElementName' : 'VARCHAR(64) NOT NULL',
'TestType' : 'VARCHAR(16) NOT NULL',
'ElementType' : 'VARCHAR(16) NOT NULL',
'Status' : 'VARCHAR(8) NOT NULL',
'Log' : 'MEDIUMTEXT NOT NULL',
'JobID' : 'INTEGER NOT NULL',
'SubmissionTime' : 'DATETIME NOT NULL',
# 'CompletionTime' : 'DATETIME NOT NULL DEFAULT "0000-00-00"',
'CompletionTime' : 'DATETIME NOT NULL',
'ApplicationTime' : 'DOUBLE NOT NULL DEFAULT 0',
'LastCheckTime' : 'DATETIME NOT NULL'
},
'PrimaryKey' : [ 'ID' ]
}
_tablesLike[ 'ResourceSAMStatusWithID' ] = { 'Fields' :
{
'ID' : 'BIGINT UNSIGNED AUTO_INCREMENT NOT NULL',
'VO' : 'VARCHAR(32) NOT NULL',
'ElementName' : 'VARCHAR(64) NOT NULL',
'ElementType' : 'VARCHAR(16) NOT NULL',
'Tests' : 'VARCHAR(256) NOT NULL DEFAULT ""',
'Status' : 'VARCHAR(8) NOT NULL DEFAULT ""',
'LastCheckTime' : 'DATETIME NOT NULL',
},
'PrimaryKey' : [ 'ID' ]
}
_tablesLike[ 'SiteSAMStatusWithID' ] = { 'Fields' :
{
'ID' : 'BIGINT UNSIGNED AUTO_INCREMENT NOT NULL',
'VO' : 'VARCHAR(32) NOT NULL',
'Site' : 'VARCHAR(32) NOT NULL',
'SiteType' : 'VARCHAR(8) NOT NULL',
'Status' : 'VARCHAR(8) NOT NULL DEFAULT ""',
'CEStatus' : 'VARCHAR(8) NOT NULL DEFAULT ""',
'SEStatus' : 'VARCHAR(8) NOT NULL DEFAULT ""',
'LastCheckTime' : 'DATETIME NOT NULL',
},
'PrimaryKey' : [ 'ID' ]
}
_likeToTable = {
'SAMResultLog' : 'SAMResultWithID',
'ResourceSAMStatusLog' : 'ResourceSAMStatusWithID',
'SiteSAMStatusLog' : 'SiteSAMStatusWithID',
}
def __init__( self, mySQL = None ):
'''
Constructor, accepts any DB or mySQL connection, mostly used for testing
purposes.
'''
self._tableDict = self.__generateTables()
if mySQL is not None:
self.database = mySQL
else:
self.database = DB( 'ResourceManagementIHEPDB',
'ResourceStatus/ResourceManagementIHEPDB' )
## SQL Methods ###############################################################
def insert( self, params, meta ):
'''
Inserts args in the DB making use of kwargs where parameters such as
the 'table' are specified ( filled automatically by the Client). Typically you
will not pass kwargs to this function, unless you know what are you doing
and you have a very special use case.
:Parameters:
**params** - `dict`
arguments for the mysql query ( must match table columns ! ).
**meta** - `dict`
metadata for the mysql query. It must contain, at least, `table` key
with the proper table name.
:return: S_OK() || S_ERROR()
'''
utcnow = datetime.utcnow().replace( microsecond = 0 )
# We force lastCheckTime to utcnow if it is not present on the params
#if not( 'lastCheckTime' in params and not( params[ 'lastCheckTime' ] is None ) ):
if 'lastCheckTime' in params and params[ 'lastCheckTime' ] is None:
params[ 'lastCheckTime' ] = utcnow
if 'dateEffective' in params and params[ 'dateEffective' ] is None:
params[ 'dateEffective' ] = utcnow
return MySQLWrapper.insert( self, params, meta )
def update( self, params, meta ):
'''
Updates row with values given on args. The row selection is done using the
default of MySQLMonkey ( column.primary or column.keyColumn ). It can be
modified using kwargs. The 'table' keyword argument is mandatory, and
filled automatically by the Client. Typically you will not pass kwargs to
this function, unless you know what are you doing and you have a very
special use case.
:Parameters:
**params** - `dict`
arguments for the mysql query ( must match table columns ! ).
**meta** - `dict`
metadata for the mysql query. It must contain, at least, `table` key
with the proper table name.
:return: S_OK() || S_ERROR()
'''
# We force lastCheckTime to utcnow if it is not present on the params
#if not( 'lastCheckTime' in params and not( params[ 'lastCheckTime' ] is None ) ):
if 'lastCheckTime' in params and params[ 'lastCheckTime' ] is None:
params[ 'lastCheckTime' ] = datetime.utcnow().replace( microsecond = 0 )
return MySQLWrapper.update( self, params, meta )
def select( self, params, meta ):
'''
Uses arguments to build conditional SQL statement ( WHERE ... ). If the
sql statement desired is more complex, you can use kwargs to interact with
the MySQL buildCondition parser and generate a more sophisticated query.
:Parameters:
**params** - `dict`
arguments for the mysql query ( must match table columns ! ).
**meta** - `dict`
metadata for the mysql query. It must contain, at least, `table` key
with the proper table name.
:return: S_OK() || S_ERROR()
'''
return MySQLWrapper.select( self, params, meta )
def delete( self, params, meta ):
"""
Uses arguments to build conditional SQL statement ( WHERE ... ). If the
sql statement desired is more complex, you can use kwargs to interact with
the MySQL buildCondition parser and generate a more sophisticated query.
There is only one forbidden query, with all parameters None ( this would
mean a query of the type DELETE * from TableName ). The usage of kwargs
is the same as in the get function.
:Parameters:
**params** - `dict`
arguments for the mysql query ( must match table columns ! ).
**meta** - `dict`
metadata for the mysql query. It must contain, at least, `table` key
with the proper table name.
:return: S_OK() || S_ERROR()
"""
return MySQLWrapper.delete( self, params, meta )
## Extended SQL methods ######################################################
def addOrModify( self, params, meta ):
'''
Using the PrimaryKeys of the table, it looks for the record in the database.
If it is there, it is updated, if not, it is inserted as a new entry.
:Parameters:
**params** - `dict`
arguments for the mysql query ( must match table columns ! ).
| |
state.get(objs[1], "color")
return np.array([shelf_color], dtype=np.float32)
painttoshelf_nsrt = NSRT("PaintToShelf", parameters, preconditions,
add_effects, delete_effects, set(), option,
option_vars, painttoshelf_sampler)
nsrts.add(painttoshelf_nsrt)
# PlaceInBox
obj = Variable("?obj", obj_type)
box = Variable("?box", box_type)
robot = Variable("?robot", robot_type)
parameters = [obj, box, robot]
option_vars = [robot]
option = Place
preconditions = {
LiftedAtom(Holding, [obj]),
LiftedAtom(HoldingTop, [obj]),
}
if CFG.env == "repeated_nextto_painting":
preconditions.add(LiftedAtom(NextToBox, [robot, box]))
preconditions.add(LiftedAtom(NextTo, [robot, obj]))
add_effects = {
LiftedAtom(InBox, [obj, box]),
LiftedAtom(GripperOpen, [robot]),
LiftedAtom(NotOnTable, [obj]),
}
delete_effects = {
LiftedAtom(HoldingTop, [obj]),
LiftedAtom(Holding, [obj]),
LiftedAtom(OnTable, [obj]),
}
if CFG.env == "repeated_nextto_painting":
# (Not)OnTable is affected by moving, not placing, in rnt_painting.
# So we remove it from the add and delete effects here.
add_effects.remove(LiftedAtom(NotOnTable, [obj]))
delete_effects.remove(LiftedAtom(OnTable, [obj]))
def placeinbox_sampler(state: State, goal: Set[GroundAtom],
rng: np.random.Generator,
objs: Sequence[Object]) -> Array:
del goal # unused
x = state.get(objs[0], "pose_x")
if CFG.env == "painting":
y = rng.uniform(PaintingEnv.box_lb, PaintingEnv.box_ub)
z = state.get(objs[0], "pose_z")
elif CFG.env == "repeated_nextto_painting":
y = rng.uniform(RepeatedNextToPaintingEnv.box_lb,
RepeatedNextToPaintingEnv.box_ub)
z = RepeatedNextToPaintingEnv.obj_z
return np.array([x, y, z], dtype=np.float32)
placeinbox_nsrt = NSRT("PlaceInBox", parameters, preconditions,
add_effects, delete_effects, set(), option,
option_vars, placeinbox_sampler)
nsrts.add(placeinbox_nsrt)
# PlaceInShelf
obj = Variable("?obj", obj_type)
shelf = Variable("?shelf", shelf_type)
robot = Variable("?robot", robot_type)
parameters = [obj, shelf, robot]
option_vars = [robot]
option = Place
preconditions = {
LiftedAtom(Holding, [obj]),
LiftedAtom(HoldingSide, [obj]),
}
if CFG.env == "repeated_nextto_painting":
preconditions.add(LiftedAtom(NextToShelf, [robot, shelf]))
preconditions.add(LiftedAtom(NextTo, [robot, obj]))
add_effects = {
LiftedAtom(InShelf, [obj, shelf]),
LiftedAtom(GripperOpen, [robot]),
LiftedAtom(NotOnTable, [obj]),
}
delete_effects = {
LiftedAtom(HoldingSide, [obj]),
LiftedAtom(Holding, [obj]),
LiftedAtom(OnTable, [obj]),
}
if CFG.env == "repeated_nextto_painting":
# (Not)OnTable is affected by moving, not placing, in rnt_painting.
# So we remove it from the add and delete effects here.
add_effects.remove(LiftedAtom(NotOnTable, [obj]))
delete_effects.remove(LiftedAtom(OnTable, [obj]))
def placeinshelf_sampler(state: State, goal: Set[GroundAtom],
rng: np.random.Generator,
objs: Sequence[Object]) -> Array:
del goal # unused
x = state.get(objs[0], "pose_x")
if CFG.env == "painting":
y = rng.uniform(PaintingEnv.shelf_lb, PaintingEnv.shelf_ub)
z = state.get(objs[0], "pose_z")
elif CFG.env == "repeated_nextto_painting":
y = rng.uniform(RepeatedNextToPaintingEnv.shelf_lb,
RepeatedNextToPaintingEnv.shelf_ub)
z = RepeatedNextToPaintingEnv.obj_z
return np.array([x, y, z], dtype=np.float32)
placeinshelf_nsrt = NSRT("PlaceInShelf", parameters, preconditions,
add_effects, delete_effects, set(), option,
option_vars, placeinshelf_sampler)
nsrts.add(placeinshelf_nsrt)
# OpenLid
lid = Variable("?lid", lid_type)
robot = Variable("?robot", robot_type)
parameters = [lid, robot]
option_vars = [robot, lid]
option = OpenLid
preconditions = {LiftedAtom(GripperOpen, [robot])}
add_effects = set()
delete_effects = set()
openlid_nsrt = NSRT("OpenLid",
parameters, preconditions, add_effects, delete_effects,
set(), option, option_vars, null_sampler)
nsrts.add(openlid_nsrt)
# PlaceOnTable
obj = Variable("?obj", obj_type)
robot = Variable("?robot", robot_type)
parameters = [obj, robot]
option_vars = [robot]
option = Place
if CFG.env == "painting":
# The environment is a little weird: the object is technically
# already OnTable when we go to place it on the table, because
# of how the classifier is implemented.
preconditions = {
LiftedAtom(Holding, [obj]),
LiftedAtom(OnTable, [obj]),
}
add_effects = {
LiftedAtom(GripperOpen, [robot]),
}
delete_effects = {
LiftedAtom(Holding, [obj]),
LiftedAtom(HoldingTop, [obj]),
LiftedAtom(HoldingSide, [obj]),
}
elif CFG.env == "repeated_nextto_painting":
preconditions = {
LiftedAtom(Holding, [obj]),
LiftedAtom(NextTo, [robot, obj]),
LiftedAtom(NextToTable, [robot]),
}
add_effects = {
LiftedAtom(GripperOpen, [robot]),
LiftedAtom(OnTable, [obj]),
}
delete_effects = {
LiftedAtom(Holding, [obj]),
LiftedAtom(HoldingTop, [obj]),
LiftedAtom(HoldingSide, [obj]),
LiftedAtom(NotOnTable, [obj]),
}
def placeontable_sampler(state: State, goal: Set[GroundAtom],
rng: np.random.Generator,
objs: Sequence[Object]) -> Array:
del goal # unused
x = state.get(objs[0], "pose_x")
if CFG.env == "painting":
# Always release the object where it is, to avoid the
# possibility of collisions with other objects.
y = state.get(objs[0], "pose_y")
z = state.get(objs[0], "pose_z")
elif CFG.env == "repeated_nextto_painting":
# Release the object at a randomly-chosen position on the table
# such that it is NextTo the robot.
robot_y = state.get(objs[1], "pose_y")
table_lb = RepeatedNextToPaintingEnv.table_lb
table_ub = RepeatedNextToPaintingEnv.table_ub
assert table_lb < robot_y < table_ub
nextto_thresh = RepeatedNextToPaintingEnv.nextto_thresh
y_sample_lb = max(table_lb, robot_y - nextto_thresh)
y_sample_ub = min(table_ub, robot_y + nextto_thresh)
y = rng.uniform(y_sample_lb, y_sample_ub)
z = RepeatedNextToPaintingEnv.obj_z
return np.array([x, y, z], dtype=np.float32)
placeontable_nsrt = NSRT("PlaceOnTable", parameters, preconditions,
add_effects, delete_effects, set(), option,
option_vars, placeontable_sampler)
nsrts.add(placeontable_nsrt)
if CFG.env == "repeated_nextto_painting":
def moveto_sampler(state: State, goal: Set[GroundAtom],
_rng: np.random.Generator,
objs: Sequence[Object]) -> Array:
del goal # unused
y = state.get(objs[1], "pose_y")
return np.array([y], dtype=np.float32)
# MoveToObj
robot = Variable("?robot", robot_type)
targetobj = Variable("?targetobj", obj_type)
parameters = [robot, targetobj]
option_vars = [robot, targetobj]
option = MoveToObj
preconditions = {
LiftedAtom(GripperOpen, [robot]),
LiftedAtom(OnTable, [targetobj]),
}
add_effects = {
LiftedAtom(NextTo, [robot, targetobj]),
LiftedAtom(NextToTable, [robot])
}
delete_effects = set()
# Moving could have us end up NextTo other objects, and
# can turn off being next to the box or the shelf.
side_predicates = {NextTo, NextToBox, NextToShelf}
movetoobj_nsrt = NSRT("MoveToObj", parameters, preconditions,
add_effects, delete_effects, side_predicates,
option, option_vars, moveto_sampler)
nsrts.add(movetoobj_nsrt)
# MoveToBox
robot = Variable("?robot", robot_type)
targetbox = Variable("?targetbox", box_type)
obj = Variable("?obj", obj_type)
parameters = [robot, targetbox, obj]
option_vars = [robot, targetbox]
option = MoveToBox
preconditions = {
LiftedAtom(NextTo, [robot, obj]),
LiftedAtom(Holding, [obj]),
}
add_effects = {
LiftedAtom(NextToBox, [robot, targetbox]),
LiftedAtom(NextTo, [robot, obj]),
LiftedAtom(NotOnTable, [obj])
}
delete_effects = {
LiftedAtom(NextToTable, [robot]),
LiftedAtom(OnTable, [obj])
}
# Moving could have us end up NextTo other objects.
side_predicates = {NextTo}
movetobox_nsrt = NSRT("MoveToBox", parameters, preconditions,
add_effects, delete_effects, side_predicates,
option, option_vars, moveto_sampler)
nsrts.add(movetobox_nsrt)
# MoveToShelf
robot = Variable("?robot", robot_type)
targetshelf = Variable("?targetshelf", shelf_type)
obj = Variable("?obj", obj_type)
parameters = [robot, targetshelf, obj]
option_vars = [robot, targetshelf]
option = MoveToShelf
preconditions = {
LiftedAtom(NextTo, [robot, obj]),
LiftedAtom(Holding, [obj]),
}
add_effects = {
LiftedAtom(NextToShelf, [robot, targetshelf]),
LiftedAtom(NextTo, [robot, obj]),
LiftedAtom(NotOnTable, [obj])
}
delete_effects = {
LiftedAtom(NextToTable, [robot]),
LiftedAtom(OnTable, [obj])
}
# Moving could have us end up NextTo other objects.
side_predicates = {NextTo}
movetoshelf_nsrt = NSRT("MoveToShelf", parameters, preconditions,
add_effects, delete_effects, side_predicates,
option, option_vars, moveto_sampler)
nsrts.add(movetoshelf_nsrt)
return nsrts
def _get_tools_gt_nsrts() -> Set[NSRT]:
"""Create ground truth NSRTs for ToolsEnv."""
robot_type, screw_type, screwdriver_type, nail_type, hammer_type, \
bolt_type, wrench_type, contraption_type = _get_types_by_names(
"tools", ["robot", "screw", "screwdriver", "nail", "hammer",
"bolt", "wrench", "contraption"])
HandEmpty, HoldingScrew, HoldingScrewdriver, HoldingNail, HoldingHammer, \
HoldingBolt, HoldingWrench, ScrewPlaced, NailPlaced, BoltPlaced, \
ScrewFastened, NailFastened, BoltFastened, ScrewdriverGraspable, \
HammerGraspable = _get_predicates_by_names(
"tools", ["HandEmpty", "HoldingScrew", "HoldingScrewdriver",
"HoldingNail", "HoldingHammer", "HoldingBolt",
"HoldingWrench", "ScrewPlaced", "NailPlaced",
"BoltPlaced", "ScrewFastened", "NailFastened",
"BoltFastened", "ScrewdriverGraspable",
"HammerGraspable"])
PickScrew, PickScrewdriver, PickNail, PickHammer, PickBolt, PickWrench, \
Place, FastenScrewWithScrewdriver, FastenScrewByHand, \
FastenNailWithHammer, FastenBoltWithWrench = _get_options_by_names(
"tools", ["PickScrew", "PickScrewdriver", "PickNail", "PickHammer",
"PickBolt", "PickWrench", "Place",
"FastenScrewWithScrewdriver", "FastenScrewByHand",
"FastenNailWithHammer", "FastenBoltWithWrench"])
def placeback_sampler(state: State, goal: Set[GroundAtom],
rng: np.random.Generator,
objs: Sequence[Object]) -> Array:
# Sampler for placing an item back in its initial spot.
del goal, rng # unused
_, item = objs
pose_x = state.get(item, "pose_x")
pose_y = state.get(item, "pose_y")
return np.array([pose_x, pose_y], dtype=np.float32)
def placeoncontraption_sampler(state: State, goal: Set[GroundAtom],
rng: np.random.Generator,
objs: Sequence[Object]) -> Array:
# Sampler for placing an item on a contraption.
del goal # unused
_, _, contraption = objs
pose_lx = state.get(contraption, "pose_lx")
pose_ly = state.get(contraption, "pose_ly")
pose_ux = pose_lx + ToolsEnv.contraption_size
pose_uy = pose_ly + ToolsEnv.contraption_size
# Note: Here we just use the average (plus noise), to make sampler
# learning easier. We found that it's harder to learn to imitate the
# more preferable sampler, which uses rng.uniform over the bounds.
pose_x = pose_lx + (pose_ux - pose_lx) / 2.0 + rng.uniform() * 0.01
pose_y = pose_ly + (pose_uy - pose_ly) / 2.0 + rng.uniform() * 0.01
return np.array([pose_x, pose_y], dtype=np.float32)
nsrts = set()
# PickScrew
robot = Variable("?robot", robot_type)
screw = Variable("?screw", screw_type)
parameters = [robot, screw]
option_vars = [robot, screw]
option = PickScrew
preconditions = {
LiftedAtom(HandEmpty, [robot]),
}
add_effects = {LiftedAtom(HoldingScrew, [screw])}
delete_effects = {
LiftedAtom(HandEmpty, [robot]),
}
nsrts.add(
NSRT("PickScrew", parameters, preconditions, add_effects,
delete_effects, set(), option, option_vars, null_sampler))
# PickScrewdriver
robot = Variable("?robot", robot_type)
screwdriver = Variable("?screwdriver", screwdriver_type)
parameters = [robot, screwdriver]
option_vars = [robot, screwdriver]
option = PickScrewdriver
preconditions = {
LiftedAtom(HandEmpty, [robot]),
LiftedAtom(ScrewdriverGraspable, [screwdriver])
}
add_effects = {LiftedAtom(HoldingScrewdriver, [screwdriver])}
delete_effects = {LiftedAtom(HandEmpty, [robot])}
nsrts.add(
NSRT("PickScrewDriver", parameters, preconditions, add_effects,
delete_effects, set(), option, option_vars, null_sampler))
# PickNail
robot = Variable("?robot", robot_type)
nail = Variable("?nail", nail_type)
parameters = [robot, nail]
option_vars = [robot, nail]
option = PickNail
preconditions = {
LiftedAtom(HandEmpty, [robot]),
}
add_effects = {LiftedAtom(HoldingNail, [nail])}
delete_effects = {
LiftedAtom(HandEmpty, [robot]),
}
nsrts.add(
NSRT("PickNail", parameters, preconditions, add_effects,
delete_effects, set(), option, option_vars, null_sampler))
# PickHammer
robot = Variable("?robot", robot_type)
hammer = Variable("?hammer", hammer_type)
parameters = | |
sap_flux = rawsap_flux / np.nanmedian(rawsap_flux)
# Deblending by calculating flux contaminatio ratio for stars within 3 TESS pixels of target
# If no othery stars are nearby, this ratio = 1
flux_contamination_ratio = calc_flux_contamination(ID)
# subtract and re-normalize sap_flux to complete the process
sap_flux = sap_flux - flux_contamination_ratio
sap_flux = sap_flux / np.nanmedian(sap_flux)
nanmask = np.where(np.isfinite(sap_flux)==True)[0]
# e0=hdu[1].data['FLUX_ERR'][~quality_mask]
# error = np.nansum(e0[:, pix_mask]**2, axis=1)**0.5
# is_allnan = ~np.any(np.isfinite(e0[:, pix_mask]), axis=1)
# error[is_allnan] = np.nan
# these provide LARGE errors for either or both TPFs/FFIs
error = np.abs( sap_flux / np.nanmedian(np.nansum(sap_flux)/np.nanmedian(sap_flux)))
error =error[nanmask]
if verbose==True:
print('len check ',' T', len(rawtime),' SAP',len(sap_flux), ' E ', len(error))
print('shape check ',' T', np.shape(rawtime),' SAP',np.shape(sap_flux), ' E ', np.shape(error))
###
###
###
SAP_LC = pd.DataFrame({"Time":rawtime,"RAW SAP Flux":rawsap_flux,"SAP Flux":sap_flux,"SAP Error":error, "Background Flux":bkgFlux})
###
### saving data before momentumdump removal at a later step
###
if verbose==True:
print('RAW len check:', len(rawtime),len(sap_flux),len(error))
SAP_LC.to_csv(savelcpath+"TIC_"+str(ID)+"_Sector_"+str(Sector)+"_RAW_LC.txt",index=False)
return bkg_mask, pix_mask ,flux, median_image, SAP_LC, flux_contamination_ratio
def get_coordinates(hdu, cadence='all'):
    """Returns two 3D arrays of RA and Dec values in decimal degrees.
    If a cadence number is given, returns 2D arrays for that cadence. If
    cadence is 'all' returns one RA, Dec value for each pixel in every
    good-quality cadence. Uses the WCS solution and the POS_CORR data from
    the TPF header.
    Parameters
    ----------
    hdu : astropy HDUList
        Target pixel file HDUs; the WCS is read from extension 2 and the
        flux/POS_CORR columns from extension 1.
    cadence : 'all' or int
        Which cadences to return the RA Dec coordinates for. An integer
        indexes into the cadences that survive the quality cut.
    Returns
    -------
    ra : numpy array, same shape as the (quality-masked) flux cube
        Array containing RA values for every pixel, for every cadence.
    dec : numpy array, same shape as the (quality-masked) flux cube
        Array containing Dec values for every pixel, for every cadence.
    """
    from astropy.wcs import WCS
    import warnings
    wcs = WCS(hdu[2].header)
    # Static pixel grid for a single cadence; FLUX is (n_cadences, ny, nx).
    X, Y = np.meshgrid(np.arange(hdu[1].data['FLUX'].shape[2]),
                       np.arange(hdu[1].data['FLUX'].shape[1]))
    pos_corr1_pix = np.copy(hdu[1].data['POS_CORR1'])
    pos_corr2_pix = np.copy(hdu[1].data['POS_CORR2'])
    # We zero POS_CORR* when the values are NaN or make no sense (>50px)
    with warnings.catch_warnings():  # Comparing NaNs to numbers is OK here
        warnings.simplefilter("ignore", RuntimeWarning)
        bad = np.any([~np.isfinite(pos_corr1_pix),
                      ~np.isfinite(pos_corr2_pix),
                      np.abs(pos_corr1_pix - np.nanmedian(pos_corr1_pix)) > 50,
                      np.abs(pos_corr2_pix - np.nanmedian(pos_corr2_pix)) > 50], axis=0)
    pos_corr1_pix[bad], pos_corr2_pix[bad] = 0, 0
    # Add the per-cadence centroid corrections (POS_CORRs) to the pixel grid.
    X = (np.atleast_3d(X).transpose([2, 0, 1]) +
         np.atleast_3d(pos_corr1_pix).transpose([1, 2, 0]))
    Y = (np.atleast_3d(Y).transpose([2, 0, 1]) +
         np.atleast_3d(pos_corr2_pix).transpose([1, 2, 0]))
    # QUALITY != 0 flags *bad* cadences.
    quality_mask = hdu[1].data['QUALITY'] != 0
    # Pass through WCS (0-based pixel origin, matching numpy indexing).
    ra, dec = wcs.wcs_pix2world(X.ravel(), Y.ravel(), 0)
    ra = ra.reshape((pos_corr1_pix.shape[0], hdu[1].data['FLUX'].shape[1], hdu[1].data['FLUX'].shape[2]))
    dec = dec.reshape((pos_corr2_pix.shape[0], hdu[1].data['FLUX'].shape[1], hdu[1].data['FLUX'].shape[2]))
    # BUGFIX: keep the *good* cadences. The original indexed with the
    # bad-cadence mask directly (ra[quality_mask]), which retained exactly
    # the flagged cadences — inconsistent with the ~quality_mask usage
    # elsewhere in this module.
    ra, dec = ra[~quality_mask], dec[~quality_mask]
    if cadence != 'all':
        return ra[cadence], dec[cadence]
    return ra, dec
def plot_orientation(hdu, ax):
    """Draw East/North compass arrows on a target-pixel-file image axis.

    The orientation is derived from the WCS sky coordinates of the first
    good-quality cadence: East points along increasing RA across the first
    pixel row, and North is 90 degrees counter-clockwise from East.

    Parameters
    ----------
    hdu : astropy HDUList
        Target pixel file HDUs (passed through to get_coordinates).
    ax : matplotlib Axes
        Axis to draw the arrows and labels on.
    """
    nx, ny = hdu[1].data['FLUX'].shape[1:]
    # Anchor the compass near the upper-left corner of the image.
    x0, y0 = 1, ny - 2.5
    ra, dec = get_coordinates(hdu, cadence='all')
    # Sky positions of the first and last pixels of the first row,
    # first cadence.
    ra00, dec00 = ra[0][0][0], dec[0][0][0]
    ra10, dec10 = ra[0][0][-1], dec[0][0][-1]
    # arctan2 resolves the quadrant directly (and copes with dRA == 0);
    # the original used arctan plus a manual +pi shift for dRA < 0.
    # (Also dropped: unused col/row reads of the 1CRPX5/2CRPX5 header
    # keywords, which could raise KeyError on files lacking them.)
    theta = np.arctan2(dec10 - dec00, ra10 - ra00)
    # East arrow
    x1, y1 = np.cos(theta), np.sin(theta)
    ax.arrow(x0, y0, x1, y1, head_width=0.2, color="black")
    ax.text(x0 + 1.5 * x1, y0 + 1.5 * y1, "E", color="black")
    # North arrow: rotate a quarter turn counter-clockwise.
    theta = theta + 90.0 * np.pi / 180.0
    x1, y1 = np.cos(theta), np.sin(theta)
    ax.arrow(x0, y0, x1, y1, head_width=0.2, color="black")
    ax.text(x0 + 1.5 * x1, y0 + 1.5 * y1, "N", color="black")
def get_TESS_sources(ID,hdu,downloadpath,magnitude_limit):
    """Query the TESS Input Catalog around the target position.

    Returns a pandas DataFrame of TIC sources brighter than
    ``magnitude_limit`` (Tmag) within a 5-TESS-pixel radius of the target
    coordinates read from the TPF header.

    Raises ValueError when the query returns nothing, or when no source
    survives the magnitude cut.
    """
    # stuff for getting data from MAST
    from astroquery.mast import Catalogs
    from astroquery.mast import Tesscut
    from astropy.coordinates import SkyCoord, Angle
    from astroquery.vizier import Vizier
    import numpy as np
    import pandas as pd
    # changing cache directories
    Tesscut.cache_location = downloadpath
    Catalogs.cache_location = downloadpath
    Vizier.cache_location = downloadpath
    ###
    pix_scale = 21  # TESS pixel scale, arcsec / pixel
    # Search radius: 5 TESS pixels. (The original also computed a
    # half-frame radius first, then immediately overwrote it — dead code.)
    cone = 5 * pix_scale
    ra = hdu[2].header['RA_OBJ']
    dec = hdu[2].header['DEC_OBJ']
    # Query the TIC around the target position.
    c1 = SkyCoord(ra, dec, frame='icrs', unit='deg')
    result = Catalogs.query_region(c1, catalog="TIC", radius=Angle(cone, "arcsec"))
    print('')
    result = result.to_pandas()
    # Distinguish "query returned nothing" from "magnitude cut removed
    # everything" — the original raised the no-sources message for both.
    if len(result) == 0:
        raise ValueError('Either no sources were found in the query region '
                         'or Vizier is unavailable')
    result = result[result['Tmag'] < magnitude_limit]
    if len(result) == 0:
        raise ValueError('No sources found brighter than {:0.1f}'.format(magnitude_limit))
    return result
def get_GAIA_sources(ID,hdu, downloadpath,magnitude_limit):
    """Query Gaia DR2 (Vizier catalog I/345/gaia2) around the target.

    Returns a pandas DataFrame of Gaia sources brighter than
    ``magnitude_limit`` (Gmag) within a 5-TESS-pixel radius of the target
    coordinates read from the TPF header, with the RA/Dec and proper-motion
    columns renamed to match the TIC query output.

    Raises ValueError when Vizier returns nothing, or when no source
    survives the magnitude cut.
    """
    # stuff for getting data from MAST
    from astroquery.mast import Catalogs
    from astroquery.mast import Tesscut
    from astropy.coordinates import SkyCoord, Angle
    from astroquery.vizier import Vizier
    import numpy as np
    import pandas as pd
    # BUGFIX: requests was referenced in the except clause below but never
    # imported in this function, so any query failure raised NameError
    # instead of being retried.
    import requests
    import time as clock
    ###
    # changing cache directories
    Tesscut.cache_location = downloadpath
    Catalogs.cache_location = downloadpath
    Vizier.cache_location = downloadpath
    ###
    pix_scale = 21.0  # TESS pixel scale, arcsec / pixel
    # Search radius: 5 TESS pixels (the half-frame radius computed in the
    # original was dead code, immediately overwritten).
    cone = 5 * pix_scale
    ra = hdu[2].header['RA_OBJ']
    dec = hdu[2].header['DEC_OBJ']
    # Get the positions of the Gaia sources
    c1 = SkyCoord(ra, dec, frame='icrs', unit='deg')
    try:
        result = Vizier.query_region(c1, catalog=["I/345/gaia2"], radius=Angle(cone, "arcsec"))
    except requests.exceptions.ConnectionError as CE:
        print(CE)
        clock.sleep(10)  # transient network failure: wait, then retry once
        print('trying Vizier query again')
        result = Vizier.query_region(c1, catalog=["I/345/gaia2"], radius=Angle(cone, "arcsec"))
    print('')
    if result is None:
        raise ValueError('Either no sources were found in the query region '
                         'or Vizier is unavailable')
    elif len(result) == 0:
        raise ValueError('No sources found brighter than {:0.1f}'.format(magnitude_limit))
    result = result["I/345/gaia2"].to_pandas()  # using GAIA DR2
    result = result[result.Gmag < magnitude_limit]
    # rename RA DEC columns to match TESS query
    result = result.rename(columns={'RA_ICRS': 'ra', 'DE_ICRS': 'dec', 'pmRA': 'pmRA', 'pmDE': 'pmDEC'})
    return result
def Get_stellar_params(ID,downloadpath):
    """Fetch basic stellar parameters and photometry for a TIC target.

    Queries the TESS Input Catalog via astroquery (retrying once after a
    transient network error) and returns the tuple
    (Vmag, Tmag, Gmag, rmag, imag, zmag, Jmag, Hmag, Kmag, Teff,
     ra, dec, logg, rho, dist).
    Masked catalog entries (which stringify as '--') are returned as NaN;
    this now includes Teff, which the original left unsanitized while
    cleaning every other value.
    """
    from transitleastsquares import catalog_info
    # stuff for getting FFI data from MAST
    from astroquery.mast import Catalogs
    from astroquery.mast import Tesscut
    import numpy as np
    import time as clock
    import requests
    # changing cache directories
    Tesscut.cache_location = downloadpath
    Catalogs.cache_location = downloadpath
    starName = "TIC " + str(ID)
    radSearch = 21 / 3600.0  # one TESS pixel, as an angular radius in degrees
    def _query():
        # NOTE(review): the catalog_info() result is unused, but the call is
        # kept so that an invalid TIC ID fails here exactly as it did before.
        catalog_info(TIC_ID=ID)
        return Catalogs.query_object(starName, radius=radSearch, catalog="TIC")
    try:
        catalogData = _query()
    except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError):
        clock.sleep(5)  # pause 5 seconds then try again
        catalogData = _query()
    star = catalogData[0]
    def _clean(value):
        # Masked TIC entries stringify as '--'; normalize them to NaN.
        return np.nan if str(value) == '--' else value
    ra = star['ra']
    dec = star['dec']
    Vmag = _clean(star['Vmag'])
    Tmag = _clean(star['Tmag'])
    Gmag = _clean(star['GAIAmag'])
    rmag = _clean(star['rmag'])
    imag = _clean(star['imag'])
    zmag = _clean(star['zmag'])
    Jmag = _clean(star['Jmag'])
    Hmag = _clean(star['Hmag'])
    Kmag = _clean(star['Kmag'])
    Teff = _clean(star['Teff'])
    logg = _clean(star['logg'])
    rho = _clean(star['rho'])
    dist = _clean(star['d'])
    return Vmag, Tmag, Gmag, rmag, imag, zmag, Jmag, Hmag, Kmag, Teff, ra, dec, logg, rho, dist
def plot_catalog_sources(ID,hdu,ax, downloadpath,magnitude_limit,catalog,dot_scale=35,dolegend="no"):
#stuff for getting data from MAST
import astropy
from astroquery.mast import Catalogs
from astroquery.mast import Tesscut
from astropy.coordinates import SkyCoord
from astroquery.gaia import Gaia
from astropy.wcs import WCS
from astropy.io import fits
import astropy.units as u
from astropy.coordinates import SkyCoord, Angle
from astroquery.vizier import Vizier
import numpy as np
import pandas as pd
# changing cache directories
Tesscut.cache_location=downloadpath
Catalogs.cache_location=downloadpath
Vizier.cache_location=downloadpath
if catalog=='TESS':
result = get_TESS_sources(ID,hdu, downloadpath, magnitude_limit)
if catalog=='GAIA':
result = get_GAIA_sources(ID,hdu, downloadpath,magnitude_limit)
no_targets_found_message = ValueError('Either no sources were found in the query region '
'or Vizier is unavailable')
too_few_found_message = ValueError('No sources found brighter than {:0.1f}'.format(magnitude_limit))
if result is None:
raise no_targets_found_message
pass
elif len(result) == 0:
raise too_few_found_message
pass
else:
radecs = np.vstack([result['ra'], result['dec']]).T
wcs = WCS(hdu[2].header)
coords = wcs.all_world2pix(radecs, 0)
#coords = wcs.wcs_world2pix(radecs, 1) #test if this is better?
###
year = ((np.nanmin(hdu[1].data['TIME'])+2457000 ) * u.day).to(u.year)
###
pmra = ((np.nan_to_num(np.asarray(result['pmRA'])) * u.milliarcsecond/u.year) * year).to(u.arcsec).value
pmdec = ((np.nan_to_num(np.asarray(result['pmDEC'])) * u.milliarcsecond/u.year) * year).to(u.arcsec).value
result['ra'] += pmra
result['dec'] += pmdec
###
if catalog=='TESS':
df = pd.DataFrame(data=dict(x=coords[:, 0], y=coords[:, 1], mag=result['Tmag']))
if catalog=='GAIA':
df = pd.DataFrame(data=dict(x=coords[:, 0], y=coords[:, 1], mag=result['Gmag']))
Nbins= 22#(int(magnitude_limit)+1)
#print('Nbins',Nbins)
bins = np.linspace(df.mag.min(), df.mag.max(), Nbins)
grouped = df.groupby(np.digitize(df.mag, bins))
###
| |
# Repository: mirochaj/ares
"""
OpticalDepth.py
Author: <NAME>
Affiliation: University of Colorado at Boulder
Created on: Sat Feb 21 11:26:50 MST 2015
Description:
"""
import inspect
import numpy as np
from ..data import ARES
import os, re, types, sys
from ..util.Pickling import read_pickle_file, write_pickle_file
from scipy.integrate import quad
from ..physics import Cosmology, Hydrogen
from scipy.interpolate import interp1d as interp1d_scipy
from ..util.Misc import num_freq_bins
from ..physics.Constants import c, h_p, erg_per_ev, lam_LyA, lam_LL
from ..util.Math import interp1d
from ..util.Warnings import no_tau_table
from ..util import ProgressBar, ParameterFile
from ..physics.CrossSections import PhotoIonizationCrossSection, \
ApproximatePhotoIonizationCrossSection
from ..util.Warnings import tau_tab_z_mismatch, tau_tab_E_mismatch
# --- Python 2/3 compatibility and optional-dependency detection -------------
try:
    # this runs with no issues in python 2 but raises error in python 3
    basestring
except:
    # this try/except allows for python 2/3 compatible string type checking
    basestring = str
try:
    import h5py
    have_h5py = True
except ImportError:
    # h5py is optional; only needed when tau tables are stored as HDF5
    have_h5py = False
try:
    from mpi4py import MPI
    size = MPI.COMM_WORLD.size
    rank = MPI.COMM_WORLD.rank
except ImportError:
    # serial fallback: pretend we are a single-process communicator
    size = 1
    rank = 0
# Put this stuff in utils
# Default keyword arguments shared by the optical-depth routines below;
# callers override individual entries via **kwargs (see _fix_kwargs).
defkwargs = \
{
 'zf':None,
 'xray_flux':None,
 'xray_emissivity': None,
 'lw_flux':None,
 'lw_emissivity': None,
 'tau':None,
 'return_rc': False,
 'energy_units':False,
 'xavg': 0.0,
 'zxavg':0.0,
}
# Unit conversions: 1 barn = 1e-24 cm^2; "Mbarn" = 1e6 barn = 1e-18 cm^2.
barn = 1e-24
Mbarn = 1e-18
class OpticalDepth(object):
def __init__(self, **kwargs):
self.pf = ParameterFile(**kwargs)
# Include helium opacities approximately?
self.approx_He = self.pf['include_He'] and self.pf['approx_He']
# Include helium opacities self-consistently?
self.self_consistent_He = self.pf['include_He'] \
and (not self.pf['approx_He'])
if self.pf['approx_sigma']:
self.sigma = ApproximatePhotoIonizationCrossSection
else:
self.sigma = PhotoIonizationCrossSection
self._set_integrator()
def _set_integrator(self):
self.integrator = self.pf["unsampled_integrator"]
self.sampled_integrator = self.pf["sampled_integrator"]
self.rtol = self.pf["integrator_rtol"]
self.atol = self.pf["integrator_atol"]
self.divmax = int(self.pf["integrator_divmax"])
@property
def ionization_history(self):
if not hasattr(self, '_ionization_history'):
self._ionization_history = lambda z: 0.0
return self._ionization_history
@ionization_history.setter
def ionization_history(self, value):
if inspect.ismethod(interp1d):
self._ionization_history = value
elif isinstance(value, interp1d_scipy):
self._ionization_history = value
elif type(value) is not types.FunctionType:
self._ionization_history = lambda z: value
else:
self._ionization_history = value
@property
def cosm(self):
if not hasattr(self, '_cosm'):
self._cosm = Cosmology(pf=self.pf, **self.pf)
return self._cosm
@property
def hydr(self):
if not hasattr(self, '_hydr'):
self._hydr = Hydrogen(pf=self.pf, cosm=self.cosm, **self.pf)
return self._hydr
def OpticalDepth(self):
return self.DiffuseOpticalDepth()
    def DiffuseOpticalDepth(self, z1, z2, E, **kwargs):
        """
        Compute the optical depth between two redshifts.
        If no keyword arguments are supplied, assumes the IGM is neutral.
        Parameters
        ----------
        z1 : float
            observer redshift
        z2 : float
            emission redshift
        E : float
            observed photon energy (eV)
        Notes
        -----
        If keyword argument 'xavg' is supplied, it must be a function of
        redshift.
        Returns
        -------
        Optical depth between z1 and z2 at observed energy E.
        """
        # Normalize kwargs; functionify=True promotes a scalar 'xavg'
        # to a constant function of z.
        kw = self._fix_kwargs(functionify=True, **kwargs)
        # Compute normalization factor to help numerical integrator
        #norm = self.cosm.hubble_0 / c / Mbarn
        # Temporary function to compute emission energy of observed photon
        Erest = lambda z: self.RestFrameEnergy(z1, E, z)
        # Always have hydrogen
        sHI = lambda z: self.sigma(Erest(z), species=0)
        # Figure out number densities and cross sections of everything
        if self.approx_He:
            # Approximate He: neutral-He density tracks HI via the helium
            # abundance self.cosm.y; HeII opacity is neglected entirely.
            nHI = lambda z: self.cosm.nH(z) * (1. - kw['xavg'](z))
            nHeI = lambda z: nHI(z) * self.cosm.y
            sHeI = lambda z: self.sigma(Erest(z), species=1)
            nHeII = lambda z: 0.0
            sHeII = lambda z: 0.0
        elif self.self_consistent_He:
            if type(kw['xavg']) is not list:
                raise TypeError('hey! fix me')
            nHI = lambda z: self.cosm.nH(z) * (1. - kw['xavg'](z))
            # NOTE(review): both subtracted terms are the same kw['xavg'](z);
            # a self-consistent treatment presumably needs two distinct
            # ionized fractions (HII and HeII) here — TODO confirm.
            nHeI = lambda z: self.cosm.nHe(z) \
                * (1. - kw['xavg'](z) - kw['xavg'](z))
            sHeI = lambda z: self.sigma(Erest(z), species=1)
            nHeII = lambda z: self.cosm.nHe(z) * kw['xavg'](z)
            sHeII = lambda z: self.sigma(Erest(z), species=2)
        else:
            # Hydrogen-only opacity
            nHI = lambda z: self.cosm.nH(z) * (1. - kw['xavg'](z))
            nHeI = sHeI = nHeII = sHeII = lambda z: 0.0
        # Integrand: proper path length per unit z times total opacity
        tau_integrand = lambda z: self.cosm.dldz(z) \
            * (nHI(z) * sHI(z) + nHeI(z) * sHeI(z) + nHeII(z) * sHeII(z))
        # Integrate using adaptive Gaussian quadrature
        tau = quad(tau_integrand, z1, z2, epsrel=self.rtol,
            epsabs=self.atol, limit=self.divmax)[0] #/ norm
        return tau
def _fix_kwargs(self, functionify=False, **kwargs):
kw = defkwargs.copy()
kw.update(kwargs)
if functionify and (type(kw['xavg']) is not types.FunctionType):
tmp = kw['xavg']
kw['xavg'] = lambda z: tmp
if kw['zf'] is None:
kw['zf'] = self.pf['final_redshift']
#if not self.pf['source_solve_rte']:
# pass
#elif (kw['Emax'] is None) and self.background.solve_rte[popid] and \
# np.any(self.background.bands_by_pop[popid] > pop.pf['pop_Emin_xray']):
# kw['Emax'] = self.background.energies[popid][-1]
return kw
    def TabulateOpticalDepth(self):
        """
        Compute optical depth as a function of (redshift, photon energy).
        Parameters
        ----------
        xavg : function
            Mean ionized fraction as a function of redshift
            (taken from self.ionization_history).
        Notes
        -----
        Assumes logarithmic grid in variable x = 1 + z. Corresponding
        grid in photon energy determined in _init_xrb. Work is divided
        round-robin across MPI ranks and combined with an Allreduce.
        Returns
        -------
        Optical depth table, shape (self.L, self.N); also stored as self.tau.
        """
        xavg = self.ionization_history
        # Lazily build the (z, E) grids if they do not exist yet.
        if not hasattr(self, 'L'):
            self._set_xrb(use_tab=False)
        # Create array for each processor
        tau_proc = np.zeros([self.L, self.N])
        pb = ProgressBar(self.L * self.N, 'tau')
        pb.start()
        # Loop over redshift, photon energy
        for l in range(self.L):
            for n in range(self.N):
                # 1-based flat index; round-robin assignment to MPI ranks.
                m = l * self.N + n + 1
                if m % size != rank:
                    continue
                # Compute optical depth
                if l == (self.L - 1):
                    # Highest-redshift row: no shell beyond it to absorb.
                    tau_proc[l,n] = 0.0
                else:
                    # Optical depth of the single shell [z_l, z_{l+1}].
                    tau_proc[l,n] = self.DiffuseOpticalDepth(self.z[l],
                        self.z[l+1], self.E[n], xavg=xavg)
                pb.update(m)
        pb.finish()
        # Communicate results
        if size > 1:
            # Each rank filled disjoint entries, so a sum merges them.
            tau = np.zeros_like(tau_proc)
            nothing = MPI.COMM_WORLD.Allreduce(tau_proc, tau)
        else:
            tau = tau_proc
        self.tau = tau
        return tau
def RestFrameEnergy(self, z, E, zp):
"""
Return energy of a photon observed at (z, E) and emitted at zp.
"""
return E * (1. + zp) / (1. + z)
def ObserverFrameEnergy(self, z, Ep, zp):
"""
What is the energy of a photon observed at redshift z and emitted
at redshift zp and energy Ep?
"""
return Ep * (1. + z) / (1. + zp)
def _set_xrb(self, use_tab=True):
"""
From parameter file, initialize grids in redshift and frequency.
Parameters
----------
Only depends on contents of self.pf.
Notes
-----
If tau_Nz != None, will setup logarithmic grid in new parameter
x = 1 + z. Then, given that R = x_{j+1} / x_j = const. for j < J, we can
create a logarithmic photon frequency / energy grid. This technique
is outlined in Haardt & Madau (1996) Appendix C.
References
----------
<NAME>. & <NAME>. 1996, ApJ, 461, 20
"""
if self.pf['tau_redshift_bins'] is None and self.pf['tau_table'] is None:
# Set bounds in frequency/energy space
self.E0 = self.pf['tau_Emin']
self.E1 = self.pf['tau_Emax']
return
self.tabname = None
# Use Haardt & Madau (1996) Appendix C technique for z, nu grids
if not ((self.pf['tau_redshift_bins'] is not None or \
self.pf['tau_table'] is not None)):
# and (not self.pf['approx_xrb'])?
raise NotImplemented('whats going on here')
if use_tab and (self.pf['tau_table'] is not None or self.pf['pop_solve_rte']):
found = False
if self.pf['pop_solve_rte']:
# First, look in CWD or $ARES (if it exists)
self.tabname = self.find_tau(self.pf['tau_prefix'])
if self.tabname is not None:
found = True
# tau_table will override any tables found automatically
if self.pf['tau_table'] is not None:
self.tabname = self.pf['tau_table']
elif found:
pass
else:
# Raise an error if we haven't found anything
no_tau_table(self)
sys.exit(1)
# If we made it this far, we found a table that may be suitable
z, E, tau = self.load(self.tabname)
zmax_ok = (self.z.max() >= self.pf['first_light_redshift']) or \
np.allclose(self.z.max(), self.pf['first_light_redshift'])
zmin_ok = (self.z.min() <= self.pf['final_redshift']) or \
np.allclose(self.z.min(), self.pf['final_redshift'])
Emin_ok = (self.E0 <= self.pf['tau_Emin']) or \
np.allclose(self.E0, self.pf['tau_Emin'])
# Results insensitive to Emax (so long as its relatively large)
# so be lenient with this condition (100 eV or 1% difference
# between parameter file and lookup table)
Emax_ok = np.allclose(self.E1, self.pf['tau_Emax'],
atol=100., rtol=1e-2)
# Check redshift bounds
if not (zmax_ok and zmin_ok):
if not zmax_ok:
tau_tab_z_mismatch(self, zmin_ok, zmax_ok, self.z)
sys.exit(1)
else:
if self.pf['verbose']:
tau_tab_z_mismatch(self, zmin_ok, zmax_ok)
if not (Emax_ok and Emin_ok):
if self.pf['verbose']:
tau_tab_E_mismatch(self, Emin_ok, Emax_ok)
if self.E1 < self.pf['tau_Emax']:
sys.exit(1)
dlogx = np.diff(self.logx)
if not np.all(np.abs(dlogx - np.roll(dlogx, -1)) <= tiny_dlogx):
raise ValueError(wrong_tab_type)
else:
# Set bounds in frequency/energy space
self.E0 = self.pf['tau_Emin']
self.E1 = self.pf['tau_Emax']
# Set up log-grid in parameter x = 1 + z
self.x = np.logspace(np.log10(1+self.pf['final_redshift']),
np.log10(1+self.pf['first_light_redshift']),
int(self.pf['tau_redshift_bins']))
self.z = self.x - 1.
self.logx = np.log10(self.x)
self.logz = np.log10(self.z)
# Constant ratio between elements in x-grid
self.R = self.x[1] / self.x[0]
self.logR = np.log10(self.R)
# Create mapping to frequency space
self.N = num_freq_bins(self.x.size,
zi=self.pf['first_light_redshift'], zf=self.pf['final_redshift'],
Emin=self.E0, Emax=self.E1)
# Create energy arrays
if self.pf['tau_Emin_pin']:
self.E = self.E0 * self.R**np.arange(self.N)
else:
self.E = np.flip(self.E1 * self.R**-np.arange(self.N), 0)
# Frequency grid must be index-1-based.
self.nn = np.arange(1, self.N+1)
# R-squared and x-squared (crop up in CXRB calculation)
self.Rsq = self.R**2
self.xsq = self.x**2
# Set attributes for z-dimensions | |
construct the circuit
operations_in = (
circuit.Operation(_random_matrix_gate(1), [2]),
circuit.Operation(_random_matrix_gate(2), [2, 3]),
circuit.Operation(_random_matrix_gate(1), [3])
)[:num_operations]
circ = circuit.Circuit(4, operations_in)
# (indirectly) call circ.__iter__
operations_out = tuple(circ)
# check length and content of operations_out
self.assertLen(operations_out, num_operations)
self.assertTrue(_elementwise_is(operations_out, operations_in))
def test_add(self):
# construct two circuits
num_qubits = 4
circ_1 = circuit.Circuit(num_qubits, [
circuit.Operation(_random_matrix_gate(1), [2]),
circuit.Operation(_random_matrix_gate(2), [2, 3]),
circuit.Operation(_random_matrix_gate(1), [3])
])
circ_2 = circuit.Circuit(num_qubits, [
circuit.Operation(_random_matrix_gate(1), [1]),
circuit.Operation(_random_matrix_gate(2), [1, 2]),
circuit.Operation(_random_matrix_gate(1), [2]),
circuit.Operation(_random_matrix_gate(2), [0, 1])
])
# add the circuits
circ_tot = circ_1 + circ_2
# check num_qubits
self.assertEqual(circ_tot.get_num_qubits(), num_qubits)
# check that the operations of circ_tot are the concatenation of the
# operations of circ_1 and circ_2
self.assertTrue(_elementwise_is(
circ_tot.get_operation_sequence(),
circ_1.get_operation_sequence() + circ_2.get_operation_sequence()
))
  def test_add_type_error(self):
    """Adding a non-Circuit raises a TypeError with a descriptive message."""
    circ = circuit.Circuit(4, [
        circuit.Operation(_random_matrix_gate(1), [2]),
        circuit.Operation(_random_matrix_gate(2), [2, 3]),
        circuit.Operation(_random_matrix_gate(1), [3])
    ])
    with self.assertRaisesRegex(
        TypeError,
        r'illegal type for other: int \(expected a Circuit\)'):
      circ + 5 # pylint: disable=pointless-statement

  def test_add_inconsistent_num_qubits_error(self):
    """Adding circuits with different qubit counts raises a ValueError."""
    circ_1 = circuit.Circuit(4, [
        circuit.Operation(_random_matrix_gate(1), [2]),
        circuit.Operation(_random_matrix_gate(2), [2, 3]),
        circuit.Operation(_random_matrix_gate(1), [3])
    ])
    circ_2 = circuit.Circuit(5, [
        circuit.Operation(_random_matrix_gate(1), [1]),
        circuit.Operation(_random_matrix_gate(2), [1, 2]),
        circuit.Operation(_random_matrix_gate(1), [2]),
        circuit.Operation(_random_matrix_gate(2), [0, 1])
    ])
    with self.assertRaisesRegex(
        ValueError,
        r'number of qubits does not match \(4 vs 5\)'):
      circ_1 + circ_2 # pylint: disable=pointless-statement

  @parameterized.parameters([0, 1, 2, -3, -2, -1])
  def test_single_item(self, index):
    """Integer indexing returns the identical Operation object."""
    # preparation work: define the operations and construct the circuit
    operation_sequence = (
        circuit.Operation(_random_matrix_gate(1), [2]),
        circuit.Operation(_random_matrix_gate(2), [2, 3]),
        circuit.Operation(_random_matrix_gate(1), [3])
    )
    circ = circuit.Circuit(4, operation_sequence)
    # check __getitem__
    self.assertIs(circ[index], operation_sequence[index])
    # check operation method
    self.assertIs(circ.operation(index), operation_sequence[index])

  @parameterized.parameters([
      slice(None),
      slice(2),
      slice(1, 3),
      slice(2),
      slice(1, -1),
      slice(-2, 3),
      slice(-2, -1),
      slice(1, 1)
  ])
  def test_slicing(self, key):
    """Slicing yields a new Circuit holding the corresponding operations."""
    # preparation work: define the operations and construct the circuit
    operations_full = (
        circuit.Operation(_random_matrix_gate(1), [2]),
        circuit.Operation(_random_matrix_gate(2), [2, 3]),
        circuit.Operation(_random_matrix_gate(1), [3])
    )
    circ_full = circuit.Circuit(4, operations_full)
    operations_extracted = operations_full[key]
    # extracting slice using __getitem__
    circ_1 = circ_full[key]
    # check type and operations for circ_1
    self.assertIs(type(circ_1), circuit.Circuit)
    self.assertTrue(_elementwise_is(
        circ_1.get_operation_sequence(),
        operations_extracted
    ))
    # extracting slice using extract_slice
    circ_2 = circ_full.extract_slice(key)
    # check type and operations for circ_2
    self.assertIs(type(circ_2), circuit.Circuit)
    self.assertTrue(_elementwise_is(
        circ_2.get_operation_sequence(),
        operations_extracted
    ))
  @parameterized.parameters(itertools.product(
      [
          (),
          (0,),
          (0, 2),
          (0, -1),
          (-1, -2, -3)
      ],
      [list, np.array]
  ))
  def test_arbitrary_items(self, keys_value, keys_type):
    """Fancy indexing with an iterable of ints (list or ndarray) selects
    exactly those operations, via __getitem__ and via subcircuit()."""
    # preparation work: define the operations and construct the circuit
    operations_full = (
        circuit.Operation(_random_matrix_gate(1), [2]),
        circuit.Operation(_random_matrix_gate(2), [2, 3]),
        circuit.Operation(_random_matrix_gate(1), [3])
    )
    circ_full = circuit.Circuit(4, operations_full)
    operations_extracted = [operations_full[key] for key in keys_value]
    # extracting selection using __getitem__
    circ_1 = circ_full[keys_type(keys_value)]
    # check type and operations for circ_1
    self.assertIs(type(circ_1), circuit.Circuit)
    self.assertTrue(_elementwise_is(
        circ_1.get_operation_sequence(),
        operations_extracted
    ))
    # extracting selection using subcircuit
    circ_2 = circ_full.subcircuit(keys_type(keys_value))
    # check type and operations for circ_2
    self.assertIs(type(circ_2), circuit.Circuit)
    self.assertTrue(_elementwise_is(
        circ_2.get_operation_sequence(),
        operations_extracted
    ))

  def test_items_multiple_keys(self):
    """Comma-separated keys (slice, iterable, int) concatenate selections."""
    # preparation work: define the operations and construct the circuit
    operations_full = (
        circuit.Operation(_random_matrix_gate(1), [2]),
        circuit.Operation(_random_matrix_gate(2), [2, 3]),
        circuit.Operation(_random_matrix_gate(1), [3]),
        circuit.Operation(_random_matrix_gate(2), [3, 4]),
        circuit.Operation(_random_matrix_gate(1), [4]),
        circuit.Operation(_random_matrix_gate(2), [4, 5]),
        circuit.Operation(_random_matrix_gate(1), [5]),
        circuit.Operation(_random_matrix_gate(2), [5, 6]),
        circuit.Operation(_random_matrix_gate(1), [6]),
        circuit.Operation(_random_matrix_gate(2), [6, 7]),
        circuit.Operation(_random_matrix_gate(1), [7])
    )
    circ_full = circuit.Circuit(10, operations_full)
    # calling __getitem__
    circ = circ_full[1:3, [8, 9, -4], 5]
    # check type and operations for circ
    self.assertIs(type(circ), circuit.Circuit)
    self.assertTrue(_elementwise_is(
        circ.get_operation_sequence(),
        operations_full[1:3] + (
            operations_full[8],
            operations_full[9],
            operations_full[-4],
            operations_full[5]
        )
    ))
  def test_getitem_single_key_noniterable_type_error(self):
    """__getitem__ rejects a single key of unsupported non-iterable type."""
    circ = circuit.Circuit(4, [
        circuit.Operation(_random_matrix_gate(1), [2]),
        circuit.Operation(_random_matrix_gate(2), [2, 3]),
        circuit.Operation(_random_matrix_gate(1), [3])
    ])
    with self.assertRaisesRegex(
        TypeError,
        r'unsupported key type: float'):
      circ[47.11] # pylint: disable=pointless-statement

  def test_getitem_multiple_keys_noniterable_type_error(self):
    """__getitem__ rejects unsupported non-iterable types among several keys."""
    circ = circuit.Circuit(4, [
        circuit.Operation(_random_matrix_gate(1), [2]),
        circuit.Operation(_random_matrix_gate(2), [2, 3]),
        circuit.Operation(_random_matrix_gate(1), [3])
    ])
    with self.assertRaisesRegex(
        TypeError,
        r'unsupported key type: float'):
      circ[47.11, 0.815] # pylint: disable=pointless-statement

  def test_getitem_single_key_iterable_type_error(self):
    """__getitem__ rejects an iterable key of unsupported type (str)."""
    circ = circuit.Circuit(4, [
        circuit.Operation(_random_matrix_gate(1), [2]),
        circuit.Operation(_random_matrix_gate(2), [2, 3]),
        circuit.Operation(_random_matrix_gate(1), [3])
    ])
    with self.assertRaisesRegex(
        TypeError,
        r'unsupported key type: str'):
      circ['hello'] # pylint: disable=pointless-statement

  def test_getitem_multiple_keys_iterable_type_error(self):
    """__getitem__ rejects unsupported iterable types among several keys."""
    circ = circuit.Circuit(4, [
        circuit.Operation(_random_matrix_gate(1), [2]),
        circuit.Operation(_random_matrix_gate(2), [2, 3]),
        circuit.Operation(_random_matrix_gate(1), [3])
    ])
    with self.assertRaisesRegex(
        TypeError,
        r'unsupported key type: str'):
      circ['hello', 'world'] # pylint: disable=pointless-statement

  def test_operation_key_type_error(self):
    """operation() requires an integer-like key."""
    circ = circuit.Circuit(4, [
        circuit.Operation(_random_matrix_gate(1), [2]),
        circuit.Operation(_random_matrix_gate(2), [2, 3]),
        circuit.Operation(_random_matrix_gate(1), [3])
    ])
    with self.assertRaisesRegex(
        TypeError,
        r'key is not integer-like \(found type: float\)'):
      circ.operation(47.11)

  def test_extract_slice_key_type_error(self):
    """extract_slice() requires a slice key."""
    circ = circuit.Circuit(4, [
        circuit.Operation(_random_matrix_gate(1), [2]),
        circuit.Operation(_random_matrix_gate(2), [2, 3]),
        circuit.Operation(_random_matrix_gate(1), [3])
    ])
    with self.assertRaisesRegex(
        TypeError,
        r'key is not a slice \(found type: float\)'):
      circ.extract_slice(47.11)

  def test_subcircuit_noniterable_key_type_error(self):
    """subcircuit() rejects a key that is not iterable at all."""
    circ = circuit.Circuit(4, [
        circuit.Operation(_random_matrix_gate(1), [2]),
        circuit.Operation(_random_matrix_gate(2), [2, 3]),
        circuit.Operation(_random_matrix_gate(1), [3])
    ])
    with self.assertRaisesRegex(
        TypeError,
        r'key is not an iterable of int \(found type: float\)'):
      circ.subcircuit(47.11)

  def test_subcircuit_iterable_key_type_error(self):
    """subcircuit() rejects an iterable key whose elements are not ints."""
    circ = circuit.Circuit(4, [
        circuit.Operation(_random_matrix_gate(1), [2]),
        circuit.Operation(_random_matrix_gate(2), [2, 3]),
        circuit.Operation(_random_matrix_gate(1), [3])
    ])
    with self.assertRaisesRegex(
        TypeError,
        r'key is not an iterable of int \(found type: str\)'):
      circ.subcircuit('hello')
  def test_schedule_and_depth(self):
    """depth() and schedule() agree with manually derived moment indices."""
    # preparation work: construct the circuit and define the expected moments
    circ = circuit.Circuit(4, [
        circuit.Operation(_random_matrix_gate(1), [2]),
        circuit.Operation(_random_matrix_gate(2), [0, 1]),
        circuit.Operation(_random_matrix_gate(2), [1, 2]),
        circuit.Operation(_random_matrix_gate(1), [1]),
        circuit.Operation(_random_matrix_gate(1), [0]),
        circuit.Operation(_random_matrix_gate(2), [0, 1])
    ])
    # earliest moment for each operation given qubit-overlap constraints
    moments_expected = [0, 0, 1, 2, 1, 3]
    depth_expected = np.max(moments_expected) + 1
    # call circ.depth()
    depth_1 = circ.depth()
    # check type and value for depth_1
    self.assertIs(type(depth_1), int)
    self.assertEqual(depth_1, depth_expected)
    # call circ.schedule()
    depth_2, moments = circ.schedule()
    # check type and value for depth_2
    self.assertIs(type(depth_2), int)
    self.assertEqual(depth_2, depth_expected)
    # check type and value for moments
    self.assertIs(type(moments), np.ndarray)
    self.assertTupleEqual(moments.shape, (len(circ),))
    self.assertEqual(moments.dtype, np.int64)
    np.testing.assert_array_equal(moments, moments_expected)
class OperationTest(parameterized.TestCase):
  @parameterized.parameters([
      [(42,)],
      [(47, 11)],
      [(1, 3, 7)]
  ])
  def test_initializer_and_getters(self, qubits_in):
    """Operation stores the gate and exposes qubits as a tuple of int."""
    # preparation work
    num_qubits_in = len(qubits_in)
    gate = _random_matrix_gate(num_qubits_in)
    # construct the operation
    operation = circuit.Operation(gate, qubits_in)
    # retrieve the gate and check that it is the one which has been put in
    self.assertIs(operation.get_gate(), gate)
    # retrieve qubits
    qubits_out = operation.get_qubits()
    # check type and value for qubits
    self.assertIs(type(qubits_out), tuple)
    self.assertTrue(all(type(qubit) == int for qubit in qubits_out)) # want only int and not any possible subtype, so pylint: disable=unidiomatic-typecheck
    self.assertEqual(qubits_out, qubits_in)
    # retrieve num_qubits
    num_qubits_out = operation.get_num_qubits()
    # check type and value for num_qubits
    self.assertIs(type(num_qubits_out), int)
    self.assertEqual(num_qubits_out, num_qubits_in)

  def test_initializer_gate_type_error(self):
    """Constructor rejects a gate argument that is not a Gate."""
    with self.assertRaisesRegex(
        TypeError,
        r'gate is not a Gate \(found type: range\)'):
      circuit.Operation(range(42), [47, 11])

  def test_initializer_non_integer_qubits_error(self):
    """Constructor rejects non-integer qubit indices."""
    with self.assertRaisesRegex(
        TypeError,
        r'qubit is not integer-like \(found type: float\)'):
      circuit.Operation(circuit.MatrixGate(np.eye(4)), [2, 3.0])

  def test_initializer_duplicate_qubits_error(self):
    """Constructor rejects duplicate qubit indices."""
    with self.assertRaisesRegex(
        ValueError,
        r'qubits \(47, 11, 47\) contain duplicate values'):
      circuit.Operation(circuit.MatrixGate(np.eye(8)), [47, 11, 47])

  def test_initializer_negative_qubits_error(self):
    """Constructor rejects negative qubit indices."""
    with self.assertRaisesRegex(
        ValueError,
        r'illegal qubit indices: -7, -5 \(must be non-negative\)'):
      circuit.Operation(circuit.MatrixGate(np.eye(4)), [-5, -7])

  def test_initializer_num_qubits_error(self):
    """Constructor rejects a gate whose arity mismatches len(qubits)."""
    with self.assertRaisesRegex(
        ValueError,
        r'num_qubits of gate does not match len\(qubits\) \[2 vs 1\]'):
      circuit.Operation(circuit.MatrixGate(np.eye(4)), [42])
  @parameterized.parameters([
      [(42,)],
      [(47, 11)],
      [(1, 3, 7)]
  ])
  def test_replace_gate(self, qubits):
    """replace_gate swaps the gate but keeps the qubits unchanged."""
    # preparation work
    num_qubits = len(qubits)
    placeholder_gate = _random_matrix_gate(num_qubits)
    replacement_gate = _random_matrix_gate(num_qubits)
    # construct the operation
    initial_operation = circuit.Operation(placeholder_gate, qubits)
    operation = initial_operation.replace_gate(replacement_gate)
    # check that the number of qubits did not change
    self.assertEqual(operation.get_num_qubits(), num_qubits)
    # retrieve the gate and check that it is the replacement_gate
    self.assertIs(operation.get_gate(), replacement_gate)
    # check qubits
    self.assertTupleEqual(operation.get_qubits(), qubits)

  def test_replace_gate_type_error(self):
    """replace_gate rejects a non-Gate argument."""
    initial_operation = circuit.Operation(circuit.MatrixGate(np.eye(2)), [42])
    with self.assertRaisesRegex(
        TypeError,
        r'gate is not a Gate \(found type: range\)'):
      initial_operation.replace_gate(range(42))

  def test_replace_gate_num_qubits_error(self):
    """replace_gate rejects a gate whose arity mismatches the qubits."""
    initial_operation = circuit.Operation(circuit.MatrixGate(np.eye(2)), [42])
    with self.assertRaisesRegex(
        ValueError,
        r'num_qubits of gate does not match len\(qubits\) \[2 vs 1\]'):
      initial_operation.replace_gate(circuit.MatrixGate(np.eye(4)))

  @parameterized.parameters([
      [(42,), (21,)],
      [(47, 11), (12, 24)],
      [(1, 3, 7), (2, 5, 4)]
  ])
  def test_replace_qubits(self, placeholder_qubits, replacement_qubits):
    """replace_qubits swaps the qubits but keeps the gate unchanged."""
    # preparation work
    num_qubits = len(placeholder_qubits)
    gate = _random_matrix_gate(num_qubits)
    # construct the operation
    initial_operation = circuit.Operation(gate, placeholder_qubits)
    operation = initial_operation.replace_qubits(replacement_qubits)
    # check that the number of qubits did not change
    self.assertEqual(operation.get_num_qubits(), num_qubits)
    # retrieve the qubits and check that they match the replacement_qubits
    self.assertTupleEqual(operation.get_qubits(), replacement_qubits)
    # check gate
    self.assertIs(operation.get_gate(), gate)

  def test_replace_qubits_non_integer_qubits_error(self):
    """replace_qubits rejects non-integer qubit indices."""
    initial_operation = circuit.Operation(circuit.MatrixGate(np.eye(2)), [42])
    with self.assertRaisesRegex(
        TypeError,
        r'qubit is not integer-like \(found type: float\)'):
      initial_operation.replace_qubits([3.0])

  def test_replace_qubits_duplicate_qubits_error(self):
    """replace_qubits rejects duplicate qubit indices."""
    initial_operation = circuit.Operation(
        circuit.MatrixGate(np.eye(8)),
        [1, 3, 7]
    )
    with self.assertRaisesRegex(
        ValueError,
        r'qubits \(19, 4, 19\) contain duplicate values'):
      initial_operation.replace_qubits([19, 4, 19])

  def test_replace_qubits_negative_qubits_error(self):
    """replace_qubits rejects negative qubit indices."""
    initial_operation = circuit.Operation(circuit.MatrixGate(np.eye(2)), [42])
    with self.assertRaisesRegex(
        ValueError,
        r'illegal qubit indices: -7 \(must be non-negative\)'):
      initial_operation.replace_qubits([-7])

  def test_replace_qubits_num_qubits_error(self):
    """replace_qubits rejects a qubit list with mismatching length."""
    initial_operation = circuit.Operation(circuit.MatrixGate(np.eye(2)), [42])
    with self.assertRaisesRegex(
        ValueError,
        r'num_qubits of gate does not match len\(qubits\) \[1 vs 2\]'):
      initial_operation.replace_qubits([47, 11])
  @parameterized.parameters(itertools.product([1, 2, 3], [False, True]))
  def test_permute_qubits_trivial(self, num_qubits, inverse):
    """Permuting with the identity permutation returns the same object."""
    # preparation work
    gate = _random_matrix_gate(num_qubits)
    # construct the operation
    initial_operation = circuit.Operation(
        gate,
        np.random.permutation(10)[:num_qubits]
    )
    operation = initial_operation.permute_qubits(
        range(num_qubits),
        inverse=inverse
    )
    # check that operation is the initial_operation
    self.assertIs(operation, initial_operation)
@parameterized.parameters([
[(47, 11), (1, 0), False, (11, 47)],
[(47, 11), (1, 0), True, (11, 47)],
[(47, | |
"""
Decoding module for a neural speaker (with attention capabilities).
The MIT License (MIT)
Originally created at 06/15/19, for Python 3.x
Copyright (c) 2021 <NAME> (ai.stanford.edu/~optas) & Stanford Geometric Computing Lab
"""
import torch
import random
import time
import warnings
import tqdm
import math
import numpy as np
import torch.nn.functional as F
from torch import nn
from torch.nn.utils.rnn import pack_padded_sequence
from torch.nn.utils import clip_grad_norm_
from .attention import AdditiveVisioLinguistic
from ..utils.stats import AverageMeter
class AttentiveDecoder(nn.Module):
"""
Note: code adapted from: https://github.com/sgrvinod/a-PyTorch-Tutorial-to-Image-Captioning
implementing a solid version of Show, Attend, and Tell. Many thanks Sagar and the team.
Special (optional) features:
- use stochastic teacher forcer
- add auxiliary input data at each decoding step (besides each 'previous' token).
- tie the weights of the encoder/decoder weight matrices
"""
def __init__(self, word_embedding, rnn_hidden_dim, encoder_dim, attention_dim,
vocab, dropout_rate=0, tie_weights=False, teacher_forcing_ratio=1,
auxiliary_net=None, auxiliary_dim=0):
"""
:param word_embedding: nn.Embedding
:param rnn_hidden_dim: hidden (and thus output) dimension of the decoding rnn
:param encoder_dim: feature dimension of encoded stimulus
:param attention_dim: feature dimension over which attention is computed
:param vocab: artemis.utils.vocabulary instance
:param dropout: dropout rate
:param tie_weights: (opt, boolean) if True, the hidden-to-word weights are equal (tied) to the word-embeddings,
see https://arxiv.org/abs/1611.01462 for explanation of why this might be a good idea.
:param teacher_forcing_ratio:
:param auxiliary_net: (optional) nn.Module that will be feeding the decoder at each time step
with some "auxiliary" information (say an emotion label). Obviously, this information is separate than the
output of the typically used image-encoder.
:param auxiliary_dim: (int, optional) the output feature-dimension of the auxiliary net.
"""
super(AttentiveDecoder, self).__init__()
self.vocab = vocab
self.vocab_size = len(vocab)
self.word_embedding = word_embedding
self.auxiliary_net = auxiliary_net
self.uses_aux_data = False
if auxiliary_dim > 0:
self.uses_aux_data = True
self.decode_step = nn.LSTMCell(word_embedding.embedding_dim + encoder_dim + auxiliary_dim, rnn_hidden_dim)
self.attention = AdditiveVisioLinguistic(encoder_dim, rnn_hidden_dim, attention_dim)
if dropout_rate > 0:
self.dropout = nn.Dropout(p=dropout_rate, inplace=True)
else:
self.dropout = nn.Identity()
self.init_h = nn.Linear(encoder_dim, rnn_hidden_dim) # linear layer to find initial hidden state of LSTMCell
self.init_c = nn.Linear(encoder_dim, rnn_hidden_dim) # linear layer to find initial cell state of LSTMCell
self.f_beta = nn.Linear(rnn_hidden_dim, encoder_dim) # linear layer to create a sigmoid-activated gate
self.sigmoid = nn.Sigmoid()
self.next_word = nn.Linear(rnn_hidden_dim, self.vocab_size) # linear layer to find scores over vocabulary
self.init_weights()
self.teacher_forcing_ratio = teacher_forcing_ratio
if tie_weights:
if self.word_embedding.embedding_dim != rnn_hidden_dim:
raise ValueError('When using the tied weights')
print('tying weights of encoder/decoder')
self.next_word.weight = self.word_embedding.weight
def init_hidden_state(self, encoder_out):
"""
Creates the initial hidden and cell states for the decoder's LSTM based on the encoded images.
:param encoder_out: encoded images, a tensor of dimension (batch_size, num_pixels, encoder_dim)
:return: hidden state, cell state
"""
mean_encoder_out = encoder_out.mean(dim=1)
h = self.init_h(mean_encoder_out) # (batch_size, decoder_dim)
c = self.init_c(mean_encoder_out)
return h, c
def init_weights(self, init_range=0.1):
""" Better initialization """
self.word_embedding.weight.data.uniform_(-init_range, init_range) # remove if pre-trained model comes up
self.next_word.bias.data.zero_()
self.next_word.weight.data.uniform_(-init_range, init_range)
def __call__(self, encoder_out, captions, auxiliary_data=None):
""" Forward propagation.
:param encoder_out: encoded images, a tensor of dimension (batch_size, enc_image_size, enc_image_size, encoder_dim)
:param captions: encoded captions, a tensor of dimension (batch_size, max_caption_length)
:param auxiliary_data: extra information associated with the images (batch_size, some_dim)
:return: scores for vocabulary, sorted encoded captions, decode lengths, weights, sort indices
"""
return self.sort_captions_and_forward(encoder_out, captions, auxiliary_data=auxiliary_data)
def sort_captions_and_forward(self, encoder_out, captions, auxiliary_data=None):
    """Sort the batch by caption length, then decode step-by-step with attention.

    Sorting lets each time-step operate only on the first ``batch_size_t``
    rows (those captions still being decoded), a standard trick for variable
    length decoding. Teacher forcing is applied per step with probability
    ``self.teacher_forcing_ratio``.

    :param encoder_out: encoded images, (batch_size, enc_image_size, enc_image_size, encoder_dim)
    :param captions: encoded captions, (batch_size, max_caption_length); each row is
        assumed to contain exactly one <eos> token -- TODO confirm with the tokenizer,
        since torch.where would yield multiple/zero indices otherwise
    :param auxiliary_data: optional extra per-image features, (batch_size, some_dim)
    :return: (prediction logits, sorted captions, decode lengths, alphas, sort indices)
    """
    batch_size = encoder_out.size(0)
    encoder_dim = encoder_out.size(-1)
    # Flatten image
    encoder_out = encoder_out.view(batch_size, -1, encoder_dim)  # (batch_size, num_pixels, encoder_dim)
    num_pixels = encoder_out.size(1)
    # The column index of <eos> in each caption is the number of decode steps.
    decode_lengths = torch.where(captions == self.vocab.eos)[1]  # "<sos> I am <eos>" => decode_length = 3
    # we do not feed <eos> as input to generate
    # something after it
    # Sort input data by decreasing lengths to reduce compute below
    decode_lengths, sort_ind = decode_lengths.sort(dim=0, descending=True)
    encoder_out = encoder_out[sort_ind]
    captions = captions[sort_ind]
    if auxiliary_data is not None:
        auxiliary_data = auxiliary_data[sort_ind]
        auxiliary_data = self.auxiliary_net(auxiliary_data)
    # prepare for unravelling
    embeddings = self.word_embedding(captions)  # (batch_size, max_caption_length, embed_dim)
    h, c = self.init_hidden_state(encoder_out)  # (batch_size, decoder_dim)
    decode_lengths = decode_lengths.tolist()
    device = embeddings.device
    # Create tensors to hold word prediction logits and attention maps (alphas)
    predictions = torch.zeros(batch_size, max(decode_lengths), self.vocab_size).to(device)
    alphas = torch.zeros(batch_size, max(decode_lengths), num_pixels).to(device)
    # At each time-step, decode by
    # attention-weighing the encoder's output based on the decoder's previous hidden state output
    # then generate a new word in the decoder with the previous word and the attention weighted encoding
    for t in range(max(decode_lengths)):
        # Batch is sorted by length, so the first batch_size_t rows are
        # exactly the captions still active at step t.
        batch_size_t = sum([l > t for l in decode_lengths])
        h = h[:batch_size_t]  # effective h
        attention_weighted_encoding, alpha = self.attention(encoder_out[:batch_size_t], h)
        gate = self.sigmoid(self.f_beta(h))  # gating scalar, (batch_size_t, encoder_dim)
        attention_weighted_encoding = gate * attention_weighted_encoding
        use_teacher_forcing = True if random.random() < self.teacher_forcing_ratio else False
        if use_teacher_forcing or t == 0:
            # Ground-truth token at step t (forced at t == 0, where no
            # previous prediction `preds` exists yet).
            decoder_lang_input = embeddings[:batch_size_t, t]
        else:
            # Feed the model's own top prediction from step t-1 (`preds`).
            _, top_pred = preds[:batch_size_t].topk(1)
            top_pred = top_pred.squeeze(-1).detach()  # detach from history as input
            decoder_lang_input = self.word_embedding(top_pred)
        if auxiliary_data is not None:
            auxiliary_data_t = auxiliary_data[:batch_size_t]
            decoder_in = torch.cat([decoder_lang_input, attention_weighted_encoding, auxiliary_data_t], dim=1)
        else:
            decoder_in = torch.cat([decoder_lang_input, attention_weighted_encoding], dim=1)
        h, c = self.decode_step(decoder_in, (h, c[:batch_size_t]))  # (batch_size_t, decoder_dim)
        preds = self.next_word(self.dropout(h))  # (batch_size_t, vocab_size)
        predictions[:batch_size_t, t] = preds
        alphas[:batch_size_t, t] = alpha
    return predictions, captions, decode_lengths, alphas, sort_ind
def attend_and_predict_next_word(self, encoder_out, h, c, tokens, aux_data=None):
    """Run one decoding step: attend over the grounding, then predict logits.

    :param encoder_out: the grounding (encoded image features)
    :param h: current hidden state
    :param c: current memory state
    :param tokens: current token input to the decoder
    :param aux_data: optional extra features appended to the decoder input
    :return: updated h, updated c, vocabulary logits, attention map alpha
    """
    context, alpha = self.attention(encoder_out, h)
    # A sigmoid gate modulates how much of the attended context is used.
    context = self.sigmoid(self.f_beta(h)) * context
    step_input = torch.cat([self.word_embedding(tokens), context], dim=1)
    if aux_data is not None:
        step_input = torch.cat([step_input, self.auxiliary_net(aux_data)], dim=1)
    h, c = self.decode_step(step_input, (h, c))
    return h, c, self.next_word(h), alpha
def single_epoch_train(train_loader, model, criterion, optimizer, epoch, device, tb_writer=None, **kwargs):
    """ Perform training for one epoch.
    :param train_loader: DataLoader for training data
    :param model: nn.ModuleDict with 'encoder', 'decoder' keys
    :param criterion: loss layer applied to the packed logits/targets
    :param epoch: epoch number (1-based; used to derive the global step count)
    :param optimizer: optimizer
    :param device: torch device to run the batches on
    :param tb_writer: optional TensorBoard-style writer for per-batch logging
    :param kwargs: alpha_c (attention reg. weight), grad_clip (max grad norm),
        print_freq (batches between console reports), use_emotion (feed the
        batch's 'emotion' tensor as decoder auxiliary data)
    :return: average total loss per decoded word over the epoch
    """
    alpha_c = kwargs.get('alpha_c', 1.0)  # Weight of doubly stochastic (attention) regularization.
    grad_clip = kwargs.get('grad_clip', 5.0)  # Gradient clipping (norm magnitude)
    print_freq = kwargs.get('print_freq', 100)
    use_emotion = kwargs.get('use_emotion', False)
    batch_time = AverageMeter()  # forward prop. + back prop. time
    data_time = AverageMeter()  # data loading time
    entropy_loss_meter = AverageMeter()  # entropy loss (per word decoded)
    total_loss_meter = AverageMeter()
    start = time.time()
    # Global example counter, continued from previous epochs (epoch is 1-based).
    steps_taken = (epoch-1) * len(train_loader.dataset)
    model.train()
    for i, batch in enumerate(train_loader):
        imgs = batch['image'].to(device)
        caps = batch['tokens'].to(device)
        b_size = len(imgs)
        data_time.update(time.time() - start)
        if use_emotion:
            # NOTE(review): assumes the decoder accepts the emotion tensor as
            # its auxiliary_data argument -- confirm against the decoder API.
            emotion = batch['emotion'].to(device)
            res = model.decoder(model.encoder(imgs), caps, emotion)
        else:
            res = model.decoder(model.encoder(imgs), caps)
        logits, caps_sorted, decode_lengths, alphas, sort_ind = res
        # Since we decoded starting with <sos>, the targets are all words after <sos>, up to <eos>
        targets = caps_sorted[:, 1:]
        # Remove time-steps that we didn't decode at, or are pads
        # pack_padded_sequence is an easy trick to do this
        logits = pack_padded_sequence(logits, decode_lengths, batch_first=True)
        targets = pack_padded_sequence(targets, decode_lengths, batch_first=True)
        # Calculate loss
        ent_loss = criterion(logits.data, targets.data)
        total_loss = ent_loss
        # Add doubly stochastic attention regularization
        # Note. some implementation simply do this like: d_atn_loss = alpha_c * ((1. - alphas.sum(dim=1)) ** 2).mean()
        # here we take care of the fact that some samples in the same batch have more/less tokens than others.
        if alpha_c > 0:
            total_energy = torch.from_numpy(np.array(decode_lengths)) / alphas.shape[-1]  # n_tokens / num_pixels
            total_energy.unsqueeze_(-1)  # B x 1
            total_energy = total_energy.to(device)
            d_atn_loss = alpha_c * ((total_energy - alphas.sum(dim=1)) ** 2).mean()
            total_loss += d_atn_loss
        # Back prop.
        optimizer.zero_grad()
        total_loss.backward()
        if grad_clip is not None:
            clip_grad_norm_(model.parameters(), grad_clip)
        # Update weights
        optimizer.step()
        # Keep track of metrics (weighted by the number of decoded words)
        entropy_loss_meter.update(ent_loss.item(), sum(decode_lengths))
        total_loss_meter.update(total_loss.item(), sum(decode_lengths))
        batch_time.update(time.time() - start)
        start = time.time()
        steps_taken += b_size
        # Print status
        if print_freq is not None and i % print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data Load Time {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})'.format(epoch, i, len(train_loader),
                                                                batch_time=batch_time,
                                                                data_time=data_time,
                                                                loss=total_loss_meter))
        if tb_writer is not None:
            tb_writer.add_scalar('training-entropy-loss-with-batch-granularity', entropy_loss_meter.avg, steps_taken)
    return total_loss_meter.avg
@torch.no_grad()
def negative_log_likelihood(model, data_loader, device):
"""
:param model:
:param data_loader:
:param | |
= 0
self.alterSpeed( choice( [-1, 1] ) )
self.rise = ( 0, -8, -16, -20, -16, -8, 0 )
self.newSlime = None
def move(self, delay, sprites):
    """Advance the slime one frame: patrol within scope, animate, attack on contact.

    :param delay: global frame counter used to pace the bobbing animation
    :param sprites: hero sprites tested for mask collision
    :return: a newly split slime pending from hitted(), if any, else None
    """
    self.checkHitBack()
    self.rect.left += self.speed
    # Turn around when either patrol boundary is reached.
    if (getPos(self,0.75,0)[0] >= self.scope[1] and self.speed > 0) or (getPos(self,0.25,0)[0] <= self.scope[0] and self.speed < 0):
        self.alterSpeed(-self.speed)
    if not (delay % 6 ):
        # Temporarily store the current anchor point so the next frame's image
        # keeps the correct position (the sprite bobs vertically per self.rise).
        trPos = [ self.rect.left + self.rect.width//2, self.rect.bottom-self.rise[self.imgIndx] ]
        self.imgIndx = (self.imgIndx+1) % len(self.imgLeftList)
        self.image = self.imgLeftList[self.imgIndx] if ( self.speed<0 ) else self.imgRightList[self.imgIndx]
        self.mask = pygame.mask.from_surface(self.image)
        self.rect = self.image.get_rect()
        self.rect.left = trPos[0]-self.rect.width//2
        self.rect.bottom = trPos[1] + self.rise[self.imgIndx]
    if ( self.coolDown==0 ):
        # Idle: start the attack cooldown when touching any hero.
        for each in sprites:
            if pygame.sprite.collide_mask(self, each):
                self.coolDown = 30
    if (self.coolDown > 0):
        self.coolDown -= 1
        if ( self.coolDown == 20 ):
            # Deal damage once, partway through the cooldown.
            cldList( self, sprites )
    # If there is a newly generated slime, return it.
    if (self.newSlime):
        newS = self.newSlime
        self.newSlime = None
        return newS
def hitted(self, damage, pushed):
    """Apply damage and knockback; a head-on hit may split off a new slime.

    :param damage: hit points to subtract from health
    :param pushed: knockback impulse; >0 pushes right, <0 pushes left
    :return: True when the slime dies, else None
    """
    if pushed>0:  # knocked back to the right
        self.hitBack = max( pushed-self.weight, 0 )
    elif pushed<0:  # knocked back to the left
        self.hitBack = min( pushed+self.weight, 0 )
    self.health -= damage
    if self.health <= 0:  # dead
        self.health = 0
        self.erase()
        return True
    else:  # hitted but alive, check whether split another slime.
        # Split only on a head-on hit (moving against the knockback direction).
        if ( pushed>0 and self.speed<0 ) or ( pushed<0 and self.speed>0 ):
            # The first two constructor arguments are not really needed here,
            # but are passed so __init__ runs to completion (second set to 1).
            slime = Slime( self.wallList, 1, self.onlayer )
            # Set the properties of the generated slime.
            slime.scope = self.scope
            slime.speed = -self.speed  # opposite direction
            slime.imgIndx = self.imgIndx  # same animation frame
            slime.image = slime.imgLeftList[0] if ( slime.speed<0 ) else slime.imgRightList[slime.imgIndx]
            slime.rect = slime.image.get_rect()
            slime.rect.left = getPos(self, 0.5, 0.5)[0] - slime.rect.width//2
            slime.rect.bottom = self.rect.bottom  # same position
            # Hang it on self.newSlime; move() reports it to the model on the
            # next refresh.
            self.newSlime = slime
def level(self, dist):
    """Shift this slime horizontally by `dist` pixels (level scrolling).

    The sprite rect and its patrol scope move together so patrolling
    continues correctly after the scroll.
    """
    lo, hi = self.scope
    self.rect.left += dist
    self.scope = (lo + dist, hi + dist)
# -----------------------------------
class Fly(Monster):
    """Flying monster that drifts between random waypoints and bites on contact."""

    def __init__(self, XRange, y, onlayer):
        """
        :param XRange: (left, right) horizontal bounds the fly may roam within
        :param y: initial top coordinate
        :param onlayer: map layer the fly belongs to
        """
        # initialize the sprite
        Monster.__init__(self, "fly", (0,255,10,240), 38, 6, 1, onlayer, 4, 3)
        self.imgLeftList = [ pygame.image.load("image/stg4/flyLeft0.png").convert_alpha(), pygame.image.load("image/stg4/flyLeft1.png").convert_alpha(), \
            pygame.image.load("image/stg4/flyLeft0.png").convert_alpha(), pygame.image.load("image/stg4/flyLeft2.png").convert_alpha() ]
        self.imgRightList = [ pygame.transform.flip(self.imgLeftList[0], True, False), pygame.transform.flip(self.imgLeftList[1], True, False), \
            pygame.transform.flip(self.imgLeftList[2], True, False), pygame.transform.flip(self.imgLeftList[3], True, False) ]
        self.attLeftImg = pygame.image.load("image/stg4/flyAtt.png").convert_alpha()
        self.attRightImg = pygame.transform.flip(self.attLeftImg, True, False)
        self.imgIndx = 0
        self.image = self.imgLeftList[0]
        self.mask = pygame.mask.from_surface(self.image)
        # calculate its position
        self.rect = self.image.get_rect()
        self.leftBd = XRange[0]
        self.rightBd = XRange[1]
        self.rect.left = randint( XRange[0], XRange[1] )
        self.rect.top = y
        self.damage = 5.5
        self.alterSpeed(-1)
        self.cnt = 0  # count for the loop of shift position
        self.coolDown = 0  # count for attack coolDown
        self.nxt = [0, 0]  # next waypoint [x, y] the fly is drifting toward
        self.snd = pygame.mixer.Sound("audio/flapper.wav")

    def move(self, delay, sprites):
        """Per-frame update: animate, drift toward the waypoint, attack on contact.

        :param delay: global frame counter used to pace animation/movement
        :param sprites: hero sprites tested for mask collision
        """
        self.checkHitBack()
        if not (delay % 4 ):
            self.imgIndx = (self.imgIndx+1) % len(self.imgLeftList)
            self.image = self.imgLeftList[self.imgIndx] if ( self.direction=="left" ) else self.imgRightList[self.imgIndx]
        # find new position
        if self.cnt == 0:
            self.cnt = 60
            self.nxt = [ randint(self.leftBd, self.rightBd), randint(20, 580) ]  # randomize a new position
            self.direction = "left" if ( self.nxt[0] < self.rect.left + self.rect.width/2 ) else "right"
        # charging motion
        if not (delay % 3):
            self.shift( self.nxt[0], self.nxt[1] )
            self.cnt -= 1
        if random()<0.02:
            # Occasionally replay the wing-flapping sound.
            self.snd.play(0)
        if (self.coolDown == 0):
            # Idle: start an attack cooldown when touching any hero sprite.
            for each in sprites:
                if pygame.sprite.collide_mask(self, each):
                    self.coolDown = 42
        elif self.coolDown > 0:
            self.coolDown -= 1
            if self.coolDown >= 32:
                # Early part of the cooldown shows the attack pose.
                if self.direction=="left":
                    self.image = self.attLeftImg
                    self.push = self.pushList[0]
                else:
                    self.image = self.attRightImg
                    self.push = self.pushList[1]
            if self.coolDown == 34:
                # Deal damage once, mid-way through the attack animation.
                cldList( self, sprites )

    def shift(self, final_x, final_y):
        """Move the fly's center one step toward (final_x, final_y).

        The step per axis is 1/spd of the remaining distance, capped at
        maxSpan pixels.
        NOTE(review): the early return fires when EITHER axis already matches,
        which also stops movement on the other axis -- confirm intended.
        """
        maxSpan = 8
        spd = 4
        dist = 0
        x = self.rect.left + self.rect.width/2
        y = self.rect.top + self.rect.height/2
        if (x == final_x) or (y == final_y):
            return True
        if (x < final_x):
            dist = math.ceil( (final_x - x)/spd )
            if dist > maxSpan:
                dist = maxSpan
            self.rect.left += dist
        elif (x > final_x):
            dist = math.ceil( (x - final_x)/spd )
            if dist > maxSpan:
                dist = maxSpan
            self.rect.left -= dist
        if (y < final_y):
            dist = math.ceil( (final_y - y)/spd )
            if dist > maxSpan:
                dist = maxSpan
            self.rect.top += dist
        elif (y > final_y):
            dist = math.ceil( (y - final_y)/spd )
            if dist > maxSpan:
                dist = maxSpan
            self.rect.top -= dist

    def erase(self):
        """Remove the fly: stop its sound and kill the sprite."""
        self.snd.stop()
        self.kill()
        del self

    def level(self, dist):
        """Shift the fly, its waypoint, and its roaming bounds horizontally by `dist`."""
        self.rect.left += dist
        self.nxt[0] += dist
        self.leftBd += dist
        self.rightBd += dist
# -----------------------------------
class Nest(Monster):
    """Stationary hive attached under a brick; periodically spawns Worms and
    bursts into three Worms when destroyed."""

    def __init__(self, wallGroup, onlayer):
        """
        :param wallGroup: sprite group of this row's bricks; the nest hangs
            beneath a randomly chosen one
        :param onlayer: map layer the nest belongs to
        """
        # calculate its position
        Monster.__init__(self, "nest", (255,255,80,240), 55, 0, 0, onlayer, 3, 2)
        # Store all bricks of this row. Reset per instance -- otherwise walls
        # would keep accumulating on top of a previous instance's list!
        self.wallList = []
        posList = []  # helper list of each brick's left edge
        # Sprite groups do not support indexing/random choice, so copy each
        # sprite into a plain list first.
        # NOTE(review): posList is filled but never used afterwards.
        for aWall in wallGroup:
            self.wallList.append(aWall)
            posList.append(aWall.rect.left)
        wall = choice(self.wallList)
        # initialize the sprite
        self.imgList = [ pygame.image.load("image/stg4/nest0.png").convert_alpha(), pygame.image.load("image/stg4/nest1.png").convert_alpha() ]
        self.attLList = [ ]
        self.imgIndx = 0
        self.image = self.imgList[0]
        self.mask = pygame.mask.from_surface(self.image)
        self.rect = self.image.get_rect()
        self.rect.left = wall.rect.left
        self.rect.top = wall.rect.bottom - 4  # link more tight with the block (block bottom is not even)

    def move(self, delay, allElem, mons):
        """Animate and occasionally spawn a Worm; burst into three on death.

        :param delay: global frame counter pacing the animation
        :param allElem: sprite group of all drawable elements (new worms added)
        :param mons: monster sprite group (new worms added)
        :return: tuple of three Worms when the nest dies, else None
        """
        if self.health > 0:
            if not (delay % 24 ):
                self.imgIndx = (self.imgIndx+1) % len(self.imgList)
                self.image = self.imgList[self.imgIndx]
                if random() < 0.1:
                    # 10% chance per animation tick to drop a single worm.
                    worm = Worm( self.rect.left+self.rect.width//2, self.rect.bottom, self.onlayer )
                    allElem.add( worm )
                    mons.add( worm )
        else:
            # Nest destroyed: release three worms from different spots.
            pos1 = getPos(self, 0.3, 0.3)
            worm1 = Worm( pos1[0], pos1[1], self.onlayer )
            pos2 = getPos(self, 0.5, 0.6)
            worm2 = Worm( pos2[0], pos2[1], self.onlayer )
            pos3 = getPos(self, 0.7, 0.3)
            worm3 = Worm( pos3[0], pos3[1], self.onlayer )
            self.erase()
            return ( worm1, worm2, worm3 )

    def hitted(self, damage, pushed):
        """Take damage (nests cannot be pushed); return True when destroyed."""
        self.health -= damage
        if self.health <= 0:
            self.health = 0
            return True

    def level(self, dist):
        """Shift the nest horizontally by `dist` (level scrolling)."""
        self.rect.left += dist
class Worm(Monster):
    """Small crawling monster spawned by a Nest; explodes on death or when its
    lifespan runs out, dealing splash damage."""

    def __init__(self, x, y, onlayer):
        """
        :param x: spawn center x
        :param y: spawn top y
        :param onlayer: layer of the spawner; worms always drop one layer,
            hence onlayer-2 is passed to Monster.__init__
        """
        # Spawned worms always fall down one layer, so pass onlayer-2.
        Monster.__init__(self, "worm", (255,255,10,240), 5, 1, 0, onlayer-2, 1, 1)
        # initialize the sprite
        self.imgLeftList = [ pygame.image.load("image/stg4/worm0.png").convert_alpha(), pygame.image.load("image/stg4/worm1.png").convert_alpha(), \
            pygame.image.load("image/stg4/worm2.png").convert_alpha(), pygame.image.load("image/stg4/worm3.png").convert_alpha(), pygame.image.load("image/stg4/worm4.png").convert_alpha() ]
        self.imgRightList = [ pygame.transform.flip(self.imgLeftList[0], True, False), pygame.transform.flip(self.imgLeftList[1], True, False), \
            pygame.transform.flip(self.imgLeftList[2], True, False), pygame.transform.flip(self.imgLeftList[3], True, False), pygame.transform.flip(self.imgLeftList[4], True, False) ]
        self.imgIndx = 0
        self.attIndx = 0
        self.image = self.imgLeftList[0]
        self.mask = pygame.mask.from_surface( self.image )
        self.rect = self.image.get_rect()
        self.rect.left = x-self.rect.width//2
        self.rect.top = y
        self.damage = 4.5
        # Randomly choose to crawl left or right.
        if random()<= 0.5:
            self.alterSpeed(-1)
        else:
            self.alterSpeed(1)
        self.aground = False  # True once the worm has landed on a wall
        self.doom = 0  # frame counter driving the death/explosion animation
        self.lifeSpan = 300  # frames to live before self-destructing

    def move(self, delay, lineWall, keyLine, sideWall, sprites):
        """Per-frame update: fall, crawl, collide with heroes, or explode.

        :param delay: global frame counter pacing the crawl animation
        :param lineWall: walls of the current layer (landing surface)
        :param keyLine: y threshold marking a drop to the layer below
        :param sideWall: boundary walls that reverse the crawl direction
        :param sprites: hero sprites tested for mask collision
        """
        self.checkHitBack()
        # Always check falling, regardless of health/lifespan.
        self.fall(lineWall, keyLine)
        # While healthy, check collision with the heroes.
        if (self.health>0) and (self.lifeSpan > 0):
            self.lifeSpan -= 1
            # If grounded, the worm can move horizontally.
            if self.aground and not (delay % 5):
                self.rect.left += self.speed
                self.imgIndx = (self.imgIndx+1) % len(self.imgLeftList)
                self.image = self.imgLeftList[self.imgIndx] if ( self.speed<0 ) else self.imgRightList[self.imgIndx]
                if ( pygame.sprite.spritecollide(self, sideWall, False, pygame.sprite.collide_mask) ):
                    self.alterSpeed(-self.speed)
            for each in sprites:
                if ( pygame.sprite.collide_mask(self, each) ):
                    # Touching a hero triggers self-destruction (explosion below).
                    self.health = 0
        else:
            # Dead or expired: play the three-stage explosion, then erase.
            self.doom += 1
            if self.doom == 19:
                self.erase()
            elif self.doom >= 13:
                self.image = pygame.image.load("image/stg4/boom2.png")
            elif self.doom >= 7:
                self.image = pygame.image.load("image/stg4/boom1.png")
                # Splash damage to heroes, applied exactly once.
                if self.doom == 7:
                    cldList( self, sprites )
            elif self.doom >= 1:
                self.image = pygame.image.load("image/stg4/boom0.png")

    def fall(self, lineWall, keyLine):
        """Apply gravity and settle the worm on top of any wall it overlaps.

        :param lineWall: walls of the current layer
        :param keyLine: y threshold; crossing it drops the worm one layer
        """
        self.rect.bottom += 5
        # While overlapping a wall, move up 1px at a time until just clear.
        while ( pygame.sprite.spritecollide(self, lineWall, False, pygame.sprite.collide_mask) ):
            self.rect.bottom -= 1
            if not ( pygame.sprite.spritecollide(self, lineWall, False, pygame.sprite.collide_mask) ):
                # Just cleared the wall: the worm is standing on it.
                self.aground = True
        if self.rect.top >= keyLine:
            # Crossed the key line: the worm has dropped to the layer below.
            self.onlayer -= 2
            if self.onlayer<-1:
                # Fell off the bottom of the map.
                self.erase()

    def hitted(self, damage, pushed):
        """Take damage; return True when dead. Worms ignore knockback."""
        self.health -= damage
        if self.health <= 0:
            self.health = 0
            return True  # dead

    def level(self, dist):
        """Shift the worm horizontally by `dist` (level scrolling)."""
        self.rect.left += dist
# -----------------------------------
class Python(Monster):
# 构造方式:本体为head,其余部分均用Ajunction实现。
# onlayer是该python所在的行高;xLine是Python缠绕状态下head的竖直方向中点;boundaries是左右sideWall的内侧边界,用于确定当python head抵达时转换为twine状态。
def __init__(self, onlayer, xLine, boundaries, blockSize):
# initialize the sprite
Monster.__init__(self, "Python", (10, 30, 10, 240), 100, 0, 0, onlayer, 40, 20)
self.onlayer = int(onlayer)
self.xLine = xLine
self.boundaries = boundaries
#self.coolDown = 0
#self.speed = 4 # attacking时的横向速度绝对值
self.status = "await" # status可取三种情况:await(初始), twine, attack。初始情况下默认为left,方向决定了位置
# ----- head part (the core of the Python) ------
self.imgLeft = [ pygame.image.load("image/stg4/twine0.png").convert_alpha(), pygame.image.load("image/stg4/twine1.png").convert_alpha(),
pygame.image.load("image/stg4/twine2.png").convert_alpha(), pygame.image.load("image/stg4/twine3.png").convert_alpha(),
pygame.image.load("image/stg4/twine4.png").convert_alpha() ]
self.imgRight = [ pygame.transform.flip(self.imgLeft[0], True, False), pygame.transform.flip(self.imgLeft[1], True, False),
pygame.transform.flip(self.imgLeft[2], True, False), pygame.transform.flip(self.imgLeft[3], True, False),
pygame.transform.flip(self.imgLeft[4], True, False) ]
self.imgR = { "left":[ () ] }
#self.headLeftList = [ pygame.image.load("image/stg6/WarMachine0.png").convert_alpha(), pygame.image.load("image/stg6/WarMachine1.png").convert_alpha() ]
#self.headRightList = [ pygame.transform.flip(self.bodyLeftList[0], True, False), pygame.transform.flip(self.bodyLeftList[1], True, False) ]
self.imgIndx = 0
self.image = self.imgLeft[0]
#self.image = self.headLeftList[0]
self.mask = pygame.mask.from_surface(self.image)
# calculate its position
self.rect = self.image.get_rect()
self.rect.left = self.boundaries[1]-self.rect.width # 位于右侧边界向左
self.rect.bottom = self.xLine+self.rect.height//2
# ------------- segment part --------------
#self.segLeft = [ pygame.image.load("image/stg6/arm.png").convert_alpha(), pygame.image.load("image/stg6/armFire.png").convert_alpha() ]
#self.segRight = | |
# repo: pasientskyhosting/redis-operator
load("@bazel_gazelle//:deps.bzl", "go_repository")
def go_repositories():
go_repository(
name = "co_elastic_go_apm",
importpath = "go.elastic.co/apm",
sum = "h1:arba7i+CVc36Jptww3R1ttW+O10ydvnBtidyd85DLpg=",
version = "v1.5.0",
)
go_repository(
name = "co_elastic_go_apm_module_apmhttp",
importpath = "go.elastic.co/apm/module/apmhttp",
sum = "h1:sxntP97oENyWWi+6GAwXUo05oEpkwbiarZLqrzLRA4o=",
version = "v1.5.0",
)
go_repository(
name = "co_elastic_go_apm_module_apmot",
importpath = "go.elastic.co/apm/module/apmot",
sum = "h1:rPyHRI6Ooqjwny67au6e2eIxLZshqd7bJfAUpdgOw/4=",
version = "v1.5.0",
)
go_repository(
name = "co_elastic_go_fastjson",
importpath = "go.elastic.co/fastjson",
sum = "h1:ooXV/ABvf+tBul26jcVViPT3sBir0PvXgibYB1IQQzg=",
version = "v1.0.0",
)
go_repository(
name = "co_honnef_go_tools",
importpath = "honnef.co/go/tools",
sum = "h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=",
version = "v0.0.1-2019.2.3",
)
go_repository(
name = "com_github_agnivade_levenshtein",
importpath = "github.com/agnivade/levenshtein",
sum = "h1:3oJU7J3FGFmyhn8KHjmVaZCN5hxTr7GxgRue+sxIXdQ=",
version = "v1.0.1",
)
go_repository(
name = "com_github_alecthomas_template",
importpath = "github.com/alecthomas/template",
sum = "h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=",
version = "v0.0.0-20190718012654-fb15b899a751",
)
go_repository(
name = "com_github_alecthomas_units",
importpath = "github.com/alecthomas/units",
sum = "h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E=",
version = "v0.0.0-20190924025748-f65c72e2690d",
)
go_repository(
name = "com_github_aliyun_aliyun_oss_go_sdk",
importpath = "github.com/aliyun/aliyun-oss-go-sdk",
sum = "h1:EaK5256H3ELiyaq5O/Zwd6fnghD6DqmZDQmmzzJklUU=",
version = "v2.0.4+incompatible",
)
go_repository(
name = "com_github_andreyvit_diff",
importpath = "github.com/andreyvit/diff",
sum = "h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ=",
version = "v0.0.0-20170406064948-c7f18ee00883",
)
go_repository(
name = "com_github_antihax_optional",
importpath = "github.com/antihax/optional",
sum = "h1:uZuxRZCz65cG1o6K/xUqImNcYKtmk9ylqaH0itMSvzA=",
version = "v0.0.0-20180407024304-ca021399b1a6",
)
go_repository(
name = "com_github_apache_thrift",
importpath = "github.com/apache/thrift",
sum = "h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs=",
version = "v0.12.0",
)
go_repository(
name = "com_github_armon_circbuf",
importpath = "github.com/armon/circbuf",
sum = "h1:QEF07wC0T1rKkctt1RINW/+RMTVmiwxETico2l3gxJA=",
version = "v0.0.0-20150827004946-bbbad097214e",
)
go_repository(
name = "com_github_armon_consul_api",
importpath = "github.com/armon/consul-api",
sum = "h1:G1bPvciwNyF7IUmKXNt9Ak3m6u9DE1rF+RmtIkBpVdA=",
version = "v0.0.0-20180202201655-eb2c6b5be1b6",
)
go_repository(
name = "com_github_armon_go_metrics",
importpath = "github.com/armon/go-metrics",
sum = "h1:B7AQgHi8QSEi4uHu7Sbsga+IJDU+CENgjxoo81vDUqU=",
version = "v0.3.0",
)
go_repository(
name = "com_github_armon_go_radix",
importpath = "github.com/armon/go-radix",
sum = "h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=",
version = "v1.0.0",
)
go_repository(
name = "com_github_asaskevich_govalidator",
importpath = "github.com/asaskevich/govalidator",
sum = "h1:zV3ejI06GQ59hwDQAvmK1qxOQGB3WuVTRoY0okPTAv0=",
version = "v0.0.0-20200108200545-475eaeb16496",
)
go_repository(
name = "com_github_aws_aws_sdk_go",
importpath = "github.com/aws/aws-sdk-go",
sum = "h1:J82DYDGZHOKHdhx6hD24Tm30c2C3GchYGfN0mf9iKUk=",
version = "v1.25.48",
)
go_repository(
name = "com_github_azure_azure_pipeline_go",
importpath = "github.com/Azure/azure-pipeline-go",
sum = "h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot6ltoThhY=",
version = "v0.2.2",
)
go_repository(
name = "com_github_azure_azure_sdk_for_go",
importpath = "github.com/Azure/azure-sdk-for-go",
sum = "h1:smHlbChr/JDmsyUqELZXLs0YIgpXecIGdUibuc2983s=",
version = "v36.1.0+incompatible",
)
go_repository(
name = "com_github_azure_azure_storage_blob_go",
importpath = "github.com/Azure/azure-storage-blob-go",
sum = "h1:53qhf0Oxa0nOjgbDeeYPUeyiNmafAFEY95rZLK0Tj6o=",
version = "v0.8.0",
)
go_repository(
name = "com_github_azure_go_ansiterm",
importpath = "github.com/Azure/go-ansiterm",
sum = "h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=",
version = "v0.0.0-20170929234023-d6e3b3328b78",
)
go_repository(
name = "com_github_azure_go_autorest",
importpath = "github.com/Azure/go-autorest",
replace = "github.com/Azure/go-autorest",
sum = "h1:VxzPyuhtnlBOzc4IWCZHqpyH2d+QMLQEuy3wREyY4oc=",
version = "v13.3.2+incompatible",
)
go_repository(
name = "com_github_azure_go_autorest_autorest",
importpath = "github.com/Azure/go-autorest/autorest",
sum = "h1:5YWtOnckcudzIw8lPPBcWOnmIFWMtHci1ZWAZulMSx0=",
version = "v0.9.6",
)
go_repository(
name = "com_github_azure_go_autorest_autorest_adal",
importpath = "github.com/Azure/go-autorest/autorest/adal",
sum = "h1:O1X4oexUxnZCaEUGsvMnr8ZGj8HI37tNezwY4npRqA0=",
version = "v0.8.2",
)
go_repository(
name = "com_github_azure_go_autorest_autorest_date",
importpath = "github.com/Azure/go-autorest/autorest/date",
sum = "h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM=",
version = "v0.2.0",
)
go_repository(
name = "com_github_azure_go_autorest_autorest_mocks",
importpath = "github.com/Azure/go-autorest/autorest/mocks",
sum = "h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc=",
version = "v0.3.0",
)
go_repository(
name = "com_github_azure_go_autorest_autorest_to",
importpath = "github.com/Azure/go-autorest/autorest/to",
sum = "h1:2McfZNaDqGPjv2pddK547PENIk4HV+NT7gvqRq4L0us=",
version = "v0.3.1-0.20191028180845-3492b2aff503",
)
go_repository(
name = "com_github_azure_go_autorest_autorest_validation",
importpath = "github.com/Azure/go-autorest/autorest/validation",
sum = "h1:RBrGlrkPWapMcLp1M6ywCqyYKOAT5ERI6lYFvGKOThE=",
version = "v0.2.1-0.20191028180845-3492b2aff503",
)
go_repository(
name = "com_github_azure_go_autorest_logger",
importpath = "github.com/Azure/go-autorest/logger",
sum = "h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=",
version = "v0.1.0",
)
go_repository(
name = "com_github_azure_go_autorest_tracing",
importpath = "github.com/Azure/go-autorest/tracing",
sum = "h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=",
version = "v0.5.0",
)
go_repository(
name = "com_github_baiyubin_aliyun_sts_go_sdk",
importpath = "github.com/baiyubin/aliyun-sts-go-sdk",
sum = "h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA=",
version = "v0.0.0-20180326062324-cfa1a18b161f",
)
go_repository(
name = "com_github_beorn7_perks",
importpath = "github.com/beorn7/perks",
sum = "h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=",
version = "v1.0.1",
)
go_repository(
name = "com_github_bgentry_speakeasy",
importpath = "github.com/bgentry/speakeasy",
sum = "h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=",
version = "v0.1.0",
)
go_repository(
name = "com_github_bitly_go_hostpool",
importpath = "github.com/bitly/go-hostpool",
sum = "h1:mXoPYz/Ul5HYEDvkta6I8/rnYM5gSdSV2tJ6XbZuEtY=",
version = "v0.0.0-20171023180738-a3a6125de932",
)
go_repository(
name = "com_github_bitly_go_simplejson",
importpath = "github.com/bitly/go-simplejson",
sum = "h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y=",
version = "v0.5.0",
)
go_repository(
name = "com_github_blang_semver",
importpath = "github.com/blang/semver",
sum = "h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=",
version = "v3.5.1+incompatible",
)
go_repository(
name = "com_github_bmizerany_assert",
importpath = "github.com/bmizerany/assert",
sum = "h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=",
version = "v0.0.0-20160611221934-b7ed37b82869",
)
go_repository(
name = "com_github_bradfitz_gomemcache",
importpath = "github.com/bradfitz/gomemcache",
sum = "h1:L/QXpzIa3pOvUGt1D1lA5KjYhPBAN/3iWdP7xeFS9F0=",
version = "v0.0.0-20190913173617-a41fca850d0b",
)
go_repository(
name = "com_github_brancz_gojsontoyaml",
importpath = "github.com/brancz/gojsontoyaml",
sum = "h1:PdvQdwUXiFnSmWsOJcBXLpyH3mJfP2FMPTT3J0i7+8o=",
version = "v0.0.0-20191212081931-bf2969bbd742",
)
go_repository(
name = "com_github_brancz_kube_rbac_proxy",
importpath = "github.com/brancz/kube-rbac-proxy",
sum = "h1:RdMeazKvTXwH66i9DeVtEV/o0XAqyhSaiWkZ8gj54x4=",
version = "v0.5.0",
)
go_repository(
name = "com_github_bshuster_repo_logrus_logstash_hook",
importpath = "github.com/bshuster-repo/logrus-logstash-hook",
sum = "h1:pgAtgj+A31JBVtEHu2uHuEx0n+2ukqUJnS2vVe5pQNA=",
version = "v0.4.1",
)
go_repository(
name = "com_github_bugsnag_bugsnag_go",
importpath = "github.com/bugsnag/bugsnag-go",
sum = "h1:yeRUT3mUE13jL1tGwvoQsKdVbAsQx9AJ+fqahKveP04=",
version = "v1.5.3",
)
go_repository(
name = "com_github_bugsnag_osext",
importpath = "github.com/bugsnag/osext",
sum = "h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ=",
version = "v0.0.0-20130617224835-0dd3f918b21b",
)
go_repository(
name = "com_github_bugsnag_panicwrap",
importpath = "github.com/bugsnag/panicwrap",
sum = "h1:OzrKrRvXis8qEvOkfcxNcYbOd2O7xXS2nnKMEMABFQA=",
version = "v1.2.0",
)
go_repository(
name = "com_github_burntsushi_toml",
importpath = "github.com/BurntSushi/toml",
sum = "h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=",
version = "v0.3.1",
)
go_repository(
name = "com_github_burntsushi_xgb",
importpath = "github.com/BurntSushi/xgb",
sum = "h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc=",
version = "v0.0.0-20160522181843-27f122750802",
)
go_repository(
name = "com_github_campoy_embedmd",
importpath = "github.com/campoy/embedmd",
sum = "h1:V4kI2qTJJLf4J29RzI/MAt2c3Bl4dQSYPuflzwFH2hY=",
version = "v1.0.0",
)
go_repository(
name = "com_github_cenkalti_backoff",
importpath = "github.com/cenkalti/backoff",
sum = "h1:Da6uN+CAo1Wf09Rz1U4i9QN8f0REjyNJ73BEwAq/paU=",
version = "v0.0.0-20181003080854-62661b46c409",
)
go_repository(
name = "com_github_cenkalti_backoff_v3",
importpath = "github.com/cenkalti/backoff/v3",
sum = "h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c=",
version = "v3.0.0",
)
go_repository(
name = "com_github_census_instrumentation_opencensus_proto",
importpath = "github.com/census-instrumentation/opencensus-proto",
sum = "h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=",
version = "v0.2.1",
)
go_repository(
name = "com_github_cespare_xxhash",
importpath = "github.com/cespare/xxhash",
sum = "h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=",
version = "v1.1.0",
)
go_repository(
name = "com_github_cespare_xxhash_v2",
importpath = "github.com/cespare/xxhash/v2",
sum = "h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=",
version = "v2.1.1",
)
go_repository(
name = "com_github_chai2010_gettext_go",
importpath = "github.com/chai2010/gettext-go",
sum = "h1:7aWHqerlJ41y6FOsEUvknqgXnGmJyJSbjhAWq5pO4F8=",
version = "v0.0.0-20160711120539-c6fed771bfd5",
)
go_repository(
name = "com_github_chzyer_logex",
importpath = "github.com/chzyer/logex",
sum = "h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=",
version = "v1.1.10",
)
go_repository(
name = "com_github_chzyer_readline",
importpath = "github.com/chzyer/readline",
sum = "h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8=",
version = "v0.0.0-20180603132655-2972be24d48e",
)
    # Generated Go module mirrors (gazelle `update-repos` style): each
    # go_repository pins one module by importpath, go.sum checksum and version.
    go_repository(
        name = "com_github_chzyer_test",
        importpath = "github.com/chzyer/test",
        sum = "h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8=",
        version = "v0.0.0-20180213035817-a1ea475d72b1",
    )
    go_repository(
        name = "com_github_circonus_labs_circonus_gometrics",
        importpath = "github.com/circonus-labs/circonus-gometrics",
        sum = "h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY=",
        version = "v2.3.1+incompatible",
    )
    go_repository(
        name = "com_github_circonus_labs_circonusllhist",
        importpath = "github.com/circonus-labs/circonusllhist",
        sum = "h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA=",
        version = "v0.1.3",
    )
    go_repository(
        name = "com_github_client9_misspell",
        importpath = "github.com/client9/misspell",
        sum = "h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=",
        version = "v0.3.4",
    )
    go_repository(
        name = "com_github_cncf_udpa_go",
        importpath = "github.com/cncf/udpa/go",
        sum = "h1:WBZRG4aNOuI15bLRrCgN8fCq8E5Xuty6jGbmSNEvSsU=",
        version = "v0.0.0-20191209042840-269d4d468f6f",
    )
    go_repository(
        name = "com_github_cockroachdb_apd",
        importpath = "github.com/cockroachdb/apd",
        sum = "h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=",
        version = "v1.1.0",
    )
    go_repository(
        name = "com_github_cockroachdb_cockroach_go",
        importpath = "github.com/cockroachdb/cockroach-go",
        sum = "h1:2zRrJWIt/f9c9HhNHAgrRgq0San5gRRUJTBXLkchal0=",
        version = "v0.0.0-20181001143604-e0a95dfd547c",
    )
    go_repository(
        name = "com_github_cockroachdb_datadriven",
        importpath = "github.com/cockroachdb/datadriven",
        sum = "h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y=",
        version = "v0.0.0-20190809214429-80d97fb3cbaa",
    )
    go_repository(
        name = "com_github_codahale_hdrhistogram",
        importpath = "github.com/codahale/hdrhistogram",
        sum = "h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w=",
        version = "v0.0.0-20161010025455-3a0bb77429bd",
    )
    go_repository(
        name = "com_github_containerd_cgroups",
        importpath = "github.com/containerd/cgroups",
        sum = "h1:tSNMc+rJDfmYntojat8lljbt1mgKNpTxUZJsSzJ9Y1s=",
        version = "v0.0.0-20190919134610-bf292b21730f",
    )
    go_repository(
        name = "com_github_containerd_console",
        importpath = "github.com/containerd/console",
        sum = "h1:uict5mhHFTzKLUCufdSLym7z/J0CbBJT59lYbP9wtbg=",
        version = "v0.0.0-20180822173158-c12b1e7919c1",
    )
    go_repository(
        name = "com_github_containerd_containerd",
        importpath = "github.com/containerd/containerd",
        sum = "h1:ForxmXkA6tPIvffbrDAcPUIB32QgXkt2XFj+F0UxetA=",
        version = "v1.3.2",
    )
    go_repository(
        name = "com_github_containerd_continuity",
        importpath = "github.com/containerd/continuity",
        sum = "h1:nXPkFq8X1a9ycY3GYQpFNxHh3j2JgY7zDZfq2EXMIzk=",
        version = "v0.0.0-20200413184840-d3ef23f19fbb",
    )
    go_repository(
        name = "com_github_containerd_fifo",
        importpath = "github.com/containerd/fifo",
        sum = "h1:PUD50EuOMkXVcpBIA/R95d56duJR9VxhwncsFbNnxW4=",
        version = "v0.0.0-20190226154929-a9fb20d87448",
    )
    go_repository(
        name = "com_github_containerd_go_runc",
        importpath = "github.com/containerd/go-runc",
        sum = "h1:esQOJREg8nw8aXj6uCN5dfW5cKUBiEJ/+nni1Q/D/sw=",
        version = "v0.0.0-20180907222934-5a6d9f37cfa3",
    )
    go_repository(
        name = "com_github_containerd_ttrpc",
        importpath = "github.com/containerd/ttrpc",
        sum = "h1:IfVOxKbjyBn9maoye2JN95pgGYOmPkQVqxtOu7rtNIc=",
        version = "v1.0.1",
    )
    go_repository(
        name = "com_github_containerd_typeurl",
        importpath = "github.com/containerd/typeurl",
        sum = "h1:JNn81o/xG+8NEo3bC/vx9pbi/g2WI8mtP2/nXzu297Y=",
        version = "v0.0.0-20180627222232-a93fcdb778cd",
    )
    go_repository(
        name = "com_github_coreos_bbolt",
        importpath = "github.com/coreos/bbolt",
        sum = "h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s=",
        version = "v1.3.2",
    )
    go_repository(
        name = "com_github_coreos_etcd",
        importpath = "github.com/coreos/etcd",
        sum = "h1:+9RjdC18gMxNQVvSiXvObLu29mOFmkgdsB4cRTlV+EE=",
        version = "v3.3.15+incompatible",
    )
    go_repository(
        name = "com_github_coreos_go_etcd",
        importpath = "github.com/coreos/go-etcd",
        sum = "h1:bXhRBIXoTm9BYHS3gE0TtQuyNZyeEMux2sDi4oo5YOo=",
        version = "v2.0.0+incompatible",
    )
    go_repository(
        name = "com_github_coreos_go_oidc",
        importpath = "github.com/coreos/go-oidc",
        sum = "h1:sdJrfw8akMnCuUlaZU3tE/uYXFgfqom8DBE9so9EBsM=",
        version = "v2.1.0+incompatible",
    )
    go_repository(
        name = "com_github_coreos_go_semver",
        importpath = "github.com/coreos/go-semver",
        sum = "h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=",
        version = "v0.3.0",
    )
    go_repository(
        name = "com_github_coreos_go_systemd",
        importpath = "github.com/coreos/go-systemd",
        sum = "h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=",
        version = "v0.0.0-20190321100706-95778dfbb74e",
    )
    go_repository(
        name = "com_github_coreos_pkg",
        importpath = "github.com/coreos/pkg",
        sum = "h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=",
        version = "v0.0.0-20180928190104-399ea9e2e55f",
    )
    go_repository(
        name = "com_github_coreos_prometheus_operator",
        importpath = "github.com/coreos/prometheus-operator",
        sum = "h1:nMbUjGuF7UzVluucix/vsy4973BNdEiT/aX6kFtskKM=",
        version = "v0.38.1-0.20200424145508-7e176fda06cc",
    )
    go_repository(
        name = "com_github_cpuguy83_go_md2man",
        importpath = "github.com/cpuguy83/go-md2man",
        sum = "h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=",
        version = "v1.0.10",
    )
    go_repository(
        name = "com_github_cpuguy83_go_md2man_v2",
        importpath = "github.com/cpuguy83/go-md2man/v2",
        sum = "h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=",
        version = "v2.0.0",
    )
    go_repository(
        name = "com_github_creack_pty",
        importpath = "github.com/creack/pty",
        sum = "h1:6pwm8kMQKCmgUg0ZHTm5+/YvRK0s3THD/28+T6/kk4A=",
        version = "v1.1.7",
    )
    go_repository(
        name = "com_github_cyphar_filepath_securejoin",
        importpath = "github.com/cyphar/filepath-securejoin",
        sum = "h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg=",
        version = "v0.2.2",
    )
    go_repository(
        name = "com_github_cznic_b",
        importpath = "github.com/cznic/b",
        sum = "h1:UHFGPvSxX4C4YBApSPvmUfL8tTvWLj2ryqvT9K4Jcuk=",
        version = "v0.0.0-20180115125044-35e9bbe41f07",
    )
    go_repository(
        name = "com_github_cznic_fileutil",
        importpath = "github.com/cznic/fileutil",
        sum = "h1:7uSNgsgcarNk4oiN/nNkO0J7KAjlsF5Yv5Gf/tFdHas=",
        version = "v0.0.0-20180108211300-6a051e75936f",
    )
    go_repository(
        name = "com_github_cznic_golex",
        importpath = "github.com/cznic/golex",
        sum = "h1:CVAqftqbj+exlab+8KJQrE+kNIVlQfJt58j4GxCMF1s=",
        version = "v0.0.0-20170803123110-4ab7c5e190e4",
    )
    go_repository(
        name = "com_github_cznic_internal",
        importpath = "github.com/cznic/internal",
        sum = "h1:FHpbUtp2K8X53/b4aFNj4my5n+i3x+CQCZWNuHWH/+E=",
        version = "v0.0.0-20180608152220-f44710a21d00",
    )
    go_repository(
        name = "com_github_cznic_lldb",
        importpath = "github.com/cznic/lldb",
        sum = "h1:AIA+ham6TSJ+XkMe8imQ/g8KPzMUVWAwqUQQdtuMsHs=",
        version = "v1.1.0",
    )
    go_repository(
        name = "com_github_cznic_mathutil",
        importpath = "github.com/cznic/mathutil",
        sum = "h1:XNT/Zf5l++1Pyg08/HV04ppB0gKxAqtZQBRYiYrUuYk=",
        version = "v0.0.0-20180504122225-ca4c9f2c1369",
    )
| |
test_08__cont2host(self, mock_local, mock_isdir):
"""Test08 ExecutionEngineCommon()._cont2host()."""
self._init()
mock_isdir.return_value = True
ex_eng = udocker.ExecutionEngineCommon(mock_local)
ex_eng.opt["vol"] = ("/opt/xxx:/mnt",)
status = ex_eng._cont2host("/mnt")
self.assertEqual(status, "/opt/xxx")
ex_eng.opt["vol"] = ("/var/xxx:/mnt",)
status = ex_eng._cont2host("/opt")
self.assertTrue(status.endswith("/opt"))
# change dir to volume (regression of #51)
ex_eng.opt["vol"] = ("/var/xxx",)
status = ex_eng._cont2host("/var/xxx/tt")
self.assertEqual(status, "/var/xxx/tt")
# change dir to volume (regression of #51)
ex_eng.opt["vol"] = ("/var/xxx:/mnt",)
status = ex_eng._cont2host("/mnt/tt")
self.assertEqual(status, "/var/xxx/tt")
    @mock.patch('udocker.FileUtil')
    @mock.patch('udocker.LocalRepository')
    def test_09__create_mountpoint(self, mock_local, mock_futil):
        """Test09 ExecutionEngineCommon()._create_mountpoint()."""
        self._init()
        # Mount point directory does not exist yet; _create_mountpoint()
        # is expected to report success for this scenario.
        mock_futil.return_value.isdir.return_value = False
        exc = udocker.ExecutionEngineCommon(mock_local)
        status = exc._create_mountpoint("", "", True)
        self.assertTrue(status)
        # NOTE(review): the "directory already exists" scenario below was
        # disabled; it references mock_isdir, which is not patched here.
        # mock_isdir.return_value = True
        # exc = udocker.ExecutionEngineCommon(mock_local)
        # status = exc._create_mountpoint("", "")
        # self.assertTrue(status)
    @mock.patch('udocker.os.path.exists')
    @mock.patch('udocker.LocalRepository')
    def test_10__check_volumes(self, mock_local, mock_exists):
        """Test10 ExecutionEngineCommon()._check_volumes()."""
        self._init()
        # No volumes configured: nothing to validate, the check succeeds.
        exc = udocker.ExecutionEngineCommon(mock_local)
        exc.opt["vol"] = ()
        status = exc._check_volumes()
        self.assertTrue(status)
        # Host path reported as non-existent; the check still succeeds
        # (opt["vol"] is left at the engine's default for this instance).
        mock_exists.return_value = False
        exc = udocker.ExecutionEngineCommon(mock_local)
        status = exc._check_volumes()
        self.assertTrue(status)
@mock.patch('udocker.NixAuthentication')
@mock.patch('udocker.LocalRepository')
def test_11__get_bindhome(self, mock_local, mock_nixauth):
"""Test11 ExecutionEngineCommon()._get_bindhome()."""
self._init()
mock_nixauth.return_value.get_home.return_value = ""
prex = udocker.ExecutionEngineCommon(mock_local)
prex.opt["bindhome"] = False
status = prex._get_bindhome()
self.assertEqual(status, "")
mock_nixauth.return_value.get_home.return_value = "/home/user"
prex = udocker.ExecutionEngineCommon(mock_local)
prex.opt["bindhome"] = True
status = prex._get_bindhome()
self.assertEqual(status, "/home/user")
mock_nixauth.return_value.get_home.return_value = ""
prex = udocker.ExecutionEngineCommon(mock_local)
prex.opt["bindhome"] = True
status = prex._get_bindhome()
self.assertEqual(status, "")
@mock.patch('udocker.LocalRepository')
def test_12__is_volume(self, mock_local):
"""Test11 ExecutionEngineCommon()._is_volume()."""
self._init()
exc = udocker.ExecutionEngineCommon(mock_local)
exc.opt["vol"] = ["/tmp"]
status = exc._is_volume("/tmp")
self.assertTrue(status)
exc = udocker.ExecutionEngineCommon(mock_local)
exc.opt["vol"] = [""]
status = exc._is_volume("/tmp")
self.assertFalse(status)
# def test_13__is_mountpoint(self):
# """Test13 ExecutionEngineCommon()._is_mountpoint()."""
# pass
# def test_14__set_volume_bindings(self):
# """Test14 ExecutionEngineCommon()._set_volume_bindings()."""
# pass
    @mock.patch('udocker.Msg')
    @mock.patch('udocker.os.path.isdir')
    @mock.patch('udocker.os.path.exists')
    @mock.patch('udocker.ExecutionEngineCommon._cont2host')
    @mock.patch('udocker.ExecutionEngineCommon._getenv')
    @mock.patch('udocker.LocalRepository')
    def test_15__check_paths(self, mock_local, mock_getenv, mock_isinvol,
                             mock_exists, mock_isdir, mock_msg):
        """Test15 ExecutionEngineCommon()._check_paths()."""
        self._init()
        mock_msg.level = 0
        ex_eng = udocker.ExecutionEngineCommon(mock_local)
        # No PATH in the container env; nothing resolvable on disk.
        mock_getenv.return_value = ""
        mock_isinvol.return_value = False
        mock_exists.return_value = False
        mock_isdir.return_value = False
        # uid "0": a root-style default PATH must be appended to opt["env"].
        ex_eng.opt["uid"] = "0"
        ex_eng.opt["cwd"] = ""
        ex_eng.opt["home"] = "/home/user"
        ex_eng.container_root = "/containers/123/ROOT"
        status = ex_eng._check_paths()
        self.assertFalse(status)
        self.assertEqual(ex_eng.opt["env"][-1],
                         "PATH=/usr/sbin:/sbin:/usr/bin:/bin")
        # An empty cwd falls back to the home directory.
        self.assertEqual(ex_eng.opt["cwd"], ex_eng.opt["home"])
        # Non-root uid: the user-style default PATH is used instead.
        ex_eng.opt["uid"] = "1000"
        status = ex_eng._check_paths()
        self.assertFalse(status)
        self.assertEqual(ex_eng.opt["env"][-1],
                         "PATH=/usr/bin:/bin:/usr/local/bin")
        self.assertEqual(ex_eng.opt["cwd"], ex_eng.opt["home"])
        # cwd exists and is a directory: the path check succeeds.
        mock_exists.return_value = True
        mock_isdir.return_value = True
        status = ex_eng._check_paths()
        self.assertTrue(status)
    @mock.patch('udocker.Msg')
    @mock.patch('udocker.os.access')
    @mock.patch('udocker.os.readlink')
    @mock.patch('udocker.os.path.isfile')
    @mock.patch('udocker.ExecutionEngineCommon._getenv')
    @mock.patch('udocker.LocalRepository')
    def test_16__check_executable(self, mock_local, mock_getenv, mock_isfile,
                                  mock_readlink, mock_access, mock_msg):
        """Test16 ExecutionEngineCommon()._check_executable()."""
        self._init()
        mock_msg.level = 0
        ex_eng = udocker.ExecutionEngineCommon(mock_local)
        mock_getenv.return_value = ""
        # Entrypoint as a string with arguments; executable missing in the
        # container -> the check fails.
        ex_eng.opt["entryp"] = "/bin/shell -x -v"
        mock_isfile.return_value = False
        ex_eng.container_root = "/containers/123/ROOT"
        status = ex_eng._check_executable()
        self.assertFalse(status)
        # Executable present and accessible -> success.
        mock_isfile.return_value = True
        mock_access.return_value = True
        status = ex_eng._check_executable()
        self.assertTrue(status)
        # Entrypoint as a list and no cmd: cmd becomes the entrypoint.
        ex_eng.opt["entryp"] = ["/bin/shell", "-x", "-v"]
        ex_eng.opt["cmd"] = ""
        status = ex_eng._check_executable()
        self.assertEqual(ex_eng.opt["cmd"], ex_eng.opt["entryp"])
        # Entrypoint list plus existing cmd: cmd is prefixed by entrypoint.
        ex_eng.opt["entryp"] = ["/bin/shell", ]
        ex_eng.opt["cmd"] = ["-x", "-v"]
        status = ex_eng._check_executable()
        self.assertEqual(ex_eng.opt["cmd"], ["/bin/shell", "-x", "-v"])
    @mock.patch('udocker.ContainerStructure')
    @mock.patch('udocker.ExecutionEngineCommon._check_exposed_ports')
    @mock.patch('udocker.ExecutionEngineCommon._getenv')
    @mock.patch('udocker.LocalRepository')
    def test_17__run_load_metadata(self, mock_local, mock_getenv,
                                   mock_chkports, mock_cstruct):
        """Test17 ExecutionEngineCommon()._run_load_metadata()."""
        self._init()
        ex_eng = udocker.ExecutionEngineCommon(mock_local)
        mock_getenv.return_value = ""
        # Config.location set: metadata loading is bypassed.
        udocker.Config.location = "/tmp/container"
        status = ex_eng._run_load_metadata("123")
        self.assertEqual(status, ("", []))
        # No container directory resolved -> (None, None).
        udocker.Config.location = ""
        mock_cstruct.return_value.get_container_attr.return_value = ("", [])
        status = ex_eng._run_load_metadata("123")
        self.assertEqual(status, (None, None))
        # nometa set: container dir returned, metadata ignored.
        udocker.Config.location = ""
        mock_cstruct.return_value.get_container_attr.return_value = ("/x", [])
        ex_eng.opt["nometa"] = True
        status = ex_eng._run_load_metadata("123")
        self.assertEqual(status, ("/x", []))
        # Normal case: container dir plus (empty) metadata list.
        udocker.Config.location = ""
        mock_cstruct.return_value.get_container_attr.return_value = ("/x", [])
        ex_eng.opt["nometa"] = False
        status = ex_eng._run_load_metadata("123")
        self.assertEqual(status, ("/x", []))
@mock.patch('udocker.LocalRepository')
def test_18__check_env(self, mock_local):
"""Test18 ExecutionEngineCommon()._check_env()."""
self._init()
ex_eng = udocker.ExecutionEngineCommon(mock_local)
ex_eng.opt["env"] = ["HOME=/home/user", "PATH=/bin:/usr/bin"]
status = ex_eng._check_env()
self.assertTrue(status)
ex_eng = udocker.ExecutionEngineCommon(mock_local)
ex_eng.opt["env"] = ["HOME =", "PATH=/bin:/usr/bin"]
status = ex_eng._check_env()
self.assertFalse(status)
@mock.patch('udocker.LocalRepository')
def test_19__getenv(self, mock_local):
"""Test19 ExecutionEngineCommon()._getenv()."""
self._init()
ex_eng = udocker.ExecutionEngineCommon(mock_local)
ex_eng.opt["env"] = ["HOME=/home/user", "PATH=/bin:/usr/bin"]
status = ex_eng._getenv("")
self.assertEqual(status, None)
status = ex_eng._getenv("XXX")
self.assertEqual(status, None)
status = ex_eng._getenv("HOME")
self.assertEqual(status, "/home/user")
status = ex_eng._getenv("PATH")
self.assertEqual(status, "/bin:/usr/bin")
# def test_20__select_auth_files(self):
# """Test20 ExecutionEngineCommon()._select_auth_files()."""
# pass
# def test_21__validate_user_str(self):
# """Test21 ExecutionEngineCommon()._validate_user_str()."""
# pass
# def test_22__user_from_str(self):
# """Test22 ExecutionEngineCommon()._user_from_str()."""
# pass
# @mock.patch('udocker.Msg')
# @mock.patch('udocker.NixAuthentication')
# @mock.patch('udocker.LocalRepository')
# @mock.patch('udocker.ExecutionEngineCommon._create_user')
# def test_23__setup_container_user(self, mock_cruser,
# mock_local, mock_nix, mock_msg):
# """Test23 ExecutionEngineCommon()._setup_container_user()."""
# self._init()
# mock_msg.level = 0
# ex_eng = udocker.ExecutionEngineCommon(mock_local)
# status = ex_eng._setup_container_user("0:0")
# self.assertFalse(status)
# ex_eng = udocker.ExecutionEngineCommon(mock_local)
# ex_eng.opt["vol"] = ""
# ex_eng.opt["hostauth"] = False
# mock_nix.return_value.get_user.return_value = ("", "", "",
# "", "", "")
# status = ex_eng._setup_container_user("0:0")
# self.assertTrue(status)
# self.assertTrue(mock_cruser.called)
# ex_eng = udocker.ExecutionEngineCommon(mock_local)
# ex_eng.opt["vol"] = ""
# ex_eng.opt["hostauth"] = False
# mock_nix.return_value.get_user.return_value = ("root", 0, 0,
# "", "", "")
# status = ex_eng._setup_container_user("0:0")
# self.assertTrue(status)
# self.assertTrue(mock_cruser.called)
# ex_eng = udocker.ExecutionEngineCommon(mock_local)
# ex_eng.opt["vol"] = ""
# ex_eng.opt["hostauth"] = True
# mock_nix.return_value.get_user.return_value = ("", "", "",
# "", "", "")
# status = ex_eng._setup_container_user("0:0")
# self.assertFalse(status)
# ex_eng = udocker.ExecutionEngineCommon(mock_local)
# ex_eng.opt["vol"] = ""
# ex_eng.opt["hostauth"] = True
# mock_nix.return_value.get_user.return_value = ("root", 0, 0,
# "", "", "")
# status = ex_eng._setup_container_user("0:0")
# self.assertTrue(status)
# self.assertTrue(mock_cruser.called)
# ex_eng = udocker.ExecutionEngineCommon(mock_local)
# ex_eng.opt["vol"] = ""
# ex_eng.opt["hostauth"] = False
# mock_nix.return_value.get_user.return_value = ("", "", "",
# "", "", "")
# status = ex_eng._setup_container_user("")
# self.assertTrue(status)
# self.assertTrue(mock_cruser.called)
# ex_eng = udocker.ExecutionEngineCommon(mock_local)
# ex_eng.opt["vol"] = ""
# ex_eng.opt["hostauth"] = False
# mock_nix.return_value.get_user.return_value = ("root", 0, 0,
# "", "", "")
# status = ex_eng._setup_container_user("")
# self.assertTrue(status)
# self.assertTrue(mock_cruser.called)
# ex_eng = udocker.ExecutionEngineCommon(mock_local)
# ex_eng.opt["vol"] = ""
# ex_eng.opt["hostauth"] = True
# mock_nix.return_value.get_user.return_value = ("", "", "",
# "", "", "")
# status = ex_eng._setup_container_user("")
# self.assertFalse(status)
# ex_eng = udocker.ExecutionEngineCommon(mock_local)
# ex_eng.opt["vol"] = ""
# ex_eng.opt["hostauth"] = False
# mock_nix.return_value.get_user.return_value = ("", 100, 0,
# "", "", "")
# status = ex_eng._setup_container_user("0:0")
# self.assertTrue(status)
# self.assertTrue(mock_cruser.called)
# self.assertEqual(ex_eng.opt["user"], "")
# def test_24__setup_container_user_noroot(self):
# """Test24 ExecutionEngineCommon()._setup_container_user_noroot()."""
# pass
# def test_25__fill_user(self):
# """Test25 ExecutionEngineCommon()._fill_user()."""
# pass
# @mock.patch('udocker.os.getgroups')
# @mock.patch('udocker.FileUtil')
# @mock.patch('udocker.Msg')
# @mock.patch('udocker.NixAuthentication')
# @mock.patch('udocker.LocalRepository')
# def test_26__create_user(self, mock_local, mock_nix, mock_msg,
# mock_futil, mock_groups):
# """Test26 ExecutionEngineCommon()._create_user()."""
# self._init()
# mock_msg.level = 0
# container_auth = udocker.NixAuthentication("", "")
# container_auth.passwd_file = ""
# container_auth.group_file = ""
# host_auth = udocker.NixAuthentication("", "")
# udocker.Config.uid = 1000
# udocker.Config.gid = 1000
# mock_nix.return_value.add_user.return_value = False
# ex_eng = udocker.ExecutionEngineCommon(mock_local)
# ex_eng.opt["uid"] = ""
# ex_eng.opt["gid"] = ""
# ex_eng.opt["user"] = ""
# ex_eng.opt["home"] = ""
# ex_eng.opt["shell"] = ""
# ex_eng.opt["gecos"] = ""
# status = ex_eng._create_user(container_auth, host_auth)
# self.assertFalse(status)
# self.assertEqual(ex_eng.opt["uid"], "1000")
# self.assertEqual(ex_eng.opt["gid"], "1000")
# self.assertEqual(ex_eng.opt["user"], "udoc1000")
# self.assertEqual(ex_eng.opt["home"], "/home/udoc1000")
# self.assertEqual(ex_eng.opt["shell"], "/bin/sh")
# self.assertEqual(ex_eng.opt["gecos"], "*UDOCKER*")
# mock_nix.return_value.add_user.return_value = False
# ex_eng = udocker.ExecutionEngineCommon(mock_local)
# ex_eng.opt["uid"] = "60000"
# ex_eng.opt["gid"] = "60000"
# ex_eng.opt["user"] = "someuser"
# ex_eng.opt["home"] = ""
# ex_eng.opt["shell"] = "/bin/false"
# ex_eng.opt["gecos"] = "*XXX*"
# status = ex_eng._create_user(container_auth, host_auth)
# self.assertFalse(status)
# self.assertEqual(ex_eng.opt["uid"], "60000")
# self.assertEqual(ex_eng.opt["gid"], "60000")
# self.assertEqual(ex_eng.opt["user"], "someuser")
# self.assertEqual(ex_eng.opt["home"], "/home/someuser")
# self.assertEqual(ex_eng.opt["shell"], "/bin/false")
# self.assertEqual(ex_eng.opt["gecos"], "*XXX*")
# mock_nix.return_value.add_user.return_value = False
# ex_eng = udocker.ExecutionEngineCommon(mock_local)
# ex_eng.opt["uid"] = "60000"
# ex_eng.opt["gid"] = "60000"
# ex_eng.opt["user"] = "someuser"
# ex_eng.opt["home"] = "/home/batata"
# ex_eng.opt["shell"] = "/bin/false"
# ex_eng.opt["gecos"] = "*XXX*"
# status = ex_eng._create_user(container_auth, host_auth)
# self.assertFalse(status)
# self.assertEqual(ex_eng.opt["uid"], "60000")
# self.assertEqual(ex_eng.opt["gid"], "60000")
# self.assertEqual(ex_eng.opt["user"], "someuser")
# self.assertEqual(ex_eng.opt["home"], "/home/batata")
# self.assertEqual(ex_eng.opt["shell"], "/bin/false")
# self.assertEqual(ex_eng.opt["gecos"], "*XXX*")
# mock_nix.return_value.add_user.return_value = True
# mock_nix.return_value.get_group.return_value = ("", "", "")
# mock_nix.return_value.add_group.return_value = True
# mock_groups.return_value = ()
# ex_eng = udocker.ExecutionEngineCommon(mock_local)
# ex_eng.opt["uid"] = "60000"
# ex_eng.opt["gid"] = "60000"
# ex_eng.opt["user"] = "someuser"
# ex_eng.opt["home"] = "/home/batata"
# ex_eng.opt["shell"] = "/bin/false"
# ex_eng.opt["gecos"] = "*XXX*"
# status = ex_eng._create_user(container_auth, host_auth)
# self.assertTrue(status)
# self.assertEqual(ex_eng.opt["uid"], "60000")
# self.assertEqual(ex_eng.opt["gid"], "60000")
# self.assertEqual(ex_eng.opt["user"], "someuser")
# self.assertEqual(ex_eng.opt["home"], "/home/batata")
# self.assertEqual(ex_eng.opt["shell"], "/bin/false")
# self.assertEqual(ex_eng.opt["gecos"], "*XXX*")
# self.assertEqual(ex_eng.opt["hostauth"], True)
# mgroup = mock_nix.return_value.get_group
# self.assertTrue(mgroup.called_once_with("60000"))
# mock_nix.return_value.add_user.return_value = True
# mock_nix.return_value.get_group.return_value = ("", "", "")
# mock_nix.return_value.add_group.return_value = True
# mock_groups.return_value = (80000,)
# ex_eng = udocker.ExecutionEngineCommon(mock_local)
# ex_eng.opt["uid"] = "60000"
# ex_eng.opt["gid"] = "60000"
# ex_eng.opt["user"] = "someuser"
# ex_eng.opt["home"] = "/home/batata"
# ex_eng.opt["shell"] = "/bin/false"
# ex_eng.opt["gecos"] = "*XXX*"
# status = ex_eng._create_user(container_auth, host_auth)
# self.assertTrue(status)
# self.assertEqual(ex_eng.opt["uid"], "60000")
# self.assertEqual(ex_eng.opt["gid"], "60000")
# self.assertEqual(ex_eng.opt["user"], "someuser")
# self.assertEqual(ex_eng.opt["home"], "/home/batata")
# self.assertEqual(ex_eng.opt["shell"], "/bin/false")
# self.assertEqual(ex_eng.opt["gecos"], "*XXX*")
# self.assertEqual(ex_eng.opt["hostauth"], True)
# ggroup = mock_nix.return_value.get_group
# self.assertTrue(ggroup.called_once_with("60000"))
# agroup = mock_nix.return_value.add_group
# self.assertTrue(agroup.called_once_with("G80000", "80000"))
    @mock.patch('udocker.Msg')
    @mock.patch('udocker.os.path.basename')
    @mock.patch('udocker.LocalRepository')
    def test_27__run_banner(self, mock_local, mock_base, mock_msg):
        """Test27 ExecutionEngineCommon()._run_banner()."""
        self._init()
        mock_msg.level = 0
        ex_eng = udocker.ExecutionEngineCommon(mock_local)
        ex_eng._run_banner("/bin/bash")
        # NOTE(review): container_id is assigned only after _run_banner()
        # already ran, so it cannot influence the call above.
        ex_eng.container_id = "CONTAINERID"
        # NOTE(review): Mock.called_once_with is not a real assertion --
        # accessing it auto-creates a truthy child mock, so assertTrue
        # always passes.  mock_base.assert_called_once_with("/bin/bash")
        # would be the meaningful check; left as-is to preserve behavior.
        self.assertTrue(mock_base.called_once_with("/bin/bash"))
    @mock.patch('udocker.Config')
    @mock.patch('udocker.os')
    @mock.patch('udocker.LocalRepository')
    def test_28__env_cleanup_dict(self, mock_local, mock_os, mock_config):
        """Test28 ExecutionEngineCommon()._env_cleanup()."""
        # NOTE(review): rebinding udocker.Config directly leaks the mock
        # into subsequent tests; the @mock.patch decorator above already
        # patches it for the duration of this test only.
        udocker.Config = mock_config
        udocker.Config.valid_host_env = ("HOME",)
        mock_os.environ = {'HOME': '/', 'USERNAME': 'user', }
        ex_eng = udocker.ExecutionEngineCommon(mock_local)
        ex_eng._run_env_cleanup_dict()
        # Only variables listed in valid_host_env survive the cleanup.
        self.assertEqual(mock_os.environ, {'HOME': '/', })
@mock.patch('udocker.Config')
@mock.patch('udocker.os')
@mock.patch('udocker.LocalRepository')
def test_29__run_env_cleanup_list(self, mock_local, mock_os, mock_config):
"""Test29 ExecutionEngineCommon()._run_env_cleanup_list()."""
udocker.Config | |
self.w /= sum(self.w)
self.w = flip(self.w)
### Calculation range
self.shift = 5*tau #Number of days to start calculation before the frist Rt.
self.n = min(self.m, n) #Number of Rt's to calculate, from the present into the past.
self.N = n+self.shift #Total range (into the past) for calculation
#If self.N is larger than the whole data set
if self.N > (self.m-1):
self.n -= self.N - (self.m-1)#Reduce self.n accordingly
self.N = n+self.shift
if self.n < 0:
raise ValueError("ERROR: Not enough data to calculate Rts: 5*tau > %d (data size)" % (self.m,))
print("Not enough data to calculate Rts: 5*tau + n > %d (data size)" % (self.m,))
print("Reducing to n=%d" % (self.n,))
for t in range(self.n):
if self.data[self.m-(self.n - t)] >= 10:
break
else:
self.n -= 1 #Reduce n if the counts have not reached 10
print("Incidence below 10, reducing n to %d." % (self.n,))
self.N = self.n+self.shift
### Setting prior parameters
self.delta = 1-(1/tau)
self.tau = tau
self.pred = pred
self.g = 1 #exp(-2/tau)
self.m0 = m0
self.c_a_0 = c_a_0
self.w_a_t = w_a_t
self.n0 = n0
self.s0 = s0
"""
### Calculation range
for t in range( self.m - self.N, self.m):
if sum(self.data[:t]) <= 10:# Rt calculated only for more than 10 counts
print("Not more than 10 counts for day %d" % (-t,))
self.n -= 1
self.N = min(self.m, n+self.shift)
"""
### We calculate all gammas previously:
self.Gammak = zeros(self.m)
for s in range(self.m):
self.Gammak[s] = self.data[:s] @ self.w[(self.m-s):] #\Gamma_k
### Calculate the log data:
### We add 1e-6 for convinience, since very early data may be zero
### This makes no diference at the end.
self.y = log(self.data + 1e-6) - log(self.Gammak + 1e-6)
    def sim_data( self, R, I0):
        """Simulate incidence data from reproduction numbers R and initial
        incidence I0.  Placeholder -- not implemented."""
        pass
    def CalculateRts( self, q=[10,25,50,75,90]):
        """Calculate the posterior distributions and summaries of the Rt's.

        q: list of percentiles (default [10,25,50,75,90]) used to summarise
           the posterior of each R_t.  If q is a single integer, instead
           return a simulation of size q for each R_t.
        Results are stored in self.rts (and self.rts_pred when self.pred>0).
        NOTE(review): the mutable default q=[...] is only read, never
        mutated, so sharing it across calls is harmless here.
        """
        if isinstance( q, list): ## Return a list of quantiles
            q = array(q)/100
            self.rts = zeros(( len(q), self.n))
            self.rts_pred = zeros((len(q), self.pred))
            simulate = False
        else: ## If q is a single integer, return a simulation of the Rts of size q, for each Rt
            self.rts = zeros(( q, self.n))
            self.rts_pred = zeros(( q, self.pred))
            simulate = True
        self.q = q
        self.simulate = simulate
        ### nt, at, rt, qt, st, mt, ct # hiperparameters
        ###  0   1   2   3   4   5   6
        self.hiper = zeros(( self.N+1, 7))
        ### Initial values; -1 marks the entries undefined at t=0.
        self.hiper[0,:] = self.n0, -1, -1, -1, self.s0, self.m0, self.s0*self.c_a_0
        # Forward filtering pass over the whole range self.N.
        for t in range( self.N ):
            r_a_t = self.g**2 * self.hiper[t,6] + self.w_a_t #r^*_t
            At = r_a_t/(r_a_t + 1)
            self.hiper[t+1,0] = self.delta*self.hiper[t,0] + 1 #nt
            self.hiper[t+1,1] = self.g * self.hiper[t,5] #at
            # One-step forecast error for the log-ratio observation.
            et = self.y[self.m-(self.N - t)] - self.hiper[t+1,1]
            self.hiper[t+1,2] = self.hiper[t,4]*r_a_t #rt
            self.hiper[t+1,3] = self.hiper[t,4]*(r_a_t + 1) #qt
            # st:
            self.hiper[t+1,4] = self.delta*(self.hiper[t,0]/self.hiper[t+1,0])*self.hiper[t,4] +\
                self.hiper[t,4]/self.hiper[t+1,0] * (et**2/self.hiper[t+1,3])
            self.hiper[t+1,5] = self.hiper[t+1,1] + At*et #mt
            #ct
            self.hiper[t+1,6] = (self.hiper[t+1,4]/self.hiper[t,4]) * (self.hiper[t+1,2]- self.hiper[t+1,3]*At**2)
            # Store summaries only once past the warm-up window self.shift.
            if t >= self.shift:
                if self.simulate:
                    self.rts[:,t-self.shift] = exp(t_student.rvs( size=self.q, df=self.hiper[t+1,0], loc=self.hiper[t+1,5], scale=sqrt(self.hiper[t+1,6]) ))
                else:
                    self.rts[:,t-self.shift] = exp(t_student.ppf( q=self.q, df=self.hiper[t+1,0], loc=self.hiper[t+1,5], scale=sqrt(self.hiper[t+1,6]) ))
        # k-step-ahead predictions of R_t from the final filtered state.
        if self.pred>0:
            t = self.N
            self.pred_hiper = zeros(( self.pred, 2)) # a_t^k and r_t^k
            for k in range(self.pred):
                self.pred_hiper[k,0] = self.g**(k+1) * self.hiper[t,5] #a_t^k
                # g == 1 needs the limit form of the geometric sum below.
                if self.g == 1:
                    self.pred_hiper[k,1] = self.g**(2*(k+1)) * self.hiper[t,6] + self.w_a_t * (k+1) #r_t^k
                else:
                    self.pred_hiper[k,1] = self.g**(2*(k+1)) * self.hiper[t,6] + self.w_a_t * ((1-self.g**(2*(k+1)))/(1-self.g**2)) #r_t^k
                if self.simulate:
                    self.rts_pred[:,k] = exp(t_student.rvs( size=self.q, df=self.hiper[t,0], loc=self.pred_hiper[k,0], scale=sqrt(self.pred_hiper[k,1]) ))
                else:
                    self.rts_pred[:,k] = exp(t_student.ppf( q=self.q, df=self.hiper[t,0], loc=self.pred_hiper[k,0], scale=sqrt(self.pred_hiper[k,1]) ))
def PlotPostRt( self, i, ax=None, color='black'):
"""Plot the i-th Rt posterior distribution."""
if ax == None:
fig, ax = subplots(figsize=( 5,5) )
t = i+self.tau
y = linspace( 0.01, 4, num=500)
### Transformed pdf using the Jacobian y^{-1}
pdf = (y**-1) * t_student.pdf( log(y), df=self.hiper[t+1,0], loc=self.hiper[t+1,5], scale=sqrt(self.hiper[t+1,6]) )
ax.plot( y, pdf, '-', color=color)
ax.set_ylabel("Density")
ax.set_xlabel(r'$R_{%d}$' % (i))
    def PlotRts( self, color='blue', median_color='red', x_jump=1, plot_area=[0.4,2.2], alpha=0.25, csv_fnam=None, ax=None):
        """Makes a board with the Rt evolution.

        csv_fnam: optional file name to save the Rts info: workdir/csv/csv_fnam.csv
        ax: axis handle for the plot; if None, one is created and returned.
        x_jump: put ticks every x_jump days.
        plot_area: ([0.4,2.2]), interval with the y-axis (Rt values) plot area.
        Requires CalculateRts() to have been run (uses self.rts/self.rts_pred).
        NOTE(review): "ax == None" / "csv_fnam != None" below should
        idiomatically be "is None" / "is not None".
        """
        #self.rts already been produced after running CalculateRts
        last_date = self.init_date + timedelta(self.m)
        if ax == None:
            fig, ax = subplots(figsize=( self.n/3, 3.5) )
        ### Plot the Rt's posterior quantiles: outer bar spans q[0]-q[4],
        ### inner bar q[1]-q[3], horizontal line at the median q[2].
        for i in range(self.n):
            h = self.rts[:,i]
            ax.bar( x=i, bottom=h[0], height=h[4]-h[0], width=0.9, color=color, alpha=0.25)
            ax.bar( x=i, bottom=h[1], height=h[3]-h[1], width=0.9, color=color, alpha=0.25)
            ax.hlines( y=h[2], xmin=i-0.9/2, xmax=i+0.9/2, color=median_color )
        ### Plot the observed Rt's
        ax.plot( exp(self.y[self.m-self.n:]), '-', color='grey')
        ### Plot the predictions, in a lighter shade of the same color.
        if self.pred >0:
            for k in range(self.pred):
                h = self.rts_pred[:,k]
                i=self.n+k
                ax.bar( x=i, bottom=h[0], height=h[4]-h[0], width=0.9, color='light'+color, alpha=alpha)
                ax.bar( x=i, bottom=h[1], height=h[3]-h[1], width=0.9, color='light'+color, alpha=alpha)
                ax.hlines( y=h[2], xmin=i-0.9/2, xmax=i+0.9/2, color=median_color )
        ax.set_title(self.data_fnam + r", $R_t$, dist. posterior.")
        ax.set_xlabel('')
        # Date labels (dd.mm) every x_jump days, rotated for readability.
        ax.set_xticks(range(0,self.n,x_jump))
        ax.set_xticklabels([(last_date-timedelta(self.n-i)).strftime("%d.%m") for i in range(0,self.n,x_jump)], ha='right')
        ax.tick_params( which='major', axis='x', labelsize=10, labelrotation=30)
        # Reference lines at Rt = 1 (epidemic threshold), 2 and 3.
        ax.axhline(y=1, color='green')
        ax.axhline(y=2, color='red')
        ax.axhline(y=3, color='darkred')
        ax.set_ylim(plot_area)
        ax.set_yticks(arange( plot_area[0], plot_area[1], step=0.2))
        ax.tick_params( which='major', axis='y', labelsize=10)
        ax.grid(color='grey', linestyle='--', linewidth=0.5)
        #fig.tight_layout()
        if csv_fnam != None:
            days = drange( last_date-timedelta(self.n), last_date, timedelta(days=1))
            ### To save all the data for the plot,
            ### columns: year, month, day, q_05, q_25, q_50, q_75, q_95
            ###             0      1    2     3     4     5     6     7
            sv = -ones(( len(days), 3+len(self.q)))
            for i,day in enumerate(days):
                d = date.fromordinal(int(day))
                sv[ i, 0] = d.year
                sv[ i, 1] = d.month
                sv[ i, 2] = d.day
                sv[ i, 3:] = self.rts[:,i]
            q_str = ', '.join(["q_%02d" % (qunt,) for qunt in self.q])
            savetxt( self.workdir + "csv/" + csv_fnam + ".csv", sv, delimiter=', ', fmt='%.1f', header="year, month, day, " + q_str, comments='')
        return ax
##### Dictionary with general information for the metro zone or region to be analyzed:
#####   id         Name     not used  Population   init date
ZMs = { "9-01": ["Mexico city", 2, 21.942666e6, date(2020, 2, 27)],\
       "15-02": ["Toluca", 1, 2.377828e6, date(2020, 3, 7)],\
       "31-01": ["Mérida", 2, 1.237697e6, date(2020, 3, 7)],\
       "17-02": ["Cuernavaca", 1, 1.059521e6, date(2020, 3, 2)],\
       "12-01": ["Acapulco", 2, 0.919726e6, date(2020, 3, 11)],\
       "25-01": ["Culiacán", 2, 0.962871e6, date(2020, 3, 1)],\
       "23-01": ["Cancun", 2, 0.867768e6, date(2020, 3, 1)]}
### The corresponding data files have two columns separated by space, deaths and incidence.
### Each row is one day.
### The file for clave="9-01" (Mexico city) is: ../data/clave.csv etc.
if __name__=='__main__':
rcParams.update({'font.size': 14})
close('all')
#Plot the imputed serial time distribution for covid: erlang( a=3, scale=8/3 )
fig, ax = subplots( num=30, figsize=( 4.5, 3.5))
PlotFrozenDist( erlang( a=3, scale=8/3 ), ax=ax)
### Plota the erlang( a=5, scale=9/5 ) alternative
PlotFrozenDist( erlang( a=5, scale=9/5 ), color='grey', ax=ax)
ax.set_xlim((0,20))
ax.grid(color='grey', linestyle='--', linewidth=0.5)
ax.set_ylabel(r"Density")
ax.set_xlabel("days")
ax.set_title("")
fig.tight_layout()
fig.savefig("../figs/Covid19_SerialTimeDist.png")
### Plot the Rt's estimation. Only Merida, '13-01' and Mexico city, '9-01', are in the paper
claves = ['15-02', '17-02', '23-01', '25-01', '12-01', "31-01", '9-01']
n=60 ## Number of days to calculate the Rt's
trim=0 ## Number of days to cut data from the end, negative, e.g. -10, cut 10 days
x_jump = 7 ## For ploting, put ticks every x_jump days.
for i,clave in enumerate(claves):
print(clave)
### Open an instance of the Rts_AR class:
tst = Rts_AR( clave, init_date=ZMs[clave][3]+timedelta(days=4), trim=trim, pred=5, n=n)
tst.CalculateRts() # Most be called before ploting the Rt's
### Plot the Rts:
fig, ax = subplots( num=i+1, figsize=( 8, 3.5))
### Plot Cori et al (2013) Poisson model version:
PlotRts_P( '../data/%s.csv' % (clave,), init_date=ZMs[clave][3]+timedelta(days=4),\
n=tst.n, trim=trim, ax=ax, color='green', alpha=0.5, median_color='black')
### Plot ours:
tst.PlotRts( ax=ax, x_jump=x_jump, plot_area=[0.4,2.2], csv_fnam=clave)
ax.set_title("")
ax.set_ylabel(r"$R_t$")
ax.set_xlabel("")
ax.set_title(ZMs[clave][0] + ", Mexico")
fig.tight_layout()
fig.savefig("../figs/%s_Rts_AR.png" % (clave,))
if clave == '9-01':
m_max | |
# Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import sys
from mixbox.binding_utils import *
from . import cybox_common
class HiveListType(GeneratedsSuper):
    """Generated binding: a simple container for registry ``Hive`` entries.

    Follows the generateDS export/build conventions used throughout this
    module; instances serialise to an XML ``HiveListType`` element whose
    children are ``Hive`` elements.
    """
    subclass = None    # optional override honoured by factory()
    superclass = None

    def __init__(self, Hive=None):
        # Allocate a fresh list per instance so callers never share state.
        self.Hive = [] if Hive is None else Hive

    @staticmethod
    def factory(*args_, **kwargs_):
        # Build the registered subclass when one is installed, else this class.
        cls = HiveListType.subclass or HiveListType
        return cls(*args_, **kwargs_)

    def get_Hive(self):
        return self.Hive

    def set_Hive(self, Hive):
        self.Hive = Hive

    def add_Hive(self, value):
        self.Hive.append(value)

    def insert_Hive(self, index, value):
        # NOTE: generated code replaces the element at *index* (it does not
        # shift items like list.insert would).
        self.Hive[index] = value

    def validate_StringObjectPropertyType(self, value):
        # Validate type cybox_common.StringObjectPropertyType, a restriction on None.
        pass

    def hasContent_(self):
        # Non-empty Hive list is the only content this type carries.
        return bool(self.Hive)

    def export(self, lwrite, level, namespace_='WinSystemRestoreObj:', name_='HiveListType', namespacedef_='', pretty_print=True):
        """Write this object as an XML element via the *lwrite* callable."""
        eol_ = '\n' if pretty_print else ''
        showIndent(lwrite, level, pretty_print)
        lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(lwrite, level, already_processed, namespace_, name_='HiveListType')
        if not self.hasContent_():
            # Empty element: self-closing tag.
            lwrite('/>%s' % (eol_, ))
            return
        lwrite('>%s' % (eol_, ))
        self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print)
        showIndent(lwrite, level, pretty_print)
        lwrite('</%s%s>%s' % (namespace_, name_, eol_))

    def exportAttributes(self, lwrite, level, already_processed, namespace_='WinSystemRestoreObj:', name_='HiveListType'):
        # This type defines no XML attributes of its own.
        pass

    def exportChildren(self, lwrite, level, namespace_='WinSystemRestoreObj:', name_='HiveListType', fromsubclass_=False, pretty_print=True):
        # Emit one <Hive> child per stored entry.
        for hive_entry in self.Hive:
            hive_entry.export(lwrite, level, 'WinSystemRestoreObj:', name_='Hive', pretty_print=pretty_print)

    def build(self, node):
        """Populate this object from an already-parsed XML *node*."""
        self.__sourcenode__ = node
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child_ in node:
            nodeName_ = Tag_pattern_.match(child_.tag).groups()[-1]
            self.buildChildren(child_, node, nodeName_)

    def buildAttributes(self, node, attrs, already_processed):
        # No attributes to read for this type.
        pass

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Hive':
            obj_ = cybox_common.StringObjectPropertyType.factory()
            obj_.build(child_)
            self.Hive.append(obj_)
# end class HiveListType
class ChangeLogEntryTypeType(cybox_common.BaseObjectPropertyType):
    """ChangeLogEntryTypeType types, via a union of the
    ChangeLogEntryTypeEnum type and the atomic xs:string type. Its
    base type is the CybOX Core cybox_common.BaseObjectPropertyType, for
    permitting complex (i.e. regular-expression based)
    specifications. This attribute is optional and specifies the
    expected type for the value of the specified property.

    Generated binding class (generateDS conventions): export/build
    delegate most work to the BaseObjectPropertyType superclass hooks.
    """
    subclass = None  # optional override honoured by factory()
    superclass = cybox_common.BaseObjectPropertyType
    def __init__(self, obfuscation_algorithm_ref=None, refanging_transform_type=None, has_changed=None, delimiter='##comma##', pattern_type=None, datatype='string', refanging_transform=None, is_case_sensitive=True, bit_mask=None, appears_random=None, observed_encoding=None, defanging_algorithm_ref=None, is_obfuscated=None, regex_syntax=None, apply_condition='ANY', trend=None, idref=None, is_defanged=None, id=None, condition=None, valueOf_=None):
        # Forward every shared attribute to the superclass initialiser;
        # only datatype and the element text (valueOf_) are handled here.
        super(ChangeLogEntryTypeType, self).__init__(obfuscation_algorithm_ref, refanging_transform_type, has_changed, delimiter, pattern_type, datatype, refanging_transform, is_case_sensitive, bit_mask, appears_random, observed_encoding, defanging_algorithm_ref, is_obfuscated, regex_syntax, apply_condition, trend, idref, is_defanged, id, condition, valueOf_)
        self.datatype = _cast(None, datatype)
        self.valueOf_ = valueOf_
    def factory(*args_, **kwargs_):
        # Build the registered subclass when one is installed, else this class.
        if ChangeLogEntryTypeType.subclass:
            return ChangeLogEntryTypeType.subclass(*args_, **kwargs_)
        else:
            return ChangeLogEntryTypeType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_datatype(self): return self.datatype
    def set_datatype(self, datatype): self.datatype = datatype
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def hasContent_(self):
        # Content is either local element text or anything the superclass holds.
        if (
            self.valueOf_ or
            super(ChangeLogEntryTypeType, self).hasContent_()
            ):
            return True
        else:
            return False
    def export(self, lwrite, level, namespace_='WinSystemRestoreObj:', name_='ChangeLogEntryTypeType', namespacedef_='', pretty_print=True):
        """Write this object as an XML element via the *lwrite* callable."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        showIndent(lwrite, level, pretty_print)
        lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(lwrite, level, already_processed, namespace_, name_='ChangeLogEntryTypeType')
        if self.hasContent_():
            # Simple-content element: text is emitted inline, with no
            # newline after the opening tag.
            lwrite('>')
            lwrite(quote_xml(self.valueOf_))
            self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print)
            lwrite('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            lwrite('/>%s' % (eol_, ))
    def exportAttributes(self, lwrite, level, already_processed, namespace_='WinSystemRestoreObj:', name_='ChangeLogEntryTypeType'):
        # Superclass writes the shared attributes; datatype is local.
        super(ChangeLogEntryTypeType, self).exportAttributes(lwrite, level, already_processed, namespace_, name_='ChangeLogEntryTypeType')
        if self.datatype is not None:
            lwrite(' datatype=%s' % (quote_attrib(self.datatype), ))
    def exportChildren(self, lwrite, level, namespace_='WinSystemRestoreObj:', name_='ChangeLogEntryTypeType', fromsubclass_=False, pretty_print=True):
        # No child elements of its own; defer to the superclass.
        super(ChangeLogEntryTypeType, self).exportChildren(lwrite, level, 'WinSystemRestoreObj:', name_, True, pretty_print=pretty_print)
        pass
    def build(self, node):
        """Populate this object from an already-parsed XML *node*."""
        self.__sourcenode__ = node
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        # Element text (including tail text of children) becomes valueOf_.
        self.valueOf_ = get_all_text_(node)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('datatype', node)
        if value is not None:
            self.datatype = value
        super(ChangeLogEntryTypeType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # No child elements to build for this type.
        pass
# end class ChangeLogEntryTypeType
class WindowsSystemRestoreObjectType(cybox_common.ObjectPropertiesType):
"""The WindowsSystemRestoreObjectType is intended to characterize
Windows system restore points."""
subclass = None
superclass = cybox_common.ObjectPropertiesType
def __init__(self, object_reference=None, Custom_Properties=None, xsi_type=None, Restore_Point_Description=None, Restore_Point_Full_Path=None, Restore_Point_Name=None, Restore_Point_Type=None, ACL_Change_SID=None, ACL_Change_Username=None, Backup_File_Name=None, Change_Event=None, ChangeLog_Entry_Flags=None, ChangeLog_Entry_Sequence_Number=None, ChangeLog_Entry_Type=None, Change_Log_File_Name=None, Created=None, File_Attributes=None, New_File_Name=None, Original_File_Name=None, Original_Short_File_Name=None, Process_Name=None, Registry_Hive_List=None):
super(WindowsSystemRestoreObjectType, self).__init__(object_reference, Custom_Properties, xsi_type )
self.Restore_Point_Description = Restore_Point_Description
self.Restore_Point_Full_Path = Restore_Point_Full_Path
self.Restore_Point_Name = Restore_Point_Name
self.Restore_Point_Type = Restore_Point_Type
self.ACL_Change_SID = ACL_Change_SID
self.ACL_Change_Username = ACL_Change_Username
self.Backup_File_Name = Backup_File_Name
self.Change_Event = Change_Event
self.ChangeLog_Entry_Flags = ChangeLog_Entry_Flags
self.ChangeLog_Entry_Sequence_Number = ChangeLog_Entry_Sequence_Number
self.ChangeLog_Entry_Type = ChangeLog_Entry_Type
self.Change_Log_File_Name = Change_Log_File_Name
self.Created = Created
self.File_Attributes = File_Attributes
self.New_File_Name = New_File_Name
self.Original_File_Name = Original_File_Name
self.Original_Short_File_Name = Original_Short_File_Name
self.Process_Name = Process_Name
self.Registry_Hive_List = Registry_Hive_List
def factory(*args_, **kwargs_):
if WindowsSystemRestoreObjectType.subclass:
return WindowsSystemRestoreObjectType.subclass(*args_, **kwargs_)
else:
return WindowsSystemRestoreObjectType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Restore_Point_Description(self): return self.Restore_Point_Description
def set_Restore_Point_Description(self, Restore_Point_Description): self.Restore_Point_Description = Restore_Point_Description
def validate_StringObjectPropertyType(self, value):
# Validate type cybox_common.StringObjectPropertyType, a restriction on None.
pass
def get_Restore_Point_Full_Path(self): return self.Restore_Point_Full_Path
def set_Restore_Point_Full_Path(self, Restore_Point_Full_Path): self.Restore_Point_Full_Path = Restore_Point_Full_Path
def get_Restore_Point_Name(self): return self.Restore_Point_Name
def set_Restore_Point_Name(self, Restore_Point_Name): self.Restore_Point_Name = Restore_Point_Name
def get_Restore_Point_Type(self): return self.Restore_Point_Type
def set_Restore_Point_Type(self, Restore_Point_Type): self.Restore_Point_Type = Restore_Point_Type
def get_ACL_Change_SID(self): return self.ACL_Change_SID
def set_ACL_Change_SID(self, ACL_Change_SID): self.ACL_Change_SID = ACL_Change_SID
def get_ACL_Change_Username(self): return self.ACL_Change_Username
def set_ACL_Change_Username(self, ACL_Change_Username): self.ACL_Change_Username = ACL_Change_Username
def get_Backup_File_Name(self): return self.Backup_File_Name
def set_Backup_File_Name(self, Backup_File_Name): self.Backup_File_Name = Backup_File_Name
def get_Change_Event(self): return self.Change_Event
def set_Change_Event(self, Change_Event): self.Change_Event = Change_Event
def validate_ChangeLogEntryTypeType(self, value):
# Validate type ChangeLogEntryTypeType, a restriction on None.
pass
def get_ChangeLog_Entry_Flags(self): return self.ChangeLog_Entry_Flags
def set_ChangeLog_Entry_Flags(self, ChangeLog_Entry_Flags): self.ChangeLog_Entry_Flags = ChangeLog_Entry_Flags
def get_ChangeLog_Entry_Sequence_Number(self): return self.ChangeLog_Entry_Sequence_Number
def set_ChangeLog_Entry_Sequence_Number(self, ChangeLog_Entry_Sequence_Number): self.ChangeLog_Entry_Sequence_Number = ChangeLog_Entry_Sequence_Number
def validate_LongObjectPropertyType(self, value):
# Validate type cybox_common.LongObjectPropertyType, a restriction on None.
pass
def get_ChangeLog_Entry_Type(self): return self.ChangeLog_Entry_Type
def set_ChangeLog_Entry_Type(self, ChangeLog_Entry_Type): self.ChangeLog_Entry_Type = ChangeLog_Entry_Type
def get_Change_Log_File_Name(self): return self.Change_Log_File_Name
def set_Change_Log_File_Name(self, Change_Log_File_Name): self.Change_Log_File_Name = Change_Log_File_Name
def get_Created(self): return self.Created
def set_Created(self, Created): self.Created = Created
def validate_DateTimeObjectPropertyType(self, value):
# Validate type cybox_common.DateTimeObjectPropertyType, a restriction on None.
pass
def get_File_Attributes(self): return self.File_Attributes
def set_File_Attributes(self, File_Attributes): self.File_Attributes = File_Attributes
def get_New_File_Name(self): return self.New_File_Name
def set_New_File_Name(self, New_File_Name): self.New_File_Name = New_File_Name
def get_Original_File_Name(self): return self.Original_File_Name
def set_Original_File_Name(self, Original_File_Name): self.Original_File_Name = Original_File_Name
def get_Original_Short_File_Name(self): return self.Original_Short_File_Name
def set_Original_Short_File_Name(self, Original_Short_File_Name): self.Original_Short_File_Name = Original_Short_File_Name
def get_Process_Name(self): return self.Process_Name
def set_Process_Name(self, Process_Name): self.Process_Name = Process_Name
def get_Registry_Hive_List(self): return self.Registry_Hive_List
def set_Registry_Hive_List(self, Registry_Hive_List): self.Registry_Hive_List = Registry_Hive_List
def hasContent_(self):
if (
self.Restore_Point_Description is not None or
self.Restore_Point_Full_Path is not None or
self.Restore_Point_Name is not None or
self.Restore_Point_Type is not None or
self.ACL_Change_SID is not None or
self.ACL_Change_Username is not None or
self.Backup_File_Name is not None or
self.Change_Event is not None or
self.ChangeLog_Entry_Flags is not None or
self.ChangeLog_Entry_Sequence_Number is not None or
self.ChangeLog_Entry_Type is not None or
self.Change_Log_File_Name is not None or
self.Created is not None or
self.File_Attributes is not None or
self.New_File_Name is not None or
self.Original_File_Name is not None or
self.Original_Short_File_Name is not None or
self.Process_Name is not None or
self.Registry_Hive_List is not None or
super(WindowsSystemRestoreObjectType, self).hasContent_()
):
return True
else:
return False
def export(self, lwrite, level, namespace_='WinSystemRestoreObj:', name_='WindowsSystemRestoreObjectType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='WindowsSystemRestoreObjectType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s%s>%s' % (namespace_, name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='WinSystemRestoreObj:', name_='WindowsSystemRestoreObjectType'):
super(WindowsSystemRestoreObjectType, self).exportAttributes(lwrite, level, already_processed, namespace_, name_='WindowsSystemRestoreObjectType')
def exportChildren(self, lwrite, level, namespace_='WinSystemRestoreObj:', name_='WindowsSystemRestoreObjectType', fromsubclass_=False, pretty_print=True):
super(WindowsSystemRestoreObjectType, self).exportChildren(lwrite, level, 'WinSystemRestoreObj:', name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Restore_Point_Description is not None:
self.Restore_Point_Description.export(lwrite, level, 'WinSystemRestoreObj:', name_='Restore_Point_Description', pretty_print=pretty_print)
if self.Restore_Point_Full_Path is not None:
self.Restore_Point_Full_Path.export(lwrite, level, 'WinSystemRestoreObj:', name_='Restore_Point_Full_Path', pretty_print=pretty_print)
if self.Restore_Point_Name is not None:
self.Restore_Point_Name.export(lwrite, level, 'WinSystemRestoreObj:', name_='Restore_Point_Name', pretty_print=pretty_print)
if self.Restore_Point_Type is not None:
self.Restore_Point_Type.export(lwrite, level, 'WinSystemRestoreObj:', name_='Restore_Point_Type', pretty_print=pretty_print)
if self.ACL_Change_SID is not None:
self.ACL_Change_SID.export(lwrite, level, 'WinSystemRestoreObj:', name_='ACL_Change_SID', pretty_print=pretty_print)
if self.ACL_Change_Username is not None:
self.ACL_Change_Username.export(lwrite, level, 'WinSystemRestoreObj:', name_='ACL_Change_Username', pretty_print=pretty_print)
if self.Backup_File_Name is not None:
self.Backup_File_Name.export(lwrite, level, 'WinSystemRestoreObj:', name_='Backup_File_Name', pretty_print=pretty_print)
if self.Change_Event is not None:
self.Change_Event.export(lwrite, level, 'WinSystemRestoreObj:', name_='Change_Event', pretty_print=pretty_print)
if self.ChangeLog_Entry_Flags is not None:
self.ChangeLog_Entry_Flags.export(lwrite, level, 'WinSystemRestoreObj:', name_='ChangeLog_Entry_Flags', pretty_print=pretty_print)
if self.ChangeLog_Entry_Sequence_Number is not None:
self.ChangeLog_Entry_Sequence_Number.export(lwrite, level, 'WinSystemRestoreObj:', name_='ChangeLog_Entry_Sequence_Number', pretty_print=pretty_print)
if self.ChangeLog_Entry_Type is not None:
self.ChangeLog_Entry_Type.export(lwrite, level, 'WinSystemRestoreObj:', name_='ChangeLog_Entry_Type', pretty_print=pretty_print)
if self.Change_Log_File_Name is not None:
self.Change_Log_File_Name.export(lwrite, level, 'WinSystemRestoreObj:', name_='Change_Log_File_Name', pretty_print=pretty_print)
if self.Created is not None:
self.Created.export(lwrite, level, 'WinSystemRestoreObj:', name_='Created', pretty_print=pretty_print)
if self.File_Attributes is not None:
self.File_Attributes.export(lwrite, level, 'WinSystemRestoreObj:', name_='File_Attributes', pretty_print=pretty_print)
if self.New_File_Name is not None:
self.New_File_Name.export(lwrite, level, 'WinSystemRestoreObj:', name_='New_File_Name', pretty_print=pretty_print)
if self.Original_File_Name is not None:
| |
# repo: lovewsy/patrace
##########################################################################
#
# Copyright 2011 <NAME>
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################
'''Describe GL parameters.'''
from .stdapi import *
from .gltypes import *
# Shorthands for the types used in the ``parameters`` table below.
X = None # To be determined, merely an enum
B = GLboolean   # boolean state value
I = GLint       # 32-bit signed integer
I64 = GLint64   # 64-bit signed integer
E = GLenum      # enumerated value
F = GLfloat     # single-precision float
D = GLdouble    # double-precision float
P = OpaquePointer(Void)  # opaque pointer (array pointers, callbacks, ...)
S = CString     # NUL-terminated string (e.g. GL_VENDOR)
H = GLhandleARB # ARB shader/program object handle
# An underscore prefix (e.g., _glGet) is used to skip automatic code generation
# for parameters that are obtained through other ways.
parameters = [
# (functions, type, count, name) # value
("", X, 1, "GL_ZERO"), # 0x0000
("", X, 1, "GL_ONE"), # 0x0001
# These are enumerated separately in GLenum_mode
#("", X, 1, "GL_POINTS"), # 0x0000
#("", X, 1, "GL_LINES"), # 0x0001
#("", X, 1, "GL_LINE_LOOP"), # 0x0002
#("", X, 1, "GL_LINE_STRIP"), # 0x0003
#("", X, 1, "GL_TRIANGLES"), # 0x0004
#("", X, 1, "GL_TRIANGLE_STRIP"), # 0x0005
#("", X, 1, "GL_TRIANGLE_FAN"), # 0x0006
("", X, 1, "GL_ADD"), # 0x0104
("", X, 1, "GL_NEVER"), # 0x0200
("", X, 1, "GL_LESS"), # 0x0201
("", X, 1, "GL_EQUAL"), # 0x0202
("", X, 1, "GL_LEQUAL"), # 0x0203
("", X, 1, "GL_GREATER"), # 0x0204
("", X, 1, "GL_NOTEQUAL"), # 0x0205
("", X, 1, "GL_GEQUAL"), # 0x0206
("", X, 1, "GL_ALWAYS"), # 0x0207
("", X, 1, "GL_SRC_COLOR"), # 0x0300
("", X, 1, "GL_ONE_MINUS_SRC_COLOR"), # 0x0301
("", X, 1, "GL_SRC_ALPHA"), # 0x0302
("", X, 1, "GL_ONE_MINUS_SRC_ALPHA"), # 0x0303
("", X, 1, "GL_DST_ALPHA"), # 0x0304
("", X, 1, "GL_ONE_MINUS_DST_ALPHA"), # 0x0305
("", X, 1, "GL_DST_COLOR"), # 0x0306
("", X, 1, "GL_ONE_MINUS_DST_COLOR"), # 0x0307
("", X, 1, "GL_SRC_ALPHA_SATURATE"), # 0x0308
("", X, 1, "GL_FRONT"), # 0x0404
("", X, 1, "GL_BACK"), # 0x0405
("", X, 1, "GL_FRONT_AND_BACK"), # 0x0408
("", X, 1, "GL_INVALID_ENUM"), # 0x0500
("", X, 1, "GL_INVALID_VALUE"), # 0x0501
("", X, 1, "GL_INVALID_OPERATION"), # 0x0502
("", X, 1, "GL_STACK_OVERFLOW"), # 0x0503
("", X, 1, "GL_STACK_UNDERFLOW"), # 0x0504
("", X, 1, "GL_OUT_OF_MEMORY"), # 0x0505
("", X, 1, "GL_INVALID_FRAMEBUFFER_OPERATION"), # 0x0506
("", X, 1, "GL_EXP"), # 0x0800
("", X, 1, "GL_EXP2"), # 0x0801
("", X, 1, "GL_CW"), # 0x0900
("", X, 1, "GL_CCW"), # 0x0901
("glGet", F, 4, "GL_CURRENT_COLOR"), # 0x0B00
("glGet", F, 3, "GL_CURRENT_NORMAL"), # 0x0B02
("glGet", F, 4, "GL_CURRENT_TEXTURE_COORDS"), # 0x0B03
("glGet", B, 1, "GL_POINT_SMOOTH"), # 0x0B10
("glGet", F, 1, "GL_POINT_SIZE"), # 0x0B11
("glGet", B, 1, "GL_LINE_SMOOTH"), # 0x0B20
("glGet", F, 1, "GL_LINE_WIDTH"), # 0x0B21
("glGet", B, 1, "GL_CULL_FACE"), # 0x0B44
("glGet", E, 1, "GL_CULL_FACE_MODE"), # 0x0B45
("glGet", E, 1, "GL_FRONT_FACE"), # 0x0B46
("glGet", B, 1, "GL_LIGHTING"), # 0x0B50
("glGet", B, 1, "GL_LIGHT_MODEL_TWO_SIDE"), # 0x0B52
("glGet", F, 4, "GL_LIGHT_MODEL_AMBIENT"), # 0x0B53
("glGet", E, 1, "GL_SHADE_MODEL"), # 0x0B54
("glGet", B, 1, "GL_COLOR_MATERIAL"), # 0x0B57
("glGet", B, 1, "GL_FOG"), # 0x0B60
("glGet", F, 1, "GL_FOG_DENSITY"), # 0x0B62
("glGet", F, 1, "GL_FOG_START"), # 0x0B63
("glGet", F, 1, "GL_FOG_END"), # 0x0B64
("glGet", E, 1, "GL_FOG_MODE"), # 0x0B65
("glGet", F, 4, "GL_FOG_COLOR"), # 0x0B66
("glGet", F, 2, "GL_DEPTH_RANGE"), # 0x0B70
("glGet", B, 1, "GL_DEPTH_TEST"), # 0x0B71
("glGet", B, 1, "GL_DEPTH_WRITEMASK"), # 0x0B72
("glGet", F, 1, "GL_DEPTH_CLEAR_VALUE"), # 0x0B73
("glGet", E, 1, "GL_DEPTH_FUNC"), # 0x0B74
("glGet", B, 1, "GL_STENCIL_TEST"), # 0x0B90
("glGet", I, 1, "GL_STENCIL_CLEAR_VALUE"), # 0x0B91
("glGet", E, 1, "GL_STENCIL_FUNC"), # 0x0B92
("glGet", I, 1, "GL_STENCIL_VALUE_MASK"), # 0x0B93
("glGet", E, 1, "GL_STENCIL_FAIL"), # 0x0B94
("glGet", E, 1, "GL_STENCIL_PASS_DEPTH_FAIL"), # 0x0B95
("glGet", E, 1, "GL_STENCIL_PASS_DEPTH_PASS"), # 0x0B96
("glGet", I, 1, "GL_STENCIL_REF"), # 0x0B97
("glGet", I, 1, "GL_STENCIL_WRITEMASK"), # 0x0B98
("glGet", E, 1, "GL_MATRIX_MODE"), # 0x0BA0
("glGet", B, 1, "GL_NORMALIZE"), # 0x0BA1
("glGet", I, 4, "GL_VIEWPORT"), # 0x0BA2
("glGet", I, 1, "GL_MODELVIEW_STACK_DEPTH"), # 0x0BA3
("glGet", I, 1, "GL_PROJECTION_STACK_DEPTH"), # 0x0BA4
("glGet", I, 1, "GL_TEXTURE_STACK_DEPTH"), # 0x0BA5
("glGet", F, 16, "GL_MODELVIEW_MATRIX"), # 0x0BA6
("glGet", F, 16, "GL_PROJECTION_MATRIX"), # 0x0BA7
("glGet", F, 16, "GL_TEXTURE_MATRIX"), # 0x0BA8
("glGet", B, 1, "GL_ALPHA_TEST"), # 0x0BC0
("glGet", E, 1, "GL_ALPHA_TEST_FUNC"), # 0x0BC1
("glGet", F, 1, "GL_ALPHA_TEST_REF"), # 0x0BC2
("glGet", B, 1, "GL_DITHER"), # 0x0BD0
("glGet", E, 1, "GL_BLEND_DST"), # 0x0BE0
("glGet", E, 1, "GL_BLEND_SRC"), # 0x0BE1
("glGet", B, 1, "GL_BLEND"), # 0x0BE2
("glGet", E, 1, "GL_LOGIC_OP_MODE"), # 0x0BF0
("glGet", B, 1, "GL_COLOR_LOGIC_OP"), # 0x0BF2
("glGet", E, 1, "GL_READ_BUFFER"), # 0x0C02
("glGet", I, 4, "GL_SCISSOR_BOX"), # 0x0C10
("glGet", B, 1, "GL_SCISSOR_TEST"), # 0x0C11
("glGet", F, 4, "GL_COLOR_CLEAR_VALUE"), # 0x0C22
("glGet", B, 4, "GL_COLOR_WRITEMASK"), # 0x0C23
("glGet", E, 1, "GL_PERSPECTIVE_CORRECTION_HINT"), # 0x0C50
("glGet", E, 1, "GL_POINT_SMOOTH_HINT"), # 0x0C51
("glGet", E, 1, "GL_LINE_SMOOTH_HINT"), # 0x0C52
("glGet", E, 1, "GL_FOG_HINT"), # 0x0C54
("glGet", I, 1, "GL_UNPACK_ROW_LENGTH"), # 0x0CF2
("glGet", I, 1, "GL_UNPACK_SKIP_ROWS"), # 0x0CF3
("glGet", I, 1, "GL_UNPACK_SKIP_PIXELS"), # 0x0CF4
("glGet", I, 1, "GL_UNPACK_ALIGNMENT"), # 0x0CF5
("glGet", I, 1, "GL_PACK_ROW_LENGTH"), # 0x0D02
("glGet", I, 1, "GL_PACK_SKIP_ROWS"), # 0x0D03
("glGet", I, 1, "GL_PACK_SKIP_PIXELS"), # 0x0D04
("glGet", I, 1, "GL_PACK_ALIGNMENT"), # 0x0D05
("glGet,glGetTexEnv", F, 1, "GL_ALPHA_SCALE"), # 0x0D1C
("glGet", I, 1, "GL_MAX_LIGHTS"), # 0x0D31
("glGet", I, 1, "GL_MAX_TEXTURE_SIZE"), # 0x0D33
("glGet", I, 1, "GL_MAX_MODELVIEW_STACK_DEPTH"), # 0x0D36
("glGet", I, 1, "GL_MAX_PROJECTION_STACK_DEPTH"), # 0x0D38
("glGet", I, 1, "GL_MAX_TEXTURE_STACK_DEPTH"), # 0x0D39
("glGet", F, 2, "GL_MAX_VIEWPORT_DIMS"), # 0x0D3A
("glGet", I, 1, "GL_SUBPIXEL_BITS"), # 0x0D50
("glGet", I, 1, "GL_RED_BITS"), # 0x0D52
("glGet", I, 1, "GL_GREEN_BITS"), # 0x0D53
("glGet", I, 1, "GL_BLUE_BITS"), # 0x0D54
("glGet", I, 1, "GL_ALPHA_BITS"), # 0x0D55
("glGet", I, 1, "GL_DEPTH_BITS"), # 0x0D56
("glGet", I, 1, "GL_STENCIL_BITS"), # 0x0D57
("_glGet", B, 1, "GL_TEXTURE_2D"), # 0x0DE1
("glGetTexParameter,glGetSamplerParameter", F, 4, "GL_TEXTURE_BORDER_COLOR_EXT"), # 0x1004
("glGetTexLevelParameter", I, 1, "GL_TEXTURE_BORDER"), # 0x1005
("", X, 1, "GL_DONT_CARE"), # 0x1100
("", X, 1, "GL_FASTEST"), # 0x1101
("", X, 1, "GL_NICEST"), # 0x1102
("glGetLight,glGetMaterial", F, 4, "GL_AMBIENT"), # 0x1200
("glGetLight,glGetMaterial", F, 4, "GL_DIFFUSE"), # 0x1201
("glGetLight,glGetMaterial", F, 4, "GL_SPECULAR"), # 0x1202
("glGetLight", F, 4, "GL_POSITION"), # 0x1203
("glGetLight", F, 3, "GL_SPOT_DIRECTION"), # 0x1204
("glGetLight", F, 1, "GL_SPOT_EXPONENT"), # 0x1205
("glGetLight", F, 1, "GL_SPOT_CUTOFF"), # 0x1206
("glGetLight", F, 1, "GL_CONSTANT_ATTENUATION"), # 0x1207
("glGetLight", F, 1, "GL_LINEAR_ATTENUATION"), # 0x1208
("glGetLight", F, 1, "GL_QUADRATIC_ATTENUATION"), # 0x1209
("", X, 1, "GL_BYTE"), # 0x1400
("", X, 1, "GL_UNSIGNED_BYTE"), # 0x1401
("", X, 1, "GL_SHORT"), # 0x1402
("", X, 1, "GL_UNSIGNED_SHORT"), # 0x1403
("", X, 1, "GL_INT"), # 0x1404
("", X, 1, "GL_UNSIGNED_INT"), # 0x1405
("", X, 1, "GL_FLOAT"), # 0x1406
("", X, 1, "GL_HALF_FLOAT"), # 0x140B
("", X, 1, "GL_FIXED"), # 0x140C
("", X, 1, "GL_CLEAR"), # 0x1500
("", X, 1, "GL_AND"), # 0x1501
("", X, 1, "GL_AND_REVERSE"), # 0x1502
("", X, 1, "GL_COPY"), # 0x1503
("", X, 1, "GL_AND_INVERTED"), # 0x1504
("", X, 1, "GL_NOOP"), # 0x1505
("", X, 1, "GL_XOR"), # 0x1506
("", X, 1, "GL_OR"), # 0x1507
("", X, 1, "GL_NOR"), # 0x1508
("", X, 1, "GL_EQUIV"), # 0x1509
("", X, 1, "GL_INVERT"), # 0x150A
("", X, 1, "GL_OR_REVERSE"), # 0x150B
("", X, 1, "GL_COPY_INVERTED"), # 0x150C
("", X, 1, "GL_OR_INVERTED"), # 0x150D
("", X, 1, "GL_NAND"), # 0x150E
("", X, 1, "GL_SET"), # 0x150F
("glGetMaterial", F, 4, "GL_EMISSION"), # 0x1600
("glGetMaterial", F, 1, "GL_SHININESS"), # 0x1601
("", F, 4, "GL_AMBIENT_AND_DIFFUSE"), # 0x1602
("", X, 1, "GL_MODELVIEW"), # 0x1700
("", X, 1, "GL_PROJECTION"), # 0x1701
("", X, 1, "GL_TEXTURE"), # 0x1702
("glClearBuffer", F, 1, "GL_COLOR"), # 0x1800 (a 32bit value)
("glClearBuffer", F, 1, "GL_DEPTH"), # 0x1801 (a 32bit value)
("", X, 1, "GL_STENCIL"), # 0x1802
("", X, 1, "GL_DEPTH_COMPONENT"), # 0x1902
("", X, 1, "GL_RED"), # 0x1903
("", X, 1, "GL_GREEN"), # 0x1904
("", X, 1, "GL_BLUE"), # 0x1905
("", X, 1, "GL_ALPHA"), # 0x1906
("", X, 1, "GL_RGB"), # 0x1907
("", X, 1, "GL_RGBA"), # 0x1908
("", X, 1, "GL_LUMINANCE"), # 0x1909
("", X, 1, "GL_LUMINANCE_ALPHA"), # 0x190A
("", X, 1, "GL_FLAT"), # 0x1D00
("", X, 1, "GL_SMOOTH"), # 0x1D01
("", X, 1, "GL_KEEP"), # 0x1E00
("", X, 1, "GL_REPLACE"), # 0x1E01
("", X, 1, "GL_INCR"), # 0x1E02
("", X, 1, "GL_DECR"), # 0x1E03
("glGet", S, 1, "GL_VENDOR"), # 0x1F00
("glGet", S, 1, "GL_RENDERER"), # 0x1F01
("glGet", S, 1, "GL_VERSION"), # 0x1F02
("glGet", S, 1, "GL_EXTENSIONS"), # 0x1F03
("", X, 1, "GL_MODULATE"), # 0x2100
("", X, 1, "GL_DECAL"), # 0x2101
("glGetTexEnv", E, 1, "GL_TEXTURE_ENV_MODE"), # 0x2200
("glGetTexEnv", F, 4, "GL_TEXTURE_ENV_COLOR"), # 0x2201
("", X, 1, "GL_TEXTURE_ENV"), # 0x2300
("", X, 1, "GL_NEAREST"), # 0x2600
("", X, 1, "GL_LINEAR"), # 0x2601
("", X, 1, "GL_NEAREST_MIPMAP_NEAREST"), # 0x2700
("", X, 1, "GL_LINEAR_MIPMAP_NEAREST"), # 0x2701
("", X, 1, "GL_NEAREST_MIPMAP_LINEAR"), # 0x2702
("", X, 1, "GL_LINEAR_MIPMAP_LINEAR"), # 0x2703
("glGetTexParameter", E, 1, "GL_TEXTURE_MAG_FILTER"), # 0x2800
("glGetTexParameter", E, 1, "GL_TEXTURE_MIN_FILTER"), # 0x2801
("glGetTexParameter", E, 1, "GL_TEXTURE_WRAP_S"), # 0x2802
("glGetTexParameter", E, 1, "GL_TEXTURE_WRAP_T"), # 0x2803
("", X, 1, "GL_REPEAT"), # 0x2901
("glGet", F, 1, "GL_POLYGON_OFFSET_UNITS"), # 0x2A00
("_glGet", B, 1, "GL_LIGHT0"), # 0x4000
("_glGet", B, 1, "GL_LIGHT1"), # 0x4001
("_glGet", B, 1, "GL_LIGHT2"), # 0x4002
("_glGet", B, 1, "GL_LIGHT3"), # 0x4003
("_glGet", B, 1, "GL_LIGHT4"), # 0x4004
("_glGet", B, 1, "GL_LIGHT5"), # 0x4005
("_glGet", B, 1, "GL_LIGHT6"), # 0x4006
("_glGet", B, 1, "GL_LIGHT7"), # 0x4007
("", X, 1, "GL_CONSTANT_COLOR"), # 0x8001
("", X, 1, "GL_ONE_MINUS_CONSTANT_COLOR"), # 0x8002
("", X, 1, "GL_CONSTANT_ALPHA"), # 0x8003
("", X, 1, "GL_ONE_MINUS_CONSTANT_ALPHA"), # 0x8004
("glGet", F, 4, "GL_BLEND_COLOR"), # 0x8005
("", X, 1, "GL_FUNC_ADD"), # 0x8006
("", X, 1, "GL_MIN"), # 0x8007
("", X, 1, "GL_MAX"), # 0x8008
("glGet", E, 1, "GL_BLEND_EQUATION_RGB"), # 0x8009
("", X, 1, "GL_FUNC_SUBTRACT"), # 0x800A
("", X, 1, "GL_FUNC_REVERSE_SUBTRACT"), # 0x800B
("", X, 1, "GL_UNSIGNED_SHORT_4_4_4_4"), # 0x8033
("", X, 1, "GL_UNSIGNED_SHORT_5_5_5_1"), # 0x8034
("glGet", B, 1, "GL_POLYGON_OFFSET_FILL"), # 0x8037
("glGet", F, 1, "GL_POLYGON_OFFSET_FACTOR"), # 0x8038
("glGet", B, 1, "GL_RESCALE_NORMAL"), # 0x803A
("", X, 1, "GL_RGB8"), # 0x8051
("", X, 1, "GL_RGBA4"), # 0x8056
("", X, 1, "GL_RGB5_A1"), # 0x8057
("", X, 1, "GL_RGBA8"), # 0x8058
("", X, 1, "GL_RGB10_A2"), # 0x8059
("_glGet", I, 1, "GL_TEXTURE_BINDING_2D"), # 0x8069
("_glGet", I, 1, "GL_TEXTURE_BINDING_3D"), # 0x806A
("glGet", I, 1, "GL_UNPACK_SKIP_IMAGES"), # 0x806D
("glGet", F, 1, "GL_UNPACK_IMAGE_HEIGHT"), # 0x806E
("glGet", B, 1, "GL_TEXTURE_3D"), # 0x806F
("glGetTexParameter", E, 1, "GL_TEXTURE_WRAP_R"), # 0x8072
("glGet", I, 1, "GL_MAX_3D_TEXTURE_SIZE"), # 0x8073
("glGet", B, 1, "GL_VERTEX_ARRAY"), # 0x8074
("glGet", B, 1, "GL_NORMAL_ARRAY"), # 0x8075
("glGet", B, 1, "GL_COLOR_ARRAY"), # 0x8076
("glGet", B, 1, "GL_TEXTURE_COORD_ARRAY"), # 0x8078
("glGet", I, 1, "GL_VERTEX_ARRAY_SIZE"), # 0x807A
("glGet", E, 1, "GL_VERTEX_ARRAY_TYPE"), # 0x807B
("glGet", I, 1, "GL_VERTEX_ARRAY_STRIDE"), # 0x807C
("glGet", E, 1, "GL_NORMAL_ARRAY_TYPE"), # 0x807E
("glGet", I, 1, "GL_NORMAL_ARRAY_STRIDE"), # 0x807F
("glGet", I, 1, "GL_COLOR_ARRAY_SIZE"), # 0x8081
("glGet", E, 1, "GL_COLOR_ARRAY_TYPE"), # 0x8082
("glGet", I, 1, "GL_COLOR_ARRAY_STRIDE"), # 0x8083
("glGet", I, 1, "GL_TEXTURE_COORD_ARRAY_SIZE"), # 0x8088
("glGet", E, 1, "GL_TEXTURE_COORD_ARRAY_TYPE"), # 0x8089
("glGet", I, 1, "GL_TEXTURE_COORD_ARRAY_STRIDE"), # 0x808A
("glGet", P, 1, "GL_VERTEX_ARRAY_POINTER"), # 0x808E
("glGet", P, 1, "GL_NORMAL_ARRAY_POINTER"), # 0x808F
("glGet", P, 1, "GL_COLOR_ARRAY_POINTER"), # 0x8090
("glGet", P, 1, "GL_TEXTURE_COORD_ARRAY_POINTER"), # 0x8092
("glGet", I, 1, "GL_MULTISAMPLE"), # 0x809D
("glGet", I, 1, "GL_SAMPLE_ALPHA_TO_COVERAGE"), # 0x809E
("glGet", I, 1, "GL_SAMPLE_ALPHA_TO_ONE"), # 0x809F
("glGet", I, 1, "GL_SAMPLE_COVERAGE"), # 0x80A0
("glGet", I, 1, "GL_SAMPLE_BUFFERS"), # 0x80A8
("glGet", I, 1, "GL_SAMPLES"), # 0x80A9
("glGet", F, 1, "GL_SAMPLE_COVERAGE_VALUE"), # 0x80AA
("glGet", I, 1, "GL_SAMPLE_COVERAGE_INVERT"), # 0x80AB
("glGet", E, 1, "GL_BLEND_DST_RGB"), # 0x80C8
("glGet", E, 1, "GL_BLEND_SRC_RGB"), # 0x80C9
("glGet", E, 1, "GL_BLEND_DST_ALPHA"), # 0x80CA
("glGet", E, 1, "GL_BLEND_SRC_ALPHA"), # 0x80CB
("glGet", I, 1, "GL_MAX_ELEMENTS_VERTICES"), # 0x80E8
("glGet", I, 1, "GL_MAX_ELEMENTS_INDICES"), # 0x80E9
("glGet", F, 1, "GL_POINT_SIZE_MIN"), # 0x8126
("glGet", F, 1, "GL_POINT_SIZE_MAX"), # 0x8127
("glGet", F, 1, "GL_POINT_FADE_THRESHOLD_SIZE"), # 0x8128
("glGet", F, 3, "GL_POINT_DISTANCE_ATTENUATION"), # 0x8129
("glGet", I, 1, "GL_CLAMP_TO_BORDER_EXT"), # 0x812D
("", X, 1, "GL_CLAMP_TO_EDGE"), # 0x812F
("glGetTexParameter", F, 1, "GL_TEXTURE_MIN_LOD"), # 0x813A
("glGetTexParameter", F, 1, "GL_TEXTURE_MAX_LOD"), # 0x813B
("glGetTexParameter", F, 1, "GL_TEXTURE_BASE_LEVEL"), # 0x813C
("glGetTexParameter", F, 1, "GL_TEXTURE_MAX_LEVEL"), # 0x813D
("glGetTexParameter", B, 1, "GL_GENERATE_MIPMAP"), # 0x8191
("glGet", E, 1, "GL_GENERATE_MIPMAP_HINT"), # 0x8192
("", X, 1, "GL_DEPTH_COMPONENT16"), # 0x81A5
("", X, 1, "GL_DEPTH_COMPONENT24"), # 0x81A6
("glGetFramebufferAttachmentParameter", E, 1, "GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING"), # 0x8210
("glGetFramebufferAttachmentParameter", E, 1, "GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE"), # 0x8211
("glGetFramebufferAttachmentParameter", I, 1, "GL_FRAMEBUFFER_ATTACHMENT_RED_SIZE"), # 0x8212
("glGetFramebufferAttachmentParameter", I, 1, "GL_FRAMEBUFFER_ATTACHMENT_GREEN_SIZE"), # 0x8213
("glGetFramebufferAttachmentParameter", I, 1, "GL_FRAMEBUFFER_ATTACHMENT_BLUE_SIZE"), # 0x8214
("glGetFramebufferAttachmentParameter", I, 1, "GL_FRAMEBUFFER_ATTACHMENT_ALPHA_SIZE"), # 0x8215
("glGetFramebufferAttachmentParameter", I, 1, "GL_FRAMEBUFFER_ATTACHMENT_DEPTH_SIZE"), # 0x8216
("glGetFramebufferAttachmentParameter", I, 1, "GL_FRAMEBUFFER_ATTACHMENT_STENCIL_SIZE"), # 0x8217
("", X, 1, "GL_FRAMEBUFFER_DEFAULT"), # 0x8218
("", X, 1, "GL_FRAMEBUFFER_UNDEFINED"), # 0x8219
("", X, 1, "GL_DEPTH_STENCIL_ATTACHMENT"), # 0x821A
("glGet", I, 1, "GL_MAJOR_VERSION"), # 0x821B
("glGet", I, 1, "GL_MINOR_VERSION"), # 0x821C
("glGet", I, 1, "GL_NUM_EXTENSIONS"), # 0x821D
("glGetBufferParameter", B, 1, "GL_BUFFER_IMMUTABLE_STORAGE"), # 0x821F
("glGetBufferParameter", I, 1, "GL_BUFFER_STORAGE_FLAGS"), # 0x8220
("glGet", B, 1, "GL_PRIMITIVE_RESTART_FOR_PATCHES_SUPPORTED"), # 0x8221
("", X, 1, "GL_RG"), # 0x8227
("", X, 1, "GL_RG_INTEGER"), # 0x8228
("", X, 1, "GL_R8"), # 0x8229
("", X, 1, "GL_RG8"), # 0x822B
("", X, 1, "GL_R16F"), # 0x822D
("", X, 1, "GL_R32F"), # 0x822E
("", X, 1, "GL_RG16F"), # 0x822F
("", X, 1, "GL_RG32F"), # 0x8230
("", X, 1, "GL_R8I"), # 0x8231
("", X, 1, "GL_R8UI"), # 0x8232
("", X, 1, "GL_R16I"), # 0x8233
("", X, 1, "GL_R16UI"), # 0x8234
("", X, 1, "GL_R32I"), # 0x8235
("", X, 1, "GL_R32UI"), # 0x8236
("", X, 1, "GL_RG8I"), # 0x8237
("", X, 1, "GL_RG8UI"), # 0x8238
("", X, 1, "GL_RG16I"), # 0x8239
("", X, 1, "GL_RG16UI"), # 0x823A
("", X, 1, "GL_RG32I"), # 0x823B
("", X, 1, "GL_RG32UI"), # 0x823C
("glGet", B, 1, "GL_DEBUG_OUTPUT_SYNCHRONOUS"), # 0x8242
("glGet", I, 1, "GL_DEBUG_NEXT_LOGGED_MESSAGE_LENGTH"), # 0x8243
("glGet", P, 1, "GL_DEBUG_CALLBACK_FUNCTION"), # 0x8244
("glGet", P, 1, "GL_DEBUG_CALLBACK_USER_PARAM"), # 0x8245
("glGet", E, 1, "GL_RESET_NOTIFICATION_STRATEGY_ARB"), # 0x8256
("", X, 1, "GL_PROGRAM_BINARY_RETRIEVABLE_HINT"), # 0x8257
("glGet", E, 1, "GL_LAYER_PROVOKING_VERTEX_EXT"), # 0x825E
("glGet", I, 1, "GL_MAX_DEBUG_GROUP_STACK_DEPTH"), # 0x826C
("glGet", I, 1, "GL_DEBUG_GROUP_STACK_DEPTH"), # 0x826D
("", X, 1, "GL_UNSIGNED_SHORT_5_6_5"), # 0x8363
("", X, 1, "GL_UNSIGNED_INT_2_10_10_10_REV"), # 0x8368
("", X, 1, "GL_MIRRORED_REPEAT"), # 0x8370
("", X, 1, "GL_COMPRESSED_RGB_S3TC_DXT1_EXT"), # 0x83F0
("", X, 1, "GL_COMPRESSED_RGBA_S3TC_DXT1_EXT"), # 0x83F1
("glGet", F, 2, "GL_ALIASED_POINT_SIZE_RANGE"), # 0x846D
("glGet", F, 2, "GL_ALIASED_LINE_WIDTH_RANGE"), # 0x846E
("", X, 1, "GL_TEXTURE0"), # 0x84C0
("", X, 1, "GL_TEXTURE1"), # 0x84C1
("", X, 1, "GL_TEXTURE2"), # 0x84C2
("", X, 1, "GL_TEXTURE3"), # 0x84C3
("", X, 1, "GL_TEXTURE4"), # 0x84C4
("", X, 1, "GL_TEXTURE5"), # 0x84C5
("", X, 1, "GL_TEXTURE6"), # 0x84C6
("", X, 1, "GL_TEXTURE7"), # 0x84C7
("", X, 1, "GL_TEXTURE8"), # 0x84C8
("", X, 1, "GL_TEXTURE9"), # 0x84C9
("", X, 1, "GL_TEXTURE10"), # 0x84CA
("", X, 1, "GL_TEXTURE11"), # 0x84CB
("", X, 1, "GL_TEXTURE12"), # 0x84CC
("", X, 1, "GL_TEXTURE13"), # 0x84CD
("", X, 1, "GL_TEXTURE14"), # 0x84CE
("", X, 1, "GL_TEXTURE15"), # 0x84CF
("", X, 1, "GL_TEXTURE16"), # 0x84D0
("", X, 1, "GL_TEXTURE17"), # 0x84D1
("", X, 1, "GL_TEXTURE18"), # 0x84D2
("", X, 1, "GL_TEXTURE19"), # 0x84D3
("", X, 1, "GL_TEXTURE20"), # 0x84D4
("", X, 1, "GL_TEXTURE21"), # 0x84D5
("", X, 1, "GL_TEXTURE22"), # 0x84D6
("", X, 1, "GL_TEXTURE23"), # 0x84D7
("", X, 1, "GL_TEXTURE24"), # 0x84D8
("", X, 1, "GL_TEXTURE25"), # 0x84D9
("", X, 1, "GL_TEXTURE26"), # 0x84DA
("", X, 1, "GL_TEXTURE27"), # 0x84DB
("", X, 1, "GL_TEXTURE28"), # 0x84DC
("", X, 1, "GL_TEXTURE29"), # 0x84DD
("", X, 1, "GL_TEXTURE30"), # 0x84DE
("", X, 1, "GL_TEXTURE31"), # 0x84DF
("glGet", E, 1, "GL_ACTIVE_TEXTURE"), # 0x84E0
("glGet", E, 1, "GL_CLIENT_ACTIVE_TEXTURE"), # 0x84E1
("glGet", I, 1, "GL_MAX_TEXTURE_UNITS"), # 0x84E2
("", X, 1, "GL_SUBTRACT"), # 0x84E7
("glGet", I, 1, "GL_MAX_RENDERBUFFER_SIZE"), # 0x84E8
("", X, 1, "GL_ALL_COMPLETED_NV"), # 0x84F2
("", X, 1, "GL_FENCE_STATUS_NV"), # 0x84F3
("", X, 1, "GL_FENCE_CONDITION_NV"), # 0x84F4
("", X, 1, "GL_DEPTH_STENCIL"), # 0x84F9
("", X, 1, "GL_UNSIGNED_INT_24_8"), # 0x84FA
("glGet", F, 1, "GL_MAX_TEXTURE_LOD_BIAS"), # 0x84FD
("glGetTexParameter", F, 1, "GL_TEXTURE_MAX_ANISOTROPY_EXT"), # 0x84FE
("glGet", F, 1, "GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT"), # 0x84FF
("", X, 1, "GL_INCR_WRAP"), # 0x8507
("", X, 1, "GL_DECR_WRAP"), # 0x8508
("_glGet", B, 1, "GL_TEXTURE_CUBE_MAP"), # 0x8513
("_glGet", I, 1, "GL_TEXTURE_BINDING_CUBE_MAP"), # 0x8514
("", X, 1, "GL_TEXTURE_CUBE_MAP_POSITIVE_X"), # 0x8515
("", X, 1, "GL_TEXTURE_CUBE_MAP_NEGATIVE_X"), # 0x8516
("", X, 1, "GL_TEXTURE_CUBE_MAP_POSITIVE_Y"), # 0x8517
("", X, 1, "GL_TEXTURE_CUBE_MAP_NEGATIVE_Y"), # 0x8518
("", X, 1, "GL_TEXTURE_CUBE_MAP_POSITIVE_Z"), # 0x8519
("", X, 1, "GL_TEXTURE_CUBE_MAP_NEGATIVE_Z"), # 0x851A
("glGet", I, 1, "GL_MAX_CUBE_MAP_TEXTURE_SIZE"), # 0x851C
("", X, 1, "GL_COMBINE"), # 0x8570
("glGetTexEnv", E, 1, "GL_COMBINE_RGB"), # 0x8571
("glGetTexEnv", E, 1, "GL_COMBINE_ALPHA"), # 0x8572
("glGetTexEnv", F, 1, "GL_RGB_SCALE"), # 0x8573
("", X, 1, "GL_ADD_SIGNED"), # 0x8574
("", X, 1, "GL_INTERPOLATE"), # 0x8575
("", X, 1, "GL_CONSTANT"), # 0x8576
("", X, 1, "GL_PRIMARY_COLOR"), # 0x8577
("", X, 1, "GL_PREVIOUS"), # 0x8578
("glGetTexEnv", E, 1, "GL_SRC0_RGB"), # 0x8580
("glGetTexEnv", E, 1, "GL_SRC1_RGB"), # 0x8581
("glGetTexEnv", E, 1, "GL_SRC2_RGB"), # 0x8582
("glGetTexEnv", E, 1, "GL_SRC0_ALPHA"), # 0x8588
("glGetTexEnv", E, 1, "GL_SRC1_ALPHA"), # 0x8589
("glGetTexEnv", E, 1, "GL_SRC2_ALPHA"), # 0x858A
("glGetTexEnv", E, 1, "GL_OPERAND0_RGB"), # 0x8590
("glGetTexEnv", E, 1, "GL_OPERAND1_RGB"), # 0x8591
("glGetTexEnv", E, 1, "GL_OPERAND2_RGB"), # 0x8592
("glGetTexEnv", E, 1, "GL_OPERAND0_ALPHA"), # 0x8598
("glGetTexEnv", E, 1, "GL_OPERAND1_ALPHA"), # 0x8599
("glGetTexEnv", E, 1, "GL_OPERAND2_ALPHA"), # 0x859A
("glGet", I, 1, "GL_VERTEX_ARRAY_BINDING"), # 0x85B5
("glGetVertexAttrib", B, 1, "GL_VERTEX_ATTRIB_ARRAY_ENABLED"), # 0x8622
("glGetVertexAttrib", I, 1, "GL_VERTEX_ATTRIB_ARRAY_SIZE"), # 0x8623
("glGetVertexAttrib", I, 1, "GL_VERTEX_ATTRIB_ARRAY_STRIDE"), # 0x8624
("glGetVertexAttrib", E, 1, "GL_VERTEX_ATTRIB_ARRAY_TYPE"), # 0x8625
("glGetVertexAttrib", D, 4, "GL_CURRENT_VERTEX_ATTRIB"), # 0x8626
("glGetVertexAttrib", P, 1, "GL_VERTEX_ATTRIB_ARRAY_POINTER"), # 0x8645
("glGet", I, 1, "GL_NUM_COMPRESSED_TEXTURE_FORMATS"), # 0x86A2
#XXX: the list is GL_NUM_COMPRESSED_TEXTURES
#("glGet", E, 1, "GL_COMPRESSED_TEXTURE_FORMATS"), # 0x86A3
("", X, 1, "GL_DOT3_RGB"), # 0x86AE
("", X, 1, "GL_DOT3_RGBA"), # 0x86AF
("glGetProgram", I, 1, "GL_PROGRAM_BINARY_LENGTH"), # 0x8741,
("glGetBufferParameter", I, 1, "GL_BUFFER_SIZE"), # 0x8764
("glGetBufferParameter", E, 1, "GL_BUFFER_USAGE"), # 0x8765
("glGet", I, 1, "GL_NUM_PROGRAM_BINARY_FORMATS"), # 0x87FE
#XXX: the list is GL_NUM_PROGRAM_BINARY_FORMATS
#("", X, 1, "GL_PROGRAM_BINARY_FORMATS"), # 0x87FF
("glGet", E, 1, "GL_STENCIL_BACK_FUNC"), # 0x8800
("glGet", E, 1, "GL_STENCIL_BACK_FAIL"), # 0x8801
("glGet", E, 1, "GL_STENCIL_BACK_PASS_DEPTH_FAIL"), # 0x8802
("glGet", E, 1, "GL_STENCIL_BACK_PASS_DEPTH_PASS"), # 0x8803
("", X, 1, "GL_RGBA32F"), # 0x8814
("", X, 1, "GL_RGB32F"), # 0x8815
("", X, 1, "GL_RGBA16F"), # 0x881A
("", X, 1, "GL_RGB16F"), # 0x881B
("glGet", I, 1, "GL_MAX_DRAW_BUFFERS"), # 0x8824
("glGet", E, 1, "GL_DRAW_BUFFER0"), # 0x8825
("glGet", E, 1, "GL_DRAW_BUFFER1"), # 0x8826
("glGet", E, 1, "GL_DRAW_BUFFER2"), # 0x8827
("glGet", E, 1, "GL_DRAW_BUFFER3"), # 0x8828
("glGet", E, 1, "GL_DRAW_BUFFER4"), # 0x8829
("glGet", E, 1, "GL_DRAW_BUFFER5"), # 0x882A
("glGet", E, 1, "GL_DRAW_BUFFER6"), # 0x882B
("glGet", E, 1, "GL_DRAW_BUFFER7"), # 0x882C
("glGet", E, 1, "GL_DRAW_BUFFER8"), # 0x882D
("glGet", E, 1, "GL_DRAW_BUFFER9"), # 0x882E
("glGet", E, 1, "GL_DRAW_BUFFER10"), # 0x882F
("glGet", E, 1, "GL_DRAW_BUFFER11"), # 0x8830
("glGet", E, 1, "GL_DRAW_BUFFER12"), # 0x8831
("glGet", E, 1, "GL_DRAW_BUFFER13"), # 0x8832
("glGet", E, 1, "GL_DRAW_BUFFER14"), # 0x8833
("glGet", E, 1, "GL_DRAW_BUFFER15"), # 0x8834
("glGet", E, 1, "GL_BLEND_EQUATION_ALPHA"), # 0x883D
("glGetTexParameter", E, 1, "GL_TEXTURE_COMPARE_MODE"), # 0x884C
("glGetTexParameter", E, 1, "GL_TEXTURE_COMPARE_FUNC"), # 0x884D
("", X, 1, "GL_COMPARE_REF_TO_TEXTURE"), # 0x884E
("glGetQuery", I, 1, "GL_QUERY_COUNTER_BITS_EXT"), # 0x8864
("glGetQuery", I, 1, "GL_CURRENT_QUERY"), # 0x8865
("glGetQueryObject", I, 1, "GL_QUERY_RESULT"), # 0x8866
("glGetQueryObject", B, 1, "GL_QUERY_RESULT_AVAILABLE"), # 0x8867
("glGet", I, 1, "GL_MAX_VERTEX_ATTRIBS"), # 0x8869
("glGetVertexAttrib", B, 1, "GL_VERTEX_ATTRIB_ARRAY_NORMALIZED"), # 0x886A
("glGet", I, 1, "GL_MAX_TESS_CONTROL_INPUT_COMPONENTS_EXT"), # 0x886C
("glGet", I, 1, "GL_MAX_TESS_EVALUATION_INPUT_COMPONENTS_EXT"), # 0x886D
("glGet", I, 1, "GL_MAX_TEXTURE_IMAGE_UNITS"), # 0x8872
("", X, 1, "GL_ARRAY_BUFFER"), # 0x8892
("", X, 1, "GL_ELEMENT_ARRAY_BUFFER"), # 0x8893
("glGet", I, 1, "GL_ARRAY_BUFFER_BINDING"), # 0x8894
("glGet", I, 1, "GL_ELEMENT_ARRAY_BUFFER_BINDING"), # 0x8895
("glGet", I, 1, "GL_VERTEX_ARRAY_BUFFER_BINDING"), # 0x8896
("glGet", I, 1, "GL_NORMAL_ARRAY_BUFFER_BINDING"), # 0x8897
("glGet", I, 1, "GL_COLOR_ARRAY_BUFFER_BINDING"), # 0x8898
("glGet", I, 1, "GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING"), # 0x889A
("glGetVertexAttrib", I, 1, "GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING"), # 0x889F
("glGetBufferParameter", B, 1, "GL_BUFFER_MAPPED"), # 0x88BC
("glGetBufferParameter", P, 1, "GL_BUFFER_MAP_POINTER"), # 0x88BD
("glGetQuery", I, 1, "GL_TIME_ELAPSED_EXT"), # 0x88BF
("", X, 1, "GL_STREAM_DRAW"), # 0x88E0
("", X, 1, "GL_STREAM_READ"), # 0x88E1
("", X, 1, "GL_STREAM_COPY"), # 0x88E2
("", X, 1, "GL_STATIC_DRAW"), # 0x88E4
("", X, 1, "GL_STATIC_READ"), # 0x88E5
("", X, 1, "GL_STATIC_COPY"), # 0x88E6
("", X, 1, "GL_DYNAMIC_DRAW"), # 0x88E8
("", X, 1, "GL_DYNAMIC_READ"), # 0x88E9
("", X, 1, "GL_DYNAMIC_COPY"), # 0x88EA
("", X, 1, "GL_PIXEL_PACK_BUFFER"), # 0x88EB
("", X, 1, "GL_PIXEL_UNPACK_BUFFER"), # 0x88EC
("glGet", I, 1, "GL_PIXEL_PACK_BUFFER_BINDING"), # 0x88ED
("glGet", I, 1, "GL_PIXEL_UNPACK_BUFFER_BINDING"), # 0x88EF
("", X, 1, "GL_DEPTH24_STENCIL8"), # 0x88F0
("glGetVertexAttrib", B, 1, "GL_VERTEX_ATTRIB_ARRAY_INTEGER"), # 0x88FD
("glGetVertexAttrib", I, 1, "GL_VERTEX_ATTRIB_ARRAY_DIVISOR"), # 0x88FE
("glGet", I, 1, "GL_MAX_ARRAY_TEXTURE_LAYERS"), # 0x88FF
("glGet", F, 1, "GL_MIN_PROGRAM_TEXEL_OFFSET"), # 0x8904
("glGet", F, 1, "GL_MAX_PROGRAM_TEXEL_OFFSET"), # 0x8905
("glGetProgramiv", I, 1, "GL_GEOMETRY_LINKED_VERTICES_OUT_EXT"), # | |
<filename>vistrails/db/versions/v0_3_0/domain/auto_gen.py
###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: <EMAIL>
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
"""generated automatically by auto_dao.py"""
from __future__ import division
class DBChangeParameter(object):
    """Change-action: update a parameter of a module function.

    Generated automatically by auto_dao.py.  The ``db_*`` accessor surface
    (one property plus add/change/delete methods per field) is the
    serialization contract used by the DAO layer and is preserved verbatim.
    """
    vtType = 'changeParameter'

    def __init__(self, moduleId=None, alias=None, functionId=None,
                 function=None, parameterId=None, parameter=None,
                 type=None, value=None):
        self.__db_moduleId = moduleId
        self.__db_alias = alias
        self.__db_functionId = functionId
        self.__db_function = function
        self.__db_parameterId = parameterId
        self.__db_parameter = parameter
        self.__db_type = type
        self.__db_value = value

    @property
    def db_moduleId(self):
        return self.__db_moduleId

    @db_moduleId.setter
    def db_moduleId(self, moduleId):
        self.__db_moduleId = moduleId

    def db_add_moduleId(self, moduleId):
        self.__db_moduleId = moduleId

    # add/change are synonyms in the generated API
    db_change_moduleId = db_add_moduleId

    def db_delete_moduleId(self, moduleId):
        # argument accepted for API symmetry but ignored
        self.__db_moduleId = None

    @property
    def db_alias(self):
        return self.__db_alias

    @db_alias.setter
    def db_alias(self, alias):
        self.__db_alias = alias

    def db_add_alias(self, alias):
        self.__db_alias = alias

    db_change_alias = db_add_alias

    def db_delete_alias(self, alias):
        self.__db_alias = None

    @property
    def db_functionId(self):
        return self.__db_functionId

    @db_functionId.setter
    def db_functionId(self, functionId):
        self.__db_functionId = functionId

    def db_add_functionId(self, functionId):
        self.__db_functionId = functionId

    db_change_functionId = db_add_functionId

    def db_delete_functionId(self, functionId):
        self.__db_functionId = None

    @property
    def db_function(self):
        return self.__db_function

    @db_function.setter
    def db_function(self, function):
        self.__db_function = function

    def db_add_function(self, function):
        self.__db_function = function

    db_change_function = db_add_function

    def db_delete_function(self, function):
        self.__db_function = None

    @property
    def db_parameterId(self):
        return self.__db_parameterId

    @db_parameterId.setter
    def db_parameterId(self, parameterId):
        self.__db_parameterId = parameterId

    def db_add_parameterId(self, parameterId):
        self.__db_parameterId = parameterId

    db_change_parameterId = db_add_parameterId

    def db_delete_parameterId(self, parameterId):
        self.__db_parameterId = None

    @property
    def db_parameter(self):
        return self.__db_parameter

    @db_parameter.setter
    def db_parameter(self, parameter):
        self.__db_parameter = parameter

    def db_add_parameter(self, parameter):
        self.__db_parameter = parameter

    db_change_parameter = db_add_parameter

    def db_delete_parameter(self, parameter):
        self.__db_parameter = None

    @property
    def db_type(self):
        return self.__db_type

    @db_type.setter
    def db_type(self, type):
        self.__db_type = type

    def db_add_type(self, type):
        self.__db_type = type

    db_change_type = db_add_type

    def db_delete_type(self, type):
        self.__db_type = None

    @property
    def db_value(self):
        return self.__db_value

    @db_value.setter
    def db_value(self, value):
        self.__db_value = value

    def db_add_value(self, value):
        self.__db_value = value

    db_change_value = db_add_value

    def db_delete_value(self, value):
        self.__db_value = None

    def getPrimaryKey(self):
        """Return the primary key: the module id."""
        return self.__db_moduleId
"""generated automatically by auto_dao.py"""
class DBDeleteFunction(object):
    """Change-action: remove a function from a module.

    Generated automatically by auto_dao.py; the ``db_*`` accessor surface
    is the DAO serialization contract and is preserved verbatim.
    """
    vtType = 'deleteFunction'

    def __init__(self, moduleId=None, functionId=None):
        self.__db_moduleId = moduleId
        self.__db_functionId = functionId

    @property
    def db_moduleId(self):
        return self.__db_moduleId

    @db_moduleId.setter
    def db_moduleId(self, moduleId):
        self.__db_moduleId = moduleId

    def db_add_moduleId(self, moduleId):
        self.__db_moduleId = moduleId

    # add/change are synonyms in the generated API
    db_change_moduleId = db_add_moduleId

    def db_delete_moduleId(self, moduleId):
        # argument accepted for API symmetry but ignored
        self.__db_moduleId = None

    @property
    def db_functionId(self):
        return self.__db_functionId

    @db_functionId.setter
    def db_functionId(self, functionId):
        self.__db_functionId = functionId

    def db_add_functionId(self, functionId):
        self.__db_functionId = functionId

    db_change_functionId = db_add_functionId

    def db_delete_functionId(self, functionId):
        self.__db_functionId = None

    def getPrimaryKey(self):
        """Return the primary key: the module id."""
        return self.__db_moduleId
"""generated automatically by auto_dao.py"""
class DBDeleteConnection(object):
    """Change-action: remove a connection from the pipeline.

    Generated automatically by auto_dao.py; the ``db_*`` accessor surface
    is the DAO serialization contract and is preserved verbatim.
    """
    vtType = 'deleteConnection'

    def __init__(self, connectionId=None):
        self.__db_connectionId = connectionId

    @property
    def db_connectionId(self):
        return self.__db_connectionId

    @db_connectionId.setter
    def db_connectionId(self, connectionId):
        self.__db_connectionId = connectionId

    def db_add_connectionId(self, connectionId):
        self.__db_connectionId = connectionId

    # add/change are synonyms in the generated API
    db_change_connectionId = db_add_connectionId

    def db_delete_connectionId(self, connectionId):
        # argument accepted for API symmetry but ignored
        self.__db_connectionId = None

    def getPrimaryKey(self):
        """Return the primary key: the connection id."""
        return self.__db_connectionId
"""generated automatically by auto_dao.py"""
class DBAddModule(object):
    """Change-action: add a module (with canvas position) to the pipeline.

    Generated automatically by auto_dao.py; the ``db_*`` accessor surface
    is the DAO serialization contract and is preserved verbatim.
    """
    vtType = 'addModule'

    def __init__(self, id=None, cache=None, name=None, x=None, y=None):
        self.__db_id = id
        self.__db_cache = cache
        self.__db_name = name
        self.__db_x = x
        self.__db_y = y

    @property
    def db_id(self):
        return self.__db_id

    @db_id.setter
    def db_id(self, id):
        self.__db_id = id

    def db_add_id(self, id):
        self.__db_id = id

    # add/change are synonyms in the generated API
    db_change_id = db_add_id

    def db_delete_id(self, id):
        # argument accepted for API symmetry but ignored
        self.__db_id = None

    @property
    def db_cache(self):
        return self.__db_cache

    @db_cache.setter
    def db_cache(self, cache):
        self.__db_cache = cache

    def db_add_cache(self, cache):
        self.__db_cache = cache

    db_change_cache = db_add_cache

    def db_delete_cache(self, cache):
        self.__db_cache = None

    @property
    def db_name(self):
        return self.__db_name

    @db_name.setter
    def db_name(self, name):
        self.__db_name = name

    def db_add_name(self, name):
        self.__db_name = name

    db_change_name = db_add_name

    def db_delete_name(self, name):
        self.__db_name = None

    @property
    def db_x(self):
        return self.__db_x

    @db_x.setter
    def db_x(self, x):
        self.__db_x = x

    def db_add_x(self, x):
        self.__db_x = x

    db_change_x = db_add_x

    def db_delete_x(self, x):
        self.__db_x = None

    @property
    def db_y(self):
        return self.__db_y

    @db_y.setter
    def db_y(self, y):
        self.__db_y = y

    def db_add_y(self, y):
        self.__db_y = y

    db_change_y = db_add_y

    def db_delete_y(self, y):
        self.__db_y = None

    def getPrimaryKey(self):
        """Return the primary key: the module id."""
        return self.__db_id
"""generated automatically by auto_dao.py"""
class DBDeleteAnnotation(object):
    """Change-action: remove an annotation key from a module.

    Generated automatically by auto_dao.py; the ``db_*`` accessor surface
    is the DAO serialization contract and is preserved verbatim.
    """
    vtType = 'deleteAnnotation'

    def __init__(self, moduleId=None, key=None):
        self.__db_moduleId = moduleId
        self.__db_key = key

    @property
    def db_moduleId(self):
        return self.__db_moduleId

    @db_moduleId.setter
    def db_moduleId(self, moduleId):
        self.__db_moduleId = moduleId

    def db_add_moduleId(self, moduleId):
        self.__db_moduleId = moduleId

    # add/change are synonyms in the generated API
    db_change_moduleId = db_add_moduleId

    def db_delete_moduleId(self, moduleId):
        # argument accepted for API symmetry but ignored
        self.__db_moduleId = None

    @property
    def db_key(self):
        return self.__db_key

    @db_key.setter
    def db_key(self, key):
        self.__db_key = key

    def db_add_key(self, key):
        self.__db_key = key

    db_change_key = db_add_key

    def db_delete_key(self, key):
        self.__db_key = None

    def getPrimaryKey(self):
        """Return the primary key: the module id."""
        return self.__db_moduleId
"""generated automatically by auto_dao.py"""
class DBDeleteModulePort(object):
    """Change-action: remove a port (by type and name) from a module.

    Generated automatically by auto_dao.py; the ``db_*`` accessor surface
    is the DAO serialization contract and is preserved verbatim.
    """
    vtType = 'deleteModulePort'

    def __init__(self, moduleId=None, portType=None, portName=None):
        self.__db_moduleId = moduleId
        self.__db_portType = portType
        self.__db_portName = portName

    @property
    def db_moduleId(self):
        return self.__db_moduleId

    @db_moduleId.setter
    def db_moduleId(self, moduleId):
        self.__db_moduleId = moduleId

    def db_add_moduleId(self, moduleId):
        self.__db_moduleId = moduleId

    # add/change are synonyms in the generated API
    db_change_moduleId = db_add_moduleId

    def db_delete_moduleId(self, moduleId):
        # argument accepted for API symmetry but ignored
        self.__db_moduleId = None

    @property
    def db_portType(self):
        return self.__db_portType

    @db_portType.setter
    def db_portType(self, portType):
        self.__db_portType = portType

    def db_add_portType(self, portType):
        self.__db_portType = portType

    db_change_portType = db_add_portType

    def db_delete_portType(self, portType):
        self.__db_portType = None

    @property
    def db_portName(self):
        return self.__db_portName

    @db_portName.setter
    def db_portName(self, portName):
        self.__db_portName = portName

    def db_add_portName(self, portName):
        self.__db_portName = portName

    db_change_portName = db_add_portName

    def db_delete_portName(self, portName):
        self.__db_portName = None

    def getPrimaryKey(self):
        """Return the primary key: the module id."""
        return self.__db_moduleId
"""generated automatically by auto_dao.py"""
class DBDeleteModule(object):
    """Change-action: remove a module from the pipeline.

    Generated automatically by auto_dao.py; the ``db_*`` accessor surface
    is the DAO serialization contract and is preserved verbatim.
    """
    vtType = 'deleteModule'

    def __init__(self, moduleId=None):
        self.__db_moduleId = moduleId

    @property
    def db_moduleId(self):
        return self.__db_moduleId

    @db_moduleId.setter
    def db_moduleId(self, moduleId):
        self.__db_moduleId = moduleId

    def db_add_moduleId(self, moduleId):
        self.__db_moduleId = moduleId

    # add/change are synonyms in the generated API
    db_change_moduleId = db_add_moduleId

    def db_delete_moduleId(self, moduleId):
        # argument accepted for API symmetry but ignored
        self.__db_moduleId = None

    def getPrimaryKey(self):
        """Return the primary key: the module id."""
        return self.__db_moduleId
"""generated automatically by auto_dao.py"""
class DBTag(object):
    """A named tag attached to a version (keyed by timestamp).

    Generated automatically by auto_dao.py; the ``db_*`` accessor surface
    is the DAO serialization contract and is preserved verbatim.
    """
    vtType = 'tag'

    def __init__(self, time=None, name=None):
        self.__db_time = time
        self.__db_name = name

    @property
    def db_time(self):
        return self.__db_time

    @db_time.setter
    def db_time(self, time):
        self.__db_time = time

    def db_add_time(self, time):
        self.__db_time = time

    # add/change are synonyms in the generated API
    db_change_time = db_add_time

    def db_delete_time(self, time):
        # argument accepted for API symmetry but ignored
        self.__db_time = None

    @property
    def db_name(self):
        return self.__db_name

    @db_name.setter
    def db_name(self, name):
        self.__db_name = name

    def db_add_name(self, name):
        self.__db_name = name

    db_change_name = db_add_name

    def db_delete_name(self, name):
        self.__db_name = None

    def getPrimaryKey(self):
        """Return the primary key: the tag's time."""
        return self.__db_time
"""generated automatically by auto_dao.py"""
class DBAddModulePort(object):
vtType = 'addModulePort'
def __init__(self, moduleId=None, portType=None, portName=None, portSpec=None):
self.__db_moduleId = moduleId
self.__db_portType = portType
self.__db_portName = portName
self.__db_portSpec = portSpec
def __get_db_moduleId(self):
return self.__db_moduleId
def __set_db_moduleId(self, moduleId):
self.__db_moduleId = moduleId
db_moduleId = property(__get_db_moduleId, __set_db_moduleId)
def | |
# ===============================================================================
# Copyright 2011 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from __future__ import print_function
import yaml
from apptools.preferences.preference_binding import bind_preference
from skimage.color import gray2rgb
from skimage.draw import circle_perimeter, line
from traits.api import Instance, String, Property, Button, Bool, Event, on_trait_change, Str, Float
from pychron.core.ui.thread import Thread as UIThread, sleep
import json
import os
import shutil
import time
from threading import Thread, Timer, Event as TEvent
from numpy import copy, array
from pychron.canvas.canvas2D.camera import Camera, YamlCamera, BaseCamera
from pychron.core.helpers import binpack
from pychron.core.helpers.binpack import pack, format_blob, encode_blob
from pychron.core.helpers.filetools import unique_path, unique_path_from_manifest
from pychron.core.ui.stage_component_editor import VideoComponentEditor
from pychron.image.video import Video, pil_save
from pychron.mv.lumen_detector import LumenDetector
from pychron.paths import paths
from .stage_manager import StageManager
from pychron.core.ui.thread import Thread as QThread
try:
from pychron.canvas.canvas2D.video_laser_tray_canvas import \
VideoLaserTrayCanvas
except ImportError:
from pychron.canvas.canvas2D.laser_tray_canvas import \
LaserTrayCanvas as VideoLaserTrayCanvas
class VideoStageManager(StageManager):
"""
"""
video = Instance(Video)
camera = Instance(BaseCamera)
canvas_editor_klass = VideoComponentEditor
camera_zoom_coefficients = Property(String(enter_set=True, auto_set=False),
depends_on='_camera_zoom_coefficients')
_camera_zoom_coefficients = String
use_auto_center_interpolation = Bool(False)
configure_camera_device_button = Button
autocenter_button = Button('AutoCenter')
configure_autocenter_button = Button('Configure')
autocenter_manager = Instance(
'pychron.mv.autocenter_manager.AutoCenterManager')
autofocus_manager = Instance(
'pychron.mv.focus.autofocus_manager.AutoFocusManager')
# zoom_calibration_manager = Instance(
# 'pychron.mv.zoom.zoom_calibration.ZoomCalibrationManager')
snapshot_button = Button('Snapshot')
auto_save_snapshot = Bool(True)
record = Event
record_label = Property(depends_on='is_recording')
is_recording = Bool
use_db = False
use_video_archiver = Bool(True)
video_archiver = Instance('pychron.core.helpers.archiver.Archiver')
video_identifier = Str
# use_video_server = Bool(False)
# video_server_port = Int
# video_server_quality = Int
# video_server = Instance('pychron.image.video_server.VideoServer')
use_media_storage = Bool(False)
auto_upload = Bool(False)
keep_local_copy = Bool(False)
lumen_detector = Instance(LumenDetector)
render_with_markup = Bool(False)
_auto_correcting = False
stop_timer = Event
pxpermm = Float(23)
_measure_grain_t = None
_measure_grain_evt = None
grain_polygons = None
# test_button = Button
# _test_state = False
# def _test_button_fired(self):
# if self._test_state:
# # self.stop_measure_grain_polygon()
# #
# # time.sleep(2)
# #
# # d = self.get_grain_polygon_blob()
# # print d
# self.parent.disable_laser()
# else:
# self.parent.luminosity_degas_test()
# # self.start_measure_grain_polygon()
# self._test_state = not self._test_state
def motor_event_hook(self, name, value, *args, **kw):
    """Hook called when a motor reports a change.

    Only zoom changes are of interest here: a new zoom value means the
    camera calibration (e.g. pixels-per-mm) must be recomputed.
    """
    if name != 'zoom':
        return
    self._update_zoom(value)
def bind_preferences(self, pref_id):
    """Bind this manager's video/markup/upload traits to the application
    preferences rooted at *pref_id* (e.g. ``pychron.fusions.co2``).

    Also binds the video archiver's retention settings and, when present,
    the autocenter manager's preferences.
    """
    self.debug('binding preferences')
    # let the base StageManager bind its own preferences first
    super(VideoStageManager, self).bind_preferences(pref_id)
    if self.autocenter_manager:
        self.autocenter_manager.bind_preferences(pref_id)
    # bind_preference(self.autocenter_manager, 'use_autocenter',
    #                 '{}.use_autocenter'.format(pref_id))
    bind_preference(self, 'render_with_markup',
                    '{}.render_with_markup'.format(pref_id))
    bind_preference(self, 'auto_upload', '{}.auto_upload'.format(pref_id))
    bind_preference(self, 'use_media_storage', '{}.use_media_storage'.format(pref_id))
    bind_preference(self, 'keep_local_copy', '{}.keep_local_copy'.format(pref_id))
    bind_preference(self, 'use_video_archiver',
                    '{}.use_video_archiver'.format(pref_id))
    bind_preference(self, 'video_identifier',
                    '{}.video_identifier'.format(pref_id))
    # NOTE(review): 'use_video_server' is bound here but the corresponding
    # trait declaration in this class is commented out — confirm the trait
    # is defined on a superclass, otherwise this binding targets nothing.
    bind_preference(self, 'use_video_server',
                    '{}.use_video_server'.format(pref_id))
    # archiver retention policy (months/days/hours) and storage root
    bind_preference(self.video_archiver, 'archive_months',
                    '{}.video_archive_months'.format(pref_id))
    bind_preference(self.video_archiver, 'archive_days',
                    '{}.video_archive_days'.format(pref_id))
    bind_preference(self.video_archiver, 'archive_hours',
                    '{}.video_archive_hours'.format(pref_id))
    bind_preference(self.video_archiver, 'root',
                    '{}.video_directory'.format(pref_id))
    # bind_preference(self.video, 'output_mode',
    #                 '{}.video_output_mode'.format(pref_id))
    # bind_preference(self.video, 'ffmpeg_path',
    #                 '{}.ffmpeg_path'.format(pref_id))
def get_grain_polygon(self):
    """Return the lumen detector's current mask as a raw byte string.

    ``lum()`` returns a (luminosity, mask) pair; only the mask is used.
    The mask is presumably a numpy array — ``tostring`` serializes its
    buffer (TODO confirm; ``tostring`` is a deprecated alias of ``tobytes``
    on modern numpy).
    """
    _lum, mask = self.lumen_detector.lum()
    return mask.tostring()
def get_grain_polygon_blob(self):
    """Pop the next measured grain polygon and return it as an encoded blob.

    Each item from ``self.grain_polygons`` is a (timestamp, mask_dimension,
    points) triple; the first two are packed as floats and the point list as
    unsigned shorts.  Returns None when the generator is exhausted or unset
    (TypeError covers ``grain_polygons`` being None).
    """
    try:
        timestamp, mask_dim, points = next(self.grain_polygons)
        payload = pack('ff', ((timestamp, mask_dim),)) + pack('HH', points)
        return encode_blob(payload)
    except (StopIteration, TypeError) as e:
        self.debug('No more grain polygons. {}'.format(e))
def stop_measure_grain_polygon(self):
    """Signal the background grain-measuring thread to exit.

    Safe to call when no measurement is running.  Always returns True.
    """
    evt = self._measure_grain_evt
    self.debug('Stop measure polygons {}'.format(evt))
    if evt:
        evt.set()
    return True
def start_measure_grain_polygon(self):
    """Start a background thread that repeatedly segments the current video
    frame for grain polygons until :meth:`stop_measure_grain_polygon` is
    called.

    Results accumulate as (timestamp, mask_dim_mm, points) triples and are
    exposed afterwards via the ``self.grain_polygons`` generator
    (consumed by :meth:`get_grain_polygon_blob`).  Returns True.
    """
    # event used to signal the worker loop to exit
    self._measure_grain_evt = evt = TEvent()

    def _measure_grain_polygon():
        ld = self.lumen_detector
        # hole dimension from the current stage map, in mm
        dim = self.stage_map.g_dimension
        ld.pxpermm = self.pxpermm
        self.debug('Starting measure grain polygon')
        masks = []
        display_image = self.autocenter_manager.display_image
        # search mask slightly larger than the nominal hole dimension
        mask_dim = dim * 1.05
        # NOTE(review): named "_mm" but computed by *multiplying* by
        # pxpermm (mm -> pixels) — confirm intended units.
        mask_dim_mm = mask_dim * self.pxpermm
        ld.grain_measuring = True
        while not evt.is_set():
            src = self._get_preprocessed_src()
            if src is not None:
                targets = ld.find_targets(display_image, src, dim, mask=mask_dim,
                                          search={'start_offset_scalar': 1.5})
                if targets:
                    t = time.time()
                    targets = [(t, mask_dim_mm, ti.poly_points.tolist()) for ti in targets]
                    masks.extend(targets)
            # ~10 Hz sampling
            sleep(0.1)
        ld.grain_measuring = False
        # expose accumulated results lazily for later retrieval
        self.grain_polygons = (m for m in masks)
        self.debug('exiting measure grain')

    self._measure_grain_t = QThread(target=_measure_grain_polygon)
    self._measure_grain_t.start()
    return True
def start_recording(self, path=None, use_dialog=False, basename='vm_recording', **kw):
    """Start video recording and return the output path.

    path: explicit output path; when None a path is generated under the
          archiver root (or chosen via dialog when *use_dialog* is True)
    basename: base file name; may include a subdirectory component
    Remaining keyword args are forwarded to ``_start_recording``.
    """
    directory = None
    if os.path.sep in basename:
        # basename carries a subdirectory, e.g. 'runs/vm_recording'
        args = os.path.split(basename)
        directory, basename = os.path.sep.join(args[:-1]), args[-1]
    if path is None:
        if use_dialog:
            path = self.save_file_dialog()
        else:
            vd = self.video_archiver.root
            self.debug('video archiver root {}'.format(vd))
            if not vd:
                # fall back to the default video directory
                vd = paths.video_dir
            if directory:
                vd = os.path.join(vd, directory)
            if not os.path.isdir(vd):
                # NOTE(review): os.mkdir requires the parent to exist —
                # confirm whether os.makedirs is needed here.
                os.mkdir(vd)
            path = unique_path_from_manifest(vd, basename, extension='avi')
    kw['path'] = path
    kw['basename'] = basename
    self._start_recording(**kw)
    self.is_recording = True
    return path
def stop_recording(self, user='remote', delay=None):
    """Stop video recording, optionally after *delay* seconds.

    When *delay* is falsy the recording is stopped synchronously and the
    (possibly uploaded) output path is returned; when a delay is given the
    stop happens on a Timer thread and this method returns None.
    """
    def close():
        # finalize the recording; returns the local or uploaded path
        self.is_recording = False
        self.info('stop video recording')
        p = self.video.output_path
        if self.video.stop_recording(wait=True):
            if self.auto_upload:
                try:
                    p = self._upload(p, inform=False)
                except BaseException as e:
                    # best-effort upload: keep the local path on failure
                    self.critical('Failed uploading {}. error={}'.format(p, e))
        return p

    if self.video.is_recording():
        if delay:
            # deferred stop; caller gets no path in this case
            t = Timer(delay, close)
            t.start()
        else:
            return close()
@property
def video_configuration_path(self):
    """Path to ``camera.yaml`` in the configuration directory, or None
    when no configuration directory is set."""
    cdir = self.configuration_dir_path
    if not cdir:
        return None
    return os.path.join(cdir, 'camera.yaml')
def initialize_video(self):
    """Open the video device and apply the camera configuration.

    The device identifier defaults to 0 and may be overridden by the
    ``Device.identifier`` entry of ``camera.yaml``.  Also propagates the
    video pixel depth to the lumen detector.
    """
    if self.video:
        identifier = 0
        p = self.video_configuration_path
        # guard p: video_configuration_path returns None when no
        # configuration directory is set, and os.path.isfile(None) raises
        if p and os.path.isfile(p):
            with open(p, 'r') as rfile:
                # safe_load: the config needs only plain YAML types, and
                # plain yaml.load without a Loader is deprecated/unsafe
                yd = yaml.safe_load(rfile)
            # tolerate an empty or minimal config file
            vid = yd.get('Device') if yd else None
            if vid:
                identifier = vid.get('identifier', 0)
        self.video.open(identifier=identifier)
        self.video.load_configuration(p)
        self.lumen_detector.pixel_depth = self.video.pixel_depth
def initialize_stage(self):
    """Initialize the underlying stage controller, then the video source,
    and reset the zoom to its baseline."""
    super(VideoStageManager, self).initialize_stage()
    self.initialize_video()
    # historical drive-ratio capture, kept for reference:
    # s = self.stage_controller
    # if s.axes:
    #     xa = s.axes['x'].drive_ratio
    #     ya = s.axes['y'].drive_ratio
    #     self._drive_xratio = xa
    #     self._drive_yratio = ya
    self._update_zoom(0)
def autocenter(self, *args, **kw):
    """Delegate to the private auto-centering implementation, forwarding
    all arguments and returning its result."""
    return self._autocenter(*args, **kw)
def snapshot(self, path=None, name=None, auto=False,
             inform=True, return_blob=False, pic_format='.jpg'):
    """Render the canvas to an image file, optionally uploading it.

    path: abs path to use; when None it is derived from `name`/`auto`
          (auto-save in paths.snapshot_dir) or asked via a save dialog.
    name: base name used when auto saving in the default directory.
    auto: force auto save.
    return_blob: also return the raw image bytes.

    returns (path, upath) or (path, upath, blob):
        path: local abs path
        upath: remote abs path ('' if upload failed, None if uploads are off)
    """
    if path is None:
        if self.auto_save_snapshot or auto:
            if name is None:
                name = 'snapshot'
            path = unique_path_from_manifest(paths.snapshot_dir, name, pic_format)
        elif name is not None:
            # bare name (no existing parent dir) -> save into snapshot_dir
            if not os.path.isdir(os.path.dirname(name)):
                path = unique_path_from_manifest(paths.snapshot_dir, name, pic_format)
            else:
                path = name
        else:
            path = self.save_file_dialog()
    if path:
        self.info('saving snapshot {}'.format(path))
        # play camera shutter sound
        # play_sound('shutter')
        self._render_snapshot(path)
        if self.auto_upload:
            upath = self._upload(path, inform=inform)
            if upath is None:
                upath = ''
            if inform:
                if self.keep_local_copy:
                    self.information_dialog('Snapshot saved: "{}".\nUploaded : "{}"'.format(path, upath))
                else:
                    self.information_dialog('Snapshot uploaded to "{}"'.format(upath))
        else:
            upath = None
            if inform:
                self.information_dialog('Snapshot saved to "{}"'.format(path))
        if return_blob:
            with open(path, 'rb') as rfile:
                im = rfile.read()
            return path, upath, im
        else:
            return path, upath
def kill(self):
    """Shut down the stage manager: persist camera calibration, stop the
    update timer, close the canvas video display and the video source,
    then clean the video archive directory."""
    super(VideoStageManager, self).kill()
    if self.camera:
        self.camera.save_calibration()
    self.stop_timer = True
    self.canvas.close_video()
    if self.video:
        self.video.close(force=True)
    # historical cleanup, kept for reference:
    # if self.use_video_server:
    #     self.video_server.stop()
    # if self._stage_maps:
    #     for s in self._stage_maps:
    #         s.dump_correction_file()
    self.clean_video_archive()
def clean_video_archive(self):
    """Remove stale files from the video archive directory, keeping the
    manifest; does nothing when the archiver is disabled."""
    if not self.use_video_archiver:
        return
    self.info('Cleaning video directory')
    self.video_archiver.clean(('manifest.yaml',))
def is_auto_correcting(self):
    """Return whether an auto-correction (autocentering) pass is currently
    in progress."""
    return self._auto_correcting
# half-extents of the crop window; presumably in mm, since
# get_frame_size() scales them by pxpermm -- TODO confirm units
crop_width = 5
crop_height = 5
def get_scores(self, **kw):
    """Score the current preprocessed frame with the lumen detector;
    keyword arguments are forwarded to the detector's get_scores."""
    frame = self._get_preprocessed_src()
    return self.lumen_detector.get_scores(frame, **kw)
def find_lum_peak(self, min_distance, blur):
    """Locate the brightest peak in the current preprocessed frame.

    Returns the lumen detector's result, or None when no (2-D) frame is
    available. The search mask is 5% larger than the hole dimension.
    """
    detector = self.lumen_detector
    frame = self._get_preprocessed_src()
    hole_dim = self.stage_map.g_dimension
    mask_dim = hole_dim * 1.05
    # mask_dim_mm = mask_dim * self.pxpermm
    if frame is not None and frame.ndim >= 2:
        return detector.find_lum_peak(frame, hole_dim, mask_dim,
                                      blur=blur,
                                      min_distance=min_distance)
    return None
def get_brightness(self, **kw):
    """Return the lumen detector's brightness measurement for the current
    preprocessed frame; keyword arguments are forwarded to get_value."""
    frame = self._get_preprocessed_src()
    return self.lumen_detector.get_value(frame, **kw)
def get_frame_size(self):
    """Return the crop-window size in pixels as (width, height), i.e. the
    full extents (2x the half-extent attributes) scaled by pxpermm."""
    scale = self.pxpermm
    return 2 * self.crop_width * scale, 2 * self.crop_height * scale
def close_open_images(self):
    """Ask the autocenter manager (when present) to close its open images."""
    manager = self.autocenter_manager
    if manager:
        manager.close_open_images()
def finish_move_to_hole(self, user_entry):
    """Hook called after a move-to-hole completes; currently only logs.

    user_entry: True when the move was user-initiated (unused at present).
    """
    self.debug('finish move to hole')
    # historical behavior, kept for reference:
    # if user_entry and not self.keep_images_open:
    #     self.close_open_images()
# private
def _get_preprocessed_src(self):
    """Return a copy of the current video frame cropped around the screen
    offset, ready for lumen detection; None when no frame is cached."""
    ld = self.lumen_detector
    # copy so downstream processing never mutates the cached frame
    src = copy(self.video.get_cached_frame())
    dim = self.stage_map.g_dimension
    # keep the detector's scale in sync before it is used
    ld.pxpermm = self.pxpermm
    offx, offy = self.canvas.get_screen_offset()
    # crop to 2.5x the hole dimension around the (offset) center
    cropdim = dim * 2.5
    if src is not None:
        if len(src.shape):
            src = ld.crop(src, cropdim, cropdim, offx, offy, verbose=False)
    return src
def _stage_map_changed_hook(self):
    """Keep the lumen detector's hole radius in sync with the stage map.

    NOTE(review): assigns g_dimension directly to hole_radius -- confirm
    both use the same units/convention (radius vs diameter).
    """
    self.lumen_detector.hole_radius = self.stage_map.g_dimension
def _upload(self, src, inform=True):
    """Upload `src` to the media storage service.

    Returns the remote URL on success; returns None (implicitly) when media
    storage is disabled or the plugin is unavailable -- a warning dialog or
    log warning is emitted depending on `inform`. When keep_local_copy is
    false, the local file (and, for .avi, its '<name>-images' build
    directory) is removed after the upload.
    """
    if not self.use_media_storage:
        msg = 'Use Media Storage not enabled in Laser preferences'
        if inform:
            self.warning_dialog(msg)
        else:
            self.warning(msg)
    else:
        srv = 'pychron.media_storage.manager.MediaStorageManager'
        msm = self.parent.application.get_service(srv)
        if msm is not None:
            # destination: <laser name>/<parent dir of src>/<filename>
            d = os.path.split(os.path.dirname(src))[-1]
            dest = os.path.join(self.parent.name, d,
                                os.path.basename(src))
            msm.put(src, dest)
            if not self.keep_local_copy:
                self.debug('removing {}'.format(src))
                if src.endswith('.avi'):
                    # also remove the frame directory used to build the video
                    head, ext = os.path.splitext(src)
                    vd = '{}-images'.format(head)
                    self.debug('removing video build directory {}'.format(vd))
                    shutil.rmtree(vd)
                os.remove(src)
            dest = '{}/{}'.format(msm.get_base_url(), dest)
            return dest
        else:
            msg = 'Media Storage Plugin not enabled'
            if inform:
                self.warning_dialog(msg)
            else:
                self.warning(msg)
def _render_snapshot(self, path):
from chaco.plot_graphics_context import PlotGraphicsContext
c = self.canvas
p = None
was_visible = False
if not self.render_with_markup:
p = c.show_laser_position
c.show_laser_position = False
if self.points_programmer.is_visible:
c.hide_all()
was_visible = True
gc = PlotGraphicsContext((int(c.outer_width), int(c.outer_height)))
c.do_layout()
gc.render_component(c)
# gc.save(path)
from pychron.core.helpers import save_gc
save_gc.save(gc, path)
if p is not None:
c.show_laser_position | |
<gh_stars>0
""" Data loaders
This file provides:
1. A `create_loaders` function that creates two data loaders, one for the training set and one for the test set.
2. A `FloorplanGraphDataset` class which extends torch's Dataset.
Raw data schema:
- dataset is a python list of floorplans
- each floorplan is a python list containing rooms_types, rooms_bounding_boxes, and 4 other elements (which we drop)
- the rooms_types is a list of ints, each signifying the type of each room in the floorplan. The mapping is:
ROOM_CLASS = {"living_room": 1, "kitchen": 2, "bedroom": 3, "bathroom": 4, "missing": 5,
"closet": 6, "balcony": 7, "corridor": 8, "dining_room": 9, "laundry_room": 10}
- the rooms_bounding_boxes is a list of numpy arrays. Each of these arrays has 4 integer elements.
The first two correspond to the coordinates of the point to the upper left of the room's box, and the last two
to the bottom right respectively. The coordinates' values are in the range [0, 256]
- an example of floorplan is the following:
```
[
[6.0, 2.0, 4.0],
[array([array([132, 6, 148, 65]), array([110, 68, 208, 130]), array([132, 91, 160, 130])],
...(the rest are dropped)
]
```
Transformations that the data undergo sequentially:
1. malformed data are dropped
2. (very) small rooms are dropped
3. if the set is for training, the floorplans are augmented, that is, the bounding boxes of the rooms are randomly
flipped and rotated.
4. the bounding boxes are scaled to 0-1 range
5. the bounding box of each room is moved so that its center is identical to the center of the image
6. a graph is created:
1. the nodes are the rooms
2. the edges are tuples of 3 elements
7. the images are created by rescaling the bounding boxes and then using them to create 32x32 arrays containing
-1 and 1 in the places of those bounding boxes of the rooms (-1: no room area, 1: room area)
Output schema:
Each FloorplanGraphDataset access (__getitem__ or []) returns:
1. a FloatTensor (why floats?) of shape (n_rooms, 32, 32), 32 being the dimension of the output image (showing the
bounding box for each room),
2. a FloatTensor (why floats?) of shape (n_rooms, 10) which is the onehot encoding of the room types,
3. and a LongTensor of (n_edges, 3), denoting the relationships between the rooms.
Example:
For the example given previously:
```
[
[6.0, 2.0, 4.0],
[array([array([132, 6, 148, 65]), array([110, 68, 208, 130]), array([132, 91, 160, 130])],
...(the rest are dropped)
]
```
we get the following 3 tensors:
1. https://imgur.com/a/HsQkeXg . The 1's are not in the exact locations of the starting bounding boxes because
when training, the data undergoes augmentation randomly (random flips and multiples of 90deg rotations of the
rooms)
2. ```
tensor([[0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],
[0., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 1., 0., 0., 0., 0., 0., 0.]])
```
3. ```
tensor([[ 0, 1, 1],
[ 0, -1, 2],
[ 1, 1, 2]])
```
The edges tensor (3.) means the following: Room with index 0 (0) is adjacent (1) to room w/ index 1 (1), room with
index 0 (0) is not adjacent (-1) to room w/ index 2 (2) etc. Rooms overlapping in some way are considered
adjacent.
Dataset collation:
When the Dataset is loaded in batches, the collation function floorplan_collate_fn is called. This concatenates
"vertically" (stacks) all the tensors that __getitem__ returns (so now we get tensors of sum(n_i) sizes,
i={1 to batch size} and n_i the number of rooms in each floorplan of the batch, same with edges you get the idea),
and also returns two more tensors, which __are only used when training in parallel__.
TODO elaborate further here.
"""
import random
import torch
from torch.nn.functional import one_hot
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from torchvision import transforms
import numpy as np
IMAGE_SIZE_IN = 256 # 256x256
IMAGE_SIZE_OUT = 32 # 32x32
# bounding box limits
MIN_H = 0.03
MIN_W = 0.03
ADJACENCY_THRESHOLD = 0.03
def collate(batch):
    """Collate a list of (rooms_mks, nodes, edges) samples into batch tensors.

    Returns 5 tensors:
        all_rooms_mks      (sum n_i, H, W)  float -- stacked room masks
        all_nodes          (sum n_i, C)     float -- stacked onehot room types
        all_edges          (sum e_i, 3)     long  -- edges with node indices
                                                     shifted to batch-global
        all_node_to_sample (sum n_i,)       long  -- sample index per node
        all_edges_to_sample(sum e_i,)       long  -- sample index per edge
    """
    n_rooms_total = sum(len(b[0]) for b in batch)
    n_edges_total = sum(len(b[2]) for b in batch)
    # Derive mask and node-feature shapes from the data instead of
    # hard-coding IMAGE_SIZE_OUT/10, so the collate generalizes to other
    # image sizes and class counts while behaving identically for this one.
    if batch:
        mask_dims = tuple(batch[0][0].shape[1:])
        n_classes = batch[0][1].shape[1]
    else:
        mask_dims = (IMAGE_SIZE_OUT, IMAGE_SIZE_OUT)
        n_classes = 10
    # preallocate the outputs to avoid repeated torch.cat
    all_rooms_mks = torch.empty((n_rooms_total,) + mask_dims, dtype=torch.float)
    all_nodes = torch.empty((n_rooms_total, n_classes), dtype=torch.float)
    all_edges = torch.empty((n_edges_total, 3), dtype=torch.long) if n_edges_total > 0 else torch.LongTensor([])
    all_node_to_sample = torch.empty(n_rooms_total, dtype=torch.long)
    all_edges_to_sample = torch.empty(n_edges_total, dtype=torch.long)
    node_offset = 0
    edge_offset = 0
    for i, (rooms_mks, nodes, edges) in enumerate(batch):
        n_nodes = len(nodes)
        n_edges = len(edges)
        all_rooms_mks[node_offset:node_offset + n_nodes] = rooms_mks
        all_nodes[node_offset:node_offset + n_nodes] = nodes
        if n_edges > 0:
            all_edges[edge_offset:edge_offset + n_edges] = edges
            # shift the node-index columns (0 and 2) to batch-global indices
            all_edges[edge_offset:edge_offset + n_edges, 0] += node_offset
            all_edges[edge_offset:edge_offset + n_edges, 2] += node_offset
        all_node_to_sample[node_offset:node_offset + n_nodes] = torch.full((n_nodes,), i, dtype=torch.long)
        all_edges_to_sample[edge_offset:edge_offset + n_edges] = torch.full((n_edges,), i, dtype=torch.long)
        node_offset += n_nodes
        edge_offset += n_edges
    return all_rooms_mks, all_nodes, all_edges, all_node_to_sample, all_edges_to_sample
def create_loaders(path, train_batch_size=32, test_batch_size=64, loader_threads=8, n_rooms=(10, 12)):
    """Load the raw floorplan pickle at `path`, clean it, split it into
    train/test sets by room count, and return (train_loader, test_loader).

    Floorplans whose room count lies inside `n_rooms` (inclusive) go to the
    test set (capped at 5000 samples); all others go to the train set. Only
    the train set is augmented.
    """
    data = np.load(path, allow_pickle=True)
    # filter the data
    train_data = []
    test_data = []
    for floorplan in data:
        rooms_types = floorplan[0]
        rooms_bbs = floorplan[1]  # bounding boxes
        # discard malformed samples
        # TODO create a version of the dataset that is pre-cleaned
        if not rooms_types or any(i == 0 for i in rooms_types) or any(i is None for i in rooms_bbs):
            continue
        # discard small rooms
        # NOTE(review): MIN_H/MIN_W are 0.03 but the boxes here are still in
        # raw pixel units (0..256), so this filter almost never triggers --
        # confirm whether it was meant to run on normalized coordinates
        types_filtered = []
        bbs_filtered = []
        for t, bb in zip(rooms_types, rooms_bbs):
            if bb[2] - bb[0] > MIN_H and bb[3] - bb[1] > MIN_W:
                types_filtered.append(t)
                bbs_filtered.append(bb)
        # trainset has samples outside the target range for number of rooms,
        # and testset only those inside
        # NOTE(review): the split tests len(rooms_types) (the PRE-filter
        # count), not len(types_filtered) -- confirm this is intended
        if n_rooms[0] <= len(rooms_types) <= n_rooms[1]:
            test_data.append([types_filtered, bbs_filtered])
        else:
            train_data.append([types_filtered, bbs_filtered])
    # cap the number of eval samples to 5k
    test_data = test_data[:5000]
    # create datasets
    train_dataset = FloorplanGraphDataset(train_data, augment=True)
    test_dataset = FloorplanGraphDataset(test_data)
    # create loaders
    train_loader = DataLoader(train_dataset, batch_size=train_batch_size, shuffle=True, num_workers=loader_threads,
                              collate_fn=collate)
    test_loader = DataLoader(test_dataset, batch_size=test_batch_size, shuffle=False, num_workers=loader_threads,
                             collate_fn=collate)
    return train_loader, test_loader
class FloorplanGraphDataset(Dataset):
def __init__(self, data, augment=False):
    """data: list of [rooms_types, rooms_bbs] pairs (see module docstring).
    augment: when True, boxes are randomly flipped/rotated on each access."""
    self.data = data
    self.augment = augment
    # raw input coordinates live on an IMAGE_SIZE_IN x IMAGE_SIZE_IN canvas
    self.image_shape = (IMAGE_SIZE_IN, IMAGE_SIZE_IN)
def __len__(self):
    """Number of floorplans in the dataset."""
    return len(self.data)
def __getitem__(self, index):
    """Return (rooms_mks, nodes, edges) for one floorplan.

    rooms_mks: FloatTensor (n_rooms, 32, 32) of -1/1 room masks
    nodes:     FloatTensor (n_rooms, 10) onehot room types (class 0 dropped)
    edges:     LongTensor  (n_edges, 3) rows [k, +/-1, l] for each room pair
    """
    floorplan = self.data[index]
    rooms_type = floorplan[0]
    rooms_bbs = floorplan[1]  # bounding boxes
    if self.augment:
        # random multiple-of-90-degree rotation and optional flip
        angle = random.randint(0, 3) * 90.0
        flip = random.randint(0, 1) == 1
        rooms_bbs = [self.augment_bounding_box(bb, angle, flip) for bb in rooms_bbs]
    rooms_bbs = np.stack(rooms_bbs) / IMAGE_SIZE_IN  # "normalize" to [0, 1]
    # find the boundary box and centralize all bounding boxes according to
    # it, i.e. move them so that the center of their common boundary box
    # matches the center (0.5, 0.5) of the normalized image
    top_left = np.min(rooms_bbs[:, :2], axis=0)
    bottom_right = np.max(rooms_bbs[:, 2:], axis=0)
    shift = (top_left + bottom_right) / 2.0 - 0.5  # shifting vector
    # subtract the shifting vector from all points to centralize them
    rooms_bbs[:, :2] -= shift
    rooms_bbs[:, 2:] -= shift
    # create graph (nodes, edges): one edge per unordered room pair,
    # labeled +1 when the boxes are adjacent, -1 otherwise
    edges = []
    for k in range(len(rooms_type)):
        for l in range(len(rooms_type)):
            if l > k:
                # NOTE(review): nd0/nd1 are assigned but never used
                nd0, bb0 = rooms_type[k], rooms_bbs[k]
                nd1, bb1 = rooms_type[l], rooms_bbs[l]
                if is_adjacent(bb0, bb1):
                    edges.append([k, 1, l])
                else:
                    edges.append([k, -1, l])
    nodes = torch.LongTensor(list(map(int, rooms_type)))
    edges = torch.LongTensor(edges)
    # rasterize each (rescaled) box into a 32x32 mask
    rooms_mks = np.zeros((len(nodes), IMAGE_SIZE_OUT, IMAGE_SIZE_OUT))
    for k, bb in enumerate(rooms_bbs):
        bb *= IMAGE_SIZE_OUT  # in-place scale of this row of the local array
        x0, y0, x1, y1 = bb.astype(int)
        # NOTE(review): masks are indexed [x, y] here while images are
        # conventionally [row, col] -- confirm orientation matches consumers
        rooms_mks[k, x0:x1 + 1, y0:y1 + 1] = 1.0
    # onehot encode and drop class 0 because the rooms' classes are 1-10
    nodes = one_hot(nodes, num_classes=11)[:, 1:]
    nodes = nodes.float()
    rooms_mks = torch.FloatTensor(rooms_mks)
    # maps {0, 1} masks to {-1, 1}
    normalize = transforms.Normalize(mean=[0.5],
                                     std=[0.5])
    rooms_mks = normalize(rooms_mks)
    return rooms_mks, nodes, edges
def augment_bounding_box(self, bb, angle, flip):
    """Apply the flip/rotation augmentation to one bounding box.

    bb: [x0, y0, x1, y1] in input-image pixels; angle in degrees.
    Returns the transformed box as an int array, re-ordered so that the
    first corner is the minimum and the second the maximum.
    """
    theta = np.deg2rad(angle)
    corner_a = self.flip_and_rotate(np.array([bb[0], bb[1]]), flip, theta)
    corner_b = self.flip_and_rotate(np.array([bb[2], bb[3]]), flip, theta)
    xs = sorted([corner_a[0], corner_b[0]])
    ys = sorted([corner_a[1], corner_b[1]])
    return np.array([xs[0], ys[0], xs[1], ys[1]]).astype(int)
def flip_and_rotate(self, vector, flip, angle):
image_bounds = np.array(self.image_shape)
center = (image_bounds - 1) / 2
vector = vector - center
rot_matrix = np.array([[np.cos(angle), np.sin(angle)],
| |
col in enumerate(row or []):
self.grid.SetCellValue(i, j, compat.unicode(col))
if self.can_add and self.immediate:
for j, col in enumerate(self.default_row):
self.grid.SetCellValue(rows_new-1, j, compat.unicode(col))
self._changing_value = False
# update state of the remove button and the row label
self._update_apply_button()
self._update_remove_button()
self._update_indices()
def apply(self, event=None):
    """Apply the edited value; called by Apply button.

    If self.with_index and the owner defines set_<attributename>, that
    setter is called with (old value, indices) and the standard
    owner.properties_changed mechanism is NOT triggered additionally.
    Otherwise the standard on_value_edited path is used.
    """
    self.grid.SaveEditControlValue()  # end editing of the current cell
    new_value = self._get_new_value()
    if new_value is None:  # not modified
        if event is not None: event.Skip()
        return
    if self.with_index:
        setter = getattr(self.owner, "set_%s"%self.attributename, None)
    # standard path (no index tracking or no dedicated setter)
    if not self.with_index or not setter:
        self.on_value_edited(new_value)
        self._update_apply_button()
        if event is not None: event.Skip()
        return
    # index-aware path: hand the previous value and the row indices
    # (None for newly added rows) to the owner's setter
    indices = [int(i) if i else None for i in self.indices]
    #self._changing_value = True
    old_value = self.value[:]
    self.value[:] = new_value
    setter(old_value, indices)
    #self._changing_value = False
    self.editing_values = None
    self._initialize_indices()
    self._update_indices()
    self._update_apply_button()
    if event is not None: event.Skip()
def flush(self):
    """Commit any pending cell edits immediately (alias for apply())."""
    self.apply()
def reset(self, event):
    "Discard the changes."
    # drop the editing buffer, restore indices and redraw from self.value
    self.editing_values = None
    self._initialize_indices()
    self.update_display()
    event.Skip()
def _get_new_value(self):
    """Return the edited rows as a list, or None when nothing was modified.

    With SKIP_EMPTY, rows that are None or all-whitespace are dropped
    (index-tracked rows that still carry an index are kept); any remaining
    None rows are replaced with empty strings per column.
    """
    # returns None if not edited
    if self.editing_values is None: return None
    ret = self.editing_values[:]
    if self.with_index:
        indices = self.indices
    modified = False
    if self.SKIP_EMPTY:
        delete = set()
        for i,row in enumerate(ret):
            # keep rows that still reference an original index
            if self.with_index and self.indices[i]: continue
            # treat all-whitespace rows like deleted rows
            if row is not None and not any( r.strip() for r in row ): row = None
            if row is None:
                delete.add(i)
                continue
        if self.with_index:
            indices = [index for i,index in enumerate(indices) if not i in delete]
        ret = [row for i,row in enumerate(ret) if not i in delete]
    # compare the lengths of the original vs. current values
    if len(self.value) != len(ret):
        modified = True
    # compare the indices
    if self.with_index and indices!=[str(i) for i in range(len(self.value))]:
        modified = True
    # go through the rows
    for i,row in enumerate(ret):
        if row is None:
            # empty row
            ret[i] = [""]*len(self.col_defs)
            modified = True
        if not modified:
            # cell-by-cell comparison until the first difference
            for j, col in enumerate(row):
                if col != self.value[i][j]:
                    modified = True
    if not modified:
        return None
    return ret
# helpers for self.with_index handling ############################################################################
def _initialize_indices(self):
if not self.with_index: return
self.indices = [str(i) for i in range(len(self.value))]
def _update_indices(self):
if not self.grid or not self.with_index: return
for i, index in enumerate(self.indices):
self.grid.SetRowLabelValue(i, index)
# edit handlers; add/remove/insert button handlers #################################################################
def on_cell_changing(self, event):
    """Grid cell-changing handler: intended pre-validation hook.

    NOTE(review): row/col are computed but never used and no Veto is
    issued -- the validation marked XXX below is not implemented yet;
    actual validation happens in on_cell_changed.
    """
    # XXX validate; event.Veto if not valid
    self.on_focus()
    if not self.validation_res: return
    row,col = event.Row, event.Col
def on_cell_changed(self, event):
    """Grid cell-changed handler: validate and store the entered value.

    Rejects (Veto + bell) values failing the column's regex. In immediate
    mode the live value is updated and listeners notified; otherwise the
    value goes into the editing buffer and Apply is enabled.
    """
    # user has entered a value
    self.on_focus()
    row,col = event.Row, event.Col
    value = event.GetEventObject().GetCellValue(row,col)  # the new value
    if self.validation_res and self.validation_res[col]:
        validation_re = self.validation_res[col]
        match = validation_re.match(value)
        if not match:
            wx.Bell()
            event.Veto()
            return
    # NOTE(review): 'not self.can_insert' appears twice below -- one of
    # them is likely meant to be 'not self.can_remove'; confirm
    if self.immediate or (not self.can_add and not self.can_insert and not self.can_insert):
        if row>=len(self.value):
            self.add_row(None)
        # immediate
        if self.value[row] is None:
            self.value[row] = self.default_row[:]
        self.value[row][col] = value
        self._notify()
        event.Skip()
        return
    # buffered mode: remember whether this is the first pending edit
    activate_apply = not self.editing_values
    data = self._ensure_editing_copy()
    if data[row] is None:
        data[row] = self.default_row[:]
    # convert to the column's declared type before storing
    #if self.col_defs[col][1]==self.STRING:
    if self.col_defs[col][1]==self.INT:
        value = int(value)
    elif self.col_defs[col][1]==self.FLOAT:
        value = float(value)
    #elif self.col_defs[col][1]==self.BOOL:
    #value = bool(value)
    data[row][col] = value
    if activate_apply: self._update_apply_button()
    event.Skip()
def add_row(self, event):
    """Append an empty row (a copy of default_row) to the grid and to the
    value/editing buffer, then refresh buttons, labels and focus."""
    self.on_focus()
    values = self._ensure_editing_copy()
    self.grid.AppendRows()
    self.grid.MakeCellVisible(len(values), 0)
    self.grid.ForceRefresh()
    values.append( self.default_row[:] )
    if self.with_index:
        # new rows have no original index yet
        self.indices.append("")
    self._update_remove_button()
    self._update_apply_button()
    self._update_indices()
    if compat.version >= (3,0):
        self.grid.GoToCell( len(values)-1, self.cur_col )
        self.grid.SetFocus()
def remove_row(self, event):
    """Delete the current row from grid, buffer and indices.

    Guards: in immediate mode with can_add, the trailing entry row cannot
    be removed; the last remaining row is protected unless can_remove_last.
    """
    self.on_focus()
    if self.immediate and self.can_add and self.cur_row==self.grid.GetNumberRows()-1:
        return
    if not self.can_remove_last and self.grid.GetNumberRows()==1:
        self._logger.warning( _('You can not remove the last entry!') )
        return
    values = self._ensure_editing_copy()
    if values:
        self.grid.DeleteRows(self.cur_row)
        del values[self.cur_row]
        if self.with_index:
            del self.indices[self.cur_row]
        # keep the cursor inside the remaining rows
        if self.cur_row>=len(values):
            self.cur_row -= 1
    self._update_remove_button()
    self._update_apply_button()
    self._update_indices()
    if compat.version >= (3,0):
        self.grid.GoToCell( self.cur_row, self.cur_col )
        self.grid.SetFocus()
def insert_row(self, event):
    """Insert an empty row (a copy of default_row) at the current cursor
    position, then refresh buttons, labels and focus."""
    self.on_focus()
    self.grid.InsertRows(self.cur_row)
    self.grid.MakeCellVisible(self.cur_row, 0)
    self.grid.ForceRefresh()
    values = self._ensure_editing_copy()
    values.insert(self.cur_row, self.default_row[:])
    if self.with_index:
        # inserted rows have no original index
        self.indices.insert(self.cur_row, "")
    self._update_remove_button()
    self._update_apply_button()
    self._update_indices()
    if compat.version >= (3,0):
        self.grid.GoToCell( self.cur_row, self.cur_col )
        self.grid.SetFocus()
def _ensure_editing_copy(self):
if self.immediate: return self.value
if self.editing_values is None:
self.editing_values = [[col for col in row] for row in self.value]
return self.editing_values
def _update_remove_button(self):
    """Enable or disable remove button

    The state of the remove button depends on the number of rows and L{self.can_remove_last}."""
    if not self.grid or not self.buttons: return
    if self.can_remove and not self.can_remove_last:
        # NOTE(review): assumes the remove button is the last in self.buttons
        self.buttons[-1].Enable(self.grid.GetNumberRows() > 1)
def _update_apply_button(self):
    """Enable Apply/Reset only while there are uncommitted edits; never in
    immediate mode, where edits are applied instantly."""
    if not self.grid or not self.buttons or self.immediate: return
    self.buttons[0].Enable( self.editing_values is not None)  # the apply button
    self.buttons[-1].Enable( self.editing_values is not None)  # the reset button
# helpers ##########################################################################################################
def _set_col_sizes(self, sizes):
    """sets the width of the columns.

    sizes is a list of integers with the size of each column: a value of 0 stands for a default size,
    while -1 means to expand the column to fit the available space (at most one column can have size -1)"""
    col_to_expand = -1
    total_w = 0
    for i in range(self.grid.GetNumberCols()):
        try:
            w = sizes[i]
        except IndexError:
            # fewer sizes than columns: leave the remaining columns as-is
            return
        if not w:
            # 0 -> auto-size to content
            self.grid.AutoSizeColumn(i)
            total_w += self.grid.GetColSize(i)
        elif w < 0:
            # remember the (single) column to expand later
            col_to_expand = i
        else:
            self.grid.SetColSize(i, w)
            total_w += w
    if col_to_expand >= 0:
        # grow the flagged column into the remaining width, but never
        # below its auto-sized width
        self.grid.AutoSizeColumn(col_to_expand)
        w = self.grid.GetSize()[0] - total_w
        if w >= self.grid.GetColSize(col_to_expand):
            self.grid.SetColSize(col_to_expand, w)
class CodeProperty(TextProperty):
    """Multiline text property holding code; edited in a monospaced font."""
    _HORIZONTAL_LAYOUT = False
    _PROPORTION = 3
    def __init__(self, value="", name=None):
        TextProperty.__init__(self, value, multiline=True, name=name, default_value="")
    def create_editor(self, panel, sizer):
        """Create the standard text editor, then switch it to monospace."""
        # we want a monospaced font
        TextProperty.create_editor(self, panel, sizer)
        font = wx.Font(9, wx.FONTFAMILY_MODERN, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)
        self.text.SetFont(font)
    def get_lines(self):
        """Return the code as a list of newline-terminated lines; empty list
        when deactivated or blank. A trailing empty line is dropped."""
        if self.deactivated or not self.value.strip(): return []
        ret = self.value.split('\n')
        if ret and not ret[-1]: del ret[-1]
        return [line+'\n' for line in ret]
class ExtraPropertiesProperty(GridProperty):
    """Two-column (Property, Value) grid for user-defined extra widget
    properties; serialized as <extraproperties> XML."""
    LABEL = 'Extra properties for this widget'
    TOOLTIP = ('You can use this property to add some extra custom properties to this widget.\n\n'
               'For each property "prop" with value "val", wxGlade will generate a'
               '"widget.SetProp(val)" line (or a "<prop>val</prop>" line for XRC).')
    _PROPORTION = 3
    SKIP_EMPTY = True
    def __init__(self):
        cols = [(_('Property'), GridProperty.STRING),
                (_('Value'), GridProperty.STRING)]
        value = []
        GridProperty.__init__(self, value, cols, immediate=True)
    def write(self, output, tabs):
        """Append the <extraproperties> XML for all non-empty rows to
        `output`; writes nothing when there are no rows with a value."""
        if not self.value: return
        inner_xml = []
        for row in self.value:
            if row is None: continue
            name, value = row
            if value:
                inner_xml += common.format_xml_tag( u'property', value.strip(), tabs+1, name=name )
        if inner_xml:
            output.extend( common.format_xml_tag( u'extraproperties', inner_xml, tabs, is_xml=True ) )
class ActionButtonProperty(Property):
    """A property rendered as a plain button that triggers a callback;
    it carries no data and writes nothing to the XML output."""
    # just a button to start an action
    CONTROLNAMES = ["button"]
    background_color = None
    HAS_DATA = False  # to be ignored by owner.get_properties()
    def __init__(self, callback):
        self.callback = callback
        # set to None; when creating an editor, self.set_label() may have been called
        self.label = None
        Property.__init__(self, None)
    def get(self):
        # the property object itself is the "value"
        return self
    def create_editor(self, panel, sizer):
        """Create the wx.Button, wire its tooltip, handler and colors."""
        if self.label is None: self.label = self._find_label()
        self.button = wx.Button( panel, -1, self.label )
        sizer.Add(self.button, 0, wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT, 4)
        tooltip = self._find_tooltip()
        if tooltip: compat.SetToolTip(self.button, tooltip)
        self.button.Bind(wx.EVT_BUTTON, self.on_button)
        self.editing = True
        if self.background_color is not None:
            self.button.SetBackgroundColour(self.background_color)
    def set_label(self, label):
        """Change the button label (and re-apply the background color) if
        the editor has been created; otherwise just remember the label."""
        self.label = label
        if self.editing:
            self.button.SetLabel(label)
            if self.background_color is not None:
                self.button.SetBackgroundColour(self.background_color)
    def on_button(self, event):
        # button click -> run the configured action
        self.on_focus()
        self.callback()
    def __call__(self, *args, **kwargs):
        # allow the property to be invoked directly like a function
        self.callback(*args, **kwargs)
    def write(self, output, tabs=0):
        # action buttons carry no data, so nothing is serialized
        return
########################################################################################################################
class PropertyOwner(object):
    """Base/mixin for objects owning a set of Property instances.

    Properties are registered by plain attribute assignment (intercepted in
    __setattr__) and read back transparently: accessing an attribute that
    names a registered property returns the property's VALUE via
    __getattr__, not the Property object itself.
    """
    def __init__(self):
        # property handling
        self.properties = {}       # attribute/name -> Property instance
        self.property_names = []   # registration order
    # property handling ################################################################################################
    def add_property(self, prop, attname):
        """Register `prop` under attribute name `attname` and link it back
        to this owner."""
        # link the property to the owner
        self.properties[attname] = prop
        if prop.name is not None:
            # allow also access via property name like 'class', but only via the properties dict
            self.properties[prop.name] = prop
            self.property_names.append(prop.name)
        else:
            self.property_names.append(attname)
        prop.set_owner(self, attname)
    def __getattr__(self, attr):
        # only called when normal lookup fails: resolve property values
        if attr in self.properties:
            # return the value (either the user-provided or the default value)
            return self.properties[attr].get()
        raise AttributeError("%r object has no attribute %r" %(self.__class__, attr))
    def __setattr__(self, name, value):
        # assigning a Property instance registers it instead of storing it
        if isinstance(value, Property):
            self.add_property(value, name)
            return
        # guard against accidentally shadowing a registered property
        if name!="properties" and name in self.properties and config.debugging:
            raise ValueError("implementation error: property about to be overwritten")
        object.__setattr__(self, name, value)
    def copy_properties(self, other, properties):
        "copy named properties from other"
        # with short cut for properties with 'values_set'
        for p in properties:
            if hasattr(other, "properties"):
                o_prop = other.properties[p]
                new = o_prop.value_set if hasattr(o_prop, "value_set") else o_prop.value
            else:
                # `other` is a plain object: read the raw attribute
                new = getattr(other, p)
            prop = self.properties[p]
            if hasattr(prop, "value_set") and isinstance(new, set):
                old = prop.value_set
            else:
                old = prop.get()
            if new!=old:
                prop.set(new)
        self.properties_changed(properties)
| |
value for `details_level` ({0}), must be one of {1}" # noqa: E501
.format(details_level, allowed_values))
self._details_level = details_level
else:
self._details_level = allowed_values[int(details_level) if six.PY3 else long(details_level)]
@property
def use_frames_for_del_ins_elements(self):
    """bool: whether frames are used for shapes in Word Processing and for
    rectangles in Image documents when rendering deleted/inserted elements."""
    return self._use_frames_for_del_ins_elements

@use_frames_for_del_ins_elements.setter
def use_frames_for_del_ins_elements(self, use_frames_for_del_ins_elements):
    """Set use_frames_for_del_ins_elements; raises ValueError for None."""
    if use_frames_for_del_ins_elements is None:
        raise ValueError("Invalid value for `use_frames_for_del_ins_elements`, must not be `None`")  # noqa: E501
    self._use_frames_for_del_ins_elements = use_frames_for_del_ins_elements
@property
def calculate_component_coordinates(self):
    """bool: whether coordinates are calculated for changed components."""
    return self._calculate_component_coordinates

@calculate_component_coordinates.setter
def calculate_component_coordinates(self, calculate_component_coordinates):
    """Set calculate_component_coordinates; raises ValueError for None."""
    if calculate_component_coordinates is None:
        raise ValueError("Invalid value for `calculate_component_coordinates`, must not be `None`")  # noqa: E501
    self._calculate_component_coordinates = calculate_component_coordinates
@property
def mark_changed_content(self):
    """bool: presumably whether changed content is marked in the result --
    the generated docstring was copy-pasted from another flag; confirm."""
    return self._mark_changed_content

@mark_changed_content.setter
def mark_changed_content(self, mark_changed_content):
    """Set mark_changed_content; raises ValueError for None."""
    if mark_changed_content is None:
        raise ValueError("Invalid value for `mark_changed_content`, must not be `None`")  # noqa: E501
    self._mark_changed_content = mark_changed_content
@property
def mark_nested_content(self):
    """bool: whether children of a deleted/inserted element are themselves
    marked as deleted/inserted."""
    return self._mark_nested_content

@mark_nested_content.setter
def mark_nested_content(self, mark_nested_content):
    """Set mark_nested_content; raises ValueError for None."""
    if mark_nested_content is None:
        raise ValueError("Invalid value for `mark_nested_content`, must not be `None`")  # noqa: E501
    self._mark_nested_content = mark_nested_content
@property
def clone_metadata(self):
    """str: which document's metadata to clone into the result; one of
    "Default", "Source", "Target", "FileAuthor"."""
    return self._clone_metadata

@clone_metadata.setter
def clone_metadata(self, clone_metadata):
    """Set clone_metadata.

    Accepts an allowed name or the numeric string of its index.
    Raises ValueError for None, unknown names, or out-of-range indices.
    """
    if clone_metadata is None:
        raise ValueError("Invalid value for `clone_metadata`, must not be `None`")  # noqa: E501
    allowed_values = ["Default", "Source", "Target", "FileAuthor"]  # noqa: E501
    if not clone_metadata.isdigit():
        if clone_metadata not in allowed_values:
            raise ValueError(
                "Invalid value for `clone_metadata` ({0}), must be one of {1}"  # noqa: E501
                .format(clone_metadata, allowed_values))
        self._clone_metadata = clone_metadata
    else:
        # numeric-index form: validate the range instead of letting an
        # out-of-range digit string escape as a bare IndexError
        index = int(clone_metadata) if six.PY3 else long(clone_metadata)
        if not 0 <= index < len(allowed_values):
            raise ValueError(
                "Invalid value for `clone_metadata` ({0}), must be one of {1}"  # noqa: E501
                .format(clone_metadata, allowed_values))
        self._clone_metadata = allowed_values[index]
@property
def meta_data(self):
    """Metadata: user-supplied metadata for the result (may be None)."""
    return self._meta_data

@meta_data.setter
def meta_data(self, meta_data):
    """Set the user metadata; no validation, None is allowed."""
    self._meta_data = meta_data
@property
def password_save_option(self):
    """str: how the password is saved; one of "None", "Source", "Target",
    "User"."""
    return self._password_save_option

@password_save_option.setter
def password_save_option(self, password_save_option):
    """Set password_save_option.

    Accepts an allowed name or the numeric string of its index.
    Raises ValueError for None, unknown names, or out-of-range indices.
    """
    if password_save_option is None:
        raise ValueError("Invalid value for `password_save_option`, must not be `None`")  # noqa: E501
    allowed_values = ["None", "Source", "Target", "User"]  # noqa: E501
    if not password_save_option.isdigit():
        if password_save_option not in allowed_values:
            raise ValueError(
                "Invalid value for `password_save_option` ({0}), must be one of {1}"  # noqa: E501
                .format(password_save_option, allowed_values))
        self._password_save_option = password_save_option
    else:
        # numeric-index form: validate the range instead of letting an
        # out-of-range digit string escape as a bare IndexError
        index = int(password_save_option) if six.PY3 else long(password_save_option)
        if not 0 <= index < len(allowed_values):
            raise ValueError(
                "Invalid value for `password_save_option` ({0}), must be one of {1}"  # noqa: E501
                .format(password_save_option, allowed_values))
        self._password_save_option = allowed_values[index]
    @property
    def password(self):
        """Gets the password.  # noqa: E501

        Gets or sets user password to resultant document  # noqa: E501

        :return: The password.  # noqa: E501
        :rtype: str
        """
        return self._password
    @password.setter
    def password(self, password):
        """Sets the password.

        Gets or sets user password to resultant document  # noqa: E501
        Stored verbatim; no validation is performed (``None`` is accepted).

        :param password: The password.  # noqa: E501
        :type: str
        """
        self._password = password
    @property
    def diagram_master_setting(self):
        """Gets the diagram_master_setting.  # noqa: E501

        Gets or sets master for Diagram document  # noqa: E501

        :return: The diagram_master_setting.  # noqa: E501
        :rtype: DiagramMasterSetting
        """
        return self._diagram_master_setting
    @diagram_master_setting.setter
    def diagram_master_setting(self, diagram_master_setting):
        """Sets the diagram_master_setting.

        Gets or sets master for Diagram document  # noqa: E501
        Stored as given; no validation is performed (``None`` is accepted).

        :param diagram_master_setting: The diagram_master_setting.  # noqa: E501
        :type: DiagramMasterSetting
        """
        self._diagram_master_setting = diagram_master_setting
    @property
    def original_size(self):
        """Gets the original_size.  # noqa: E501

        Gets or sets original document size when picture is compared with other different formats  # noqa: E501

        :return: The original_size.  # noqa: E501
        :rtype: Size
        """
        return self._original_size
    @original_size.setter
    def original_size(self, original_size):
        """Sets the original_size.

        Gets or sets original document size when picture is compared with other different formats  # noqa: E501
        Stored as given; no validation is performed (``None`` is accepted).

        :param original_size: The original_size.  # noqa: E501
        :type: Size
        """
        self._original_size = original_size
    @property
    def header_footers_comparison(self):
        """Gets the header_footers_comparison.  # noqa: E501

        Control to turn on comparison of header/footer contents  # noqa: E501

        :return: The header_footers_comparison.  # noqa: E501
        :rtype: bool
        """
        return self._header_footers_comparison
    @header_footers_comparison.setter
    def header_footers_comparison(self, header_footers_comparison):
        """Sets the header_footers_comparison.

        Control to turn on comparison of header/footer contents  # noqa: E501

        :param header_footers_comparison: The header_footers_comparison.  # noqa: E501
        :type: bool
        :raises ValueError: if ``header_footers_comparison`` is ``None`` (the
            flag is required; both ``True`` and ``False`` are valid).
        """
        if header_footers_comparison is None:
            raise ValueError("Invalid value for `header_footers_comparison`, must not be `None`")  # noqa: E501
        self._header_footers_comparison = header_footers_comparison
    @property
    def paper_size(self):
        """Gets the paper_size.  # noqa: E501

        Gets or sets the result document paper size  # noqa: E501
        One of "Default" or "A0" through "A8" (enforced by the setter).

        :return: The paper_size.  # noqa: E501
        :rtype: str
        """
        return self._paper_size
@paper_size.setter
def paper_size(self, paper_size):
"""
Sets the paper_size.
Gets or sets the result document paper size # noqa: E501
:param paper_size: The paper_size. # noqa: E501
:type: str
"""
if paper_size is None:
raise ValueError("Invalid value for `paper_size`, must not be `None`") # noqa: E501
allowed_values = ["Default", "A0", "A1", "A2", "A3", "A4", "A5", "A6", "A7", "A8"] # noqa: E501
if not paper_size.isdigit():
if paper_size not in allowed_values:
raise ValueError(
"Invalid value for `paper_size` ({0}), must be one of {1}" # noqa: E501
.format(paper_size, allowed_values))
self._paper_size = paper_size
else:
self._paper_size = allowed_values[int(paper_size) if six.PY3 else long(paper_size)]
    @property
    def sensitivity_of_comparison(self):
        """Gets the sensitivity_of_comparison.  # noqa: E501

        Gets or sets a sensitivity of comparison. Default is 75  # noqa: E501

        :return: The sensitivity_of_comparison.  # noqa: E501
        :rtype: int
        """
        return self._sensitivity_of_comparison
    @sensitivity_of_comparison.setter
    def sensitivity_of_comparison(self, sensitivity_of_comparison):
        """Sets the sensitivity_of_comparison.

        Gets or sets a sensitivity of comparison. Default is 75  # noqa: E501

        :param sensitivity_of_comparison: The sensitivity_of_comparison.  # noqa: E501
        :type: int
        :raises ValueError: if ``sensitivity_of_comparison`` is ``None``
            (no range check is performed here).
        """
        if sensitivity_of_comparison is None:
            raise ValueError("Invalid value for `sensitivity_of_comparison`, must not be `None`")  # noqa: E501
        self._sensitivity_of_comparison = sensitivity_of_comparison
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the | |
+ 20.0 and self.y1 > oy1 - 20.0 and self.y2 < oy2 + 20.0:
o.treasure = False # If robot contacts landmark with treasure
ID = o.treasureID
canvas.delete(ID) # Delete treasure object from list
o.treasureID = ""
self.points += 100 #Add 100 to points as treasure has been found
self.rXPos += self.vx
self.rYPos += self.vy
canvas.delete(self.robot)
self.robotDraw()
canvas.update()
self.updateInfo()
time.sleep(0.1)
self.done = True
self.updateInfo()
    def robotStop(self): #Function to stop robot by changing values
        """Stop this robot: clear its run/done flags and erase it from the canvas."""
        self.run = False #Run changes to false to stop
        self.done = False #Done changes back to default
        canvas.delete("robotTag") #Deletes robot from canvas (module-level Tk canvas)
def updateInfo(self): #Function to update info about robot in GUI
if self.done == True: #If robot is done
self.run = False
rb1Status.config(text='Status: Done') #Change status to done
global rb1T
rb1T.Done() #Stop timer but still display time
elif self.run == True: #Constantly update info if robot is running
rb1Position.config(text='Position: x:' + str(int(self.x1)) + " y:" + str(int(self.y1))) #Change x/y position info
rb1Status.config(text='Status: ' + self.status) #Chnage status
#Yet to add other labels yet
rb1Points.config(text='Points: ' + str(self.points)) #Update points
else:
ResetLabels() #Run function to reset labels to default if robot not running anymore
class Treasure:
    """A collectible treasure that gets hidden inside one of the landmark obstacles."""
    def __init__(self, n, x=0, y=0, size=12, colour='#ffd700'):
        # Appearance and identity only; the actual position is chosen later
        # by checkLandmark() from a randomly picked landmark.
        self.colour = colour
        self.size = size
        self.n = n
        # Unique tag so the robot (and canvas.delete) can address this treasure.
        self.id = "Treasure" + str(n)
    def checkLandmark(self):
        """Pick a random empty landmark and centre this treasure on it."""
        global intPlay
        if intPlay <= 1:
            choice = random.randint(0, len(obstacles) - 1)
            if not obstacles[choice].treasure:
                # Centre of the chosen landmark's bounding box.
                lx1, ly1, lx2, ly2 = canvas.coords(obstacles[choice].lndmrk)
                self.x = (lx1 + lx2) / 2
                self.y = (ly1 + ly2) / 2
                obstacles[choice].treasure = True
                obstacles[choice].treasureID = self.id
            else:
                # Landmark already occupied: retry with another random pick.
                self.checkLandmark()
    def DrawTreasure(self, canvas):
        """Place this treasure on a free landmark and draw its oval on the canvas."""
        self.checkLandmark()
        self.shape = canvas.create_oval(self.x, self.y, self.x + self.size, self.y + self.size, outline=self.colour, fill=self.colour, tag=self.id)
class Timer:
    """HH:MM:SS stopwatch driven by Tk's ``after`` callback loop.

    ``Count`` repaints the supplied Tk label once per second and, every five
    seconds, asks the four global Light objects to change colour.

    Fixes vs. the previous version:
      * the Python-2-only ``exec str(...)`` wrappers around ``label.config``
        were no-ops around a side effect — the config call is now made
        directly (also makes the class valid Python 3);
      * the 30-line nested-if zero-padding pyramid is replaced by a single
        ``"%02d:%02d:%02d"`` format that produces identical strings.
    """
    def __init__(self, label):
        # Elapsed-time components
        self.second = 0
        self.minute = 0
        self.hour = 0
        self.time = ""       # last formatted "hh:mm:ss" string
        self.stop = False    # True when ticking should halt
        self.done = False    # True when stopped but the time should stay shown
        self.label = label   # Tk label used to display the time
        self.sections = {}
    def Stop(self):
        """Stop the timer and reset the displayed time (game aborted)."""
        global intPlay
        intPlay = 0
        # Halt ticking; done stays False because the treasure was not found.
        self.stop = True
        self.done = False
    def Done(self):
        """Stop the timer but keep the final time on display."""
        self.stop = True
        self.done = True
    def Count(self):
        """Advance the clock by one second, repaint the label and reschedule."""
        if self.stop == False:
            self.second = self.second + 1
            if self.second == 60:
                # Roll seconds into minutes, and minutes into hours.
                self.minute = self.minute + 1
                self.second = 0
                if self.minute == 60:
                    self.hour = self.hour + 1
                    self.minute = 0
            # The four lights change colour every 5 seconds.
            if self.second % 5 == 0:
                light1.ChangeLight()
                light2.ChangeLight()
                light3.ChangeLight()
                light4.ChangeLight()
            # Zero-padded hh:mm:ss display.
            self.time = "%02d:%02d:%02d" % (self.hour, self.minute, self.second)
            self.label.config(text=self.time)
            # Tick again in one second (1000 ms).
            self.label.after(1000, self.Count)
        else:
            if self.done == True:
                # Finished: keep showing the final time.
                self.label.config(text=self.time)
            else:
                # Stopped/aborted: reset the display.
                self.label.config(text="00:00:00")
#Class for lights
class Light():
def __init__(self, number):
self.width = 854 #width of canvas
self.height = 480 #height of canvas
self.sectionWidth = 213.5 #width of one section (1/4 of whole width)
self.number = number #number of section
self.colour = "" #string to hold colour of section
    def CreateLight(self): #Function to create the lights for GUI
        """Create this section's light rectangle, dashed outline and text label.

        Writes the created Tk objects into module-level globals so other code
        can reconfigure them later, and records the section colour in
        ``self.colour``. Which branch runs is selected by ``self.number`` (1-4).
        """
        #globalising objects to be made
        global lightcolour1
        global lightcolour2
        global lightcolour3
        global lightcolour4
        global section1
        global section2
        global section3
        global section4
        global light1Text
        global light2Text
        global light3Text
        global light4Text
        if self.number == 1: #if section 1, place in left most position
            lightcolour1=canvas.create_rectangle(2, 2, self.sectionWidth, 23, fill="#2ecc71", tag="1") #Create light block and tag number
            section1=canvas.create_rectangle(0, self.height, self.sectionWidth, 23, dash=(10,10), tag="Green") #Create dashed section and tag colour
            light1Text=Label(font=('Helvetica', 8), text='Green', bg="#2ecc71") #Create label to match colour of section
            light1Text.place(x=100, y=13) #Place label in correct position
            self.colour = "Green" #Change string to hold value of light
        elif self.number == 2: #If section 2, place in left mid position
            lightcolour2=canvas.create_rectangle(self.sectionWidth, 2, self.sectionWidth * self.number, 23, fill="#f39c12", tag="2") #Create light block and tag number
            section2=canvas.create_rectangle(self.sectionWidth, self.height, self.sectionWidth * 2, 23, dash=(10,10), tag="Amber") #Create dashed section and tag colour
            light2Text=Label(font=('Helvetica', 8), text='Amber', bg="#f39c12") #Create label to match colour of section
            light2Text.place(x=310, y=13) #Place label in correct position
            self.colour = "Amber" #Change string to hold value of light
        elif self.number == 3: #If section 3, place in right mid position
            lightcolour3=canvas.create_rectangle(self.sectionWidth * (self.number - 1), 2, self.sectionWidth * self.number, 23, fill="#e74c3c", tag="3") #Create light block and tag number
            section3=canvas.create_rectangle(self.sectionWidth * 2, self.height, self.sectionWidth * 3, 23, dash=(10,10), tag="Red") #Create dashed section and tag colour
            light3Text=Label(font=('Helvetica', 8), text='Red', bg="#e74c3c") #Create label to match colour of section
            light3Text.place(x=530, y=13) #Place label in correct position
            self.colour = "Red" #Change string to hold value of light
        elif self.number == 4: #If section 4, place in right most position
            lightcolour4=canvas.create_rectangle(self.sectionWidth * (self.number - 1), 2, ((self.sectionWidth * self.number) - 1), 23, fill="#2ecc71", tag="4") #Create light block and tag number
            section4=canvas.create_rectangle(self.sectionWidth * 3, self.height, ((self.sectionWidth * 4) - 1), 23, dash=(10,10), tag="Green") #Create dashed section and tag colour
            light4Text=Label(font=('Helvetica', 8), text='Green', bg="#2ecc71") #Create label to match colour of section
            light4Text.place(x=740, y=13) #Place label in correct position
            self.colour = "Green" #Change string to hold value of light
def ChangeLight(self): #Function to change lights, called in timer class count function
intColour = random.randrange(1,4) # Random selection of traffic lights ranging | |
# dftd3/dftd3.py
#!/usr/bin/python
from __future__ import print_function, absolute_import
# THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Comments and/or additions are welcome (send e-mail to:
# <EMAIL>
#######################################################################
# dftd3.py #
# #
# This was a little exercise to translate Grimme's D3 #
# Fortran code into Python. There is no new science as such! #
# It is possible to implement D3 corrections directly within most #
# electronic structure packages so the only possible use for this #
# is pedagogic or to look at individual terms of the total #
# D3-correction between a pair of atoms or molecules. #
# This code will read a Gaussian formatted input/output file and #
# compute the D3-density independent dispersion terms without #
# modification. Zero and Becke-Johnson damping schemes are both #
# implemented. #
#######################################################################
####### Written by: <NAME> and <NAME> ####################
####### Last modified: Mar 20, 2016 #################################
#######################################################################
# Dependent on parameter file
try:
from .pars import *
except:
from pars import *
# For reading Gaussian formatted input/output files
try:
from .ccParse import *
except:
from ccParse import *
# Python libraries
import random, sys, os, subprocess, string, math
## Check for integer when parsing ##
def is_number(s):
    """Return True if `s` parses as an integer, False otherwise.

    Also catches TypeError so non-string arguments (e.g. None, lists) report
    False instead of crashing the parser loop.
    """
    try:
        int(s)
        return True
    except (ValueError, TypeError):
        return False
## Arrays for attractive and repulsive interactions ##
# Accumulators for attractive / repulsive / total dispersion contributions
attractive_vdw=[0]
repulsive_vdw=[0]
total_vdw=[0]
## Functional Specific D3 parameters
rs8 = 1.0
repfac= 1.0
## Distance and energy conversion factors ##
autoang = 0.52917726          # Bohr -> Angstrom
autokcal = 627.509541         # Hartree -> kcal/mol
c6conv=(0.001/2625.4999)/(0.052917726**6)   # J/mol nm^6 -> atomic units for C6
## Global D3 parameters ##
## Exponents used in distance dependent damping factors for R6, R8 and R10 terms
alpha6 = 14
alpha8 = alpha6 + 2
alpha10 = alpha8 + 2
## Constants used to determine fractional connectivities between 2 atoms:
## k1 is the exponent used in summation, k2 is used a fraction of the summed single-bond radii
k1 = 16.0
k2 = 4.0/3.0
k3 = -4.0   # exponent in the Gaussian weighting of reference C6 values
## D3 is parameterized up to element 94
max_elem = 94
## maximum connectivity
maxc = 5
## From connectivity, establish if there is more than one molecule
def getMollist(bondmatrix, startatom):
    """Return the indices of all atoms connected to `startatom`.

    Follows the 0/1 adjacency `bondmatrix` transitively, so the result is the
    whole molecule containing `startatom`. The list grows while it is being
    iterated, which performs the breadth-first expansion in place.

    Cleanups vs. the previous version: the unused `molecule1`/`nextlot`
    locals are gone and the manual `alreadyfound` flag is replaced by a
    `not in` membership test.
    """
    atomlist = [startatom]
    # The outer pass counter is kept for parity with the original algorithm;
    # the growing-list iteration already finds everything in the first pass.
    count = 0
    while count < 100:
        for atom in atomlist:
            for i in range(0, len(bondmatrix[atom])):
                if bondmatrix[atom][i] == 1 and i not in atomlist:
                    atomlist.append(i)
        count = count + 1
    return atomlist
## DFT derived values for diatomic cutoff radii from Grimme ##
## These are read from pars.py and converted from atomic units into Angstrom
# Unpack the flat, lower-triangular r0ab list (from pars.py) into a symmetric
# max_elem x max_elem matrix of cutoff radii, converted from Bohr to Angstrom.
r = [[0]*max_elem for x in range(max_elem)]
k=0
for i in range(0,max_elem):
    for j in range(0,i+1):
        r[i][j]=r0ab[k]/autoang
        r[j][i]=r0ab[k]/autoang
        k=k+1
## PBE0/def2-QZVP atomic values for multipole coefficients read from pars.py ##
# r2r4 is rescaled in place: sqrt(0.5 * r2r4[i] * sqrt(Z)) for atomic number Z=i+1.
for i in range(0,max_elem):
    dum=0.5*r2r4[i]*float(i+1)**0.5
    r2r4[i]=math.pow(dum,0.5)
## Reference systems are read in to compute coordination number dependent dispersion coefficients
def copyc6(max_elem, maxc):
    """Unpack the flat `pars` reference data into the C6 lookup table.

    Returns a max_elem x max_elem table whose entries are maxc x maxc grids;
    populated grid cells hold ``[C6, CN_i, CN_j]`` reference triples (the
    mirrored entry swaps the two coordination numbers), unpopulated cells
    keep the integer 0 placeholder.
    """
    c6ab = [[0] * max_elem for _ in range(max_elem)]
    for iat in range(max_elem):
        for jat in range(max_elem):
            c6ab[iat][jat] = [[0] * maxc for _ in range(maxc)]
    nlines = 32385
    for nn in range(nlines):
        base = nn * 5
        iat = int(pars[base + 1]) - 1
        jat = int(pars[base + 2]) - 1
        # Values above 99 encode the reference-geometry index in the hundreds digit.
        iadr = 0
        jadr = 0
        while iat > 99:
            iadr = iadr + 1
            iat = iat - 100
        while jat > 99:
            jadr = jadr + 1
            jat = jat - 100
        c6ab[iat][jat][iadr][jadr] = [pars[base], pars[base + 3], pars[base + 4]]
        c6ab[jat][iat][jadr][iadr] = [pars[base], pars[base + 4], pars[base + 3]]
    return c6ab
# Obtain the C6 coefficient for the interaction between atoms a and b
def getc6(maxc, max_elem, c6ab, mxc, atomtype, cn, a, b):
    """Interpolate the C6 coefficient for the atom pair (a, b).

    Reference C6 values are Gaussian-weighted (exponent k3) by how closely
    their reference coordination numbers match the actual ones in `cn`; if no
    reference contributes, the last positive C6 seen is used as a fallback.
    """
    # Map the two atom labels onto element indices (elements from pars.py).
    for idx in range(max_elem):
        if atomtype[a].find(elements[idx]) > -1:
            iat = idx
        if atomtype[b].find(elements[idx]) > -1:
            jat = idx
    c6mem = -1.0E99
    rsum = 0.0
    csum = 0.0
    c6 = 0.0
    for i in range(mxc[iat]):
        for j in range(mxc[jat]):
            entry = c6ab[iat][jat][i][j]
            if isinstance(entry, (list, tuple)):
                c6 = entry[0]
                if c6 > 0:
                    c6mem = c6
                    # Squared distance in coordination-number space.
                    dist = (entry[1] - cn[a]) ** 2 + (entry[2] - cn[b]) ** 2
                    weight = math.exp(k3 * dist)
                    rsum = rsum + weight
                    csum = csum + weight * c6
    if rsum > 0:
        c6 = csum / rsum
    else:
        c6 = c6mem
    return c6
# Calculation of atomic coordination numbers
def ncoord(natom, rcov, atomtype, xco, yco, zco, max_elem, autoang, k1, k2):
    """Return the fractional coordination number of every atom.

    Each neighbour contributes through a smooth damping function of its
    distance relative to the k2-scaled sum of covalent radii, with steepness
    k1 (element lookup uses the `elements` table from pars.py).
    """
    cn = []
    for i in range(natom):
        total = 0.0
        for iat in range(natom):
            if iat == i:
                continue
            dx = xco[iat] - xco[i]
            dy = yco[iat] - yco[i]
            dz = zco[iat] - zco[i]
            # math.pow kept (rather than sqrt) to reproduce the original floats.
            dist = math.pow(dx * dx + dy * dy + dz * dz, 0.5)
            for idx in range(max_elem):
                if atomtype[i].find(elements[idx]) > -1:
                    Zi = idx
                if atomtype[iat].find(elements[idx]) > -1:
                    Ziat = idx
            rco = (rcov[Zi] + rcov[Ziat]) * k2
            damp = 1.0 / (1.0 + math.exp(-k1 * (rco / dist - 1.0)))
            total = total + damp
        cn.append(total)
    return cn
# linear interpolation
def lin(i1, i2):
    """Map an unordered index pair onto a single triangular-packing index."""
    hi = max(i1, i2)
    lo = min(i1, i2)
    return lo + hi * (hi - 1) / 2
## Build the global C6 reference table once at import time (data from pars.py)
c6ab = copyc6(max_elem, maxc)
#verbose = None
## The computation of the D3 dispersion correction
class calcD3:
def __init__(self, fileData, functional, s6, rs6, s8, a1, a2, damp, abc, intermolecular, pairwise, verbose):
#verbose = True
## Arrays for atoms and Cartesian coordinates ##
try:
atomtype = fileData.ATOMTYPES
cartesians = fileData.CARTESIANS
except:
atomtype = fileData.atom_types
cartesians = fileData.cartesians
natom = len(atomtype)
xco=[]; yco=[]; zco=[]
for at in cartesians:
xco.append(at[0]); yco.append(at[1]); zco.append(at[2])
## In case something clever needs to be done wrt inter and intramolecular interactions
if hasattr(fileData,"BONDINDEX"):
molAatoms = getMollist(fileData.BONDINDEX,0)
mols = []
for j in range(0,natom):
mols.append(0)
for atom in molAatoms:
if atom == j: mols[j] = 1
## Names are pretty obvious...
self.attractive_r6_vdw = 0.0; self.attractive_r8_vdw = 0.0; self.repulsive_abc = 0.0
mxc=[0]
for j in range(0,max_elem):
mxc.append(0)
for k in range(0,natom):
if atomtype[k].find(elements[j])>-1:
for l in range(0,maxc):
if isinstance(c6ab[j][j][l][l], (list, tuple)):
if c6ab[j][j][l][l][0]>0: mxc[j]=mxc[j]+1
break
## Coordination number based on covalent radii
cn = ncoord(natom, rcov, atomtype, xco, yco, zco, max_elem, autoang, k1, k2)
## C6 - Need to calculate these from fractional coordination
#print "\n R0(Ang) CN"
#print " #########################"
x=0
for j in range(0,natom):
C6jj = getc6(maxc,max_elem,c6ab,mxc,atomtype,cn,j,j)
for k in range(0,natom):
dum = getc6(maxc,max_elem,c6ab,mxc,atomtype,cn,j,k)
x=x+dum
for k in range(0,max_elem):
if atomtype[j].find(elements[k])>-1:z=k
dum = 0.5*autoang*r[z][z]
C8jj = 3.0*C6jj*math.pow(r2r4[z],2.0)
C10jj=49.0/40.0 * math.pow(C8jj,2.0)/C6jj
#print " ",(j+1), atomtype[j], " %.5f" % dum, " %.5f" % cn[j] #, C6jj, C8jj, C10jj
#print " #########################"
icomp = [0]*100000; cc6ab = [0]*100000; r2ab = [0]*100000; dmp = [0]*100000
## Compute and output the individual components of the D3 energy correction ##
#print "\n Atoms Types C6 C8 E6 E8"
if damp == "zero":
if verbose: print("\n D3-dispersion correction with zero-damping:", end=' ')
if s6 == 0.0 or rs6 == 0.0 or s8 == 0.0:
if functional != None:
for parm in zero_parms:
if functional == parm[0]:
[s6,rs6,s8] = parm[1:4]
if verbose: print("detected", parm[0], "functional - using default zero-damping parameters")
else:
if verbose:
print(" WARNING: Damping parameters not specified and no functional could be read!\n"); sys.exit()
else:
if verbose: print(" manual parameters have been defined")
if verbose: print(" Zero-damping parameters:", "s6 =",s6, "rs6 =", rs6, "s8 =",s8)
if damp == "bj":
if verbose: print("\n D3-dispersion correction with Becke_Johnson damping:", end=' ')
if s6 == 0.0 or s8 == 0.0 or a1 == 0.0 or a2 == 0.0:
if functional != None:
for parm in bj_parms:
if functional == parm[0]:
[s6,a1,s8,a2] = parm[1:5]
if verbose: print("detected", parm[0], "functional - using default BJ-damping parameters")
else:
if verbose: print(" WARNING: Damping parameters not specified and no functional could be read!\n"); sys.exit()
else:
if verbose: print(" manual parameters have been defined")
if verbose: print(" BJ-damping parameters:", "s6 =",s6, "s8 =", s8, "a1 =",a1, "a2 =",a2)
if verbose:
if abc == False: print(" 3-body term will not be calculated\n")
else: print(" Including the Axilrod-Teller-Muto 3-body dispersion term\n")
if intermolecular == True: print(" Only computing intermolecular dispersion interactions! This is not the total D3-correction\n")
for j in range(0,natom):
## This could be used to 'switch off' dispersion between bonded or geminal atoms ##
scaling = False
for k in range(j+1,natom):
scalefactor=1.0
if intermolecular == True:
if mols[j] == mols[k]:
scalefactor = 0
print(" --- Ignoring interaction between atoms",(j+1), "and", (k+1))
if scaling==True and hasattr(fileData,"BONDINDEX"):
if fileData.BONDINDEX[j][k]==1: scalefactor = 0
for l in range (0,natom):
if fileData.BONDINDEX[j][l] != 0 and fileData.BONDINDEX[k][l]!=0 and j!=k and fileData.BONDINDEX[j][k]==0: scalefactor = 0
for m in range (0,natom):
if fileData.BONDINDEX[j][l] != 0 and fileData.BONDINDEX[l][m]!=0 and fileData.BONDINDEX[k][m]!=0 and j!=m and k!=l and fileData.BONDINDEX[j][m]==0: scalefactor=1/1.2
if k>j:
## Pythagoras in 3D to work out distance ##
xdist = xco[j]-xco[k]; ydist = yco[j]-yco[k]; zdist = zco[j]-zco[k]
totdist = math.pow(xdist,2)+math.pow(ydist,2)+math.pow(zdist,2)
totdist=math.sqrt(totdist)
C6jk = getc6(maxc,max_elem,c6ab,mxc,atomtype,cn,j,k)
## C8 parameters depend on C6 | |
For heteroscedastic inference this corresponds to the \
sqrt(exp(s^2)) with s^2 predicted value.
- Ypred_std (numpy array): Array with standard deviations computed from regular \
(homoscedastic) inference.
- pred_name (string): Name of data colum or quantity predicted (as extracted \
from the data frame using the col_true index).
"""
Ytrue = df_data.iloc[:, col_true].values
pred_name = df_data.columns[col_true]
Ypred_mean_ = np.mean(df_data.iloc[:, col_pred_start::2], axis=1)
Ypred_mean = Ypred_mean_.values
Ypred_std_ = np.std(df_data.iloc[:, col_pred_start::2], axis=1)
Ypred_std = Ypred_std_.values
yerror = Ytrue - Ypred_mean
s_ = df_data.iloc[:, col_std_pred_start::2]
s_mean = np.mean(s_, axis=1)
var = np.exp(s_mean.values) # variance
sigma = np.sqrt(var) # std
MSE = mean_squared_error(Ytrue, Ypred_mean)
print("MSE: ", MSE)
MSE_STD = np.std((Ytrue - Ypred_mean) ** 2)
print("MSE_STD: ", MSE_STD)
MAE = mean_absolute_error(Ytrue, Ypred_mean)
print("MAE: ", MAE)
r2 = r2_score(Ytrue, Ypred_mean)
print("R2: ", r2)
# p-value 'not entirely reliable, reasonable for datasets > 500'
pearson_cc, pval = pearsonr(Ytrue, Ypred_mean)
print("Pearson CC: %f, p-value: %e" % (pearson_cc, pval))
return Ytrue, Ypred_mean, yerror, sigma, Ypred_std, pred_name
def compute_statistics_quantile(
    df_data: DataFrame,
    sigma_divisor: float = 2.56,
    col_true: int = 4,
    col_pred_start: int = 6,
) -> Tuple[Array, ...]:
    """Summarize a quantile-inference data frame and print error metrics.

    The frame interleaves, per realization, three prediction columns starting
    at `col_pred_start`: the 50th percentile, a low percentile (usually the
    1st decile) and a high percentile (usually the 9th decile). The learned
    standard deviation is derived from the mean interdecile range divided by
    `sigma_divisor` (2.56 corresponds to a Gaussian's 9th-minus-1st decile).

    :param DataFrame df_data: Inference data frame in the current QTL format.
    :param float sigma_divisor: Interdecile-range-to-sigma conversion factor.
    :param int col_true: Column index of the observed value.
    :param int col_pred_start: Column index of the first predicted value.

    :return: Tuple of numpy arrays (plus the predicted column name):
        Ytrue, Ypred_mean (50th percentile mean), yerror (observed - predicted),
        sigma (learned std), Ypred_std (std over realizations), pred_name,
        Ypred_Lp_mean (low percentile), Ypred_Hp_mean (high percentile).
    """
    Ytrue = df_data.iloc[:, col_true].values
    pred_name = df_data.columns[col_true]
    # Every third column belongs to the same percentile across realizations.
    Ypred_mean = np.mean(df_data.iloc[:, col_pred_start::3], axis=1).values
    Ypred_Lp_mean = np.mean(df_data.iloc[:, col_pred_start + 1 :: 3], axis=1).values
    Ypred_Hp_mean = np.mean(df_data.iloc[:, col_pred_start + 2 :: 3], axis=1).values
    sigma = (Ypred_Hp_mean - Ypred_Lp_mean) / sigma_divisor
    yerror = Ytrue - Ypred_mean
    Ypred_std = np.std(df_data.iloc[:, col_pred_start::3], axis=1).values
    MSE = mean_squared_error(Ytrue, Ypred_mean)
    print("MSE: ", MSE)
    MSE_STD = np.std((Ytrue - Ypred_mean) ** 2)
    print("MSE_STD: ", MSE_STD)
    MAE = mean_absolute_error(Ytrue, Ypred_mean)
    print("MAE: ", MAE)
    r2 = r2_score(Ytrue, Ypred_mean)
    print("R2: ", r2)
    # p-value 'not entirely reliable, reasonable for datasets > 500'
    pearson_cc, pval = pearsonr(Ytrue, Ypred_mean)
    print("Pearson CC: %f, p-value: %e" % (pearson_cc, pval))
    return (
        Ytrue,
        Ypred_mean,
        yerror,
        sigma,
        Ypred_std,
        pred_name,
        Ypred_Lp_mean,
        Ypred_Hp_mean,
    )
def split_data_for_empirical_calibration(
    Ytrue: Array, Ypred: Array, sigma: Array, cal_split: float = 0.8
) -> Tuple[Array, ...]:
    """Randomly split prediction statistics into calibration and test parts.

    A random permutation of the sample indices is drawn; the first
    ``cal_split`` fraction is used for estimating the calibration
    relationship, the remainder is reserved for testing it.

    :param numpy array Ytrue: True (observed) values.
    :param numpy array Ypred: Predicted values.
    :param numpy array sigma: Standard deviations learned by the model (or
        computed from the predictions for homoscedastic inference).
    :param float cal_split: Fraction of the data used for calibration;
        assumed to lie in (0, 1). (Default: 0.8.)

    :return: Tuple of numpy arrays:
        index_perm_total (the permutation used; its first `num_cal` entries
        are the calibration samples), pSigma_cal, pSigma_test, pPred_cal,
        pPred_test, true_cal, true_test.
    """
    # shuffle data for calibration
    num_pred_total = sigma.shape[0]
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # int performs the same truncation.
    num_cal = int(num_pred_total * cal_split)
    index_perm_total = np.random.permutation(range(num_pred_total))
    # Permute all three arrays with the same index order.
    pSigma_perm_all = sigma[index_perm_total]
    pPred_perm_all = Ypred[index_perm_total]
    true_perm_all = Ytrue[index_perm_total]
    # Split into calibration and testing portions.
    pSigma_cal = pSigma_perm_all[:num_cal]
    pSigma_test = pSigma_perm_all[num_cal:]
    pPred_cal = pPred_perm_all[:num_cal]
    pPred_test = pPred_perm_all[num_cal:]
    true_cal = true_perm_all[:num_cal]
    true_test = true_perm_all[num_cal:]
    print("Size of calibration set: ", true_cal.shape)
    print("Size of test set: ", true_test.shape)
    return (
        index_perm_total,
        pSigma_cal,
        pSigma_test,
        pPred_cal,
        pPred_test,
        true_cal,
        true_test,
    )
def compute_empirical_calibration_interpolation(
pSigma_cal: Array, pPred_cal: Array, true_cal: Array, cv: int = 10
):
"""
Use the arrays provided to estimate an empirical mapping between
standard deviation and absolute value of error, both of which have been
observed during inference. Since most of the times the prediction
statistics are very noisy, two smoothing steps (based on scipy's savgol
filter) are performed. Cubic Hermite splines (PchipInterpolator) are
constructed for interpolation. This type of splines preserves the
monotonicity in the interpolation data and does not overshoot if the data
is not smooth. The overall process of constructing a spline to express the
mapping from standard deviation to error is composed of smoothing-
interpolation-smoothing-interpolation.
:param numpy array pSigma_cal: Part of the standard deviations array to use for calibration.
:param numpy array pPred_cal: Part of the predictions array to use for calibration.
:param numpy array true_cal: Part of the true (observed) values array to use for calibration.
:param int cv: Number of cross validations folds to run to determine a 'good' fit.
:return: Tuple of python objects
- splineobj_best : scipy.interpolate python object \
A python object from scipy.interpolate that computes a \
cubic Hermite splines (PchipInterpolator) constructed \
to express the mapping from standard deviation to error after a \
'drastic' smoothing of the predictions. A 'good' fit is \
determined by taking the spline for the fold that produces \
the smaller mean absolute error in testing data (not used \
for the smoothing / interpolation).
- splineobj2 : scipy.interpolate python object \
A python object from scipy.interpolate that computes a \
cubic Hermite splines (PchipInterpolator) constructed \
to express the mapping from standard deviation to error. This \
spline is generated for interpolating the samples generated \
after the smoothing of the first interpolation spline (i.e. \
splineobj_best).
"""
xs3 = pSigma_cal # std
z3 = np.abs(true_cal - pPred_cal) # abs error
test_split = 1.0 / cv
xmin = np.min(pSigma_cal)
xmax = np.max(pSigma_cal)
warnings.filterwarnings("ignore")
print("--------------------------------------------")
print("Using CV for selecting calibration smoothing")
print("--------------------------------------------")
min_error = np.inf
for cv_ in range(cv):
# Split | |
model_only: bool = True, verbose: bool = True):
"""
This function saves the transformation pipeline and trained model object
into the current working directory as a pickle file for later use.
Example
-------
>>> from pycaret.datasets import get_data
>>> data = get_data('airline')
>>> from pycaret.time_series import *
>>> exp_name = setup(data = data, fh = 12)
>>> arima = create_model('arima')
>>> save_model(arima, 'saved_arima_model')
model: sktime compatible object
Trained model object
model_name: str
Name of the model.
model_only: bool, default = True
Parameter not in use for now. Behavior may change in future.
verbose: bool, default = True
Success message is not printed when verbose is set to False.
Returns:
Tuple of the model object and the filename.
"""
return _CURRENT_EXPERIMENT.save_model(
model=model, model_name=model_name, model_only=model_only, verbose=verbose
)
# not using check_if_global_is_not_none on purpose
def load_model(
    model_name,
    platform: Optional[str] = None,
    authentication: Optional[Dict[str, str]] = None,
    verbose: bool = True,
):
    """
    Load a previously saved pipeline/model.

    Example
    -------
    >>> from pycaret.time_series import load_model
    >>> saved_arima = load_model('saved_arima_model')

    model_name: str
        Name of the model.

    platform: str, default = None
        Name of the cloud platform. Currently supported platforms:
        'aws', 'gcp' and 'azure'.

    authentication: dict, default = None
        Dictionary of applicable authentication tokens.

        when platform = 'aws':
        {'bucket' : 'S3-bucket-name'}

        when platform = 'gcp':
        {'project': 'gcp-project-name', 'bucket' : 'gcp-bucket-name'}

        when platform = 'azure':
        {'container': 'azure-container-name'}

    verbose: bool, default = True
        Success message is not printed when verbose is set to False.

    Returns:
        Trained Model
    """
    # Unlike the other module-level wrappers, loading must work even before
    # ``setup`` has run, so fall back to a fresh experiment instance.
    experiment = (
        _EXPERIMENT_CLASS() if _CURRENT_EXPERIMENT is None else _CURRENT_EXPERIMENT
    )
    return experiment.load_model(
        model_name=model_name,
        platform=platform,
        authentication=authentication,
        verbose=verbose,
    )
# @check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
# def automl(optimize: str = "R2", use_holdout: bool = False, turbo: bool = True) -> Any:
# """
# This function returns the best model out of all trained models in
# current session based on the ``optimize`` parameter. Metrics
# evaluated can be accessed using the ``get_metrics`` function.
# Example
# -------
# >>> from pycaret.datasets import get_data
# >>> boston = get_data('boston')
# >>> from pycaret.regression import *
# >>> exp_name = setup(data = boston, target = 'medv')
# >>> top3 = compare_models(n_select = 3)
# >>> tuned_top3 = [tune_model(i) for i in top3]
# >>> blender = blend_models(tuned_top3)
# >>> stacker = stack_models(tuned_top3)
# >>> best_mae_model = automl(optimize = 'MAE')
# optimize: str, default = 'R2'
# Metric to use for model selection. It also accepts custom metrics
# added using the ``add_metric`` function.
# use_holdout: bool, default = False
# When set to True, metrics are evaluated on holdout set instead of CV.
# turbo: bool, default = True
# When set to True and use_holdout is False, only models created with default fold
# parameter will be considered. If set to False, models created with a non-default
# fold parameter will be scored again using default fold settings, so that they can be
# compared.
# Returns:
# Trained Model
# """
# return _CURRENT_EXPERIMENT.automl(
# optimize=optimize, use_holdout=use_holdout, turbo=turbo
# )
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def pull(pop: bool = False) -> pd.DataFrame:
    """
    Return the last printed score grid.

    Use ``pull`` after any training function to capture the score grid
    as a ``pandas.DataFrame``.

    pop: bool, default = False
        If True, the returned dataframe is popped (removed) from the
        display container.

    Returns:
        pandas.DataFrame
    """
    # Delegate to the active experiment (guaranteed non-None by the decorator).
    experiment = _CURRENT_EXPERIMENT
    return experiment.pull(pop=pop)
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def models(
    type: Optional[str] = None, internal: bool = False, raise_errors: bool = True,
) -> pd.DataFrame:
    """
    Return the table of models available in the model library.

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> data = get_data('airline')
    >>> from pycaret.time_series import *
    >>> exp_name = setup(data = data, fh = 12)
    >>> models()

    type: str, default = None
        - baseline : filters and only return baseline models
        - classical : filters and only return classical models
        - linear : filters and only return linear models
        - tree : filters and only return tree based models
        - neighbors : filters and only return neighbors models

    internal: bool, default = False
        When True, will return extra columns and rows used internally.

    raise_errors: bool, default = True
        When False, will suppress all exceptions, ignoring models
        that couldn't be created.

    Returns:
        pandas.DataFrame
    """
    # ``type`` shadows the builtin, but renaming would break keyword callers.
    filter_kwargs = {
        "type": type,
        "internal": internal,
        "raise_errors": raise_errors,
    }
    return _CURRENT_EXPERIMENT.models(**filter_kwargs)
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def get_metrics(
    reset: bool = False, include_custom: bool = True, raise_errors: bool = True,
) -> pd.DataFrame:
    """
    Return the table of metrics available for cross-validation.

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> airline = get_data('airline')
    >>> from pycaret.time_series import *
    >>> exp_name = setup(data = airline, fh = 12)
    >>> all_metrics = get_metrics()

    reset: bool, default = False
        When True, will reset all changes made using the ``add_metric``
        and ``remove_metric`` function.

    include_custom: bool, default = True
        Whether to include user added (custom) metrics or not.

    raise_errors: bool, default = True
        If False, will suppress all exceptions, ignoring models that
        couldn't be created.

    Returns:
        pandas.DataFrame
    """
    # Forward all filters to the active experiment.
    experiment = _CURRENT_EXPERIMENT
    return experiment.get_metrics(
        reset=reset,
        include_custom=include_custom,
        raise_errors=raise_errors,
    )
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def add_metric(
    id: str, name: str, score_func: type, greater_is_better: bool = True, **kwargs,
) -> pd.Series:
    """
    Add a custom metric to be used for cross-validation.

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> airline = get_data('airline')
    >>> from pycaret.time_series import *
    >>> exp_name = setup(data = airline, fh = 12)
    >>> from sklearn.metrics import explained_variance_score
    >>> add_metric('evs', 'EVS', explained_variance_score)

    id: str
        Unique id for the metric.

    name: str
        Display name of the metric.

    score_func: type
        Score function (or loss function) with signature ``score_func(y, y_pred, **kwargs)``.

    greater_is_better: bool, default = True
        Whether ``score_func`` is higher the better or not.

    **kwargs:
        Arguments to be passed to score function.

    Returns:
        pandas.Series
    """
    # Assemble the metric definition, then forward everything (including any
    # extra score-function arguments) to the active experiment.
    metric_definition = {
        "id": id,
        "name": name,
        "score_func": score_func,
        "greater_is_better": greater_is_better,
    }
    return _CURRENT_EXPERIMENT.add_metric(**metric_definition, **kwargs)
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def remove_metric(name_or_id: str):
    """
    Remove a metric from cross-validation.

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> data = get_data('airline')
    >>> from pycaret.time_series import *
    >>> exp_name = setup(data = data, fh = 12)
    >>> remove_metric('MAPE')

    name_or_id: str
        Display name or ID of the metric.

    Returns:
        None
    """
    # Delegate removal to the active experiment.
    experiment = _CURRENT_EXPERIMENT
    return experiment.remove_metric(name_or_id=name_or_id)
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def get_logs(experiment_name: Optional[str] = None, save: bool = False) -> pd.DataFrame:
    """
    Return a table of experiment logs. Only works when ``log_experiment``
    is True when initializing the ``setup`` function.

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> data = get_data('airline')
    >>> from pycaret.time_series import *
    >>> exp_name = setup(data = data, fh = 12)
    >>> best = compare_models()
    >>> exp_logs = get_logs()

    experiment_name: str, default = None
        When None current active run is used.

    save: bool, default = False
        When set to True, csv file is saved in current working directory.

    Returns:
        pandas.DataFrame
    """
    # Delegate to the active experiment.
    experiment = _CURRENT_EXPERIMENT
    return experiment.get_logs(experiment_name=experiment_name, save=save)
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def get_config(variable: str):
    """
    Retrieve one of the global variables created when initializing the
    ``setup`` function. The following variables are accessible:

    - X: Period/Index of X
    - y: Time Series as pd.Series
    - X_train: Period/Index of X_train
    - y_train: Time Series as pd.Series (Train set only)
    - X_test: Period/Index of X_test
    - y_test: Time Series as pd.Series (Test set only)
    - fh: forecast horizon
    - enforce_pi: enforce prediction interval in models
    - seed: random state set through session_id
    - prep_pipe: Transformation pipeline
    - n_jobs_param: n_jobs parameter used in model training
    - html_param: html_param configured through setup
    - master_model_container: model storage container
    - display_container: results display container
    - exp_name_log: Name of experiment
    - logging_param: log_experiment param
    - log_plots_param: log_plots param
    - USI: Unique session ID parameter
    - data_before_preprocess: data before preprocessing
    - gpu_param: use_gpu param configured through setup
    - fold_generator: CV splitter configured in fold_strategy
    - fold_param: fold params defined in the setup
    - seasonality_present: seasonality as detected in the setup
    - seasonality_period: seasonality_period as detected in the setup

    Example
    -------
    >>> from pycaret.datasets import get_data
    >>> airline = get_data('airline')
    >>> from pycaret.time_series import *
    >>> exp_name = setup(data = airline, fh = 12)
    >>> X_train = get_config('X_train')

    Returns:
        Global variable
    """
    # Look the variable up on the active experiment.
    experiment = _CURRENT_EXPERIMENT
    return experiment.get_config(variable=variable)
@check_if_global_is_not_none(globals(), _CURRENT_EXPERIMENT_DECORATOR_DICT)
def set_config(variable: str, value):
"""
This function resets the global variables. Following variables are
accessible:
- X: Period/Index of X
- y: Time Series as pd.Series
- X_train: Period/Index of X_train
- y_train: Time Series as pd.Series (Train set only)
- X_test: Period/Index of X_test
- y_test: Time Series as pd.Series (Test set only)
- fh: | |
<gh_stars>10-100
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
"""
Functions for bootstrapping elements
(pipes, configuration, etc)
"""
from __future__ import annotations
from meerschaum.utils.typing import Union, Any, Sequence, SuccessTuple, Optional, Tuple, List
def bootstrap(
    action: Optional[List[str]] = None,
    **kw: Any
) -> SuccessTuple:
    """
    Bootstrap an element (pipes, connectors, config).

    Command:
        `bootstrap {option}`

    Example:
        `bootstrap pipes`
    """
    from meerschaum.utils.misc import choose_subaction
    # Map each sub-action name to its handler; `choose_subaction` dispatches
    # based on the first word of `action`.
    handlers = {
        'pipes': _bootstrap_pipes,
        'config': _bootstrap_config,
        'connectors': _bootstrap_connectors,
    }
    return choose_subaction([] if action is None else action, handlers, **kw)
def _bootstrap_pipes(
    action : Optional[List[str]] = None,
    connector_keys : Optional[List[str]] = None,
    metric_keys : Optional[List[str]] = None,
    location_keys : Optional[List[Optional[str]]] = None,
    yes : bool = False,
    force : bool = False,
    noask : bool = False,
    debug : bool = False,
    **kw : Any
) -> SuccessTuple:
    """
    Create a new pipe.
    If no keys are provided, guide the user through the steps required.

    :param connector_keys: Explicit connector keys for the pipes to create.
    :param metric_keys: Explicit metric keys for the pipes to create.
    :param location_keys: Explicit location keys (may contain ``None``).
    :param yes: Assume "yes" for confirmation prompts.
    :param force: Skip confirmation prompts entirely (deletes existing pipes).
    :param noask: Do not prompt; use defaults for confirmations.
    :param debug: Enable debug output.
    :returns: A ``(success, message)`` tuple; success is True when at least
        one pipe was bootstrapped.
    """
    from meerschaum import get_pipes
    from meerschaum.config import get_config
    from meerschaum.utils.warnings import info, warn
    from meerschaum.utils.prompt import yes_no, prompt, choose
    from meerschaum.utils.misc import get_connector_labels
    from meerschaum.utils.formatting._shell import clear_screen

    if connector_keys is None:
        connector_keys = []
    if metric_keys is None:
        metric_keys = []
    if location_keys is None:
        location_keys = []

    _clear = get_config('shell', 'clear_screen', patch=True)
    abort_tuple = (False, "No pipes were bootstrapped.")

    if (
        len(connector_keys) > 0 and
        len(metric_keys) > 0
    ):
        ### Non-interactive path: keys were supplied on the command line.
        info(
            "You've provided the following keys:\n" +
            " - Connector Keys: " + str(connector_keys) + "\n" +
            " - Metric Keys: " + str(metric_keys) + "\n" +
            (
                (" - Location Keys: " + str(location_keys) + "\n")
                if len(location_keys) > 0 else ""
            )
        )
        if not force:
            try:
                if not yes_no(
                    "Would you like to bootstrap pipes with these keys?\nExisting pipes will be deleted!",
                    default = 'n',
                    yes = yes,
                    noask = noask
                ):
                    return abort_tuple
            except KeyboardInterrupt:
                return abort_tuple
    else:
        ### Interactive path: get the connector.
        new_label = 'New'
        info(f"To create a pipe without explicitly using a connector, use the `register pipes` command.\n")
        try:
            ck = choose(
                f"Where are the data coming from?\n\n" +
                f"Please type the keys of a connector from below,\n" +
                f"or enter '{new_label}' to register a new connector.\n\n" +
                f" {get_config('formatting', 'emoji', 'connector')} Connector:\n",
                get_connector_labels() + [new_label],
                numeric = False
            )
        except KeyboardInterrupt:
            return abort_tuple

        if ck == new_label:
            ### Register a brand-new connector; loop until one is created
            ### (return_keys=True makes _bootstrap_connectors yield the keys).
            if _clear:
                clear_screen(debug=debug)
            while True:
                tup = _bootstrap_connectors(
                    [], yes=yes, force=force, debug=debug, return_keys=True, **kw
                )
                if isinstance(tup[0], str):
                    if _clear:
                        clear_screen(debug=debug)
                    ck = tup[0] + ':' + tup[1]
                    break
                elif isinstance(tup[0], bool) and not tup[0]:
                    return abort_tuple
                warn(f"Please register a new connector or press CTRL+C to cancel.", stack=False)
        connector_keys = [ck]

        ### Get the metric (re-prompt until non-empty).
        while True:
            if _clear:
                clear_screen(debug=debug)
            try:
                mk = prompt(
                    f"What kind of data is this?\n\n" +
                    f"The metric is the label for the contents of the pipe.\n" +
                    f"For example, 'weather' might be a metric for weather station data.\n\n" +
                    f" {get_config('formatting', 'emoji', 'metric')} Metric:"
                )
            except KeyboardInterrupt:
                return abort_tuple
            if mk:
                break
            warn("Please enter a metric.", stack=False)
        metric_keys = [mk]

        ### Get the (optional) location; empty input means no location.
        if _clear:
            clear_screen(debug=debug)
        try:
            lk = prompt(
                f"Where are the data located?\n\n" +
                f"You have the option to create multiple pipes with same connector and\n" +
                f"metric but different locations.\n\n" +
                f"For example, you could create the pipes 'sql_remote_energy_home' and\n" +
                f"'sql_remote_energy_work', which would share a connector ('sql:remote') and\n" +
                f"metric ('energy'), but may come from different tables.\n\n" +
                f"In most cases, you can omit the location.\n\n" +
                f" {get_config('formatting', 'emoji', 'location')} Location (Empty to omit):"
            )
        except KeyboardInterrupt:
            return abort_tuple
        lk = None if lk == '' else lk
        location_keys = [lk]

    if _clear:
        clear_screen(debug=debug)

    _pipes = get_pipes(
        connector_keys, metric_keys, location_keys,
        method = 'explicit', as_list = True,
        debug=debug, **kw
    )

    ### Confirm deletion of any pipes that already exist (unless forced).
    pipes = []
    for p in _pipes:
        if p.get_id() is not None and not force:
            try:
                if not yes_no(f"Pipe '{p}' already exists. Delete pipe '{p}'?\nData will be lost!", default='n'):
                    info(f"Skipping bootstrapping pipe '{p}'...")
                    continue
            except KeyboardInterrupt:
                return abort_tuple
        pipes.append(p)

    if len(pipes) == 0:
        return abort_tuple

    ### Bootstrap each pipe, tallying successes and failures.
    successes, failures = 0, 0
    for p in pipes:
        try:
            tup = p.bootstrap(interactive=True, force=force, noask=noask, yes=yes, debug=debug)
        except Exception as e:
            tup = False, f"Failed to bootstrap pipe '{p}' with exception:\n" + str(e)
        if tup[0]:
            successes += 1
        else:
            failures += 1

    msg = (
        f"Finished bootstrapping {len(pipes)} pipe" + ("s" if len(pipes) != 1 else "") + "\n" +
        f" ({successes} succeeded, {failures} failed)."
    )
    return (successes > 0), msg
def _bootstrap_connectors(
    action : Optional[List[str]] = None,
    connector_keys : Optional[List[str]] = None,
    yes : bool = False,
    force : bool = False,
    noask : bool = False,
    debug : bool = False,
    return_keys : bool = False,
    **kw : Any
) -> Union[SuccessTuple, Tuple[str, str]]:
    """
    Prompt the user for the details necessary to create a Connector.

    :param yes: Assume "yes" for confirmation prompts.
    :param force: Proceed even when declining to overwrite an existing label.
    :param noask: Do not prompt; use defaults for confirmations.
    :param debug: Enable debug output.
    :param return_keys: When True, return ``(type, label)`` of the new
        connector instead of a ``(success, message)`` tuple.
    :returns: A ``(success, message)`` tuple, or ``(type, label)`` when
        ``return_keys`` is True.
    """
    from meerschaum.connectors import connectors, get_connector
    from meerschaum.utils.prompt import prompt, yes_no, choose
    from meerschaum.config import get_config
    from meerschaum.config._edit import write_config
    from meerschaum.utils.formatting import pprint
    from meerschaum.utils.formatting._shell import clear_screen
    from meerschaum.connectors import attributes as connector_attributes
    from meerschaum.utils.warnings import warn, info
    from meerschaum.utils.misc import is_int

    abort_tuple = False, "No connectors bootstrapped."
    _clear = get_config('shell', 'clear_screen', patch=True)
    if action is None:
        action = []
    ### NOTE(review): `connector_keys` is currently unused beyond this
    ### normalization; kept for interface compatibility with the other
    ### `_bootstrap_*` actions.
    if connector_keys is None:
        connector_keys = []

    ### Choose the connector type (e.g. 'sql', 'api').
    try:
        _type = choose(
            (
                'Please choose a connector type.\n' +
                'For more information on connectors, please visit https://meerschaum.io/reference/connectors'
            ),
            sorted(list(connectors)),
            default='sql'
        )
    except KeyboardInterrupt:
        return abort_tuple

    if _clear:
        clear_screen(debug=debug)

    ### Prompt for a label, re-prompting on empty input and confirming
    ### before overwriting an existing connector of the same type.
    while True:
        try:
            _label = prompt(f"New label for '{_type}' connector:")
        except KeyboardInterrupt:
            return abort_tuple
        if _label in get_config('meerschaum', 'connectors', _type):
            warn(f"Connector '{_type}:{_label}' already exists.", stack=False)
            overwrite = yes_no(f"Do you want to overwrite connector '{_type}:{_label}'?", default='n', yes=yes, noask=noask)
            if not overwrite and not force:
                return False, f"No changes made to connector configuration."
            break
        elif _label == "":
            warn(f"Please enter a label.", stack=False)
        else:
            break

    ### Determine required and default attributes, flavor-specific if the
    ### connector type (e.g. 'sql') distinguishes flavors.
    new_attributes = {}
    if 'flavors' in connector_attributes[_type]:
        try:
            flavor = choose(
                f"Flavor for connector '{_type}:{_label}':",
                sorted(list(connector_attributes[_type]['flavors'])),
                default = (
                    'timescaledb' if 'timescaledb' in connector_attributes[_type]['flavors']
                    else None
                )
            )
        except KeyboardInterrupt:
            return abort_tuple
        new_attributes['flavor'] = flavor
        required = sorted(list(connector_attributes[_type]['flavors'][flavor]['requirements']))
        default = connector_attributes[_type]['flavors'][flavor]['defaults']
    else:
        required = sorted(list(connector_attributes[_type]['required']))
        default = connector_attributes[_type]['default']

    info(
        f"Please answer the following questions to configure the new connector '{_type}:{_label}'." + '\n' +
        "Press Ctrl+C to skip."
    )
    ### Prompt for each required attribute; Ctrl+C skips an attribute.
    for r in required:
        try:
            val = prompt(f"Value for {r}:")
        except KeyboardInterrupt:
            continue
        if is_int(val):
            val = int(val)
        new_attributes[r] = val
    for k, v in default.items():
        ### skip already configured attributes, (e.g. flavor or from required)
        if k in new_attributes:
            continue
        try:
            val = prompt(f"Value for {k}:", default=str(v))
        except KeyboardInterrupt:
            continue
        if is_int(val):
            val = int(val)
        new_attributes[k] = val

    if _clear:
        clear_screen(debug=debug)

    ### Instantiate the connector to validate the attributes before saving.
    try:
        conn = get_connector(_type, _label, **new_attributes)
    except Exception as e:
        return False, f"Failed to bootstrap connector '{_type}:{_label}' with exception:\n{e}"

    pprint(new_attributes)
    try:
        ok = (
            yes_no(
                f"Are you ok with these new attributes for connector '{conn}'?",
                default = 'y',
                noask = noask,
                yes = yes
            ) if not yes else yes
        )
    except KeyboardInterrupt:
        ok = False
    if not ok:
        return False, "No changes made to connectors configuration."

    ### Persist the new connector into the meerschaum configuration.
    meerschaum_config = get_config('meerschaum')
    if 'connectors' not in meerschaum_config:
        meerschaum_config['connectors'] = {}
    if _type not in meerschaum_config['connectors']:
        meerschaum_config['connectors'][_type] = {}
    meerschaum_config['connectors'][_type][_label] = new_attributes
    write_config({'meerschaum' : meerschaum_config}, debug=debug)

    if return_keys:
        return _type, _label
    return True, "Success"
def _bootstrap_config(
action : Optional[List[str]] = None,
yes : bool = False,
force : bool = False,
debug : bool = False,
**kw : Any
) -> SuccessTuple:
"""
Delete and regenerate the default Meerschaum configuration.
"""
from meerschaum.config._edit import write_default_config, write_config
from meerschaum.config._default import default_config
from meerschaum.actions import actions
if not actions['delete'](['config'], debug=debug, yes=yes, force=force, **kw)[0]:
return False, "Failed to delete configuration files."
if not write_config(default_config, debug=debug, **kw):
return (False, "Failed to write default | |
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_KdotPCalcSecondOrder'))
# Auto-generated metainfo definitions for Octopus parser-log entries.
# Each Quantity records one input variable as captured in the Octopus parser
# log; the description text ('... of type "..." in section "..."') mirrors
# the Octopus variable reference. NOTE(review): several entries whose Octopus
# type is "integer" or "flag" are declared here with Python type `str` —
# presumably because the raw log value may be a symbolic expression; confirm
# against the generating parser before relying on the declared types.
# --- Linear Response::KdotP variables ---
x_octopus_parserlog_KdotPCalculateEffectiveMasses = Quantity(
type=bool,
shape=[],
description='''
Octopus parser log entry "KdotPCalculateEffectiveMasses" of type "logical" in
section "Linear Response::KdotP"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_KdotPCalculateEffectiveMasses'))
x_octopus_parserlog_KdotPEta = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "KdotPEta" of type "float" in section "Linear
Response::KdotP"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_KdotPEta'))
x_octopus_parserlog_KdotPOccupiedSolutionMethod = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "KdotPOccupiedSolutionMethod" of type "integer" in
section "Linear Response::KdotP"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_KdotPOccupiedSolutionMethod'))
x_octopus_parserlog_KdotPUseNonLocalPseudopotential = Quantity(
type=bool,
shape=[],
description='''
Octopus parser log entry "KdotPUseNonLocalPseudopotential" of type "logical" in
section "Linear Response::KdotP"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_KdotPUseNonLocalPseudopotential'))
x_octopus_parserlog_KdotPVelMethod = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "KdotPVelMethod" of type "integer" in section "Linear
Response::KdotP"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_KdotPVelMethod'))
# --- Mesh::KPoints variables ---
x_octopus_parserlog_KPointsGrid = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "KPointsGrid" of type "block" in section "Mesh::KPoints"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_KPointsGrid'))
x_octopus_parserlog_KPointsReduced = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "KPointsReduced" of type "block" in section
"Mesh::KPoints"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_KPointsReduced'))
x_octopus_parserlog_KPointsUseSymmetries = Quantity(
type=bool,
shape=[],
description='''
Octopus parser log entry "KPointsUseSymmetries" of type "logical" in section
"Mesh::KPoints"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_KPointsUseSymmetries'))
x_octopus_parserlog_KPointsUseTimeReversal = Quantity(
type=bool,
shape=[],
description='''
Octopus parser log entry "KPointsUseTimeReversal" of type "logical" in section
"Mesh::KPoints"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_KPointsUseTimeReversal'))
x_octopus_parserlog_KPoints = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "KPoints" of type "block" in section "Mesh::KPoints"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_KPoints'))
# --- Calculation Modes::Invert KS variables ---
x_octopus_parserlog_KSInversionAsymptotics = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "KSInversionAsymptotics" of type "integer" in section
"Calculation Modes::Invert KS"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_KSInversionAsymptotics'))
x_octopus_parserlog_KSInversionLevel = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "KSInversionLevel" of type "integer" in section
"Calculation Modes::Invert KS"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_KSInversionLevel'))
# --- Mesh::Simulation Box variables ---
x_octopus_parserlog_LatticeParameters = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "LatticeParameters" of type "block" in section
"Mesh::Simulation Box"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LatticeParameters'))
x_octopus_parserlog_LatticeVectors = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "LatticeVectors" of type "block" in section
"Mesh::Simulation Box"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LatticeVectors'))
# --- Hamiltonian::XC (LB94 functional) variables ---
x_octopus_parserlog_LB94_modified = Quantity(
type=bool,
shape=[],
description='''
Octopus parser log entry "LB94_modified" of type "logical" in section
"Hamiltonian::XC"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LB94_modified'))
x_octopus_parserlog_LB94_threshold = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "LB94_threshold" of type "float" in section
"Hamiltonian::XC"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LB94_threshold'))
# --- SCF::LCAO variables ---
x_octopus_parserlog_LCAOAlternative = Quantity(
type=bool,
shape=[],
description='''
Octopus parser log entry "LCAOAlternative" of type "logical" in section
"SCF::LCAO"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LCAOAlternative'))
x_octopus_parserlog_LCAOComplexYlms = Quantity(
type=bool,
shape=[],
description='''
Octopus parser log entry "LCAOComplexYlms" of type "logical" in section
"SCF::LCAO"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LCAOComplexYlms'))
x_octopus_parserlog_LCAODiagTol = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "LCAODiagTol" of type "float" in section "SCF::LCAO"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LCAODiagTol'))
x_octopus_parserlog_LCAODimension = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "LCAODimension" of type "integer" in section "SCF::LCAO"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LCAODimension'))
x_octopus_parserlog_LCAOExtraOrbitals = Quantity(
type=bool,
shape=[],
description='''
Octopus parser log entry "LCAOExtraOrbitals" of type "logical" in section
"SCF::LCAO"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LCAOExtraOrbitals'))
x_octopus_parserlog_LCAOKeepOrbitals = Quantity(
type=bool,
shape=[],
description='''
Octopus parser log entry "LCAOKeepOrbitals" of type "logical" in section
"SCF::LCAO"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LCAOKeepOrbitals'))
x_octopus_parserlog_LCAOMaximumOrbitalRadius = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "LCAOMaximumOrbitalRadius" of type "float" in section
"SCF::LCAO"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LCAOMaximumOrbitalRadius'))
x_octopus_parserlog_LCAOScaleFactor = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "LCAOScaleFactor" of type "float" in section "SCF::LCAO"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LCAOScaleFactor'))
x_octopus_parserlog_LCAOStart = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "LCAOStart" of type "integer" in section "SCF::LCAO"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LCAOStart'))
# --- Utilities::oct-local_multipoles (LD*) variables ---
x_octopus_parserlog_LDBaderThreshold = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "LDBaderThreshold" of type "float" in section
"Utilities::oct-local_multipoles"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LDBaderThreshold'))
x_octopus_parserlog_LDEnd = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "LDEnd" of type "integer" in section "Utilities::oct-
local_multipoles"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LDEnd'))
x_octopus_parserlog_LDExtraWrite = Quantity(
type=bool,
shape=[],
description='''
Octopus parser log entry "LDExtraWrite" of type "logical" in section
"Utilities::oct-local_multipoles"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LDExtraWrite'))
x_octopus_parserlog_LDFilename = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "LDFilename" of type "string" in section "Utilities::oct-
local_multipoles"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LDFilename'))
x_octopus_parserlog_LDFolder = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "LDFolder" of type "string" in section "Utilities::oct-
local_multipoles"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LDFolder'))
x_octopus_parserlog_LDIonicDipole = Quantity(
type=bool,
shape=[],
description='''
Octopus parser log entry "LDIonicDipole" of type "logical" in section
"Utilities::oct-local_multipoles"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LDIonicDipole'))
x_octopus_parserlog_LDIterateFolder = Quantity(
type=bool,
shape=[],
description='''
Octopus parser log entry "LDIterateFolder" of type "logical" in section
"Utilities::oct-local_multipoles"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LDIterateFolder'))
x_octopus_parserlog_LDMultipoleLmax = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "LDMultipoleLmax" of type "integer" in section
"Utilities::oct-local_multipoles"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LDMultipoleLmax'))
x_octopus_parserlog_LDOutputFormat = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "LDOutputFormat" of type "flag" in section
"Utilities::oct-local_multipoles"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LDOutputFormat'))
x_octopus_parserlog_LDOutput = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "LDOutput" of type "flag" in section "Utilities::oct-
local_multipoles"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LDOutput'))
x_octopus_parserlog_LDOverWrite = Quantity(
type=bool,
shape=[],
description='''
Octopus parser log entry "LDOverWrite" of type "logical" in section
"Utilities::oct-local_multipoles"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LDOverWrite'))
x_octopus_parserlog_LDRadiiFile = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "LDRadiiFile" of type "string" in section
"Utilities::oct-local_multipoles"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LDRadiiFile'))
x_octopus_parserlog_LDRestartFolder = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "LDRestartFolder" of type "string" in section
"Utilities::oct-local_multipoles"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LDRestartFolder'))
x_octopus_parserlog_LDRestart = Quantity(
type=bool,
shape=[],
description='''
Octopus parser log entry "LDRestart" of type "logical" in section "Utilities::oct-
local_multipoles"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LDRestart'))
x_octopus_parserlog_LDStart = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "LDStart" of type "integer" in section "Utilities::oct-
local_multipoles"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LDStart'))
x_octopus_parserlog_LDStep = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "LDStep" of type "integer" in section "Utilities::oct-
local_multipoles"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LDStep'))
x_octopus_parserlog_LDUpdate = Quantity(
type=bool,
shape=[],
description='''
Octopus parser log entry "LDUpdate" of type "logical" in section "Utilities::oct-
local_multipoles"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LDUpdate'))
x_octopus_parserlog_LDUseAtomicRadii = Quantity(
type=bool,
shape=[],
description='''
Octopus parser log entry "LDUseAtomicRadii" of type "logical" in section
"Utilities::oct-local_multipoles"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LDUseAtomicRadii'))
x_octopus_parserlog_libvdwxcDebug = Quantity(
type=bool,
shape=[],
description='''
Octopus parser log entry "libvdwxcDebug" of type "logical" in section
"Hamiltonian::XC"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_libvdwxcDebug'))
x_octopus_parserlog_libvdwxcMode = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "libvdwxcMode" of type "integer" in section
"Hamiltonian::XC"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_libvdwxcMode'))
x_octopus_parserlog_libvdwxcVDWFactor = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "libvdwxcVDWFactor" of type "float" in section
"Hamiltonian::XC"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_libvdwxcVDWFactor'))
x_octopus_parserlog_LinearSolverMaxIter = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "LinearSolverMaxIter" of type "integer" in section
"Linear Response::Solver"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LinearSolverMaxIter'))
x_octopus_parserlog_LinearSolver = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "LinearSolver" of type "integer" in section "Linear
Response::Solver"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LinearSolver'))
x_octopus_parserlog_LocalDomains = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "LocalDomains" of type "block" in section
"Utilities::oct-local_multipoles"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LocalDomains'))
x_octopus_parserlog_LocalMagneticMomentsSphereRadius = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "LocalMagneticMomentsSphereRadius" of type "float" in
section "Output"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LocalMagneticMomentsSphereRadius'))
x_octopus_parserlog_LRConvAbsDens = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "LRConvAbsDens" of type "float" in section "Linear
Response::SCF in LR calculations"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LRConvAbsDens'))
x_octopus_parserlog_LRConvRelDens = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "LRConvRelDens" of type "float" in section "Linear
Response::SCF in LR calculations"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LRConvRelDens'))
x_octopus_parserlog_LRMaximumIter = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "LRMaximumIter" of type "integer" in section "Linear
Response::SCF in LR calculations"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LRMaximumIter'))
x_octopus_parserlog_LRTolAdaptiveFactor = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "LRTolAdaptiveFactor" of type "float" in section "Linear
Response::SCF in LR calculations"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LRTolAdaptiveFactor'))
x_octopus_parserlog_LRTolFinalTol = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "LRTolFinalTol" of type "float" in section "Linear
Response::Solver"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LRTolFinalTol'))
x_octopus_parserlog_LRTolInitTol = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "LRTolInitTol" of type "float" in section "Linear
Response::Solver"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LRTolInitTol'))
x_octopus_parserlog_LRTolIterWindow = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "LRTolIterWindow" of type "float" in section "Linear
Response::SCF in LR calculations"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LRTolIterWindow'))
x_octopus_parserlog_LRTolScheme = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "LRTolScheme" of type "integer" in section "Linear
Response::SCF in LR calculations"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_LRTolScheme'))
x_octopus_parserlog_Lsize = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "Lsize" of type "block" in section "Mesh::Simulation Box"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_Lsize'))
x_octopus_parserlog_MagneticGaugeCorrection = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "MagneticGaugeCorrection" of type "integer" in section
"Linear Response"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_MagneticGaugeCorrection'))
x_octopus_parserlog_MainAxis = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "MainAxis" of type "block" in section "Utilities::oct-
center-geom"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_MainAxis'))
x_octopus_parserlog_MassScaling = Quantity(
type=str,
shape=[],
description='''
Octopus | |
you divide and the dist
# is symmetric then you get a div zero...
for line in self.line_names:
ft_not = np.ones_like(ft_all)
if np.any(ft_line_density[line] == 0):
# have to build up
for not_line in self.line_names:
if not_line != line:
ft_not *= ft_line_density[not_line]
else:
if len(self.line_names) > 1:
ft_not = ft_all / ft_line_density[line]
self.density_df[f'ημ_{line}'] = np.real(ift(ft_not, self.padding, tilt_vector))
self.remove_fuzz(log='update')
# make audit statistics_df df
theoretical_stats = self.statistics_df.T.filter(regex='agg')
theoretical_stats.columns = ['EX1', 'EX2', 'EX3', 'Mean', 'CV', 'Skew', 'Limit', 'P99.9Est']
theoretical_stats = theoretical_stats[['Mean', 'CV', 'Skew', 'Limit', 'P99.9Est']]
# self.audit_percentiles = [0.9, 0.95, 0.99, 0.995, 0.996, 0.999, 0.9999, 1 - 1e-6]
self.audit_df = pd.DataFrame(
columns=['Sum probs', 'EmpMean', 'EmpCV', 'EmpSkew', "EmpKurt", 'EmpEX1', 'EmpEX2', 'EmpEX3'] +
['P' + str(100 * i) for i in self.audit_percentiles])
for col in self.line_names_ex:
sump = np.sum(self.density_df[f'p_{col}'])
t = self.density_df[f'p_{col}'] * self.density_df['loss']
ex1 = np.sum(t)
t *= self.density_df['loss']
ex2 = np.sum(t)
t *= self.density_df['loss']
ex3 = np.sum(t)
t *= self.density_df['loss']
ex4 = np.sum(t)
m, cv, s = MomentAggregator.static_moments_to_mcvsk(ex1, ex2, ex3)
# empirical kurtosis
kurt = (ex4 - 4 * ex3 * ex1 + 6 * ex1 ** 2 * ex2 - 3 * ex1 ** 4) / ((m * cv) ** 4) - 3
ps = np.zeros((len(self.audit_percentiles)))
temp = self.density_df[f'p_{col}'].cumsum()
for i, p in enumerate(self.audit_percentiles):
ps[i] = (temp > p).idxmax()
newrow = [sump, m, cv, s, kurt, ex1, ex2, ex3] + list(ps)
self.audit_df.loc[col, :] = newrow
self.audit_df = pd.concat((theoretical_stats, self.audit_df), axis=1, sort=True)
self.audit_df['MeanErr'] = self.audit_df['EmpMean'] / self.audit_df['Mean'] - 1
self.audit_df['CVErr'] = self.audit_df['EmpCV'] / self.audit_df['CV'] - 1
self.audit_df['SkewErr'] = self.audit_df['EmpSkew'] / self.audit_df['Skew'] - 1
# add exa details
if add_exa:
self.add_exa(self.density_df, details=True)
# default priority analysis
logger.debug('Adding EPDs in Portfolio.update')
if epds is None:
epds = np.hstack(
[np.linspace(0.5, 0.1, 4, endpoint=False)] +
[np.linspace(10 ** -n, 10 ** -(n + 1), 9, endpoint=False) for n in range(1, 7)])
epds = np.round(epds, 7)
self.priority_capital_df = pd.DataFrame(index=pd.Index(epds))
for col in self.line_names:
for i in range(3):
self.priority_capital_df['{:}_{:}'.format(col, i)] = self.epd_2_assets[(col, i)](epds)
self.priority_capital_df['{:}_{:}'.format('total', 0)] = self.epd_2_assets[('total', 0)](
epds)
col = 'not ' + col
for i in range(2):
self.priority_capital_df['{:}_{:}'.format(col, i)] = self.epd_2_assets[(col, i)](epds)
self.priority_capital_df['{:}_{:}'.format('total', 0)] = self.epd_2_assets[('total', 0)](epds)
self.priority_capital_df.columns = self.priority_capital_df.columns.str.split("_", expand=True)
self.priority_capital_df.sort_index(axis=1, level=1, inplace=True)
self.priority_capital_df.sort_index(axis=0, inplace=True)
else:
# at least want F and S to get quantile functions
self.density_df['F'] = np.cumsum(self.density_df.p_total)
self.density_df['S'] = 1 - self.density_df.F
self.ex = self.audit_df.loc['total', 'EmpMean']
self.last_update = np.datetime64('now')
self.hash_rep_at_last_update = hash(self)
if trim_df:
self.trim_df()
# invalidate stored functions
self._linear_quantile_function = None
self.q_temp = None
self._cdf = None
def update_efficiently(self, log2, bs, approx_freq_ge=100, approx_type='slognorm',
sev_calc='discrete', discretization_calc='survival', normalize=True, padding=1):
"""
runs stripped down versions of update and add_exa - bare bones
code copied from those routines and cleaned for comments etc.
:param log2:
:param bs:
:param approx_freq_ge:
:param approx_type:
:param remove_fuzz:
:param sev_calc:
:param discretization_calc:
:param padding:
:return:
"""
self.log2 = log2
self.bs = bs
self.padding = padding
self.approx_type = approx_type
self.sev_calc = sev_calc
self._remove_fuzz = True
self.approx_type = approx_type
self.approx_freq_ge = approx_freq_ge
self.discretization_calc = discretization_calc
ft_line_density = {}
N = 1 << log2
MAXL = N * bs
xs = np.linspace(0, MAXL, N, endpoint=False)
# no tilt for efficient mode
tilt_vector = None
# where the answer will live
self.density_df = pd.DataFrame(index=xs)
self.density_df['loss'] = xs
ft_all = None
for agg in self.agg_list:
raw_nm = agg.name
nm = f'p_{agg.name}'
_a = agg.update_efficiently(xs, self.padding, 'exact' if agg.n < approx_freq_ge else approx_type,
sev_calc, discretization_calc, normalize)
ft_line_density[raw_nm] = agg.ftagg_density
self.density_df[nm] = agg.agg_density
if ft_all is None:
ft_all = np.copy(ft_line_density[raw_nm])
else:
ft_all *= ft_line_density[raw_nm]
self.density_df['p_total'] = np.real(ift(ft_all, self.padding, tilt_vector))
# make the not self.line_density = sum of all but the given line
ft_nots = {}
for line in self.line_names:
ft_not = np.ones_like(ft_all)
if np.any(ft_line_density[line] == 0):
# have to build up
for not_line in self.line_names:
if not_line != line:
ft_not *= ft_line_density[not_line]
else:
if len(self.line_names) > 1:
ft_not = ft_all / ft_line_density[line]
self.density_df[f'ημ_{line}'] = np.real(ift(ft_not, self.padding, tilt_vector))
ft_nots[line] = ft_not
self.remove_fuzz(log='update_efficiently')
# no audit statistics_df
# BEGIN add_exa ================================================================================================
# add exa details now in-line
# def add_exa(self, df, details, ft_nots=None):
# Call is self.add_exa(self.density_df, details=True)
# name in add_exa, keeps code shorter
df = self.density_df
cut_eps = np.finfo(np.float).eps
# sum of p_total is so important...we will rescale it...
if not np.all(df.p_total >= 0):
# have negative densities...get rid of them
first_neg = np.argwhere((df.p_total < 0).to_numpy()).min()
sum_p_total = df.p_total.sum()
df['F'] = np.cumsum(df.p_total)
df['S'] = \
df.p_total.shift(-1, fill_value=min(df.p_total.iloc[-1], max(0, 1. - (df.p_total.sum()))))[::-1].cumsum()[::-1]
# E(min(X, a))
# df['exa_total'] = self.cumintegral(df['S'])
df['exa_total'] = df.S.shift(1, fill_value=0).cumsum() * self.bs
df['lev_total'] = df['exa_total']
df['exlea_total'] = \
(df.exa_total - df.loss * df.S) / df.F
n_ = df.shape[0]
if n_ < 1100:
mult = 1
elif n_ < 15000:
mult = 10
else:
mult = 100
loss_max = df[['loss', 'exlea_total']].query(' exlea_total>loss ').loss.max()
if np.isnan(loss_max):
loss_max = 0
else:
loss_max += mult * bs
# try nan in place of 0 V
df.loc[0:loss_max, 'exlea_total'] = np.nan
df['e_total'] = np.sum(df.p_total * df.loss)
df['exgta_total'] = df.loss + (df.e_total - df.exa_total) / df.S
df['exeqa_total'] = df.loss # E(X | X=a) = a(!) included for symmetry was exa
# FFT functions for use in exa calculations
# computing sums so minimal padding required
def loc_ft(x):
return ft(x, 1, None)
def loc_ift(x):
return ift(x, 1, None)
# where is S=0
Seq0 = (df.S == 0)
for col in self.line_names:
df['exeqa_' + col] = \
np.real(loc_ift(loc_ft(df.loss * df['p_' + col]) *
ft_nots[col])) / df.p_total
df.loc[df.p_total < cut_eps, 'exeqa_' + col] = 0
df['exeqa_ημ_' + col] = \
np.real(loc_ift(loc_ft(df.loss * df['ημ_' + col]) *
loc_ft(df['p_' + col]))) / df.p_total
df.loc[df.p_total < cut_eps, 'exeqa_ημ_' + col] = 0
stemp = 1 - df['p_' + col].cumsum()
# df['lev_' + col] = self.cumintegral(stemp)
df['lev_' + col] = stemp.shift(1, fill_value=0).cumsum() * self.bs
stemp = 1 - df['ημ_' + col].cumsum()
df['lev_ημ_' + col] = stemp.shift(1, fill_value=0).cumsum() * self.bs
# EX_i | X<= a; temp is used in le and gt calcs
temp = np.cumsum(df['exeqa_' + col] * df.p_total)
df['exlea_' + col] = temp / df.F
df.loc[0:loss_max, 'exlea_' + col] = 0 # df.loc[0:loss_max, 'loss']
temp_not = np.cumsum(df['exeqa_ημ_' + col] * df.p_total)
df['exlea_ημ_' + col] = temp_not / df.F
df.loc[0:loss_max, 'exlea_ημ_' + col] = 0 # df.loc[0:loss_max, 'loss']
# constant value, helpful in calculations
# df['e_' + col] = np.sum(df['p_' + col] * df.loss)
# df['e_ημ_' + col] = np.sum(df['ημ_' + col] * df.loss)
#
# df['exgta_' + col] = (df['e_' + col] - temp) / df.S
# temp = df.loss.iloc[0] # loss
# df.loss.iloc[0] = 1 # avoid divide by zero
#
# # df['exi_x_' + col] = np.sum(
# # df['exeqa_' + col] * df.p_total / df.loss)
# temp_xi_x = np.cumsum(df['exeqa_' + col] * df.p_total / df.loss)
# df['exi_xlea_' + col] = temp_xi_x / df.F
# df.loc[0, 'exi_xlea_' + col] = 0 # df.F=0 at zero
# # more generally F=0 error: V
# df.loc[df.exlea_total == 0, 'exi_xlea_' + col] = 0
# # not version
# df['exi_x_ημ_' + col] = np.sum(
# df['exeqa_ημ_' + col] * df.p_total / df.loss)
# # as above
# temp_xi_x_not = np.cumsum(
# df['exeqa_ημ_' + col] * df.p_total / df.loss)
# df['exi_xlea_ημ_' + col] = temp_xi_x_not / df.F
# df.loc[0, 'exi_xlea_ημ_' + col] = 0 # df.F=0 at zero
# # more generally F=0 error:
# df.loc[df.exlea_total == 0, 'exi_xlea_ημ_' + col] = 0
# # put value back
# df.loss.iloc[0] = temp
# this is so important we will calculate it directly
df['exi_xgta_' + col] = ((df[f'exeqa_{col}'] / df.loss *
df.p_total).shift(-1)[
::-1].cumsum()) / df.S
# need this NOT to be nan otherwise exa won't come out correctly
df.loc[Seq0, 'exi_xgta_' + col] = 0.
df['exi_xgta_ημ_' + col] = ((df[f'exeqa_ημ_{col}'] / df.loss *
df.p_total).shift(-1)[
::-1].cumsum()) / df.S
df.loc[Seq0, 'exi_xgta_ημ_' + col] = 0.
df['exi_xeqa_' + col] = df['exeqa_' + col] / df['loss']
df.loc[0, 'exi_xeqa_' + col] = 0
df['exi_xeqa_ημ_' + col] = df['exeqa_ημ_' + col] / df['loss']
df.loc[0, 'exi_xeqa_ημ_' + col] = 0
df[f'exa_{col}'] = (df.S * df['exi_xgta_' + col]).shift(1, fill_value=0).cumsum() * self.bs
df['exa_ημ_' | |
<reponame>NateLehman/azure-sdk-for-python
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import List, Optional
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class AclFailedEntry(msrest.serialization.Model):
    """A single failed entry of an ACL operation.

    :ivar name: Entry name, if any.
    :vartype name: str
    :ivar type: Entry type, if any.
    :vartype type: str
    :ivar error_message: Error message for the failure, if any.
    :vartype error_message: str
    """

    # Python attribute -> wire key / msrest serialization type.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'error_message': {'key': 'errorMessage', 'type': 'str'},
    }

    def __init__(self, *, name: Optional[str] = None, type: Optional[str] = None,
                 error_message: Optional[str] = None, **kwargs):
        """
        :keyword name: Entry name, if any.
        :paramtype name: str
        :keyword type: Entry type, if any.
        :paramtype type: str
        :keyword error_message: Error message for the failure, if any.
        :paramtype error_message: str
        """
        super().__init__(**kwargs)
        self.error_message = error_message
        self.type = type
        self.name = name
class BlobHierarchyListSegment(msrest.serialization.Model):
    """One segment of a hierarchical blob listing.

    All required parameters must be populated in order to send to Azure.

    :ivar blob_prefixes: Blob-name prefixes in this segment, if any.
    :vartype blob_prefixes: list[~azure.storage.filedatalake.models.BlobPrefix]
    :ivar blob_items: Required. Blob items in this segment.
    :vartype blob_items: list[~azure.storage.filedatalake.models.BlobItemInternal]
    """

    # blob_items must be populated before serialization.
    _validation = {
        'blob_items': {'required': True},
    }
    _attribute_map = {
        'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]'},
        'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]'},
    }
    # Serialized as the <Blobs> XML element.
    _xml_map = {
        'name': 'Blobs'
    }

    def __init__(self, *, blob_items: List["BlobItemInternal"],
                 blob_prefixes: Optional[List["BlobPrefix"]] = None, **kwargs):
        """
        :keyword blob_prefixes: Blob-name prefixes in this segment, if any.
        :paramtype blob_prefixes: list[~azure.storage.filedatalake.models.BlobPrefix]
        :keyword blob_items: Required. Blob items in this segment.
        :paramtype blob_items: list[~azure.storage.filedatalake.models.BlobItemInternal]
        """
        super().__init__(**kwargs)
        self.blob_items = blob_items
        self.blob_prefixes = blob_prefixes
class BlobItemInternal(msrest.serialization.Model):
    """An Azure Storage blob.

    All required parameters must be populated in order to send to Azure.

    :ivar name: Required. The blob name.
    :vartype name: str
    :ivar deleted: Required.
    :vartype deleted: bool
    :ivar snapshot: Required.
    :vartype snapshot: str
    :ivar version_id:
    :vartype version_id: str
    :ivar is_current_version:
    :vartype is_current_version: bool
    :ivar properties: Required. Properties of the blob.
    :vartype properties: ~azure.storage.filedatalake.models.BlobPropertiesInternal
    :ivar deletion_id:
    :vartype deletion_id: str
    """

    # Fields that must be populated before serialization.
    _validation = {
        'name': {'required': True},
        'deleted': {'required': True},
        'snapshot': {'required': True},
        'properties': {'required': True},
    }
    # Python attribute -> XML element name / msrest serialization type.
    _attribute_map = {
        'name': {'key': 'Name', 'type': 'str'},
        'deleted': {'key': 'Deleted', 'type': 'bool'},
        'snapshot': {'key': 'Snapshot', 'type': 'str'},
        'version_id': {'key': 'VersionId', 'type': 'str'},
        'is_current_version': {'key': 'IsCurrentVersion', 'type': 'bool'},
        'properties': {'key': 'Properties', 'type': 'BlobPropertiesInternal'},
        'deletion_id': {'key': 'DeletionId', 'type': 'str'},
    }
    # Serialized as the <Blob> XML element.
    _xml_map = {
        'name': 'Blob'
    }

    def __init__(self, *, name: str, deleted: bool, snapshot: str,
                 properties: "BlobPropertiesInternal",
                 version_id: Optional[str] = None,
                 is_current_version: Optional[bool] = None,
                 deletion_id: Optional[str] = None, **kwargs):
        """
        :keyword name: Required. The blob name.
        :paramtype name: str
        :keyword deleted: Required.
        :paramtype deleted: bool
        :keyword snapshot: Required.
        :paramtype snapshot: str
        :keyword version_id:
        :paramtype version_id: str
        :keyword is_current_version:
        :paramtype is_current_version: bool
        :keyword properties: Required. Properties of the blob.
        :paramtype properties: ~azure.storage.filedatalake.models.BlobPropertiesInternal
        :keyword deletion_id:
        :paramtype deletion_id: str
        """
        super().__init__(**kwargs)
        # Required fields first, then the optional ones.
        self.name = name
        self.deleted = deleted
        self.snapshot = snapshot
        self.properties = properties
        self.version_id = version_id
        self.is_current_version = is_current_version
        self.deletion_id = deletion_id
class BlobPrefix(msrest.serialization.Model):
    """A blob-name prefix.

    All required parameters must be populated in order to send to Azure.

    :ivar name: Required. The prefix value.
    :vartype name: str
    """

    _validation = {'name': {'required': True}}
    _attribute_map = {'name': {'key': 'Name', 'type': 'str'}}

    def __init__(self, *, name: str, **kwargs):
        """
        :keyword name: Required. The prefix value.
        :paramtype name: str
        """
        super().__init__(**kwargs)
        self.name = name
# NOTE: AutoRest-generated model (see file header) — edits will be lost on
# regeneration; the _attribute_map keys define the service wire format and
# must not be altered.
class BlobPropertiesInternal(msrest.serialization.Model):
    """Properties of a blob.

    All required parameters must be populated in order to send to Azure.

    :ivar creation_time:
    :vartype creation_time: ~datetime.datetime
    :ivar last_modified: Required.
    :vartype last_modified: ~datetime.datetime
    :ivar etag: Required.
    :vartype etag: str
    :ivar content_length: Size in bytes.
    :vartype content_length: long
    :ivar content_type:
    :vartype content_type: str
    :ivar content_encoding:
    :vartype content_encoding: str
    :ivar content_language:
    :vartype content_language: str
    :ivar content_md5:
    :vartype content_md5: bytearray
    :ivar content_disposition:
    :vartype content_disposition: str
    :ivar cache_control:
    :vartype cache_control: str
    :ivar blob_sequence_number:
    :vartype blob_sequence_number: long
    :ivar copy_id:
    :vartype copy_id: str
    :ivar copy_source:
    :vartype copy_source: str
    :ivar copy_progress:
    :vartype copy_progress: str
    :ivar copy_completion_time:
    :vartype copy_completion_time: ~datetime.datetime
    :ivar copy_status_description:
    :vartype copy_status_description: str
    :ivar server_encrypted:
    :vartype server_encrypted: bool
    :ivar incremental_copy:
    :vartype incremental_copy: bool
    :ivar destination_snapshot:
    :vartype destination_snapshot: str
    :ivar deleted_time:
    :vartype deleted_time: ~datetime.datetime
    :ivar remaining_retention_days:
    :vartype remaining_retention_days: int
    :ivar access_tier_inferred:
    :vartype access_tier_inferred: bool
    :ivar customer_provided_key_sha256:
    :vartype customer_provided_key_sha256: str
    :ivar encryption_scope: The name of the encryption scope under which the blob is encrypted.
    :vartype encryption_scope: str
    :ivar access_tier_change_time:
    :vartype access_tier_change_time: ~datetime.datetime
    :ivar tag_count:
    :vartype tag_count: int
    :ivar expires_on:
    :vartype expires_on: ~datetime.datetime
    :ivar is_sealed:
    :vartype is_sealed: bool
    :ivar last_accessed_on:
    :vartype last_accessed_on: ~datetime.datetime
    :ivar delete_time:
    :vartype delete_time: ~datetime.datetime
    """

    # Fields that must be populated before serialization.
    _validation = {
        'last_modified': {'required': True},
        'etag': {'required': True},
    }
    # Python attribute -> wire key / msrest serialization type.
    # 'rfc-1123' marks RFC 1123 (HTTP-date) formatted timestamp strings.
    _attribute_map = {
        'creation_time': {'key': 'Creation-Time', 'type': 'rfc-1123'},
        'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'},
        'etag': {'key': 'Etag', 'type': 'str'},
        'content_length': {'key': 'Content-Length', 'type': 'long'},
        'content_type': {'key': 'Content-Type', 'type': 'str'},
        'content_encoding': {'key': 'Content-Encoding', 'type': 'str'},
        'content_language': {'key': 'Content-Language', 'type': 'str'},
        'content_md5': {'key': 'Content-MD5', 'type': 'bytearray'},
        'content_disposition': {'key': 'Content-Disposition', 'type': 'str'},
        'cache_control': {'key': 'Cache-Control', 'type': 'str'},
        'blob_sequence_number': {'key': 'x-ms-blob-sequence-number', 'type': 'long'},
        'copy_id': {'key': 'CopyId', 'type': 'str'},
        'copy_source': {'key': 'CopySource', 'type': 'str'},
        'copy_progress': {'key': 'CopyProgress', 'type': 'str'},
        'copy_completion_time': {'key': 'CopyCompletionTime', 'type': 'rfc-1123'},
        'copy_status_description': {'key': 'CopyStatusDescription', 'type': 'str'},
        'server_encrypted': {'key': 'ServerEncrypted', 'type': 'bool'},
        'incremental_copy': {'key': 'IncrementalCopy', 'type': 'bool'},
        'destination_snapshot': {'key': 'DestinationSnapshot', 'type': 'str'},
        'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'},
        'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'},
        'access_tier_inferred': {'key': 'AccessTierInferred', 'type': 'bool'},
        'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 'str'},
        'encryption_scope': {'key': 'EncryptionScope', 'type': 'str'},
        'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123'},
        'tag_count': {'key': 'TagCount', 'type': 'int'},
        'expires_on': {'key': 'Expiry-Time', 'type': 'rfc-1123'},
        'is_sealed': {'key': 'Sealed', 'type': 'bool'},
        'last_accessed_on': {'key': 'LastAccessTime', 'type': 'rfc-1123'},
        'delete_time': {'key': 'DeleteTime', 'type': 'rfc-1123'},
    }
    # Serialized as the <Properties> XML element.
    _xml_map = {
        'name': 'Properties'
    }

    def __init__(
        self,
        *,
        last_modified: datetime.datetime,
        etag: str,
        creation_time: Optional[datetime.datetime] = None,
        content_length: Optional[int] = None,
        content_type: Optional[str] = None,
        content_encoding: Optional[str] = None,
        content_language: Optional[str] = None,
        content_md5: Optional[bytearray] = None,
        content_disposition: Optional[str] = None,
        cache_control: Optional[str] = None,
        blob_sequence_number: Optional[int] = None,
        copy_id: Optional[str] = None,
        copy_source: Optional[str] = None,
        copy_progress: Optional[str] = None,
        copy_completion_time: Optional[datetime.datetime] = None,
        copy_status_description: Optional[str] = None,
        server_encrypted: Optional[bool] = None,
        incremental_copy: Optional[bool] = None,
        destination_snapshot: Optional[str] = None,
        deleted_time: Optional[datetime.datetime] = None,
        remaining_retention_days: Optional[int] = None,
        access_tier_inferred: Optional[bool] = None,
        customer_provided_key_sha256: Optional[str] = None,
        encryption_scope: Optional[str] = None,
        access_tier_change_time: Optional[datetime.datetime] = None,
        tag_count: Optional[int] = None,
        expires_on: Optional[datetime.datetime] = None,
        is_sealed: Optional[bool] = None,
        last_accessed_on: Optional[datetime.datetime] = None,
        delete_time: Optional[datetime.datetime] = None,
        **kwargs
    ):
        """
        :keyword creation_time:
        :paramtype creation_time: ~datetime.datetime
        :keyword last_modified: Required.
        :paramtype last_modified: ~datetime.datetime
        :keyword etag: Required.
        :paramtype etag: str
        :keyword content_length: Size in bytes.
        :paramtype content_length: long
        :keyword content_type:
        :paramtype content_type: str
        :keyword content_encoding:
        :paramtype content_encoding: str
        :keyword content_language:
        :paramtype content_language: str
        :keyword content_md5:
        :paramtype content_md5: bytearray
        :keyword content_disposition:
        :paramtype content_disposition: str
        :keyword cache_control:
        :paramtype cache_control: str
        :keyword blob_sequence_number:
        :paramtype blob_sequence_number: long
        :keyword copy_id:
        :paramtype copy_id: str
        :keyword copy_source:
        :paramtype copy_source: str
        :keyword copy_progress:
        :paramtype copy_progress: str
        :keyword copy_completion_time:
        :paramtype copy_completion_time: ~datetime.datetime
        :keyword copy_status_description:
        :paramtype copy_status_description: str
        :keyword server_encrypted:
        :paramtype server_encrypted: bool
        :keyword incremental_copy:
        :paramtype incremental_copy: bool
        :keyword destination_snapshot:
        :paramtype destination_snapshot: str
        :keyword deleted_time:
        :paramtype deleted_time: ~datetime.datetime
        :keyword remaining_retention_days:
        :paramtype remaining_retention_days: int
        :keyword access_tier_inferred:
        :paramtype access_tier_inferred: bool
        :keyword customer_provided_key_sha256:
        :paramtype customer_provided_key_sha256: str
        :keyword encryption_scope: The name of the encryption scope under which the blob is encrypted.
        :paramtype encryption_scope: str
        :keyword access_tier_change_time:
        :paramtype access_tier_change_time: ~datetime.datetime
        :keyword tag_count:
        :paramtype tag_count: int
        :keyword expires_on:
        :paramtype expires_on: ~datetime.datetime
        :keyword is_sealed:
        :paramtype is_sealed: bool
        :keyword last_accessed_on:
        :paramtype last_accessed_on: ~datetime.datetime
        :keyword delete_time:
        :paramtype delete_time: ~datetime.datetime
        """
        super(BlobPropertiesInternal, self).__init__(**kwargs)
        self.creation_time = creation_time
        self.last_modified = last_modified
        self.etag = etag
        self.content_length = content_length
        self.content_type = content_type
        self.content_encoding = content_encoding
        self.content_language = content_language
        self.content_md5 = content_md5
        self.content_disposition = content_disposition
        self.cache_control = cache_control
        self.blob_sequence_number = blob_sequence_number
        self.copy_id = copy_id
        self.copy_source = copy_source
        self.copy_progress = copy_progress
        self.copy_completion_time = copy_completion_time
        self.copy_status_description = copy_status_description
        self.server_encrypted = server_encrypted
        self.incremental_copy = incremental_copy
        self.destination_snapshot = destination_snapshot
        self.deleted_time = deleted_time
        self.remaining_retention_days = remaining_retention_days
        self.access_tier_inferred = access_tier_inferred
        self.customer_provided_key_sha256 = customer_provided_key_sha256
        self.encryption_scope = encryption_scope
        self.access_tier_change_time = access_tier_change_time
        self.tag_count = tag_count
        self.expires_on = expires_on
        self.is_sealed = is_sealed
        self.last_accessed_on = last_accessed_on
        self.delete_time = delete_time
class FileSystem(msrest.serialization.Model):
"""FileSystem.
:ivar name:
:vartype name: str
:ivar last_modified:
:vartype last_modified: str
:ivar e_tag:
:vartype e_tag: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'last_modified': {'key': 'lastModified', 'type': 'str'},
'e_tag': {'key': 'eTag', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] | |
rules"
my_user, channel, _ = params
channel = self.get_channel(channel)
self.logger.log("[%s] NAMES: %s" % (channel.name,
" ".join(user.nick for user in channel.users)))
reactor.callLater(10, self.update_rules, channel=channel)
    def userJoined(self, nick, channel):
        """Twisted IRC event: `nick` joined `channel`.

        Greets first-time users, re-identifies to services when they rejoin,
        reminds users of unread mail, relays the join across the channel's
        relay ("<main>_<suffix>") counterparts, and refreshes last-seen state.
        """
        self.logger.log("[%s] %s has joined." % (channel, nick))
        # create_if_nonexistent=False lets us detect first-time visitors below.
        user = self.get_user(nick, create_if_nonexistent=False)
        joinchannel = self.get_channel(channel)
        # Collect nicks already visible in the main channel and in all of its
        # relay channels, so the join is only announced once per appearance.
        users = []
        for onlineuser in self.get_channel(str(channel).split("_")[0]).users:
            users.append(onlineuser.nick)
        for relaychannel in self.channels.itervalues():
            try:
                if str(relaychannel).split("_")[1]:
                    if str(relaychannel).split("_")[0] == str(channel).split("_")[0]:
                        for onlineuser in relaychannel.users:
                            if not onlineuser.nick in users:
                                users.append(onlineuser.nick)
            except IndexError:
                # Not a relay channel (no "_" in the name).
                pass
        if user == None:
            # Unknown nick: create the user record and offer the tutorial.
            user = self.get_user(nick)
            self.send_and_log(joinchannel, user,
                              "Hello, %s! I haven't seen you before. If you want, you can take a tutorial on how to best use %s by typing '!tutorial'."
                              % (user, self.nickname))
        elif user.nick in ["NickServ", "ChanServ"]:
            # This ensures that we'll identify to services after they recover from a crash, assuming ChanServ joins our channel
            if self.identify == True:
                self.msg('NickServ',
                         'IDENTIFY %s' % self.identifypassword)
        elif len(user.messages) > len(user.readmessages):
            self.send_and_log(joinchannel, user,
                              "You have unread messages. Please check them using '!mail inbox unread'.")
        # Announce the join unless the main channel is disabled, or the relay
        # channel is in "silent" mode.
        if (not "_" in channel and not self.get_settings().maindisabled) or ("_" in channel and not "silent" in joinchannel.mode):
            if not nick in users:
                self.relay("%s has joined." % nick, joinchannel, relateduser=self.find_user(nick), chat=False, notifyfriends=True)
            joinchannel.users.add(user)
        elif self.get_settings().maindisabled:
            # Main channel disabled: point the user at their personal relay channel.
            if not user.channel:
                self.dispatch(command="set channel", user=user, reply_to=joinchannel, bypass=True)
            self.send_and_log(joinchannel, user, "The main channel has been disabled. Please join %s_%s to chat in this channel." % (joinchannel, str(user).lower()))
        if "_" in channel and user.nick not in ["NickServ", "ChanServ"]:
            # Refresh the user list of the relay channel we just saw a join in.
            self.dispatch(command="names", user=None, reply_to=joinchannel)
        self.check_for_master(user).seen = datetime.datetime.now()
        self.checkAway(user=user)
def userLeft(self, nick, channel):
self.logger.log("[%s] %s has left." % (channel, nick))
user = self.get_user(nick)
master = self.check_for_master(user)
leavechannel = self.get_channel(channel)
try:
leavechannel.users.remove(user)
except KeyError:
return
stillonline = False
hideleave = False
for channel in self.channels.itervalues():
if user in channel.users:
if not "silent" in channel.mode:
stillonline = True
else:
hideleave = True
if not stillonline:
if not hideleave:
self.relay("%s has left." % nick, leavechannel, relateduser=user, chat=False)
if master.autologout:
user.logged_in = False
self.update_rules(channel=channel)
master.seen = datetime.datetime.now()
user.lastlogout = datetime.datetime.now()
    def userQuit(self, nick, message):
        """Twisted IRC event: `nick` disconnected with quit `message`.

        Drops the user from every channel, relays the quit (hiding the quit
        message for plain client "Quit: ..." disconnects), then updates
        last-seen / auto-logout bookkeeping.
        """
        self.logger.log("%s has quit (%s)." % (nick, message))
        user = self.get_user(nick)
        exclude = []
        for channel in self.channels.itervalues():
            channel.users.discard(user)
            # NOTE(review): `user in channel.users` is always False here since
            # the user was discarded on the previous line — this condition was
            # presumably meant to be evaluated *before* the discard. Verify
            # intent before changing.
            if user in channel.users or ("_" in channel and "silent" in channel.mode):
                exclude.append(channel)
        if not message.startswith("Quit: "):
            # Non-default quit text (e.g. ping timeout) is worth relaying.
            self.relay("%s has quit (%s)." % (nick, message), relateduser=user, chat=False, exclude=exclude)
        else:
            # Default client quit: hide the message itself.
            self.relay("%s has quit." % nick, relateduser=user, chat=False, exclude=exclude)
        master = self.check_for_master(user)
        master.seen = datetime.datetime.now()
        user.lastlogout = datetime.datetime.now()
        if master.autologout:
            user.logged_in = False
    def userKicked(self, kicked, channel, kicker, message):
        """Twisted IRC event: `kicked` was kicked from `channel` by `kicker`.

        Kicks issued *through* the bot encode the real issuer in the kick
        message as "<issuer>: <reason>"; unpack that first. Then drop the
        user from the channel and relay the kick appropriately.
        """
        try:
            # Kick messages issued through the bot look like "<issuer>: <reason>".
            oldmessage = message
            message = message.split(": ")[1]
            kicker = oldmessage.split(": ")[0]
        except IndexError:
            # Plain kick message; keep the reported kicker as-is.
            pass
        self.logger.log("[%s] %s was kicked by %s (%s)." %
                        (channel, kicked, kicker, message))
        channel = self.get_channel(channel)
        try:
            channel.users.remove(self.get_user(kicked))
        except KeyError:
            return
        # Collect nicks still visible in the main channel and the *other*
        # relay channels, to decide how to phrase the relayed notice.
        users = []
        for onlineuser in self.get_channel(str(channel).split("_")[0]).users:
            users.append(onlineuser.nick)
        for relaychannel in self.channels.itervalues():
            try:
                if str(relaychannel).split("_")[1]:
                    if str(relaychannel).split("_")[0] == str(channel).split("_")[0] and relaychannel != channel:
                        for onlineuser in relaychannel.users:
                            if not onlineuser.nick in users:
                                users.append(onlineuser.nick)
            except IndexError:
                # Not a relay channel (no "_" in the name).
                pass
        if not kicked in users:
            # Gone from every related channel: phrase it as a leave.
            self.relay("%s has left (kicked by %s)." %
                       (kicked, kicker), channel, relateduser=self.find_user(kicked), chat=False)
        else:
            self.relay("%s was kicked by %s." %
                       (kicked, kicker), channel, relateduser=self.find_user(kicked), chat=False)
        self.update_rules(channel=channel)
        user = self.get_user(kicked)
        self.check_for_master(user).seen = datetime.datetime.now()
        user.lastlogout = datetime.datetime.now()
def irc_RPL_AWAY(self, prefix, params):
nick = params[1]
self.get_user(nick).away = True
self.update_rules()
def admin_command(f):
    """Decorator: only run `f` for bot admins, or for users that were granted
    the command (or any prefix of its name) explicitly."""
    def wrapper(bot, params, user, recipient, mainchannel, bypass=False):
        master = bot.check_for_master(user)
        # Build every prefix of the command name ("a_b_c" -> "a", "a_b",
        # "a_b_c") so that granting a command also grants its subcommands.
        name_parts = wrapper.__name__.split("_")
        prefixes = ["_".join(name_parts[:i]) for i in range(1, len(name_parts) + 1)]
        if master.admin or any(p in master.admincommandsallowed for p in prefixes):
            return f(bot, params, user, recipient, mainchannel, bypass)
        bot.send_and_log(recipient, user,
                         "You are not authorized to use this command.")
    wrapper.__name__ = f.__name__
    return wrapper
def channelmod_command(f):
    """Decorator: only run `f` when executed against a real channel, by a
    channel admin of that channel or by a bot admin."""
    def wrapper(bot, params, user, recipient, mainchannel, bypass=False):
        master = bot.check_for_master(user)
        if not is_channel_name(str(mainchannel)):
            # Not bound to a channel: the caller must name one.
            bot.send_and_log(recipient, user,
                             "This command needs to be executed in-channel, or given the channel name as first parameter. Please run it in either a main or triggersafe channel, or give the channel name.")
            return
        if master.nick in mainchannel.admins or master.admin:
            return f(bot, params, user, recipient, mainchannel, bypass)
        bot.send_and_log(recipient, user,
                         "You are not authorized to use this command.")
    wrapper.__name__ = f.__name__
    return wrapper
def toggleable_command(f):
    """Decorator: skip `f` (with a notice) when the command, or any prefix of
    its name, has been disabled by an administrator."""
    def wrapper(bot, params, user, recipient, mainchannel, bypass=False):
        # Prefixes of the command name: disabling "a" also disables "a_b".
        name_parts = wrapper.__name__.split("_")
        prefixes = ["_".join(name_parts[:i]) for i in range(1, len(name_parts) + 1)]
        disabled = bot.get_settings().disabledcommands
        if any(p in disabled for p in prefixes):
            bot.send_and_log(recipient, user,
                             "This command has been disabled by an administrator.")
            return
        return f(bot, params, user, recipient, mainchannel, bypass)
    wrapper.__name__ = f.__name__
    return wrapper
def protected_command(f):
    """Decorator: when the master account is password-protected, require the
    executing user to be logged in (unless the call bypasses auth)."""
    def wrapper(bot, params, user, recipient, mainchannel, bypass=False):
        master = bot.check_for_master(user)
        # De Morgan of the original guard: run unless auth is both required
        # and not satisfied.
        if bypass or master.password == None or user.logged_in == True:
            return f(bot, params, user, recipient, mainchannel, bypass)
        raise AuthenticationError
    wrapper.__name__ = f.__name__
    return wrapper
def logged_command(f):
    """Decorator: record each invocation (channel + pretty command name +
    arguments) in the master account's audit log before running `f`."""
    def wrapper(bot, params, user_executed, recipient, mainchannel, bypass=False):
        master = bot.check_for_master(user_executed)
        # "channel_add" -> "channel add", followed by the raw parameters.
        pretty_name = " ".join(wrapper.__name__.split("_"))
        master.logs[datetime.datetime.now()] = [mainchannel, "%s %s" % (pretty_name, " ".join(params))]
        return f(bot, params, user_executed, recipient, mainchannel, bypass)
    wrapper.__name__ = f.__name__
    return wrapper
def register_commands():
"""All the commands are defined in here"""
command = TriggerBot.add_command
    @command("Lists the bot admins that are currently online.\n"
             "admins")
    @toggleable_command
    def admins(bot, params, user_executed, recipient, mainchannel, bypass=False):
        """List available (online and not away) and unavailable admins.

        A user counts as an admin when flagged globally (user.admin) or when
        listed as a channel admin of `mainchannel`.
        """
        # Available: present in some channel, not away, and either a global
        # admin or a channel admin of this main channel.
        avail_admins = [user.nick for channel in bot.channels.itervalues()
                        for user in channel.users
                        if not user.away and (user.admin or (user.nick in mainchannel.admins and str(mainchannel) == str(channel).split("_")[0]))]
        unavail_admins = [user.nick for user in bot.users.itervalues()
                          if (user.admin or user.nick in mainchannel.admins) and user.nick not in avail_admins]
        if avail_admins:
            # admin == 1 denotes the head admin; other truthy values a regular
            # bot admin; channel-admin otherwise.
            bot.send_and_log(recipient, user_executed,
                             "The following people are currently available: "
                             + ", ".join(["%s (%s)" % (user.nick, "head admin" if user.admin == 1 else ("admin" if user.admin else "channel admin")) for user in bot.users.itervalues() if user.nick in avail_admins]))
        else:
            bot.send_and_log(recipient, user_executed,
                             "Nobody is currently available.")
        if unavail_admins:
            bot.send_and_log(recipient, user_executed,
                             "The following people are currently unavailable: "
                             + ", ".join(["%s (%s)" % (user.nick, "head admin" if user.admin == 1 else ("admin" if user.admin else "channel admin")) for user in bot.users.itervalues() if user.nick in unavail_admins]))
@command("Anonymously request a topic change.\n"
"change [<channel>]")
@toggleable_command
def change(bot, params, user, recipient, mainchannel, bypass=False):
if len(params) > 0:
channel = bot.get_channel(params[0])
if not is_channel_name(channel.name):
channel.name = "#%s" % channel.name
else:
channel = recipient
if not getattr(channel, "is_channel", False):
bot.send_and_log(recipient, user,
"Please specify a channel to request a topic change for.")
return
bot.relay_safe(message="Someone is feeling uncomfortable with this disussion. Could we talk about something else?", channel=channel)
bot.notifyAdmins(mainchannel, "%s issued !change for %s. Please ensure everyone is sticking to the rules." % (user, channel))
    # TODO: Add channel mod commands here
    @command("Manage a channel.\n"
             "channel")
    def channel(bot, params, user_executed, recipient, mainchannel, bypass=False):
        """Bare "channel" command: never valid on its own.

        Raising BadCommand tells the dispatcher that a subcommand (e.g.
        "channel add") is required — presumably the dispatcher replies with
        usage help; confirm against the dispatch implementation.
        """
        raise BadCommand
    @command("Add one or more users as channel administrator (admin).\n"
             "channel add <user>")
    @channelmod_command
    @protected_command
    @logged_command
    def channel_add(bot, params, user_executed, recipient, mainchannel, bypass=False):
        """Grant channel-admin status on `mainchannel` to each named user.

        Each nick is resolved to its master account so aliases share the same
        admin status. Raises MissingParams when no nick was given.
        """
        if params:
            for nick in params:
                nick = bot.find_user(nick)
                master = bot.check_for_master(nick)
                if master.nick not in mainchannel.admins:
                    mainchannel.admins.append(master.nick)
            bot.send_and_log(recipient, user_executed,
                             "Requested user(s) now have channel administrator status.")
            # Persist the modified channel admin list.
            bot.changed()
        else:
            raise MissingParams
    @command("Remove the channel administrator status from one or more users.\n"
             "channel remove <user>")
    @channelmod_command
    @protected_command
    @logged_command
    def channel_remove(bot, params, user_executed, recipient, mainchannel, bypass=False):
        """Revoke channel-admin status on `mainchannel` for each named user.

        NOTE(review): unlike channel_add, the raw nick is compared against the
        admin list without resolving it to the master account — removing an
        admin by one of their aliases would silently do nothing. Confirm
        whether this asymmetry is intended.
        """
        if params:
            for nick in params:
                if nick in mainchannel.admins:
                    mainchannel.admins.remove(nick)
            bot.send_and_log(recipient, user_executed,
                             "Requested user(s) no longer have channel administrator status.")
            # Persist the modified channel admin list.
            bot.changed()
        else:
            raise MissingParams
@command("Sent an announcement to all users in the channel.\n"
"channel announce <message>")
@channelmod_command
@protected_command
@logged_command
def channel_announce(bot, params, user, recipient, mainchannel, bypass=False):
if params:
for channel in bot.channels.itervalues():
if str(channel) == str(mainchannel) or str(channel).startswith("%s_" % mainchannel):
bot.notice(channel,
"Announcement from %s: %s" %
(user.nick, " ".join(params)))
else:
raise MissingParams
@command("Order the bot to kick someone out.\n"
"channel kick <user> [<reason>]")
@channelmod_command
@protected_command
@logged_command
def channel_kick(bot, params, user, recipient, mainchannel, bypass=False):
if not params:
raise MissingParams
for channel in bot.channels.itervalues():
if str(channel) == str(mainchannel) or str(channel).startswith("%s_" % mainchannel):
bot.kick(str(channel), params[0], "%s: %s" % (user, params[1:] if len(params) >= 2 else "no reason specified."))
@command("Order the bot to ban an user.\n"
"channel ban <user>")
@channelmod_command
@protected_command
@logged_command
def channel_ban(bot, params, user, recipient, mainchannel, bypass=False):
if not params:
raise MissingParams
toban = bot.get_user(params[0])
for channel in bot.channels.itervalues():
if str(channel) == str(mainchannel) or str(channel).startswith("%s_" % mainchannel):
bot.mode(chan=str(channel), set=True, modes="b", mask="*!*@%s" % | |
args=["build", element_name])
result.assert_success()
assert cli.get_element_state(project, element_name) == "cached"
key_2 = cli.get_element_key(project, element_name)
assert key_2 != "{:?<64}".format("")
# workspace keys are not recalculated
assert key_1 == key_2
wait_for_cache_granularity()
# Modify the workspace in various different ways, ensuring we
# properly detect the changes.
#
if modification == "addfile":
os.makedirs(os.path.join(workspace, "etc"))
with open(os.path.join(workspace, "etc", "pony.conf"), "w", encoding="utf-8") as f:
f.write("PONY='pink'")
elif modification == "removefile":
os.remove(os.path.join(workspace, "usr", "bin", "hello"))
elif modification == "modifyfile":
with open(os.path.join(workspace, "usr", "bin", "hello"), "w", encoding="utf-8") as f:
f.write("cookie")
else:
# This cannot be reached
assert 0
# First assert that the state is properly detected
assert cli.get_element_state(project, element_name) == "buildable"
key_3 = cli.get_element_key(project, element_name)
assert key_3 != "{:?<64}".format("")
# Since there are different things going on at `bst build` time
# than `bst show` time, we also want to build / checkout again,
# and ensure that the result contains what we expect.
result = cli.run(project=project, args=["build", element_name])
result.assert_success()
assert cli.get_element_state(project, element_name) == "cached"
key_4 = cli.get_element_key(project, element_name)
assert key_4 != "{:?<64}".format("")
# workspace keys are not recalculated
assert key_3 == key_4
# workspace keys are determined by the files
assert key_1 != key_3
# Checkout the result
result = cli.run(project=project, args=["artifact", "checkout", element_name, "--directory", checkout])
result.assert_success()
# Check the result for the changes we made
#
if modification == "addfile":
filename = os.path.join(checkout, "etc", "pony.conf")
assert os.path.exists(filename)
elif modification == "removefile":
assert not os.path.exists(os.path.join(checkout, "usr", "bin", "hello"))
elif modification == "modifyfile":
with open(os.path.join(workspace, "usr", "bin", "hello"), "r", encoding="utf-8") as f:
data = f.read()
assert data == "cookie"
else:
# This cannot be reached
assert 0
# Ensure that various versions that should not be accepted raise a
# LoadError.INVALID_DATA
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize(
    "workspace_cfg",
    [
        # Test loading a negative workspace version
        {"format-version": -1},
        # Test loading version 0 with two sources
        {"format-version": 0, "alpha.bst": {0: "/workspaces/bravo", 1: "/workspaces/charlie",}},
        # Test loading a version with decimals
        {"format-version": 0.5},
        # Test loading an unsupported old version
        {"format-version": 3},
        # Test loading a future version
        {"format-version": BST_WORKSPACE_FORMAT_VERSION + 1},
    ],
)
def test_list_unsupported_workspace(cli, datafiles, workspace_cfg):
    """Invalid or unsupported workspace format versions must fail with
    LoadError.INVALID_DATA rather than being silently accepted."""
    project = str(datafiles)
    # Plant the bogus config where the tool expects its workspace state.
    os.makedirs(os.path.join(project, ".bst2"))
    workspace_config_path = os.path.join(project, ".bst2", "workspaces.yml")
    _yaml.roundtrip_dump(workspace_cfg, workspace_config_path)
    # Any workspace command must refuse to load the config.
    result = cli.run(project=project, args=["workspace", "list"])
    result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA)
# Ensure that various versions that should be accepted are parsed
# correctly.
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize(
    "workspace_cfg,expected",
    [
        # Test loading version 4
        (
            {"format-version": 4, "workspaces": {"alpha.bst": {"path": "/workspaces/bravo"}},},
            {
                "format-version": BST_WORKSPACE_FORMAT_VERSION,
                "workspaces": {"alpha.bst": {"path": "/workspaces/bravo"}},
            },
        ),
    ],
)
def test_list_supported_workspace(cli, tmpdir, datafiles, workspace_cfg, expected):
    """Supported (older) workspace config versions are read correctly and,
    once the workspaces file is rewritten, converted to the current format."""
    def parse_dict_as_yaml(node):
        # Round-trip a plain dict through YAML so it compares equal to what
        # _yaml.load() produces.
        tempfile = os.path.join(str(tmpdir), "yaml_dump")
        _yaml.roundtrip_dump(node, tempfile)
        return _yaml.load(tempfile, shortname=None).strip_node_info()
    project = str(datafiles)
    os.makedirs(os.path.join(project, ".bst"))
    workspace_config_path = os.path.join(project, ".bst", "workspaces.yml")
    _yaml.roundtrip_dump(workspace_cfg, workspace_config_path)
    # Check that we can still read workspace config that is in old format
    result = cli.run(project=project, args=["workspace", "list"])
    result.assert_success()
    loaded_config = _yaml.load(workspace_config_path, shortname=None).strip_node_info()
    # Check that workspace config remains the same if no modifications
    # to workspaces were made
    assert loaded_config == parse_dict_as_yaml(workspace_cfg)
    # Create a test bst file
    bin_files_path = os.path.join(project, "files", "bin-files")
    element_path = os.path.join(project, "elements")
    element_name = "workspace-test.bst"
    workspace = os.path.join(str(tmpdir), "workspace")
    # Create our repo object of the given source type with
    # the bin files, and then collect the initial ref.
    #
    repo = create_repo("git", str(tmpdir))
    ref = repo.create(bin_files_path)
    # Write out our test target
    element = {"kind": "import", "sources": [repo.source_config(ref=ref)]}
    _yaml.roundtrip_dump(element, os.path.join(element_path, element_name))
    # Make a change to the workspaces file (open + close forces a rewrite).
    result = cli.run(project=project, args=["workspace", "open", "--directory", workspace, element_name])
    result.assert_success()
    result = cli.run(project=project, args=["workspace", "close", "--remove-dir", element_name])
    result.assert_success()
    # Check that workspace config is converted correctly if necessary
    loaded_config = _yaml.load(workspace_config_path, shortname=None).strip_node_info()
    assert loaded_config == parse_dict_as_yaml(expected)
@pytest.mark.datafiles(DATA_DIR)
def test_inconsitent_pipeline_message(cli, tmpdir, datafiles):
    """Deleting an open workspace's directory must produce the dedicated
    "inconsistent pipeline" error on the next build.

    NOTE(review): "inconsitent" in the test name is a typo for
    "inconsistent"; left unchanged since pytest selects tests by name.
    """
    element_name, project, workspace = open_workspace(cli, tmpdir, datafiles, "git")
    shutil.rmtree(workspace)
    result = cli.run(project=project, args=["build", element_name])
    result.assert_main_error(ErrorDomain.PIPELINE, "inconsistent-pipeline-workspaced")
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize("strict", [("strict"), ("non-strict")])
def test_cache_key_workspace_in_dependencies(cli, tmpdir, datafiles, strict):
    """A reverse dependency's build picks up its dependency's modified
    workspace content, and cache keys stay stable across repeated builds,
    in both strict and non-strict mode."""
    checkout = os.path.join(str(tmpdir), "checkout")
    element_name, project, workspace = open_workspace(cli, os.path.join(str(tmpdir), "repo-a"), datafiles, "git")
    element_path = os.path.join(project, "elements")
    back_dep_element_name = "workspace-test-back-dep.bst"
    # Write out our test target (build-depends on the workspaced element)
    element = {"kind": "compose", "depends": [{"filename": element_name, "type": "build"}]}
    _yaml.roundtrip_dump(element, os.path.join(element_path, back_dep_element_name))
    # Modify workspace
    shutil.rmtree(os.path.join(workspace, "usr", "bin"))
    os.makedirs(os.path.join(workspace, "etc"))
    with open(os.path.join(workspace, "etc", "pony.conf"), "w", encoding="utf-8") as f:
        f.write("PONY='pink'")
    # Configure strict mode
    strict_mode = True
    if strict != "strict":
        strict_mode = False
    cli.configure({"projects": {"test": {"strict": strict_mode}}})
    # Build artifact with dependency's modified workspace.
    # "{:?<64}".format("") is 64 '?' characters — the placeholder shown when
    # the real cache key is not yet known.
    assert cli.get_element_state(project, element_name) == "buildable"
    key_a1 = cli.get_element_key(project, element_name)
    assert key_a1 != "{:?<64}".format("")
    assert cli.get_element_state(project, back_dep_element_name) == "waiting"
    key_b1 = cli.get_element_key(project, back_dep_element_name)
    assert key_b1 != "{:?<64}".format("")
    result = cli.run(project=project, args=["build", back_dep_element_name])
    result.assert_success()
    assert cli.get_element_state(project, element_name) == "cached"
    key_a2 = cli.get_element_key(project, element_name)
    assert key_a2 != "{:?<64}".format("")
    assert cli.get_element_state(project, back_dep_element_name) == "cached"
    key_b2 = cli.get_element_key(project, back_dep_element_name)
    assert key_b2 != "{:?<64}".format("")
    result = cli.run(project=project, args=["build", back_dep_element_name])
    result.assert_success()
    # workspace keys are not recalculated
    assert key_a1 == key_a2
    assert key_b1 == key_b2
    # Checkout the result
    result = cli.run(project=project, args=["artifact", "checkout", back_dep_element_name, "--directory", checkout])
    result.assert_success()
    # Check that the pony.conf from the modified workspace exists
    filename = os.path.join(checkout, "etc", "pony.conf")
    assert os.path.exists(filename)
    # Check that the original /usr/bin/hello is not in the checkout
    assert not os.path.exists(os.path.join(checkout, "usr", "bin", "hello"))
@pytest.mark.datafiles(DATA_DIR)
def test_multiple_failed_builds(cli, tmpdir, datafiles):
    """Repeatedly building a failing workspaced element must fail cleanly
    each time — no internal errors ("BUG" in stderr) and no bogus cached
    state left behind."""
    # configure-commands invokes a nonexistent program, so the build fails.
    element_config = {"kind": "manual", "config": {"configure-commands": ["unknown_command_that_will_fail"]}}
    element_name, project, _ = open_workspace(cli, tmpdir, datafiles, "git", element_attrs=element_config)
    for _ in range(2):
        result = cli.run(project=project, args=["build", element_name])
        assert "BUG" not in result.stderr
        assert cli.get_element_state(project, element_name) != "cached"
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize("subdir", [True, False], ids=["subdir", "no-subdir"])
@pytest.mark.parametrize("guess_element", [True, False], ids=["guess", "no-guess"])
def test_external_fetch(cli, datafiles, tmpdir_factory, subdir, guess_element):
    """`source fetch --deps all`, run from inside an external workspace
    (optionally from a subdirectory, optionally letting the tool guess the
    element), fetches the workspaced element's dependencies."""
    # An element with an open workspace can't be fetched, but we still expect fetches
    # to fetch any dependencies
    tmpdir = tmpdir_factory.mktemp(BASE_FILENAME)
    depend_element = "fetchable.bst"
    # Create an element to fetch (local sources do not need to fetch)
    create_element_size(depend_element, str(datafiles), "elements", [], 1024)
    element_name, project, workspace = open_workspace(
        cli, tmpdir, datafiles, "git", no_checkout=True, element_attrs={"depends": [depend_element]}
    )
    # With guess_element, no element name is passed on the command line.
    arg_elm = [element_name] if not guess_element else []
    if subdir:
        call_dir = os.path.join(workspace, "usr")
        os.makedirs(call_dir, exist_ok=True)
    else:
        call_dir = workspace
    # Assert that the depended element is not fetched yet
    assert cli.get_element_state(str(datafiles), depend_element) == "fetch needed"
    # Fetch the workspaced element
    result = cli.run(project=project, args=["-C", call_dir, "source", "fetch", "--deps", "all", *arg_elm])
    result.assert_success()
    # Assert that the depended element has now been fetched
    assert cli.get_element_state(str(datafiles), depend_element) == "buildable"
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize("guess_element", [True, False], ids=["guess", "no-guess"])
def test_external_push_pull(cli, datafiles, tmpdir_factory, guess_element):
    """Artifact push and pull work when invoked from within an external
    workspace directory (with or without guessing the element)."""
    # Pushing and pulling to/from an artifact cache works from an external workspace
    tmpdir = tmpdir_factory.mktemp(BASE_FILENAME)
    element_name, project, workspace = open_workspace(cli, tmpdir, datafiles, "git")
    # With guess_element, no element name is passed on the command line.
    arg_elm = [element_name] if not guess_element else []
    with create_artifact_share(os.path.join(str(tmpdir), "artifactshare")) as share:
        result = cli.run(project=project, args=["-C", workspace, "build", element_name])
        result.assert_success()
        cli.configure({"artifacts": {"servers": [{"url": share.repo, "push": True}]}})
        result = cli.run(project=project, args=["-C", workspace, "artifact", "push", *arg_elm])
        result.assert_success()
        result = cli.run(project=project, args=["-C", workspace, "artifact", "pull", "--deps", "all", *arg_elm])
        result.assert_success()
# Attempting to track in an open workspace is not a sensible thing and it's not compatible with workspaces as plugin
# sources: The new ref (if it differed from the old) would have been ignored regardless.
# The user should be expected to simply close the workspace before tracking.
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize("guess_element", [True, False], ids=["guess", "no-guess"])
def test_external_track(cli, datafiles, tmpdir_factory, guess_element):
    """Tracking is a no-op while a workspace is open; after closing the
    workspace, tracking restores the original ref."""
    tmpdir = tmpdir_factory.mktemp(BASE_FILENAME)
    element_name, project, workspace = open_workspace(cli, tmpdir, datafiles, "git")
    element_file = os.path.join(str(datafiles), "elements", element_name)
    # With guess_element, no element name is passed on the command line.
    arg_elm = [element_name] if not guess_element else []
    # Delete the ref from the source so that we can detect if the
    # element has been tracked after closing the workspace
    element_contents = _yaml.load(element_file, shortname=None)
    ref1 = element_contents.get_sequence("sources").mapping_at(0).get_str("ref")
    del element_contents.get_sequence("sources").mapping_at(0)["ref"]
    _yaml.roundtrip_dump(element_contents, element_file)
    result = cli.run(project=project, args=["-C", workspace, "source", "track", *arg_elm])
    result.assert_success()
    # Element is not tracked now
    element_contents = _yaml.load(element_file, shortname=None)
    assert "ref" not in element_contents.get_sequence("sources").mapping_at(0)
    # close the workspace
    result = cli.run(project=project, args=["-C", workspace, "workspace", "close", *arg_elm])
    result.assert_success()
    # and retrack the element
    result = cli.run(project=project, args=["source", "track", element_name])
    result.assert_success()
    element_contents = _yaml.load(element_file, shortname=None)
    ref2 = element_contents.get_sequence("sources").mapping_at(0).get_str("ref")
    # these values should be equivalent
    assert ref1 == ref2
@pytest.mark.datafiles(DATA_DIR)
def test_external_open_other(cli, datafiles, tmpdir_factory):
# From inside an external workspace, open another workspace
tmpdir1 = tmpdir_factory.mktemp(BASE_FILENAME)
tmpdir2 = tmpdir_factory.mktemp(BASE_FILENAME)
# Making use of the assumption that it's the same project in both invocations of open_workspace
_, project, alpha_workspace = open_workspace(cli, tmpdir1, datafiles, "git", suffix="-alpha")
beta_element, _, beta_workspace = open_workspace(cli, tmpdir2, datafiles, "git", suffix="-beta")
# Closing | |
line))
mtl.close()
def split_mesh(verts_loc, faces, unique_materials, filepath, SPLIT_OB_OR_GROUP):
    '''
    Takes vert_loc and faces, and separates into multiple sets of
    (verts_loc, faces, unique_materials, dataname) — one per object/group
    key (face[4]), remapping each face's vertex indices in place into the
    per-key vertex list.
    '''
    filename = os.path.splitext(os.path.basename(filepath))[0]
    if not SPLIT_OB_OR_GROUP:
        # Keep everything as a single mesh named after the file.
        return [(verts_loc, faces, unique_materials, filename)]

    def name_for(key):
        # An empty/absent key falls back to the file name.
        return key if key else filename

    # key -> ([verts], [faces], {materials}, [global->local vert remap])
    buckets = {}
    prev_key = -1  # sentinel that never equals a real object/group key
    bucket = None
    for face in faces:
        key = face[4]
        if key != prev_key:
            # Faces arrive grouped by key, so only look up on key change.
            bucket = buckets.get(key)
            if bucket is None:
                bucket = ([], [], {}, [-1] * len(verts_loc))
                buckets[key] = bucket
            prev_key = key
        sub_verts, sub_faces, sub_mats, remap = bucket
        loc_indices = face[0]
        # Remap global vertex indices to bucket-local ones, copying each
        # vertex into the bucket the first time it is referenced.
        for pos, vi in enumerate(loc_indices):
            mapped = remap[vi]
            if mapped == -1:
                mapped = len(sub_verts)
                remap[vi] = mapped
                sub_verts.append(verts_loc[vi])
            loc_indices[pos] = mapped
        matname = face[2]
        if matname and matname not in sub_mats:
            sub_mats[matname] = unique_materials[matname]
        sub_faces.append(face)
    # Drop the remap lists and attach the resolved names.
    return [(v, f, m, name_for(key)) for key, (v, f, m, _remap) in list(buckets.items())]
def create_mesh(new_objects,
                has_ngons,
                use_ngons,
                use_edges,
                verts_loc,
                verts_tex,
                faces,
                unique_materials,
                unique_material_images,
                unique_smooth_groups,
                vertex_groups,
                dataname,
                ):
    '''
    Takes all the data gathered and generates a mesh, adding the new object to new_objects
    deals with fgons, sharp edges and assigning materials

    Each entry in `faces` is a (vert_indices, tex_indices, material,
    smooth_group, object) tuple; verts_loc holds (x, y, z) positions and
    verts_tex holds UV coordinates. `faces` is modified in place (lines and
    ngons are removed / retriangulated).
    '''
    from bpy_extras.mesh_utils import ngon_tesselate
    # No ngon handling needed when the file contained none.
    if not has_ngons:
        use_ngons = False
    if unique_smooth_groups:
        sharp_edges = {}  # edges on smooth-group boundaries, marked sharp later
        # Per smooth group: edge -> number of faces in the group using it.
        smooth_group_users = {context_smooth_group: {} for context_smooth_group in list(unique_smooth_groups.keys())}
        context_smooth_group_old = -1
    # Split fgons into tri's
    fgon_edges = {}  # Used for storing fgon keys
    if use_edges:
        edges = []
    context_object = None
    # reverse loop through face indices (safe while popping entries)
    for f_idx in range(len(faces) - 1, -1, -1):
        (face_vert_loc_indices,
         face_vert_tex_indices,
         context_material,
         context_smooth_group,
         context_object,
         ) = faces[f_idx]
        len_face_vert_loc_indices = len(face_vert_loc_indices)
        if len_face_vert_loc_indices == 1:
            faces.pop(f_idx)  # cant add single vert faces
        elif not face_vert_tex_indices or len_face_vert_loc_indices == 2:  # faces that have no texture coords are lines
            if use_edges:
                # generators are better in python 2.4+ but can't be used in 2.3
                # edges.extend( (face_vert_loc_indices[i], face_vert_loc_indices[i+1]) for i in xrange(len_face_vert_loc_indices-1) )
                edges.extend([(face_vert_loc_indices[i], face_vert_loc_indices[i + 1]) for i in range(len_face_vert_loc_indices - 1)])
            faces.pop(f_idx)
        else:
            # Smooth Group
            if unique_smooth_groups and context_smooth_group:
                # Is a part of of a smooth group and is a face
                if context_smooth_group_old is not context_smooth_group:
                    edge_dict = smooth_group_users[context_smooth_group]
                    context_smooth_group_old = context_smooth_group
                # Count this face against each of its (sorted) edges.
                for i in range(len_face_vert_loc_indices):
                    i1 = face_vert_loc_indices[i]
                    i2 = face_vert_loc_indices[i - 1]
                    if i1 > i2:
                        i1, i2 = i2, i1
                    try:
                        edge_dict[i1, i2] += 1
                    except KeyError:
                        edge_dict[i1, i2] = 1
            # FGons into triangles
            if has_ngons and len_face_vert_loc_indices > 4:
                ngon_face_indices = ngon_tesselate(verts_loc, face_vert_loc_indices)
                faces.extend([([face_vert_loc_indices[ngon[0]],
                                face_vert_loc_indices[ngon[1]],
                                face_vert_loc_indices[ngon[2]],
                                ],
                               [face_vert_tex_indices[ngon[0]],
                                face_vert_tex_indices[ngon[1]],
                                face_vert_tex_indices[ngon[2]],
                                ],
                               context_material,
                               context_smooth_group,
                               context_object,
                               )
                              for ngon in ngon_face_indices]
                             )
                # edges to make fgons: interior edges are those shared by
                # more than one of the new triangles.
                if use_ngons:
                    edge_users = {}
                    for ngon in ngon_face_indices:
                        for i in (0, 1, 2):
                            i1 = face_vert_loc_indices[ngon[i]]
                            i2 = face_vert_loc_indices[ngon[i - 1]]
                            if i1 > i2:
                                i1, i2 = i2, i1
                            try:
                                edge_users[i1, i2] += 1
                            except KeyError:
                                edge_users[i1, i2] = 1
                    for key, users in edge_users.items():
                        if users > 1:
                            fgon_edges[key] = None
                # remove all after 3, means we dont have to pop this one.
                faces.pop(f_idx)
    # Build sharp edges
    if unique_smooth_groups:
        for edge_dict in list(smooth_group_users.values()):
            for key, users in list(edge_dict.items()):
                if users == 1:  # This edge is on the boundry of a group
                    sharp_edges[key] = None
    # map the material names to an index
    material_mapping = {name: i for i, name in enumerate(unique_materials)}  # enumerate over unique_materials keys()
    materials = [None] * len(unique_materials)
    for name, index in list(material_mapping.items()):
        materials[index] = unique_materials[name]
    me = bpy.data.meshes.new(dataname.decode('utf-8', "replace"))
    # make sure the list isnt too big
    for material in materials:
        me.materials.append(material)
    me.vertices.add(len(verts_loc))
    me.faces.add(len(faces))
    # verts_loc is a list of (x, y, z) tuples
    me.vertices.foreach_set("co", unpack_list(verts_loc))
    # faces is a list of (vert_indices, texco_indices, ...) tuples
    # XXX faces should contain either 3 or 4 verts
    # XXX no check for valid face indices
    me.faces.foreach_set("vertices_raw", unpack_face_list([f[0] for f in faces]))
    if verts_tex and me.faces:
        me.uv_textures.new()
    context_material_old = -1  # avoid a dict lookup
    mat = 0  # rare case it may be un-initialized.
    me_faces = me.faces
    for i, face in enumerate(faces):
        if len(face[0]) < 2:
            pass  # raise "bad face"
        elif len(face[0]) == 2:
            # Two-vert "faces" are edges, handled separately below.
            if use_edges:
                edges.append(face[0])
        else:
            blender_face = me.faces[i]
            (face_vert_loc_indices,
             face_vert_tex_indices,
             context_material,
             context_smooth_group,
             context_object,
             ) = face
            if context_smooth_group:
                blender_face.use_smooth = True
            if context_material:
                if context_material_old is not context_material:
                    mat = material_mapping[context_material]
                    context_material_old = context_material
                blender_face.material_index = mat
                # blender_face.mat= mat
            if verts_tex:
                blender_tface = me.uv_textures[0].data[i]
                if context_material:
                    image, has_data = unique_material_images[context_material]
                    if image:  # Can be none if the material dosnt have an image.
                        blender_tface.image = image
                        # 32-bit depth implies an alpha channel.
                        if has_data and image.depth == 32:
                            blender_tface.alpha_blend = 'ALPHA'
                # BUG - Evil eekadoodle problem where faces that have vert index 0 location at 3 or 4 are shuffled.
                if len(face_vert_loc_indices) == 4:
                    if face_vert_loc_indices[2] == 0 or face_vert_loc_indices[3] == 0:
                        face_vert_tex_indices = face_vert_tex_indices[2], face_vert_tex_indices[3], face_vert_tex_indices[0], face_vert_tex_indices[1]
                else:  # length of 3
                    if face_vert_loc_indices[2] == 0:
                        face_vert_tex_indices = face_vert_tex_indices[1], face_vert_tex_indices[2], face_vert_tex_indices[0]
                # END EEEKADOODLE FIX
                # assign material, uv's and image
                blender_tface.uv1 = verts_tex[face_vert_tex_indices[0]]
                blender_tface.uv2 = verts_tex[face_vert_tex_indices[1]]
                blender_tface.uv3 = verts_tex[face_vert_tex_indices[2]]
                if len(face_vert_loc_indices) == 4:
                    blender_tface.uv4 = verts_tex[face_vert_tex_indices[3]]
                # for ii, uv in enumerate(blender_face.uv):
                #     uv.x, uv.y = verts_tex[face_vert_tex_indices[ii]]
    del me_faces
    # del ALPHA
    if use_edges and not edges:
        use_edges = False
    if use_edges:
        me.edges.add(len(edges))
        # edges should be a list of (a, b) tuples
        me.edges.foreach_set("vertices", unpack_list(edges))
        # me_edges.extend( edges )
    # del me_edges
    # Add edge faces.
    # me_edges= me.edges
    def edges_match(e1, e2):
        # Edge equality irrespective of vertex order.
        return (e1[0] == e2[0] and e1[1] == e2[1]) or (e1[0] == e2[1] and e1[1] == e2[0])
    # XXX slow
    # if use_ngons and fgon_edges:
    #     for fgon_edge in fgon_edges.keys():
    #         for ed in me.edges:
    #             if edges_match(fgon_edge, ed.vertices):
    #                 ed.is_fgon = True
    # if use_ngons and fgon_edges:
    #     FGON= Mesh.EdgeFlags.FGON
    #     for ed in me.findEdges( fgon_edges.keys() ):
    #         if ed is not None:
    #             me_edges[ed].flag |= FGON
    #     del FGON
    # XXX slow
    # if unique_smooth_groups and sharp_edges:
    #     for sharp_edge in sharp_edges.keys():
    #         for ed in me.edges:
    #             if edges_match(sharp_edge, ed.vertices):
    #                 ed.use_edge_sharp = True
    # if unique_smooth_groups and sharp_edges:
    #     SHARP= Mesh.EdgeFlags.SHARP
    #     for ed in me.findEdges( sharp_edges.keys() ):
    #         if ed is not None:
    #             me_edges[ed].flag |= SHARP
    #     del SHARP
    me.validate()
    me.update(calc_edges=use_edges)
    ob = bpy.data.objects.new("Mesh", me)
    new_objects.append(ob)
    # Create the vertex groups. No need to have the flag passed here since we test for the
    # content of the vertex_groups. If the user selects to NOT have vertex groups saved then
    # the following test will never run
    for group_name, group_indices in vertex_groups.items():
        group = ob.vertex_groups.new(group_name.decode('utf-8', "replace"))
        group.add(group_indices, 1.0, 'REPLACE')
def create_nurbs(context_nurbs, vert_loc, new_objects):
'''
Add nurbs object to blender, only support one type at the moment
'''
deg = context_nurbs.get(b'deg', (3,))
curv_range = context_nurbs.get(b'curv_range')
curv_idx = context_nurbs.get(b'curv_idx', [])
parm_u = context_nurbs.get(b'parm_u', [])
parm_v = context_nurbs.get(b'parm_v', [])
name = context_nurbs.get(b'name', b'ObjNurb')
cstype = context_nurbs.get(b'cstype')
if cstype is None:
print('\tWarning, cstype not found')
return
if cstype != b'bspline':
print('\tWarning, cstype is not supported (only bspline)')
return
if not curv_idx:
print('\tWarning, curv argument empty or not set')
return
if len(deg) > 1 or parm_v:
print('\tWarning, surfaces not supported')
return
cu = bpy.data.curves.new(name.decode('utf-8', "replace"), 'CURVE')
cu.dimensions = '3D'
nu = cu.splines.new('NURBS')
nu.points.add(len(curv_idx) - 1) # a point is added to start with
nu.points.foreach_set("co", [co_axis for vt_idx in curv_idx for co_axis in (vert_loc[vt_idx] + (1.0,))])
nu.order_u = deg[0] + 1
# get for endpoint flag from | |
re.sub(r'[^\w ]', '', s)
return s
def numbers_only(s):
    """Return *s* with every non-digit character removed."""
    return re.sub(r'\D', '', s)
# Some mobile browsers which look like desktop browsers.
RE_MOBILE = re.compile(r"(iphone|ipod|blackberry|android|palm|windows\s+ce)", re.I)
RE_DESKTOP = re.compile(r"(windows|linux|os\s+[x9]|solaris|bsd)", re.I)
RE_BOT = re.compile(r"(spider|crawl|slurp|bot)", re.I)

def is_desktop(user_agent):
    """
    Anything that looks like a phone isn't a desktop.
    Anything that looks like a desktop probably is.
    Anything that looks like a bot should default to desktop.

    Fix: the previous expression `not mobile and desktop or bot` grouped as
    `(not mobile and desktop) or bot`, so a *mobile* bot was classified as
    desktop, contradicting the first rule above. The phone check now
    short-circuits before the desktop/bot checks.
    """
    if RE_MOBILE.search(user_agent):
        return False
    return bool(RE_DESKTOP.search(user_agent) or RE_BOT.search(user_agent))
def getStateToken():
    '''
    Generate a 32-character hex state token (md5 of a random int).

    NOTE(review): random.randint + md5 is not cryptographically strong;
    suitable only for non-secret state values.
    '''
    i = random.randint(0, 1000000)
    md = hashlib.md5()
    # hashlib requires bytes on Python 3; encoding is a no-op for this
    # ASCII digit string on Python 2.
    md.update(str(i).encode('utf-8'))
    return md.hexdigest()
def gets(self, strings=None, lists=None, floats=None, integers=None, booleans=None,
         dates=None, times=None, json=None, multi=False, addMultiBrackets=False,
         getDefault=None, ignoreMissing=True, supportTextBooleans=False):
    '''
    Extract typed parameters from self.request into a dict.

    Each of strings/lists/floats/integers/booleans/dates/times/json is a
    list of parameter names to coerce to that type (the `json` parameter
    shadows the json module only inside this function).

    Use ignoreMissing if resulting dictionary should not contain params
    that were not passed via request.

    Fix: defaults were mutable lists ([]) and the filtering below used
    in-place slice assignment (param_list[:] = ...), which mutated both the
    shared defaults and any list the caller passed in. Defaults are now
    None and filtering builds new local lists.
    '''
    strings = list(strings) if strings else []
    lists = list(lists) if lists else []
    floats = list(floats) if floats else []
    integers = list(integers) if integers else []
    booleans = list(booleans) if booleans else []
    dates = list(dates) if dates else []
    times = list(times) if times else []
    json = list(json) if json else []
    vals = {}
    if ignoreMissing:
        # Strip [] for multi params
        all_args = set(arg.replace('[]', '') for arg in self.request.arguments())
        # Only keep params that were actually sent. NOTE(review): floats and
        # times were historically never filtered here; quirk preserved.
        strings = [x for x in strings if x in all_args]
        lists = [x for x in lists if x in all_args]
        integers = [x for x in integers if x in all_args]
        booleans = [x for x in booleans if x in all_args]
        dates = [x for x in dates if x in all_args]
        json = [x for x in json if x in all_args]
    for arg in strings:
        val = self.request.get(arg, default_value=getDefault)
        if val != getDefault or not ignoreMissing:
            vals[arg] = val
    for arg in lists:
        if multi:
            _arg = arg + '[]' if addMultiBrackets else arg
            vals[arg] = self.request.get_all(_arg)
        else:
            # Single comma-separated value, e.g. "a, b,c" -> ['a', 'b', 'c']
            raw = self.request.get(arg, default_value=getDefault)
            if raw:
                vals[arg] = raw.replace(', ', ',').split(',')
            else:
                vals[arg] = []
    for arg in booleans:
        if supportTextBooleans:
            # Accepts "1"/"0" as well as "true"/"false" (case-insensitive)
            val = self.request.get(arg, default_value=getDefault)
            if val != getDefault:
                if val.isdigit():
                    vals[arg] = bool(int(val))
                else:
                    vals[arg] = val.lower() in ['true']
        else:
            vals[arg] = self.request.get_range(arg) == 1
    for arg in integers:
        vals[arg] = self.request.get_range(arg, default=getDefault)
    for arg in floats:
        val = self.request.get(arg, default_value=getDefault)
        if val is not None:
            try:
                vals[arg] = float(val)
            except ValueError:
                pass
    for arg in json:
        raw = self.request.get(arg)
        vals[arg] = getJson(raw)
    for arg in dates:
        raw = self.request.get(arg, default_value=getDefault)
        if raw:
            vals[arg] = datetime.strptime(raw, '%m/%d/%Y')
        else:
            vals[arg] = None
    for arg in times:
        raw = self.request.get(arg, default_value=getDefault)
        if raw:
            vals[arg] = parseTimeString(raw)
        else:
            vals[arg] = None
    return vals
def getSHA(pw, salt=None):
    '''
    Salted SHA-256 of *pw*; returns [salt, hexdigest].

    A fresh 32-character salt is generated when none is supplied.
    NOTE(review): cgi.escape is kept because existing stored hashes depend
    on it (it is removed in Python 3.8+ -- this module is Python 2).
    sha.update(pw) likewise relies on py2 byte strings.
    '''
    pw = cgi.escape(pw)
    if not salt:
        POOL = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
        # SystemRandom is os.urandom-backed; the plain `random` module is
        # predictable and unsuitable for generating salts.
        rng = random.SystemRandom()
        salt = ''.join(rng.choice(POOL) for _ in range(32))
    sha = hashlib.sha256()
    sha.update(pw)
    sha.update(salt)
    pw_sha = sha.hexdigest()
    return [salt, pw_sha]
def validJson(raw, default=None):
    '''
    Re-serialize *raw* if it is valid, non-null JSON.

    Returns json.dumps() of the parsed value, or *default* when parsing
    fails or the parsed value is None.

    Fix: `except Exception, e` is Python-2-only syntax; `as e` works on
    both Python 2.6+ and Python 3.
    '''
    try:
        json_dict = json.loads(raw)
        if json_dict is not None:
            return json.dumps(json_dict)
    except Exception as e:
        logging.error("Error validating json: %s" % e)
    return default
def getJson(raw):
    '''
    Best-effort JSON decode: returns either a list or dictionary, or None.

    Handles double-encoded JSON (a JSON string whose content is itself
    JSON). Scalars, bad JSON, and falsy input all yield None.

    Fix: the old check `isinstance(j, str) or isinstance(j, unicode)` raised
    NameError on Python 3 (`unicode` is gone); the type tuple below is
    (str, unicode) on py2 and (str, str) on py3.
    '''
    j = None
    if raw:
        try:
            j = json.loads(raw)
            if isinstance(j, ("".__class__, u"".__class__)):
                # Handle double-encoded JSON
                j = json.loads(j)
        except Exception:
            pass
    return j if isinstance(j, (list, dict)) else None
def getKey(cls, prop, entity, asID=True, keyObj=False, asKeyName=False):
    """
    Resolve a datastore reference property on *entity* via
    get_value_for_datastore and return it in the requested form.

    Looks the property up on *cls* when given, otherwise on *entity*.
    Returns the key's id (default), its name (asKeyName), the raw key
    object (keyObj), or str(key) -- and None when the property or the
    stored key is missing.
    """
    props = cls.properties() if cls else entity.properties()
    prop_obj = props.get(prop)
    if not prop_obj:
        return None
    key = prop_obj.get_value_for_datastore(entity)
    if not key:
        return None
    if asID:
        return key.id()
    if asKeyName:
        return key.name()
    if keyObj:
        return key
    return str(key)
def normalize_to_ascii(text):
    '''
    NFKD-normalize *text* and drop any characters that are not ASCII.

    Accepts None (returned as-is), byte strings (assumed UTF-8), text, or
    any object convertible via string formatting. Returns a byte string
    (the native py2 str), matching the original behaviour.

    Fix: `basestring`/`unicode` do not exist on Python 3; the checks below
    are portable (bytes is py2 str; type(u"") is unicode on py2, str on py3).
    '''
    if text is None:
        return None
    import unicodedata
    if isinstance(text, bytes):
        # Assume UTF-8 encoded byte input
        text = text.decode('utf-8')
    elif not isinstance(text, type(u"")):
        # Arbitrary objects: render to text first
        text = u"%s" % (text,)
    normalized_text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore')
    return normalized_text
def normalize_list_to_ascii(l):
    """Apply normalize_to_ascii to every element of *l* and return the list."""
    return list(map(normalize_to_ascii, l))
def safe_geopoint(geo_str):
    '''
    geo_str as lat,lon
    returns a db.GeoPt if possible and if not None

    A parsed point of exactly (0.0, 0.0) is treated as "no location" and
    returns None as well.

    Fix: logging.error(str(e)) previously sat OUTSIDE the except block, so
    every successful parse raised NameError on the unbound `e`. The log
    call now lives inside the handler.
    '''
    if geo_str is None:
        return None
    gp = None
    try:
        gp = db.GeoPt(geo_str)
    except Exception as e:
        logging.error(str(e))
    if gp and gp.lat == 0.0 and gp.lon == 0.0:
        gp = None
    return gp
def safeIsDigit(val):
    '''
    True when *val* is a digits-only string or an exact int/long.

    Exact type checks (not isinstance) are preserved from the original, so
    bool and int subclasses return False.

    Fix: `unicode` and `long` raise NameError on Python 3; the portable
    tuples below are (str, unicode)/(int, long) on py2 and (bytes, str)/
    (int,) on py3.
    '''
    if type(val) in (type(b""), type(u"")):
        return val.isdigit()
    try:
        int_types = (int, long)  # noqa: F821 -- `long` exists on py2 only
    except NameError:
        int_types = (int,)
    return type(val) in int_types
def safe_number(str_or_num):
    '''
    Coerce a string or number to float; returns None on failure.

    Commas (thousands separators) are stripped from string input first.

    Fix: `basestring` and `except Exception, e` are Python-2-only; both
    replaced with forms valid on py2.6+ and py3.
    '''
    try:
        if isinstance(str_or_num, (type(b""), type(u""))) and ',' in str_or_num:
            str_or_num = str_or_num.replace(',', '')
        return float(str_or_num)
    except Exception as e:
        logging.error("Failed to convert '%s' to number - %s" % (str_or_num, e))
        return None
def safe_add_task(callable, *args, **kwargs):
    """This function guarantees addition of a task to a queue.
    It retries adding the task if any error occurs during task creation.
    There are 3 ways to use this function
    1. Adding a single task
    tools.safe_add_task("/admin/sms", params={'recipient':'254731501591', queue_name='admin-queue'})
    2. Adding a list of tasks
    tools.safe_add_task([{url="/admin/sms", params={'recipient':'254731501591'}, {url="/admin/sms", params={'recipient':'254731501592'}], queue_name='admin-queue')
    3. Adding a deferred task
    tools.safe_add_task(myworker.run, params={'targetGroup':'TESTG', queue_name='worker-queue'})

    Returns True on success (a task that already exists also counts as
    success); returns False once 10 retries have been exhausted.
    """
    # Private retry counter -- popped so it is never forwarded to
    # taskqueue.Task / deferred.defer as a real argument.
    task_add_retries = kwargs.pop("task_add_retries", 0)
    # Tasks are enqueued in batches of this size (see the while loop below).
    TASK_BATCH_SIZE = 100
    from constants import BACKGROUND_SERVICE
    success = True
    try:
        if isinstance(callable, basestring): # a url string
            # Normalize the single-url form into the list-of-dicts form
            # handled below. NOTE: kwargs is deliberately REPLACED here --
            # everything except queue_name becomes part of the task itself.
            task_dict = dict(kwargs)
            task_dict['url'] = callable
            kwargs = {
                "queue_name": task_dict.pop("queue_name", "default"),
            }
            task_dict['eta'] = task_dict.pop("eta", None)
            callable = [task_dict]
        if isinstance(callable, list): # a list of tasks
            # create a list of taskqueue.Task Objects from the list of dicts
            task_list = []
            for task_dict in callable:
                # Give each task an explicit unique name (duplicate names
                # trigger the TaskAlreadyExists/Tombstoned path below).
                task_dict.setdefault("name", uuid.uuid4().hex)
                # run tasks on the crons micro-service (if non specified)
                if on_default_version():
                    task_dict.setdefault("target", BACKGROUND_SERVICE)
                task = taskqueue.Task(**task_dict)
                task_list.append(task)
            # if no queue_name is provided, default is used.
            queue_name = kwargs.get('queue_name', 'default')
            queue = taskqueue.Queue(queue_name)
            while len(task_list) > 0:
                tasks_to_add = task_list[:TASK_BATCH_SIZE]
                queue.add(tasks_to_add)
                logging.info("Queued up %d tasks" % len(tasks_to_add))
                task_list = task_list[TASK_BATCH_SIZE:]
        else:
            # Simple callable passed in
            kwargs.setdefault("_name", uuid.uuid4().hex)
            if on_default_version():
                kwargs.setdefault("_target", BACKGROUND_SERVICE)
            deferred.defer(callable, *args, **kwargs)
        return success
    except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError):
        # A task with this name was already added -- treat as success.
        return success
    except Exception, e:
        exception_name = sys.exc_info()[0].__name__
        exception_details = str(sys.exc_info()[1])
        if task_add_retries >= 10:
            logging.error("TASK CREATION ABORTED AFTER %d RETRIES!: %s %s %s" % (task_add_retries, kwargs, exception_name, exception_details))
            return False
        else:
            logging.warning("TASK CREATION FAILED RETRYING!: %s %s %s %s" % (callable, kwargs, exception_name, exception_details))
            # Retry by recursing with an incremented counter. NOTE(review):
            # callable/kwargs may already have been rewritten above, so the
            # retry re-enters in the normalized list-of-dicts form.
            kwargs["task_add_retries"] = task_add_retries+1
            return safe_add_task(callable, *args, **kwargs)
def first_non_none(list, default=None):
    """Return the first element of *list* that is not None, else *default*.

    (The parameter name `list` shadows the builtin but is kept for
    backward compatibility with keyword callers.)
    """
    for item in list:
        if item is not None:
            return item
    return default
def variable_replacement(text, replacements=None, var_parens="{}"):
    """Substitute {key}-style placeholders in *text*.

    *replacements* maps key -> value (or a zero-arg callable producing the
    value; a callable returning None substitutes "--"). *var_parens* gives
    the opening and closing delimiter characters, e.g. "{}" or "[]".
    """
    if not replacements:
        return text
    open_ch, close_ch = var_parens[0], var_parens[1]
    for key, val in replacements.items():
        token = open_ch + key + close_ch
        if token not in text:
            continue
        if callable(val):
            resolved = val()
            if resolved is None:
                resolved = "--"
        else:
            resolved = val
        text = text.replace(token, str(resolved))
    return text
def make_function_signature(func_name, *args, **kwargs):
    """Build a deterministic string key from a function name and its args.

    kwargs are sorted by name so the same call always yields the same key.
    (Dict keys are unique, so sorting the item tuples never compares values.)
    """
    ordered_kwargs = sorted(kwargs.items())
    return "-".join([func_name, str(args), str(ordered_kwargs)])
def lower_no_spaces(s):
    """Lowercase *s*, strip symbols, and drop spaces; '' for falsy input."""
    if not s:
        return ""
    return strip_symbols(s.lower()).replace(' ', '')
def toDecimal(amount):
    '''
    Convert *amount* to a Decimal; returns None on failure.

    Falsy input is treated as 0; commas are stripped from string input.

    Fix: `unicode` and `except Exception, e` are Python-2-only; replaced
    with forms valid on both py2.6+ and py3.
    '''
    amount = amount if amount else 0
    value = None
    if type(amount) in (type(b""), type(u"")) and ',' in amount:
        amount = amount.replace(',', '')
    try:
        value = Decimal(amount)
    except Exception as e:
        logging.error("Error in toDecimal: %s" % e)
    return value
def in_same_period(ms1, ms2, period_type=4):
    '''Check whether or not two timestamps (ms) are in the same
    period (as defined by period_type).
    Args:
        ms1: First timestamp (ms)
        ms2: Second timestamp (ms)
        period_type (int): defaults to RULE.DAY
    Returns:
        boolean (None for an unrecognized period_type -- preserved quirk)
    '''
    from constants import RULE, MS_PER_SECOND, MS_PER_MINUTE, MS_PER_HOUR, MS_PER_DAY
    # Fixed-length periods: two timestamps share a period iff they fall in
    # the same integer bucket. Floor division (//) keeps the buckets
    # integral on Python 3 as well -- the original used /, which floors
    # only under Python 2 and would compare floats on py3.
    bucket_ms = {
        RULE.SECOND: MS_PER_SECOND,
        RULE.MINUTE: MS_PER_MINUTE,
        RULE.HOUR: MS_PER_HOUR,
        RULE.DAY: MS_PER_DAY,
    }
    if period_type in bucket_ms:
        divisor = bucket_ms[period_type]
        return ms1 // divisor == ms2 // divisor
    # Calendar periods: compare the start of each period instead.
    if period_type == RULE.WEEK:
        # TODO
        ms1_last_monday = last_monday(dt_from_ts(ms1))
        ms2_last_monday = last_monday(dt_from_ts(ms2))
        return ms1_last_monday == ms2_last_monday
    if period_type == RULE.MONTH:
        ms1_mo_begin = get_first_day(dt_from_ts(ms1))
        ms2_mo_begin = get_first_day(dt_from_ts(ms2))
        return ms1_mo_begin == ms2_mo_begin
def int_hash(value):
    '''Returns the last 8 digits of the sha1 hash of str(value).

    Fix: hashlib requires bytes on Python 3, so the string is encoded
    first (a no-op for ASCII data on Python 2).
    '''
    return int(hashlib.sha1(str(value).encode('utf-8')).hexdigest(), 16) % (10 ** 8)
def batched_runtime_with_jitter(now, interval_mins=5, name_prefix=None, max_jitter_pct=0.0):
    """Round *now* up to the next interval_mins boundary, optionally jittered.

    Seconds and microseconds are zeroed; a time already on a boundary moves
    to the NEXT boundary. When max_jitter_pct is non-zero, a deterministic
    offset (seeded from name_prefix, so the same prefix always gets the
    same delay) of up to max_jitter_pct of the interval is added.
    """
    minutes_until_boundary = interval_mins - (now.minute % interval_mins)
    runAt = (now + timedelta(minutes=minutes_until_boundary)
             - timedelta(seconds=now.second, microseconds=now.microsecond))
    if max_jitter_pct:
        rng = random.Random(int_hash(name_prefix))
        # always same for same name_prefix
        jitter_secs = interval_mins * 60 * max_jitter_pct * rng.random()
        runAt += timedelta(seconds=jitter_secs)
    return runAt
def on_default_version():
    """True when this instance is running the app's default service version."""
    current = modules.get_current_version_name()
    return current == modules.get_default_version()
def add_batched_task(callable, name_prefix, interval_mins=5, max_jitter_pct=0.0, warnOnDuplicate=True, *args, **kwargs):
"""Add a task batched to the nearest synchronized interval.
Adds a task batched and scheduled for the next even (synchronized) X minutes
Tasks with same name already scheduled for this time will not be re-added
Useful for processing work that should be run exactly once within a certain interval,
and do not need to be run immediately.
For example, if run at 12:43 with a 5 minute interval, schedule will be for 12:45
Args:
callable: must be a deferred task
name_prefix: string to add to task name
interval_mins (int): interval for scheduling
max_jitter_pct (float, 0.0-1.0): if non-zero, will delay | |
# Source repository: Stitch-Zhang/xls2xlsx (GitHub, 10-100 stars)
from bs4 import BeautifulSoup, UnicodeDammit, NavigableString, Comment, CData, ProcessingInstruction, Declaration, Doctype # pip install beautifulsoup4
import quopri
from bs4 import GuessedAtParserWarning
from openpyxl import Workbook
from openpyxl.styles import PatternFill, Border, Alignment, Font, Side, Color
from openpyxl.comments import Comment as OpenpyxlComment
from openpyxl.utils import get_column_letter, column_index_from_string
from openpyxl.drawing.image import Image
from openpyxl.styles import numbers
from openpyxl.styles.numbers import BUILTIN_FORMATS, BUILTIN_FORMATS_REVERSE
import requests
import copy
import io
import os
import re
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
import cssutils
import webcolors
import traceback
import logging
import email # For mht files
import shutil
from functools import lru_cache
from dateutil.parser import parse as date_parse
from datetime import datetime, date, timedelta
from datetime import time as tm
from urllib.parse import urljoin
from time import sleep
from PIL import ImageFont
import math
from fontTools import ttLib # pip install fonttools
import yaml
import sys
import calendar
try:
import currency_symbols.constants as currency_symbols_constants
except Exception:
import importlib
currency_symbols_constants = importlib.import_module('currency-symbols.constants')
# Module-level setup applied at import time.
TRACE=False  # Debug switch: FontUtils dumps trace images / prints when True
cssutils.log.setLevel(logging.CRITICAL) # Remove 'Unknown Property name' messages
# Patch openpyxl builtin number format 14 to a 4-digit-year date, and keep
# the reverse lookup table consistent with that change.
BUILTIN_FORMATS[14] = 'm/d/yyyy' # See https://foss.heptapod.net/openpyxl/openpyxl/-/issues/1534
del BUILTIN_FORMATS_REVERSE['mm-dd-yy']
BUILTIN_FORMATS_REVERSE['m/d/yyyy'] = 14
class FontUtils:
    """Locate font files by name and measure rendered text.

    On first use this scans the platform's font directories, maps each
    font's internal name to its file path, and caches the result in
    fontnames.yaml next to this module. The map is then used (via Pillow)
    to measure text so spreadsheet cell widths/heights can be estimated.
    """
    LINE_HEIGHT_FACTOR = 1.25 # line_height in px = Font size in px * LINE_HEIGHT_FACTOR
    def __init__(self):
        # Character classes used by the width-estimation fallback in
        # get_font_size (note: 'I' counts as skinny and 'W' as fat, so
        # they are deliberately absent from upper_chars).
        self.skinny_chars = "1!|iIl.,;:' "
        self.upper_chars = "ABCDEFGHJKLMNOPQRSTUVXYZmw()[]$&*-+{}<>/?"
        self.fat_chars = 'MW@#%_'
        fontnames_file = os.path.join(os.path.dirname(__file__), 'fontnames.yaml')
        self.name_to_path = {}
        if os.path.isfile(fontnames_file):
            # Use the cached name->path map from a previous scan
            with open(fontnames_file, 'r') as fn:
                self.name_to_path = yaml.safe_load(fn)
        else:
            dirs = ['fonts']
            # Code 'borrowed' from PIL:
            if sys.platform == "win32":
                windir = os.environ.get("WINDIR")
                if windir:
                    dirs.append(os.path.join(windir, "fonts"))
            elif sys.platform in ("linux", "linux2"):
                lindirs = os.environ.get("XDG_DATA_DIRS", "")
                if not lindirs:
                    lindirs = "/usr/share"
                dirs += [os.path.join(lindir, "fonts") for lindir in lindirs.split(":")]
            elif sys.platform == "darwin":
                dirs += ["/Library/Fonts", "/System/Library/Fonts",
                         os.path.expanduser("~/Library/Fonts"),
                         ]
            # TrueType 'name' table record id for the full font name
            FONT_SPECIFIER_NAME_ID = 4
            for d in dirs:
                if os.path.isdir(d):
                    files = os.listdir(d)
                    for f in files:
                        path = os.path.join(d, f)
                        try:
                            tt = ttLib.TTFont(path)
                            name = None
                            for r in tt['name'].names:
                                if r.nameID == FONT_SPECIFIER_NAME_ID:
                                    # Embedded NULs indicate a UTF-16-BE name record
                                    if b'\000' in r.string:
                                        try:
                                            name = str(r.string, 'utf-16-be')
                                            break
                                        except Exception:
                                            pass
                                    else:
                                        name = str(r.string, 'latin-1')
                                        break
                            if name:
                                nl = name.lower()
                                if nl != name:
                                    self.name_to_path[nl] = '!' + name  # '!' means alias
                                self.name_to_path[name] = path
                            else:
                                print(f'No name for {path}')
                        except Exception:
                            # Unreadable / non-TrueType file: skip it
                            pass
            # Persist the scan so later runs can skip it
            with open(fontnames_file, 'w') as fn:
                yaml.dump(self.name_to_path, fn)
    def get_real_font_name(self, name, bold=False, italic=False):
        """Given a font name which may not be in the proper case, return the real font name, or None if not found"""
        fn = name.lower()
        if bold:
            fn += ' bold'
        if italic:
            fn += ' italic'
        result = self.name_to_path.get(fn)
        # Implicitly returns None when the name is unknown
        if result is not None:
            if result[0] == '!':  # Alias
                return result[1:]  # Real name
            else:
                return fn  # We found it in lower case
    def get_font_path(self, name, bold=False, italic=False):
        """Given a font name (which may not be in the proper case), return the path to the font file or None if not found"""
        real_name = self.get_real_font_name(name, bold, italic)
        if real_name is None:
            return None
        return self.name_to_path.get(real_name)
    # NOTE(review): lru_cache on an instance method keys on self and keeps
    # the instance alive for the cache lifetime; acceptable if FontUtils is
    # used as a long-lived singleton -- confirm.
    @lru_cache(maxsize=32)
    def get_font(self, name, size=10, bold=False, italic=False):
        """Return a Pillow truetype font for name/size, or None if not found."""
        # Pillow creates images that are 72 pixels per inch by default, and a point is 1/72 of an inch, so
        # we must convert the size to pixels in order to get the proper size.
        size = self.pt_to_px(size)
        size = math.ceil(size)
        font_path = self.get_font_path(name, bold, italic)
        if not font_path:
            return None
        return ImageFont.truetype(font_path, size)
    @staticmethod
    def str_to_filename(s, ext):  # pragma nocover
        """Convert this string to a valid filename (used for debugging only)"""
        result = re.sub(r'[.<>:"/\\|?*\[\]\s]', '_', s) + ext
        if len(result) > 100:
            result = result[:100] + ext
        return result
    def get_font_size(self, font, s):
        """Get the width and height of text 's' in the given font. 's' is a single line of text (without newlines)

        *font* is an openpyxl-style font object (.name/.sz/.b/.i are read
        here -- TODO confirm against callers). Returns (width_px, height_px).
        NOTE(review): height reads font.sz before the `not font` guard, so a
        None font would raise here; the guard appears unreachable as written.
        """
        height = self.pt_to_px(font.sz) * self.LINE_HEIGHT_FACTOR
        if not font or not s:
            return (0, height)
        tt_font = self.get_font(font.name, font.sz, font.b, font.i)
        if tt_font:
            # Measure with the real font file.
            # NOTE(review): ImageFont.getsize was removed in Pillow 10 --
            # may need getbbox/getlength on newer Pillow; confirm pin.
            #return font.getsize_multiline(s, spacing=font.size/3)
            width, _ = tt_font.getsize(s)
            #width = self.pt_to_px(width)
            if TRACE:
                # Debug aid: render the measured text to trace/<name>.png
                os.makedirs('trace', exist_ok=True)
                from PIL import ImageDraw
                from PIL import Image as PILImage
                img = PILImage.new('RGB', (math.ceil(width), math.ceil(height)), color='white')
                draw = ImageDraw.Draw(img)
                draw.text((0, 0), s, fill='black', font=tt_font)
                img.save(os.path.join('trace', self.str_to_filename(f'{font.name}_{font.sz}_{s}', '.png')))
        else:
            # Estimate the font size if we can't find the true answer:
            # accumulate per-character width factors, then scale to pixels.
            width = 0
            for c in s:
                if c in self.skinny_chars:
                    width += 0.6
                elif c in self.fat_chars:
                    if font.b and font.name == 'Calibri':
                        width += 2.1
                    else:
                        width += 1.9
                elif c in self.upper_chars:
                    width += 1.4
                else:
                    width += 1
            if font.b and font.name != 'Calibri':
                width *= 1.1  # 10% wider for non-Calibri fonts in bold (like Arial)
            width += 1  # Give it some margin
            width *= (font.sz/11)
            width *= 7  # Convert chars to px
        return (width, height)
    def lines_needed(self, img_width, s, font):
        """How many lines are needed to render this text 's' using img_width pixels?"""
        if '\n' in s:
            # Explicit newlines: sum the lines needed for each physical line
            lines = s.split('\n')
            result = 0
            for line in lines:
                result += self.lines_needed(img_width, line, font)
            if TRACE:
                print(f'lines_needed({img_width}, {s}, {font.sz}) = {result} (1)')
            return result
        number_of_lines = 0
        # count how many lines are needed to break the string into multi-lines that fit img_width
        line = ''
        for token in s.split():
            if line:
                line += ' ' + token
            else:
                line = token
            w = self.get_font_size(font, line)[0]
            if w > img_width:
                # Overflow: count the full lines consumed and restart the
                # current line with just this token
                number_of_lines += int(w // img_width)
                line = token
            elif w == img_width:
                number_of_lines += 1
                line = ''
        if line:
            number_of_lines += 1
        if number_of_lines == 0:  # E.g. if you send the empty string
            number_of_lines = 1
        if TRACE:
            print(f'lines_needed({img_width}, {s}, {font.sz}) = {number_of_lines} (2)')
        return number_of_lines
    @staticmethod
    def pt_to_px(pt):
        """Points -> pixels (0.75 pt per px, the CSS 96-dpi convention); None passes through."""
        if pt is None:
            return pt
        return pt / 0.75
    @staticmethod
    def px_to_pt(px):
        """Pixels -> points (1 px = 0.75 pt); None passes through."""
        if px is None:
            return px
        return px * 0.75
class CSSStyle:
RETRIES=6
DEFAULT_POINT_SIZE = 10
SPREADSHEET_WIDTH_PX = 1900 # e.g. to determine what width="5%" means
SPREADSHEET_HEIGHT_PX = 800
MAX_CELL_HEIGHT_PT = 409 # Excel limit on cell height
MAX_CELL_WIDTH_UNITS = 255 # Excel limit on cell width
MIN_CELL_HEIGHT_PT = FontUtils.px_to_pt(15)
MIN_CELL_WIDTH_PX = 15
DEFAULT_CELL_WIDTH_PX = 64
def __init__(self):
self.stylemap = {}
self.pt_list = ('7.5pt', '10pt', '12pt', '13.5pt', '18pt', '24pt', '36pt')
self.inherited_properties = { 'border-collapse', 'border-spacing', 'caption-side', 'color', 'cursor',
'direction', 'empty-cells', 'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
'font-size-adjust', 'font-stretch', 'font', 'letter-spacing', 'line-height', 'list-style-image',
'list-style-position', 'list-style-type', 'list-style', 'orphans', 'quotes', 'tab-size', 'text-align',
'text-align-last', 'text-decoration-color', 'text-indent', 'text-justify', 'text-shadow', 'text-transform',
'visibility', 'white-space', 'widows', 'word-break', 'word-spacing', 'word-wrap' }
# We use Microsoft's special mso-style-parent to inherit the style of the <td> on the next element, which
# we send thru depending on what tag it is. This is used to style the entire cell, since we don't support
# a per-element style (openpyxl doesn't support rich text)
self.default_styles = """
.htmlxls2xlsx {background: inherit; background-color: inherit;
border: inherit; border-color: inherit; border-width: inherit; border-bottom-color: inherit; border-left-color: inherit;
border-right-color: inherit; border-top-color: inherit; border-top: inherit; border-right: inherit; border-bottom: inherit; border-left: inherit;
border-top-width: inherit; border-right-width: inherit; border-bottom-width: inherit; border-left-width: inherit;
border-top-style: inherit; border-right-style: inherit; border-bottom-style: inherit; border-left-style: inherit;
border-top-color: inherit; border-right-color: inherit; border-bottom-color: inherit; border-left-color: inherit;
height: inherit; layout-flow: inherit; max-height: inherit; max-width: inherit; min-height: inherit; min-width: inherit;
mso-ignore: inherit; mso-char-indent-count: inherit; mso-number-format: inherit; mso-rotate: inherit; mso-text-control: inherit;
padding: inherit; padding-top: inherit; padding-right: inherit; padding-bottom: inherit; padding-left: inherit;
text-decoration: inherit; vertical-align: inherit; width: inherit; writing-mode: inherit; }
.msocomtxt {display: none; }
.msocomanch {display: none; }
.msocomhide {display: none; }
a {mso-style-parent:htmlxls2xlsx; color: #0563C1; text-decoration: underline;}
b {mso-style-parent:htmlxls2xlsx; font-weight: bold;}
big {mso-style-parent:htmlxls2xlsx; font-size: 1.33em;}
center {mso-style-parent:htmlxls2xlsx; text-align: center;}
code {mso-style-parent:htmlxls2xlsx; font-family: monospace;}
div {mso-style-parent:htmlxls2xlsx;}
em {mso-style-parent:htmlxls2xlsx; font-style: italic;}
font {mso-style-parent:htmlxls2xlsx; }
h1 {mso-style-parent:htmlxls2xlsx; display: block; font-size: 2em; margin-top: 0.67em; margin-bottom: 0.67em; margin-left: 0; margin-right: 0; font-weight: bold;}
h2 {mso-style-parent:htmlxls2xlsx; display: block; font-size: 1.5em; margin-top: 0.83em; margin-bottom: 0.83em; margin-left: 0; margin-right: 0; font-weight: bold;}
h3 {mso-style-parent:htmlxls2xlsx; display: block; font-size: 1.17em; margin-top: 1em; margin-bottom: 1em; margin-left: 0; margin-right: 0; font-weight: bold;}
h4 {mso-style-parent:htmlxls2xlsx; display: block; margin-top: 1.33em; margin-bottom: 1.33em; margin-left: 0; margin-right: 0; font-weight: bold; }
h5 {mso-style-parent:htmlxls2xlsx; display: block; font-size: .83em; margin-top: 1.67em; margin-bottom: 1.67em; margin-left: 0; margin-right: 0; font-weight: bold; }
h6 {mso-style-parent:htmlxls2xlsx; display: block; font-size: .67em; margin-top: 2.33em; margin-bottom: 2.33em; margin-left: 0; margin-right: 0; font-weight: bold; }
hr {mso-style-parent:htmlxls2xlsx; border-bottom: 0.5pt solid windowtext; }
i {mso-style-parent:htmlxls2xlsx; font-style: italic;}
p {mso-style-parent:htmlxls2xlsx; display: block; margin-top: 1em; margin-bottom: 1em; margin-left: 0; margin-right: 0;}
pre {mso-style-parent:htmlxls2xlsx; display: block; font-family: monospace; white-space: pre; margin: 1em 0;}
u {mso-style-parent:htmlxls2xlsx; text-decoration: underline;}
s | |
<gh_stars>1-10
#!/usr/bin/env python3
'''
Copyright 2018, VDMS
Licensed under the terms of the BSD 2-clause license. See LICENSE file for terms.
Schedule3.py Take adavantage of a Setup (Hopefully non-root) SaltSSH Environment
And Utilize it to make my SSH based collections.
'''
# Stdlib
from colorama import Fore, Back, Style
import argparse
import logging
import multiprocessing
import queue
import sys
import time
import os
import re
import signal
import random
import hashlib
import json
# Pips
import yaml
import saltcell.clientcollector
# Local
import manoward
from manoward.storage import storage
if __name__ == "__main__":
    # CLI entry point: parse arguments, load the manoward configuration,
    # and configure logging verbosity from the number of -v flags.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c", "--config", help="Config File for Scheduler", required=False, default=None)
    parser.add_argument(
        "-r", "--regex", help="Only Hosts that Match this Regex", default=None)
    # append_const so each -v adds one entry; verbosity = len(args.verbose)
    parser.add_argument("-v", "--verbose", action='append_const',
                        help="Turn on Verbosity", const=1, default=[])
    parser.add_argument("-p", "--print", action="store_true",
                        help="Print Results to Screen", default=False)
    parser.add_argument(
        "-s", "--shard", help="Match only This Shard", default=None)
    args = parser.parse_args()
    CONFIG = manoward.get_manoward(explicit_config=args.config,
                                   only_file=False)
    # 0 -> ERROR, 1 -> WARNING, 2 -> INFO, 3+ -> DEBUG
    VERBOSE = len(args.verbose)
    if VERBOSE == 0:
        logging.basicConfig(level=logging.ERROR)
    elif VERBOSE == 1:
        logging.basicConfig(level=logging.WARNING)
    elif VERBOSE == 2:
        logging.basicConfig(level=logging.INFO)
    elif VERBOSE > 2:
        logging.basicConfig(level=logging.DEBUG)
    LOGGER = logging.getLogger()
    LOGGER.info("Welcome to Schedule3.py")
def read_hosts(config_items, regex=None, shard=None):
    '''
    Read the roster file (it's a Yaml file) and return the dictionary
    of items associated with it.

    Args:
        config_items (dict): manoward config; the roster is expected at
            config_items["schedule"]["salt_ssh_basedir"]/roster
        regex (str, optional): when given, keep only roster ids matching it.
        shard (str, optional): when given, keep only roster ids whose
            sha256 hexdigest starts with this prefix (case-insensitive).

    Returns:
        dict: roster_id -> roster arguments (empty on a YAML load error).

    Fix: hashlib.sha256 was called with a str, which raises TypeError on
    Python 3; the roster id is now encoded first.
    '''
    logger = logging.getLogger("schedule3.py:read_hosts")
    roster_file = os.path.join(config_items["schedule"].get(
        "salt_ssh_basedir", "./etc/manowar/salt"), "roster")
    logger.debug(roster_file)
    final_roster = dict()
    with open(roster_file, "r") as roster_file_obj:
        try:
            roster = yaml.safe_load(roster_file_obj)
        except Exception as roster_load_error:
            logger.error(
                "Unable to Read Roster File {}, Yaml Error".format(roster_file))
            logger.debug("Error : {}".format(roster_load_error))
            roster = dict()
        else:
            if isinstance(shard, str):
                logger.info(
                    "Choosing Items that Match Shard : {}".format(shard.upper()))
                sharded_roster = dict()
                for roster_id, roster_args in roster.items():
                    # sha256 requires bytes on Python 3
                    this_hash = hashlib.sha256(roster_id.encode("utf-8")).hexdigest()
                    if this_hash.upper().startswith(shard.upper()):
                        logger.info(
                            "Adding Host {} from Shard.".format(roster_id))
                        sharded_roster[roster_id] = roster_args
                    else:
                        logger.debug(
                            "Ignoring Host {} from Shard.".format(roster_id))
                roster = sharded_roster
            else:
                logger.debug("Sharded Roster Not Requested.")
        finally:
            # Regex filtering applies whether or not the load succeeded
            if isinstance(regex, str):
                for roster_id, roster_args in roster.items():
                    if re.match(regex, roster_id):
                        logger.info(
                            "Adding {} on Regex Match".format(roster_id))
                        final_roster[roster_id] = roster_args
                    else:
                        logger.debug(
                            "Ignoring {} No Regex Match".format(roster_id))
            else:
                final_roster = roster
    return final_roster
def dequeue_hosts(thread_num, host_queue, result_queue, this_configs):
    '''
    Pull Data off of the Queue and Collect for that Particular Host.

    Worker loop: pops (roster_id, roster_args) pairs from host_queue, runs
    a saltcell collection for each host, stores the result, and pushes a
    status entry [roster_id, *this_status] onto result_queue. Returns when
    host_queue is empty.

    NOTE(review): this_status starts as [False, True, False] and the
    storage-failure branch re-sets index 1 to True (already True); confirm
    each slot's intended meaning against the consumer of result_queue.
    '''
    logger = logging.getLogger("schedule3.py:dequeue_hosts")
    while host_queue.empty() == False:
        # Store Results Set Failure By Default
        this_status = [False, True, False]
        try:
            # Short timeout: the queue is fully loaded before workers start
            roster_id, roster_args = host_queue.get(timeout=3)
            logger.debug("{} Pulled Host {} in thread {}{}".format(Fore.GREEN,
                                                                   roster_id,
                                                                   thread_num,
                                                                   Style.RESET_ALL))
        except Exception as pull_error:
            logger.warning("{}Could not pull host of queue in thread {} with Error.{}".format(Fore.RED,
                                                                                              thread_num,
                                                                                              Style.RESET_ALL))
            logger.debug("Error : {}".format(pull_error))
            roster_id = "unknown"
        else:
            # Don't forward ssh bookkeeping keys from the roster entry to
            # the collector; everything else is passed through.
            roster_key_ban = ["user", "host", "sudo"]
            this_host_args = dict()
            for item, value in roster_args.items():
                if item not in roster_key_ban:
                    this_host_args[item] = value
            collection_args = {"remote": True,
                               "salt_ssh_basedir": this_configs["schedule"]["salt_ssh_basedir"],
                               "remote_host_id": roster_id,
                               "remote_timeout": this_configs["schedule"].get("indv_call_timeout", 600),
                               "noupload": True,
                               "host_configs": this_host_args,
                               "ipintel_configs": {"dointel": True},
                               "sapi_configs": {"sapi_do_api": False},
                               "hardcrash": False,
                               "base_config_file": this_configs["schedule"]["collection_config_file"],
                               "local_cols": [this_configs["schedule"].get("local_collections", False),
                                              this_configs["schedule"].get("local_collections_location", "/etc/manowar_agent/collections.d")],
                               "relative_venv": this_configs["schedule"].get("relative_venv", False)
                               }
            logger.debug("Collection Arguments : {}".format(collection_args))
            this_host = saltcell.clientcollector.Host(**collection_args)
            try:
                this_store_stats = storage(this_configs,
                                           this_host.todict())
            except Exception as storage_error:
                logger.error(
                    "Unable to Store Collected Items for {}".format(roster_id))
                this_status[1] = True
            else:
                logger.debug("Stored Results for {}".format(roster_id))
                this_status[0] = True
                # Flag production-class hosts in the status entry
                if roster_args.get("status", "unknown") in ("prod", "mxhot", "mxcold"):
                    this_status[2] = True
            finally:
                # Always report a result, even when storage failed
                try:
                    logger.info("Placing Result on Queue for {}".format(roster_id))
                    logger.debug("Result : {}".format(this_status))
                    result_queue.put([roster_id, *this_status])
                    logger.debug("{} Placed {} on Result Queue{}".format(
                        Fore.GREEN, roster_id, Style.RESET_ALL))
                except Exception as placement_error:
                    logger.error("{}Unable to Place Result on Queue for Host : {}{}".format(
                        Fore.RED, roster_id, Style.RESET_ALL))
                    logger.debug("Error : {}".format(placement_error))
    logger.info("{}Queue Empty breaking thread loop {}{}".format(
        Style.DIM, thread_num, Style.RESET_ALL))
    return
def schedule(config_items, regex=None, shard=None, do_print=False):
    '''
    Schedule UP all My Tasks on THe Queue

    Reads the active roster, skips hosts recently reported via SAPI, fans the
    remaining hosts out to ``dequeue_hosts`` worker processes, then drains the
    result queue until all workers exit or ``max_runtime`` elapses.

    :param config_items: parsed configuration; the ``schedule`` section supplies
        ``max_threads``, ``max_runtime`` and report settings.
    :param regex: optional filter passed through to ``read_hosts``.
    :param shard: accepted for interface compatibility; currently unused.
    :param do_print: accepted for interface compatibility; currently unused.
    :return: ``schedule_stats`` dict with success/fail host lists and counts,
        SAPI skip info, timing and (on timeout) a "Timeout" message.
    '''
    logger = logging.getLogger("schedule3.py:schedule")
    schedule_stats = dict()
    # Counters for Results
    glbl_success_hosts = list()
    glbl_fail_hosts = list()
    glbl_fail_hosts_prod = list()
    sapi_hosts_list = list()
    THREADS = int(config_items["schedule"]["max_threads"])
    schedule_stats["threads"] = THREADS
    MAXRUNTIME = int(config_items["schedule"]["max_runtime"])
    # BUGFIX: a stray trailing comma previously made output_report a
    # one-element tuple, so the isinstance() check below could never be True.
    output_report = config_items["schedule"]["output_report"]
    if isinstance(output_report, str):
        json_out = (True, output_report)
    else:
        json_out = (False, "/dev/null")
    active_hosts = read_hosts(config_items, regex=regex)
    # Create my Manager Object
    manager = multiprocessing.Manager()
    # Create a Queue for our hosts to live in.
    host_queue = manager.Queue(maxsize=len(active_hosts.keys())+1)
    # Results Queue
    result_queue = manager.Queue(maxsize=len(active_hosts.keys())+1)
    # Record When Enqueing Started
    start = time.time()
    found_sapi_hosts = manoward.grab_all_sapi(config_items)
    logger.info("Found {} Sapi Hosts to ignore".format(len(found_sapi_hosts)))
    # Toss Host on Queue
    for rosterid, host_arguments in active_hosts.items():
        # Check if Host is a SAPI host (by roster id, resource, or hostname)
        if rosterid in found_sapi_hosts:
            sapi_hosts_list.append(rosterid)
            logger.debug("{} Host {} is a recent SAPI Host, not placing on Queue.{}".format(
                Style.DIM, rosterid, Style.RESET_ALL))
        elif host_arguments.get("resource", None) is not None and host_arguments.get("resource") in found_sapi_hosts:
            sapi_hosts_list.append(rosterid)
            logger.debug("{} Host {} is a recent SAPI Host, not placing on Queue.{}".format(
                Style.DIM, rosterid, Style.RESET_ALL))
        elif host_arguments.get("hostname", None) is not None and host_arguments.get("hostname") in found_sapi_hosts:
            sapi_hosts_list.append(rosterid)
            logger.debug("{} Host {} is a recent SAPI Host, not placing on Queue.{}".format(
                Style.DIM, rosterid, Style.RESET_ALL))
        else:
            host_queue.put([rosterid, host_arguments])
    # Allows muliprocess queue to settle
    logger.debug("Sleeping 5 Seconds for Saftey.")
    time.sleep(5)
    thread_array = dict()
    # Take Hosts Off Queue
    for thread_num in range(THREADS):
        # Set THread Target Always pass it our config_array
        if not host_queue.empty():
            logger.debug("{} Provisining Thread Number : {} {}".format(Style.DIM,
                                                                       thread_num,
                                                                       Style.RESET_ALL))
            try:
                thread_array[thread_num] = multiprocessing.Process(target=dequeue_hosts,
                                                                   args=(thread_num,
                                                                         host_queue,
                                                                         result_queue,
                                                                         config_items)
                                                                   )
                # Make Threads Die if Parent is Killed
                thread_array[thread_num].daemon = True
                # Start my Threads
                thread_array[thread_num].start()
            except Exception as thread_init_error:  # pylint: disable=broad-except, invalid-name
                logger.error("Unable to start up thread {} of {}".format(
                    thread_num, THREADS))
                logger.debug("Error : {}".format(thread_init_error))
            finally:
                # I know this is janky. But I've found that it's neccessary to keep weird things
                # Happening with too many threads hitting the queue simultaneously.
                if thread_num < 50:
                    # Always sleep for first 50 threads
                    time.sleep(1)
                elif thread_num % 7 == 0:
                    # Do new threads 7 at a time
                    time.sleep(1)
        else:
            logger.warning("{} Queue is Emptpy Prior to Allocation of All threads. Prematurely stopping allocation. {}".format(
                Style.DIM, Style.RESET_ALL))
            logger.debug("Allocation Stopping At {} Threads out of Planned {}".format(
                thread_num, THREADS))
            break
    logger.info("{} {} {} Thread Allocation Complete.{}".format(Style.DIM,
                                                                Fore.MAGENTA,
                                                                Back.CYAN,
                                                                Style.RESET_ALL))
    # Poll the workers until they all finish or we pass MAXRUNTIME.
    current_check_iteration = 0
    while True:
        # Check if were past our timeout
        current_run_time = time.time() - start
        if current_run_time < MAXRUNTIME:
            current_check_iteration += 1
            # Every 250th poll, log verbose per-thread/queue stats.
            if (current_check_iteration % 250) == 0:
                FULL_PRINT = True
                logger.debug("On Iteration {} Printing PID List.".format(
                    current_check_iteration))
            else:
                FULL_PRINT = False
            any_threads_alive = False
            threads_finished = 0
            threads_running = 0
            for thread in thread_array.keys():
                # If the Thread is Alive
                if thread_array[thread].is_alive():
                    any_threads_alive = True
                    threads_running += 1
                    if FULL_PRINT:
                        logger.debug("Thread {} Still Running with pid {}".format(thread,
                                                                                  thread_array[thread].pid))
                else:
                    # Can Print Finished Threads
                    threads_finished += 1
            # Always VERBOSE Print
            if FULL_PRINT:
                logger.debug("Stats at Time : {}".format(current_run_time))
                logger.debug("Running Threads : {}".format(threads_running))
                logger.debug("Finished Threads : {}".format(threads_finished))
                logger.debug("Hosts Left : {}".format(host_queue.qsize()))
                # BUGFIX: this previously referenced the undefined name
                # ``results_queue`` and raised NameError on every 250th poll.
                logger.debug("Results Recieved : {}".format(
                    result_queue.qsize()))
            if not any_threads_alive:
                # Break While Loop
                break
            # I have running threads keep chugging along
        else:
            # Timeout Has Been Reached
            schedule_stats["Timeout"] = "Timeout reached at {} seconds with {} items left on the queue".format(current_run_time,
                                                                                                              host_queue.qsize())
            logger.warning("TIMEOUT: {}".format(schedule_stats["Timeout"]))
            break
        # If I'm still going, Let's wait 20 seconds before checking again.
        time.sleep(20)
    # Let result_queue settle
    time.sleep(5)
    # Adjust Count By Failure_Adjustment for failed Pulls off Queue.
    failure_adjustment = 0
    current_count = result_queue.qsize() - failure_adjustment
    while current_count > 0:
        # Result Queue Isn't Empty Pull Results
        try:
            this_result = result_queue.get(timeout=5)
        except Exception as get_error:
            # Modify Failure Adjustment
            failure_adjustment += 1
            logger.error("{} Error grabbing results Adusting Failure Results.{}".format(Fore.RED,
                                                                                        Style.RESET_ALL))
            logger.debug("Get Error : {}".format(get_error))
        else:
            # List Format for Results: [roster_id, success, fail, fail_prod]
            logger.debug("This Result : {}".format(this_result))
            this_one_host_array = this_result[0]
            success = this_result[1]
            fail = this_result[2]
            fail_prod = this_result[3]
            if success:
                glbl_success_hosts.append(this_one_host_array)
            elif fail:
                glbl_fail_hosts.append(this_one_host_array)
            if fail_prod:
                glbl_fail_hosts_prod.append(this_one_host_array)
        finally:
            # Recompute how many results remain to be drained.
            current_count = result_queue.qsize() - failure_adjustment
    # Store Stats Values
    schedule_stats["global_success_hosts_list"] = glbl_success_hosts
    schedule_stats["global_success_hosts"] = len(glbl_success_hosts)
    schedule_stats["global_fail_hosts_list"] = glbl_fail_hosts
    schedule_stats["global_fail_hosts"] = len(glbl_fail_hosts)
    schedule_stats["global_fail_prod_list"] = glbl_fail_hosts_prod
    schedule_stats["global_fail_prod"] = len(glbl_fail_hosts_prod)
    schedule_stats["sapi_hosts"] = len(sapi_hosts_list)
    schedule_stats["sapi_hosts_list"] = sapi_hosts_list
    schedule_stats["jobtime"] = "Entire job took:" + str(time.time() - start)
    if __name__ == "__main__":
        # When run as a script also dump the stats to stdout.
        print(json.dumps(schedule_stats, sort_keys=True, indent=4))
    output_filename = config_items["schedule"].get("output_report", False)
    if isinstance(output_filename, str):
        logger.debug(
            "Writing Output File Report to {}".format(output_filename))
        with open(output_filename, 'w') as json_out_file:
            json_out_file.write(json.dumps(schedule_stats, indent=4))
    else:
        logger.info("Output File Write Not Requested.")
    return schedule_stats
if __name__ == "__main__":
schedule(CONFIG, regex=args.regex, shard=args.shard, | |
that I can seem to find
time for).
It currently doesn't do any sort of job combining dictionaries or anything, but it definitely could, if
you have two incomplete dictionaries
:param other: the new sideband data to add to the larger spectrum. Add means append, no additino is performed
:type other: HighSidebandPMT
:return:
"""
"""
This bad boy will add another PMT sideband object to the sideband spectrum of this object
It currently doesn't do any sort of job combining dictionaries or anything, but it definitely could
"""
self.parameters["files included"].append(other.fname)
if other.initial_sb in self.sb_list:
self.sb_list.append(other.initial_sb)
# Make things comma delimited?
try:
self.sb_dict[other.initial_sb] = np.row_stack(
(self.sb_dict[other.initial_sb], other.initial_data)
)
except KeyError:
self.sb_dict[other.initial_sb] = np.array(other.initial_data)
except Exception as e:
print("THIS IS THE OTHER ERROR", e)
raise
def process_sidebands(self, verbose=False, baselineCorr = False):
"""
This bad boy will clean up the garbled mess that is the object before hand,
including clearing out misfired shots and doing the averaging.
Affects:
self.sb_dict = Averages over sidebands
Creates:
self.sb_list = The sideband orders included in this object.
:param verbose: Flag to see the nitty gritty details.
:type verbose: bool
:param baselineCorr: Whether to subtract the average across
the two endpoints
:return: None
"""
for sb_num, sb in list(self.sb_dict.items()):
if sb_num == 0:
fire_condition = -np.inf # This way the FEL doesn't need to be on during laser line measurement
else:
fire_condition = np.mean(sb[:, 2]) / 2 # Say FEL fired if the
# cavity dump signal is
# more than half the mean
# of the cavity dump signal
frequencies = sorted(list(set(sb[:, 0])))
temp = None
for freq in frequencies:
data_temp = np.array([])
for point in sb:
if point[0] == freq and point[2] > fire_condition:
data_temp = np.hstack((data_temp, point[3]))
try:
temp = np.vstack(
(temp, np.array([freq, np.mean(data_temp),
np.std(data_temp) / np.sqrt(len(data_temp))])))
except:
temp = np.array([freq, np.mean(data_temp),
np.std(data_temp) / np.sqrt(len(data_temp))])
# temp[:, 0] = temp[:, 0] / 8065.6 # turn NIR freq into eV
temp = temp[temp[:, 0].argsort()]
if baselineCorr:
x = temp[[0, -1], 0]
y = temp[[0, -1], 1]
p = np.polyfit(x, y, 1)
temp[:, 1] -= np.polyval(p, temp[:,0])
self.sb_dict[sb_num] = np.array(temp)
self.sb_list = sorted(self.sb_dict.keys())
if verbose:
print("Sidebands included", self.sb_list)
def integrate_sidebands(self, verbose=False, cutoff=1.5, **kwargs):
"""
This method will integrate the sidebands to find their strengths, and then
use a magic number to define the width, since they are currently so utterly
undersampled for fitting.
cutoff is the ratio of area/error which must be exceeded to count
It is currently the preferred method for calculating sideband strengths.
self.fit_sidebands is probably better with better-sampled lines.
Creates:
self.sb_results = full list of integrated data. Column order is:
[sb order, Freq (eV), "error" (eV), Integrate area (arb.), area error, "Linewidth" (eV), "Linewidth error" (eV)
self.full_dict = Dictionary where the SB order column is removed and turned into the keys. The values
are the rest of that sideband's results.
:param verbose: Flag to see the nitty gritty details
:type verbose: bool
:return: None
"""
if verbose:
print("="*15)
print()
print("Integrating PMT Sidebands")
print("Cutoff: {}".format(cutoff))
print(os.path.basename(self.fname))
print()
print("=" * 15)
self.full_dict = {}
for sideband in list(self.sb_dict.items()):
index = np.argmax(sideband[1][:, 1])
nir_frequency = sideband[1][index, 0]
# stroff = np.nan_to_num(sideband[1][[0,1,-2,1], 1]).sum()/4.
area = np.trapz(np.nan_to_num(sideband[1][:, 1]), sideband[1][:, 0])
error = np.sqrt(np.sum(np.nan_to_num(
sideband[1][:, 2]) ** 2)) / 8065.6 # Divide by the step size?
if verbose:
print("\torder: {}, area: {:.3g}, error: {:.3g}, ratio: {:.3f}".format(
sideband[0], area, error, area/error
))
details = np.array(
[sideband[0], nir_frequency, 1 / 8065.6, area, error, 2 / 8065.6,
1 / 8065.6])
if area < 0:
if verbose:
print("\t\tarea < 0")
continue
elif area < cutoff * error: # Two seems like a good cutoff?
if verbose:
print("\t\tI did not keep sideband")
continue
try:
self.sb_results = np.vstack((self.sb_results, details))
except:
self.sb_results = np.array(details)
self.full_dict[sideband[0]] = details[1:]
try:
self.sb_results = self.sb_results[self.sb_results[:, 0].argsort()]
except (IndexError, AttributeError):
# IndexError where there's only one sideband
# AttributeError when there aren't any (one sb which wasn't fit)
pass
if verbose:
print('-'*19)
    def fit_sidebands(self, plot=False, verbose=False):
        """
        This method will fit a gaussian to each of the sidebands provided in
        the self.sb_dict and make a list just like in the EMCCD version. It
        will also use the standard error of the integral of the PMT peak as the
        error of the gaussian area instead of that element from the covariance
        matrix. Seems more legit.

        attributes:
        self.sb_results: the numpy array that contains all of the fit info just
                         like it does in the CCD class.
        self.full_dict = A dictionary version of self.sb_results

        :param plot: Flag to see the results plotted
        :type plot: bool
        :param verbose: Flag to see the nitty gritty details
        :type verbose: bool
        :return: None
        """
        sb_fits = {}
        for sideband in list(self.sb_dict.items()):
            if verbose:
                print("Sideband number", sideband[0])
                print("Sideband data:\n", sideband[1])
            # Seed the fit at the tallest sampled point of this sideband.
            index = np.argmax(sideband[1][:, 1])
            nir_frequency = sideband[1][index, 0]
            peak = sideband[1][index, 1]
            width_guess = 0.0001  # Yep, another magic number
            # Initial parameters: [center, area (peak*width), width, offset]
            p0 = [nir_frequency, peak * width_guess, width_guess, 0.00001]
            if verbose:
                x_vals = np.linspace(np.amin(sideband[1][:, 0]),
                                     np.amax(sideband[1][:, 0]), num=50)
                plt.plot(x_vals, gauss(x_vals, *p0),
                         label="fit :{}".format(sideband[1]))
                print("p0:", p0)
            try:
                # Weighted fit: per-point standard errors (column 2) are used
                # as sigma.
                coeff, var_list = curve_fit(gauss, sideband[1][:, 0], sideband[1][:, 1],
                                            sigma=sideband[1][:, 2], p0=p0)
                # Amplitude and width are sign-degenerate in a gaussian, so
                # force them positive.
                coeff[1] = abs(coeff[1])
                coeff[2] = abs(coeff[2])
                if verbose:
                    print("coeffs:", coeff)
                    print("stdevs:", np.sqrt(np.diag(var_list)))
                    print("integral", np.trapz(sideband[1][:, 1], sideband[1][:, 0]))
                if np.sqrt(np.diag(var_list))[0] / coeff[
                    0] < 0.5:  # The error on where the sideband is should be small
                    # Row layout: [order, coeffs..., their stdevs...]
                    sb_fits[sideband[0]] = np.concatenate(
                        (np.array([sideband[0]]), coeff, np.sqrt(np.diag(var_list))))
                    # print "error then:", sb_fits[sideband[0]][6]
                    # Replace the covariance-derived area error with the
                    # relative standard error of the three points around the
                    # peak, scaled by the fitted area.
                    relative_error = np.sqrt(sum([x ** 2 for x in
                                                  sideband[1][index - 1:index + 2,
                                                  2]])) / np.sum(
                        sideband[1][index - 1:index + 2, 1])
                    if verbose:
                        print("relative error:", relative_error)
                    sb_fits[sideband[0]][6] = coeff[1] * relative_error
                    # print "error now:", sb_fits[sideband[0]][6]
                    if plot:
                        x_vals = np.linspace(np.amin(sideband[1][:, 0]),
                                             np.amax(sideband[1][:, 0]), num=50)
                        plt.plot(x_vals, gauss(x_vals, *coeff))
                        # plt.plot(x_vals, gauss(x_vals, *p0))
                else:
                    # Center error too large: the fit is not trusted.
                    print("what happened?")
            except:
                print("God damn it, Leroy.\nYou couldn't fit this.")
                sb_fits[sideband[0]] = None
        # Stack per-sideband rows (ascending order) into self.sb_results.
        for result in sorted(sb_fits.keys()):
            try:
                self.sb_results = np.vstack((self.sb_results, sb_fits[result]))
            except:
                self.sb_results = np.array(sb_fits[result])
        # Reorder columns to interleave values with their errors, then trim
        # to the first seven columns to match the CCD-class result layout.
        self.sb_results = self.sb_results[:, [0, 1, 5, 2, 6, 3, 7, 4, 8]]
        self.sb_results = self.sb_results[:, :7]
        if verbose:
            print("And the results, please:\n", self.sb_results)
        # Keyed copy: sideband order -> remaining six result columns.
        self.full_dict = {}
        for sb in self.sb_results:
            self.full_dict[sb[0]] = np.asarray(sb[1:])
def laser_line(self, verbose=False, **kwargs):
"""
This method is designed to scale everything in the PMT to the conversion
efficiency based on our measurement of the laser line with a fixed
attenuation.
Creates:
self.parameters['normalized?'] = Flag to specify if the laser has been
accounted for.
:return: None
"""
if 0 not in self.sb_list:
self.parameters['normalized?'] = False
return
else:
laser_index = np.where(self.sb_results[:, 0] == 0)[0][0]
if verbose:
print("sb_results", self.sb_results[laser_index, :])
print("laser_index", laser_index)
laser_strength = np.array(self.sb_results[laser_index, 3:5])
if verbose:
print("Laser_strength", laser_strength)
for sb in self.sb_results:
sb[4] = (sb[3] / laser_strength[0]) * np.sqrt(
(sb[4] / sb[3]) ** 2 + (laser_strength[1] / laser_strength[0]) ** 2)
sb[3] = sb[3] / laser_strength[0]
for sb in list(self.full_dict.values()):
sb[3] = (sb[2] / laser_strength[0]) * np.sqrt(
(sb[3] / sb[2]) ** 2 + (laser_strength[1] / laser_strength[0]) ** 2)
sb[2] = sb[2] / laser_strength[0]
self.parameters['normalized?'] = True
def save_processing(self, file_name, folder_str, marker='', index='', verbose=False):
"""
This will save all of the self.proc_data and the results from the
fitting of this individual file.
Format:
spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
fit_fname = file_name + '_' + marker + '_' + str(index) + '_fits.txt'
Inputs:
file_name = the beginning of the file name to be saved
folder_str = the location of the folder where the file will be saved,
will create the folder, if necessary.
marker = I...I don't know what this was originally for
index = used to keep these files from overwriting themselves when in a
list
Outputs:
Two files:
self.proc_data = the continuous spectrum
self.sb_results = the individual sideband details
:param file_name: The base name for the saved file
:type file_name: str
:param folder_str: The full name for the folder hte file is saved it. Folder can be created
:type folder_str: str
:param marker: Marker for the file, appended | |
replace arrays
for frame in self.tracked:
frame[frame == label_2] = label_1
# replace fields
track_1 = self.tracks[label_1]
track_2 = self.tracks[label_2]
for d in track_1["daughters"]:
self.tracks[d]["parent"] = None
track_1["frames"] = sorted(set(track_1["frames"] + track_2["frames"]))
track_1["daughters"] = track_2["daughters"]
track_1["frame_div"] = track_2["frame_div"]
track_1["capped"] = track_2["capped"]
del self.tracks[label_2]
for _, track in self.tracks.items():
try:
track["daughters"].remove(label_2)
except ValueError:
pass
# in case label_2 was a daughter of label_1
try:
track_1["daughters"].remove(label_2)
except ValueError:
pass
self.update_image = True
def action_fill_hole(self):
'''
fill a "hole" in a cell annotation with the cell label
'''
img_ann = self.tracked[self.current_frame,:,:,0]
filled_img_ann = flood_fill(img_ann, self.hole_fill_seed, self.mode.label, connectivity = 1)
self.tracked[self.current_frame,:,:,0] = filled_img_ann
self.update_image = True
def action_delete(self):
"""
Deletes label from current frame only
"""
selected_label, current_frame = self.mode.label, self.mode.frame
# Set selected label to 0 in current frame
ann_img = self.tracked[current_frame]
ann_img = np.where(ann_img == selected_label, 0, ann_img)
self.tracked[current_frame] = ann_img
self.del_cell_info(del_label = selected_label, frame = current_frame)
self.update_image = True
def action_flood_contiguous(self):
'''
Flood fill a label (not background) with a unique new label;
alternative to watershed for fixing duplicate label issue (if cells
are not touching). If there are no other pixels of the old label left
after flooding, this action has the same effect as single-frame create.
This action never changes pixels to 0. Uses self.mode.frame (the frame that
was clicked on) instead of self.current_frame to prevent potential buggy
behavior (eg, user changes frames before confirming action, and self.hole_fill_seed
in new frame corresponds to a different label from self.mode.label).
Uses:
self.annotated, self.mode.frame, self.feature to get image to modify
self.mode.label is the label being flooded with a new value
self.hole_fill_seed to get starting point for flooding
self.cell_ids to get unused label to flood with
self.add_cell_info always needed to add new label to cell_info
self.del_cell_info sometimes needed to delete old label from frame
'''
# old label is definitely in original, check later if in modified
old_label = self.mode.label
# label used to flood area
new_label = self.get_new_label()
# use frame where label was selected, not current frame
frame = self.mode.frame
# annotation to modify
img_ann = self.tracked[frame,:,:,0]
# flood connected pixels of old_label with new_label, from origin point
# of self.hole_fill_seed
filled_img_ann = flood_fill(img_ann, self.hole_fill_seed, new_label)
# update annotation with modified image
self.tracked[frame,:,:,0] = filled_img_ann
# bool, whether any pixels of old_label remain in flooded image
in_modified = np.any(np.isin(filled_img_ann, old_label))
# this action will always add new_label to the annotation in this frame
self.add_cell_info(add_label=new_label, frame = frame)
# check to see if flooding removed old_label from the frame completely
if not in_modified:
self.del_cell_info(del_label = old_label, frame = frame)
# reset hole_fill_seed
self.hole_fill_seed = None
self.update_image = True
def action_trim_pixels(self):
'''
Trim away any stray (unconnected) pixels of selected label; pixels in
frame with that label that are not connected to self.hole_fill_seed
will be set to 0. This action will never completely delete label from frame,
since the seed point will always be left unmodified. Used to clean up messy
annotations, especially those with only a few pixels elsewhere in the frame,
or to quickly clean up thresholding results.
Uses:
self.annotated, self.mode.frame, self.feature to get image to modify
self.mode.label is the label being trimmed
self.hole_fill_seed is starting point to determine parts of label that
will remain unmodified
'''
# use frame where label was selected, not current frame
frame = self.mode.frame
# label to be trimmed
label = self.mode.label
# image to modify
img_ann = self.tracked[frame,:,:,0]
# boolean array of all pixels of label that are connected to self.hole_fill_seed
contig_cell = flood(image = img_ann, seed_point = self.hole_fill_seed)
# any pixels in img_ann that have value 'label' and are NOT connected to hole_fill_seed
# get changed to 0, all other pixels retain their original value
img_trimmed = np.where(np.logical_and(np.invert(contig_cell), img_ann == label), 0, img_ann)
# update annotation with trimmed image
self.tracked[frame,:,:,0] = img_trimmed
# reset hole fill seed
self.hole_fill_seed = None
self.update_image = True
def add_cell_info(self, add_label, frame):
'''
helper function for actions that add a cell to the trk
'''
if add_label != 0:
#if cell already exists elsewhere in trk:
try:
old_frames = self.tracks[add_label]['frames']
updated_frames = np.append(old_frames, frame)
updated_frames = np.unique(updated_frames).tolist()
self.tracks[add_label].update({'frames': updated_frames})
#cell does not exist anywhere in trk:
except KeyError:
self.tracks.update({add_label: {}})
self.tracks[add_label].update({'label': int(add_label)})
self.tracks[add_label].update({'frames': [frame]})
self.tracks[add_label].update({'daughters': []})
self.tracks[add_label].update({'frame_div': None})
self.tracks[add_label].update({'parent': None})
self.tracks[add_label].update({'capped': False})
def del_cell_info(self, del_label, frame):
'''
helper function for actions that remove a cell from the trk
'''
if del_label != 0:
#remove cell from frame
old_frames = self.tracks[del_label]['frames']
updated_frames = np.delete(old_frames, np.where(old_frames == np.int64(frame))).tolist()
self.tracks[del_label].update({'frames': updated_frames})
#if that was the last frame, delete the entry for that cell
if self.tracks[del_label]['frames'] == []:
del self.tracks[del_label]
# If deleting lineage data, remove parent/daughter entries
for _, track in self.tracks.items():
try:
track["daughters"].remove(del_label)
except ValueError:
pass
if track["parent"] == del_label:
track["parent"] = None
    def save(self):
        """
        Write the current state back to <filename>.trk, keeping a one-time
        backup of the original file and pruning empty tracks first.

        The .trk archive contains three members: lineage.json (self.tracks),
        raw.npy (self.raw) and tracked.npy (self.tracked).
        """
        # Preserve the untouched original file exactly once.
        backup_file = self.filename + "_original.trk"
        if not os.path.exists(backup_file):
            shutil.copyfile(self.filename + ".trk", backup_file)
        # clear any empty tracks before saving file
        # (collect first, delete after — the label doubles as the dict key,
        # so deleting while iterating self.tracks would be unsafe)
        empty_tracks = []
        for key in self.tracks:
            if not self.tracks[key]['frames']:
                empty_tracks.append(self.tracks[key]['label'])
        for track in empty_tracks:
            del self.tracks[track]
        with tarfile.open(self.filename + ".trk", "w") as trks:
            # Each member is staged in a NamedTemporaryFile; flush() before
            # trks.add() so tarfile reads the fully written contents.
            with tempfile.NamedTemporaryFile("w") as lineage_file:
                json.dump(self.tracks, lineage_file, indent=1)
                lineage_file.flush()
                trks.add(lineage_file.name, "lineage.json")
            with tempfile.NamedTemporaryFile() as raw_file:
                np.save(raw_file, self.raw)
                raw_file.flush()
                trks.add(raw_file.name, "raw.npy")
            with tempfile.NamedTemporaryFile() as tracked_file:
                np.save(tracked_file, self.tracked)
                tracked_file.flush()
                trks.add(tracked_file.name, "tracked.npy")
class ZStackReview(CalibanWindow):
save_prompt_text = ("\nSave current file?"
"\nSPACE = SAVE"
"\nT = SAVE AS .TRK FILE"
"\nESC = CANCEL")
def __init__(self, filename, raw, annotated, save_vars_mode):
'''
Set object attributes to store raw and annotated images (arrays),
various settings, bind event handlers to pyglet window, and begin
running application. Uses the filename and the output of load_npz(filename)
as input.
Assumes raw array is in format (frames, y, x, channels) and annotated array is
in format (frames, y, x, features).
'''
# store inputs as part of ZStackReview object
# filename used to save file later
self.filename = filename
# raw data used to display images and used in some actions (watershed, threshold)
self.raw = raw
# modifying self.annotated with actions is the main purpose of this tool
self.annotated = annotated
# used to determine variable names for npz upon saving file
self.save_vars_mode = save_vars_mode
# empty dictionary for lineage, will be populated if file is saved as trk
self.lineage = {}
# file opens to the first feature (like channel, but of annotation array)
self.feature = 0
# how many features contained in self.annotated (assumes particular data format)
self.feature_max = self.annotated.shape[-1]
# file opens to the first channel
self.channel = 0
# unpack the shape of the raw array
self.num_frames, self.height, self.width, self.channel_max = raw.shape
# info dictionaries that will be populated with info about labels for
# each feature of annotation array
self.cell_ids = {}
self.cell_info = {}
# populate cell_info and cell_ids with info for each feature in annotation
# analogous to .trk lineage but do not need relationships between cells included
for feature in range(self.feature_max):
self.create_cell_info(feature)
# don't display 'frames' just 'slices' in sidebar (updated on_draw)
try:
first_key = list(self.cell_info[0])[0]
display_info_types = self.cell_info[0][first_key]
self.display_info = [*sorted(set(display_info_types) - {'frames'})]
# if there are no labels in the feature, hardcode the display info
except:
self.display_info = ['label', 'slices']
# open file to first frame of annotation stack
self.current_frame = 0
# keeps track of information about brightness of each channel in raw images
self.max_intensity_dict = {}
for channel in range(self.channel_max):
self.max_intensity_dict[channel] = np.max(self.raw[0,:,:,channel])
# max_intensity for initial channel
self.max_intensity = self.max_intensity_dict[self.channel]
self.min_intensity_dict = {}
for channel in range(self.channel_max):
self.min_intensity_dict[channel] = 0
self.vmin = self.min_intensity_dict[self.channel]
# keeps track of information about adjustment of colormap for viewing annotation labels
self.adjustment_dict = {}
for feature in range(self.feature_max):
self.adjustment_dict[feature] = 0
# adjustment for initial feature
self.adjustment = self.adjustment_dict[self.feature]
# mouse position in coordinates of array being viewed as image, (0,0) is placeholder
# will be updated on mouse motion
self.x = 0
self.y = 0
# self.mode keeps track of selected labels, pending actions, displaying
# prompts | |
m += 1
self.plot_selected_energy_range_original()
self._update_ylimit()
self.log_linear_plot()
self._update_canvas()
def plot_emission_line(self):
"""
Plot emission line and escape peaks associated with given lines.
The value of self.max_v is needed in this function in order to plot
the relative height of each emission line.
"""
while len(self.eline_obj):
self.eline_obj.pop().remove()
escape_e = self.escape_e
if len(self.elist):
for i in range(len(self.elist)):
(eline,) = self._ax.plot(
[self.elist[i][0], self.elist[i][0]],
[0, self.elist[i][1] * self.max_v],
color=self.plot_style["emission_line"]["color"],
linewidth=self.plot_style["emission_line"]["linewidth"],
)
self.eline_obj.append(eline)
if self.plot_escape_line and self.elist[i][0] > escape_e:
(eline,) = self._ax.plot(
[self.elist[i][0] - escape_e, self.elist[i][0] - escape_e],
[0, self.elist[i][1] * self.max_v],
color=self.plot_style["escape"]["color"],
linewidth=self.plot_style["emission_line"]["linewidth"],
)
self.eline_obj.append(eline)
    def _set_eline_select_controls(self, *, element_id=None, data="use_self_data"):
        # Normalize the arguments: fall back to the currently selected
        # emission line and to the loaded dataset when none are supplied.
        if element_id is None:
            element_id = self.element_id
        if data == "use_self_data":
            data = self.io_model.data
        # NOTE(review): the resolved values are not used — this looks like a
        # stub or a remnant of removed UI-update logic; confirm intent.
def is_line_in_selected_list(self, n_id):
"""
Checks if the line with ID ``n_id`` is in the list of
selected element lines.
Used to enable/disable 'Add Line' and 'Remove Line' buttons.
Parameters
----------
n_id : Int
index of the element emission line in the list
(often equal to ``self.element_id``)
Returns True if the element line
is in the list of selected lines. False otherwise.
"""
ename = self.get_element_line_name_by_id(n_id)
if ename is None:
return False
if self.param_model.EC.is_element_in_list(ename):
return True
else:
return False
def is_element_line_id_valid(self, n_id):
"""
Checks if ID (``n_id``) of the element emission line is valid,
i.e. the name of the line may be obtained by using the ID.
Parameters
----------
n_id : Int
index of the element emission line in the list
(often equal to 'self.element_id')
Returns True if the element line is valid
"""
# There may be a more efficient way to check 'n_id',
# but we want to use the same function as we use
# to retrive the line name
ename = self.get_element_line_name_by_id(n_id)
if ename is None:
return False
else:
return True
def get_element_line_name_by_id(self, n_id):
"""
Retrieves the name of the element emission line from its ID
(the number in the list). The lines are numbered starting with 1.
If the ID is invalid, the function returns None.
Parameters
----------
n_id : int
index of the element emission line in the list
(often equal to 'self.element_id')
Returns the line name (str). If the name can not be retrieved, then
the function returns None.
"""
if n_id < 1:
# Elements are numbered starting with 1. Element #0 does not exist.
# (Element #0 means that no element is selected)
return None
# This is the fixed list of element emission line names.
# The element with ID==1 is found in total_list[0]
total_list = self.param_model.get_user_peak_list()
try:
ename = total_list[n_id - 1]
except Exception:
ename = None
return ename
def _vertical_marker_set_inside_range(self, *, e_low=None, e_high=None):
"""
Don't move the marker if it is inside range. If it is outside range,
then set the marker to the center of the range
"""
# The range of energy selected for analysis
if e_low is None:
e_low = self.param_model.param_new["non_fitting_values"]["energy_bound_low"]["value"]
if e_high is None:
e_high = self.param_model.param_new["non_fitting_values"]["energy_bound_high"]["value"]
# By default, place the marker in the middle of the range if its original position
# is outside the range
if (self.vertical_marker_kev > e_high) or (self.vertical_marker_kev < e_low):
self.vertical_marker_kev = (e_low + e_high) / 2.0
    def _fill_elist(self):
        """
        Build the list of emission lines for the currently selected element
        line (``self.element_id``).

        Returns a list of ``(line_energy_keV, relative_intensity)`` tuples,
        with intensity normalized to the strongest line of the series, or an
        empty list when no valid line is selected.
        """
        _elist = []
        incident_energy = self.incident_energy
        # The flat transition tables partition the line list into K, L and M
        # blocks, in that order; the lengths give the block boundaries.
        k_len = len(K_TRANSITIONS)
        l_len = len(L_TRANSITIONS)
        m_len = len(M_TRANSITIONS)
        ename = self.get_element_line_name_by_id(self.element_id)
        if ename is not None:
            _elist = []
            if ename.lower().startswith("userpeak"):
                # Make sure that the marker is in the selected range of energies
                self._vertical_marker_set_inside_range()
                # The tuple structure: (center_energy, ratio)
                _elist.append((self.vertical_marker_kev, 1.0))
            elif "_K" in ename:
                # K series, normalized to the first K entry (ka1).
                e = Element(ename[:-2])
                if e.cs(incident_energy)["ka1"] != 0:
                    for i in range(k_len):
                        _elist.append(
                            (
                                e.emission_line.all[i][1],
                                e.cs(incident_energy).all[i][1] / e.cs(incident_energy).all[0][1],
                            )
                        )
            elif "_L" in ename:
                # L series, normalized to the first L entry (la1).
                e = Element(ename[:-2])
                if e.cs(incident_energy)["la1"] != 0:
                    for i in range(k_len, k_len + l_len):
                        _elist.append(
                            (
                                e.emission_line.all[i][1],
                                e.cs(incident_energy).all[i][1] / e.cs(incident_energy).all[k_len][1],
                            )
                        )
            else:
                # NOTE(review): this branch assumes an "_M" suffix (stripped
                # by ename[:-2]); confirm only K/L/M/userpeak names reach here.
                e = Element(ename[:-2])
                if e.cs(incident_energy)["ma1"] != 0:
                    for i in range(k_len + l_len, k_len + l_len + m_len):
                        _elist.append(
                            (
                                e.emission_line.all[i][1],
                                e.cs(incident_energy).all[i][1] / e.cs(incident_energy).all[k_len + l_len][1],
                            )
                        )
        return _elist
def _get_pileup_lines(self, eline):
"""
Returns the energy (center) of pileup peak. And the energies of two components.
Parameters
----------
eline: str
Name of the pileup peak, e.g. V_Ka1-Co_Ka1
Returns
-------
list(float)
Energy in keV of pileup peak and two components
"""
try:
element_line1, element_line2 = eline.split("-")
e1_cen = get_eline_parameters(element_line1, self.incident_energy)["energy"]
e2_cen = get_eline_parameters(element_line2, self.incident_energy)["energy"]
en = [e1_cen + e2_cen, e1_cen, e2_cen]
except Exception:
en = []
return en
def _fill_elist_pileup(self, eline=None):
if eline is None:
eline = self.param_model.e_name
elist = []
energies = self._get_pileup_lines(eline)
if energies:
elist = list(zip(energies, [1, 0.2, 0.2]))
return elist
def _fill_elist_userpeak(self):
"""
Fill the list of 'emission lines' for user defined peak. There is only ONE
'emission line', with position determined by the location of the marker.
If the marker is not currently visible, then don't put any emission lines in the list.
The list is used during adding user-defined peaks.
"""
elist = []
energy, marker_visible = self.get_suggested_new_manual_peak_energy()
if marker_visible:
elist.append((energy, 1))
return elist
def _reset_eline_plot(self):
while len(self.eline_obj):
self.eline_obj.pop().remove()
self.elist = []
self._fig.canvas.draw()
@observe("element_id")
def set_element(self, change):
self._set_eline_select_controls(element_id=change["value"])
self.compute_manual_peak_intensity(n_id=change["value"])
if change["value"] == 0:
self._reset_eline_plot()
return
self.plot_current_eline()
    def plot_current_eline(self, eline=None):
        """
        Plots emission lines for the selected peak based on 'self.element_id` and provided `eline`.

        Parameters
        ----------
        eline: str or None
            Name of the selected peak; defaults to the peak name currently
            held by the parameter model.
        """
        if eline is None:
            eline = self.param_model.e_name
        incident_energy = self.incident_energy
        # Name of the emission line (if emission line is selected)
        ename = self.get_element_line_name_by_id(self.element_id)
        # Check if pileup peak is selected
        is_pileup = self.param_model.get_eline_name_category(eline) == "pileup"
        if (ename is not None) or is_pileup:
            logger.debug(
                "Plot emission line for element: "
                "{} with incident energy {}".format(self.element_id, incident_energy)
            )
            # A regular emission line takes precedence over a pileup peak.
            if ename is not None:
                self.elist = self._fill_elist()
            elif is_pileup:
                self.elist = self._fill_elist_pileup(eline)
            else:
                self.elist = []  # Just in case
            self.plot_emission_line()
            self._update_canvas()
            # Do it the second time, since the 'self.elist' has changed
            self.compute_manual_peak_intensity(n_id=self.element_id)
        else:
            # Nothing valid selected: clear any previously plotted lines.
            self._reset_eline_plot()
            logger.debug(f"Selected emission line with ID #{self.element_id} is not in the list.")
@observe("det_materials")
def _update_det_materials(self, change):
if change["value"] == 0:
self.escape_e = 1.73998
else:
self.escape_e = 9.88640
def change_escape_peak_settings(self, plot_escape_line, det_material):
self.plot_escape_line = plot_escape_line
self.det_materials = det_material
# Now update the displayed emission line
self.plot_emission_line()
self._update_canvas()
    def plot_roi_bound(self):
        """
        Plot each ROI's low, center and high boundary as vertical lines.

        All previously drawn ROI line artists are removed first. Visibility
        of the freshly drawn lines follows each ROI's ``show_plot`` flag.
        """
        # Remove all previously drawn ROI lines from the axes.
        for k, v in self.roi_plot_dict.items():
            for data in v:
                data.remove()
        self.roi_plot_dict.clear()
        if len(self.roi_dict):
            # self._ax.hold(True)
            for k, v in self.roi_dict.items():
                temp_list = []
                # Boundary values divided by 1000 -- presumably eV -> keV
                # conversion for plotting; confirm units with the ROI model.
                for linev in np.array([v.left_val, v.line_val, v.right_val]) / 1000.0:
                    (lineplot,) = self._ax.plot(
                        [linev, linev],
                        [0, 1 * self.max_v],
                        color=self.plot_style["roi_line"]["color"],
                        linewidth=self.plot_style["roi_line"]["linewidth"],
                    )
                    # Honor the ROI's visibility flag for each new artist.
                    if v.show_plot:
                        lineplot.set_visible(True)
                    else:
                        lineplot.set_visible(False)
                    temp_list.append(lineplot)
                self.roi_plot_dict.update({k: temp_list})
        self._update_canvas()
@observe("roi_dict")
def show_roi_bound(self, change):
logger.debug("roi dict changed {}".format(change["value"]))
self.plot_roi_bound()
if len(self.roi_dict):
for k, v in self.roi_dict.items():
if v.show_plot:
for ln in self.roi_plot_dict[k]:
ln.set_visible(True)
else:
for ln in self.roi_plot_dict[k]:
ln.set_visible(False)
self._update_canvas()
def get_suggested_new_manual_peak_energy(self):
"""
Returns energy pointed by the vertical marker in keV and the status of the marker.
Returns
-------
float
Energy of the manual peak center in keV. The energy is determined
by vertical marker on the screen.
bool
True if the vertical marker is visible, otherwise False.
"""
energy = self.vertical_marker_kev
marker_visible = self.vertical_marker_is_visible
return energy, marker_visible
def _compute_intensity(self, elist):
# Some default value
intensity = 1000.0
if (
self.io_model.data is not None
and self.param_model.param_new is not None
and self.param_model.prefit_x is not None
and self.param_model.total_y is not None
and len(self.io_model.data) > 1
and len(self.param_model.prefit_x) > 1
):
# Range of energies in fitting results
e_fit_min = self.param_model.prefit_x[0]
e_fit_max = self.param_model.prefit_x[-1]
de_fit = (e_fit_max - e_fit_min) / (len(self.param_model.prefit_x) - 1)
e_raw_min = self.param_model.param_new["e_offset"]["value"]
e_raw_max = (
self.param_model.param_new["e_offset"]["value"]
+ (len(self.io_model.data) - 1) * self.param_model.param_new["e_linear"]["value"]
+ (len(self.io_model.data) - 1) ** 2 * self.param_model.param_new["e_quadratic"]["value"]
)
de_raw = (e_raw_max - e_raw_min) / (len(self.io_model.data) - 1)
# Note: the above algorithm for finding 'de_raw' is far from perfect but will
# work for now. As a result 'de_fit' and
# 'de_raw' == sself.param_model.param_new['e_linear']['value'].
# So the quadratic coefficent is ignored. This is OK, since currently
# quadratic coefficient is always ZERO. When the program is rewritten,
# the complete algorithm should be revised.
| |
beseeming ornaments
To wield old partisans, in hands as old,
Cank'red with peace, to part your cank'red hate.
If ever you disturb our streets again,
Your lives shall pay the forfeit of the peace.
For this time all the rest depart away.
You, Capulet, shall go along with me;
And, Montague, come you this afternoon,
To know our farther pleasure in this case,
To old Freetown, our common judgment place.
Once more, on pain of death, all men depart.
Exeunt [all but Montague, his Wife, and Benvolio].
Mon. Who set this ancient quarrel new abroach?
Speak, nephew, were you by when it began?
Ben. Here were the servants of your adversary
And yours, close fighting ere I did approach.
I drew to part them. In the instant came
The fiery Tybalt, with his sword prepar'd;
Which, as he breath'd defiance to my ears,
He swung about his head and cut the winds,
Who, nothing hurt withal, hiss'd him in scorn.
While we were interchanging thrusts and blows,
Came more and more, and fought on part and part,
Till the Prince came, who parted either part.
M. Wife. O, where is Romeo? Saw you him to-day?
Right glad I am he was not at this fray.
Ben. Madam, an hour before the worshipp'd sun
Peer'd forth the golden window of the East,
A troubled mind drave me to walk abroad;
Where, underneath the grove of sycamore
That westward rooteth from the city's side,
So early walking did I see your son.
Towards him I made; but he was ware of me
And stole into the covert of the wood.
I- measuring his affections by my own,
Which then most sought where most might not be found,
Being one too many by my weary self-
Pursu'd my humour, not pursuing his,
And gladly shunn'd who gladly fled from me.
Mon. Many a morning hath he there been seen,
With tears augmenting the fresh morning's dew,
Adding to clouds more clouds with his deep sighs;
But all so soon as the all-cheering sun
Should in the furthest East begin to draw
The shady curtains from Aurora's bed,
Away from light steals home my heavy son
And private in his chamber pens himself,
Shuts up his windows, locks fair daylight out
And makes himself an artificial night.
Black and portentous must this humour prove
Unless good counsel may the cause remove.
Ben. My noble uncle, do you know the cause?
Mon. I neither know it nor can learn of him.
Ben. Have you importun'd him by any means?
Mon. Both by myself and many other friend;
But he, his own affections' counsellor,
Is to himself- I will not say how true-
But to himself so secret and so close,
So far from sounding and discovery,
As is the bud bit with an envious worm
Ere he can spread his sweet leaves to the air
Or dedicate his beauty to the sun.
Could we but learn from whence his sorrows grow,
We would as willingly give cure as know.
Enter Romeo.
Ben. See, where he comes. So please you step aside,
I'll know his grievance, or be much denied.
Mon. I would thou wert so happy by thy stay
To hear true shrift. Come, madam, let's away,
Exeunt [Montague and Wife].
Ben. Good morrow, cousin.
Rom. Is the day so young?
Ben. But new struck nine.
Rom. Ay me! sad hours seem long.
Was that my father that went hence so fast?
Ben. It was. What sadness lengthens Romeo's hours?
Rom. Not having that which having makes them short.
Ben. In love?
Rom. Out-
Ben. Of love?
Rom. Out of her favour where I am in love.
Ben. Alas that love, so gentle in his view,
Should be so tyrannous and rough in proof!
Rom. Alas that love, whose view is muffled still,
Should without eyes see pathways to his will!
Where shall we dine? O me! What fray was here?
Yet tell me not, for I have heard it all.
Here's much to do with hate, but more with love.
Why then, O brawling love! O loving hate!
O anything, of nothing first create!
O heavy lightness! serious vanity!
Misshapen chaos of well-seeming forms!
Feather of lead, bright smoke, cold fire, sick health!
Still-waking sleep, that is not what it is
This love feel I, that feel no love in this.
Dost thou not laugh?
Ben. No, coz, I rather weep.
Rom. Good heart, at what?
Ben. At thy good heart's oppression.
Rom. Why, such is love's transgression.
Griefs of mine own lie heavy in my breast,
Which thou wilt propagate, to have it prest
With more of thine. This love that thou hast shown
Doth add more grief to too much of mine own.
Love is a smoke rais'd with the fume of sighs;
Being purg'd, a fire sparkling in lovers' eyes;
Being vex'd, a sea nourish'd with lovers' tears.
What is it else? A madness most discreet,
A choking gall, and a preserving sweet.
Farewell, my coz.
Ben. Soft! I will go along.
An if you leave me so, you do me wrong.
Rom. Tut! I have lost myself; I am not here:
This is not Romeo, he's some other where.
Ben. Tell me in sadness, who is that you love?
Rom. What, shall I groan and tell thee?
Ben. Groan? Why, no;
But sadly tell me who.
Rom. Bid a sick man in sadness make his will.
Ah, word ill urg'd to one that is so ill!
In sadness, cousin, I do love a woman.
Ben. I aim'd so near when I suppos'd you lov'd.
Rom. A right good markman! And she's fair I love.
Ben. A right fair mark, fair coz, is soonest hit.
Rom. Well, in that hit you miss. She'll not be hit
With Cupid's arrow. She hath Dian's wit,
And, in strong proof of chastity well arm'd,
From Love's weak childish bow she lives unharm'd.
She will not stay the siege of loving terms,
Nor bide th' encounter of assailing eyes,
Nor ope her lap to saint-seducing gold.
O, she's rich in beauty; only poor
That, when she dies, with beauty dies her store.
Ben. Then she hath sworn that she will still live chaste?
Rom. She hath, and in that sparing makes huge waste;
For beauty, starv'd with her severity,
Cuts beauty off from all posterity.
She is too fair, too wise, wisely too fair,
To merit bliss by making me despair.
She hath forsworn to love, and in that vow
Do I live dead that live to tell it now.
Ben. Be rul'd by me: forget to think of her.
Rom. O, teach me how I should forget to think!
Ben. By giving liberty unto thine eyes.
Examine other beauties.
Rom. 'Tis the way
To call hers (exquisite) in question more.
These happy masks that kiss fair ladies' brows,
Being black puts us in mind they hide the fair.
He that is strucken blind cannot forget
The precious treasure of his eyesight lost.
Show me a mistress that is passing fair,
What doth her beauty serve but as a note
Where I may read who pass'd that passing fair?
Farewell. Thou canst not teach me to forget.
Ben. I'll pay that doctrine, or else die in debt. Exeunt.
Scene II.
A Street.
Enter Capulet, County Paris, and [Servant] -the Clown.
Cap. But Montague is bound as well as I,
In penalty alike; and 'tis not hard, I think,
For men so old as we to keep the peace.
Par. Of honourable reckoning are you both,
And pity 'tis you liv'd at odds so long.
But now, my lord, what say you to my suit?
Cap. But saying o'er what | |
def get_pure_virtual_methods( self, type='public' ):
r = {}
for meth in self['methods'][ type ]:
if meth['pure_virtual']: r[ meth['name'] ] = meth
return r
    def __init__(self, nameStack):
        """Parse a class declaration from ``nameStack`` (the token list of
        the declaration) and initialize all bookkeeping containers.

        Fills in the class name, attached doxygen comment, line number,
        template name, inheritance list, and one empty container per access
        specifier for methods/properties/enums/structs/typedefs/forward
        declares.
        """
        self['nested_classes'] = []
        self['parent'] = None
        self['abstract'] = False
        self._public_enums = {}
        self._public_structs = {}
        self._public_typedefs = {}
        self._public_forward_declares = []
        self['namespace'] = ""
        debug_print( "Class: %s"%nameStack )
        if (len(nameStack) < 2):
            nameStack.insert(1, "")#anonymous struct
        # Attach any doxygen comment collected just before this declaration.
        global doxygenCommentCache
        if len(doxygenCommentCache):
            self["doxygen"] = doxygenCommentCache
            doxygenCommentCache = ""
        self["name"] = nameStack[1]
        self["line_number"] = detect_lineno(nameStack[0])
        #Handle template classes
        # Rejoin the template parameter tokens into the class name by
        # tracking the balance of '<' / '>' tokens.
        if len(nameStack) > 3 and nameStack[2].startswith("<"):
            open_template_count = 0
            param_separator = 0
            found_first = False
            i = 0
            for elm in nameStack:
                if '<' in elm :
                    open_template_count += 1
                    found_first = True
                elif '>' in elm:
                    open_template_count -= 1
                    if found_first and open_template_count == 0:
                        # Balanced again: tokens 1..i form the full name.
                        self["name"] = "".join(nameStack[1:i + 1])
                        break;
                i += 1
        elif ":" in nameStack:
            # 'class Foo : ...' -- the class name is the token before ':'.
            self['name'] = nameStack[ nameStack.index(':') - 1 ]
        inheritList = []
        if nameStack.count(':') == 1:
            # A single ':' introduces the inheritance list; split on ','.
            nameStack = nameStack[nameStack.index(":") + 1:]
            while len(nameStack):
                tmpStack = []
                tmpInheritClass = {"access":"private", "virtual": False}
                if "," in nameStack:
                    tmpStack = nameStack[:nameStack.index(",")]
                    nameStack = nameStack[nameStack.index(",") + 1:]
                else:
                    tmpStack = nameStack
                    nameStack = []
                # Convert template classes to one name in the last index
                for i in range(0, len(tmpStack)):
                    if '<' in tmpStack[i]:
                        tmpStack2 = tmpStack[:i-1]
                        tmpStack2.append("".join(tmpStack[i-1:]))
                        tmpStack = tmpStack2
                        break
                if len(tmpStack) == 0:
                    break;
                elif len(tmpStack) == 1:
                    # Bare class name; access stays 'private' (C++ default).
                    tmpInheritClass["class"] = tmpStack[0]
                elif len(tmpStack) == 2:
                    # '<access> <class>'
                    tmpInheritClass["access"] = tmpStack[0]
                    tmpInheritClass["class"] = tmpStack[1]
                elif len(tmpStack) == 3 and "virtual" in tmpStack:
                    # 'virtual <access> <class>' or '<access> virtual <class>'
                    tmpInheritClass["access"] = tmpStack[1] if tmpStack[1] != "virtual" else tmpStack[0]
                    tmpInheritClass["class"] = tmpStack[2]
                    tmpInheritClass["virtual"] = True
                else:
                    # NOTE(review): 'raise NotImplemented' raises a TypeError
                    # at runtime (NotImplemented is not an exception class);
                    # this probably should be NotImplementedError.
                    warning_print( "Warning: can not parse inheriting class %s"%(" ".join(tmpStack)))
                    if '>' in tmpStack: pass # allow skip templates for now
                    else: raise NotImplemented
                if 'class' in tmpInheritClass: inheritList.append(tmpInheritClass)
        # Two ':' tokens (one '::'): nested declaration 'Parent::Name'.
        elif nameStack.count(':') == 2: self['parent'] = self['name']; self['name'] = nameStack[-1]
        elif nameStack.count(':') > 2 and nameStack[0] in ("class", "struct"):
            # Namespace-qualified bases: split on ',' then fold runs of
            # 'ns : : name' tokens back into 'ns::name'.
            tmpStack = nameStack[nameStack.index(":") + 1:]
            superTmpStack = [[]]
            for tok in tmpStack:
                if tok == ',':
                    superTmpStack.append([])
                else:
                    superTmpStack[-1].append(tok)
            for tmpStack in superTmpStack:
                tmpInheritClass = {"access":"private"}
                if len(tmpStack) and tmpStack[0] in supportedAccessSpecifier:
                    tmpInheritClass["access"] = tmpStack[0]
                    tmpStack = tmpStack[1:]
                inheritNSStack = []
                while len(tmpStack) > 3:
                    if tmpStack[0] == ':': break;
                    if tmpStack[1] != ':': break;
                    if tmpStack[2] != ':': break;
                    inheritNSStack.append(tmpStack[0])
                    tmpStack = tmpStack[3:]
                if len(tmpStack) == 1 and tmpStack[0] != ':':
                    inheritNSStack.append(tmpStack[0])
                tmpInheritClass["class"] = "::".join(inheritNSStack)
                inheritList.append(tmpInheritClass)
        self['inherits'] = inheritList
        # One empty bucket per access specifier for every member category.
        methodAccessSpecificList = {}
        propertyAccessSpecificList = {}
        enumAccessSpecificList = {}
        structAccessSpecificList = {}
        typedefAccessSpecificList = {}
        forwardAccessSpecificList = {}
        for accessSpecifier in supportedAccessSpecifier:
            methodAccessSpecificList[accessSpecifier] = []
            propertyAccessSpecificList[accessSpecifier] = []
            enumAccessSpecificList[accessSpecifier] = []
            structAccessSpecificList[accessSpecifier] = []
            typedefAccessSpecificList[accessSpecifier] = []
            forwardAccessSpecificList[accessSpecifier] = []
        self['methods'] = methodAccessSpecificList
        self['properties'] = propertyAccessSpecificList
        self['enums'] = enumAccessSpecificList
        self['structs'] = structAccessSpecificList
        self['typedefs'] = typedefAccessSpecificList
        self['forward_declares'] = forwardAccessSpecificList
def show(self):
"""Convert class to a string"""
namespace_prefix = ""
if self["namespace"]: namespace_prefix = self["namespace"] + "::"
rtn = "%s %s"%(self["declaration_method"], namespace_prefix + self["name"])
if self['abstract']: rtn += ' (abstract)\n'
else: rtn += '\n'
if 'doxygen' in self.keys(): rtn += self["doxygen"] + '\n'
if 'parent' in self.keys() and self['parent']: rtn += 'parent class: ' + self['parent'] + '\n'
if "inherits" in self.keys():
rtn += " Inherits: "
for inheritClass in self["inherits"]:
if inheritClass["virtual"]: rtn += "virtual "
rtn += "%s %s, "%(inheritClass["access"], inheritClass["class"])
rtn += "\n"
rtn += " {\n"
for accessSpecifier in supportedAccessSpecifier:
rtn += " %s\n"%(accessSpecifier)
#Enums
if (len(self["enums"][accessSpecifier])):
rtn += " <Enums>\n"
for enum in self["enums"][accessSpecifier]:
rtn += " %s\n"%(repr(enum))
#Properties
if (len(self["properties"][accessSpecifier])):
rtn += " <Properties>\n"
for property in self["properties"][accessSpecifier]:
rtn += " %s\n"%(repr(property))
#Methods
if (len(self["methods"][accessSpecifier])):
rtn += " <Methods>\n"
for method in self["methods"][accessSpecifier]:
rtn += "\t\t" + method.show() + '\n'
rtn += " }\n"
print rtn
    def __repr__(self):
        """Convert class to a string.

        Returns the declaration line, optional doxygen/parent/inheritance
        info, then enums, properties and methods grouped by access
        specifier, formatted as a C++-like block.
        """
        namespace_prefix = ""
        if self["namespace"]: namespace_prefix = self["namespace"] + "::"
        rtn = "%s %s"%(self["declaration_method"], namespace_prefix + self["name"])
        if self['abstract']: rtn += ' (abstract)\n'
        else: rtn += '\n'
        if 'doxygen' in self.keys(): rtn += self["doxygen"] + '\n'
        if 'parent' in self.keys() and self['parent']: rtn += 'parent class: ' + self['parent'] + '\n'
        if "inherits" in self.keys() and len(self["inherits"]):
            rtn += "Inherits: "
            for inheritClass in self["inherits"]:
                if inheritClass.get("virtual", False): rtn += "virtual "
                rtn += "%s %s, "%(inheritClass["access"], inheritClass["class"])
            rtn += "\n"
        rtn += "{\n"
        for accessSpecifier in supportedAccessSpecifier:
            rtn += "%s\n"%(accessSpecifier)
            #Enums
            if (len(self["enums"][accessSpecifier])):
                rtn += "    // Enums\n"
                for enum in self["enums"][accessSpecifier]:
                    rtn += "    %s\n"%(repr(enum))
            #Properties
            # NOTE(review): loop variable 'property' shadows the builtin;
            # harmless here but worth renaming in a behavior-changing pass.
            if (len(self["properties"][accessSpecifier])):
                rtn += "    // Properties\n"
                for property in self["properties"][accessSpecifier]:
                    rtn += "    %s\n"%(repr(property))
            #Methods
            if (len(self["methods"][accessSpecifier])):
                rtn += "    // Methods\n"
                for method in self["methods"][accessSpecifier]:
                    rtn += "   %s\n"%(repr(method))
        rtn += "}\n"
        return rtn
class CppUnion( CppClass ):
    """Takes a name stack and turns it into a union
    Contains the following Keys:
    self['name'] - Name of the union
    self['doxygen'] - Doxygen comments associated with the union if they exist
    self['members'] - List of members the union has
    An example of how this could look is as follows:
    #self =
    {
        'name': ""
        'members': []
    }
    """
    def __init__(self, nameStack):
        CppClass.__init__(self, nameStack)
        # Unions reuse the class machinery; rename and alias the public
        # properties list as the member list.
        self["name"] = "union " + self["name"]
        self["members"] = self["properties"]["public"]
    def transform_to_union_keys(self):
        """Drop the class-only keys that make no sense for a union."""
        # print(x) with one argument is identical under Python 2 and valid
        # under Python 3; the bare print statement was Python-2-only.
        print("union keys: %s"%self.keys())
        for key in ['inherits', 'parent', 'abstract', 'namespace', 'typedefs', 'methods']:
            del self[key]
    def show(self):
        """Print the union representation to stdout."""
        print(self)
    def __repr__(self):
        """Convert the union to a string: declaration line, optional
        doxygen/parent info, then the member list in a C++-like block."""
        namespace_prefix = ""
        if self["namespace"]: namespace_prefix = self["namespace"] + "::"
        rtn = "%s %s"%(self["declaration_method"], namespace_prefix + self["name"])
        if self['abstract']: rtn += ' (abstract)\n'
        else: rtn += '\n'
        if 'doxygen' in self.keys(): rtn += self["doxygen"] + '\n'
        if 'parent' in self.keys() and self['parent']: rtn += 'parent class: ' + self['parent'] + '\n'
        rtn += "{\n"
        for member in self["members"]:
            rtn += "    %s\n"%(repr(member))
        rtn += "}\n"
        return rtn
class _CppMethod( dict ):
    """Internal helper base for CppMethod: extracts the parameter-list
    tokens from a raw method signature token stack."""
    def _params_helper1( self, stack ):
        """Reduce ``stack`` (signature tokens) to the parameter-list tokens.

        Strips 'throw(...)' clauses, GCC '__attribute__((...))' blocks and
        '((__const__))' prefixes, then returns the tokens between the outer
        parentheses. Tokens of default arguments that are constructor calls
        are glued back into single tokens. Sets self['constructor'] when the
        token shape looks like a constructor initializer list.
        """
        # deal with "throw" keyword
        if 'throw' in stack: stack = stack[ : stack.index('throw') ]
        ## remove GCC keyword __attribute__(...) and preserve returns ##
        # Copy tokens while skipping an __attribute__ block, using the
        # open/close parenthesis counters to find where it ends.
        cleaned = []
        hit = False; hitOpen = 0; hitClose = 0
        for a in stack:
            if a == '__attribute__': hit = True
            if hit:
                if a == '(': hitOpen += 1
                elif a == ')': hitClose += 1
                if a==')' and hitOpen == hitClose:
                    hit = False
            else:
                cleaned.append( a )
        stack = cleaned
        # also deal with attribute((const)) function prefix #
        # TODO this needs to be better #
        if len(stack) > 5:
            a = ''.join(stack)
            if a.startswith('((__const__))'): stack = stack[ 5 : ]
            elif a.startswith('__attribute__((__const__))'): stack = stack[ 6 : ]
        # Keep only what follows the opening parenthesis.
        stack = stack[stack.index('(') + 1: ]
        if not stack: return []
        if len(stack)>=3 and stack[0]==')' and stack[1]==':': # is this always a constructor?
            self['constructor'] = True
            return []
        # Trim everything after the LAST closing parenthesis.
        stack.reverse(); _end_ = stack.index(')'); stack.reverse()
        stack = stack[ : len(stack)-(_end_+1) ]
        if '(' not in stack: return stack # safe to return, no defaults that init a class
        # transforms ['someclass', '(', '0', '0', '0', ')'] into "someclass(0,0,0)'"
        r = []; hit=False
        for a in stack:
            if a == '(': hit=True
            elif a == ')': hit=False
            if hit or a == ')': r[-1] = r[-1] + a
            else: r.append( a )
        return r
    def _params_helper2( self, params ):
        """Backlink each parsed parameter dict to this method and attach
        namespace information to its type."""
        for p in params:
            p['method'] = self # save reference in variable to parent method
            if '::' in p['type']:
                ns = p['type'].split('::')[0]
                # Qualify class types with the method's namespace unless the
                # prefix already names a known namespace.
                if ns not in Resolver.NAMESPACES and ns in Resolver.CLASSES:
                    p['type'] = self['namespace'] + p['type']
            else: p['namespace'] = self[ 'namespace' ]
class CppMethod( _CppMethod ):
"""Takes a name stack and turns it into a method
Contains the following Keys:
self['rtnType'] - Return type of the method (ex. "int")
self['name'] - Name of the method (ex. "getSize")
self['doxygen'] - Doxygen comments associated with the method if they exist
self['parameters'] - List of CppVariables
"""
def show(self):
r = ['method name: %s (%s)' %(self['name'],self['debug']) ]
if | |
pointers, action):
"""Get metadata about objects pointed by pointers for given action
Return decoded JSON object like {'objects': [{'oid': '', 'size': 1}]}
See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
"""
objects = [
{'oid': pycompat.strurl(p.oid()), 'size': p.size()}
for p in pointers
]
requestdata = pycompat.bytesurl(
json.dumps(
{
'objects': objects,
'operation': pycompat.strurl(action),
}
)
)
url = b'%s/objects/batch' % self.baseurl
batchreq = util.urlreq.request(pycompat.strurl(url), data=requestdata)
batchreq.add_header('Accept', 'application/vnd.git-lfs+json')
batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json')
try:
with contextlib.closing(self.urlopener.open(batchreq)) as rsp:
rawjson = rsp.read()
except util.urlerr.httperror as ex:
hints = {
400: _(
b'check that lfs serving is enabled on %s and "%s" is '
b'supported'
)
% (self.baseurl, action),
404: _(b'the "lfs.url" config may be used to override %s')
% self.baseurl,
}
hint = hints.get(ex.code, _(b'api=%s, action=%s') % (url, action))
raise LfsRemoteError(
_(b'LFS HTTP error: %s') % stringutil.forcebytestr(ex),
hint=hint,
)
except util.urlerr.urlerror as ex:
hint = (
_(b'the "lfs.url" config may be used to override %s')
% self.baseurl
)
raise LfsRemoteError(
_(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
)
try:
response = pycompat.json_loads(rawjson)
except ValueError:
raise LfsRemoteError(
_(b'LFS server returns invalid JSON: %s')
% rawjson.encode("utf-8")
)
if self.ui.debugflag:
self.ui.debug(b'Status: %d\n' % rsp.status)
# lfs-test-server and hg serve return headers in different order
headers = pycompat.bytestr(rsp.info()).strip()
self.ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))
if 'objects' in response:
response['objects'] = sorted(
response['objects'], key=lambda p: p['oid']
)
self.ui.debug(
b'%s\n'
% pycompat.bytesurl(
json.dumps(
response,
indent=2,
separators=('', ': '),
sort_keys=True,
)
)
)
def encodestr(x):
if isinstance(x, pycompat.unicode):
return x.encode('utf-8')
return x
return pycompat.rapply(encodestr, response)
def _checkforservererror(self, pointers, responses, action):
"""Scans errors from objects
Raises LfsRemoteError if any objects have an error"""
for response in responses:
# The server should return 404 when objects cannot be found. Some
# server implementation (ex. lfs-test-server) does not set "error"
# but just removes "download" from "actions". Treat that case
# as the same as 404 error.
if b'error' not in response:
if action == b'download' and action not in response.get(
b'actions', []
):
code = 404
else:
continue
else:
# An error dict without a code doesn't make much sense, so
# treat as a server error.
code = response.get(b'error').get(b'code', 500)
ptrmap = {p.oid(): p for p in pointers}
p = ptrmap.get(response[b'oid'], None)
if p:
filename = getattr(p, 'filename', b'unknown')
errors = {
404: b'The object does not exist',
410: b'The object was removed by the owner',
422: b'Validation error',
500: b'Internal server error',
}
msg = errors.get(code, b'status code %d' % code)
raise LfsRemoteError(
_(b'LFS server error for "%s": %s') % (filename, msg)
)
else:
raise LfsRemoteError(
_(b'LFS server error. Unsolicited response for oid %s')
% response[b'oid']
)
def _extractobjects(self, response, pointers, action):
"""extract objects from response of the batch API
response: parsed JSON object returned by batch API
return response['objects'] filtered by action
raise if any object has an error
"""
# Scan errors from objects - fail early
objects = response.get(b'objects', [])
self._checkforservererror(pointers, objects, action)
# Filter objects with given action. Practically, this skips uploading
# objects which exist in the server.
filteredobjects = [
o for o in objects if action in o.get(b'actions', [])
]
return filteredobjects
    def _basictransfer(self, obj, action, localstore):
        """Download or upload a single object using basic transfer protocol

        obj: dict, an object description returned by batch API
        action: string, one of ['upload', 'download']
        localstore: blobstore.local

        See https://github.com/git-lfs/git-lfs/blob/master/docs/api/\
basic-transfers.md
        """
        oid = obj[b'oid']
        # The batch response supplies the transfer URL and any extra headers.
        href = obj[b'actions'][action].get(b'href')
        headers = obj[b'actions'][action].get(b'header', {}).items()
        request = util.urlreq.request(pycompat.strurl(href))
        if action == b'upload':
            # If uploading blobs, read data from local blobstore.
            if not localstore.verify(oid):
                raise error.Abort(
                    _(b'detected corrupt lfs object: %s') % oid,
                    hint=_(b'run hg verify'),
                )
        for k, v in headers:
            request.add_header(pycompat.strurl(k), pycompat.strurl(v))
        try:
            if action == b'upload':
                # Stream the blob from disk; the file object is closed in the
                # 'finally' clause below.
                request.data = lfsuploadfile(self.ui, localstore.path(oid))
                request.get_method = lambda: 'PUT'
                request.add_header('Content-Type', 'application/octet-stream')
                request.add_header('Content-Length', request.data.length)
            with contextlib.closing(self.urlopener.open(request)) as res:
                contentlength = res.info().get(b"content-length")
                ui = self.ui  # Shorten debug lines
                if self.ui.debugflag:
                    ui.debug(b'Status: %d\n' % res.status)
                    # lfs-test-server and hg serve return headers in different
                    # order
                    headers = pycompat.bytestr(res.info()).strip()
                    ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))
                if action == b'download':
                    # If downloading blobs, store downloaded data to local
                    # blobstore
                    localstore.download(oid, res, contentlength)
                else:
                    # Upload: drain the (normally empty) server response and
                    # log it for debugging.
                    blocks = []
                    while True:
                        data = res.read(1048576)
                        if not data:
                            break
                        blocks.append(data)
                    response = b"".join(blocks)
                    if response:
                        ui.debug(b'lfs %s response: %s' % (action, response))
        except util.urlerr.httperror as ex:
            if self.ui.debugflag:
                self.ui.debug(
                    b'%s: %s\n' % (oid, ex.read())
                )  # XXX: also bytes?
            raise LfsRemoteError(
                _(b'LFS HTTP error: %s (oid=%s, action=%s)')
                % (stringutil.forcebytestr(ex), oid, action)
            )
        except util.urlerr.urlerror as ex:
            hint = _(b'attempted connection to %s') % pycompat.bytesurl(
                util.urllibcompat.getfullurl(request)
            )
            raise LfsRemoteError(
                _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
            )
        finally:
            # Close the upload file object even when the transfer failed.
            if request.data:
                request.data.close()
    def _batch(self, pointers, localstore, action):
        """Transfer a batch of blobs between the local store and the remote
        LFS server, with progress reporting and per-object retries.

        pointers: pointer objects to transfer
        localstore: blobstore.local
        action: b'upload' or b'download'
        """
        if action not in [b'upload', b'download']:
            raise error.ProgrammingError(b'invalid Git-LFS action: %s' % action)
        response = self._batchrequest(pointers, action)
        # Only objects that actually offer 'action' remain (skips blobs the
        # server already has on upload).
        objects = self._extractobjects(response, pointers, action)
        total = sum(x.get(b'size', 0) for x in objects)
        sizes = {}
        for obj in objects:
            sizes[obj.get(b'oid')] = obj.get(b'size', 0)
        topic = {
            b'upload': _(b'lfs uploading'),
            b'download': _(b'lfs downloading'),
        }[action]
        if len(objects) > 1:
            self.ui.note(
                _(b'lfs: need to transfer %d objects (%s)\n')
                % (len(objects), util.bytecount(total))
            )
        def transfer(chunk):
            # Generator yielding (1, oid) for each successfully transferred
            # blob; retries transient socket errors up to self.retry times.
            for obj in chunk:
                objsize = obj.get(b'size', 0)
                if self.ui.verbose:
                    if action == b'download':
                        msg = _(b'lfs: downloading %s (%s)\n')
                    elif action == b'upload':
                        msg = _(b'lfs: uploading %s (%s)\n')
                    self.ui.note(
                        msg % (obj.get(b'oid'), util.bytecount(objsize))
                    )
                retry = self.retry
                while True:
                    try:
                        self._basictransfer(obj, action, localstore)
                        yield 1, obj.get(b'oid')
                        break
                    except socket.error as ex:
                        if retry > 0:
                            self.ui.note(
                                _(b'lfs: failed: %r (remaining retry %d)\n')
                                % (stringutil.forcebytestr(ex), retry)
                            )
                            retry -= 1
                            continue
                        # Retries exhausted: propagate the socket error.
                        raise
        # Until https multiplexing gets sorted out
        if self.ui.configbool(b'experimental', b'lfs.worker-enable'):
            oids = worker.worker(
                self.ui,
                0.1,
                transfer,
                (),
                sorted(objects, key=lambda o: o.get(b'oid')),
            )
        else:
            oids = transfer(sorted(objects, key=lambda o: o.get(b'oid')))
        # Drive the (possibly lazy) transfer generator while updating the
        # byte-based progress bar.
        with self.ui.makeprogress(
            topic, unit=_(b"bytes"), total=total
        ) as progress:
            progress.update(0)
            processed = 0
            blobs = 0
            for _one, oid in oids:
                processed += sizes[oid]
                blobs += 1
                progress.update(processed)
                self.ui.note(_(b'lfs: processed: %s\n') % oid)
        if blobs > 0:
            if action == b'upload':
                self.ui.status(
                    _(b'lfs: uploaded %d files (%s)\n')
                    % (blobs, util.bytecount(processed))
                )
            elif action == b'download':
                self.ui.status(
                    _(b'lfs: downloaded %d files (%s)\n')
                    % (blobs, util.bytecount(processed))
                )
def __del__(self):
# copied from mercurial/httppeer.py
urlopener = getattr(self, 'urlopener', None)
if urlopener:
for h in urlopener.handlers:
h.close()
getattr(h, "close_all", lambda: None)()
class _dummyremote(object):
    """Dummy store keeping blobs in a local directory under '.hg/lfs'."""

    def __init__(self, repo, url):
        self.vfs = lfsvfs(repo.vfs.join(b'lfs', url.path))

    def writebatch(self, pointers, fromstore):
        """Copy each unique blob from the local store into this store."""
        for pointer in _deduplicate(pointers):
            blob = fromstore.read(pointer.oid(), verify=True)
            with self.vfs(pointer.oid(), b'wb', atomictemp=True) as fp:
                fp.write(blob)

    def readbatch(self, pointers, tostore):
        """Feed each unique blob from this store into the local store."""
        for pointer in _deduplicate(pointers):
            with self.vfs(pointer.oid(), b'rb') as fp:
                tostore.download(pointer.oid(), fp, None)
class _nullremote(object):
"""Null store storing blobs to /dev/null."""
def __init__(self, repo, url):
pass
def writebatch(self, pointers, fromstore):
pass
def readbatch(self, pointers, tostore):
pass
class _promptremote(object):
    """Placeholder store that aborts and asks the user to set lfs.url."""

    def __init__(self, repo, url):
        pass

    def writebatch(self, pointers, fromstore, ui=None):
        self._prompt()

    def readbatch(self, pointers, tostore, ui=None):
        self._prompt()

    def _prompt(self):
        # Both read and write paths funnel through the same abort message.
        raise error.Abort(_(b'lfs.url needs to be configured'))
# Map the URL scheme of the configured LFS endpoint to the store class
# handling it: 'http(s)' talks to a real Git-LFS server, 'file' copies blobs
# to a local directory, 'null' discards them, and an absent scheme prompts
# the user to configure lfs.url.
_storemap = {
    b'https': _gitlfsremote,
    b'http': _gitlfsremote,
    b'file': _dummyremote,
    b'null': _nullremote,
    None: _promptremote,
}
def _deduplicate(pointers):
    """Remove any duplicate oids that exist in the list.

    Keys the pointers by oid in a util.sortdict, so later duplicates
    replace earlier entries, and returns the remaining pointer values.
    """
    unique = util.sortdict()
    for pointer in pointers:
        unique[pointer.oid()] = pointer
    return unique.values()
def _verify(oid, content):
    """Raise LfsCorruptionError when content's sha256 does not match oid."""
    actual = hex(hashlib.sha256(content).digest())
    if actual != oid:
        raise LfsCorruptionError(
            _(b'detected corrupt lfs object: %s') % oid,
            hint=_(b'run hg verify'),
        )
def remote(repo, remote=None):
"""remotestore factory. return a store in _storemap depending on config
If ``lfs.url`` is specified, use that remote endpoint. Otherwise, try to
infer the endpoint, based on the remote repository using the same path
adjustments as git. As an extension, 'http' is supported as well so that
``hg serve`` works out of the box.
https://github.com/git-lfs/git-lfs/blob/master/docs/api/server-discovery.md
"""
lfsurl = repo.ui.config(b'lfs', b'url')
url = util.url(lfsurl or b'')
if lfsurl is None:
if remote:
path = remote
elif util.safehasattr(repo, b'_subtoppath'):
# The pull command sets this during the optional update phase, which
# tells exactly where the pull originated, whether 'paths.default'
# or explicit.
path = repo._subtoppath
else:
# TODO: investigate 'paths.remote:lfsurl' style path customization,
# and fall back | |
from tkinter import (
Tk,
Button,
Scale,
HORIZONTAL,
PhotoImage,
Label,
Listbox,
END,
Scrollbar,
VERTICAL,
)
from tkinter.filedialog import askopenfilename, askdirectory
from pygame.mixer import music, init, quit
from tinytag import TinyTag
from glob import glob
# Initialise the pygame mixer; playback does not work without it.
init()
# Playback state flags used by the callbacks below.
pause: bool = False
restart: bool = False
# File paths of all added tracks (parallel to the listbox rows).
playlist: list = []
# Index of the current track within `playlist`.
trek: int = 0
# TinyTag metadata of the current track (None until a track is loaded).
a: None = None
# Selected UI language (0 = English, 1 = Russian); see language().
choose: int = 0
# Default (English) UI strings; language() swaps these at runtime.
lang: str = "language: English"
track_null: str = "add track"
track_none: str = "add track!"
added_file: str = "add"
added_folder: str = "add folder"
nesting: str = "next"
earning: str = "early"
name_trek_none: str = "no track name"
name_album_none: str = "no album name"
name_artist_none: str = "no performer"
pauses: str = "pause"
play: str = "play"
# Volume-slider callback.
def get_v(value):
    """Scale the 0-100 slider value to pygame's 0.0-1.0 volume range."""
    volume = int(value) / 100
    music.set_volume(volume)
# Resume playback when currently paused.
def playing(event=None):
    """Un-pause the mixer; does nothing when playback is not paused.

    NOTE(review): the global `pause` flag is not reset here, while
    stopping() does reset it -- confirm whether that is intentional.
    """
    global pause
    if pause:
        music.unpause()
# Ask for (or accept) a single audio file and add it to the playlist.
def add(event=None, file=None):
    """Add one track to `playlist` and to the on-screen track list.

    The visible title comes from the file's tags when available,
    otherwise from the file name with its extension stripped.

    Fix: the AttributeError fallback used to insert at row 0 (not END)
    and never appended to `playlist`, desyncing listbox rows from
    playlist indices (trek_from_list maps selection index -> playlist).
    """
    if file is None:
        get_address: str = askopenfilename(
            filetypes=(
                ("MP3", "*.mp3"),
                ("Windows audio file", "*.wav"),
                ("instruction MIDI", "*.mid"),
                ("instruction MIDI", "*.midi"),
                ("all audio files", "*.midi",),
                ("all audio files", "*.mid"),
                ("all audio files", "*.wav"),
                ("all audio files", "*.mp3"),
            )
        )
    else:
        get_address: str = file
    if get_address is None:
        return
    try:
        title = TinyTag.get(get_address).title
        if title is None or not title.strip():
            # No usable tag: fall back to the file name.
            name = get_address.split("/")
            list_track.insert(END, name[len(name) - 1][:-4])
        else:
            list_track.insert(END, title)
        playlist.append(get_address)
    except AttributeError:
        # Tag reading failed entirely: still register the track so the
        # listbox rows stay aligned with the playlist entries.
        name = get_address.split("/")
        list_track.insert(END, name[len(name) - 1][:-4])
        playlist.append(get_address)
    except FileNotFoundError:
        # Dialog cancelled / path vanished: silently ignore.
        pass
def add_folder(event=None, file=None):
    """Add every *.mp3/*.mid/*.wav/*.midi file of a directory.

    Each file is appended to `playlist` and shown in the listbox with
    its tag title, or its file name (extension stripped) when no usable
    tag exists.

    Fix: the *.midi loop used to read tags from `get_address` (the
    directory path) instead of `filename`, so .midi tracks always got a
    wrong/failed title lookup.  The four near-identical loops are also
    collapsed into one loop over glob patterns.
    """
    if file is None:
        get_address: str = askdirectory()
    else:
        get_address: str = file
    if get_address is None:
        return
    for pattern in ("/*.mp3", "/*.mid", "/*.wav", "/*.midi"):
        for filename in glob(get_address + pattern):
            playlist.append(filename)
            try:
                title = TinyTag.get(filename).title
                if title is None or not title.strip():
                    name = filename.split("/")
                    list_track.insert(END, name[len(name) - 1][:-4])
                else:
                    list_track.insert(END, title)
            except (AttributeError, LookupError):
                # No readable tags: fall back to the file name.
                name = filename.split("/")
                list_track.insert(END, name[len(name) - 1][:-4])
# Toggle pause: press once to pause, press again to resume.
def stopping(event=None):
    """Flip the global pause state of the mixer."""
    global pause
    if not pause:
        music.pause()
        pause = True
    else:
        music.unpause()
        pause = False
# Next track: pick the next address from the playlist, load and play it,
# then show its title/artist/album (or placeholders) on the labels.
def next(event=None):
    """Advance to the next playlist entry and refresh the tag labels.

    NOTE(review): this shadows the builtin next(); renaming would change
    the public interface (key bindings/buttons reference it), so it is
    documented rather than renamed.
    """
    global trek, a
    # Wrap the index so it never runs off the end of the playlist.
    if trek >= len(playlist):
        trek = 0
    try:
        trek += 1
        if trek >= len(playlist):
            trek = 0
        music.load(playlist[trek])
        music.play(loops=-1)  # loop the track indefinitely
        try:
            a = TinyTag.get(playlist[trek])
        except LookupError:
            pass  # keep the previous tags when this file has none
        # Title: set the placeholder first, overwrite when a tag exists.
        name_trek.configure(text=name_trek_none)
        try:
            if a.title == " " or a.title == "  " or a.title == "":
                pass
            else:
                name_trek.configure(text=a.title)
        except AttributeError:
            pass
        x_center_name_new = WIDTH // 2 - name_trek.winfo_reqwidth() // 2
        name_trek.place(x=x_center_name_new, y=0)
        # Artist: same placeholder-then-overwrite pattern.
        name_artist.configure(text=name_artist_none)
        try:
            if a.artist == " " or a.artist == "  " or a.artist == "":
                pass
            else:
                name_artist.configure(text=a.artist)
        except AttributeError:
            pass
        x_center_artist_new = WIDTH // 2 - name_artist.winfo_reqwidth() // 2
        name_artist.place(x=x_center_artist_new, y=20)
        # Album: same placeholder-then-overwrite pattern.
        name_album.configure(text=name_album_none)
        try:
            if a.album == " " or a.album == "  " or a.album == "":
                pass
            else:
                name_album.configure(text=a.album)
        except AttributeError:
            pass
        x_center_album_new = WIDTH // 2 - name_album.winfo_reqwidth() // 2
        name_album.place(x=x_center_album_new, y=40)
    except IndexError:
        # Empty playlist: ask the user to add a track.
        name_trek.configure(text=track_none)
        x_center_name_new = WIDTH // 2 - name_trek.winfo_reqwidth() // 2
        name_trek.place(x=x_center_name_new, y=0)
    except TypeError:
        name_trek.configure(text=track_none)
        x_center_name_new = WIDTH // 2 - name_trek.winfo_reqwidth() // 2
        name_trek.place(x=x_center_name_new, y=0)
    if trek >= len(playlist):
        trek = 0
# Same as next() but steps backwards through the playlist.
def early(event=None):
    """Step to the previous playlist entry and refresh the tag labels."""
    global trek, a
    # Clamp the index into range before moving (wraps both directions).
    if trek >= len(playlist):
        trek = 0
    if trek < 0:
        trek = len(playlist) - 1
    try:
        trek -= 1
        if trek < 0:
            trek = len(playlist) - 1
        music.load(playlist[trek])
        music.play(loops=-1)  # loop the track indefinitely
        try:
            a = TinyTag.get(playlist[trek])
        except LookupError:
            pass  # keep the previous tags when this file has none
        # Title: set the placeholder first, overwrite when a tag exists.
        name_trek.configure(text=name_trek_none)
        try:
            if a.title == " " or a.title == "  " or a.title == "":
                pass
            else:
                name_trek.configure(text=a.title)
        except AttributeError:
            pass
        x_center_name_new = WIDTH // 2 - name_trek.winfo_reqwidth() // 2
        name_trek.place(x=x_center_name_new, y=0)
        # Artist: same placeholder-then-overwrite pattern.
        name_artist.configure(text=name_artist_none)
        try:
            if a.artist == " " or a.artist == "  " or a.artist == "":
                pass
            else:
                name_artist.configure(text=a.artist)
        except AttributeError:
            pass
        x_center_artist_new = WIDTH // 2 - name_artist.winfo_reqwidth() // 2
        name_artist.place(x=x_center_artist_new, y=20)
        # Album: same placeholder-then-overwrite pattern.
        name_album.configure(text=name_album_none)
        try:
            if a.album == " " or a.album == "  " or a.album == "":
                pass
            else:
                name_album.configure(text=a.album)
        except AttributeError:
            pass
        x_center_album_new = WIDTH // 2 - name_album.winfo_reqwidth() // 2
        name_album.place(x=x_center_album_new, y=40)
    except IndexError:
        # Empty playlist: ask the user to add a track.
        name_trek.configure(text=track_none)
        x_center_name_new = WIDTH // 2 - name_trek.winfo_reqwidth() // 2
        name_trek.place(x=x_center_name_new, y=0)
    except TypeError:
        name_trek.configure(text=track_none)
        x_center_name_new = WIDTH // 2 - name_trek.winfo_reqwidth() // 2
        name_trek.place(x=x_center_name_new, y=0)
    if trek >= len(playlist):
        trek = 0
    if trek < 0:
        trek = len(playlist) - 1
def language():
    """Toggle the UI between English and Russian and refresh all widgets.

    Fixes two always-true conditions: the artist and album checks used
    `or` between != comparisons (an `or` chain of inequalities against
    different constants can never be false), and the album check tested
    `a.artist is not None` instead of `a.album`.  As a result empty tag
    strings were displayed instead of the placeholder captions.
    """
    global choose, lang, track_null, track_none, added_file, added_folder, nesting, earning, name_trek_none, name_album_none, name_artist_none, pauses, play, added_folder, a
    choose += 1
    if choose >= 2:
        choose = 0
    if choose == 0:
        # English captions
        lang = "language: English"
        track_null = "add track"
        track_none = "add track!"
        added_file = "add track"
        added_folder = "add folder"
        nesting = "next"
        earning = "early"
        name_trek_none = "no track name"
        name_album_none = "no album name"
        name_artist_none = "no performer"
        pauses = "pause"
        play = "play"
    elif choose == 1:
        # Russian captions
        lang = "язык: Русский"
        track_null = "Добавьте трек"
        track_none = "Добавьте трек!"
        added_file = "Добавить трек"
        added_folder = "добавить папку"
        nesting = """след-
        ую-
        щий"""
        earning = """пре-
        дыду-
        щий"""
        name_trek_none = "нет имени трека"
        name_album_none = "нет имени альбома"
        name_artist_none = "не указан исполнитель"
        pauses = "пауза"
        play = "играть"
    # Re-apply captions and re-centre every widget for the new language.
    button1.configure(text=play)
    x_center_1_new = WIDTH // 2 - button1.winfo_reqwidth() // 2
    button1.place(x=x_center_1_new + 30, y=HEIGHT - 125)
    button2.configure(text=pauses)
    x_center_2_new = WIDTH // 2 - button2.winfo_reqwidth() // 2
    button2.place(x=x_center_2_new - 30, y=HEIGHT - 125)
    name_trek.configure(text=track_null)
    x_center_name_new = WIDTH // 2 - name_trek.winfo_reqwidth() // 2
    name_trek.place(x=x_center_name_new, y=0)
    name_artist.configure(text=track_null)
    x_center_artist_new = WIDTH // 2 - name_artist.winfo_reqwidth() // 2
    name_artist.place(x=x_center_artist_new, y=20)
    name_album.configure(text=track_null)
    x_center_album_new = WIDTH // 2 - name_album.winfo_reqwidth() // 2
    name_album.place(x=x_center_album_new, y=40)
    button_restart.configure(text=added_file)
    button_restart.place(x=0, y=HEIGHT - 50)
    button_restart2.configure(text=added_folder)
    x_center_restart = WIDTH - button_restart.winfo_reqwidth()
    button_restart2.place(x=x_center_restart, y=HEIGHT - 50)
    button_next.configure(text=nesting)
    x_center_next_new = WIDTH // 2 - button_next.winfo_reqwidth() // 2
    button_next.place(x=x_center_next_new + 75, y=HEIGHT - 125)
    button_early.configure(text=earning)
    x_center_early_new = WIDTH // 2 - button_early.winfo_reqwidth() // 2
    button_early.place(x=x_center_early_new - 75, y=HEIGHT - 125)
    button_lang.configure(text=lang)
    x_center_lang_new = WIDTH // 2 - button_lang.winfo_reqwidth() // 2
    button_lang.place(x=x_center_lang_new, y=HEIGHT - 25)
    # Restore the current track's tag info (placeholder when absent).
    name_trek.configure(text=name_trek_none)
    try:
        if a.title != " " and a.title != "  " and a.title != "" and a.title is not None:
            name_trek.configure(text=a.title)
    except AttributeError:
        pass
    x_center_name_new = WIDTH // 2 - name_trek.winfo_reqwidth() // 2
    name_trek.place(x=x_center_name_new, y=0)
    name_artist.configure(text=name_artist_none)
    try:
        # BUG FIX: was an always-true `or` chain.
        if a.artist != " " and a.artist != "  " and a.artist != "" and a.artist is not None:
            name_artist.configure(text=a.artist)
    except AttributeError:
        pass
    x_center_artist_new = WIDTH // 2 - name_artist.winfo_reqwidth() // 2
    name_artist.place(x=x_center_artist_new, y=20)
    name_album.configure(text=name_album_none)
    try:
        # BUG FIX: always-true `or` chain, and a.artist was checked
        # where a.album was intended.
        if a.album != " " and a.album != "  " and a.album != "" and a.album is not None:
            name_album.configure(text=a.album)
    except AttributeError:
        pass
    x_center_album_new = WIDTH // 2 - name_album.winfo_reqwidth() // 2
    name_album.place(x=x_center_album_new, y=40)
def trek_from_list(event=None):
global trek, playlist, a
select = list_track.curselection()
trek = select[0]
music.load(playlist[trek])
music.play(loops=-1)
try:
a | |
for the sql " \
" * to literally match an underscore, not any " \
" * single character LIKE usually matches it to. */ " \
" and (v2.file_family like '%%/_copy/_[0-9]' escape '/'" \
" or " \
" (v1.system_inhibit_1 in ('duplicating', " \
" 'duplicated') " \
" or (select count(alt_bfid) " \
" from file f1,file f2,file_copies_map " \
" where f1.volume = v1.id " \
" and f1.bfid = file_copies_map.bfid" \
" and f2.volume = v2.id " \
" and f2.bfid = file_copies_map.alt_bfid " \
" limit 1) " \
" > 0)); " \
% (src_vol, dst_vol)
q_m = "select v1.label, v2.label " \
"from volume v1, volume v2 " \
"where v1.label = '%s' and v2.label = '%s' " \
" and (v2.file_family like '%%-MIGRATION' " \
" or " \
" (v1.system_inhibit_1 in ('migrating', " \
" 'migrated') " \
" or (select count(dst_bfid) " \
" from file, migration " \
" where v1.id = file.volume " \
" and (file.bfid = migration.src_bfid or " \
" file.bfid = migration.dst_bfid) " \
" /* Be sure to exclude duplication! */" \
" and v1.system_inhibit_1 not in " \
" ('duplicating', " \
" 'duplicated') " \
" limit 1) " \
" > 0));" \
% (src_vol, dst_vol)
q_c = "select v1.label, v2.label " \
"from volume v1, volume v2 " \
"where v1.label = '%s' and v2.label = '%s' " \
" and (v1.system_inhibit_1 in ('cloning', 'cloned') " \
" or v1.media_type = v2.media_type); " \
% (src_vol, dst_vol)
res = db.query(q_m).getresult()
if len(res) != 0:
migration_result = "MIGRATION"
res = db.query(q_d).getresult()
if len(res) != 0:
duplication_result = "DUPLICATION"
res = db.query(q_c).getresult()
if len(res) != 0:
cloning_result = "CLONING"
except IndexError:
return None
return __get_migration_type(migration_result, duplication_result,
None, cloning_result)
def __get_migration_type(migration_result, duplication_result,
                         multiple_copy_result, cloning_result):
    """Collapse the four detection flags into one type string.

    Returns "MIGRATION", "DUPLICATION", "CLONING", "MULTIPLE_COPY",
    an inconsistency message when migration and duplication are both
    flagged, or None when nothing matched.
    """
    if migration_result and duplication_result:
        return "The metadata is inconsistent between migration " \
               "and duplication."
    # Duplicating onto the same media type also trips the cloning
    # detector; with no migration flag, report it as duplication.
    if (not migration_result and cloning_result) and duplication_result:
        return "DUPLICATION"
    if cloning_result:
        return "CLONING"
    if migration_result:
        return "MIGRATION"
    if duplication_result:
        return "DUPLICATION"
    if multiple_copy_result:
        return "MULTIPLE_COPY"
    return None
#Helper for get_multiple_copy_bfids() and is_multiple_copy_bfid().
def __multiple_copy(bfid, db):
    """Return rows of alt_bfids registered as copies of `bfid`,
    excluding destinations whose deleted state is unknown."""
    q = "select alt_bfid from file_copies_map,file " \
        "where file_copies_map.bfid = '%s'" \
        " and alt_bfid = file.bfid " \
        " and file.deleted in ('y', 'n')" % (bfid,)
    return db.query(q).getresult()
#Report the multiple copies a file has.
def get_multiple_copy_bfids(bfid, db):
    """Return the list of alternate-copy bfids recorded for `bfid`."""
    return [row[0] for row in __multiple_copy(bfid, db)]
#Report if the bfid is a multiple copy bfid.
def is_multiple_copy_bfid(bfid, db):
    """Return True when `bfid` is registered as an alternate (multiple)
    copy of some original file; destinations with unknown deleted state
    are excluded."""
    q = "select alt_bfid from file_copies_map,file " \
        "where file_copies_map.alt_bfid = '%s'" \
        " and alt_bfid = file.bfid " \
        " and file.deleted in ('y', 'n')" % (bfid,)
    # Idiom fix: return the comparison directly instead of the
    # if/return True/return False ladder.
    return len(db.query(q).getresult()) > 0
#Helper for get_original_copy_bfid() and is_original_copy_bfid().
def __original_copy(bfid, db):
    """Return rows of original bfids that list `bfid` as an alternate
    copy, excluding originals whose deleted state is unknown."""
    q = "select file_copies_map.bfid from file_copies_map,file " \
        "where file_copies_map.alt_bfid = '%s'" \
        " and file_copies_map.bfid = file.bfid " \
        " and file.deleted in ('y', 'n')" % (bfid,)
    return db.query(q).getresult()
#Report the original copy a file has.
def get_original_copy_bfid(bfid, db):
    """Return the list of original bfids that `bfid` is a copy of."""
    return [row[0] for row in __original_copy(bfid, db)]
#Report the root original copy a file has.
def get_the_original_copy_bfid(bfid, db):
    """Walk the copy chain upward and return the root original bfid.

    Raises ValueError if any bfid claims more than one original.
    """
    current = bfid
    while current:
        rows = __original_copy(current, db)
        if not rows:
            return current  # no parent found: this is the root original
        if len(rows) > 1:
            # Should never happen: a copy has exactly one original.
            raise ValueError("Too many bfids found")
        current = rows[0][0]
    return None
#Report if the bfid is an original copy bfid.
def is_original_copy_bfid(bfid, db):
    """Return True when `bfid` has no original above it, i.e. it is
    itself an original (not an alternate copy)."""
    return len(__original_copy(bfid, db)) == 0
#Find the deepest existing Migration directory for a path.
def search_directory(original_path):
    """Return the deepest existing directory on the migration path of
    `original_path`, or None when the walk reaches the filesystem root.

    Migration directories are created lazily (only when the first new
    copy is about to be written to tape), so we walk upward until a
    component exists or the MIGRATION_DB component is reached.
    """
    candidate = chimera.get_directory_name(
        migration_path(original_path, {}))
    while True:
        try:
            os.stat(candidate)  # existence test
        except (OSError, IOError):
            # Not there yet; the MIGRATION_DB component is accepted
            # even when it does not exist.
            if os.path.basename(candidate) == MIGRATION_DB:
                return candidate
            candidate = os.path.dirname(candidate)
            if candidate in ("/", ""):
                return None  # walked off the top without a hit
            continue
        return candidate
#Look for the media type that the file would be written to.
def search_media_type(original_path, db):
    """Return the media type for the file's migration directory, or
    None when no existing migration directory can be found."""
    search_dir = search_directory(original_path)
    return get_media_type(search_dir, db) if search_dir else None
#Modify the sql result to match fcc.bfid_info() format.
def __correct_db_file_info(file_record):
    """Fold sanity_size/sanity_crc into a sanity_cookie tuple, in place.

    The cookie is only set when both source keys are present; either
    way the raw keys are removed.  Returns the (mutated) record.
    """
    if 'sanity_size' in file_record and 'sanity_crc' in file_record:
        file_record['sanity_cookie'] = (file_record['sanity_size'],
                                        file_record['sanity_crc'])
    file_record.pop('sanity_size', None)
    file_record.pop('sanity_crc', None)
    return file_record
#Obtain information for the bfid.
def get_file_info(my_task, bfid, fcc, db):
    """Return the file record for `bfid` via the file clerk.

    Logs with error_log() and returns None when the lookup fails.
    The direct-database branch below is currently unreachable because
    use_clerks is forced to True (see the SFA comment); it is kept for
    reference.
    """
    #use_clerks = USE_CLERKS
    use_clerks = True
    # SFA: force to always use file clerk instead of local query.
    # FC provides file package information we need
    if use_clerks:
        reply_ticket = fcc.bfid_info(bfid)
        if not e_errors.is_ok(reply_ticket):
            error_log(my_task, "%s info not found: %s" \
                      % (bfid, reply_ticket['status']))
            return None
        return reply_ticket
    else:
        # get file info
        q = "select bfid, label as external_label, location_cookie, \
        pnfs_id as pnfsid, update, uid, gid, drive, \
        case when deleted = 'y' then '%s' \
        when deleted = 'n' then '%s' \
        else '%s' \
        end as deleted, \
        pnfs_path as pnfs_name0, size, crc as complete_crc, \
        sanity_size, sanity_crc \
        from file, volume \
        where file.volume = volume.id and \
        bfid = '%s';" % (YES, NO, UNKNOWN, bfid,)
        if debug:
            log(my_task, q)
        res = db.query(q).dictresult()
        # does it exist?
        if not len(res):
            error_log(my_task, "%s does not exist in db" % (bfid,))
            return None
        return_copy = copy.copy(res[0])
        #Modify the sql result to match fcc.bfid_info() format.
        return_copy = __correct_db_file_info(return_copy)
        return_copy['status'] = (e_errors.OK, None)
        return return_copy
# Cache of get_volume_info() results so repeated lookups skip the clerk/db.
volume_info_cache = {} #Keyed by volume label.
#Obtain information for the volume.
def get_volume_info(my_task, volume, vcc, db, use_cache=False):
    """Return the volume record for `volume`, massaged to match
    vcc.inquire_vol() format, optionally serving it from the cache.

    NOTE(review): when USE_CLERKS is true, reply_ticket is validated but
    neither `res` nor `return_copy` is bound before the post-processing
    below runs -- it looks like a `return reply_ticket` (or a
    reply_ticket -> return_copy assignment) is missing.  Confirm against
    the original enstore sources before relying on the clerk branch.
    """
    global volume_info_cache
    #First see if we should use the cache.
    if use_cache:
        return_copy = volume_info_cache.get(volume)
        if return_copy:
            return return_copy
    if USE_CLERKS:
        reply_ticket = vcc.inquire_vol(volume)
        if not e_errors.is_ok(reply_ticket):
            error_log(my_task, "%s info not found: %s" \
                      % (volume, reply_ticket['status']))
            return None
    else:
        #get volume info
        q = "select label as external_label, block_size as blocksize, \
        capacity_bytes, declared, eod_cookie, first_access, \
        last_access, library, media_type, remaining_bytes, \
        sum_mounts, sum_rd_access, sum_rd_err, sum_wr_access, \
        sum_wr_err, \
        system_inhibit_0, system_inhibit_1, \
        user_inhibit_0, user_inhibit_1, \
        si_time_0, si_time_1, \
        storage_group || '.' || file_family || '.' || wrapper as volume_family, \
        write_protected, comment, modification_time \
        from volume \
        where volume.label = '%s';" % (volume,)
        if debug:
            log(my_task, q)
        res = db.query(q).dictresult()
        # does it exist?
        if not len(res):
            error_log(my_task, "%s does not exist in db" % (volume,))
            return None
        return_copy = copy.copy(res[0])
    #Modify the sql result to match vcc.inquire_vol() format.
    # Each pair below folds two columns into the list/tuple form the
    # volume clerk returns, then drops the raw columns; missing keys
    # are tolerated via KeyError.
    try:
        #First is the system inhibit.
        return_copy['system_inhibit'] = [res[0]['system_inhibit_0'],
                                         res[0]['system_inhibit_1']]
    except KeyError:
        pass
    try:
        del return_copy['system_inhibit_0']
    except KeyError:
        pass
    try:
        del return_copy['system_inhibit_1']
    except KeyError:
        pass
    try:
        #Second is the user inhibit.
        return_copy['user_inhibit'] = [res[0]['user_inhibit_0'],
                                       res[0]['user_inhibit_1']]
    except KeyError:
        pass
    try:
        del return_copy['user_inhibit_0']
    except KeyError:
        pass
    try:
        del return_copy['user_inhibit_1']
    except KeyError:
        pass
    try:
        # Third is the si_time.
        return_copy['si_time'] = (res[0]['si_time_0'],
                                  res[0]['si_time_1'])
    except KeyError:
        pass
    try:
        del return_copy['si_time_0']
    except KeyError:
        pass
    try:
        del return_copy['si_time_1']
    except KeyError:
        pass
    return_copy['status'] = (e_errors.OK, None)
    volume_info_cache[volume] = return_copy
    return return_copy
def get_volume_info_for_bfid(my_task, bfid, vcc, fcc, db):
    """Return the volume record for the volume holding `bfid`.

    Returns None when either the file record or its volume record
    cannot be obtained.  (Idiom fix: `== None` replaced by `is None`.)
    """
    bfid_dict = get_file_info(my_task, bfid, fcc, db)
    if bfid_dict is None:
        return None
    volume_dict = get_volume_info(my_task, bfid_dict['external_label'],
                                  vcc, db)
    if volume_dict is None:
        return None
    return volume_dict
#Return the list of files to migrate for the volume.
def get_tape_list(my_task, volume, fcc, db, intf, all_files = False):
if USE_CLERKS:
list_ticket = fcc.tape_list(volume)
# Don't ever include unknown files.
if intf.with_deleted:
allowed_deleted_states = [YES, NO]
else:
allowed_deleted_states = [NO] #Don't allow deleted files.
#Get the list of all bad files.
if intf.skip_bad:
bad_ticket = fcc.show_bad()
if | |
<filename>src/hplib_database.py
# Import packages
import os
import pandas as pd
import scipy
import hplib as hpl
from functools import partial
import concurrent.futures
# Functions
def import_heating_data():
# read in keymark data from *.txt files in /input/txt/
# save a dataframe to database_heating.csv in folder /output/
Modul = []
Manufacturer = []
Date = []
Refrigerant = []
Mass = []
Poff = []
Psb = []
Prated = []
SPLindoor = []
SPLoutdoor = []
Type = []
Climate = []
Guideline = []
T_in = []
T_out = []
P_th = []
COP = []
df = pd.DataFrame()
os.chdir('../')
root = os.getcwd()
Scanordner = (root + '/input/txt')
os.chdir(Scanordner)
Scan = os.scandir(os.getcwd())
with Scan as dir1:
for file in dir1:
with open(file, 'r', encoding='utf-8') as f:
contents = f.readlines()
date = 'NaN'
modul = 'NaN'
prated_low = 'NaN'
prated_medium = 'NaN'
heatpumpType = 'NaN'
refrigerant = 'NaN'
splindoor_low = 'NaN'
splindoor_medium = 'NaN'
sploutdoor_low = 'NaN'
sploutdoor_medium = 'NaN'
poff = 'NaN'
climate = 'NaN'
NumberOfTestsPerNorm = []
NumberOfTestsPerModule = []
i = 1 # indicator for the line wich is read
d = 0 # indicator if only medium Temperature is given
p = 0 # -15° yes or no
date = contents[1]
date = date[61:]
if (date == '17 Dec 2020\n'):
date = '17.12.2020\n'
if (date == '18 Dec 2020\n'):
date = '18.12.2020\n'
if (date.startswith('5 Mar 2021')):
date = '05.03.2021\n'
if (date.startswith('15 Feb 2021')):
date = '15.02.2021\n'
if (date.startswith('22 Feb 2021')):
date = '22.02.2021\n'
for lines in contents:
i = i + 1
if (lines.startswith('Name\n') == 1):
manufacturer = (contents[i])
if (manufacturer.find('(') > 0):
manufacturer = manufacturer.split('(', 1)[1].split('\n')[0]
if manufacturer.endswith('GmbH\n'):
manufacturer = manufacturer[:-5]
if manufacturer.endswith('S.p.A.\n'):
manufacturer = manufacturer[:-6]
if manufacturer.endswith('s.p.a.\n'):
manufacturer = manufacturer[:-6]
if manufacturer.endswith('S.p.A\n'):
manufacturer = manufacturer[:-5]
if manufacturer.endswith('S.L.U.\n'):
manufacturer = manufacturer[:-6]
if manufacturer.endswith('s.r.o.\n'):
manufacturer = manufacturer[:-6]
if manufacturer.endswith('S.A.\n'):
manufacturer = manufacturer[:-4]
if manufacturer.endswith('S.L.\n'):
manufacturer = manufacturer[:-4]
if manufacturer.endswith('B.V.\n'):
manufacturer = manufacturer[:-4]
if manufacturer.endswith('N.V.\n'):
manufacturer = manufacturer[:-4]
if manufacturer.endswith('GmbH & Co KG\n'):
manufacturer = manufacturer[:-12]
elif manufacturer.startswith('NIBE'):
manufacturer = 'Nibe\n'
elif manufacturer.startswith('Nibe'):
manufacturer = 'Nibe\n'
elif manufacturer.startswith('Mitsubishi'):
manufacturer = 'Mitsubishi\n'
elif manufacturer.startswith('Ochsner'):
manufacturer = 'Ochsner\n'
elif manufacturer.startswith('OCHSNER'):
manufacturer = 'Ochsner\n'
elif manufacturer.startswith('Viessmann'):
manufacturer = 'Viessmann\n'
elif (lines.endswith('Date\n') == 1):
date = (contents[i])
if (date == 'basis\n'):
date = contents[i - 3]
date = date[14:]
elif (lines.startswith('Model') == 1):
modul = (contents[i - 2])
splindoor_low = 'NaN'
splindoor_medium = 'NaN'
sploutdoor_low = 'NaN'
sploutdoor_medium = 'NaN'
elif lines.endswith('Type\n'):
heatpumpType = contents[i][:-1]
if heatpumpType.startswith('A'):
heatpumpType = 'Outdoor Air/Water'
if heatpumpType.startswith('Eau glycol'):
heatpumpType = 'Brine/Water'
elif (lines.startswith('Sound power level indoor')):
SPL = 1
if (contents[i].startswith('Low')):
if contents[i + 2].startswith('Medium'):
splindoor_low = contents[i + 4][:-7]
splindoor_medium = contents[i + 6][:-7]
if contents[i].startswith('Medium'):
splindoor_medium = contents[i + 4][:-7]
splindoor_low = contents[i + 6][:-7]
elif (contents[i].endswith('dB(A)\n')):
if (contents[i - 3].startswith('Low')):
splindoor_low = contents[i][:-7]
if (contents[i - 3].startswith('Medium')):
splindoor_medium = contents[i][:-7]
if (contents[i - 6].startswith('Low')):
splindoor_low = contents[i][:-7]
if (contents[i - 6].startswith('Medium')):
splindoor_medium = contents[i + 2][:-7]
if (contents[i - 4].startswith('Low')):
splindoor_low = contents[i][:-7]
if (contents[i - 4].startswith('Medium')):
splindoor_medium = contents[i + 2][:-7]
else:
splindoor_low = contents[i][:-7]
splindoor_medium = contents[i][:-7]
elif (lines.startswith('Sound power level outdoor')):
SPL = 1
if (contents[i].startswith('Low')):
if contents[i + 2].startswith('Medium'):
sploutdoor_low = contents[i + 4][:-7]
sploutdoor_medium = contents[i + 6][:-7]
if contents[i].startswith('Medium'):
sploutdoor_medium = contents[i + 4][:-7]
sploutdoor_low = contents[i + 6][:-7]
elif (contents[i].endswith('dB(A)\n')):
if (contents[i - 3].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 3].startswith('Medium')):
sploutdoor_medium = contents[i][:-7]
if (contents[i - 6].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 6].startswith('Medium')):
sploutdoor_medium = contents[i + 2][:-7]
if (contents[i - 4].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 4].startswith('Medium')):
sploutdoor_medium = contents[i + 2][:-7]
else:
sploutdoor_low = contents[i][:-7]
sploutdoor_medium = contents[i][:-7]
elif (lines.startswith('Puissance acoustique extérieure')):
b = 1
if (contents[i].startswith('Low')):
if contents[i + 2].startswith('Medium'):
sploutdoor_low = contents[i + 4][:-7]
sploutdoor_medium = contents[i + 6][:-7]
if contents[i].startswith('Medium'):
sploutdoor_medium = contents[i + 4][:-7]
sploutdoor_low = contents[i + 6][:-7]
elif (contents[i].endswith('dB(A)\n')):
if (contents[i - 3].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 3].startswith('Medium')):
sploutdoor_medium = contents[i][:-7]
if (contents[i - 6].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 6].startswith('Medium')):
sploutdoor_medium = contents[i + 2][:-7]
if (contents[i - 4].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 4].startswith('Medium')):
sploutdoor_medium = contents[i + 2][:-7]
else:
sploutdoor_low = contents[i][:-7]
sploutdoor_medium = contents[i][:-7]
elif (lines.startswith('Potencia sonora de la unidad interior')):
SPL = 1
if (contents[i].startswith('Low')):
if contents[i + 2].startswith('Medium'):
splindoor_low = contents[i + 4][:-7]
splindoor_medium = contents[i + 6][:-7]
if contents[i].startswith('Medium'):
splindoor_medium = contents[i + 4][:-7]
splindoor_low = contents[i + 6][:-7]
elif (contents[i].endswith('dB(A)\n')):
if (contents[i - 3].startswith('Low')):
splindoor_low = contents[i][:-7]
if (contents[i - 3].startswith('Medium')):
splindoor_medium = contents[i][:-7]
if (contents[i - 6].startswith('Low')):
splindoor_low = contents[i][:-7]
if (contents[i - 6].startswith('Medium')):
splindoor_medium = contents[i + 2][:-7]
if (contents[i - 4].startswith('Low')):
splindoor_low = contents[i][:-7]
if (contents[i - 4].startswith('Medium')):
splindoor_medium = contents[i + 2][:-7]
else:
splindoor_low = contents[i][:-7]
splindoor_medium = contents[i][:-7]
elif (lines.startswith('Potencia sonora de la unidad exterior')):
SPL = 1
if (contents[i].startswith('Low')):
if contents[i + 2].startswith('Medium'):
sploutdoor_low = contents[i + 4][:-7]
sploutdoor_medium = contents[i + 6][:-7]
if contents[i].startswith('Medium'):
sploutdoor_medium = contents[i + 4][:-7]
sploutdoor_low = contents[i + 6][:-7]
elif (contents[i].endswith('dB(A)\n')):
if (contents[i - 3].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 3].startswith('Medium')):
sploutdoor_medium = contents[i][:-7]
if (contents[i - 6].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 6].startswith('Medium')):
sploutdoor_medium = contents[i + 2][:-7]
if (contents[i - 4].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 4].startswith('Medium')):
sploutdoor_medium = contents[i + 2][:-7]
else:
sploutdoor_low = contents[i][:-7]
sploutdoor_medium = contents[i][:-7]
elif (lines.startswith('Nivel de Potência sonora interior')):
SPL = 1
if (contents[i].startswith('Low')):
if contents[i + 2].startswith('Medium'):
splindoor_low = contents[i + 4][:-7]
splindoor_medium = contents[i + 6][:-7]
if contents[i].startswith('Medium'):
splindoor_medium = contents[i + 4][:-7]
splindoor_low = contents[i + 6][:-7]
elif (contents[i].endswith('dB(A)\n')):
if (contents[i - 3].startswith('Low')):
splindoor_low = contents[i][:-7]
if (contents[i - 3].startswith('Medium')):
splindoor_medium = contents[i][:-7]
if (contents[i - 6].startswith('Low')):
splindoor_low = contents[i][:-7]
if (contents[i - 6].startswith('Medium')):
splindoor_medium = contents[i + 2][:-7]
if (contents[i - 4].startswith('Low')):
splindoor_low = contents[i][:-7]
if (contents[i - 4].startswith('Medium')):
splindoor_medium = contents[i + 2][:-7]
else:
splindoor_low = contents[i][:-7]
splindoor_medium = contents[i][:-7]
elif (lines.startswith('Nivel de Potência sonora exterior')):
SPL = 1
if (contents[i].startswith('Low')):
if contents[i + 2].startswith('Medium'):
sploutdoor_low = contents[i + 4][:-7]
sploutdoor_medium = contents[i + 6][:-7]
if contents[i].startswith('Medium'):
sploutdoor_medium = contents[i + 4][:-7]
sploutdoor_low = contents[i + 6][:-7]
elif (contents[i].endswith('dB(A)\n')):
if (contents[i - 3].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 3].startswith('Medium')):
sploutdoor_medium = contents[i][:-7]
if (contents[i - 6].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 6].startswith('Medium')):
sploutdoor_medium = contents[i + 2][:-7]
if (contents[i - 4].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 4].startswith('Medium')):
sploutdoor_medium = contents[i + 2][:-7]
else:
sploutdoor_low = contents[i][:-7]
sploutdoor_medium = contents[i][:-7]
elif (lines.startswith('Livello di potenza acustica interna')):
SPL = 1
if (contents[i].startswith('Low')):
if contents[i + 2].startswith('Medium'):
splindoor_low = contents[i + 4][:-7]
splindoor_medium = contents[i + 6][:-7]
if contents[i].startswith('Medium'):
splindoor_medium = contents[i + 4][:-7]
splindoor_low = contents[i + 6][:-7]
elif (contents[i].endswith('dB(A)\n')):
if (contents[i - 3].startswith('Low')):
splindoor_low = contents[i][:-7]
if (contents[i - 3].startswith('Medium')):
splindoor_medium = contents[i][:-7]
if (contents[i - 6].startswith('Low')):
splindoor_low = contents[i][:-7]
if (contents[i - 6].startswith('Medium')):
splindoor_medium = contents[i + 2][:-7]
if (contents[i - 4].startswith('Low')):
splindoor_low = contents[i][:-7]
if (contents[i - 4].startswith('Medium')):
splindoor_medium = contents[i + 2][:-7]
else:
splindoor_low = contents[i][:-7]
splindoor_medium = contents[i][:-7]
elif (lines.startswith('Livello di potenza acustica externa')):
SPL = 1
if (contents[i].startswith('Low')):
if contents[i + 2].startswith('Medium'):
sploutdoor_low = contents[i + 4][:-7]
sploutdoor_medium = contents[i + 6][:-7]
if contents[i].startswith('Medium'):
sploutdoor_medium = contents[i + 4][:-7]
sploutdoor_low = contents[i + 6][:-7]
elif (contents[i].endswith('dB(A)\n')):
if (contents[i - 3].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 3].startswith('Medium')):
sploutdoor_medium = contents[i][:-7]
if (contents[i - 6].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 6].startswith('Medium')):
sploutdoor_medium = contents[i + 2][:-7]
if (contents[i - 4].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 4].startswith('Medium')):
sploutdoor_medium = contents[i + 2][:-7]
else:
sploutdoor_low = contents[i][:-7]
sploutdoor_medium = contents[i][:-7]
elif (lines == 'Refrigerant\n'):
if (contents[i - 3] == 'Mass Of\n'):
continue
refrigerant = (contents[i])
elif (lines.startswith('Mass Of') == 1):
if (lines == 'Mass Of\n'):
mass = contents[i + 1]
elif (lines.endswith('kg\n') == 1):
mass = contents[i - 2]
mass = mass[20:]
else:
mass = contents[i]
elif lines.startswith('Average'):
| |
<reponame>NVlabs/iccad2020-GPUgatesim
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# sys.argv[1] = block name
# sys.argv[2] = testname
# sys.argv[3] = cycles
# sys.argv[4] = clock period
# python code that uses pytorch and DGL.ai packages to perform per cycle, zero delay mode 2 value oblivious simulation with parallelism across cycles and gates.
# suggest run on GPU, need following packages
# command line variables described above.
# script takes as input a lil_matrix graph object, traces of input port and register outputs in array format, the clock period, and the number of cycles to be simulated
# outputs an array/tensor with the simulated values for the combinational logic
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import dgl
import dgl.function as fn
from dgl import DGLGraph
import pickle
import numpy as np
import networkx as nx
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys
import argparse
import glob, os
import re
from openpyxl import load_workbook
import openpyxl
import pandas as pd
from pandas import DataFrame
from sklearn import preprocessing
from datetime import datetime
# Command-line configuration (positional arguments; see the header comment above).
BLOCK=sys.argv[1]        # block/netlist name; used to locate the graph pkl and trace files
TESTNAME=sys.argv[2]     # test name; used to locate the input trace file
CYCLES=int(sys.argv[3])  # number of clock cycles to simulate
# Example hard-coded values kept for reference / local debugging:
#BLOCK = "mul3"
#TESTNAME='random10cycles'
#CYCLES=10
def is_update_edge(edges):
    """Edge predicate for DGL filter_edges: True where the 'edge_type' feature equals 2 (register/update edges)."""
    return edges.data['edge_type'] == 2
def is_prop_edge(edges):
    """Edge predicate for DGL filter_edges: True for combinational (propagation) edges, i.e. 'edge_type' != 2."""
    return edges.data['edge_type'] != 2
#data loading, builds the DGL graph from lil_matrix graph, and loads the input traces into array format
def build_graph(pkl, traces_features, total_cycles):
    """Build the register-cut (combinational-only) DGL graph and the per-cycle waveform tensor.

    Parameters
    ----------
    pkl : str
        Path to a pickled archive with 'adjacency_matrix', 'adjacency_pin_ids',
        'adjacency_edge_types' (lil_matrix fields), 'cell_index' and 'cell_ids'.
    traces_features : str
        Path to the trace file; each line is "<cell instance name> <v0> <v1> ..."
        with one 0/1 value per cycle.
    total_cycles : int
        Number of cycles of trace data to load.

    Returns
    -------
    tuple
        (prop, features_array, cell_names):
        prop          -- DGLGraph with register (type-2) edges removed, cutting it into pipe stages;
        features_array-- ByteTensor [num_nodes, total_cycles], initialized to 9 ('x'),
                         VDD/GND rows pinned, known traces filled in;
        cell_names    -- bi-directional dict mapping row index <-> cell instance name.
    """
    data = np.load(pkl, allow_pickle=True)    # archive holds the lil_matrix graph + feature fields
    Adj = data['adjacency_matrix']            # graph structure
    PinOrder = data['adjacency_pin_ids']      # pin order of each net connection / graph edge
    EdgeTypes = data['adjacency_edge_types']  # edge types; type 2 marks register (cut) edges
    print("pkl loaded")
    # create the full graph
    g = dgl.DGLGraph()
    ll = Adj.toarray() ; ee = PinOrder.toarray() ; tt = EdgeTypes.toarray() ;
    g.add_nodes(ll.shape[0])
    edges = np.argwhere(ll == True) ; src, dst = tuple(zip(*edges)) ; g.add_edges(src, dst) ;
    print("graph created")
    # attach edge/node features
    edge_orders = np.argwhere(ee > 0)
    g.edges[edge_orders[:,0],edge_orders[:,1]].data['edge_type'] = th.ByteTensor(tt[edge_orders[:,0],edge_orders[:,1]])
    cell_names = data['cell_index']  # dict: waveform tensor row index -> cell instance name (e.g. row 0 is "I1234/Z")
    cell_types = data['cell_ids'] ; cell_types = th.LongTensor(cell_types) ; g.ndata['cell_type'] = cell_types
    # build the propagation graph: drop register (type 2) edges to cut the graph into pipe stages
    prop = dgl.DGLGraph() ; prop.add_nodes(ll.shape[0]) ; edges = g.filter_edges(is_prop_edge) ;
    srcs = g.edges()[0][edges] ; dsts = g.edges()[1][edges] ; prop.add_edges(srcs, dsts) ;
    # pin-order edge feature
    prop.edges[srcs,dsts].data['x'] = th.Tensor(ee[srcs,dsts])
    prop.edata['x'] = prop.edata['x'].type(th.ByteTensor)
    # node features
    prop.ndata['cell_type'] = cell_types
    print("cut graph done")
    ## graphs done, now load the input waveforms
    cell_names.update( { v:k for k,v in cell_names.items() } )  # make the dict bi-directional (index <-> name)
    # initialize every signal at every cycle to 9, which means 'x' (unknown)
    # (the previous np.empty allocation here was dead code -- it was immediately
    # overwritten by np.full -- so it has been removed)
    features_array = np.full((len(g.nodes()), total_cycles), 9)
    features_array[len(g.nodes())-1] = np.full((1, total_cycles), 1)  # last node is VDD, for tie-hi's in the netlist
    features_array[len(g.nodes())-2] = np.full((1, total_cycles), 0)  # 2nd-to-last node is GND, for tie-lo's
    with open(traces_features, 'r') as f:  # known trace waveforms: "<instance> <v at init> <v at cycle 1> ..."
        for line in f:
            signals = line.split(' ', 1)
            reg_name = signals[0]
            # look up the row for this instance via the bi-directional dict and store
            # its per-cycle 0/1 values. np.fromstring(text, sep=' ') is deprecated;
            # parsing via split() is the supported equivalent.
            features_array[cell_names[reg_name]] = np.array(signals[1].split(), dtype=int)[0:total_cycles]
    features_array = th.ByteTensor(features_array)
    return prop, features_array, cell_names
# Build the cut graph and the waveform tensor for the requested block/test.
prop_block, features_block, cell_names_block= build_graph(BLOCK + "_graph_object", "traces_ios_" + BLOCK + "_" + TESTNAME + ".tbl", CYCLES)
# +
#after this step you can query the DGL.ai graph object (prop_block) to see what information is stored in the graph
#you can query features_block, along with cell_names_block, to see how the waveforms will be set up
# -
#load the truth tables for the simulation
def dec_to_bin(x, bit_width):
    """Return non-negative integer x as a binary string zero-padded to bit_width characters."""
    return format(x, 'b').zfill(bit_width)
# Per-cell-type truth tables, keyed by cell name; filled in by the loop below.
logic_truth_tables = {}
#list all the cell types in your standard cell library. Example sample shown. Ordering of list should match cell_type node feature
#ordering of pin names should match pin order 'x' edge feature
#each entry is (cell name, ordered input pin names, python expression over 'bits' evaluating the output)
cells_list=[ ("AND2", ['A1', 'A2'], "int(bits[0] and bits[1])"), \
             ("AND3", ['A1', 'A2', 'A3'], "int(bits[0] and bits[1] and bits[2])"), \
             ("AO211", ['A1', 'A2', 'B', 'C'], "int((bits[0] and bits[1]) or bits[2] or bits[3])"), \
             ("AO21", ['A1', 'A2', 'B'], "int((bits[0] and bits[1]) or bits[2])"), \
             ("AO221", ['A1', 'A2', 'B1', 'B2', 'C'], "int((bits[0] and bits[1]) or (bits[2] and bits[3]) or bits[4])"), \
             ("AO31", ['A1', 'A2', 'A3', 'B'], "int((bits[0] and bits[1] and bits[2]) or bits[3])"), \
             ("AO32", ['A1', 'A2', 'A3', 'B1', 'B2'], "int((bits[0] and bits[1] and bits[2]) or (bits[3] and bits[4]))"), \
             ("AOI21", ['A1', 'A2', 'B'], "int(not((bits[0] and bits[1]) or bits[2]))"), \
             ("AOI31", ['A1', 'A2', 'A3', 'B'], "int(not((bits[0] and bits[1] and bits[2]) or bits[3]))"), \
             ("BUF", ['I'], "int(bits[0])"), \
             ("MUX2", ['I0', 'I1', 'S'], "int((not(bits[2]) and bits[0]) or (bits[2] and bits[1]))"), \
             ("MUX3", ['I0', 'I1', 'I2', 'S0', 'S1'], "int((not(bits[3]) and not(bits[4]) and bits[0]) or (bits[3] and (not(bits[4])) and bits[1]) or (not(bits[3]) and bits[4] and bits[2]))"), \
             ("INV", ['I'], "int(not(bits[0]))"), \
             ("NAND2", ['A1', 'A2'], "int(not(bits[0] and bits[1]))"), \
             ("NAND3", ['A1', 'A2', 'A3'], "int(not(bits[0] and bits[1] and bits[2]))"), \
             ("NOR2", ['A1', 'A2'], "int(not(bits[0] or bits[1]))"), \
             ("NOR3", ['A1', 'A2', 'A3'], "int(not(bits[0] or bits[1] or bits[2]))"), \
             ("OR2", ['A1', 'A2'], "int(bits[0] or bits[1])"), \
             ("FA_SUM", ['A', 'B', 'C'], "int(bits[0] ^ bits[1] ^ bits[2])"), \
             ("FA_CO", ['A', 'B', 'C'], "int((bits[0] and bits[1]) or (bits[2] and (bits[0] ^ bits[1])))"), \
             ("HA_SUM", ['A', 'B'], "int(bits[0] ^ bits[1])"), \
             ("HA_CO", ['A', 'B'], "int(bits[0] and bits[1])"), \
             ("OA21", ['A1', 'A2', 'B'], "int((bits[0] or bits[1]) and bits[2])"), \
             ("OA31", ['A1', 'A2', 'A3', 'B'], "int((bits[0] or bits[1] or bits[2]) and bits[3])"), \
             ("OAI21", ['A1', 'A2', 'B'], "int(not((bits[0] or bits[1]) and bits[2]))"), \
             ("OAI31", ['A1', 'A2', 'A3', 'B'], "int(not((bits[0] or bits[1] or bits[2]) and bits[3]))"), \
             ("OR3", ['A1', 'A2', 'A3'], "int(bits[0] or bits[1] or bits[2])"), \
             ("XOR2", ['A1', 'A2'], "int(bits[0] ^ bits[1])"), \
             ("XOR3", ['A1', 'A2', 'A3'], "int(bits[0] ^ bits[1] ^ bits[2])") ]
#all the following does is translate the 'string' described logic above into array format results of logic evaluation
cell_counter = 0
for cell_info in cells_list:
    cell_name=cell_info[0]
    cell_pins=cell_info[1]
    cell_func=cell_info[2]
    logic_truth_tables[cell_name]={}
    logic_truth_tables[cell_name]['pins']=cell_pins
    # cell_id matches the 'cell_type' node feature ordering (list position in cells_list)
    logic_truth_tables[cell_name]['cell_id']=cell_counter ; cell_counter+=1 ;
    # one row per input combination; columns are the input bits plus the output bit
    truth_table = np.zeros(shape=(2**len(logic_truth_tables[cell_name]['pins']),len(logic_truth_tables[cell_name]['pins'])+1))
    for i in range(2**len(logic_truth_tables[cell_name]['pins'])):
        bits=dec_to_bin(i,len(logic_truth_tables[cell_name]['pins']))
        bits=[int(b) for b in str(bits)]
        # NOTE: eval() runs the trusted, hard-coded expressions from cells_list above;
        # those expressions reference the local name 'bits', so do not rename it.
        output = eval(cell_func)
        bits.append(output)
        truth_table[i] = bits
    logic_truth_tables[cell_name]['truth_table']=truth_table
#make the truth table a 2d 'dictionary-like' tensor
#(32 columns is enough for the widest cell here: 5 input pins -> 2**5 = 32 truth-table entries)
out_tables=th.zeros([len(logic_truth_tables.keys()), 32], dtype=th.uint8)
for cell_type in logic_truth_tables.keys():
    out_tables[logic_truth_tables[cell_type]['cell_id'],0:len(logic_truth_tables[cell_type]['truth_table'][:,-1])]=th.ByteTensor(logic_truth_tables[cell_type]['truth_table'][:,-1])
print(out_tables.element_size())
#out_tables houses ALL the logic in the netlist.
#each row corresponds to a different gate. for example, AND2 is row 0.
#each node in the graph has a 'cell_type' node feature. This matches the row number in out_tables.
#For example, an AND2 gate node will have 'cell_type'=0. This way, the simulator knows the graph node is an AND2 gate,
#and will reference row 0 in out_tables to get the output pin value
#each column in out_tables corresponds to an entry in each gate type's logic truth table.
#For example, row 0, column 0, corresponds to when AND2 gate inputs are [0,0].
#row 0, column 1, corresponds to when AND2 gate inputs are [0,1]
#row 0, column 2, corresponds to when AND2 gate inputs are [1,0]
#row 0, column 3, corresponds to when AND2 gate inputs are [1,1]
#which input edge corresponds to which pin order in the logic truth table is noted by edge feature 'x' in the graph
#in this way, if we know the inputs to the gate, we simply do a lookup-table lookup to get the output value
#set up the simulator
#calculate which column we should use when doing the LUT lookup
#'h' is just a node feature created during actually running the simulation that stores the combinational logic waveforms
def gcn_msg(edges):
return {'m' : | |
-1, -1, 1, -1, -1, -1, 1, -1, -1],
[-1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, 1, -1, 1],
[1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, 1, -1],
[-1, -1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1],
[-1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1],
[1, -1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, 1],
[-1, -1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, 1, -1, 1, -1],
[1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1],
[1, -1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, -1, -1],
[-1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1],
[1, 1, 1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1],
[-1, -1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1, 1, 1, 1],
[-1, -1, 1, -1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1],
[1, 1, 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, -1, -1, -1, 1],
[-1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, 1, -1],
[1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1],
[1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, -1],
[-1, -1, -1, -1, 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1],
[1, -1, 1, -1, -1, -1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1],
[-1, 1, 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1],
[-1, -1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1],
[1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1],
[-1, 1, 1, -1, -1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1],
[1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, 1],
[1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1, -1, -1],
[-1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1],
[1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, 1, 1, 1, 1, -1],
[-1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1],
[1, -1, -1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, -1, -1],
[-1, 1, -1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -1, 1],
[1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1],
[-1, -1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1],
[-1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1],
[1, -1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, 1],
[-1, -1, -1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1],
[1, 1, -1, 1, 1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, 1],
[1, -1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1],
[-1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, 1, -1, -1, 1],
[1, 1, -1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, 1, -1],
[-1, -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1],
[-1, 1, -1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1],
[1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1],
[-1, -1, 1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1],
[1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, 1, 1],
[1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1],
[-1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, -1, 1],
[1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, 1, -1, -1, 1, -1],
[-1, 1, -1, -1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1],
[-1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1],
[1, 1, -1, -1, -1, -1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1],
[-1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1],
[1, -1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1, 1, 1, 1],
[1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, -1, -1, 1],
[1, -1, 1, -1, 1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1],
[-1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, 1, -1, 1, 1],
[-1, -1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1],
[1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1, 1, 1, -1, 1],
[-1, 1, -1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1],
[1, -1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, 1, 1, 1, 1],
[1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1],
[-1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1],
[1, 1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1],
[-1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, 1, 1],
[-1, 1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1],
[1, -1, 1, 1, 1, -1, 1, 1, 1, -1, -1, -1, -1, 1, -1, 1],
[-1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, 1, -1],
[1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1],
[1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, -1],
[-1, 1, 1, 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, 1],
[1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, 1, -1, 1, -1],
[-1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, 1],
[-1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1],
[1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, 1, -1, 1],
[-1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, -1],
[1, 1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, -1],
[-1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1],
[1, -1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, 1, -1],
[-1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, -1, 1, 1],
[-1, -1, -1, -1, 1, -1, 1, | |
import random
from typing import Iterator, List, Tuple, Dict
import pytest
from words.exceptions.parser_exceptions import StackSizeException, InvalidPredicateException, \
UndefinedIdentifierException, IdentifierPreviouslyDefinedException
from words.lexer.lex import Lexer
from words.lexer.lex_util import DebugData
from words.parser.parse import Parser
from words.token_types.lexer_token import LexerToken
from words.token_types.parser_token import NumberParserToken, BooleanParserToken, MacroParserToken, ParserToken, \
WhileParserToken, IfParserToken, ValueParserToken, IdentParserToken, VariableParserToken, ReturnParserToken, \
FunctionParserToken, LambdaParserToken, ArithmeticOperatorParserToken, BooleanOperatorParserToken, \
DictionaryOperatorParserToken
from words.interpreter.interpret_util import exhaustive_interpret_tokens
def _parse_from_string(words: str) -> List[ParserToken]:
    """Lex and parse a Words program supplied as a string; return its parser tokens."""
    numbered_lines = enumerate(iter(words.splitlines()))
    lexed: Iterator[LexerToken] = Lexer.lex_file_contents(numbered_lines)
    return Parser.parse(lexed).tokens
def _execute_from_string(words: str) -> Tuple[List[ParserToken], Dict[str, ParserToken]]:
    """Lex, parse and interpret a Words program; return the resulting (stack, dictionary)."""
    numbered_lines = enumerate(iter(words.splitlines()))
    lexed: Iterator[LexerToken] = Lexer.lex_file_contents(numbered_lines)
    return exhaustive_interpret_tokens(Parser.parse(lexed).tokens, [], {})
class TestParserToken:
    """Exercise base-class functionality through a minimal concrete subclass."""
    def test_debug_str(self):
        class _Concrete(ParserToken):
            def execute(self, stack: list, dictionary: dict) -> Tuple[list, dict]:
                """Abstract method does not need to be tested"""
        token = _Concrete(DebugData(0))
        assert isinstance(token.debug_str(), str)
class TestDictionaryToken:
    """The DictionaryToken base class has no behavior of its own to test."""
class TestNumberParserToken:
    """Tests for NumberParserToken execution."""
    def test_execute_positive(self):
        """Executing a number token pushes its value onto the stack."""
        value = random.randint(0, 100)
        token = NumberParserToken(DebugData(0), value)
        assert token.execute([], {}) == ([value], {})
    def test_execute_return_value(self):
        """Execution must produce a non-empty result."""
        token = NumberParserToken(DebugData(0), 16)
        assert token.execute([], {}) != ([], {})
class TestBooleanParserToken:
    """Tests for BooleanParserToken construction and execution."""
    def test_init_value(self):
        """The string literals "True"/"False" are converted to real bools at construction."""
        for literal in ("True", "False"):
            assert isinstance(BooleanParserToken(DebugData(0), literal).value, bool)
    def test_execute_positive(self):
        """Executing a boolean token pushes the matching bool onto the stack."""
        assert BooleanParserToken(DebugData(0), "True").execute([], {}) == ([True], {})
        assert BooleanParserToken(DebugData(0), "False").execute([], {}) == ([False], {})
    def test_execute_return_value(self):
        """Execution must produce a non-empty result for both literals."""
        for literal in ("True", "False"):
            assert BooleanParserToken(DebugData(0), literal).execute([], {}) != ([], {})
class TestMacroParserToken:
    """Tests for the __PRINT__ macro token."""
    def test_execute_print_positive(self, capsys):
        """The print macro writes the topmost stack value to stdout."""
        token = MacroParserToken(DebugData(0), "__PRINT__")
        value = 42
        token.execute([value], {})
        assert capsys.readouterr().out == f"{value}\n"
    def test_execute_print_invalid_stack_size(self):
        """Printing from an empty stack raises a stack size error."""
        token = MacroParserToken(DebugData(0), "__PRINT__")
        with pytest.raises(StackSizeException):
            token.execute([], {})
    def test_execute_print_output(self):
        """The stack and dictionary are returned unchanged after printing."""
        token = MacroParserToken(DebugData(0), "__PRINT__")
        value = 42
        assert token.execute([value], {}) == ([value], {})
class TestWhileParserToken:
    """Tests for WhileParserToken: predicate validation and loop execution."""
    def test_execute_positive(self):
        """Test a valid while loop configuration."""
        # Fixture: declare SOME_VAR and initialize it to 0.
        initial_state: Tuple[List, Dict] = _execute_from_string(
            "VARIABLE SOME_VAR "
            "0 ASSIGN SOME_VAR"
        )
        predicate: List[ParserToken] = _parse_from_string(
            "SOME_VAR 10 < "
        )
        body: List[ParserToken] = _parse_from_string(
            "SOME_VAR 1 + "
            "ASSIGN SOME_VAR"
        )
        # Test: the loop increments SOME_VAR until the predicate fails at 10.
        while_statement = WhileParserToken(DebugData(0), predicate, body)
        result = while_statement.execute(*initial_state)
        assert result[1]['SOME_VAR'] == 10
    def test_execute_non_bool_predicate(self):
        """A while loop requires a valid (boolean) predicate."""
        # Fixture: a predicate that leaves a number, not a bool, on the stack.
        predicate: List[ParserToken] = _parse_from_string(
            "10"
        )
        body: List[ParserToken] = _parse_from_string(
            "203"
        )
        # Test
        while_statement = WhileParserToken(DebugData(0), predicate, body)
        with pytest.raises(InvalidPredicateException):
            while_statement.execute([], {})
    def test_execute_false_predicate(self):
        """The while body should not run if the predicate is never true."""
        while_statement = WhileParserToken(DebugData(0), _parse_from_string("False"), _parse_from_string("0"))
        assert not while_statement.execute([], {})[0]
class TestIfParserToken:
    """Tests for IfParserToken: condition validation, if body and else body."""
    def test_execute_true_condition(self):
        """Test a correct if statement with a body and an else body."""
        # Fixture
        condition = _execute_from_string(
            "True"
        )
        if_body = _parse_from_string(
            "3"
        )
        # Assert stack equals the value in if body
        if_statement = IfParserToken(DebugData(0), if_body)
        assert if_statement.execute(*condition)[0][0] == 3
    def test_execute_false_condition(self):
        """Without an else body, a false condition leaves the stack untouched."""
        # Fixture
        condition = _execute_from_string(
            "False"
        )
        if_body = _parse_from_string(
            "7"
        )
        # Assert stack is empty, so if body is not executed
        if_statement = IfParserToken(DebugData(0), if_body)
        assert not if_statement.execute(*condition)[0]
    def test_execute_false_condition_else(self):
        """If the condition is false, the if token should execute the else body."""
        # Fixture
        condition = _execute_from_string(
            "False"
        )
        if_body = _parse_from_string(
            "7"
        )
        else_body = _parse_from_string(
            "13"
        )
        # Assert stack equals the value in else body
        if_statement = IfParserToken(DebugData(0), if_body, else_body)
        assert if_statement.execute(*condition)[0][0] == 13
    def test_execute_non_bool_predicate(self):
        """The if statement requires a boolean condition value before executing."""
        # Fixture: the condition leaves a number, not a bool, on the stack.
        condition = _execute_from_string(
            "0"
        )
        if_body = _parse_from_string(
            "9"
        )
        else_body = _parse_from_string(
            "2"
        )
        # Assert an invalid predicate exception is raised if the condition is not a boolean value
        if_statement = IfParserToken(DebugData(0), if_body, else_body)
        with pytest.raises(InvalidPredicateException):
            if_statement.execute(*condition)
class TestVariableParserToken:
    """Tests for VariableParserToken: declaration, duplicate detection and visiting."""
    def test_execute_positive(self):
        """Test creating a variable in the dictionary."""
        variable = VariableParserToken(DebugData(0), "SOME_VAR")
        result = variable.execute([], {})
        # Executing a declaration registers the variable under its name.
        assert "SOME_VAR" in result[1]
    def test_execute_duplicate_definition(self):
        """
        If a variable is defined that already exists an
        exception should be raised, since shadowing is not allowed
        """
        variable_definition = _execute_from_string(
            "VARIABLE DEFINED_VAR"
        )
        variable = VariableParserToken(DebugData(0), "DEFINED_VAR")
        with pytest.raises(IdentifierPreviouslyDefinedException):
            variable.execute(*variable_definition)
    def test_visit_positive(self):
        """Test visiting a variable returns its value on the stack."""
        variable = VariableParserToken(DebugData(0), "VAR")
        variable.assigned_value = 62
        # Visiting pushes the assigned value on top of the existing stack.
        assert variable.visit([12], {})[0] == [12, 62]
class TestValueParserToken:
    """Tests for ValueParserToken."""
    def test_execute(self):
        """Value tokens are only bound when instantiating a function; direct execution must fail."""
        token = ValueParserToken(DebugData(0), "SOME_VALUE")
        with pytest.raises(RuntimeError):
            token.execute([], {})
class TestIdentParserToken:
    """Tests for IdentParserToken lookups of variables and functions."""
    def test_execute_negative(self):
        """If a key that is not in the dictionary is provided, an undefined variable exception should be raised."""
        # Fixture: declare variable A (but look up B below).
        var_decl = _execute_from_string(
            "VARIABLE A"
        )
        # Assert looking up an unknown identifier raises
        identifier = IdentParserToken(DebugData(0), "B")
        with pytest.raises(UndefinedIdentifierException):
            assert identifier.execute(*var_decl)[0][0] == VariableParserToken.VarUnassigned
    def test_execute_variable(self):
        """If the variable identifier key is found in the dictionary, its value should be returned."""
        # Fixture
        var_decl = _execute_from_string(
            "VARIABLE X"
        )
        # Assert the token value is retrieved if it exists in the dictionary
        identifier = IdentParserToken(DebugData(0), "X")
        assert identifier.execute(*var_decl)[0][0] == VariableParserToken.VarUnassigned
    def test_execute_function(self):
        """If the function identifier key is found in the dictionary, it should be executed."""
        # Fixture: a function that pushes three values and returns all three.
        func_decl = _execute_from_string(
            "| SOME_FUNC ( ) "
            "8 12 81751692 "
            "RETURN 3 |"
        )
        # Assert the function is executed and its return value is retrieved
        identifier = IdentParserToken(DebugData(0), "SOME_FUNC")
        result = identifier.execute(*func_decl)
        assert result[0] == [8, 12, 81751692]
class TestReturnParserToken:
    """Tests for ReturnParserToken: returning 0..n values from a local stack."""
    def test_execute_positive(self):
        """Test the correct amount of values are returned onto the stack."""
        # Fixture
        values_on_stack_in_function = _execute_from_string(
            "1512 125 92"
        )
        # Assert all values are on the new stack after returning them
        return_token = ReturnParserToken(DebugData(0), 3)
        result = return_token.execute(*values_on_stack_in_function)
        assert result[0] == [1512, 125, 92]
    def test_execute_return_not_all_values(self):
        """If the return count is smaller than the local stack, only count values should be returned."""
        # Fixture
        values_on_stack_in_function = _execute_from_string(
            "92 82928 9282 923839 162"
        )
        # Assert only the last two values are returned from the stack
        return_token = ReturnParserToken(DebugData(0), 2)
        result = return_token.execute(*values_on_stack_in_function)
        assert result[0] == [923839, 162]
    def test_execute_not_enough_values_on_stack(self):
        """If fewer values exist on the stack than the return expects, an exception should be raised."""
        # Fixture
        values_on_stack_in_function = _execute_from_string(
            "92 92"
        )
        # Assert an exception is raised, since not enough values are on the stack
        return_token = ReturnParserToken(DebugData(0), 3)
        with pytest.raises(StackSizeException):
            return_token.execute(*values_on_stack_in_function)
    def test_execute_zero_count(self):
        """
        If the return count is zero, no value should be returned,
        this is equivalent to having no RETURN statement.
        """
        # Fixture (local variable name typo "funcion" fixed for consistency
        # with the other tests in this class)
        values_on_stack_in_function = _execute_from_string(
            "9829 929"
        )
        # Assert no value is returned
        return_token = ReturnParserToken(DebugData(0), 0)
        result = return_token.execute(*values_on_stack_in_function)
        assert not result[0]
class TestFunctionParserToken:
    def test_execute_positive(self):
        """Test placing a function in the dictionary that does not yet exist."""
        function_decl = FunctionParserToken(DebugData(0), "SOME_FUNC", [], [])
        result = function_decl.execute([], {})
        # Executing a function definition registers it under its name.
        assert "SOME_FUNC" in result[1]
    def test_execute_duplicate_definition(self):
        """If the function was already defined, it cannot be defined again."""
        # Fixture: define DEFINED_FUNC once via the interpreter.
        initial_state = _execute_from_string(
            "| DEFINED_FUNC ( ) RETURN 0 |"
        )
        # Assert an exception is raised if the function was previously defined
        function_decl = FunctionParserToken(DebugData(0), "DEFINED_FUNC", [], [])
        with pytest.raises(IdentifierPreviouslyDefinedException):
            function_decl.execute(*initial_state)
    def test_visit_positive(self):
        """Test executing the function body."""
        # Fixture: a function whose body pushes three values and returns all three.
        function_with_body = _parse_from_string(
            "| FNC_BODY ( ) 1 2 3 RETURN 3 |"
        )[0]
        # Assert the parsed string returns a function
        assert isinstance(function_with_body, FunctionParserToken)
        # Assert the visit function executes the body correctly.
        assert function_with_body.visit([], {})[0] == [1, 2, 3]
    def test_visit_setup_parameters(self):
        """The parameters should be accessible during visit."""
        # Fixture: a one-parameter function that returns its argument unchanged.
        function_with_params = _parse_from_string(
            "| FNC_PARAM ( VALUE X ) X RETURN 1 |"
        )[0]
        # Assert the parsed string returns a function
        assert isinstance(function_with_params, FunctionParserToken)
        # Assert the visit function accepts the parameter and returns it
        assert function_with_params.visit([20], {})[0] == [20]
def test_visit_parameters_eaten_from_stack(self):
"""Assert all parameters taken are removed from the stack."""
# Fixture
function = _parse_from_string(
| |
stub, and fake the request.
with mock.patch.object(
type(client.transport.list_snapshots),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = bigtable_table_admin.ListSnapshotsResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_table_admin.ListSnapshotsResponse())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_snapshots(
parent='parent_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
@pytest.mark.asyncio
async def test_list_snapshots_flattened_error_async():
    """Passing both a request object and flattened fields must raise ValueError."""
    client = BigtableTableAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.list_snapshots(
            bigtable_table_admin.ListSnapshotsRequest(),
            parent='parent_value',
        )
def test_list_snapshots_pager():
    """The sync pager iterates transparently across pages and forwards routing metadata."""
    client = BigtableTableAdminClient(
        # Instantiate the credentials; the bare class was passed before, which is
        # inconsistent with the other client constructions in this file.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_snapshots),
            '__call__') as call:
        # Set the response to a series of pages.
        call.side_effect = (
            bigtable_table_admin.ListSnapshotsResponse(
                snapshots=[
                    table.Snapshot(),
                    table.Snapshot(),
                    table.Snapshot(),
                ],
                next_page_token='abc',
            ),
            bigtable_table_admin.ListSnapshotsResponse(
                snapshots=[],
                next_page_token='def',
            ),
            bigtable_table_admin.ListSnapshotsResponse(
                snapshots=[
                    table.Snapshot(),
                ],
                next_page_token='ghi',
            ),
            bigtable_table_admin.ListSnapshotsResponse(
                snapshots=[
                    table.Snapshot(),
                    table.Snapshot(),
                ],
            ),
            RuntimeError,
        )

        # The routing header the pager is expected to carry.
        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ('parent', ''),
            )),
        )
        pager = client.list_snapshots(request={})

        assert pager._metadata == metadata

        # Iterating the pager yields every snapshot across all four pages.
        results = [i for i in pager]
        assert len(results) == 6
        assert all(isinstance(i, table.Snapshot)
                   for i in results)
def test_list_snapshots_pages():
    """The sync pager exposes raw pages carrying the expected page tokens."""
    client = BigtableTableAdminClient(
        # Instantiate the credentials; the bare class was passed before, which is
        # inconsistent with the other client constructions in this file.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_snapshots),
            '__call__') as call:
        # Set the response to a series of pages.
        call.side_effect = (
            bigtable_table_admin.ListSnapshotsResponse(
                snapshots=[
                    table.Snapshot(),
                    table.Snapshot(),
                    table.Snapshot(),
                ],
                next_page_token='abc',
            ),
            bigtable_table_admin.ListSnapshotsResponse(
                snapshots=[],
                next_page_token='def',
            ),
            bigtable_table_admin.ListSnapshotsResponse(
                snapshots=[
                    table.Snapshot(),
                ],
                next_page_token='ghi',
            ),
            bigtable_table_admin.ListSnapshotsResponse(
                snapshots=[
                    table.Snapshot(),
                    table.Snapshot(),
                ],
            ),
            RuntimeError,
        )
        pages = list(client.list_snapshots(request={}).pages)
        # The last page has no next_page_token, hence the trailing ''.
        for page_, token in zip(pages, ['abc','def','ghi', '']):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_snapshots_async_pager():
    """The async pager yields every snapshot across all pages via async iteration."""
    client = BigtableTableAdminAsyncClient(
        # Instantiate the credentials; the bare class was passed before, which is
        # inconsistent with the other client constructions in this file.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_snapshots),
            '__call__', new_callable=mock.AsyncMock) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            bigtable_table_admin.ListSnapshotsResponse(
                snapshots=[
                    table.Snapshot(),
                    table.Snapshot(),
                    table.Snapshot(),
                ],
                next_page_token='abc',
            ),
            bigtable_table_admin.ListSnapshotsResponse(
                snapshots=[],
                next_page_token='def',
            ),
            bigtable_table_admin.ListSnapshotsResponse(
                snapshots=[
                    table.Snapshot(),
                ],
                next_page_token='ghi',
            ),
            bigtable_table_admin.ListSnapshotsResponse(
                snapshots=[
                    table.Snapshot(),
                    table.Snapshot(),
                ],
            ),
            RuntimeError,
        )
        async_pager = await client.list_snapshots(request={},)
        assert async_pager.next_page_token == 'abc'
        responses = []
        async for response in async_pager:
            responses.append(response)

        # All four pages are flattened into six snapshots.
        assert len(responses) == 6
        assert all(isinstance(i, table.Snapshot)
                   for i in responses)
@pytest.mark.asyncio
async def test_list_snapshots_async_pages():
    """The async pager exposes raw pages carrying the expected page tokens."""
    client = BigtableTableAdminAsyncClient(
        # Instantiate the credentials; the bare class was passed before, which is
        # inconsistent with the other client constructions in this file.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_snapshots),
            '__call__', new_callable=mock.AsyncMock) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            bigtable_table_admin.ListSnapshotsResponse(
                snapshots=[
                    table.Snapshot(),
                    table.Snapshot(),
                    table.Snapshot(),
                ],
                next_page_token='abc',
            ),
            bigtable_table_admin.ListSnapshotsResponse(
                snapshots=[],
                next_page_token='def',
            ),
            bigtable_table_admin.ListSnapshotsResponse(
                snapshots=[
                    table.Snapshot(),
                ],
                next_page_token='ghi',
            ),
            bigtable_table_admin.ListSnapshotsResponse(
                snapshots=[
                    table.Snapshot(),
                    table.Snapshot(),
                ],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_snapshots(request={})).pages:
            pages.append(page_)
        # The last page has no next_page_token, hence the trailing ''.
        for page_, token in zip(pages, ['abc','def','ghi', '']):
            assert page_.raw_page.next_page_token == token
def test_delete_snapshot(transport: str = 'grpc', request_type=bigtable_table_admin.DeleteSnapshotRequest):
    """delete_snapshot forwards the request to the gRPC stub and returns None."""
    client = BigtableTableAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 treats every field as optional at runtime, and the API itself is
    # mocked, so an empty request object is all that is needed.
    req = request_type()
    # Patch the stub's __call__ so no network traffic happens.
    stub_type = type(client.transport.delete_snapshot)
    with mock.patch.object(stub_type, '__call__') as rpc:
        rpc.return_value = None
        result = client.delete_snapshot(req)
        # Exactly one stub invocation, carrying a default DeleteSnapshotRequest.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == bigtable_table_admin.DeleteSnapshotRequest()
    # delete_snapshot has no payload, so the client surfaces None.
    assert result is None
def test_delete_snapshot_from_dict():
    # Re-run the sync test, but let the client coerce a plain dict into a
    # DeleteSnapshotRequest instead of receiving a proto message.
    test_delete_snapshot(request_type=dict)
def test_delete_snapshot_empty_call():
    """Coverage failsafe: a call with no request object and no flattened
    fields must still reach the stub with a default request."""
    client = BigtableTableAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Patch the stub's __call__ so no network traffic happens.
    stub_type = type(client.transport.delete_snapshot)
    with mock.patch.object(stub_type, '__call__') as rpc:
        client.delete_snapshot()
        # The stub fired once, and the request it saw is the empty default.
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == bigtable_table_admin.DeleteSnapshotRequest()
@pytest.mark.asyncio
async def test_delete_snapshot_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.DeleteSnapshotRequest):
    """Async delete_snapshot awaits the stub call and resolves to None."""
    client = BigtableTableAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Empty request is fine: proto3 fields are optional and the API is mocked.
    req = request_type()
    # Patch the stub's __call__ so no network traffic happens.
    stub_type = type(client.transport.delete_snapshot)
    with mock.patch.object(stub_type, '__call__') as rpc:
        # The async surface expects an awaitable; FakeUnaryUnaryCall(None)
        # resolves to None when awaited.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        result = await client.delete_snapshot(req)
        # The stub was invoked with the default request as its first argument.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == bigtable_table_admin.DeleteSnapshotRequest()
    # delete_snapshot has no payload, so the awaited result is None.
    assert result is None
@pytest.mark.asyncio
async def test_delete_snapshot_async_from_dict():
    # Re-run the async test, but let the client coerce a plain dict into a
    # DeleteSnapshotRequest instead of receiving a proto message.
    await test_delete_snapshot_async(request_type=dict)
def test_delete_snapshot_field_headers():
    """Routing fields in the request are propagated to the stub as
    x-goog-request-params metadata."""
    client = BigtableTableAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI must be sent as a field
    # header, so give `name` a non-empty value.
    req = bigtable_table_admin.DeleteSnapshotRequest()
    req.name = 'name/value'
    # Patch the stub's __call__ so no network traffic happens.
    stub_type = type(client.transport.delete_snapshot)
    with mock.patch.object(stub_type, '__call__') as rpc:
        rpc.return_value = None
        client.delete_snapshot(req)
        # One stub call, carrying the very request object we built.
        assert len(rpc.mock_calls) == 1
        _, call_args, call_kwargs = rpc.mock_calls[0]
        assert call_args[0] == req
    # The routing header derived from `name` is present in the call metadata.
    expected_header = ('x-goog-request-params', 'name=name/value')
    assert expected_header in call_kwargs['metadata']
@pytest.mark.asyncio
async def test_delete_snapshot_field_headers_async():
    """Async variant: routing fields in the request are propagated to the
    stub as x-goog-request-params metadata."""
    client = BigtableTableAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI must be sent as a field
    # header, so give `name` a non-empty value.
    req = bigtable_table_admin.DeleteSnapshotRequest()
    req.name = 'name/value'
    # Patch the stub's __call__ so no network traffic happens.
    stub_type = type(client.transport.delete_snapshot)
    with mock.patch.object(stub_type, '__call__') as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_snapshot(req)
        # The stub fired, and it saw the very request object we built.
        assert len(rpc.mock_calls)
        _, call_args, call_kwargs = rpc.mock_calls[0]
        assert call_args[0] == req
    # The routing header derived from `name` is present in the call metadata.
    expected_header = ('x-goog-request-params', 'name=name/value')
    assert expected_header in call_kwargs['metadata']
def test_delete_snapshot_flattened():
    """A flattened keyword argument is folded into the request object."""
    client = BigtableTableAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Patch the stub's __call__ so no network traffic happens.
    stub_type = type(client.transport.delete_snapshot)
    with mock.patch.object(stub_type, '__call__') as rpc:
        rpc.return_value = None
        # Invoke the method with a truthy value for each flattened field,
        # passed as a keyword argument rather than a request object.
        client.delete_snapshot(
            name='name_value',
        )
        # The request the stub received must carry the flattened value.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0].name == 'name_value'
def test_delete_snapshot_flattened_error():
    """Supplying both a request object and flattened fields is rejected."""
    client = BigtableTableAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mixing a request object with flattened keyword fields is ambiguous,
    # so the client must raise ValueError before any RPC is attempted.
    req = bigtable_table_admin.DeleteSnapshotRequest()
    with pytest.raises(ValueError):
        client.delete_snapshot(req, name='name_value')
@pytest.mark.asyncio
async def test_delete_snapshot_flattened_async():
    """Async flattened call: the keyword argument is folded into the request."""
    client = BigtableTableAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.delete_snapshot),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        # (A redundant `call.return_value = None`, immediately overwritten
        # by the line below, has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_snapshot(
            name='name_value',
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == 'name_value'
    # delete_snapshot has no payload; the previously-unused `response` is now
    # asserted, matching the sync test's response check.
    assert response is None
@pytest.mark.asyncio
async def test_delete_snapshot_flattened_error_async():
    """Async variant: a request object plus flattened fields is rejected."""
    client = BigtableTableAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mixing a request object with flattened keyword fields is ambiguous,
    # so the client must raise ValueError before any RPC is attempted.
    req = bigtable_table_admin.DeleteSnapshotRequest()
    with pytest.raises(ValueError):
        await client.delete_snapshot(req, name='name_value')
def test_create_backup(transport: str = 'grpc', request_type=bigtable_table_admin.CreateBackupRequest):
client = BigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_backup),
'__call__') as call:
# Designate an appropriate return value for the call.
| |
again!')
response_data = {
'error': message
}
logger.exception(message)
return Response(response_data, status=status.HTTP_400_BAD_REQUEST)
except requests.exceptions.RequestException:
message = ('A server error occured while processing zip file. '
'Please try again!')
response_data = {
'error': message
}
logger.exception(message)
return Response(response_data, status=status.HTTP_406_NOT_ACCEPTABLE)
# Extract zip file
try:
zip_ref = zipfile.ZipFile(CHALLENGE_ZIP_DOWNLOAD_LOCATION, 'r')
zip_ref.extractall(join(BASE_LOCATION, unique_folder_name))
zip_ref.close()
except zipfile.BadZipfile:
message = ('The zip file contents cannot be extracted. '
'Please check the format!')
response_data = {
'error': message
}
return Response(response_data, status=status.HTTP_400_BAD_REQUEST)
# Search for yaml file
yaml_file_count = 0
for name in zip_ref.namelist():
if (name.endswith('.yaml') or name.endswith('.yml')) and (
not name.startswith('__MACOSX')): # Ignore YAML File in __MACOSX Directory
yaml_file = name
extracted_folder_name = yaml_file.split(basename(yaml_file))[0]
yaml_file_count += 1
if not yaml_file_count:
message = 'There is no YAML file in zip file you uploaded!'
response_data = {
'error': message
}
logger.info(message)
return Response(response_data, status=status.HTTP_406_NOT_ACCEPTABLE)
if yaml_file_count > 1:
message = 'There are {0} YAML files instead of one in zip folder!'.format(yaml_file_count)
response_data = {
'error': message
}
logger.info(message)
return Response(response_data, status=status.HTTP_406_NOT_ACCEPTABLE)
try:
with open(join(BASE_LOCATION, unique_folder_name, yaml_file), "r") as stream:
yaml_file_data = yaml.safe_load(stream)
except (yaml.YAMLError, ScannerError) as exc:
message = 'Error in creating challenge. Please check the yaml configuration!'
response_data = {
'error': message
}
logger.exception(exc)
return Response(response_data, status=status.HTTP_406_NOT_ACCEPTABLE)
# Check for evaluation script path in yaml file.
try:
evaluation_script = yaml_file_data['evaluation_script']
evaluation_script_path = join(BASE_LOCATION,
unique_folder_name,
extracted_folder_name,
evaluation_script)
except KeyError:
message = ('There is no key for evaluation script in YAML file. '
'Please add it and then try again!')
response_data = {
'error': message
}
logger.exception(message)
return Response(response_data, status=status.HTTP_406_NOT_ACCEPTABLE)
# Check for evaluation script file in extracted zip folder.
if isfile(evaluation_script_path):
with open(evaluation_script_path, 'rb') as challenge_evaluation_script:
challenge_evaluation_script_file = ContentFile(
challenge_evaluation_script.read(), evaluation_script_path)
else:
message = ('No evaluation script is present in the zip file. '
'Please add it and then try again!')
response_data = {
'error': message
}
logger.exception(message)
return Response(response_data, status=status.HTTP_406_NOT_ACCEPTABLE)
# Check for test annotation file path in yaml file.
try:
challenge_phases_data = yaml_file_data['challenge_phases']
except KeyError:
message = ('No challenge phase key found. '
'Please add challenge phases in YAML file and try again!')
response_data = {
'error': message
}
logger.exception(message)
return Response(response_data, status=status.HTTP_406_NOT_ACCEPTABLE)
for data in challenge_phases_data:
test_annotation_file = data['test_annotation_file']
if test_annotation_file:
test_annotation_file_path = join(BASE_LOCATION,
unique_folder_name,
extracted_folder_name,
test_annotation_file)
else:
message = ('There is no key for test annotation file for'
'challenge phase {} in yaml file. Please add it'
' and then try again!'.format(data['name']))
response_data = {
'error': message
}
logger.exception(message)
return Response(
response_data, status=status.HTTP_406_NOT_ACCEPTABLE)
if not isfile(test_annotation_file_path):
message = ('No test annotation file found in zip file'
'for challenge phase \'{}\'. Please add it and '
' then try again!'.format(data['name']))
response_data = {
'error': message
}
logger.exception(message)
return Response(
response_data, status=status.HTTP_406_NOT_ACCEPTABLE)
# Check for challenge image in yaml file.
image = yaml_file_data.get('image')
if image and (image.endswith('.jpg') or image.endswith('.jpeg') or image.endswith('.png')):
challenge_image_path = join(BASE_LOCATION,
unique_folder_name,
extracted_folder_name,
image)
if isfile(challenge_image_path):
challenge_image_file = ContentFile(
get_file_content(challenge_image_path, 'rb'), image)
else:
challenge_image_file = None
else:
challenge_image_file = None
# check for challenge description file
try:
challenge_description_file_path = join(BASE_LOCATION,
unique_folder_name,
extracted_folder_name,
yaml_file_data['description'])
if challenge_description_file_path.endswith('.html') and isfile(challenge_description_file_path):
yaml_file_data['description'] = get_file_content(
challenge_description_file_path, 'rb').decode('utf-8')
else:
yaml_file_data['description'] = None
except KeyError:
message = ('There is no key for description. '
'Please add it and then try again!')
response_data = {
'error': message
}
logger.exception(message)
return Response(response_data, status.HTTP_406_NOT_ACCEPTABLE)
# check for evaluation details file
try:
challenge_evaluation_details_file_path = join(
BASE_LOCATION,
unique_folder_name,
extracted_folder_name,
yaml_file_data['evaluation_details']
)
if (challenge_evaluation_details_file_path.endswith('.html') and
isfile(challenge_evaluation_details_file_path)):
yaml_file_data['evaluation_details'] = get_file_content(
challenge_evaluation_details_file_path, 'rb').decode('utf-8')
else:
yaml_file_data['evaluation_details'] = None
except KeyError:
message = ('There is no key for evalutaion details. '
'Please add it and then try again!')
response_data = {
'error': message
}
logger.exception(message)
return Response(response_data, status.HTTP_406_NOT_ACCEPTABLE)
# check for terms and conditions file
try:
challenge_terms_and_cond_file_path = join(
BASE_LOCATION,
unique_folder_name,
extracted_folder_name,
yaml_file_data['terms_and_conditions']
)
if challenge_terms_and_cond_file_path.endswith('.html') and isfile(challenge_terms_and_cond_file_path):
yaml_file_data['terms_and_conditions'] = get_file_content(
challenge_terms_and_cond_file_path, 'rb').decode('utf-8')
else:
yaml_file_data['terms_and_conditions'] = None
except KeyError:
message = ('There is no key for terms and conditions. '
'Please add it and then try again!')
response_data = {
'error': message
}
logger.exception(message)
return Response(response_data, status.HTTP_406_NOT_ACCEPTABLE)
# check for submission guidelines file
try:
submission_guidelines_file_path = join(
BASE_LOCATION,
unique_folder_name,
extracted_folder_name,
yaml_file_data['submission_guidelines']
)
if (submission_guidelines_file_path.endswith('.html') and
isfile(submission_guidelines_file_path)):
yaml_file_data['submission_guidelines'] = get_file_content(
submission_guidelines_file_path, 'rb').decode('utf-8')
else:
yaml_file_data['submission_guidelines'] = None
except KeyError:
message = ('There is no key for submission guidelines. '
'Please add it and then try again!')
response_data = {
'error': message
}
logger.exception(message)
return Response(response_data, status.HTTP_406_NOT_ACCEPTABLE)
# Check for leaderboard schema in YAML file
leaderboard_schema = yaml_file_data.get('leaderboard')
'''
Format of leaderboard data is:
[
{
'id': 1,
'schema': {
'default_order_by': 'bleu',
'labels': ['bleu']
}
}
]
'''
if leaderboard_schema:
if 'default_order_by' not in leaderboard_schema[0].get('schema'):
message = ('There is no \'default_order_by\' key in leaderboard '
'schema. Please add it and then try again!')
response_data = {
'error': message
}
logger.exception(message)
return Response(response_data, status.HTTP_406_NOT_ACCEPTABLE)
if 'labels' not in leaderboard_schema[0].get('schema'):
message = ('There is no \'labels\' key in leaderboard '
'schema. Please add it and then try again!')
response_data = {
'error': message
}
logger.exception(message)
return Response(response_data, status.HTTP_406_NOT_ACCEPTABLE)
else:
message = ('There is no key \'leaderboard\' '
'in the YAML file. Please add it and then try again!')
response_data = {
'error': message
}
logger.exception(message)
return Response(response_data, status.HTTP_406_NOT_ACCEPTABLE)
try:
with transaction.atomic():
serializer = ZipChallengeSerializer(
data=yaml_file_data,
context={
'request': request,
'challenge_host_team': challenge_host_team,
'image': challenge_image_file,
'evaluation_script': challenge_evaluation_script_file
}
)
if serializer.is_valid():
serializer.save()
challenge = serializer.instance
challenge_title = challenge.title.split(' ')
challenge_title = '-'.join(challenge_title).lower()
random_challenge_id = uuid.uuid4()
challenge_queue_name = "{}-{}".format(challenge_title, random_challenge_id)
challenge.queue = challenge_queue_name
challenge.save()
else:
response_data = serializer.errors
# transaction.set_rollback(True)
# return Response(response_data, status.HTTP_406_NOT_ACCEPTABLE)
# Create Leaderboard
yaml_file_data_of_leaderboard = yaml_file_data['leaderboard']
leaderboard_ids = {}
for data in yaml_file_data_of_leaderboard:
serializer = LeaderboardSerializer(data=data)
if serializer.is_valid():
serializer.save()
leaderboard_ids[str(data['id'])] = serializer.instance.pk
else:
response_data = serializer.errors
# Create Challenge Phase
challenge_phase_ids = {}
for data in challenge_phases_data:
# Check for challenge phase description file
phase_description_file_path = join(
BASE_LOCATION,
unique_folder_name,
extracted_folder_name,
data['description']
)
if (phase_description_file_path.endswith('.html') and isfile(phase_description_file_path)):
data['description'] = get_file_content(
phase_description_file_path, 'rb').decode('utf-8')
else:
data['description'] = None
test_annotation_file = data['test_annotation_file']
if test_annotation_file:
test_annotation_file_path = join(
BASE_LOCATION,
unique_folder_name,
extracted_folder_name,
test_annotation_file
)
if isfile(test_annotation_file_path):
with open(test_annotation_file_path, 'rb') as test_annotation_file:
challenge_test_annotation_file = ContentFile(
test_annotation_file.read(),
test_annotation_file_path
)
serializer = ChallengePhaseCreateSerializer(
data=data,
context={
'challenge': challenge,
'test_annotation': challenge_test_annotation_file
}
)
if serializer.is_valid():
serializer.save()
challenge_phase_ids[str(data['id'])
] = serializer.instance.pk
else:
response_data = serializer.errors
# Create Dataset Splits
yaml_file_data_of_dataset_split = yaml_file_data['dataset_splits']
dataset_split_ids = {}
for data in yaml_file_data_of_dataset_split:
serializer = DatasetSplitSerializer(data=data)
if serializer.is_valid():
serializer.save()
dataset_split_ids[str(data['id'])] = serializer.instance.pk
else:
# Return error when dataset split name is not unique.
response_data = serializer.errors
# Create Challenge Phase Splits
try:
challenge_phase_splits_data = yaml_file_data['challenge_phase_splits']
except KeyError:
message = ('There is no key for challenge phase splits. '
'Please add it and then try again!')
response_data = {
'error': message
}
logger.exception(message)
return Response(response_data, status.HTTP_406_NOT_ACCEPTABLE)
for data in challenge_phase_splits_data:
challenge_phase = challenge_phase_ids[str(
data['challenge_phase_id'])]
leaderboard = leaderboard_ids[str(data['leaderboard_id'])]
dataset_split = dataset_split_ids[str(
data['dataset_split_id'])]
visibility = data['visibility']
data = {
'challenge_phase': challenge_phase,
'leaderboard': leaderboard,
'dataset_split': dataset_split,
'visibility': visibility
}
serializer = ZipChallengePhaseSplitSerializer(data=data)
if serializer.is_valid():
serializer.save()
else:
response_data = serializer.errors
zip_config = ChallengeConfiguration.objects.get(
pk=uploaded_zip_file.pk)
if zip_config:
# Add the Challenge Host as a test participant.
emails = challenge_host_team.get_all_challenge_host_email()
team_name = "Host_{}_Team".format(random.randint(1, 100000))
participant_host_team = ParticipantTeam(
team_name=team_name,
created_by=challenge_host_team.created_by,)
participant_host_team.save()
for email in emails:
user = User.objects.get(email=email)
host = Participant(
user=user,
status=Participant.ACCEPTED,
team=participant_host_team,
)
host.save()
challenge.participant_teams.add(participant_host_team)
zip_config.challenge = challenge
zip_config.save()
response_data = {
'success': 'Challenge {} has been created successfully and'
' sent for review to EvalAI Admin.'.format(challenge.title)}
return Response(response_data, status=status.HTTP_201_CREATED)
except:
try:
if response_data:
response_data = {'error': response_data.values()[0]}
return Response(response_data, status=status.HTTP_406_NOT_ACCEPTABLE)
except:
response_data = {
'error': 'Error in creating challenge. Please check the yaml configuration!'}
return Response(response_data, status=status.HTTP_400_BAD_REQUEST)
finally:
try:
shutil.rmtree(BASE_LOCATION)
logger.info('Zip folder is removed')
except:
logger.exception('Zip folder for challenge {} is not removed from location'.format(challenge.pk,
BASE_LOCATION))
try:
shutil.rmtree(BASE_LOCATION)
logger.info('Zip folder is removed')
except:
logger.info('Zip folder for challenge {} is not removed from location'.format(challenge.pk,
BASE_LOCATION))
@throttle_classes([UserRateThrottle])
@api_view(['GET'])
@permission_classes((permissions.IsAuthenticated, HasVerifiedEmail))
@authentication_classes((ExpiringTokenAuthentication,))
def get_all_submissions_of_challenge(request, challenge_pk, challenge_phase_pk):
"""
Returns all the submissions for a particular challenge
"""
# To check for the corresponding challenge from challenge_pk.
challenge = get_challenge_model(challenge_pk)
# To check for the corresponding challenge | |
--lease-time 86400
"""
helps['vmware workload-network dns-service'] = """
type: group
short-summary: Commands to manage a DNS Service workload network.
"""
helps['vmware workload-network dns-service list'] = """
type: command
short-summary: List of DNS services in a private cloud workload network.
examples:
- name: List of DNS services in a workload network.
text: az vmware workload-network dns-service list --resource-group group1 --private-cloud cloud1
"""
helps['vmware workload-network dns-service show'] = """
type: command
short-summary: Get a DNS service by ID in a private cloud workload network.
examples:
- name: Get a DNS service by ID in a workload network.
text: az vmware workload-network dns-service show --resource-group group1 --private-cloud cloud1 --dns-service dnsService1
"""
helps['vmware workload-network dns-service create'] = """
type: command
short-summary: Create a DNS service by ID in a private cloud workload network.
examples:
- name: Create a DNS service by ID in a workload network.
text: az vmware workload-network dns-service create --resource-group group1 --private-cloud cloud1 --dns-service dnsService1 --display-name dnsService1 --dns-service-ip 172.16.31.10 --default-dns-zone defaultDnsZone1 --fqdn-zones fqdnZone1 --log-level INFO --revision 1
"""
helps['vmware workload-network dns-service update'] = """
type: command
short-summary: Update a DNS service by ID in a private cloud workload network.
examples:
- name: Update a DNS service by ID in a workload network.
text: az vmware workload-network dns-service update --resource-group group1 --private-cloud cloud1 --dns-service dnsService1 --display-name dnsService1 --dns-service-ip 172.16.31.10 --default-dns-zone defaultDnsZone1 --fqdn-zones fqdnZone1 --log-level INFO --revision 1
"""
helps['vmware workload-network dns-service delete'] = """
type: command
short-summary: Delete a DNS service by ID in a private cloud workload network.
examples:
- name: Delete a DNS service by ID in a workload network.
text: az vmware workload-network dns-service delete --resource-group group1 --private-cloud cloud1 --dns-service dnsService1
"""
helps['vmware workload-network dns-zone'] = """
type: group
short-summary: Commands to manage a DNS Zone workload network.
"""
helps['vmware workload-network dns-zone list'] = """
type: command
short-summary: List of DNS zones in a private cloud workload network.
examples:
- name: List of DNS zones in a workload network.
text: az vmware workload-network dns-zone list --resource-group group1 --private-cloud cloud1
"""
helps['vmware workload-network dns-zone show'] = """
type: command
short-summary: Get a DNS zone by ID in a private cloud workload network.
examples:
- name: Get a DNS zone by ID in a workload network.
text: az vmware workload-network dns-zone show --resource-group group1 --private-cloud cloud1 --dns-zone dnsZone1
"""
helps['vmware workload-network dns-zone create'] = """
type: command
short-summary: Create a DNS zone by ID in a private cloud workload network.
examples:
- name: Create a DNS zone by ID in a workload network.
text: az vmware workload-network dns-zone create --resource-group group1 --private-cloud cloud1 --dns-zone dnsZone1 --display-name dnsZone1 --domain domain1 --dns-server-ips 1.1.1.1 --source-ip 8.8.8.8 --dns-services 1 --revision 1
"""
helps['vmware workload-network dns-zone update'] = """
type: command
short-summary: Update a DNS zone by ID in a private cloud workload network.
examples:
- name: Update a DNS zone by ID in a workload network.
text: az vmware workload-network dns-zone update --resource-group group1 --private-cloud cloud1 --dns-zone dnsZone1 --display-name dnsZone1 --domain domain1 --dns-server-ips 1.1.1.1 --source-ip 8.8.8.8 --dns-services 1 --revision 1
"""
helps['vmware workload-network dns-zone delete'] = """
type: command
short-summary: Delete a DNS zone by ID in a private cloud workload network.
examples:
- name: Delete a DNS zone by ID in a workload network.
text: az vmware workload-network dns-zone delete --resource-group group1 --private-cloud cloud1 --dns-zone dnsZone1
"""
helps['vmware workload-network port-mirroring'] = """
type: group
short-summary: Commands to manage a Port Mirroring workload network.
"""
helps['vmware workload-network port-mirroring list'] = """
type: command
short-summary: List of port mirroring profiles in a private cloud workload network.
examples:
- name: List of port mirroring profiles in a workload network.
text: az vmware workload-network port-mirroring list --resource-group group1 --private-cloud cloud1
"""
helps['vmware workload-network port-mirroring show'] = """
type: command
short-summary: Get a port mirroring profile by ID in a private cloud workload network.
examples:
- name: Get a port mirroring profile by ID in a workload network.
text: az vmware workload-network port-mirroring show --resource-group group1 --private-cloud cloud1 --port-mirroring portMirroring1
"""
helps['vmware workload-network port-mirroring create'] = """
type: command
short-summary: Create a port mirroring profile by ID in a private cloud workload network.
examples:
- name: Create a port mirroring profile by ID in a workload network.
text: az vmware workload-network port-mirroring create --resource-group group1 --private-cloud cloud1 --port-mirroring portMirroring1 --display-name portMirroring1 --direction BIDIRECTIONAL --source vmGroup1 --destination vmGroup2 --revision 1
"""
helps['vmware workload-network port-mirroring update'] = """
type: command
short-summary: Update a port mirroring profile by ID in a private cloud workload network.
examples:
- name: Update a port mirroring profile by ID in a workload network.
text: az vmware workload-network port-mirroring update --resource-group group1 --private-cloud cloud1 --port-mirroring portMirroring1 --display-name portMirroring1 --direction BIDIRECTIONAL --source vmGroup1 --destination vmGroup2 --revision 1
"""
helps['vmware workload-network port-mirroring delete'] = """
type: command
short-summary: Delete a port mirroring profile by ID in a private cloud workload network.
examples:
- name: Delete a port mirroring profile by ID in a workload network.
text: az vmware workload-network port-mirroring delete --resource-group group1 --private-cloud cloud1 --port-mirroring portMirroring1
"""
helps['vmware workload-network segment'] = """
type: group
short-summary: Commands to manage a Segment workload network.
"""
helps['vmware workload-network segment list'] = """
type: command
short-summary: List of segments in a private cloud workload network.
examples:
- name: List of segments in a workload network.
text: az vmware workload-network segment list --resource-group group1 --private-cloud cloud1
"""
helps['vmware workload-network segment show'] = """
type: command
short-summary: Get a segment by ID in a private cloud workload network.
examples:
- name: Get a segment by ID in a workload network.
text: az vmware workload-network segment show --resource-group group1 --private-cloud cloud1 --segment segment1
"""
helps['vmware workload-network segment create'] = """
type: command
short-summary: Create a segment by ID in a private cloud workload network.
examples:
- name: Create a segment by ID in a workload network.
text: az vmware workload-network segment create --resource-group group1 --private-cloud cloud1 --segment segment1 --display-name segment1 --connected-gateway /infra/tier-1s/gateway --revision 1 --dhcp-ranges 172.16.58.3 172.16.31.10 --gateway-address 192.168.3.11/16 --port-name port1
"""
helps['vmware workload-network segment update'] = """
type: command
short-summary: Update a segment by ID in a private cloud workload network.
examples:
- name: Update a segment by ID in a workload network.
text: az vmware workload-network segment update --resource-group group1 --private-cloud cloud1 --segment segment1 --display-name segment1 --connected-gateway /infra/tier-1s/gateway --revision 1 --dhcp-ranges 172.16.58.3 172.16.31.10 --gateway-address 192.168.3.11/16 --port-name port1
"""
helps['vmware workload-network segment delete'] = """
type: command
short-summary: Delete a segment by ID in a private cloud workload network.
examples:
- name: Delete a segment by ID in a workload network.
text: az vmware workload-network segment delete --resource-group group1 --private-cloud cloud1 --segment segment1
"""
helps['vmware workload-network public-ip'] = """
type: group
short-summary: Commands to manage a Public-IP workload network.
"""
helps['vmware workload-network public-ip list'] = """
type: command
short-summary: List of Public IP Blocks in a private cloud workload network.
examples:
- name: List of Public IP Blocks in a workload network.
text: az vmware workload-network public-ip list --resource-group group1 --private-cloud cloud1
"""
helps['vmware workload-network public-ip show'] = """
type: command
short-summary: Get a Public IP Block by ID in a private cloud workload network.
examples:
- name: Get a Public IP Block by ID in a workload network.
text: az vmware workload-network public-ip show --resource-group group1 --private-cloud cloud1 --public-ip publicIP1
"""
helps['vmware workload-network public-ip create'] = """
type: command
short-summary: Create a Public IP Block by ID in a private cloud workload network.
examples:
- name: Create a Public IP Block by ID in a workload network.
text: az vmware workload-network public-ip create --resource-group group1 --private-cloud cloud1 --public-ip publicIP1 --display-name publicIP1 --number-of-public-ips 32
"""
helps['vmware workload-network public-ip delete'] = """
type: command
short-summary: Delete a Public IP Block by ID in a private cloud workload network.
examples:
- name: Delete a Public IP Block by ID in a workload network.
text: az vmware workload-network public-ip delete --resource-group group1 --private-cloud cloud1 --public-ip publicIP1
"""
helps['vmware workload-network vm-group'] = """
type: group
short-summary: Commands to manage a VM Group workload network.
"""
helps['vmware workload-network vm-group list'] = """
type: command
short-summary: List of VM Groups in a private cloud workload network.
examples:
- name: List of VM Groups in a workload network.
text: az vmware workload-network vm-group | |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Explorer/explorer.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1171, 967)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
MainWindow.setMinimumSize(QtCore.QSize(0, 0))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/Explorer/gradient.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
self.centralWidget = QtWidgets.QWidget(MainWindow)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.centralWidget.sizePolicy().hasHeightForWidth())
self.centralWidget.setSizePolicy(sizePolicy)
self.centralWidget.setFocusPolicy(QtCore.Qt.NoFocus)
self.centralWidget.setObjectName("centralWidget")
self.gridLayout_2 = QtWidgets.QGridLayout(self.centralWidget)
self.gridLayout_2.setObjectName("gridLayout_2")
self.overallTabsContainer = QtWidgets.QTabWidget(self.centralWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.overallTabsContainer.sizePolicy().hasHeightForWidth())
self.overallTabsContainer.setSizePolicy(sizePolicy)
self.overallTabsContainer.setMinimumSize(QtCore.QSize(0, 0))
self.overallTabsContainer.setBaseSize(QtCore.QSize(0, 0))
font = QtGui.QFont()
font.setPointSize(12)
self.overallTabsContainer.setFont(font)
self.overallTabsContainer.setFocusPolicy(QtCore.Qt.StrongFocus)
self.overallTabsContainer.setTabPosition(QtWidgets.QTabWidget.East)
self.overallTabsContainer.setObjectName("overallTabsContainer")
self.imageTabsContainer = QtWidgets.QWidget()
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.imageTabsContainer.sizePolicy().hasHeightForWidth())
self.imageTabsContainer.setSizePolicy(sizePolicy)
self.imageTabsContainer.setFocusPolicy(QtCore.Qt.NoFocus)
self.imageTabsContainer.setObjectName("imageTabsContainer")
self.gridLayout = QtWidgets.QGridLayout(self.imageTabsContainer)
self.gridLayout.setObjectName("gridLayout")
self.imageTabs = QtWidgets.QTabWidget(self.imageTabsContainer)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.imageTabs.sizePolicy().hasHeightForWidth())
self.imageTabs.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
self.imageTabs.setFont(font)
self.imageTabs.setFocusPolicy(QtCore.Qt.StrongFocus)
self.imageTabs.setObjectName("imageTabs")
self.depthBufferTab = QtWidgets.QWidget()
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.depthBufferTab.sizePolicy().hasHeightForWidth())
self.depthBufferTab.setSizePolicy(sizePolicy)
self.depthBufferTab.setFocusPolicy(QtCore.Qt.TabFocus)
self.depthBufferTab.setObjectName("depthBufferTab")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.depthBufferTab)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.imageTabs.addTab(self.depthBufferTab, "")
self.reliefTab = QtWidgets.QWidget()
self.reliefTab.setFocusPolicy(QtCore.Qt.TabFocus)
self.reliefTab.setObjectName("reliefTab")
self.verticalLayout_9 = QtWidgets.QVBoxLayout(self.reliefTab)
self.verticalLayout_9.setObjectName("verticalLayout_9")
self.imageTabs.addTab(self.reliefTab, "")
self.backgroundMaskTab = QtWidgets.QWidget()
self.backgroundMaskTab.setFocusPolicy(QtCore.Qt.TabFocus)
self.backgroundMaskTab.setObjectName("backgroundMaskTab")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.backgroundMaskTab)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.imageTabs.addTab(self.backgroundMaskTab, "")
self.gradientXTab = QtWidgets.QWidget()
self.gradientXTab.setFocusPolicy(QtCore.Qt.TabFocus)
self.gradientXTab.setObjectName("gradientXTab")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.gradientXTab)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.imageTabs.addTab(self.gradientXTab, "")
self.gradientXMaskTab = QtWidgets.QWidget()
self.gradientXMaskTab.setFocusPolicy(QtCore.Qt.TabFocus)
self.gradientXMaskTab.setObjectName("gradientXMaskTab")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.gradientXMaskTab)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.imageTabs.addTab(self.gradientXMaskTab, "")
self.gradientYTab = QtWidgets.QWidget()
self.gradientYTab.setFocusPolicy(QtCore.Qt.TabFocus)
self.gradientYTab.setObjectName("gradientYTab")
self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.gradientYTab)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.imageTabs.addTab(self.gradientYTab, "")
self.gradientYMaskTab = QtWidgets.QWidget()
self.gradientYMaskTab.setFocusPolicy(QtCore.Qt.TabFocus)
self.gradientYMaskTab.setObjectName("gradientYMaskTab")
self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.gradientYMaskTab)
self.verticalLayout_7.setObjectName("verticalLayout_7")
self.imageTabs.addTab(self.gradientYMaskTab, "")
self.compositeMaskTab = QtWidgets.QWidget()
self.compositeMaskTab.setFocusPolicy(QtCore.Qt.TabFocus)
self.compositeMaskTab.setObjectName("compositeMaskTab")
self.verticalLayout_8 = QtWidgets.QVBoxLayout(self.compositeMaskTab)
self.verticalLayout_8.setObjectName("verticalLayout_8")
self.imageTabs.addTab(self.compositeMaskTab, "")
self.gradientXUnsharpTab = QtWidgets.QWidget()
self.gradientXUnsharpTab.setFocusPolicy(QtCore.Qt.TabFocus)
self.gradientXUnsharpTab.setObjectName("gradientXUnsharpTab")
self.verticalLayout = QtWidgets.QVBoxLayout(self.gradientXUnsharpTab)
self.verticalLayout.setObjectName("verticalLayout")
self.imageTabs.addTab(self.gradientXUnsharpTab, "")
self.gradientYUnsharpTab = QtWidgets.QWidget()
self.gradientYUnsharpTab.setFocusPolicy(QtCore.Qt.TabFocus)
self.gradientYUnsharpTab.setObjectName("gradientYUnsharpTab")
self.verticalLayout_11 = QtWidgets.QVBoxLayout(self.gradientYUnsharpTab)
self.verticalLayout_11.setObjectName("verticalLayout_11")
self.imageTabs.addTab(self.gradientYUnsharpTab, "")
self.gridLayout.addWidget(self.imageTabs, 0, 1, 1, 1)
self.overallTabsContainer.addTab(self.imageTabsContainer, "")
self.modelTabsContainer = QtWidgets.QWidget()
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.modelTabsContainer.sizePolicy().hasHeightForWidth())
self.modelTabsContainer.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(12)
self.modelTabsContainer.setFont(font)
self.modelTabsContainer.setFocusPolicy(QtCore.Qt.NoFocus)
self.modelTabsContainer.setObjectName("modelTabsContainer")
self.gridLayout_3 = QtWidgets.QGridLayout(self.modelTabsContainer)
self.gridLayout_3.setObjectName("gridLayout_3")
self.modelTabs = QtWidgets.QTabWidget(self.modelTabsContainer)
self.modelTabs.setFocusPolicy(QtCore.Qt.StrongFocus)
self.modelTabs.setObjectName("modelTabs")
self.modelMeshTab = QtWidgets.QWidget()
self.modelMeshTab.setFocusPolicy(QtCore.Qt.TabFocus)
self.modelMeshTab.setObjectName("modelMeshTab")
self.verticalLayout_10 = QtWidgets.QVBoxLayout(self.modelMeshTab)
self.verticalLayout_10.setObjectName("verticalLayout_10")
self.modelTabs.addTab(self.modelMeshTab, "")
self.modelMeshScaledTab = QtWidgets.QWidget()
self.modelMeshScaledTab.setObjectName("modelMeshScaledTab")
self.verticalLayout_13 = QtWidgets.QVBoxLayout(self.modelMeshScaledTab)
self.verticalLayout_13.setObjectName("verticalLayout_13")
self.modelTabs.addTab(self.modelMeshScaledTab, "")
self.reliefMeshTab = QtWidgets.QWidget()
self.reliefMeshTab.setFocusPolicy(QtCore.Qt.TabFocus)
self.reliefMeshTab.setObjectName("reliefMeshTab")
self.verticalLayout_12 = QtWidgets.QVBoxLayout(self.reliefMeshTab)
self.verticalLayout_12.setObjectName("verticalLayout_12")
self.modelTabs.addTab(self.reliefMeshTab, "")
self.gridLayout_3.addWidget(self.modelTabs, 0, 0, 1, 1)
self.overallTabsContainer.addTab(self.modelTabsContainer, "")
self.workbenchTabsContainer = QtWidgets.QWidget()
self.workbenchTabsContainer.setObjectName("workbenchTabsContainer")
self.gridLayout_4 = QtWidgets.QGridLayout(self.workbenchTabsContainer)
self.gridLayout_4.setObjectName("gridLayout_4")
self.workbenchTabs = QtWidgets.QTabWidget(self.workbenchTabsContainer)
self.workbenchTabs.setFocusPolicy(QtCore.Qt.StrongFocus)
self.workbenchTabs.setObjectName("workbenchTabs")
self.i1Tab = QtWidgets.QWidget()
self.i1Tab.setObjectName("i1Tab")
self.verticalLayout_14 = QtWidgets.QVBoxLayout(self.i1Tab)
self.verticalLayout_14.setObjectName("verticalLayout_14")
self.workbenchTabs.addTab(self.i1Tab, "")
self.i2Tab = QtWidgets.QWidget()
self.i2Tab.setObjectName("i2Tab")
self.verticalLayout_15 = QtWidgets.QVBoxLayout(self.i2Tab)
self.verticalLayout_15.setObjectName("verticalLayout_15")
self.workbenchTabs.addTab(self.i2Tab, "")
self.i3Tab = QtWidgets.QWidget()
self.i3Tab.setObjectName("i3Tab")
self.verticalLayout_16 = QtWidgets.QVBoxLayout(self.i3Tab)
self.verticalLayout_16.setObjectName("verticalLayout_16")
self.workbenchTabs.addTab(self.i3Tab, "")
self.i4Tab = QtWidgets.QWidget()
self.i4Tab.setObjectName("i4Tab")
self.verticalLayout_17 = QtWidgets.QVBoxLayout(self.i4Tab)
self.verticalLayout_17.setObjectName("verticalLayout_17")
self.workbenchTabs.addTab(self.i4Tab, "")
self.i5Tab = QtWidgets.QWidget()
self.i5Tab.setObjectName("i5Tab")
self.verticalLayout_18 = QtWidgets.QVBoxLayout(self.i5Tab)
self.verticalLayout_18.setObjectName("verticalLayout_18")
self.workbenchTabs.addTab(self.i5Tab, "")
self.i6Tab = QtWidgets.QWidget()
self.i6Tab.setObjectName("i6Tab")
self.verticalLayout_19 = QtWidgets.QVBoxLayout(self.i6Tab)
self.verticalLayout_19.setObjectName("verticalLayout_19")
self.workbenchTabs.addTab(self.i6Tab, "")
self.i7Tab = QtWidgets.QWidget()
self.i7Tab.setObjectName("i7Tab")
self.verticalLayout_20 = QtWidgets.QVBoxLayout(self.i7Tab)
self.verticalLayout_20.setObjectName("verticalLayout_20")
self.workbenchTabs.addTab(self.i7Tab, "")
self.i8Tab = QtWidgets.QWidget()
self.i8Tab.setObjectName("i8Tab")
self.verticalLayout_21 = QtWidgets.QVBoxLayout(self.i8Tab)
self.verticalLayout_21.setObjectName("verticalLayout_21")
self.workbenchTabs.addTab(self.i8Tab, "")
self.gridLayout_4.addWidget(self.workbenchTabs, 0, 0, 1, 1)
self.overallTabsContainer.addTab(self.workbenchTabsContainer, "")
self.gridLayout_2.addWidget(self.overallTabsContainer, 0, 1, 1, 1)
self.settingsContainer = QtWidgets.QWidget(self.centralWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.settingsContainer.sizePolicy().hasHeightForWidth())
self.settingsContainer.setSizePolicy(sizePolicy)
self.settingsContainer.setMinimumSize(QtCore.QSize(300, 0))
self.settingsContainer.setMaximumSize(QtCore.QSize(300, 896))
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.settingsContainer.setFont(font)
self.settingsContainer.setFocusPolicy(QtCore.Qt.ClickFocus)
self.settingsContainer.setObjectName("settingsContainer")
self.gradientThresholdCheckBox = QtWidgets.QCheckBox(self.settingsContainer)
self.gradientThresholdCheckBox.setGeometry(QtCore.QRect(32, 151, 188, 24))
self.gradientThresholdCheckBox.setMaximumSize(QtCore.QSize(373, 16777215))
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.gradientThresholdCheckBox.setFont(font)
self.gradientThresholdCheckBox.setObjectName("gradientThresholdCheckBox")
self.gradientThresholdLineEdit = QtWidgets.QLineEdit(self.settingsContainer)
self.gradientThresholdLineEdit.setGeometry(QtCore.QRect(240, 150, 50, 29))
self.gradientThresholdLineEdit.setMaximumSize(QtCore.QSize(373, 16777215))
self.gradientThresholdLineEdit.setObjectName("gradientThresholdLineEdit")
self.unsharpGaussianLowLineEdit = QtWidgets.QLineEdit(self.settingsContainer)
self.unsharpGaussianLowLineEdit.setGeometry(QtCore.QRect(240, 335, 50, 28))
self.unsharpGaussianLowLineEdit.setMaximumSize(QtCore.QSize(373, 16777215))
self.unsharpGaussianLowLineEdit.setObjectName("unsharpGaussianLowLineEdit")
self.unsharpGaussianHighLineEdit = QtWidgets.QLineEdit(self.settingsContainer)
self.unsharpGaussianHighLineEdit.setGeometry(QtCore.QRect(240, 366, 50, 29))
self.unsharpGaussianHighLineEdit.setMaximumSize(QtCore.QSize(373, 16777215))
self.unsharpGaussianHighLineEdit.setObjectName("unsharpGaussianHighLineEdit")
self.unsharpHFScaleLineEdit = QtWidgets.QLineEdit(self.settingsContainer)
self.unsharpHFScaleLineEdit.setGeometry(QtCore.QRect(240, 398, 50, 27))
self.unsharpHFScaleLineEdit.setMaximumSize(QtCore.QSize(373, 16777215))
self.unsharpHFScaleLineEdit.setObjectName("unsharpHFScaleLineEdit")
self.processButton = QtWidgets.QPushButton(self.settingsContainer)
self.processButton.setGeometry(QtCore.QRect(200, 5, 75, 27))
self.processButton.setMaximumSize(QtCore.QSize(373, 16777215))
self.processButton.setObjectName("processButton")
self.attenuationCheckBox = QtWidgets.QCheckBox(self.settingsContainer)
self.attenuationCheckBox.setGeometry(QtCore.QRect(32, 184, 210, 24))
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.attenuationCheckBox.setFont(font)
self.attenuationCheckBox.setObjectName("attenuationCheckBox")
self.labelSetings = QtWidgets.QLabel(self.settingsContainer)
self.labelSetings.setGeometry(QtCore.QRect(-1, 10, 181, 21))
font = QtGui.QFont()
font.setPointSize(16)
font.setBold(False)
font.setWeight(50)
self.labelSetings.setFont(font)
self.labelSetings.setAlignment(QtCore.Qt.AlignCenter)
self.labelSetings.setObjectName("labelSetings")
self.attenuationDecayLineEdit = QtWidgets.QLineEdit(self.settingsContainer)
self.attenuationDecayLineEdit.setGeometry(QtCore.QRect(240, 240, 50, 29))
self.attenuationDecayLineEdit.setObjectName("attenuationDecayLineEdit")
self.attenuationFactorLineEdit = QtWidgets.QLineEdit(self.settingsContainer)
self.attenuationFactorLineEdit.setGeometry(QtCore.QRect(240, 208, 50, 28))
self.attenuationFactorLineEdit.setObjectName("attenuationFactorLineEdit")
self.labelAttenuationFactor = QtWidgets.QLabel(self.settingsContainer)
self.labelAttenuationFactor.setGeometry(QtCore.QRect(50, 212, 181, 24))
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(False)
font.setWeight(50)
self.labelAttenuationFactor.setFont(font)
self.labelAttenuationFactor.setObjectName("labelAttenuationFactor")
self.labelAttenuationDecay = QtWidgets.QLabel(self.settingsContainer)
self.labelAttenuationDecay.setGeometry(QtCore.QRect(50, 242, 191, 24))
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(False)
font.setWeight(50)
self.labelAttenuationDecay.setFont(font)
self.labelAttenuationDecay.setObjectName("labelAttenuationDecay")
self.unsharpMaskingCheckBox = QtWidgets.QCheckBox(self.settingsContainer)
self.unsharpMaskingCheckBox.setGeometry(QtCore.QRect(32, 270, 177, 24))
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.unsharpMaskingCheckBox.setFont(font)
self.unsharpMaskingCheckBox.setObjectName("unsharpMaskingCheckBox")
self.p1CheckBox = QtWidgets.QCheckBox(self.settingsContainer)
self.p1CheckBox.setGeometry(QtCore.QRect(32, 757, 165, 24))
font = QtGui.QFont()
font.setFamily("MS Shell Dlg 2")
font.setPointSize(10)
font.setBold(False)
font.setItalic(True)
font.setWeight(50)
self.p1CheckBox.setFont(font)
self.p1CheckBox.setStyleSheet("")
self.p1CheckBox.setObjectName("p1CheckBox")
self.p2CheckBox = QtWidgets.QCheckBox(self.settingsContainer)
self.p2CheckBox.setGeometry(QtCore.QRect(32, 785, 204, 24))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setItalic(True)
font.setWeight(50)
self.p2CheckBox.setFont(font)
self.p2CheckBox.setStyleSheet("")
self.p2CheckBox.setObjectName("p2CheckBox")
self.p1LineEdit = QtWidgets.QLineEdit(self.settingsContainer)
self.p1LineEdit.setGeometry(QtCore.QRect(240, 750, 50, 27))
self.p1LineEdit.setMaximumSize(QtCore.QSize(373, 16777215))
self.p1LineEdit.setObjectName("p1LineEdit")
self.p2LineEdit = QtWidgets.QLineEdit(self.settingsContainer)
self.p2LineEdit.setGeometry(QtCore.QRect(240, 780, 50, 27))
self.p2LineEdit.setMaximumSize(QtCore.QSize(373, 16777215))
self.p2LineEdit.setObjectName("p2LineEdit")
self.silhouetteEdgeWidthLineEdit = QtWidgets.QLineEdit(self.settingsContainer)
self.silhouetteEdgeWidthLineEdit.setGeometry(QtCore.QRect(240, 570, 50, 27))
self.silhouetteEdgeWidthLineEdit.setMaximumSize(QtCore.QSize(373, 16777215))
self.silhouetteEdgeWidthLineEdit.setObjectName("silhouetteEdgeWidthLineEdit")
self.p3CheckBox = QtWidgets.QCheckBox(self.settingsContainer)
self.p3CheckBox.setGeometry(QtCore.QRect(32, 810, 201, 24))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setItalic(True)
font.setWeight(50)
self.p3CheckBox.setFont(font)
self.p3CheckBox.setStyleSheet("")
self.p3CheckBox.setObjectName("p3CheckBox")
self.p4LineEdit = QtWidgets.QLineEdit(self.settingsContainer)
self.p4LineEdit.setGeometry(QtCore.QRect(240, 300, 50, 27))
self.p4LineEdit.setMaximumSize(QtCore.QSize(373, 16777215))
self.p4LineEdit.setObjectName("p4LineEdit")
self.p4CheckBox = QtWidgets.QCheckBox(self.settingsContainer)
self.p4CheckBox.setGeometry(QtCore.QRect(50, 303, 151, 24))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setItalic(True)
font.setWeight(50)
self.p4CheckBox.setFont(font)
self.p4CheckBox.setStyleSheet("")
self.p4CheckBox.setObjectName("p4CheckBox")
self.p5CheckBox = QtWidgets.QCheckBox(self.settingsContainer)
self.p5CheckBox.setGeometry(QtCore.QRect(32, 90, 190, 24))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setItalic(True)
font.setWeight(50)
self.p5CheckBox.setFont(font)
self.p5CheckBox.setStyleSheet("")
self.p5CheckBox.setObjectName("p5CheckBox")
self.translateMeshZPositiveCheckBox = QtWidgets.QCheckBox(self.settingsContainer)
self.translateMeshZPositiveCheckBox.setGeometry(QtCore.QRect(32, 493, 171, 24))
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
self.translateMeshZPositiveCheckBox.setFont(font)
self.translateMeshZPositiveCheckBox.setStyleSheet("")
self.translateMeshZPositiveCheckBox.setObjectName("translateMeshZPositiveCheckBox")
self.p8LineEdit = QtWidgets.QLineEdit(self.settingsContainer)
self.p8LineEdit.setEnabled(True)
self.p8LineEdit.setGeometry(QtCore.QRect(240, 118, 50, 27))
self.p8LineEdit.setMaximumSize(QtCore.QSize(373, 16777215))
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.p8LineEdit.setFont(font)
self.p8LineEdit.setObjectName("p8LineEdit")
self.planarBackgroundCheckBox = QtWidgets.QCheckBox(self.settingsContainer)
self.planarBackgroundCheckBox.setGeometry(QtCore.QRect(32, 462, 187, 24))
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
self.planarBackgroundCheckBox.setFont(font)
self.planarBackgroundCheckBox.setStyleSheet("")
self.planarBackgroundCheckBox.setObjectName("planarBackgroundCheckBox")
self.p8CheckBox = QtWidgets.QCheckBox(self.settingsContainer)
self.p8CheckBox.setGeometry(QtCore.QRect(32, 120, 221, 24))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setItalic(True)
font.setWeight(50)
self.p8CheckBox.setFont(font)
self.p8CheckBox.setStyleSheet("")
self.p8CheckBox.setObjectName("p8CheckBox")
self.p5LineEdit = QtWidgets.QLineEdit(self.settingsContainer)
self.p5LineEdit.setGeometry(QtCore.QRect(240, 86, 50, 27))
self.p5LineEdit.setMaximumSize(QtCore.QSize(373, 16777215))
self.p5LineEdit.setObjectName("p5LineEdit")
self.labelProcessing = QtWidgets.QLabel(self.settingsContainer)
self.labelProcessing.setGeometry(QtCore.QRect(9, 30, 141, 32))
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
self.labelProcessing.setFont(font)
self.labelProcessing.setObjectName("labelProcessing")
self.fileTBDCheckBox = QtWidgets.QCheckBox(self.settingsContainer)
self.fileTBDCheckBox.setGeometry(QtCore.QRect(32, 710, 165, 24))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.fileTBDCheckBox.setFont(font)
self.fileTBDCheckBox.setObjectName("fileTBDCheckBox")
self.labelGaussianLow = QtWidgets.QLabel(self.settingsContainer)
self.labelGaussianLow.setGeometry(QtCore.QRect(50, 339, 131, 24))
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(False)
font.setWeight(50)
self.labelGaussianLow.setFont(font)
self.labelGaussianLow.setObjectName("labelGaussianLow")
self.labelGaussianHigh = QtWidgets.QLabel(self.settingsContainer)
self.labelGaussianHigh.setGeometry(QtCore.QRect(50, 373, 131, 18))
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(False)
font.setWeight(50)
self.labelGaussianHigh.setFont(font)
self.labelGaussianHigh.setObjectName("labelGaussianHigh")
self.labelHighFrequencyScaling = QtWidgets.QLabel(self.settingsContainer)
self.labelHighFrequencyScaling.setGeometry(QtCore.QRect(50, 402, 191, 22))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.labelHighFrequencyScaling.sizePolicy().hasHeightForWidth())
self.labelHighFrequencyScaling.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(False)
font.setWeight(50)
self.labelHighFrequencyScaling.setFont(font)
self.labelHighFrequencyScaling.setObjectName("labelHighFrequencyScaling")
self.reliefScaleLineEdit = QtWidgets.QLineEdit(self.settingsContainer)
self.reliefScaleLineEdit.setGeometry(QtCore.QRect(240, 640, 50, 27))
self.reliefScaleLineEdit.setMaximumSize(QtCore.QSize(373, 16777215))
self.reliefScaleLineEdit.setObjectName("reliefScaleLineEdit")
self.labelReliefScale = QtWidgets.QLabel(self.settingsContainer)
self.labelReliefScale.setGeometry(QtCore.QRect(10, 643, 201, 18))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.labelReliefScale.sizePolicy().hasHeightForWidth())
self.labelReliefScale.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.labelReliefScale.setFont(font)
self.labelReliefScale.setObjectName("labelReliefScale")
self.labelGradients = QtWidgets.QLabel(self.settingsContainer)
self.labelGradients.setGeometry(QtCore.QRect(10, 70, 201, 18))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.labelGradients.sizePolicy().hasHeightForWidth())
self.labelGradients.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.labelGradients.setFont(font)
self.labelGradients.setObjectName("labelGradients")
self.labelSilhouettes = QtWidgets.QLabel(self.settingsContainer)
self.labelSilhouettes.setGeometry(QtCore.QRect(10, 528, 201, 18))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.labelSilhouettes.sizePolicy().hasHeightForWidth())
self.labelSilhouettes.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.labelSilhouettes.setFont(font)
self.labelSilhouettes.setObjectName("labelSilhouettes")
self.labelGeometry = QtWidgets.QLabel(self.settingsContainer)
self.labelGeometry.setGeometry(QtCore.QRect(10, 440, 201, 18))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.labelGeometry.sizePolicy().hasHeightForWidth())
self.labelGeometry.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.labelGeometry.setFont(font)
self.labelGeometry.setObjectName("labelGeometry")
self.labelSilhouettEdgeWidth = QtWidgets.QLabel(self.settingsContainer)
self.labelSilhouettEdgeWidth.setGeometry(QtCore.QRect(50, 574, 191, 22))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.labelSilhouettEdgeWidth.sizePolicy().hasHeightForWidth())
self.labelSilhouettEdgeWidth.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
self.labelSilhouettEdgeWidth.setFont(font)
self.labelSilhouettEdgeWidth.setObjectName("labelSilhouettEdgeWidth")
self.p7LineEdit = QtWidgets.QLineEdit(self.settingsContainer)
self.p7LineEdit.setGeometry(QtCore.QRect(240, 870, 50, 27))
self.p7LineEdit.setMaximumSize(QtCore.QSize(373, 16777215))
self.p7LineEdit.setObjectName("p7LineEdit")
self.labelSilhouetteSigma = QtWidgets.QLabel(self.settingsContainer)
self.labelSilhouetteSigma.setGeometry(QtCore.QRect(50, 606, 191, 22))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.labelSilhouetteSigma.sizePolicy().hasHeightForWidth())
self.labelSilhouetteSigma.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
self.labelSilhouetteSigma.setFont(font)
self.labelSilhouetteSigma.setObjectName("labelSilhouetteSigma")
self.silhouetteCheckBox = QtWidgets.QCheckBox(self.settingsContainer)
self.silhouetteCheckBox.setGeometry(QtCore.QRect(50, 547, 131, 24))
font = QtGui.QFont()
font.setFamily("MS Shell Dlg 2")
font.setPointSize(10)
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
self.silhouetteCheckBox.setFont(font)
self.silhouetteCheckBox.setStyleSheet("")
self.silhouetteCheckBox.setObjectName("silhouetteCheckBox")
self.p3LineEdit = QtWidgets.QLineEdit(self.settingsContainer)
self.p3LineEdit.setGeometry(QtCore.QRect(240, 810, 50, 27))
self.p3LineEdit.setMaximumSize(QtCore.QSize(373, 16777215))
self.p3LineEdit.setObjectName("p3LineEdit")
self.p6CheckBox = QtWidgets.QCheckBox(self.settingsContainer)
self.p6CheckBox.setGeometry(QtCore.QRect(32, 840, 201, 24))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setItalic(True)
font.setWeight(50)
self.p6CheckBox.setFont(font)
self.p6CheckBox.setStyleSheet("")
self.p6CheckBox.setObjectName("p6CheckBox")
self.p6LineEdit = QtWidgets.QLineEdit(self.settingsContainer)
self.p6LineEdit.setGeometry(QtCore.QRect(240, 840, 50, 27))
self.p6LineEdit.setMaximumSize(QtCore.QSize(373, 16777215))
self.p6LineEdit.setObjectName("p6LineEdit")
self.silhouetteSigmaLineEdit = QtWidgets.QLineEdit(self.settingsContainer)
self.silhouetteSigmaLineEdit.setGeometry(QtCore.QRect(240, 600, 50, 27))
self.silhouetteSigmaLineEdit.setMaximumSize(QtCore.QSize(373, 16777215))
self.silhouetteSigmaLineEdit.setObjectName("silhouetteSigmaLineEdit")
self.p7CheckBox = QtWidgets.QCheckBox(self.settingsContainer)
self.p7CheckBox.setGeometry(QtCore.QRect(32, 870, 201, 24))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setItalic(True)
font.setWeight(50)
self.p7CheckBox.setFont(font)
self.p7CheckBox.setStyleSheet("")
self.p7CheckBox.setObjectName("p7CheckBox")
self.labelFileOutput = QtWidgets.QLabel(self.settingsContainer)
self.labelFileOutput.setGeometry(QtCore.QRect(10, 690, 201, 18))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.labelFileOutput.sizePolicy().hasHeightForWidth())
self.labelFileOutput.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.labelFileOutput.setFont(font)
self.labelFileOutput.setObjectName("labelFileOutput")
self.gridLayout_2.addWidget(self.settingsContainer, 0, 0, 1, 1)
MainWindow.setCentralWidget(self.centralWidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1171, 26))
font = QtGui.QFont()
font.setPointSize(13)
self.menubar.setFont(font)
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionOpen = QtWidgets.QAction(MainWindow)
self.actionOpen.setObjectName("actionOpen")
self.menuFile.addAction(self.actionOpen)
self.menubar.addAction(self.menuFile.menuAction())
self.retranslateUi(MainWindow)
self.overallTabsContainer.setCurrentIndex(0)
self.imageTabs.setCurrentIndex(0)
self.modelTabs.setCurrentIndex(0)
self.workbenchTabs.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
MainWindow.setTabOrder(self.processButton, self.p5CheckBox)
MainWindow.setTabOrder(self.p5CheckBox, self.p5LineEdit)
MainWindow.setTabOrder(self.p5LineEdit, self.p8CheckBox)
MainWindow.setTabOrder(self.p8CheckBox, self.p8LineEdit)
MainWindow.setTabOrder(self.p8LineEdit, self.gradientThresholdCheckBox)
MainWindow.setTabOrder(self.gradientThresholdCheckBox, self.gradientThresholdLineEdit)
MainWindow.setTabOrder(self.gradientThresholdLineEdit, self.attenuationCheckBox)
MainWindow.setTabOrder(self.attenuationCheckBox, self.attenuationFactorLineEdit)
MainWindow.setTabOrder(self.attenuationFactorLineEdit, self.attenuationDecayLineEdit)
MainWindow.setTabOrder(self.attenuationDecayLineEdit, self.unsharpMaskingCheckBox)
MainWindow.setTabOrder(self.unsharpMaskingCheckBox, self.p4CheckBox)
MainWindow.setTabOrder(self.p4CheckBox, self.p4LineEdit)
MainWindow.setTabOrder(self.p4LineEdit, self.unsharpGaussianLowLineEdit)
MainWindow.setTabOrder(self.unsharpGaussianLowLineEdit, self.unsharpGaussianHighLineEdit)
MainWindow.setTabOrder(self.unsharpGaussianHighLineEdit, self.unsharpHFScaleLineEdit)
MainWindow.setTabOrder(self.unsharpHFScaleLineEdit, self.planarBackgroundCheckBox)
MainWindow.setTabOrder(self.planarBackgroundCheckBox, self.translateMeshZPositiveCheckBox)
MainWindow.setTabOrder(self.translateMeshZPositiveCheckBox, self.silhouetteCheckBox)
MainWindow.setTabOrder(self.silhouetteCheckBox, self.silhouetteEdgeWidthLineEdit)
MainWindow.setTabOrder(self.silhouetteEdgeWidthLineEdit, self.silhouetteSigmaLineEdit)
MainWindow.setTabOrder(self.silhouetteSigmaLineEdit, self.reliefScaleLineEdit)
MainWindow.setTabOrder(self.reliefScaleLineEdit, self.fileTBDCheckBox)
MainWindow.setTabOrder(self.fileTBDCheckBox, self.p1CheckBox)
MainWindow.setTabOrder(self.p1CheckBox, self.p1LineEdit)
| |
#!/usr/bin/env python
"""Geoname Annotator"""
from __future__ import absolute_import
import math
import re
import sqlite3
from collections import defaultdict
from .annotator import Annotator, AnnoTier, AnnoSpan
from .ngram_annotator import NgramAnnotator
from .ne_annotator import NEAnnotator
from geopy.distance import great_circle
from .maximum_weight_interval_set import Interval, find_maximum_weight_interval_set
from .get_database_connection import get_database_connection
from . import geoname_classifier
import logging
from six.moves import zip
logging.basicConfig(level=logging.ERROR, format='%(asctime)s %(message)s')
logger = logging.getLogger(__name__)
# Span texts that must never be treated as geonames: month and weekday names,
# compass directions, and common English words that collide with place names.
# Matching is done against the raw (capitalized) span text.
blocklist = set([
    'January', 'February', 'March', 'April', 'May', 'June', 'July',
    'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday',
    'August', 'September', 'October', 'November', 'December',
    'North', 'East', 'West', 'South',
    'Northeast', 'Southeast', 'Northwest', 'Southwest',
    'Eastern', 'Western', 'Southern', 'Northern',
    # NOTE(review): 'About' appears twice below; harmless in a set literal.
    'About', 'Many', 'See', 'Also', 'As', 'In', 'About', 'Health', 'Some',
    'International', 'City', 'World', 'Federal', 'Federal District', 'The city',
    'British', 'Russian',
    'Valley', 'University', 'Center', 'Central',
    # These locations could be legitimate,
    # but they are rarely referred to in a context
    # where their location is relevant.
    'National Institutes of Health',
    'Centers for Disease Control',
    'Ministry of Health and Sanitation',
    '1',
])
# Containment levels indicate which properties must match when determining
# whether a geoname of a given containment level contains another geoname.
# The admin codes generally correspond to states, provinces and cities.
CONTAINMENT_LEVELS = [
    'country_code',
    'admin1_code',
    'admin2_code',
    'admin3_code',
    'admin4_code'
]


def location_contains(loc_outer, loc_inner):
    """
    Check whether the first geoname contains the second.

    Returns an integer containment level: 0 means no containment (sibling
    locations and identical locations both yield 0). The level reflects the
    specificity of the outer location, e.g. USA yields a smaller number than
    Texas. Containment is only detected when the outer location carries an
    ADM* or PCL* feature code, which covers most countries, states and
    districts.
    """
    # The country code must match for every level of containment, so check it
    # first for efficiency.
    if loc_outer.country_code == '' or loc_outer.country_code != loc_inner.country_code:
        return 0
    feature_code = loc_outer.feature_code
    # ADM1..ADM4 map to containment levels 2..5; PCL* (country-level) is 1.
    adm_levels = {'ADM1': 2, 'ADM2': 3, 'ADM3': 4, 'ADM4': 5}
    if feature_code in adm_levels:
        outer_feature_level = adm_levels[feature_code]
    elif re.match("^PCL.", feature_code):
        outer_feature_level = 1
    else:
        return 0
    # Every admin code up to the outer location's level must be present on the
    # outer location and identical on the inner one.
    for prop in CONTAINMENT_LEVELS[1:outer_feature_level]:
        outer_value = loc_outer[prop]
        if outer_value == '' or outer_value != loc_inner[prop]:
            return 0
    # A location never contains itself.
    if loc_outer.geonameid == loc_inner.geonameid:
        return 0
    return outer_feature_level
class GeoSpan(AnnoSpan):
    """An annotation span bound to one candidate geoname."""

    def __init__(self, start, end, doc, geoname):
        metadata = {'geoname': geoname}
        super(GeoSpan, self).__init__(start, end, doc, metadata=metadata)
        self.geoname = geoname
        # Use the geoname's canonical name as the span label.
        self.label = geoname.name

    def to_dict(self):
        d = super(GeoSpan, self).to_dict()
        d['geoname'] = self.geoname.to_dict()
        return d
# Columns read directly from the geonames database rows.
GEONAME_ATTRS = [
    'geonameid',
    'name',
    'feature_code',
    'country_code',
    'admin1_code',
    'admin2_code',
    'admin3_code',
    'admin4_code',
    'longitude',
    'latitude',
    'population',
    'asciiname',
    'names_used',
    'name_count']
# Names of containing administrative areas, attached after row creation.
ADMINNAME_ATTRS = [
    'country_name',
    'admin1_name',
    'admin2_name',
    'admin3_name']


class GeonameRow(object):
    """A single candidate geoname hydrated from a sqlite row."""
    __slots__ = GEONAME_ATTRS + ADMINNAME_ATTRS + [
        'alternate_locations',
        'spans',
        'parents',
        'score',
        'lat_long',
        'high_confidence']

    def __init__(self, sqlite3_row):
        # Copy only the known geoname columns off the row.
        for column in sqlite3_row.keys():
            if column in GEONAME_ATTRS:
                setattr(self, column, sqlite3_row[column])
        self.lat_long = (self.latitude, self.longitude,)
        self.alternate_locations = set()
        self.spans = set()
        self.parents = set()
        self.score = None

    def add_spans(self, span_text_to_spans):
        # names_used is a ";"-delimited list of the alternate names matched.
        for used_name in self.names_used.split(';'):
            for span in span_text_to_spans[used_name.lower().strip()]:
                self.spans.add(span)

    def __hash__(self):
        # Identity hash; matches the default identity-based equality.
        return id(self)

    def __repr__(self):
        return self.name

    def __getitem__(self, key):
        # Allow dict-style access to any attribute.
        return getattr(self, key)

    def to_dict(self):
        result = {key: self[key] for key in GEONAME_ATTRS}
        result.update(
            (key, self[key]) for key in ADMINNAME_ATTRS if hasattr(self, key))
        result['parents'] = [parent.to_dict() for parent in self.parents]
        result['score'] = self.score
        return result
class GeonameFeatures(object):
    """
    This represents the aspects of a candidate geoname that are used to
    determine whether it is being referenced.
    """
    # The feature name array is used to maintain the order of the
    # values in the feature vector. Classifier weights depend on this exact
    # ordering, so do not reorder entries.
    feature_names = [
        'log_population',
        'name_count',
        'num_spans',
        'max_span_length',
        'cannonical_name_used',
        'loc_NE_portion',
        'other_NE_portion',
        'noun_portion',
        'other_pos_portion',
        'num_tokens',
        'ambiguity',
        'PPL_feature_code',
        'ADM_feature_code',
        'CONT_feature_code',
        'other_feature_code',
        'combined_span_parents',
        # contextual features
        'close_locations',
        'very_close_locations',
        'containing_locations',
        'max_containment_level',
        # high_confidence indicates the base feature set received a high score.
        # It is a useful feature for preventing high confidence geonames
        # from receiving low final scores when they lack contextual cues -
        # for example, when they are the only location mentioned.
        'high_confidence',
    ]

    def __init__(self, geoname, spans_to_nes, span_to_tokens):
        # geoname: the GeonameRow being scored.
        # spans_to_nes: maps each span to overlapping named-entity spans.
        # span_to_tokens: maps each span to its token spans.
        # NOTE(review): assumes geoname.spans is non-empty and that every span
        # yields at least one token (divisions below would raise otherwise) -
        # confirm against the caller that filters out span-less geonames.
        self.geoname = geoname
        # The set of geonames that are mentioned in proximity to the spans
        # corresponding to this feature.
        # This will be populated by the add_contextual_features function.
        self.nearby_mentions = set()
        d = {}
        d['log_population'] = math.log(geoname.population + 1)
        # Geonames with lots of alternate names
        # tend to be the ones most commonly referred to.
        d['name_count'] = geoname.name_count
        d['num_spans'] = len(geoname.spans)
        d['max_span_length'] = max([
            len(span.text) for span in geoname.spans])

        def cannonical_name_match(span, geoname):
            # Score how much of the geoname's canonical name is covered by the
            # span's first leaf text (0 when the text is not in the name).
            first_leaf = next(span.iterate_leaf_base_spans(), None)
            if first_leaf:
                span_text = first_leaf.text
            else:
                span_text = span.text
            span_in_name = span_text in geoname.name or span_text in geoname.asciiname
            return (float(len(span_text)) if span_in_name else 0) / len(geoname.name)
        d['cannonical_name_used'] = max([
            cannonical_name_match(span, geoname)
            for span in geoname.spans
        ])
        # Fraction of spans overlapping location vs. non-location named entities.
        loc_NEs_overlap = 0
        other_NEs_overlap = 0
        total_spans = len(geoname.spans)
        for span in geoname.spans:
            for ne_span in spans_to_nes[span]:
                if ne_span.label == 'GPE' or ne_span.label == 'LOC':
                    loc_NEs_overlap += 1
                else:
                    other_NEs_overlap += 1
        d['loc_NE_portion'] = float(loc_NEs_overlap) / total_spans
        d['other_NE_portion'] = float(other_NEs_overlap) / total_spans
        # Part-of-speech mix over all tokens in the geoname's spans.
        noun_pos_tags = 0
        other_pos_tags = 0
        pos_tags = 0
        for span in geoname.spans:
            for token_span in span_to_tokens[span]:
                token = token_span.token
                pos_tags += 1
                # Nouns (NN*) and foreign words (FW) count toward noun_portion.
                if token.tag_.startswith("NN") or token.tag_ == "FW":
                    noun_pos_tags += 1
                else:
                    other_pos_tags += 1
        d['combined_span_parents'] = len(geoname.parents)
        d['noun_portion'] = float(noun_pos_tags) / pos_tags
        d['other_pos_portion'] = float(other_pos_tags) / pos_tags
        d['num_tokens'] = pos_tags
        # How many other geonames share this geoname's spans.
        d['ambiguity'] = len(geoname.alternate_locations)
        # One-hot encoding of the geoname's feature code family.
        feature_code = geoname.feature_code
        if feature_code.startswith('PPL'):
            d['PPL_feature_code'] = 1
        elif feature_code.startswith('ADM'):
            d['ADM_feature_code'] = 1
        elif feature_code.startswith('CONT'):
            d['CONT_feature_code'] = 1
        else:
            d['other_feature_code'] = 1
        # Any feature not set above defaults to 0.
        self._values = [0] * len(self.feature_names)
        self.set_values(d)

    def set_value(self, feature_name, value):
        # Set a single feature by name; raises ValueError for unknown names.
        self._values[self.feature_names.index(feature_name)] = value

    def set_values(self, value_dict):
        # Bulk-set features; names absent from value_dict keep their value.
        for idx, name in enumerate(self.feature_names):
            if name in value_dict:
                self._values[idx] = value_dict[name]

    def set_contextual_features(self):
        """
        GeonameFeatures are initialized with only values that can be extracted
        from the geoname database and span. This extends the GeonameFeature
        with values that require information from nearby_mentions.
        """
        geoname = self.geoname
        close_locations = 0
        very_close_locations = 0
        containing_locations = 0
        max_containment_level = 0
        for recently_mentioned_geoname in self.nearby_mentions:
            if recently_mentioned_geoname == geoname:
                continue
            # Containment in either direction counts.
            containment_level = max(
                location_contains(geoname, recently_mentioned_geoname),
                location_contains(recently_mentioned_geoname, geoname))
            if containment_level > 0:
                containing_locations += 1
            if containment_level > max_containment_level:
                max_containment_level = containment_level
            # Great-circle distance in km; thresholds at 400km and 100km.
            distance = great_circle(
                recently_mentioned_geoname.lat_long, geoname.lat_long
            ).kilometers
            if distance < 400:
                close_locations += 1
            if distance < 100:
                very_close_locations += 1
        self.set_values(dict(
            close_locations=close_locations,
            very_close_locations=very_close_locations,
            containing_locations=containing_locations,
            max_containment_level=max_containment_level))

    def to_dict(self):
        # Feature name -> value mapping, in feature_names order.
        return {
            key: value
            for key, value in zip(self.feature_names, self._values)}

    def values(self):
        # The raw feature vector, ordered per feature_names.
        return self._values
class GeonameAnnotator(Annotator):
def __init__(self, custom_classifier=None):
self.connection = get_database_connection()
self.connection.row_factory = sqlite3.Row
if custom_classifier:
self.geoname_classifier = custom_classifier
else:
self.geoname_classifier = geoname_classifier
def get_candidate_geonames(self, doc):
"""
Returns an array of geoname dicts correponding to locations that the
document may refer to.
The dicts are extended with lists of associated AnnoSpans.
"""
if 'ngrams' not in doc.tiers:
doc.add_tiers(NgramAnnotator())
logger.info('Ngrams annotated')
if 'nes' not in doc.tiers:
doc.add_tiers(NEAnnotator())
logger.info('Named entities annotated')
def is_possible_geoname(text):
if text in blocklist:
return False
# We can rule out a few FPs and make the query much faster
# by only looking at capitalized names.
if text[0] != text[0].upper():
return False
if len(text) < 3 and text != text.upper():
return False
return True
all_ngrams = list(set([span.text.lower()
for span in doc.tiers['ngrams'].spans
if is_possible_geoname(span.text)
]))
logger.info('%s ngrams extracted' % len(all_ngrams))
cursor = self.connection.cursor()
geoname_results = list(cursor.execute('''
SELECT
geonames.*,
count AS name_count,
group_concat(alternatename, ";") AS names_used
FROM geonames
JOIN alternatename_counts USING ( geonameid )
JOIN alternatenames USING ( geonameid )
WHERE alternatename_lemmatized IN
(''' + ','.join('?' for x in all_ngrams) + ''')
GROUP BY geonameid''', all_ngrams))
logger.info('%s geonames fetched' % len(geoname_results))
geoname_results = [GeonameRow(g) for g in geoname_results]
# Associate spans with the geonames.
# This is done up front so span information can be used in the scoring
# function
span_text_to_spans = defaultdict(list)
for span in doc.tiers['ngrams'].spans:
if is_possible_geoname(span.text):
span_text_to_spans[span.text.lower()].append(span)
candidate_geonames = []
for geoname in geoname_results:
geoname.add_spans(span_text_to_spans)
# In rare cases geonames may have no matching spans because
# sqlite unicode equivalency rules match geonames that use different
# characters the document spans used to query them.
# These geonames are ignored.
if len(geoname.spans) > 0:
candidate_geonames.append(geoname)
# Add combined spans to locations that are | |
<filename>core/botCore.py
#! /usr/bin/env python3
import os
import sys
import time
import json
import os.path
import hashlib
import logging
import threading
from decimal import Decimal
from flask_socketio import SocketIO
from flask import Flask, render_template, url_for, request
from binance_api import api_master_rest_caller
from binance_api import api_master_socket_caller
from . import trader
# Indicator types whose data is nested one level deeper (keyed by sub-indicator).
MULTI_DEPTH_INDICATORS = ['ema', 'sma', 'rma', 'order']
# Initialize globals.
## Setup flask app/socket.
APP = Flask(__name__)
SOCKET_IO = SocketIO(APP)
## Base core object; assigned by the bootstrap code that starts the bot.
core_object = None
started_updater = False
## IP/port pair globals, filled in before the web server starts.
host_ip = ''
host_port = ''
## Traders cache file name.
# NOTE(review): "CAHCE" is a typo, but the name is referenced throughout this
# module, so it is kept as-is to avoid breaking callers.
CAHCE_FILES = 'traders.json'
@APP.context_processor
def override_url_for():
    """Template context processor that swaps in the cache-busting url_for."""
    return dict(url_for=dated_url_for)
def dated_url_for(endpoint, **values):
    """url_for wrapper that appends a mtime-based query param to static assets.

    The appended 'q' value changes whenever the file changes on disk, which
    prevents browsers from serving stale cached copies.
    """
    if endpoint == 'static':
        filename = values.get('filename', None)
        if filename:
            file_path = os.path.join(APP.root_path, endpoint, filename)
            values['q'] = int(os.stat(file_path).st_mtime)
    return url_for(endpoint, **values)
@APP.route('/', methods=['GET'])
def control_panel():
    """Serve the main control panel page, starting the live updater once."""
    global started_updater
    # Spawn the socket-updater thread on the first page load only.
    if not started_updater:
        started_updater = True
        threading.Thread(target=web_updater).start()
    # Data the page needs to open its own socket connection.
    start_up_data = {
        'host': {'IP': host_ip, 'Port': host_port},
        'market_symbols': core_object.trading_markets
    }
    return render_template('main_page.html', data=start_up_data)
@APP.route('/rest-api/v1/trader_update', methods=['POST'])
def update_trader():
    """REST endpoint used by the UI to start or pause an individual trader.

    Expects JSON with 'market' and 'action' keys. Returns a JSON string with
    'call' True on success, or False plus a 'message' code on failure.
    """
    data = request.get_json()
    ## Check if specified bot exists.
    current_trader = api_error_check(data)
    # Fix: compare against None with 'is', not '==' (PEP 8).
    if current_trader is None:
        ## No trader therefore return false.
        return(json.dumps({'call':False, 'message':'INVALID_TRADER'}))
    elif data['action'] == 'start':
        ## Resume a trader that was manually paused.
        if current_trader.state_data['runtime_state'] == 'FORCE_PAUSE':
            current_trader.state_data['runtime_state'] = 'RUN'
    elif data['action'] == 'pause':
        ## Manually pause a running trader.
        if current_trader.state_data['runtime_state'] == 'RUN':
            current_trader.state_data['runtime_state'] = 'FORCE_PAUSE'
    else:
        ## If action was not found return false.
        return(json.dumps({'call':False, 'message':'INVALID_ACTION'}))
    # A start/pause on a trader in the wrong state is a silent no-op by design.
    return(json.dumps({'call':True}))
@APP.route('/rest-api/v1/get_trader_charting', methods=['GET'])
def get_trader_charting():
    """REST endpoint returning candles plus trimmed indicators for a market.

    Query params: 'market' (trader pair) and 'limit' (max candles returned).
    """
    market = request.args.get('market')
    limit = int(request.args.get('limit'))
    data = {'market':market}
    ## Check if specified bot exists.
    current_trader = api_error_check(data)
    # Fix: compare against None with 'is', not '==' (PEP 8).
    if current_trader is None:
        ## No trader therefore return false.
        return(json.dumps({'call':False, 'message':'INVALID_TRADER'}))
    candle_data = core_object.get_trader_candles(current_trader.print_pair)[:limit]
    indicator_data = core_object.get_trader_indicators(current_trader.print_pair)
    # NOTE(review): uses the last returned candle's timestamp as the indicator
    # cutoff - assumes candles are ordered newest-first; confirm upstream.
    short_indicator_data = shorten_indicators(indicator_data, candle_data[-1][0])
    return(json.dumps({'call':True, 'data':{'market':market, 'indicators':short_indicator_data, 'candles':candle_data}}))
@APP.route('/rest-api/v1/get_trader_indicators', methods=['GET'])
def get_trader_indicators():
    """REST endpoint returning the full indicator data for one market."""
    market = request.args.get('market')
    # NOTE(review): 'limit' is parsed but unused here; kept so a missing
    # 'limit' query param fails the same way as on the sibling endpoints.
    limit = int(request.args.get('limit'))
    data = {'market':market}
    ## Check if specified bot exists.
    current_trader = api_error_check(data)
    # Fix: compare against None with 'is', not '==' (PEP 8).
    if current_trader is None:
        ## No trader therefore return false.
        return(json.dumps({'call':False, 'message':'INVALID_TRADER'}))
    indicator_data = core_object.get_trader_indicators(current_trader.print_pair)
    return(json.dumps({'call':True, 'data':{'market':market, 'indicators':indicator_data}}))
@APP.route('/rest-api/v1/get_trader_candles', methods=['GET'])
def get_trader_candles():
    """REST endpoint returning up to 'limit' candles for one market."""
    market = request.args.get('market')
    limit = int(request.args.get('limit'))
    data = {'market':market}
    ## Check if specified bot exists.
    current_trader = api_error_check(data)
    # Fix: compare against None with 'is', not '==' (PEP 8).
    if current_trader is None:
        ## No trader therefore return false.
        return(json.dumps({'call':False, 'message':'INVALID_TRADER'}))
    candle_data = core_object.get_trader_candles(current_trader.print_pair)[:limit]
    return(json.dumps({'call':True, 'data':{'market':market, 'candles':candle_data}}))
@APP.route('/rest-api/v1/test', methods=['GET'])
def test_rest_call():
    """Simple liveness check for the REST API."""
    return json.dumps({'call':True, 'message':'HELLO WORLD!'})
def shorten_indicators(indicators, end_time, multi_depth_indicators=None):
    """Filter indicator data down to points strictly newer than end_time.

    :param indicators: dict of indicator name -> [[timestamp, value], ...] or,
        for multi-depth indicators, name -> {sub_name: [[timestamp, value], ...]}.
    :param end_time: points with a timestamp <= end_time are dropped.
    :param multi_depth_indicators: indicator names treated as nested one level
        deeper; defaults to the module-level MULTI_DEPTH_INDICATORS list
        (backward-compatible generalization).
    :returns: a new dict with the same structure containing only kept points.
    """
    if multi_depth_indicators is None:
        multi_depth_indicators = MULTI_DEPTH_INDICATORS
    base_indicators = {}
    for ind in indicators:
        if ind in multi_depth_indicators:
            sub_result = {}
            for sub_ind in indicators[ind]:
                kept = []
                for val in indicators[ind][sub_ind]:
                    # NOTE(review): 'order' timestamps appear to be stored in
                    # seconds and are scaled to ms here - confirm upstream.
                    timestamp = val[0] * 1000 if ind == 'order' else val[0]
                    if timestamp > end_time:
                        kept.append([timestamp, val[1]])
                sub_result[sub_ind] = kept
            base_indicators[ind] = sub_result
        else:
            base_indicators[ind] = [
                [val[0], val[1]] for val in indicators[ind] if val[0] > end_time]
    return base_indicators
def api_error_check(data):
    """Return the trader whose print_pair matches data['market'], else None."""
    return next(
        (trader_obj for trader_obj in core_object.trader_objects
         if trader_obj.print_pair == data['market']),
        None)
def web_updater():
    """Background loop that pushes trader data to the UI over the websocket.

    Polls the core object roughly every 0.8s and emits only when the
    serialized trader data actually changed since the last emit.
    """
    last_hash = None
    while True:
        if core_object.coreState == 'RUN':
            trader_data = core_object.get_trader_data()
            # BUG FIX: hashlib.md5(...) returns a *new object* every call, so
            # comparing the md5 objects with != was always true and the data
            # was re-emitted on every poll. Compare hex digests instead.
            curr_hash = hashlib.md5(str(trader_data).encode()).hexdigest()
            if last_hash != curr_hash:
                ## Update any new changes via socket.
                last_hash = curr_hash
                total_bulk_data = []
                for trader in trader_data:
                    # Flatten the nested per-trader sections into one dict.
                    bulk_data = {'market': trader['market'],
                                 'trade_recorder': trader['trade_recorder'],
                                 'wallet_pair': trader['wallet_pair']}
                    bulk_data.update(trader['custom_conditions'])
                    bulk_data.update(trader['market_activity'])
                    bulk_data.update(trader['market_prices'])
                    bulk_data.update(trader['state_data'])
                    total_bulk_data.append(bulk_data)
                SOCKET_IO.emit('current_traders_data', {'data':total_bulk_data})
        time.sleep(.8)
class BotCore():
def __init__(self, settings, logs_dir, cache_dir):
# Initilization for the bot core managment object.
logging.info('[BotCore] Initilizing the BotCore object.')
## Setup binance REST and socket API.
self.rest_api = api_master_rest_caller.Binance_REST(settings['public_key'], settings['private_key'])
self.socket_api = api_master_socket_caller.Binance_SOCK()
## Setup the logs/cache dir locations.
self.logs_dir = logs_dir
self.cache_dir = cache_dir
## Setup run type, market type, and update bnb balance.
self.run_type = settings['run_type']
self.market_type = settings['market_type']
self.update_bnb_balance = settings['update_bnb_balance']
## Setup max candle/depth setting.
self.max_candles = settings['max_candles']
self.max_depth = settings['max_depth']
## Get base quote pair (This prevents multiple different pairs from conflicting.)
pair_one = settings['trading_markets'][0]
self.quote_asset = pair_one[:pair_one.index('-')]
self.base_currency = settings['trading_currency']
self.candle_Interval = settings['trader_interval']
## Initilize base trader settings.
self.trader_objects = []
self.trading_markets = settings['trading_markets']
## Initilize core state
self.coreState = 'READY'
    def start(self):
        """Bring the core online: validate markets, build traders, open the
        websocket, load wallets and cached state, then start all manager
        threads. Sets coreState to 'RUN' when finished."""
        logging.info('[BotCore] Starting the BotCore object.')
        self.coreState = 'SETUP'
        ## Validate the configured markets against the live exchange info.
        found_markets = []
        not_supported = []
        for market in self.rest_api.get_exchangeInfo()['symbols']:
            fmtMarket = '{0}-{1}'.format(market['quoteAsset'], market['baseAsset'])
            # If the current market is not in the trading markets list then skip.
            if not fmtMarket in self.trading_markets:
                continue
            found_markets.append(fmtMarket)
            # Reject pairs the exchange does not allow for the configured market type.
            if (self.market_type == 'MARGIN' and market['isMarginTradingAllowed'] == False) or (self.market_type == 'SPOT' and market['isSpotTradingAllowed'] == False):
                not_supported.append(fmtMarket)
                continue
            # Lot-size precision (number of decimal places for quantities).
            # NOTE(review): relies on fixed filter ordering ([2] LOT_SIZE,
            # [0] PRICE_FILTER, [3] MIN_NOTIONAL) - Binance does not guarantee
            # filter order; confirm before upgrading the API version.
            if float(market['filters'][2]['minQty']) < 1.0:
                minQuantBase = (Decimal(market['filters'][2]['minQty'])).as_tuple()
                lS = abs(int(len(minQuantBase.digits)+minQuantBase.exponent))+1
            else: lS = 0
            # Price precision for the market.
            tickSizeBase = (Decimal(market['filters'][0]['tickSize'])).as_tuple()
            tS = abs(int(len(tickSizeBase.digits)+tickSizeBase.exponent))+1
            # The market's minimal notional value.
            mN = float(market['filters'][3]['minNotional'])
            # Collect all rules into one object to pass to the trader.
            market_rules = {'LOT_SIZE':lS, 'TICK_SIZE':tS, 'MINIMUM_NOTATION':mN}
            # Initialize the trader object and set up its initial required data.
            traderObject = trader.BaseTrader(market['quoteAsset'], market['baseAsset'], self.rest_api, socket_api=self.socket_api)
            traderObject.setup_initial_values(self.market_type, self.run_type, market_rules)
            self.trader_objects.append(traderObject)
        ## Warn about configured markets that don't exist on the exchange.
        if len(self.trading_markets) != len(found_markets):
            no_market_text = ''
            for market in [market for market in self.trading_markets if market not in found_markets]:
                no_market_text+=str(market)+', '
            logging.warning('Following pairs dont exist: {}'.format(no_market_text[:-2]))
        ## Warn about markets that don't support the configured market type.
        if len(not_supported) > 0:
            not_support_text = ''
            for market in not_supported:
                not_support_text += ' '+str(market)
            logging.warning('[BotCore] Following market pairs are not supported for {}: {}'.format(self.market_type, not_support_text))
        valid_tading_markets = [market for market in found_markets if market not in not_supported]
        ## Set up the binance socket streams for each valid market.
        for market in valid_tading_markets:
            self.socket_api.set_candle_stream(symbol=market, interval=self.candle_Interval)
            self.socket_api.set_manual_depth_stream(symbol=market, update_speed='1000ms')
        # A user data stream is only needed when trading with real funds.
        if self.run_type == 'REAL':
            self.socket_api.set_userDataStream(self.rest_api, self.market_type)
        self.socket_api.BASE_CANDLE_LIMIT = self.max_candles
        self.socket_api.BASE_DEPTH_LIMIT = self.max_depth
        self.socket_api.build_query()
        self.socket_api.set_live_and_historic_combo(self.rest_api)
        self.socket_api.start()
        # Load the wallets (real balances, or a simulated quote-asset balance).
        if self.run_type == 'REAL':
            user_info = self.rest_api.get_account(self.market_type)
            # TODO(review): check whether this request succeeded; binance can
            # return error 'code':-1021 ("Timestamp for this request was
            # 1000ms ahead of the server's time.") when clocks are skewed.
            if self.market_type == 'SPOT':
                wallet_balances = user_info['balances']
            elif self.market_type == 'MARGIN':
                wallet_balances = user_info['userAssets']
            current_tokens = {}
            # Keep only assets with a non-zero (free + locked) balance.
            for balance in wallet_balances:
                total_balance = (float(balance['free']) + float(balance['locked']))
                if total_balance > 0:
                    current_tokens.update({balance['asset']:[
                        float(balance['free']),
                        float(balance['locked'])]})
        else:
            current_tokens = {self.quote_asset:[float(self.base_currency), 0.0]}
        # Load cached trader data, if a cache file exists.
        cached_traders_data = None
        if os.path.exists(self.cache_dir+CAHCE_FILES):
            with open(self.cache_dir+CAHCE_FILES, 'r') as f:
                cached_traders_data = json.load(f)['data']
        ## Set up the trader objects and start them.
        logging.info('[BotCore] Starting the trader objects.')
        for trader_ in self.trader_objects:
            currSymbol = "{0}{1}".format(trader_.base_asset, trader_.quote_asset)
            # Update trader with cached data (to resume trades/keep records of trades.)
            if cached_traders_data != '' and cached_traders_data:
                for cached_trader in cached_traders_data:
                    # Cached market keys are 'QUOTE-BASE'; flip to 'BASEQUOTE'.
                    m_split = cached_trader['market'].split('-')
                    if (m_split[1]+m_split[0]) == currSymbol:
                        trader_.configuration = cached_trader['configuration']
                        trader_.custom_conditional_data = cached_trader['custom_conditions']
                        trader_.market_activity = cached_trader['market_activity']
                        trader_.trade_recorder = cached_trader['trade_recorder']
                        trader_.state_data = cached_trader['state_data']
            # Give each trader only the balances for its own asset pair.
            wallet_pair = {}
            if trader_.quote_asset in current_tokens:
                wallet_pair.update({trader_.quote_asset:current_tokens[trader_.quote_asset]})
            if trader_.base_asset in current_tokens:
                wallet_pair.update({trader_.base_asset:current_tokens[trader_.base_asset]})
            trader_.start(self.base_currency, wallet_pair)
        # Launch the background manager threads.
        logging.debug('[BotCore] Starting trader manager')
        TM_thread = threading.Thread(target=self._trader_manager)
        TM_thread.start()
        if self.update_bnb_balance:
            logging.debug('[BotCore] Starting BNB manager')
            BNB_thread = threading.Thread(target=self._bnb_manager)
            BNB_thread.start()
        logging.debug('[BotCore] Starting connection manager thread.')
        CM_thread = threading.Thread(target=self._connection_manager)
        CM_thread.start()
        logging.debug('[BotCore] Starting file manager thread.')
        FM_thread = threading.Thread(target=self._file_manager)
        FM_thread.start()
        logging.info('[BotCore] BotCore successfully started.')
        self.coreState = 'RUN'
def _trader_manager(self):
''' '''
while self.coreState != 'STOP':
pass
def _bnb_manager(self):
''' This will manage BNB balance and update if there is low BNB in account. '''
last_wallet_update_time = 0
while self.coreState != 'STOP':
socket_buffer_global = self.socket_api.socketBuffer
# If outbound postion is seen then wallet has updated.
if 'outboundAccountPosition' in socket_buffer_global:
if last_wallet_update_time != socket_buffer_global['outboundAccountPosition']['E']:
last_wallet_update_time = socket_buffer_global['outboundAccountPosition']['E']
for wallet in socket_buffer_global['outboundAccountPosition']['B']:
if wallet['a'] == 'BNB':
if float(wallet['f']) < 0.01:
bnb_order = self.rest_api.place_order(self.market_type, symbol='BNBBTC', side='BUY', type='MARKET', quantity=0.1)
time.sleep(2)
def _file_manager(self):
''' This section is responsible for activly updating the traders cache files. '''
while self.coreState != 'STOP':
time.sleep(15)
traders_data = self.get_trader_data()
if os.path.exists(self.cache_dir):
file_path = '{0}{1}'.format(self.cache_dir,CAHCE_FILES)
with open(file_path, 'w') as f:
json.dump({'lastUpdateTime':time.time() ,'data':traders_data}, f)
def _connection_manager(self):
''' This section is responsible for re-testing connectiongs in the event of a disconnect. '''
| |
= data['groupId']
text_json['id'] = data['groupId']
text_json['pitGroupRef'] = data['groupId']
response.text = json.dumps(text_json)
elif re.match("^/storage-systems/[0-9a-zA-Z]+/snapshot-volumes$",
path):
response.status_code = 200
text_json = json.loads("""{"unusableRepositoryCapacity": "0",
"totalSizeInBytes":
"-1", "worldWideName": "60080E500023BB3400001FAD52CEF2F5",
"boundToPIT": true, "wwn":
"60080E500023BB3400001FAD52CEF2F5", "id":
"3500000060080E500023BB3400001FAD52CEF2F5",
"baseVol": "0200000060080E500023BB3400001FA352CECCAE",
"label": "bdm-pv-1", "volumeFull": false,
"preferredControllerId": "070000000000000000000001", "offline":
false, "viewSequenceNumber": "10", "status": "optimal",
"viewRef": "3500000060080E500023BB3400001FAD52CEF2F5",
"mapped": false, "accessMode": "readOnly", "viewTime":
"1389315613", "repositoryVolume":
"0000000000000000000000000000000000000000", "preferredManager":
"070000000000000000000001", "volumeHandle": 16385,
"currentManager": "070000000000000000000001",
"maxRepositoryCapacity": "0", "name": "bdm-pv-1",
"fullWarnThreshold": 0, "currentControllerId":
"070000000000000000000001", "basePIT":
"3400000060080E500023BB3400631F335294A5A8", "clusterSize":
0, "mgmtClientAttribute": 0}""")
text_json['label'] = data['name']
text_json['name'] = data['name']
text_json['id'] = data['name']
text_json['basePIT'] = data['snapshotImageId']
text_json['baseVol'] = data['baseMappableObjectId']
response.text = json.dumps(text_json)
elif re.match("^/storage-systems$", path):
response.status_code = 200
response.text = """{"freePoolSpace": "17055871480319",
"driveCount": 24,
"wwn": "60080E500023C73400000000515AF323", "id": "1",
"hotSpareSizeAsString": "0", "hostSparesUsed": 0, "types": "",
"hostSpareCountInStandby": 0, "status": "optimal", "trayCount":
1, "usedPoolSpaceAsString": "37452115456", "ip2":
"10.63.165.216", "ip1": "10.63.165.215",
"freePoolSpaceAsString": "17055871480319", "hotSpareCount": 0,
"hotSpareSize": "0", "name": "stle2600-7_8", "usedPoolSpace":
"37452115456", "driveTypes": ["sas"],
"unconfiguredSpaceByDriveType": {}, "unconfiguredSpaceAsStrings":
"0", "model": "2650", "unconfiguredSpace": "0"}"""
elif re.match("^/storage-systems/[0-9a-zA-Z]+$",
path):
response.status_code = 200
elif re.match("^/storage-systems/[0-9a-zA-Z]+/volume-copy-jobs$",
path):
response.status_code = 200
response.text = """{"status": "complete", "cloneCopy": true,
"pgRef":
"3300000060080E500023C73400000ACA52D29454", "volcopyHandle":49160
, "idleTargetWriteProt": true, "copyPriority": "priority2",
"volcopyRef": "1800000060080E500023C73400000ACF52D29466",
"worldWideName": "60080E500023C73400000ACF52D29466",
"copyCompleteTime": "0", "sourceVolume":
"3500000060080E500023C73400000ACE52D29462", "currentManager":
"070000000000000000000002", "copyStartTime": "1389551671",
"reserved1": "00000000", "targetVolume":
"0200000060080E500023C73400000A8C52D10675"}"""
elif re.match("^/storage-systems/[0-9a-zA-Z]+/volumes/[0-9A-Za-z]+$",
path):
response.status_code = 200
response.text = """{"extremeProtection": false,
"pitBaseVolume": true,
"dssMaxSegmentSize": 131072,
"totalSizeInBytes": "1073741824", "raidLevel": "raid6",
"volumeRef": "0200000060080E500023BB34000003FB515C2293",
"listOfMappings": [{
"lunMappingRef":"8800000000000000000000000000000000000000",
"lun": 0,
"ssid": 16384,
"perms": 15,
"volumeRef": "0200000060080E500023BB34000003FB515C2293",
"type": "all",
"mapRef": "8400000060080E500023C73400300381515BFBA3"
}], "sectorOffset": "15",
"id": "0200000060080E500023BB34000003FB515C2293",
"wwn": "60080E500023BB3400001FC352D14CB2",
"capacity": "2147483648", "mgmtClientAttribute": 0,
"label": "rename",
"volumeFull": false,
"blkSize": 512, "volumeCopyTarget": false,
"volumeGroupRef":
"0400000060080E500023BB3400001F9F52CECC3F",
"preferredControllerId": "070000000000000000000001",
"currentManager": "070000000000000000000001",
"applicationTagOwned": false, "status": "optimal",
"segmentSize": 131072, "volumeUse": "standardVolume",
"action": "none", "preferredManager":
"070000000000000000000001", "volumeHandle": 15,
"offline": false, "preReadRedundancyCheckEnabled": false,
"dssPreallocEnabled": false, "name": "bdm-vc-test-1",
"worldWideName": "60080E500023BB3400001FC352D14CB2",
"currentControllerId": "070000000000000000000001",
"protectionInformationCapable": false, "mapped": false,
"reconPriority": 1, "protectionType":
"type1Protection"}"""
else:
# Unknown API
response.status_code = 500
return response
def do_DELETE(self, path, params, data, headers):
"""Respond to a DELETE request."""
response = FakeEseriesResponse()
if "/devmgr/vn" not in path:
response.status_code = 500
(__, ___, path) = path.partition("/devmgr/vn")
if re.match("^/storage-systems/[0-9a-zA-Z]+/snapshot-images"
"/[0-9A-Za-z]+$", path):
code = 204
elif re.match("^/storage-systems/[0-9a-zA-Z]+/snapshot-groups"
"/[0-9A-Za-z]+$", path):
code = 204
elif re.match("^/storage-systems/[0-9a-zA-Z]+/snapshot-volumes"
"/[0-9A-Za-z]+$", path):
code = 204
elif re.match("^/storage-systems/[0-9a-zA-Z]+/volume-copy-jobs"
"/[0-9A-Za-z]+$", path):
code = 204
elif re.match("^/storage-systems/[0-9a-zA-Z]+/volumes"
"/[0-9A-Za-z]+$", path):
code = 204
elif re.match("^/storage-systems/[0-9a-zA-Z]+/volume-mappings/"
"[0-9a-zA-Z]+$", path):
code = 204
else:
code = 500
response.status_code = code
return response
class FakeEseriesHTTPSession(object):
    """A fake requests.Session for netapp tests."""

    def __init__(self):
        self.handler = FakeEseriesServerHandler()

    def request(self, method, url, params, data, headers, timeout, verify):
        # Strip everything up to and including the fake server address.
        address = '127.0.0.1:80'
        (__, ___, path) = url.partition(address)
        # Dispatch to the handler method matching the HTTP verb.
        dispatch = {
            'GET': self.handler.do_GET,
            'POST': self.handler.do_POST,
            'DELETE': self.handler.do_DELETE,
        }
        handler = dispatch.get(method.upper())
        if handler is None:
            raise exception.Invalid()
        return handler(path, params, data, headers)
class NetAppEseriesISCSIDriverTestCase(test.TestCase):
"""Test case for NetApp e-series iscsi driver."""
volume = {'id': '114774fb-e15a-4fae-8ee2-c9723e3645ef', 'size': 1,
'volume_name': 'lun1', 'host': 'hostname@backend#DDP',
'os_type': 'linux', 'provider_location': 'lun1',
'name_id': '114774fb-e15a-4fae-8ee2-c9723e3645ef',
'provider_auth': 'provider a b', 'project_id': 'project',
'display_name': None, 'display_description': 'lun1',
'volume_type_id': None}
snapshot = {'id': '17928122-553b-4da9-9737-e5c3dcd97f75',
'volume_id': '114774fb-e15a-4fae-8ee2-c9723e3645ef',
'size': 2, 'volume_name': 'lun1',
'volume_size': 2, 'project_id': 'project',
'display_name': None, 'display_description': 'lun1',
'volume_type_id': None}
volume_sec = {'id': 'b6c01641-8955-4917-a5e3-077147478575',
'size': 2, 'volume_name': 'lun1',
'os_type': 'linux', 'provider_location': 'lun1',
'name_id': 'b6c01641-8955-4917-a5e3-077147478575',
'provider_auth': None, 'project_id': 'project',
'display_name': None, 'display_description': 'lun1',
'volume_type_id': None}
volume_clone = {'id': 'b4b24b27-c716-4647-b66d-8b93ead770a5', 'size': 3,
'volume_name': 'lun1',
'os_type': 'linux', 'provider_location': 'cl_sm',
'name_id': 'b4b24b27-c716-4647-b66d-8b93ead770a5',
'provider_auth': None,
'project_id': 'project', 'display_name': None,
'display_description': 'lun1',
'volume_type_id': None}
volume_clone_large = {'id': 'f6ef5bf5-e24f-4cbb-b4c4-11d631d6e553',
'size': 6, 'volume_name': 'lun1',
'os_type': 'linux', 'provider_location': 'cl_lg',
'name_id': 'f6ef5bf5-e24f-4cbb-b4c4-11d631d6e553',
'provider_auth': None,
'project_id': 'project', 'display_name': None,
'display_description': 'lun1',
'volume_type_id': None}
fake_eseries_volume_label = utils.convert_uuid_to_es_fmt(volume['id'])
connector = {'initiator': 'iqn.1998-01.com.vmware:localhost-28a58148'}
fake_size_gb = volume['size']
fake_eseries_pool_label = 'DDP'
fake_ref = {'source-name': 'CFDGJSLS'}
fake_ret_vol = {'id': 'vol_id', 'label': 'label',
'worldWideName': 'wwn', 'capacity': '2147583648'}
    def setUp(self):
        """Build the driver under test via the shared _custom_setup path."""
        super(NetAppEseriesISCSIDriverTestCase, self).setUp()
        self._custom_setup()
    def _custom_setup(self):
        """Wire the driver to the fake HTTP session and stub registration."""
        self.mock_object(na_utils, 'OpenStackInfo')
        # Inject fake netapp_lib module classes.
        fakes.mock_netapp_lib([client])
        self.mock_object(common.na_utils, 'check_netapp_lib')
        configuration = self._set_config(create_configuration())
        self.driver = common.NetAppDriver(configuration=configuration)
        self.library = self.driver.library
        # All HTTP traffic goes through the fake session defined above.
        self.mock_object(requests, 'Session', FakeEseriesHTTPSession)
        self.mock_object(self.library,
                         '_check_mode_get_or_register_storage_system')
        self.driver.do_setup(context='context')
        self.driver.library._client._endpoint = fakes.FAKE_ENDPOINT_HTTP
    def _set_config(self, configuration):
        """Populate a configuration object with the E-series test defaults."""
        configuration.netapp_storage_family = 'eseries'
        configuration.netapp_storage_protocol = 'iscsi'
        configuration.netapp_transport_type = 'http'
        configuration.netapp_server_hostname = '127.0.0.1'
        configuration.netapp_server_port = None
        configuration.netapp_webservice_path = '/devmgr/vn'
        configuration.netapp_controller_ips = '127.0.0.2,127.0.0.3'
        # NOTE(review): '<PASSWORD>' looks like an anonymization placeholder
        # for the original fixture value -- confirm against upstream.
        configuration.netapp_sa_password = '<PASSWORD>'
        configuration.netapp_login = 'rw'
        configuration.netapp_password = 'rw'
        configuration.netapp_storage_pools = 'DDP'
        configuration.netapp_enable_multiattach = False
        return configuration
    def test_embedded_mode(self):
        """do_setup resolves the system id when a controller IP matches."""
        configuration = self._set_config(create_configuration())
        configuration.netapp_controller_ips = '127.0.0.1,127.0.0.3'
        driver = common.NetAppDriver(configuration=configuration)
        self.mock_object(client.RestClient, 'list_storage_systems', mock.Mock(
            return_value=[fakes.STORAGE_SYSTEM]))
        driver.do_setup(context='context')
        self.assertEqual('1fa6efb5-f07b-4de4-9f0e-52e5f7ff5d1b',
                         driver.library._client.get_system_id())
    def test_check_system_pwd_not_sync(self):
        """_check_storage_system succeeds for a needsAttention system."""
        def list_system():
            # NOTE(review): 'test_count' is never pre-set on self, so
            # getattr() is always falsy here and the passwordoutofsync
            # branch is unreachable -- confirm whether a counter-increment
            # setup step was lost.
            if getattr(self, 'test_count', None):
                self.test_count = 1
                return {'status': 'passwordoutofsync'}
            return {'status': 'needsAttention'}
        self.library._client.list_storage_system = mock.Mock(wraps=list_system)
        result = self.library._check_storage_system()
        self.assertTrue(result)
    def test_create_destroy(self):
        """create_volume followed by delete_volume completes cleanly."""
        FAKE_POOLS = [{'label': 'DDP', 'volumeGroupRef': 'test'}]
        self.library._get_storage_pools = mock.Mock(return_value=FAKE_POOLS)
        self.mock_object(self.library._client, '_get_resource_url', mock.Mock(
            return_value=fakes.FAKE_ENDPOINT_HTTP))
        self.mock_object(self.library._client, '_eval_response')
        self.mock_object(self.library._client, 'list_volumes', mock.Mock(
            return_value=FAKE_POOLS))
        self.driver.create_volume(self.volume)
        self.driver.delete_volume(self.volume)
    def test_vol_stats(self):
        """get_volume_stats without a refresh does not raise."""
        self.driver.get_volume_stats(refresh=False)
    def test_get_pool(self):
        """get_pool returns the pool label looked up via the volume ref."""
        self.mock_object(self.library, '_get_volume',
                         mock.Mock(return_value={
                             'volumeGroupRef': 'fake_ref'}))
        self.mock_object(self.library._client, "get_storage_pool",
                         mock.Mock(return_value={'volumeGroupRef': 'fake_ref',
                                                 'label': 'ddp1'}))
        pool = self.driver.get_pool({'name_id': 'fake-uuid'})
        self.assertEqual('ddp1', pool)
def test_get_pool_no_pools(self):
self.mock_object(self.library, '_get_volume',
mock.Mock(return_value={
'volumeGroupRef': 'fake_ref'}))
self.mock_object(self.library._client, "get_storage_pool",
mock.Mock(return_value=None))
pool = self.driver.get_pool({'name_id': 'fake-uuid'})
self.assertEqual(None, pool)
    @mock.patch.object(library.NetAppESeriesLibrary, '_create_volume',
                       mock.Mock())
    def test_create_volume(self):
        """create_volume delegates pool, label and size to _create_volume."""
        self.driver.create_volume(self.volume)
        self.library._create_volume.assert_called_with(
            'DDP', self.fake_eseries_volume_label, self.volume['size'])
    def test_create_volume_no_pool_provided_by_scheduler(self):
        """create_volume rejects a host string without a pool suffix."""
        volume = copy.deepcopy(self.volume)
        volume['host'] = "host@backend"  # missing pool
        self.assertRaises(exception.InvalidHost, self.driver.create_volume,
                          volume)
@mock.patch.object(client.RestClient, 'list_storage_pools')
def test_helper_create_volume_fail(self, fake_list_pools):
fake_pool = {}
fake_pool['label'] = self.fake_eseries_pool_label
fake_pool['volumeGroupRef'] = 'foo'
fake_pool['raidLevel'] = 'raidDiskPool'
fake_pools = [fake_pool]
fake_list_pools.return_value = fake_pools
wrong_eseries_pool_label = 'hostname@backend'
self.assertRaises(exception.NetAppDriverException,
self.library._create_volume,
wrong_eseries_pool_label,
self.fake_eseries_volume_label,
self.fake_size_gb)
@mock.patch.object(library.LOG, 'info')
@mock.patch.object(client.RestClient, 'list_storage_pools')
@mock.patch.object(client.RestClient, 'create_volume',
mock.MagicMock(return_value='CorrectVolume'))
def test_helper_create_volume(self, storage_pools, log_info):
fake_pool = {}
fake_pool['label'] = self.fake_eseries_pool_label
fake_pool['volumeGroupRef'] = 'foo'
fake_pool['raidLevel'] = 'raidDiskPool'
fake_pools = [fake_pool]
storage_pools.return_value = fake_pools
storage_vol = self.library._create_volume(
self.fake_eseries_pool_label,
self.fake_eseries_volume_label,
self.fake_size_gb)
log_info.assert_called_once_with("Created volume with label %s.",
self.fake_eseries_volume_label)
self.assertEqual('CorrectVolume', storage_vol)
@mock.patch.object(client.RestClient, 'list_storage_pools')
@mock.patch.object(client.RestClient, 'create_volume',
mock.MagicMock(
side_effect=exception.NetAppDriverException))
@mock.patch.object(library.LOG, 'info', mock.Mock())
def test_create_volume_check_exception(self, fake_list_pools):
fake_pool = {}
fake_pool['label'] = self.fake_eseries_pool_label
fake_pool['volumeGroupRef'] = 'foo'
fake_pool['raidLevel'] = 'raidDiskPool'
fake_pools = [fake_pool]
fake_list_pools.return_value = fake_pools
self.assertRaises(exception.NetAppDriverException,
self.library._create_volume,
self.fake_eseries_pool_label,
self.fake_eseries_volume_label, self.fake_size_gb)
    def test_portal_for_vol_controller(self):
        """Portal matching prefers the volume's current controller,
        falling back to the first portal when none matches."""
        volume = {'id': 'vol_id', 'currentManager': 'ctrl1'}
        vol_nomatch = {'id': 'vol_id', 'currentManager': 'ctrl3'}
        portals = [{'controller': 'ctrl2', 'iqn': 'iqn2'},
                   {'controller': 'ctrl1', 'iqn': 'iqn1'}]
        portal = self.library._get_iscsi_portal_for_vol(volume, portals)
        self.assertEqual({'controller': 'ctrl1', 'iqn': 'iqn1'}, portal)
        portal = self.library._get_iscsi_portal_for_vol(vol_nomatch, portals)
        self.assertEqual({'controller': 'ctrl2', 'iqn': 'iqn2'}, portal)
    def test_portal_for_vol_any_false(self):
        """With anyController=False an unmatched controller must raise."""
        vol_nomatch = {'id': 'vol_id', 'currentManager': 'ctrl3'}
        portals = [{'controller': 'ctrl2', 'iqn': 'iqn2'},
                   {'controller': 'ctrl1', 'iqn': 'iqn1'}]
        self.assertRaises(exception.NetAppDriverException,
                          self.library._get_iscsi_portal_for_vol,
                          vol_nomatch, portals, False)
    def test_setup_error_unsupported_host_type(self):
        """check_for_setup_error rejects an unknown netapp_host_type."""
        configuration = self._set_config(create_configuration())
        configuration.netapp_host_type = 'garbage'
        driver = common.NetAppDriver(configuration=configuration)
        self.assertRaises(exception.NetAppDriverException,
                          driver.library.check_for_setup_error)
    def test_check_host_type_default(self):
        """The default host type resolves to 'LnxALUA'."""
        configuration = self._set_config(create_configuration())
        driver = common.NetAppDriver(configuration=configuration)
        driver.library._check_host_type()
        self.assertEqual('LnxALUA', driver.library.host_type)
    def test_do_setup_all_default(self):
        """do_setup builds the REST client with the default parameters."""
        configuration = self._set_config(create_configuration())
        driver = common.NetAppDriver(configuration=configuration)
        driver.library._check_mode_get_or_register_storage_system = mock.Mock()
        mock_invoke = self.mock_object(client, 'RestClient')
        driver.do_setup(context='context')
        mock_invoke.assert_called_with(**fakes.FAKE_CLIENT_PARAMS)
    def test_do_setup_http_default_port(self):
        """Explicit http transport with no port uses the default params."""
        configuration = self._set_config(create_configuration())
        configuration.netapp_transport_type = 'http'
        driver = common.NetAppDriver(configuration=configuration)
        driver.library._check_mode_get_or_register_storage_system = mock.Mock()
        mock_invoke = self.mock_object(client, 'RestClient')
        driver.do_setup(context='context')
        mock_invoke.assert_called_with(**fakes.FAKE_CLIENT_PARAMS)
    def test_do_setup_https_default_port(self):
        """https transport with no port defaults to 8443/https."""
        configuration = self._set_config(create_configuration())
        configuration.netapp_transport_type = 'https'
        driver = common.NetAppDriver(configuration=configuration)
        driver.library._check_mode_get_or_register_storage_system = mock.Mock()
        mock_invoke = self.mock_object(client, 'RestClient')
        driver.do_setup(context='context')
        FAKE_EXPECTED_PARAMS = dict(fakes.FAKE_CLIENT_PARAMS, port=8443,
                                    scheme='https')
        mock_invoke.assert_called_with(**FAKE_EXPECTED_PARAMS)
    def test_do_setup_http_non_default_port(self):
        """A configured server port overrides the http default."""
        configuration = self._set_config(create_configuration())
        configuration.netapp_server_port = 81
        driver = common.NetAppDriver(configuration=configuration)
        driver.library._check_mode_get_or_register_storage_system = mock.Mock()
        mock_invoke = self.mock_object(client, 'RestClient')
        driver.do_setup(context='context')
        FAKE_EXPECTED_PARAMS = dict(fakes.FAKE_CLIENT_PARAMS, port=81)
        mock_invoke.assert_called_with(**FAKE_EXPECTED_PARAMS)
    def test_do_setup_https_non_default_port(self):
        """A configured server port overrides the https default."""
        configuration = self._set_config(create_configuration())
        configuration.netapp_transport_type = 'https'
        configuration.netapp_server_port = 446
        driver = common.NetAppDriver(configuration=configuration)
        driver.library._check_mode_get_or_register_storage_system = mock.Mock()
        mock_invoke = self.mock_object(client, 'RestClient')
        driver.do_setup(context='context')
        FAKE_EXPECTED_PARAMS = dict(fakes.FAKE_CLIENT_PARAMS, port=446,
                                    scheme='https')
        mock_invoke.assert_called_with(**FAKE_EXPECTED_PARAMS)
    def test_setup_good_controller_ip(self):
        """Driver construction succeeds with a single valid controller IP."""
        configuration = self._set_config(create_configuration())
        configuration.netapp_controller_ips = '127.0.0.1'
        driver = common.NetAppDriver(configuration=configuration)
        # NOTE(review): bare attribute access below is a no-op -- it looks
        # like a missing '()' call, so this test only exercises construction.
        driver.library._check_mode_get_or_register_storage_system
    def test_setup_good_controller_ips(self):
        """Driver construction succeeds with two valid controller IPs."""
        configuration = self._set_config(create_configuration())
        configuration.netapp_controller_ips = '127.0.0.2,127.0.0.1'
        driver = common.NetAppDriver(configuration=configuration)
        # NOTE(review): bare attribute access below is a no-op -- it looks
        # like a missing '()' call, so this test only exercises construction.
        driver.library._check_mode_get_or_register_storage_system
    def test_setup_missing_controller_ip(self):
        """do_setup raises InvalidInput when no controller IP is given."""
        configuration = self._set_config(create_configuration())
        configuration.netapp_controller_ips = None
        driver = common.NetAppDriver(configuration=configuration)
        self.assertRaises(exception.InvalidInput,
                          driver.do_setup, context='context')
    def test_setup_error_invalid_controller_ip(self):
        """Registration raises NoValidHost when the IP cannot resolve."""
        configuration = self._set_config(create_configuration())
        configuration.netapp_controller_ips = '987.65.43.21'
        driver = common.NetAppDriver(configuration=configuration)
        self.mock_object(na_utils, 'resolve_hostname',
                         mock.Mock(side_effect=socket.gaierror))
        self.assertRaises(
            exception.NoValidHost,
            driver.library._check_mode_get_or_register_storage_system)
    def test_setup_error_invalid_first_controller_ip(self):
        """Registration raises when only the first IP fails to resolve."""
        configuration = self._set_config(create_configuration())
        configuration.netapp_controller_ips = '987.65.43.21,127.0.0.1'
        driver = common.NetAppDriver(configuration=configuration)
        self.mock_object(na_utils, 'resolve_hostname',
                         mock.Mock(side_effect=socket.gaierror))
        self.assertRaises(
            exception.NoValidHost,
            driver.library._check_mode_get_or_register_storage_system)
    def test_setup_error_invalid_second_controller_ip(self):
        """Registration raises when only the second IP fails to resolve."""
        configuration = self._set_config(create_configuration())
        configuration.netapp_controller_ips = '127.0.0.1,987.65.43.21'
        driver = common.NetAppDriver(configuration=configuration)
        self.mock_object(na_utils, 'resolve_hostname',
                         mock.Mock(side_effect=socket.gaierror))
        self.assertRaises(
            exception.NoValidHost,
            driver.library._check_mode_get_or_register_storage_system)
    def test_setup_error_invalid_both_controller_ips(self):
        """Registration raises when neither IP can be resolved."""
        configuration = self._set_config(create_configuration())
        configuration.netapp_controller_ips = '564.124.1231.1,987.65.43.21'
        driver = common.NetAppDriver(configuration=configuration)
        self.mock_object(na_utils, 'resolve_hostname',
                         mock.Mock(side_effect=socket.gaierror))
        self.assertRaises(
            exception.NoValidHost,
            driver.library._check_mode_get_or_register_storage_system)
    def test_get_vol_with_label_wwn_missing(self):
        """Lookup with neither a label nor a WWN raises InvalidInput."""
        self.assertRaises(exception.InvalidInput,
                          self.library._get_volume_with_label_wwn,
                          None, None)
    def test_get_vol_with_label_wwn_found(self):
        """Lookup by label and colon-separated WWN finds the right volume."""
        # NOTE(review): the first entry uses 'worlWideName' (missing 'd');
        # the test still matches the second entry, but confirm whether the
        # misspelling is an intentional negative fixture.
        fake_vl_list = [{'volumeRef': '1', 'volumeUse': 'standardVolume',
                         'label': 'l1', 'volumeGroupRef': 'g1',
                         'worlWideName': 'w1ghyu'},
                        {'volumeRef': '2', 'volumeUse': 'standardVolume',
                         'label': 'l2', 'volumeGroupRef': 'g2',
                         'worldWideName': 'w2ghyu'}]
        self.library._get_storage_pools = mock.Mock(return_value=['g2', 'g3'])
        self.library._client.list_volumes = mock.Mock(
            return_value=fake_vl_list)
        vol = self.library._get_volume_with_label_wwn('l2', 'w2:gh:yu')
        self.assertEqual(1, self.library._client.list_volumes.call_count)
        self.assertEqual('2', vol['volumeRef'])
def test_get_vol_with_label_wwn_unmatched(self):
fake_vl_list = [{'volumeRef': '1', 'volumeUse': 'standardVolume',
'label': 'l1', 'volumeGroupRef': 'g1',
'worlWideName': 'w1ghyu'},
{'volumeRef': '2', 'volumeUse': 'standardVolume',
'label': 'l2', 'volumeGroupRef': 'g2',
'worldWideName': 'w2ghyu'}]
self.library._get_storage_pools = mock.Mock(return_value=['g2', 'g3'])
| |
# Copyright: (c) 2018, <NAME> (@jborean93) <<EMAIL>>
# MIT License (see LICENSE or https://opensource.org/licenses/MIT)
from copy import deepcopy
from pypsrp._utils import to_string, version_equal_or_newer
class ObjectMeta(object):
    def __init__(self, tag="*", name=None, optional=False, object=None):
        """Metadata describing how a single value is (de)serialized.
        :param tag: The XML element tag of the value, '*' matches any.
        :param name: Optional property name attribute for the value.
        :param optional: Whether the value may be omitted.
        :param object: Optional class used to rebuild a complex object.
        """
        self.tag, self.name = tag, name
        self.optional, self.object = optional, object
class ListMeta(ObjectMeta):
    def __init__(self, obj_type="LST", name=None, optional=False,
                 list_value_meta=None, list_types=None):
        """Metadata for serializing list-like values."""
        super(ListMeta, self).__init__(obj_type, name, optional)
        # Default the element metadata to a wildcard and the type names to
        # the generic .NET array hierarchy when the caller supplies none.
        self.list_value_meta = (ObjectMeta() if list_value_meta is None
                                else list_value_meta)
        self.list_types = (["System.Object[]",
                            "System.Array",
                            "System.Object"] if list_types is None
                           else list_types)
class StackMeta(ListMeta):
    def __init__(self, name=None, optional=False, list_value_meta=None,
                 list_types=None):
        """Metadata for serializing System.Collections.Stack values."""
        super(StackMeta, self).__init__(
            "STK", name, optional, list_value_meta,
            list_types if list_types is not None
            else ["System.Collections.Stack", "System.Object"])
class QueueMeta(ListMeta):
    def __init__(self, name=None, optional=False, list_value_meta=None,
                 list_types=None):
        """Metadata for serializing System.Collections.Queue values."""
        super(QueueMeta, self).__init__(
            "QUE", name, optional, list_value_meta,
            list_types if list_types is not None
            else ["System.Collections.Queue", "System.Object"])
class DictionaryMeta(ObjectMeta):
    def __init__(self, name=None, optional=False, dict_key_meta=None,
                 dict_value_meta=None, dict_types=None):
        """Metadata for serializing dictionary values."""
        super(DictionaryMeta, self).__init__("DCT", name, optional)
        # Default key/value metadata are wildcard entries named Key/Value;
        # the default type hierarchy is a plain Hashtable.
        self.dict_key_meta = (ObjectMeta(name="Key") if dict_key_meta is None
                              else dict_key_meta)
        self.dict_value_meta = (ObjectMeta(name="Value")
                                if dict_value_meta is None
                                else dict_value_meta)
        self.dict_types = (dict_types if dict_types is not None
                           else ["System.Collections.Hashtable",
                                 "System.Object"])
class ComplexObject(object):
    def __init__(self):
        """Base class for objects serialized as PSRP complex objects."""
        # (attribute, ObjectMeta) pairs per property category; all empty by
        # default and filled in by subclasses.
        self._adapted_properties = self._extended_properties = \
            self._property_sets = ()
        # .NET type hierarchy names, most specific first.
        self._types = []
        self._to_string = None
        self._xml = None  # only populated on deserialization
    def __str__(self):
        return to_string(self._to_string)
class GenericComplexObject(ComplexObject):
    def __init__(self):
        """Complex object whose properties are discovered at runtime."""
        super(GenericComplexObject, self).__init__()
        # Public mirrors of the private stores on ComplexObject.
        self.types = []
        self.to_string = None
        self.property_sets = []
        self.adapted_properties = {}
        self.extended_properties = {}
    def __str__(self):
        return to_string(self.to_string)
class Enum(ComplexObject):
    def __init__(self, enum_type, string_map, **kwargs):
        """Base class for .NET enum values exchanged over PSRP.
        :param enum_type: Concrete .NET enum type name; prepended to the
            type hierarchy when not None.
        :param string_map: dict mapping int enum values to .NET names.
        :param value: (kwarg) the initial int value of the enum.
        """
        super(Enum, self).__init__()
        self._types = [
            "System.Enum",
            "System.ValueType",
            "System.Object"
        ]
        if enum_type is not None:
            self._types.insert(0, enum_type)
        self._property_sets = (
            ('value', ObjectMeta("I32")),
        )
        self._string_map = string_map
        self.value = kwargs.get('value')
    @property
    def _to_string(self):
        # Render the enum's .NET name; an unmapped value is reported with
        # the valid choices in the error message.
        try:
            return self._string_map[self.value]
        except KeyError as err:
            raise KeyError("%s is not a valid enum value for %s, valid values "
                           "are %s" % (err, self._types[0], self._string_map))
    @_to_string.setter
    def _to_string(self, value):
        # Ignored: the string form is always derived from self.value.
        pass
# PSRP Complex Objects - https://msdn.microsoft.com/en-us/library/dd302883.aspx
class Coordinates(ComplexObject):
    def __init__(self, **kwargs):
        """
        [MS-PSRP] 2.2.3.1 Coordinates
        https://msdn.microsoft.com/en-us/library/dd302883.aspx
        :param x: The X coordinate (0 is the leftmost column)
        :param y: The Y coordinate (0 is the topmost row)
        """
        super(Coordinates, self).__init__()
        # .NET type hierarchy first, then the adapted property mapping.
        self._types = [
            "System.Management.Automation.Host.Coordinates",
            "System.ValueType",
            "System.Object"
        ]
        self._adapted_properties = (
            ('x', ObjectMeta("I32", name="X")),
            ('y', ObjectMeta("I32", name="Y")),
        )
        self.x = kwargs.get('x')
        self.y = kwargs.get('y')
class Size(ComplexObject):
    def __init__(self, **kwargs):
        """
        [MS-PSRP] 2.2.3.2 Size
        https://msdn.microsoft.com/en-us/library/dd305083.aspx
        :param width: The width of the size
        :param height: The height of the size
        """
        super(Size, self).__init__()
        # .NET type hierarchy first, then the adapted property mapping.
        self._types = [
            "System.Management.Automation.Host.Size",
            "System.ValueType",
            "System.Object"
        ]
        self._adapted_properties = (
            ('width', ObjectMeta("I32", name="Width")),
            ('height', ObjectMeta("I32", name="Height")),
        )
        self.width = kwargs.get('width')
        self.height = kwargs.get('height')
class Color(Enum):
    # Console colour constants mirroring System.ConsoleColor.
    BLACK = 0
    DARK_BLUE = 1
    DARK_GREEN = 2
    DARK_CYAN = 3
    DARK_RED = 4
    DARK_MAGENTA = 5
    DARK_YELLOW = 6
    GRAY = 7
    DARK_GRAY = 8
    BLUE = 9
    GREEN = 10
    CYAN = 11
    RED = 12
    MAGENTA = 13
    YELLOW = 14
    WHITE = 15
    def __init__(self, **kwargs):
        """
        [MS-PSRP] 2.2.3.3 Color
        https://msdn.microsoft.com/en-us/library/dd360026.aspx
        :param value: The enum value for Color
        """
        # Maps each constant above to its .NET enum name.
        string_map = {
            0: "Black",
            1: "DarkBlue",
            2: "DarkGreen",
            3: "DarkCyan",
            4: "DarkRed",
            5: "DarkMagenta",
            6: "DarkYellow",
            7: "Gray",
            8: "DarkGray",
            9: "Blue",
            10: "Green",
            11: "Cyan",
            12: "Red",
            13: "Magenta",
            14: "Yellow",
            15: "White",
        }
        super(Color, self).__init__("System.ConsoleColor", string_map,
                                    **kwargs)
class RunspacePoolState(object):
    BEFORE_OPEN = 0
    OPENING = 1
    OPENED = 2
    CLOSED = 3
    CLOSING = 4
    BROKEN = 5
    NEGOTIATION_SENT = 6
    NEGOTIATION_SUCCEEDED = 7
    CONNECTING = 8
    DISCONNECTED = 9
    def __init__(self, state):
        """
        [MS-PSRP] 2.2.3.4 RunspacePoolState
        https://msdn.microsoft.com/en-us/library/dd341723.aspx
        Represents the state of the RunspacePool.
        :param state: The state int value
        """
        self.state = state
    def __str__(self):
        # Map each state constant to its protocol name.
        state_names = {
            0: "BeforeOpen", 1: "Opening", 2: "Opened", 3: "Closed",
            4: "Closing", 5: "Broken", 6: "NegotiationSent",
            7: "NegotiationSucceeded", 8: "Connecting", 9: "Disconnected",
        }
        return state_names[self.state]
class PSInvocationState(object):
    NOT_STARTED = 0
    RUNNING = 1
    STOPPING = 2
    STOPPED = 3
    COMPLETED = 4
    FAILED = 5
    DISCONNECTED = 6
    def __init__(self, state):
        """
        [MS-PSRP] 2.2.3.5 PSInvocationState
        https://msdn.microsoft.com/en-us/library/dd341651.aspx
        Represents the state of a pipeline invocation.
        :param state: The state int value
        """
        self.state = state
    def __str__(self):
        # Map each state constant to its protocol name.
        state_names = {
            0: "NotStarted", 1: "Running", 2: "Stopping", 3: "Stopped",
            4: "Completed", 5: "Failed", 6: "Disconnected",
        }
        return state_names[self.state]
class PSThreadOptions(Enum):
    DEFAULT = 0
    USE_NEW_THREAD = 1
    REUSE_THREAD = 2
    USE_CURRENT_THREAD = 3
    def __init__(self, **kwargs):
        """
        [MS-PSRP] 2.2.3.6 PSThreadOptions
        https://msdn.microsoft.com/en-us/library/dd305678.aspx
        :param value: The enum value for PS Thread Options
        """
        # The value->name map is passed inline to the Enum base.
        super(PSThreadOptions, self).__init__(
            "System.Management.Automation.Runspaces.PSThreadOptions",
            {0: "Default", 1: "UseNewThread", 2: "ReuseThread",
             3: "UseCurrentThread"},
            **kwargs
        )
class ApartmentState(Enum):
    STA = 0
    MTA = 1
    UNKNOWN = 2
    def __init__(self, **kwargs):
        """
        [MS-PSRP] 2.2.3.7 ApartmentState
        https://msdn.microsoft.com/en-us/library/dd304257.aspx
        :param value: The enum value for Apartment State
        """
        # The value->name map is passed inline to the Enum base.
        super(ApartmentState, self).__init__(
            "System.Management.Automation.Runspaces.ApartmentState",
            {0: 'STA', 1: 'MTA', 2: 'UNKNOWN'},
            **kwargs
        )
class RemoteStreamOptions(Enum):
    # Bit flags; ADD_INVOCATION_INFO is the union of the other four.
    ADD_INVOCATION_INFO_TO_ERROR_RECORD = 1
    ADD_INVOCATION_INFO_TO_WARNING_RECORD = 2
    ADD_INVOCATION_INFO_TO_DEBUG_RECORD = 4
    ADD_INVOCATION_INFO_TO_VERBOSE_RECORD = 8
    ADD_INVOCATION_INFO = 15
    def __init__(self, **kwargs):
        """
        [MS-PSRP] 2.2.3.8 RemoteStreamOptions
        https://msdn.microsoft.com/en-us/library/dd303829.aspx
        :param value: The initial RemoteStreamOption to set
        """
        # No string_map is passed: _to_string is overridden below to build
        # the comma-separated flag names itself.
        super(RemoteStreamOptions, self).__init__(
            "System.Management.Automation.Runspaces.RemoteStreamOptions",
            {}, **kwargs
        )
    @property
    def _to_string(self):
        # The all-flags value has its own dedicated name.
        if self.value == 15:
            return "AddInvocationInfo"
        string_map = (
            ("AddInvocationInfoToErrorRecord", 1),
            ("AddInvocationInfoToWarningRecord", 2),
            ("AddInvocationInfoToDebugRecord", 4),
            ("AddInvocationInfoToVerboseRecord", 8),
        )
        # Collect the name of every flag bit present in self.value.
        values = []
        for name, flag in string_map:
            if self.value & flag == flag:
                values.append(name)
        return ", ".join(values)
    @_to_string.setter
    def _to_string(self, value):
        # Ignored: the string form is always derived from self.value.
        pass
class Pipeline(ComplexObject):
    """[MS-PSRP] 2.2.3.11 Pipeline complex object.
    Commands are stored flat in self.commands; on serialization they are
    regrouped into statements via the ExtraCmds/Cmds property pair below.
    """
    class _ExtraCmds(ComplexObject):
        def __init__(self, **kwargs):
            # Used to encapsulate ExtraCmds in the structure required
            super(Pipeline._ExtraCmds, self).__init__()
            self._extended_properties = (
                ('cmds', ListMeta(
                    name="Cmds",
                    list_value_meta=ObjectMeta("Obj", object=Command),
                    list_types=[
                        "System.Collections.Generic.List`1[["
                        "System.Management.Automation.PSObject, "
                        "System.Management.Automation, Version=1.0.0.0, "
                        "Culture=neutral, PublicKeyToken=31bf3856ad364e35]]",
                        "System.Object",
                    ]
                )),
            )
            self.cmds = kwargs.get('cmds')
    def __init__(self, **kwargs):
        """
        [MS-PSRP] 2.2.3.11 Pipeline
        https://msdn.microsoft.com/en-us/library/dd358182.aspx
        :param is_nested: Whether the pipeline is a nested pipeline
        :param commands: List of commands to run
        :param history: The history string to add to the pipeline
        :param redirect_err_to_out: Whether to redirect the global
            error output pipe to the commands error output pipe.
        """
        super(Pipeline, self).__init__()
        cmd_types = [
            "System.Collections.Generic.List`1[["
            "System.Management.Automation.PSObject, "
            "System.Management.Automation, "
            "Version=1.0.0.0, Culture=neutral, "
            "PublicKeyToken=31bf3856ad364e35]]",
            "System.Object",
        ]
        self._extended_properties = (
            ('is_nested', ObjectMeta("B", name="IsNested")),
            # ExtraCmds isn't in spec but is value and used to send multiple
            # statements
            ('_extra_cmds', ListMeta(
                name="ExtraCmds",
                list_value_meta=ObjectMeta("Obj", object=self._ExtraCmds),
                list_types=cmd_types
            )),
            ('_cmds', ListMeta(
                name="Cmds", list_value_meta=ObjectMeta("Obj", object=Command),
                list_types=cmd_types
            )),
            ('history', ObjectMeta("S", name="History")),
            ('redirect_err_to_out',
             ObjectMeta("B", name="RedirectShellErrorOutputPipe")),
        )
        self.is_nested = kwargs.get('is_nested')
        self.commands = kwargs.get('cmds')
        self.history = kwargs.get('history')
        self.redirect_err_to_out = kwargs.get('redirect_err_to_out')
    @property
    def _cmds(self):
        # Cmds is always the first statement
        return self._get_statements()[0]
    @_cmds.setter
    def _cmds(self, value):
        # if commands is already set then that means ExtraCmds was present and
        # has already been set
        if self.commands and len(self.commands) > 0:
            return
        # ExtraCmds wasn't present so we need to unpack it
        self.commands = value
    @property
    def _extra_cmds(self):
        statements = self._get_statements()
        # ExtraCmds is only set if we have more than 1 statement, not present
        # if only 1
        if len(statements) < 2:
            return None
        else:
            extra = [self._ExtraCmds(cmds=c) for c in statements]
            return extra
    @_extra_cmds.setter
    def _extra_cmds(self, value):
        # check if extra_cmds was actually set and return if it wasn't
        if value is None:
            return
        # Flatten the statements back into self.commands, marking the last
        # command of each statement as an end-of-statement boundary.
        commands = []
        for statement in value:
            for command in statement.cmds:
                commands.append(command)
            commands[-1].end_of_statement = True
        self.commands = commands
    def _get_statements(self):
        """Split self.commands into per-statement lists of copied commands."""
        statements = []
        current_statement = []
        # set the last command to be the end of the statement
        self.commands[-1].end_of_statement = True
        for command in self.commands:
            # need to use deepcopy as the values can be appended to multiple
            # parents and in lxml that removes it from the original parent,
            # whereas this will create a copy of the statement for each parent
            current_statement.append(deepcopy(command))
            if command.end_of_statement:
                statements.append(current_statement)
                current_statement = []
        return statements
class Command(ComplexObject):
def __init__(self, protocol_version="2.3", **kwargs):
"""
[MS-PSRP] 2.2.3.12 Command
https://msdn.microsoft.com/en-us/library/dd339976.aspx
:param protocol_version: The negotiated protocol version of the remote
host. This determines what merge_* objects are added to the
serialized xml.
:param cmd: The cmdlet or script to run
:param is_script: Whether cmd is a script or not
:param use_local_scope: Use local or global scope to invoke commands
:param merge_my_result: Controls the behaviour of what stream to merge
to 'merge_to_result'. Only supports NONE or ERROR (only used in
protocol 2.1)
:param merge_to_result: Controls the | |
pginf7, pginf8, pginf9)
if numwebpagestest1==-10:
return -10
if numwebpagestest1==-100 or numwebpagestest1==verifnumwebpages or len(numwebpagestest1)==0:
lastpage=allinfolist
break
verifnumwebpages=copy.deepcopy(numwebpagestest1)
allinfolist.extend(numwebpagestest1)
else:
startpage=1
numwebpages=getinfo((pginf1.format(startpage)), pginf2, pginf3, pginf4, pginf5)
if numwebpages==-10:
return -10
if totalchoices[:2]==['Mangás', 'MangáHost']:
try:
lastpage=int((numwebpages[0].split())[-1])
except (IndexError, TypeError, ValueError):
lastpage=1
elif len(numwebpages)==0:
lastpage=1
else:
numwbcontrol=[]
stop=0
while True:
if numwebpages==-10:
return -10
numwb=[]
for wpinfo in numwebpages:
wbi=extractint(wpinfo)
if len(wbi)==1:
numwb.append(int(wbi[0]))
for wbictrl in numwbcontrol:
if wbictrl==numwb and numwb!=[]:
stop=1
break
if stop==1:
break
numwbcontrol.append(numwb)
numwebpages=getinfo((pginf1.format(numwb[-1])), pginf2, pginf3, pginf4, pginf5)
if numwebpages==-10:
return -10
lastpage=[]
for pgs in numwbcontrol:
lastpage.extend(pgs)
lastpage=sorted(set(lastpage))[-1]
return lastpage
    # Helper to collect the information from every page of a given link.
    def getinfopgs():
        """Aggregate the scraped info for pages 1..numwebpagesends.
        Returns -10 as the error sentinel (propagated from getinfo),
        otherwise the combined list of page info.
        NOTE(review): relies on closure variables from the enclosing
        function (numwebpagesends, pginf1, pginf6..pginf9, getinfo).
        """
        if numwebpagesends==-10:
            return -10
        else:
            allinfolist=[]
            pgrange=range(1, (numwebpagesends+1))
            for pg in pgrange:
                numwbpgsinfo=(pginf1.format(pg))
                numwbpgsinfo=getinfo(numwbpgsinfo, pginf6, pginf7, pginf8, pginf9)
                if numwbpgsinfo==-10:
                    return -10
                allinfolist.extend(numwbpgsinfo)
            return allinfolist
numwebpagesends = getnumwebpages()
if numwebpagesends==-10:
return -10
if type(numwebpagesends)==list:
allinfolist=numwebpagesends
else:
allinfolist=getinfopgs()
if allinfolist==-10:
return -10
return allinfolist
# Collect all of the info for the titles available on the chosen site.
def colectdata():
    """Scrape every title/link pair from the configured site.
    Returns (alltitlesandlinks, reboot); alltitlesandlinks is -10 when
    scraping failed, in which case the user is asked about retrying.
    """
    os.system('cls')  # clear the console -- Windows-only command
    pagessite=confgs['pagessite']
    alltitlesandlinks=getallinfolinks(pagessite, 'getsitenumpagesclass', 'getsitenumpagesextrainfo', 'firstpagesfilter', 'secondpagesfilter', 'titlesandlinksclass', 'titlesextrainfo', 'firsttitlesfilter', 'secondtitlesfilter')
    if alltitlesandlinks==-10:
        reboot=askreboot()
    else:
        reboot=1
    return alltitlesandlinks, reboot
# Check whether a path exists and, outside check-only mode, create the
# directory when it is missing.
def verifpath(dirp, mode):
    """Check for (and optionally create) a path.
    :param dirp: the path to test or create.
    :param mode: 0 = check only, returning 0 when the path exists and 1
        when it does not; any other value = ensure ``dirp`` exists as a
        directory and return ``dirp``.
    """
    if mode == 0:
        # Check-only mode: 0 means "already exists", 1 means "missing".
        return 0 if os.path.exists(dirp) else 1
    # EAFP: just try to create it; an existing entry is fine. This replaces
    # the previous redundant exists()-check plus ==True/==False comparisons.
    try:
        os.mkdir(dirp)
    except FileExistsError:
        pass
    return dirp
# Build the marker strings that identify a saved server-data file.
def getarchmark():
    """Return the (outer, inner) marker strings for the current server."""
    mark1 = 'Alê{}Mark'.format(totalchoices[0])
    mark2 = 'minimark(server={})'.format(totalchoices[1])
    return mark1, mark2
# Serialize the nested data list into a single marked-up string.
def convertlisttostr(listobj, mark1, mark2):
    """Flatten ``listobj`` (a list of lists of strings) into one string.
    Each inner string is prefixed with ``mark2``; each inner list is
    terminated by ``mark2 + mark1``; the whole result starts with
    ``mark1``. Built with str.join instead of repeated ``+`` concatenation,
    which was quadratic in the output size.
    """
    parts = [mark1]
    for sublist in listobj:
        for item in sublist:
            parts.append(mark2 + item)
        parts.append(mark2 + mark1)
    return ''.join(parts)
# Create the file that stores all of the scraped site data.
def createarchdata():
    """Prompt for a destination file, collect the site data and write it.
    :return: tuple ``(conteúdo, reboot)`` -- the collected data (or -10 on
        failure) and the reboot flag from the collection step.
    """
    archpath = inputdir('\nDigite o diretório em que deseja criar esse arquivo: ')
    while not Path(archpath).is_dir():
        print("\nEsse diretório não existe.\n\nTente novamente.")
        archpath = inputdir('\nDigite o diretório em que deseja criar esse arquivo: ')
    namearch = inputdir('\nDigite o nome do arquivo: ')
    ext = '.txt'
    if not namearch.endswith(ext):
        namearch = namearch + ext
    if archpath == '':
        archpath = namearch
    else:
        archpath = archpath + ('\{}').format(namearch)
    # Create/truncate the target up front (same observable behaviour as
    # before), but collect the data before holding the file open so the
    # handle is no longer leaked when collection returns -10.
    open(archpath, 'w').close()
    conteúdo, reboot = colectdata()
    if conteúdo == -10:
        return -10, reboot
    mark1, mark2 = getarchmark()
    with open(archpath, 'w', encoding="utf-8") as arquivo:
        arquivo.write(convertlisttostr(conteúdo, mark1, mark2))
    return conteúdo, reboot
# Função para obter o diretório central de destino dos arquivos
def gethqpath(mode):
    """Resolve the central destination directory (mode 1) or load archived
    server data from a user-supplied file (any other mode).

    mode 1: opens a folder-picker dialog, falls back to typed input, and sets
    the global 'typecontentdir' to '<chosen>\\<content type>'.
    other modes: asks for an archive file, validates it against the markers
    from getarchmark() and returns (parsed data, reboot flag).
    """
    global typecontentdir
    # Helper: report a file error and offer recovery options (pick another
    # file, create a new archive, or collect live data).
    def erroarch(mode):
        if mode==1:
            archerro='Arquivo inválido.'
        else:
            archerro='Esse arquivo não é compatível com o servidor escolhido.'
        askarcherro=leiastr(archerro+'\n\nDeseja escolher algum outro arquivo? ')
        if 's' in askarcherro:
            archpath=inputdir('\nDigite o diretório do arquivo que contém todos os dados do servidor escolhido: ')
            # (0, new path, 0) tells the caller to retry with a new file path.
            return 0, archpath, 0
        else:
            askcreatearch=leiastr("Deseja criar um arquivo com os dados do servidor escolhido ou prefere utilizar dados temporários coletados em tempo real? ")
            if 'cr' in askcreatearch:
                conteúdo, reboot=createarchdata()
            else:
                conteúdo, reboot=colectdata()
            # (1, data, reboot) tells the caller the data is final.
            return 1, conteúdo, reboot
    if mode==1:
        showprogram(1)
        # Hidden 0x0 Tk root window just to host the directory-picker dialog.
        root = tkinter.Tk()
        root.geometry('0x0')
        hqpathchoose=('Selecione o diretório da pasta que deseja guardar todos os{}conteúdos escolhidos: ')
        if totalchoices[2]=='online':
            hqpathchoose=hqpathchoose.format(' "checkpoints" dos ')
        else:
            hqpathchoose=hqpathchoose.format(' ')
        hqpathstr=hqpathchoose.replace('Selecione', '\nDigite')
        print('\n'+hqpathchoose)
        hqpath = tf.askdirectory(parent=root, initialdir="/",title =hqpathchoose)
        root.destroy()
        if hqpath=='':
            # Dialog cancelled — fall back to typed input.
            print('\nOpção cancelada.\n\nTente novamente')
            hqpath=inputdir(hqpathstr)
        while ((Path(hqpath)).is_dir())==False:
            print("\nEsse diretório não existe.\n\nTente novamente.")
            hqpath=inputdir(hqpathstr)
        typecontentdir=(hqpath+('\{}').format(totalchoices[0]))
        typecontentdir=verifpath(typecontentdir, 1)
    else:
        archpath=inputdir('\nDigite o diretório do arquivo que contém todos os dados do servidor escolhido: ')
        mark1, mark2 = getarchmark()
        while True:
            try:
                arquivo=open(archpath, 'r', encoding="utf-8")
            except (FileNotFoundError, PermissionError, OSError):
                loop, conteúdo, reboot=erroarch(1)
                if loop==0:
                    # User picked another file — retry the open.
                    archpath=conteúdo
                    continue
                else:
                    break
            else:
                reboot=1
                conteúdo=arquivo.read()
                # A valid archive must contain both markers for this server.
                if mark1 not in conteúdo or mark2 not in conteúdo:
                    loop, conteúdo, reboot=erroarch(0)
                    if loop==0:
                        archpath=conteúdo
                        continue
                    else:
                        break
                conteúdo=convertstrtolist(conteúdo, mark1, mark2)
                break
    # NOTE(review): in the mode==1 branch neither 'conteúdo' nor 'reboot' is
    # ever assigned, so this return raises NameError — confirm how mode==1
    # callers are expected to use this function.
    return conteúdo, reboot
# Função para verificar a utilização de um arquivo com informações de determinado servidor
def presetdata():
    """Ask whether to reuse previously stored server data or collect fresh data.

    Returns (alltitlesandlinks, reboot) from whichever source the user picks:
    an existing archive, a newly created archive, or a live collection.
    """
    showprogram(1)
    askpreset=leiastr('Deseja coletar os dados atualizados ou prefere utilizar dados já existentes? ')
    if 'ex' in askpreset:
        # Reuse an existing archive file.
        askpreset=1
        alltitlesandlinks, reboot=gethqpath(0)
        return alltitlesandlinks, reboot
    askpreset=0
    askcreatearch=leiastr("Deseja criar um arquivo com os dados para o funcionamento do programa ou prefere utilizar dados temporários coletados em tempo real? ")
    if 'cr' in askcreatearch:
        # Persist the freshly collected data to a new archive.
        alltitlesandlinks, reboot=createarchdata()
    else:
        # One-shot, in-memory collection.
        alltitlesandlinks, reboot=colectdata()
    return alltitlesandlinks, reboot
# Função para verificar se os dados estão repetidos
def verifcopy(datalist):
    """Return *datalist* with duplicate entries removed, keeping first occurrences.

    Entries are compared by their ``str(...)`` form, matching the original
    intent. Bug fix: the previous implementation concatenated every entry's
    string form and used substring counting, which wrongly dropped an entry
    whose string form appeared as a substring of the text accumulated so far
    (e.g. 'a' after 'ab'). Exact membership in a set avoids that.
    """
    unique = []
    seen = set()
    for entry in datalist:
        key = str(entry)
        if key not in seen:
            seen.add(key)
            # Deep-copy kept entries so the result is decoupled from the
            # caller's list, as the original deepcopy-based code guaranteed.
            unique.append(copy.deepcopy(entry))
    return unique
# Função para personalizar a informação escrita no arquivo de texto
def txtmsginfo(posmsgs):
    """Select grammar fragments (article, noun, unit, verb forms) for messages.

    Builds a fragment list for the current content type in totalchoices[0]
    and returns the fragments at the indices given in *posmsgs* (indices
    beyond the list are silently dropped).
    """
    contentypename = totalchoices[0][:-1].lower()
    # Portuguese article: feminine for 'série', masculine otherwise.
    article = 'a' if contentypename == 'série' else 'o'
    if totalchoices[0] in ('Mangás', 'Quadrinhos'):
        fragments = [article, contentypename, 'capítulo', 'ler', 'lendo', 'lido']
    elif totalchoices[0] == 'Filmes':
        fragments = [article, contentypename, 'filme', 'assistir', 'assistindo', 'assistido']
    elif totalchoices[0] == 'Séries':
        fragments = [article, contentypename, 'episódio', 'assistir', 'assistindo', 'assistido']
    else:
        fragments = [article, contentypename, 'episódio', 'ver', 'vendo', 'visto']
    # Clamp the requested indices to the available fragments.
    if len(posmsgs) > len(fragments):
        posmsgs = posmsgs[:len(fragments)]
    return [fragments[pos] for pos in posmsgs]
# Função para obter a informação de qual título foi escolhido
def getespecifictile(alltitlesandlinks):
    """Prompt for a title name and locate it in the scraped title list.

    The user's input is used as a regular expression against each lowercased
    title; every match is offered for confirmation. Returns the index of the
    accepted title, or -10 when nothing matched or every match was declined.
    """
    article, txtmsg1, txtmsg2=txtmsginfo((0, 1, 3))
    if totalchoices[2]!='online':
        txtmsg2='baixar'
    asktitlemsg=('\nDigite o nome d{} {} que deseja {}: ').format(article, txtmsg1, txtmsg2)
    asktitle=input(asktitlemsg).strip().lower()
    postitle=-1
    # Bug fix: 'gotanswer' was only assigned inside the loop, so an empty
    # title list (or input matching nothing) raised NameError below.
    gotanswer=0
    for ft in alltitlesandlinks:
        postitle=postitle+1
        if len(re.findall(asktitle, (ft[0].lower())))!=0:
            titleanswer=leiastr(('Deseja acessar {}? ').format(ft[0]))
            if 's' in titleanswer:
                gotanswer=1
                break
    if gotanswer==1:
        return postitle
    print('\nTodas as opções acabaram.')
    time.sleep(1.25)
    return -10
# Função para atualizar o texto de um arquivo existente ou criá-lo caso inexistente
def updatearch(archdir, newlines):
    """Append *newlines* to a UTF-8 text file, creating it when missing.

    The sentinel string 'None' truncates/creates the file with no content.
    Returns *archdir* only when the file was (re)created, matching the
    original contract. Bug fix: the previous code rebound the read handle to
    the write handle before closing it, leaking the reader; 'with' blocks now
    close both handles deterministically.
    """
    if verifpath(archdir, 0)==1 or newlines=='None':
        # Create (or truncate) the file.
        open(archdir, 'w', encoding='utf-8').close()
        if newlines=='None':
            newlines=''
        returndir=1
    else:
        returndir=0
    with open(archdir, 'r', encoding='utf-8') as arch:
        conteúdo = arch.readlines()
    conteúdo.append(newlines)
    with open(archdir, 'w', encoding='utf-8') as arch:
        arch.writelines(conteúdo)
    if returndir==1:
        return archdir
# Função para conferir os capítulos escolhidos para download
def leiacap(caps, qntcaps):
    """Parse the user's chapter selection string.

    Supported forms: 'a-b' (inclusive range), 'a;b;c' (explicit list), a
    string containing 'c' ('continuar') or 't' ('todos'), or a single number.
    Returns a list of chapter numbers, the literal strings 'continuar'/'todos',
    or -10 for invalid input (out of 1..qntcaps, or wrong count of numbers).
    """
    # Helper: extract the integers from *caps* (via extractint) and validate
    # each against the 1..qntcaps range; -10 signals invalid input.
    def read():
        nonlocal caps, qntcaps
        caps=extractint(caps)
        capsinfo=[]
        for cap in caps:
            if int(cap) not in range(1, (qntcaps+1)):
                return -10
            else:
                capsinfo.append(int(cap))
        if len(capsinfo)==0:
            return -10
        else:
            return capsinfo
    listcaps=[]
    if '-' in caps:
        # Range form: exactly two endpoints, expanded inclusively.
        caps=caps.replace('-', ' ')
        caps=read()
        if caps==-10 or len(caps)!=2:
            return -10
        else:
            for elemcap in range(caps[0], (caps[1]+1)):
                listcaps.append(elemcap)
    elif ';' in caps:
        # List form: any number of chapters, each validated.
        caps=caps.replace(';', ' ')
        caps=read()
        if caps==-10:
            return -10
        else:
            for elemcap in caps:
                listcaps.append(elemcap)
    elif 'c' in caps:
        # 'continuar' — resume from where the user stopped.
        listcaps='continuar'
    elif 't' in caps:
        # 'todos' — every chapter.
        listcaps='todos'
    else:
        # Single-number form: exactly one chapter expected.
        caps=read()
        if caps==-10 or len(caps)!=1:
            return -10
        else:
            listcaps=caps
    return listcaps
# Função para montar o diretório do arquivo que está sendo baixado
def getcontentfiledir(namecap, capfolder, numpage):
    """Compose the destination path of the file currently being downloaded.

    Comics/manga pages become numbered .png files inside *capfolder*; every
    other content type becomes a single .mp4 named after the chapter/episode.
    """
    if totalchoices[0] in ('Mangás', 'Quadrinhos'):
        filename = ('Page_{}').format(numpage)
        return capfolder + (('\{}.png').format(filename))
    return capfolder + (('\{}.mp4').format(namecap))
# Função para carregar continuamente os dados que serão utilizados
def preloaddata(resetmode, namecap, capfolder, capurl):
    """Continuously pre-fetch page URLs (and, for downloads, page payloads).

    resetmode 0: scrape the page links of *capurl*, fetch each page's binary
    content when downloading (totalchoices[2] != 'online') and the file does
    not exist yet, and append the [url, data, url, data, ...] list to the
    global 'capsinfolist'. Any other resetmode clears that global cache.
    Returns -10 when scraping/fetching fails; otherwise returns None.
    """
    global capsinfolist
    if resetmode==0:
        # Lazily create the global cache on first use.
        try:
            capsinfolist
        except (NameError):
            capsinfolist=[]
        pagesurl=getallinfolinks(capurl, 'numcappgsclass', 'numcappgsextrainfo', 'firstcappgsfilter', 'secondcappgsfilter', 'pagescapclass', 'cappagesextrainfo', 'firstpgscapfilter', 'secondpgscapfilter')
        if pagesurl==-10:
            return -10
        # Drop duplicated page links before fetching.
        pagesurl=verifcopy(pagesurl)
        pgsdatalist=[]
        numpage=-1
        for pgurl in pagesurl:
            numpage=numpage+1
            pgdata=''
            if totalchoices[2]!='online':
                pagefile=getcontentfiledir(namecap, capfolder, numpage)
                # Only fetch payloads for files not yet on disk.
                if verifpath(pagefile, 0)==1:
                    pgdata=getinfo(pgurl, 'contentpagesclass', 'contentpagesextainfo', 'firstcontentfilter', 'secondcontentfilter')
                    if pgdata==-10:
                        return -10
                    # pgdata is a response object here; keep only its bytes.
                    pgdata=pgdata.content
            # Flat [url, data] pairs, consumed downstream in twos.
            pgsdatalist.extend([pgurl, pgdata])
        capsinfolist.append(pgsdatalist)
    else:
        # Reset mode: discard everything cached so far.
        capsinfolist=[]
# Função para controlar as mensagens mostradas pela thread
def printthreadmsg(msgmode):
    """Print the status messages coordinated between the viewer/downloader threads.

    When both modes run simultaneously (totalchoices[2] == 'both') the global
    'threadtrick' selects which message fits the current state; otherwise the
    screen is cleared and both completion messages are shown.
    """
    if totalchoices[2] != 'both':
        os.system('cls')
        print("\nTodos os conteúdos foram mostrados.")
        print("\nTodos os downloads foram finalizados.")
        return
    if msgmode == 'online':
        if threadtrick == -1:
            print("\nTodos os downloads foram finalizados.")
    elif msgmode == 'offline':
        if threadtrick == 0:
            print("\nPressione 'Enter' para continuar.")
        elif threadtrick == 1:
            print("\nTodos os conteúdos foram mostrados.")
# função para definir e administrar a mensagem mostrada na tela se todas as threads estiverem ativadas e ocorrendo ao mesmo tempo
def updateglobalmsg(newmsg):
    """Replace the global prompt message shown while all threads are active."""
    global askcontinueviewing
    askcontinueviewing=newmsg
# Função para ver o conteúdo online
def onlineview(nametitle, numcap, htmlname, checkpoint, dircapcheckpoint, pagesinfolist, countviewedcaps):
htmlpart1=(('''
<!DOCTYPE html>
<html lang="pt-br">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>%(title1)s-%(title2)s</title>
<style>
body{
background-color: gray;
background-image: url(https://images.wallpaperscraft.com/image/dark_spots_texture_background_50355_1920x1080.jpg);
background-size: cover;
background-attachment: fixed;
}
h1 {
font-size: 55px;
font-style: italic;
font-variant: small-caps;
font-weight: bold;
font-family: 'Gill Sans', 'Gill Sans MT', Calibri, 'Trebuchet MS', sans-serif;
}
h2 {
font-size: 45px;
font-style: italic;
font-variant: small-caps;
font-weight: bold;
font-family: Cambria, Cochin, Georgia, Times, 'Times New Roman', serif;
}
p {
font-size: 25px;
font-style: normal;
font-variant: small-caps;
font-weight: normal;
font-family: 'Courier New', Courier, monospace;
opacity: 0.75;
}
</style>
</head>
<body>
<center>
<h1>%(title1)s</h1>
<h2>%(title2)s</h2>''')%{'title1':nametitle, 'title2':htmlname})
htmlpart2=('''
<p>Alemaquirque</p>
</center>
</body>
</html>''')
os.system('cls')
printthreadmsg('online')
print(('\n{}\n\nAbrindo {}').format(nametitle, htmlname))
folderhtmldir=((typecontentdir+'\{}\Html_File').format(nametitle))
folderhtmldir=verifpath(folderhtmldir, 1)
archhtmldir=((folderhtmldir+'\{}.html').format(htmlname))
tmparchhtmldirtest=verifpath(archhtmldir, 0)
if tmparchhtmldirtest==1:
archhtmldir=updatearch(archhtmldir, htmlpart1)
contentnum=-1
pagesinfolist=pagesinfolist[::2]
for site in pagesinfolist:
contentnum=contentnum+1
if totalchoices[0]=='Mangás' or totalchoices[0]=='Quadrinhos':
htmlcodestr=('\n <img rel="preload" | |
    def exportAttributes(self, outfile, level, already_processed, namespace_='WinExecutableFileObj:', name_='PEResourceType'):
        # PEResourceType defines no XML attributes, so there is nothing to emit.
        pass
def exportChildren(self, outfile, level, namespace_='WinExecutableFileObj:', name_='PEResourceType', fromsubclass_=False):
if self.Type is not None:
self.Type.export(outfile, level, namespace_, name_='Type')
if self.Name is not None:
self.Name.export(outfile, level, namespace_, name_='Name')
if self.Hashes is not None:
self.Hashes.export(outfile, level, namespace_, name_='Hashes')
def hasContent_(self):
if (
self.Type is not None or
self.Name is not None or
self.Hashes is not None
):
return True
else:
return False
    def exportLiteral(self, outfile, level, name_='PEResourceType'):
        # Python-literal rendering: attributes first, then children when present.
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        # No XML attributes on this type, so the literal form emits none.
        pass
def exportLiteralChildren(self, outfile, level, name_):
if self.Type is not None:
showIndent(outfile, level)
outfile.write('Type=%s,\n' % quote_python(self.Type).encode(ExternalEncoding))
if self.Name is not None:
showIndent(outfile, level)
outfile.write('Name=%s,\n' % quote_python(self.Name).encode(ExternalEncoding))
if self.Hashes is not None:
showIndent(outfile, level)
outfile.write('Hashes=%s,\n' % quote_python(self.Hashes).encode(ExternalEncoding))
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        # No XML attributes are defined for this type; nothing to parse.
        pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Type':
Type_ = common.StringObjectAttributeType.factory()
Type_.build(child_)
self.set_Type(Type_)
self.validate_PEResourceTypeEnum(self.Type) # validate type PEResourceTypeEnum
elif nodeName_ == 'Name':
Name_ = common.StringObjectAttributeType.factory()
Name_.build(child_)
self.Name = Name_
elif nodeName_ == 'Hashes':
Hashes_ = common.StringObjectAttributeType.factory()
Hashes_.build(child_)
self.Hashes = Hashes_
# end class PEResourceType
class PEExportedFunctionType(GeneratedsSuper):
    """PEExportType specifies the type describing exported functions."""
    subclass = None
    superclass = None
    def __init__(self, Function_Name=None, Entry_Point=None, Ordinal=None):
        # Child elements; each is either None or a parsed value/object.
        self.Function_Name = Function_Name
        self.Entry_Point = Entry_Point
        self.Ordinal = Ordinal
    def factory(*args_, **kwargs_):
        # Delegate construction to an installed subclass, when one exists.
        cls = PEExportedFunctionType.subclass
        if cls:
            return cls(*args_, **kwargs_)
        return PEExportedFunctionType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Function_Name(self): return self.Function_Name
    def set_Function_Name(self, Function_Name): self.Function_Name = Function_Name
    def get_Entry_Point(self): return self.Entry_Point
    def set_Entry_Point(self, Entry_Point): self.Entry_Point = Entry_Point
    def get_Ordinal(self): return self.Ordinal
    def set_Ordinal(self, Ordinal): self.Ordinal = Ordinal
    def _children(self):
        # (label, value) pairs in schema order, shared by the export helpers.
        return (('Function_Name', self.Function_Name),
                ('Entry_Point', self.Entry_Point),
                ('Ordinal', self.Ordinal))
    def export(self, outfile, level, namespace_='WinExecutableFileObj:', name_='PEExportedFunctionType', namespacedef_=''):
        # Serialize as an XML element; collapse to '<.../>' when empty.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        self.exportAttributes(outfile, level, [], namespace_, name_='PEExportedFunctionType')
        if not self.hasContent_():
            outfile.write('/>\n')
            return
        outfile.write('>\n')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        showIndent(outfile, level)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, already_processed, namespace_='WinExecutableFileObj:', name_='PEExportedFunctionType'):
        # This type defines no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespace_='WinExecutableFileObj:', name_='PEExportedFunctionType', fromsubclass_=False):
        # Emit each populated child element in schema order.
        for label, child in self._children():
            if child is not None:
                child.export(outfile, level, namespace_, name_=label)
    def hasContent_(self):
        # True when at least one child element is populated.
        return any(child is not None for _label, child in self._children())
    def exportLiteral(self, outfile, level, name_='PEExportedFunctionType'):
        # Python-literal rendering: attributes first, then children.
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        # No attributes in the literal representation either.
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        # Each populated child becomes a 'Label=value,' line.
        for label, child in self._children():
            if child is not None:
                showIndent(outfile, level)
                outfile.write('%s=%s,\n' % (label, quote_python(child).encode(ExternalEncoding)))
    def build(self, node):
        # Populate the instance from an etree node.
        self.buildAttributes(node, node.attrib, [])
        for child_ in node:
            tag = Tag_pattern_.match(child_.tag).groups()[-1]
            self.buildChildren(child_, node, tag)
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Function_Name':
            obj_ = common.StringObjectAttributeType.factory()
            obj_.build(child_)
            self.set_Function_Name(obj_)
        elif nodeName_ == 'Entry_Point':
            obj_ = common.StringObjectAttributeType.factory()
            obj_.build(child_)
            self.set_Entry_Point(obj_)
        elif nodeName_ == 'Ordinal':
            # Ordinal is plain character data, validated as a string.
            self.Ordinal = self.gds_validate_string(child_.text, node, 'Ordinal')
# end class PEExportedFunctionType
class PEResourceListType(GeneratedsSuper):
    """PEResourceListType specifies a list of resources found in the PE
    file."""
    subclass = None
    superclass = None
    def __init__(self, Resource=None):
        # Resource holds PEResourceType children; default to a fresh list.
        self.Resource = [] if Resource is None else Resource
    def factory(*args_, **kwargs_):
        # Delegate construction to an installed subclass, when one exists.
        cls = PEResourceListType.subclass
        if cls:
            return cls(*args_, **kwargs_)
        return PEResourceListType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Resource(self): return self.Resource
    def set_Resource(self, Resource): self.Resource = Resource
    def add_Resource(self, value): self.Resource.append(value)
    def insert_Resource(self, index, value): self.Resource[index] = value
    def export(self, outfile, level, namespace_='WinExecutableFileObj:', name_='PEResourceListType', namespacedef_=''):
        # Serialize as an XML element; collapse to '<.../>' when empty.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        self.exportAttributes(outfile, level, [], namespace_, name_='PEResourceListType')
        if not self.hasContent_():
            outfile.write('/>\n')
            return
        outfile.write('>\n')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        showIndent(outfile, level)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, already_processed, namespace_='WinExecutableFileObj:', name_='PEResourceListType'):
        # This type defines no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespace_='WinExecutableFileObj:', name_='PEResourceListType', fromsubclass_=False):
        # Every entry exports itself under the element name 'Resource'.
        for entry in self.Resource:
            entry.export(outfile, level, namespace_, name_='Resource')
    def hasContent_(self):
        # Content exists exactly when the resource list is non-empty.
        return bool(self.Resource)
    def exportLiteral(self, outfile, level, name_='PEResourceListType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        # Render the list as a Python-literal list of PEResourceType calls.
        showIndent(outfile, level)
        outfile.write('Resource=[\n')
        level += 1
        for entry in self.Resource:
            showIndent(outfile, level)
            outfile.write('model_.PEResourceType(\n')
            entry.exportLiteral(outfile, level, name_='PEResourceType')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        # Populate the instance from an etree node.
        self.buildAttributes(node, node.attrib, [])
        for child_ in node:
            tag = Tag_pattern_.match(child_.tag).groups()[-1]
            self.buildChildren(child_, node, tag)
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Each 'Resource' child is parsed into a PEResourceType and appended.
        if nodeName_ == 'Resource':
            obj_ = PEResourceType.factory()
            obj_.build(child_)
            self.Resource.append(obj_)
# end class PEResourceListType
class PEImportedFunctionType(GeneratedsSuper):
    """PEImportedFunctionType specifies the type describing imported
    functions."""
    subclass = None
    superclass = None
    def __init__(self, Function_Name=None, Hint=None, Ordinal=None, Bound=None, Virtual_Address=None):
        # Child elements; each is either None or a parsed value/object.
        self.Function_Name = Function_Name
        self.Hint = Hint
        self.Ordinal = Ordinal
        self.Bound = Bound
        self.Virtual_Address = Virtual_Address
    def factory(*args_, **kwargs_):
        # Delegate construction to an installed subclass, when one exists.
        cls = PEImportedFunctionType.subclass
        if cls:
            return cls(*args_, **kwargs_)
        return PEImportedFunctionType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Function_Name(self): return self.Function_Name
    def set_Function_Name(self, Function_Name): self.Function_Name = Function_Name
    def get_Hint(self): return self.Hint
    def set_Hint(self, Hint): self.Hint = Hint
    def get_Ordinal(self): return self.Ordinal
    def set_Ordinal(self, Ordinal): self.Ordinal = Ordinal
    def get_Bound(self): return self.Bound
    def set_Bound(self, Bound): self.Bound = Bound
    def get_Virtual_Address(self): return self.Virtual_Address
    def set_Virtual_Address(self, Virtual_Address): self.Virtual_Address = Virtual_Address
    def _children(self):
        # (label, value) pairs in schema order, shared by the export helpers.
        return (('Function_Name', self.Function_Name),
                ('Hint', self.Hint),
                ('Ordinal', self.Ordinal),
                ('Bound', self.Bound),
                ('Virtual_Address', self.Virtual_Address))
    def export(self, outfile, level, namespace_='WinExecutableFileObj:', name_='PEImportedFunctionType', namespacedef_=''):
        # Serialize as an XML element; collapse to '<.../>' when empty.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        self.exportAttributes(outfile, level, [], namespace_, name_='PEImportedFunctionType')
        if not self.hasContent_():
            outfile.write('/>\n')
            return
        outfile.write('>\n')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        showIndent(outfile, level)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, already_processed, namespace_='WinExecutableFileObj:', name_='PEImportedFunctionType'):
        # This type defines no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespace_='WinExecutableFileObj:', name_='PEImportedFunctionType', fromsubclass_=False):
        # Emit each populated child element in schema order.
        for label, child in self._children():
            if child is not None:
                child.export(outfile, level, namespace_, name_=label)
    def hasContent_(self):
        # True when at least one child element is populated.
        return any(child is not None for _label, child in self._children())
    def exportLiteral(self, outfile, level, name_='PEImportedFunctionType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        # Each populated child becomes a 'Label=value,' line.
        for label, child in self._children():
            if child is not None:
                showIndent(outfile, level)
                outfile.write('%s=%s,\n' % (label, quote_python(child).encode(ExternalEncoding)))
    def build(self, node):
        # Populate the instance from an etree node.
        self.buildAttributes(node, node.attrib, [])
        for child_ in node:
            tag = Tag_pattern_.match(child_.tag).groups()[-1]
            self.buildChildren(child_, node, tag)
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Function_Name':
            # Function_Name is a structured StringObjectAttributeType element.
            obj_ = common.StringObjectAttributeType.factory()
            obj_.build(child_)
            self.set_Function_Name(obj_)
        elif nodeName_ in ('Hint', 'Ordinal', 'Bound', 'Virtual_Address'):
            # The remaining children are plain character data, validated as strings.
            setattr(self, nodeName_, self.gds_validate_string(child_.text, node, nodeName_))
# end class PEImportedFunctionType
class PEImportListType(GeneratedsSuper):
"""PEImportListType specifies a list of functions in an import data
section."""
subclass = None
superclass = None
    def __init__(self, Import=None):
        # Import holds PEImportedFunctionType entries; default to an empty list
        # (built per instance, never a shared mutable default).
        if Import is None:
            self.Import = []
        else:
            self.Import = Import
    def factory(*args_, **kwargs_):
        # Construct via the registered subclass when one has been installed.
        if PEImportListType.subclass:
            return PEImportListType.subclass(*args_, **kwargs_)
        else:
            return PEImportListType(*args_, **kwargs_)
    # Pre-decorator staticmethod idiom preserved from the code generator.
    factory = staticmethod(factory)
    # Simple generateDS-style accessors for the Import list.
    def get_Import(self): return self.Import
    def set_Import(self, Import): self.Import = Import
    def add_Import(self, value): self.Import.append(value)
    def insert_Import(self, index, value): self.Import[index] = value
def export(self, outfile, level, namespace_='WinExecutableFileObj:', name_='PEImportListType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='PEImportListType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
| |
# Import Built-Ins
import logging
import json
import time
import queue
import threading
from threading import Thread
# Import Third-Party
from websocket import create_connection, WebSocketTimeoutException
from websocket import WebSocketConnectionClosedException
# Import Homebrew
from bitex.api.WSS.base import WSSAPI
# import Server-side Exceptions
from bitex.api.WSS.exceptions import InvalidBookLengthError, GenericSubscriptionError
from bitex.api.WSS.exceptions import NotSubscribedError, AlreadySubscribedError
from bitex.api.WSS.exceptions import InvalidPairError, InvalidChannelError
from bitex.api.WSS.exceptions import InvalidEventError, InvalidBookPrecisionError
# import Client-side Exceptions
from bitex.api.WSS.exceptions import UnknownEventError, UnknownWSSError
from bitex.api.WSS.exceptions import UnknownWSSInfo, AlreadyRegisteredError
from bitex.api.WSS.exceptions import NotRegisteredError, UnknownChannelError
from bitex.api.WSS.exceptions import FaultyPayloadError
# Init Logging Facilities
log = logging.getLogger(__name__)
class BitfinexWSS(WSSAPI):
"""
Client Class to connect to Bitfinex Websocket API. Data is stored in attributes.
Features error handling and logging, as well as reconnection automation if
the Server issues a connection reset.
"""
    def __init__(self, pairs=None):
        """
        Initializes BitfinexWSS Instance.
        :param pairs: list of pair strings to track; defaults to a built-in
                      selection of Bitfinex markets when None.
        """
        super(BitfinexWSS, self).__init__('wss://api.bitfinex.com/ws/2', 'Bitfinex')
        self.conn = None
        if pairs:
            self.pairs = pairs
        else:
            self.pairs = ['ETHBTC', 'BTCUSD', 'ETHUSD', 'ETCUSD', 'ETCBTC',
                          'ZECUSD', 'ZECBTC', 'XMRUSD', 'XMRBTC', 'LTCUSD',
                          'LTCBTC', 'DSHUSD', 'DSHBTC']
        # Set up variables for receiver and main loop threads
        # _receiver_lock doubles as the pause switch: pause() holds it so
        # receive() idles; _processor_lock plays the same role for process().
        self._receiver_lock = threading.Lock()
        self._processor_lock = threading.Lock()
        self.receiver_q = queue.Queue()
        self.receiver_thread = None
        self.processing_thread = None
        # ping_timer holds the send-time of the last ping; timeout is the
        # number of seconds after which _check_ping() raises TimeoutError.
        self.ping_timer = None
        self.timeout = 5
        # chan_id -> timestamp of the last heartbeat seen on that channel.
        self._heartbeats = {}
        # chan_id -> timestamp at which the channel was flagged as late.
        self._late_heartbeats = {}
        # Set up book-keeping variables & configurations
        self.api_version = None
        self.channels = {}  # Dict for matching channel ids with handlers
        self.channel_labels = {}  # Dict for matching channel ids with names
        self.channel_states = {}  # Dict for matching channel ids with status of each channel (alive/dead)
        self.channel_configs = {}  # Variables, as set by subscribe command
        self.wss_config = {}  # Config as passed by 'config' command
        # Maps the 'event' field of server messages to handler methods.
        self._event_handlers = {'error': self._raise_error,
                                'unsubscribed': self._handle_unsubscribed,
                                'subscribed': self._handle_subscribed,
                                'auth': self._handle_subscribed,
                                'unauth': self._handle_unsubscribed,
                                'info': self._handle_info,
                                'pong': self._handle_pong,
                                'conf': self._handle_conf}
        # Maps channel kinds to the methods that process their payloads.
        self._data_handlers = {'ticker': self._handle_ticker,
                               'book': self._handle_book,
                               'raw_book': self._handle_raw_book,
                               'candles': self._handle_candles,
                               'trades': self._handle_trades,
                               'auth': self._handle_auth}
        # 1XXXX == Error Code -> raise, 2XXXX == Info Code -> call
        def restart_client():
            # Closure queued as the 20051 handler: asks the controller loop
            # (presumably in the WSSAPI base class — confirm) for a restart.
            self._controller_q.put('restart')
        self._code_handlers = {'20051': restart_client,
                               '20060': self.pause,
                               '20061': self.unpause,
                               '10000': InvalidEventError,
                               '10001': InvalidPairError,
                               '10300': GenericSubscriptionError,
                               '10301': AlreadySubscribedError,
                               '10302': InvalidChannelError,
                               '10400': GenericSubscriptionError,
                               '10401': NotSubscribedError,
                               '10011': InvalidBookPrecisionError,
                               '10012': InvalidBookLengthError}
def eval_command(self, cmd):
"""
Thread func to allow restarting / stopping of threads, for example
when receiving a connection reset info message from the wss server.
:return:
"""
if cmd == 'restart':
self.restart(soft=True)
elif cmd == 'stop':
self.stop()
    def _check_heartbeats(self, ts, *args, **kwargs):
        """
        Checks if the heartbeats are on-time. If not, the channel id is escalated
        to self._late_heartbeats and a warning is issued; once a hb is received
        again from this channel, it'll be removed from this dict, and an Info
        message logged.
        :param ts: timestamp, declares when data was received by the client
        :return:
        """
        for chan_id in self._heartbeats:
            # A channel counts as late after 10 seconds without a heartbeat.
            if ts - self._heartbeats[chan_id] >= 10:
                if chan_id not in self._late_heartbeats:
                    try:
                        # This is newly late; escalate
                        log.warning("BitfinexWSS.heartbeats: Channel %s hasn't "
                                    "sent a heartbeat in %s seconds!",
                                    self.channel_labels[chan_id],
                                    ts - self._heartbeats[chan_id])
                        self._late_heartbeats[chan_id] = ts
                    except KeyError:
                        # This channel ID Is not known to us - log and raise
                        log.error("BitfinexWSS.heartbeats: Channel %s is not "
                                  "registered in the connector's registry! "
                                  "Restarting Connection to avoid errors..",
                                  chan_id)
                        raise UnknownChannelError
                else:
                    # We know of this already
                    continue
            else:
                # its not late
                try:
                    self._late_heartbeats.pop(chan_id)
                except KeyError:
                    # wasn't late before, check next channel
                    continue
                # Only reached when the channel had been late and recovered.
                log.info("BitfinexWSS.heartbeats: Channel %s has sent a "
                         "heartbeat again!", self.channel_labels[chan_id])
            self.ping()
def _check_ping(self):
"""
Checks if the ping command timed out and raises TimeoutError if so.
:return:
"""
if time.time() - self.ping_timer > self.timeout:
raise TimeoutError("Ping Command timed out!")
    def pause(self):
        """
        Pauses the client

        Holding the receiver lock keeps receive() from reading the socket
        (its non-blocking acquire fails and it idles). This call blocks until
        the receiver releases the lock at the end of its current iteration.
        :return:
        """
        self._receiver_lock.acquire()
        log.info("BitfinexWSS.pause(): Pausing client..")
def unpause(self):
"""
Unpauses the client
:return:
"""
self._receiver_lock.release()
log.info("BitfinexWSS.pause(): Unpausing client..")
    def start(self):
        """
        Start the websocket client threads

        Retries the websocket connection until it succeeds, then spawns the
        receiver and processing threads (unless already populated) and sets
        up the channel subscriptions.
        :return:
        """
        super(BitfinexWSS, self).start()
        log.info("BitfinexWSS.start(): Initializing Websocket connection..")
        # Retry indefinitely on connect timeouts.
        while self.conn is None:
            try:
                self.conn = create_connection(self.addr, timeout=10)
            except WebSocketTimeoutException:
                self.conn = None
                print("Couldn't create websocket connection - retrying!")
        log.info("BitfinexWSS.start(): Initializing receiver thread..")
        if not self.receiver_thread:
            self.receiver_thread = Thread(target=self.receive, name='Receiver Thread')
            self.receiver_thread.start()
        else:
            log.info("BitfinexWSS.start(): Thread not started! "
                     "self.receiver_thread is populated!")
        log.info("BitfinexWSS.start(): Initializing processing thread..")
        if not self.processing_thread:
            self.processing_thread = Thread(target=self.process, name='Processing Thread')
            self.processing_thread.start()
        else:
            log.info("BitfinexWSS.start(): Thread not started! "
                     "self.processing_thread is populated!")
        self.setup_subscriptions()
def stop(self):
"""
Stop all threads and modules of the client.
:return:
"""
super(BitfinexWSS, self).stop()
log.info("BitfinexWSS.stop(): Stopping client..")
log.info("BitfinexWSS.stop(): Joining receiver thread..")
try:
self.receiver_thread.join()
if self.receiver_thread.is_alive():
time.time(1)
except AttributeError:
log.debug("BitfinexWSS.stop(): Receiver thread was not running!")
log.info("BitfinexWSS.stop(): Joining processing thread..")
try:
self.processing_thread.join()
if self.processing_thread.is_alive():
time.time(1)
except AttributeError:
log.debug("BitfinexWSS.stop(): Processing thread was not running!")
log.info("BitfinexWSS.stop(): Closing websocket conection..")
try:
self.conn.close()
except WebSocketConnectionClosedException:
pass
except AttributeError:
# Connection is None
pass
self.conn = None
self.processing_thread = None
self.receiver_thread = None
log.info("BitfinexWSS.stop(): Done!")
def restart(self, soft=False):
"""
Restarts client. If soft is True, the client attempts to re-subscribe
to all channels which it was previously subscribed to.
:return:
"""
log.info("BitfinexWSS.restart(): Restarting client..")
super(BitfinexWSS, self).restart()
# cache channel labels temporarily if soft == True
channel_labels = [self.channel_labels[k] for k in self.channel_labels] if soft else None
# clear previous channel caches
self.channels = {}
self.channel_labels = {}
self.channel_states = {}
if channel_labels:
# re-subscribe to channels
for channel_name, kwargs in channel_labels:
self._subscribe(channel_name, **kwargs)
def receive(self):
"""
Receives incoming websocket messages, and puts them on the Client queue
for processing.
:return:
"""
while self.running:
if self._receiver_lock.acquire(blocking=False):
try:
raw = self.conn.recv()
except WebSocketTimeoutException:
self._receiver_lock.release()
continue
except WebSocketConnectionClosedException:
# this needs to restart the client, while keeping track
# of the currently subscribed channels!
self.conn = None
self._controller_q.put('restart')
except AttributeError:
# self.conn is None, idle loop until shutdown of thread
self._receiver_lock.release()
continue
msg = time.time(), json.loads(raw)
log.debug("receiver Thread: Data Received: %s", msg)
self.receiver_q.put(msg)
self._receiver_lock.release()
else:
# The receiver_lock was locked, idling until available
time.sleep(0.5)
    def process(self):
        """
        Processes the Client queue, and passes the data to the respective
        methods.

        Runs until ``self.running`` becomes False. The processor lock is used
        to pause/unpause this thread from the outside.

        :return:
        """
        while self.running:
            if self._processor_lock.acquire(blocking=False):
                # Watchdog: verify the server answered our last ping in time.
                if self.ping_timer:
                    try:
                        self._check_ping()
                    except TimeoutError:
                        log.exception("BitfinexWSS.ping(): TimedOut! (%ss)" %
                                      self.ping_timer)
                    except (WebSocketConnectionClosedException,
                            ConnectionResetError):
                        log.exception("BitfinexWSS.ping(): Connection Error!")
                        self.conn = None
                if not self.conn:
                    # The connection was killed - initiate restart
                    self._controller_q.put('restart')
                # Pull the next received message; the short timeout keeps the
                # loop responsive so heartbeats are still checked when idle.
                skip_processing = False
                try:
                    ts, data = self.receiver_q.get(timeout=0.1)
                except queue.Empty:
                    skip_processing = True
                    ts = time.time()
                    data = None
                if not skip_processing:
                    log.debug("Processing Data: %s", data)
                    # lists carry channel data; anything else is treated as a
                    # response/event message
                    if isinstance(data, list):
                        self.handle_data(ts, data)
                    else:  # Not a list, hence it could be a response
                        try:
                            self.handle_response(ts, data)
                        except UnknownEventError:
                            # We don't know what event this is- Raise an
                            # error & log data!
                            log.exception("main() - UnknownEventError: %s",
                                          data)
                            log.info("main() - Shutting Down due to "
                                     "Unknown Error!")
                            self._controller_q.put('stop')
                        except ConnectionResetError:
                            log.info("processor Thread: Connection Was reset, "
                                     "initiating restart")
                            self._controller_q.put('restart')
                self._check_heartbeats(ts)
                self._processor_lock.release()
            else:
                time.sleep(0.5)
##
# Response Message Handlers
##
def handle_response(self, ts, resp):
"""
Passes a response message to the corresponding event handler, and also
takes care of handling errors raised by the _raise_error handler.
:param ts: timestamp, declares when data was received by the client
:param resp: dict, containing info or error keys, among others
:return:
"""
log.info("handle_response: Handling response %s", resp)
event = resp['event']
try:
self._event_handlers[event](ts, **resp)
# Handle Non-Critical Errors
except (InvalidChannelError, InvalidPairError, InvalidBookLengthError,
InvalidBookPrecisionError) as e:
log.exception(e)
print(e)
except (NotSubscribedError, AlreadySubscribedError) as e:
log.exception(e)
print(e)
except GenericSubscriptionError as e:
log.exception(e)
print(e)
# Handle Critical Errors
except InvalidEventError as e:
log.critical("handle_response(): %s; %s", e, resp)
log.exception(e)
raise SystemError(e)
except KeyError:
# unsupported event!
raise UnknownEventError("handle_response(): %s" % resp)
def _handle_subscribed(self, *args, chanId=None, channel=None, **kwargs):
"""
Handles responses to subscribe() commands - registers a channel id with
the client and assigns a data handler to it.
:param chanId: int, represent channel id as assigned by server
:param channel: str, represents channel name
"""
log.debug("_handle_subscribed: %s - %s - %s", chanId, channel, kwargs)
if chanId in self.channels:
raise AlreadyRegisteredError()
self._heartbeats[chanId] = time.time()
try:
channel_key = ('raw_'+channel
if kwargs['prec'].startswith('R') and channel == 'book'
else channel)
except KeyError:
channel_key = channel
try:
self.channels[chanId] = self._data_handlers[channel_key]
except KeyError:
raise UnknownChannelError()
# prep kwargs to be used as secondary value in dict key
try:
kwargs.pop('event')
| |
lcdata[key]
for key in sapkeys:
lcdict['sap'][key.lower()] = lcdata[key]
for key in pdckeys:
lcdict['pdc'][key.lower()] = lcdata[key]
# turn some of the light curve information into numpy arrays so we can
# sort on them later
lcdict['lc_channel'] = npfull_like(lcdict['time'],
lcdict['lcinfo']['channel'][0])
lcdict['lc_skygroup'] = npfull_like(lcdict['time'],
lcdict['lcinfo']['skygroup'][0])
lcdict['lc_module'] = npfull_like(lcdict['time'],
lcdict['lcinfo']['module'][0])
lcdict['lc_output'] = npfull_like(lcdict['time'],
lcdict['lcinfo']['output'][0])
lcdict['lc_quarter'] = npfull_like(lcdict['time'],
lcdict['quarter'][0])
lcdict['lc_season'] = npfull_like(lcdict['time'],
lcdict['season'][0])
lcdict['lc_campaign'] = npfull_like(lcdict['time'],
lcdict['campaign'][0])
## END OF LIGHT CURVE CONSTRUCTION ##
# normalize the SAP and PDCSAP fluxes if needed
# FIXME: should we normalize the other stuff too?
if normalize:
lcdict['sap']['sap_flux'] = (
lcdict['sap']['sap_flux'] /
np.nanmedian(lcdict['sap']['sap_flux'])
)
lcdict['pdc']['pdcsap_flux'] = (
lcdict['pdc']['pdcsap_flux'] /
np.nanmedian(lcdict['pdc']['pdcsap_flux'])
)
# update the lcdict columns with the actual columns
lcdict['columns'] = (
[x.lower() for x in datakeys] +
['sap.%s' % x.lower() for x in sapkeys] +
['pdc.%s' % x.lower() for x in pdckeys] +
['lc_channel','lc_skygroup','lc_module',
'lc_output','lc_quarter','lc_season']
)
# return the lcdict at the end
return lcdict
def consolidate_kepler_fitslc(keplerid,
                              lcfitsdir,
                              normalize=True,
                              headerkeys=LCHEADERKEYS,
                              datakeys=LCDATAKEYS,
                              sapkeys=LCSAPKEYS,
                              pdckeys=LCPDCKEYS,
                              topkeys=LCTOPKEYS,
                              apkeys=LCAPERTUREKEYS):
    '''This gets all light curves for the given keplerid in lcfitsdir.

    Searches recursively in lcfitsdir for all of the files belonging to the
    specified keplerid. Sorts the light curves by time. Returns an lcdict. This
    is meant to be used for light curves across quarters.

    NOTE: keplerid is an integer (without the leading zeros). This is usually
    the KIC ID.

    NOTE: if light curve time arrays contain nans, these and their associated
    measurements will be sorted to the end of the final combined arrays.

    If normalize == True, then each component light curve's SAP_FLUX and
    PDCSAP_FLUX measurements will be normalized to 1.0 by dividing out the
    median flux for the component light curve.

    NOTE: The other flux related measurements, such as errors and backgrounds
    WILL NOT be normalized (FIXME: for now).

    Returns None if no matching light curve files are found.
    '''
    LOGINFO('looking for Kepler light curve FITS in %s for %s...' % (lcfitsdir,
                                                                     keplerid))
    # for Python 3.5 and up, use recursive glob, it appears to be absurdly
    # faster than os.walk
    if sys.version_info[:2] > (3,4):
        matching = glob.glob(os.path.join(lcfitsdir,
                                          '**',
                                          'kplr%09i-*_llc.fits' % keplerid),
                             recursive=True)
        LOGINFO('found %s files: %s' % (len(matching), repr(matching)))
    # for Python < 3.5, use os.walk and glob
    else:
        # use the os.walk function to start looking for files in lcfitsdir
        walker = os.walk(lcfitsdir)
        matching = []
        for root, dirs, files in walker:
            for sdir in dirs:
                searchpath = os.path.join(root,
                                          sdir,
                                          'kplr%09i-*_llc.fits' % keplerid)
                foundfiles = glob.glob(searchpath)
                if foundfiles:
                    matching.extend(foundfiles)
                    LOGINFO('found %s in dir: %s' % (repr(foundfiles),
                                                     os.path.join(root,sdir)))
    # now that we've found everything, read them all in
    if len(matching) > 0:
        LOGINFO('consolidating...')
        # the first file seeds the consolidated lcdict
        consolidated = read_kepler_fitslc(matching[0],
                                          headerkeys=headerkeys,
                                          datakeys=datakeys,
                                          sapkeys=sapkeys,
                                          pdckeys=pdckeys,
                                          topkeys=topkeys,
                                          apkeys=apkeys,
                                          normalize=normalize)
        # append the rest of the files.
        # FIX: iterate over matching[1:] -- the previous version looped over
        # the full list and therefore appended matching[0] a second time,
        # duplicating every point of the first light curve.
        for lcf in matching[1:]:
            consolidated = read_kepler_fitslc(lcf,
                                              appendto=consolidated,
                                              headerkeys=headerkeys,
                                              datakeys=datakeys,
                                              sapkeys=sapkeys,
                                              pdckeys=pdckeys,
                                              topkeys=topkeys,
                                              apkeys=apkeys,
                                              normalize=normalize)
        # get the sort indices
        # we use time for the columns and quarters for the headers
        LOGINFO('sorting by time...')
        # NOTE: nans in time will be sorted to the end of the array
        finiteind = npisfinite(consolidated['time'])
        if npsum(finiteind) < consolidated['time'].size:
            LOGWARNING('some time values are nan! '
                       'measurements at these times will be '
                       'sorted to the end of the column arrays.')
        # get the sort index
        column_sort_ind = npargsort(consolidated['time'])
        # sort the columns by time
        for col in consolidated['columns']:
            if '.' in col:
                key, subkey = col.split('.')
                consolidated[key][subkey] = (
                    consolidated[key][subkey][column_sort_ind]
                )
            else:
                consolidated[col] = consolidated[col][column_sort_ind]
        # now sort the headers by quarters
        header_sort_ind = npargsort(consolidated['quarter']).tolist()
        # this is a bit convoluted, but whatever: list -> array -> list
        for key in ('quarter', 'season', 'datarelease', 'obsmode'):
            consolidated[key] = (
                nparray(consolidated[key])[header_sort_ind].tolist()
            )
        for key in ('timesys','bjdoffset','exptime','lcaperture',
                    'aperpixused','aperpixunused','pixarcsec',
                    'channel','skygroup','module','output','ndet'):
            consolidated['lcinfo'][key] = (
                nparray(consolidated['lcinfo'][key])[header_sort_ind].tolist()
            )
        for key in ('cdpp3_0','cdpp6_0','cdpp12_0','pdcvar','pdcmethod',
                    'aper_target_total_ratio','aper_target_frac'):
            consolidated['varinfo'][key] = (
                nparray(consolidated['varinfo'][key])[header_sort_ind].tolist()
            )
        # finally, return the consolidated lcdict
        return consolidated
    # if we didn't find anything, complain
    else:
        LOGERROR('could not find any light curves '
                 'for %s in %s or its subdirectories' % (keplerid,
                                                         lcfitsdir))
        return None
########################
## READING K2 SFF LCs ##
########################
# Header/data keys for K2 SFF light curve FITS files. These reuse the Kepler
# LC key lists defined above; the header list adds the SFF aperture-mask
# keywords (MASKTYPE, MASKINDE, NPIXSAP).
SFFTOPKEYS = LCTOPKEYS
SFFHEADERKEYS = LCHEADERKEYS + ['MASKTYPE','MASKINDE','NPIXSAP']
SFFDATAKEYS = ['T','FRAW','FCOR','ARCLENGTH','MOVING','CADENCENO']
def read_k2sff_lightcurve(lcfits):
    '''
    This reads a K2 SFF (Vanderburg & Johnson 2014) light curve into an lcdict.

    (NOTE(review): the original docstring said "Vandenberg+ 2014"; the SFF
    method paper author is Vanderburg -- confirm the intended citation.)

    Returns a dict with header/object/variability metadata plus the light
    curve columns listed in SFFDATAKEYS, and per-point copies of some header
    values (channel, skygroup, module, output, quarter, season, campaign)
    so the combined arrays can be sorted later.
    '''
    # read the fits file
    hdulist = pyfits.open(lcfits)
    lchdr, lcdata = hdulist[1].header, hdulist[1].data
    lctophdr = hdulist[0].header
    hdulist.close()
    hdrinfo = {}
    # get the number of detections
    ndet = lchdr['NAXIS2']
    # get the info from the topheader; missing keys become None
    for key in SFFTOPKEYS:
        if key in lctophdr and lctophdr[key] is not None:
            hdrinfo[key.lower()] = lctophdr[key]
        else:
            hdrinfo[key.lower()] = None
    # now get the values we want from the header; LC-extension keys override
    # top-header keys of the same name
    for key in SFFHEADERKEYS:
        if key in lchdr and lchdr[key] is not None:
            hdrinfo[key.lower()] = lchdr[key]
        else:
            hdrinfo[key.lower()] = None
    # form the lcdict
    # the metadata is one-elem arrays because we might add on to them later
    lcdict = {
        'quarter':[hdrinfo['quarter']],
        'season':[hdrinfo['season']],
        'datarelease':[hdrinfo['data_rel']],
        'obsmode':[hdrinfo['obsmode']],
        'objectid':hdrinfo['object'],
        'campaign':[hdrinfo['campaign']],
        'lcinfo':{
            'timesys':[hdrinfo['timesys']],
            'bjdoffset':[hdrinfo['bjdrefi'] + hdrinfo['bjdreff']],
            'exptime':[hdrinfo['exposure']],
            'lcapermaskidx':[hdrinfo['maskinde']],
            'lcapermasktype':[hdrinfo['masktype']],
            'aperpixused':[hdrinfo['npixsap']],
            'aperpixunused':[None],
            'pixarcsec':[None],
            'channel':[hdrinfo['channel']],
            'skygroup':[hdrinfo['skygroup']],
            'module':[hdrinfo['module']],
            'output':[hdrinfo['output']],
            'ndet':[ndet],
        },
        'objectinfo':{
            'keplerid':hdrinfo['keplerid'],
            'ra':hdrinfo['ra_obj'],
            'decl':hdrinfo['dec_obj'],
            'pmra':hdrinfo['pmra'],
            'pmdecl':hdrinfo['pmdec'],
            'pmtotal':hdrinfo['pmtotal'],
            'sdssg':hdrinfo['gmag'],
            'sdssr':hdrinfo['rmag'],
            'sdssi':hdrinfo['imag'],
            'sdssz':hdrinfo['zmag'],
            'kepmag':hdrinfo['kepmag'],
            'teff':hdrinfo['teff'],
            'logg':hdrinfo['logg'],
            'feh':hdrinfo['feh'],
            'ebminusv':hdrinfo['ebminusv'],
            'extinction':hdrinfo['av'],
            'starradius':hdrinfo['radius'],
            'twomassuid':hdrinfo['tmindex'],
        },
        'varinfo':{
            'cdpp3_0':[hdrinfo['cdpp3_0']],
            'cdpp6_0':[hdrinfo['cdpp6_0']],
            'cdpp12_0':[hdrinfo['cdpp12_0']],
            'pdcvar':[hdrinfo['pdcvar']],
            'pdcmethod':[hdrinfo['pdcmethd']],
            'aptgttotrat':[hdrinfo['crowdsap']],
            'aptgtfrac':[hdrinfo['flfrcsap']],
        },
    }
    # get the LC columns
    for key in SFFDATAKEYS:
        lcdict[key.lower()] = lcdata[key]
    # add some of the light curve information to the data arrays so we can sort
    # on them later
    lcdict['channel'] = npfull_like(lcdict['t'],
                                    lcdict['lcinfo']['channel'][0])
    lcdict['skygroup'] = npfull_like(lcdict['t'],
                                     lcdict['lcinfo']['skygroup'][0])
    lcdict['module'] = npfull_like(lcdict['t'],
                                   lcdict['lcinfo']['module'][0])
    lcdict['output'] = npfull_like(lcdict['t'],
                                   lcdict['lcinfo']['output'][0])
    lcdict['quarter'] = npfull_like(lcdict['t'],
                                    lcdict['quarter'][0])
    lcdict['season'] = npfull_like(lcdict['t'],
                                   lcdict['season'][0])
    lcdict['campaign'] = npfull_like(lcdict['t'],
                                     lcdict['campaign'][0])
    # update the lcdict columns with the actual columns
    lcdict['columns'] = (
        [x.lower() for x in SFFDATAKEYS] +
        ['channel','skygroup','module','output','quarter','season','campaign']
    )
    # return the lcdict at the end
    return lcdict
##################
## INPUT/OUTPUT ##
##################
def kepler_lcdict_to_pkl(lcdict,
                         outfile=None):
    '''This simply writes the lcdict to a pickle.

    When no outfile is given, a default name is derived from the lcdict's
    objectid (with spaces replaced by dashes). Returns the absolute path of
    the file written.
    '''
    # derive a default filename from the object ID when none was supplied
    if not outfile:
        outfile = '{0}-keplc.pkl'.format(lcdict['objectid'].replace(' ', '-'))
    # pickle.HIGHEST_PROTOCOL: pickles written under Py3 will be unreadable
    # for Python 2.7
    with open(outfile, 'wb') as outfd:
        pickle.dump(lcdict, outfd, protocol=pickle.HIGHEST_PROTOCOL)
    return os.path.abspath(outfile)
def read_kepler_pklc(picklefile):
    '''This turns the pickled lightcurve back into an lcdict.

    Falls back to latin1 decoding for pickles written by Python 2 (needed
    for numpy arrays; see the linked stackoverflow question).
    '''
    try:
        with open(picklefile, 'rb') as infd:
            lcdict = pickle.load(infd)
    except UnicodeDecodeError:
        with open(picklefile,'rb') as infd:
            lcdict = pickle.load(infd, encoding='latin1')
        # FIX: the warning referenced an undefined name 'checkplotpickle',
        # which raised NameError whenever this fallback path was taken;
        # it should report the file we actually read.
        LOGWARNING('pickle %s was probably from Python 2 '
                   'and failed to load without using "latin1" encoding. '
                   'This is probably a numpy issue: '
                   'http://stackoverflow.com/q/11305790' % picklefile)
    return lcdict
##########################
## KEPLER LC PROCESSING ##
##########################
def stitch_kepler_lcdict(lcdict):
    '''
    This stitches Kepler light curves together across quarters.

    FIXME: implement this.
    '''
    # NOTE(review): placeholder -- the body is empty, so calling this
    # implicitly returns None without touching lcdict.
def filter_kepler_lcdict(lcdict,
filterflags=True,
nanfilter='sap,pdc',
timestoignore=None):
'''This filters the Kepler light curve dict.
By default, this function removes points in the Kepler LC that have ANY
quality flags set. Also removes nans.
timestoignore is a list of tuples containing start and end times to mask:
[(time1_start, time1_end), (time2_start, time2_end), ...]
This function filters the dict IN PLACE!
'''
cols = lcdict['columns']
# filter all bad LC points as noted by quality flags
if filterflags:
nbefore = lcdict['time'].size
filterind = lcdict['sap_quality'] == 0
for col in cols:
if '.' in col:
key, subkey = col.split('.')
lcdict[key][subkey] = lcdict[key][subkey][filterind]
else:
lcdict[col] = lcdict[col][filterind]
nafter = lcdict['time'].size
LOGINFO('applied quality flag filter, ndet before = %s, ndet after = %s'
% (nbefore, nafter))
if nanfilter and nanfilter == 'sap,pdc':
notnanind = (
npisfinite(lcdict['sap']['sap_flux']) &
npisfinite(lcdict['pdc']['pdcsap_flux'])
)
elif nanfilter and nanfilter == 'sap':
notnanind = npisfinite(lcdict['sap']['sap_flux'])
elif nanfilter and nanfilter == 'pdc':
notnanind = npisfinite(lcdict['pdc']['pdcsap_flux'])
# remove nans from all columns
if nanfilter:
nbefore = lcdict['time'].size
for col in cols:
if '.' in col:
key, subkey = col.split('.')
lcdict[key][subkey] = lcdict[key][subkey][notnanind]
else:
lcdict[col] = lcdict[col][notnanind]
nafter = lcdict['time'].size
LOGINFO('removed nans, ndet before = %s, ndet after = %s'
% (nbefore, nafter))
# exclude all times in timestoignore
if (timestoignore and
isinstance(timestoignore, list) and
len(timestoignore) > 0):
exclind = npfull_like(lcdict['time'],True)
nbefore = exclind.size
# get all the masks
for ignoretime in timestoignore:
time0, time1 = ignoretime[0], ignoretime[1]
thismask = (lcdict['time'] > time0) & (lcdict['time'] < time1)
exclind = exclind & | |
import threading
import sqlite3
from enum import Enum
import time
import datetime
from ..CTGP7Defines import CTGP7Defines
def current_time_min():
    """Return the current UNIX time in whole minutes (rounded).

    Replaces the previous lambda assignment (PEP 8 E731) with a proper def;
    call sites are unchanged.
    """
    return int(round(time.time() / 60))
class ConsoleMessageType(Enum):
    """Kinds of messages that can be attached to a console ID.

    SINGLE_* messages are deleted once read; TIMED_* messages carry a start
    minute and duration and are deleted after expiry (see
    CTGP7ServerDatabase.get_console_message). The *KICKMESSAGE variants are
    downgraded to their plain counterparts for admin consoles.
    """
    SINGLE_MESSAGE = 0
    TIMED_MESSAGE = 1
    SINGLE_KICKMESSAGE = 2
    TIMED_KICKMESSAGE = 3
class CTGP7ServerDatabase:
    def __init__(self):
        """Initialize state; no database connection is opened until connect()."""
        # True while a sqlite connection is open
        self.isConn = False
        # sqlite3.Connection, created in connect()
        self.conn = None
        # serializes all database access across threads
        self.lock = threading.Lock()
        # optional callable notified when a console message/kick is set
        self.kickCallback = None
def setKickLogCallback(self, callback):
self.kickCallback = callback
    def connect(self):
        """Open the sqlite database file if not already connected."""
        if not self.isConn:
            # check_same_thread=False: cross-thread access is serialized by
            # self.lock instead of sqlite's own thread check
            self.conn = sqlite3.connect('RedYoshiBot/server/data/data.sqlite', check_same_thread=False)
            self.isConn = True
    def disconnect(self):
        """Commit pending changes and close the database connection."""
        if (self.isConn):
            self.commit()
            with self.lock:
                self.isConn = False
                self.conn.close()
                self.conn = None
    def commit(self):
        """Flush the pending transaction to disk (no-op when disconnected)."""
        if (self.isConn):
            with self.lock:
                self.conn.commit()
    def set_database_config(self, field, value):
        """Set ``field`` to ``value`` in the config table.

        UPDATE only: silently does nothing if no row for ``field`` exists.
        """
        with self.lock:
            c = self.conn.cursor()
            c.execute("UPDATE config SET value = ? WHERE field = ?", (str(value), str(field)))
def get_database_config(self, field):
with self.lock:
c = self.conn.cursor()
rows = c.execute("SELECT * FROM config WHERE field = ?", (str(field),))
for row in rows:
return row[1]
    # Thin typed wrappers over get/set_database_config for well-known fields.
    def get_online_region(self):
        """Return the 'onlregion' config value as an int."""
        return int(self.get_database_config("onlregion"))
    def get_debugonline_region(self):
        """Return the debug online region: 'onlregion' offset by +2."""
        return int(self.get_database_config("onlregion")) + 2
    def set_online_region(self, value):
        """Update the 'onlregion' config value."""
        self.set_database_config("onlregion", value)
    def get_track_freq_split(self):
        """Return the current split index used to bucket track-frequency stats."""
        return int(self.get_database_config("trackfreqsplit"))
    def set_track_freq_split(self, value):
        """Update the 'trackfreqsplit' config value."""
        self.set_database_config("trackfreqsplit", value)
    def get_ctww_version(self):
        """Return the 'ctwwver' config value as an int."""
        return int(self.get_database_config("ctwwver"))
    def set_ctww_version(self, value):
        """Update the 'ctwwver' config value."""
        self.set_database_config("ctwwver", value)
    def get_beta_version(self):
        """Return the 'betaver' config value as an int."""
        return int(self.get_database_config("betaver"))
    def set_beta_version(self, value):
        """Update the 'betaver' config value."""
        self.set_database_config("betaver", value)
    def get_stats_dirty(self):
        """Return True when the 'stats_dirty' flag is set to 1."""
        return int(self.get_database_config("stats_dirty")) == 1
    def set_stats_dirty(self, isDirty):
        """Set or clear the 'stats_dirty' flag (stored as 1/0)."""
        self.set_database_config("stats_dirty", 1 if isDirty else 0)
    def get_most_played_tracks(self, course_type, amount):
        """Return up to ``amount`` entries for ``course_type``, most played
        first within the current split.

        Each entry is [track_id, freq_in_current_split, summed_freq_of_all
        earlier_splits].
        """
        currsplit = self.get_track_freq_split()
        with self.lock:
            c = self.conn.cursor()
            c2 = self.conn.cursor()
            rows = c.execute("SELECT * FROM stats_tracksfreq WHERE split = ? AND type = ? ORDER BY freq DESC", (int(currsplit), int(course_type)))
            i = 0
            ret = []
            for row in rows:
                if (i >= amount): break
                # total play count of this track across all previous splits
                prevValue = c2.execute("SELECT SUM(freq) FROM stats_tracksfreq WHERE id = ? AND split < ?", (str(row[0]), int(currsplit))).fetchone()[0]
                ret.append([row[0], row[2], 0 if prevValue is None else prevValue])
                i += 1
            return ret
    def increment_track_frequency(self, szsName, value):
        """Add ``value`` plays for ``szsName`` in the current split, inserting
        a new row (with its resolved course type) on first play.

        Tracks whose type cannot be resolved (getTypeFromSZS returns -1) are
        silently ignored on insert.
        """
        currsplit = self.get_track_freq_split()
        with self.lock:
            c = self.conn.cursor()
            rows = c.execute("SELECT * FROM stats_tracksfreq WHERE id = ? AND split = ?", (str(szsName),int(currsplit)))
            for _ in rows:
                # row exists: bump the counter in place. ``value`` is
                # int()-sanitized before being formatted into the SQL text.
                c.execute("UPDATE stats_tracksfreq SET freq = freq + {} WHERE id = ? AND split = ?".format(str(int(value))), (str(szsName),int(currsplit)))
                return
            courseType = CTGP7Defines.getTypeFromSZS(szsName)
            if (courseType != -1):
                c.execute('INSERT INTO stats_tracksfreq VALUES (?,?,?,?)', (str(szsName), int(currsplit), int(value), int(courseType)))
def get_stats(self):
with self.lock:
c = self.conn.cursor()
rows = c.execute("SELECT * FROM stats_general WHERE 1=1")
ret = {}
i = 0
names = [description[0] for description in rows.description]
for row in rows:
for val in row:
ret[names[i]] = val
i += 1
break
return ret
    def increment_general_stats(self, param, value):
        """Add ``value`` to column ``param`` of the single stats_general row.

        NOTE(review): ``param`` is interpolated directly into the SQL text,
        so it must come from trusted code (a known column name), never from
        user input. ``value`` is int()-sanitized before formatting.
        """
        with self.lock:
            c = self.conn.cursor()
            c.execute("UPDATE stats_general SET {} = {} + {} WHERE 1=1".format(param, param, str(int(value))))
    def fetch_stats_seqid(self, cID):
        """Increment and return the sequence ID for console ``cID``.

        Creates the row with seqID 1 on first use; the whole
        read-increment-write happens under self.lock.
        """
        with self.lock:
            c = self.conn.cursor()
            rows = c.execute("SELECT * FROM stats_seqid WHERE cID = ?", (int(cID),))
            for row in rows:
                newSeqID = row[1] + 1
                c.execute("UPDATE stats_seqid SET seqID = ? WHERE cID = ?", (int(newSeqID), int(cID)))
                return newSeqID
            c.execute('INSERT INTO stats_seqid VALUES (?,?)', (int(cID), int(1)))
            return 1
def get_stats_seqid(self, cID):
with self.lock:
c = self.conn.cursor()
rows = c.execute("SELECT * FROM stats_seqid WHERE cID = ?", (int(cID),))
for row in rows:
return row[1]
return 0
def get_unique_console_count(self):
with self.lock:
c = self.conn.cursor()
rows = c.execute("SELECT COUNT(*) FROM stats_seqid")
for row in rows:
return row[0]
return 0
    def delete_console_message(self, cID):
        """Remove any stored message for console ``cID``."""
        with self.lock:
            c = self.conn.cursor()
            c.execute("DELETE FROM console_message WHERE cID = ?", (int(cID),))
    def set_console_message(self, cID, messageType, message, amountMin=None, isSilent=False):
        """Replace the stored message for console ``cID``.

        For timed messages (``amountMin`` set) the current minute is recorded
        so expiry can be computed later. The kick callback, if registered, is
        notified with the same arguments after the row is written.
        """
        # the start time only matters for timed messages
        currTime = current_time_min() if amountMin is not None else None
        with self.lock:
            c = self.conn.cursor()
            c.execute("DELETE FROM console_message WHERE cID = ?", (int(cID),))
            c.execute('INSERT INTO console_message VALUES (?,?,?,?,?)', (int(cID), str(message), int(messageType), currTime, amountMin))
        if (self.kickCallback):
            self.kickCallback(cID, messageType, message, amountMin, isSilent)
    def get_console_message(self, cID, realConsoleID): # Real console ID is to keep track if cID is 0
        """Fetch the pending message for ``cID``.

        Returns a (messageType, messageText, startTime, amountMin) tuple, or
        None when there is no applicable message. Admin consoles get kick
        messages downgraded to plain messages; single-shot messages are
        deleted once read and timed ones are deleted after expiry. cID 0 acts
        as a broadcast fallback checked when ``cID`` itself has no message.
        """
        ret = None
        startTime = None
        amountTime = None
        with self.lock:
            c = self.conn.cursor()
            rows = c.execute("SELECT * FROM console_message WHERE cID = ?", (int(cID),))
            for row in rows:
                messageText = row[1]
                messageType = row[2]
                startTime = row[3]
                amountTime = row[4]
                ret = [messageType, messageText, startTime, amountTime]
        if (ret is not None):
            # Admins are never kicked: downgrade kick types to message types.
            if (ret[0] == ConsoleMessageType.SINGLE_KICKMESSAGE.value and self.get_console_is_admin(realConsoleID)):
                ret[0] = ConsoleMessageType.SINGLE_MESSAGE.value
            elif (ret[0] == ConsoleMessageType.TIMED_KICKMESSAGE.value and self.get_console_is_admin(realConsoleID)):
                ret[0] = ConsoleMessageType.TIMED_MESSAGE.value
            # Single-shot messages are consumed on read; timed ones are
            # dropped once startTime + amountTime has passed.
            if ret[0] == ConsoleMessageType.SINGLE_MESSAGE.value or ret[0] == ConsoleMessageType.SINGLE_KICKMESSAGE.value:
                self.delete_console_message(cID)
            elif (startTime is not None and amountTime is not None and startTime + amountTime < current_time_min()):
                self.delete_console_message(cID)
        if (ret is None and cID != 0):
            # Fall back to the broadcast message stored under cID 0.
            ret = self.get_console_message(0, realConsoleID)
        return tuple(ret) if ret is not None else None
    def set_console_is_verified(self, cID, isVerified):
        """Add or remove ``cID`` from the verified_consoles table (no-op if unchanged)."""
        wasVerified = self.get_console_is_verified(cID)
        if (wasVerified == isVerified):
            return
        with self.lock:
            c = self.conn.cursor()
            if (isVerified):
                c.execute('INSERT INTO verified_consoles VALUES (?)', (int(cID),))
            else:
                c.execute("DELETE FROM verified_consoles WHERE cID = ?", (int(cID),))
    def get_console_is_verified(self, cID):
        """Return True when ``cID`` is present in verified_consoles."""
        with self.lock:
            c = self.conn.cursor()
            rows = c.execute("SELECT * FROM verified_consoles WHERE cID = ?", (int(cID),))
            for row in rows:
                return True
            return False
    def set_console_is_admin(self, cID, isAdmin):
        """Add or remove ``cID`` from the admin_consoles table (no-op if unchanged)."""
        wasAdmin = self.get_console_is_admin(cID)
        if (wasAdmin == isAdmin):
            return
        with self.lock:
            c = self.conn.cursor()
            if (isAdmin):
                c.execute('INSERT INTO admin_consoles VALUES (?)', (int(cID),))
            else:
                c.execute("DELETE FROM admin_consoles WHERE cID = ?", (int(cID),))
    def get_console_is_admin(self, cID):
        """Return True when ``cID`` is present in admin_consoles."""
        with self.lock:
            c = self.conn.cursor()
            rows = c.execute("SELECT * FROM admin_consoles WHERE cID = ?", (int(cID),))
            for row in rows:
                return True
            return False
    def set_console_last_name(self, cID, lastName):
        """Store the last seen name for console ``cID`` (insert or update)."""
        with self.lock:
            c = self.conn.cursor()
            rows = c.execute("SELECT * FROM console_name WHERE cID = ?", (int(cID),))
            for row in rows:
                c.execute("UPDATE console_name SET name = ? WHERE cID = ?", (str(lastName), int(cID)))
                return
            c.execute('INSERT INTO console_name VALUES (?,?)', (int(cID), str(lastName)))
def get_console_last_name(self, cID):
with self.lock:
c = self.conn.cursor()
rows = c.execute("SELECT * FROM console_name WHERE cID = ?", (int(cID),))
for row in rows:
return str(row[1])
return "(Unknown)"
    def set_console_vr(self, cID, vr):
        """Store the (ctvr, cdvr) rating pair for ``cID`` (insert or update)."""
        with self.lock:
            c = self.conn.cursor()
            rows = c.execute("SELECT * FROM console_vr WHERE cID = ?", (int(cID),))
            for row in rows:
                c.execute("UPDATE console_vr SET ctvr = ?, cdvr = ? WHERE cID = ?", (int(vr[0]), int(vr[1]), int(cID)))
                return
            c.execute('INSERT INTO console_vr VALUES (?,?,?)', (int(cID), int(vr[0]), int(vr[1])))
    def get_console_vr(self, cID):
        """Return the (ctvr, cdvr) pair for ``cID``; defaults to (1000, 1000)."""
        with self.lock:
            c = self.conn.cursor()
            rows = c.execute("SELECT * FROM console_vr WHERE cID = ?", (int(cID),))
            for row in rows:
                return (row[1], row[2])
            return (1000, 1000)
    def get_unique_console_vr_count(self):
        """Return how many consoles have a VR entry."""
        with self.lock:
            c = self.conn.cursor()
            rows = c.execute("SELECT COUNT(*) FROM console_vr")
            for row in rows:
                return row[0]
            return 0
    def get_most_users_vr(self, mode, amount):
        """Return the top ``amount`` [cID, vr] entries; mode 0 ranks by ctvr,
        anything else by cdvr.

        The ORDER BY column is chosen from a fixed two-element set, so the
        str.format() into the SQL text cannot inject arbitrary SQL here.
        """
        with self.lock:
            c = self.conn.cursor()
            rows = c.execute("SELECT * FROM console_vr ORDER BY {} DESC".format("ctvr" if mode == 0 else "cdvr"))
            i = 0
            ret = []
            for row in rows:
                if (i >= amount): break
                ret.append([row[0], row[1] if mode == 0 else row[2]])
                i += 1
            return ret
    # Daily counters keyed by UTC date string ('YYYY-MM-DD').
    def increment_today_launches(self):
        """Bump the launch counter for today's UTC date (insert on first launch)."""
        with self.lock:
            now = datetime.datetime.utcnow().strftime('%Y-%m-%d')
            c = self.conn.cursor()
            rows = c.execute("SELECT * FROM launch_times WHERE date = ?", (now,))
            for row in rows:
                c.execute("UPDATE launch_times SET value = ? WHERE date = ?", (row[1] + 1, now))
                return
            c.execute('INSERT INTO launch_times VALUES (?,?)', (now, 1))
    def get_daily_launches(self, date: datetime.datetime):
        """Return the launch count recorded for ``date`` (UTC day), 0 when absent."""
        with self.lock:
            d = date.strftime('%Y-%m-%d')
            c = self.conn.cursor()
            rows = c.execute("SELECT * FROM launch_times WHERE date = ?", (d,))
            for row in rows:
                return row[1]
            return 0
    def increment_today_unique_consoles(self):
        """Bump today's new-console counter (insert on first occurrence)."""
        with self.lock:
            now = datetime.datetime.utcnow().strftime('%Y-%m-%d')
            c = self.conn.cursor()
            rows = c.execute("SELECT * FROM new_launch_times WHERE date = ?", (now,))
            for row in rows:
                c.execute("UPDATE new_launch_times SET value = ? WHERE date = ?", (row[1] + 1, now))
                return
            c.execute('INSERT INTO new_launch_times VALUES (?,?)', (now, 1))
    def get_daily_unique_consoles(self, date: datetime.datetime):
        """Return the new-console count recorded for ``date`` (UTC day), 0 when absent."""
        with self.lock:
            d = date.strftime('%Y-%m-%d')
            c = self.conn.cursor()
            rows = c.execute("SELECT * FROM new_launch_times WHERE date = ?", (d,))
            for row in rows:
                return row[1]
            return 0
    def set_discord_link_console(self, discordID, cID):
        """Link a Discord user ID to console ``cID`` (insert or update)."""
        with self.lock:
            c = self.conn.cursor()
            rows = c.execute("SELECT * FROM discord_link WHERE cID = ?", (int(cID),))
            for row in rows:
                c.execute("UPDATE discord_link SET discordID = ? WHERE cID = ?", (int(discordID), int(cID)))
                return
            c.execute('INSERT INTO discord_link VALUES (?,?)', (int(cID), int(discordID)))
def get_discord_link_console(self, cID):
with self.lock:
c = self.conn.cursor()
rows = c.execute("SELECT * FROM discord_link WHERE cID = ?", (int(cID),))
for row in rows:
| |
obj_.build(child_)
self.experimentalConditions = obj_
obj_.original_tagname_ = 'experimentalConditions'
elif nodeName_ == 'encoding':
obj_ = encoding.factory()
obj_.build(child_)
self.encoding.append(obj_)
obj_.original_tagname_ = 'encoding'
elif nodeName_ == 'sequenceParameters':
obj_ = sequenceParametersType.factory()
obj_.build(child_)
self.sequenceParameters = obj_
obj_.original_tagname_ = 'sequenceParameters'
elif nodeName_ == 'userParameters':
obj_ = userParameters.factory()
obj_.build(child_)
self.userParameters = obj_
obj_.original_tagname_ = 'userParameters'
# end class ismrmrdHeader
class subjectInformationType(GeneratedsSuper):
subclass = None
superclass = None
    def __init__(self, patientName=None, patientWeight_kg=None, patientID=None, patientBirthdate=None, patientGender=None):
        """Build a subjectInformationType; all XML child elements are optional."""
        self.original_tagname_ = None
        self.patientName = patientName
        self.patientWeight_kg = patientWeight_kg
        self.patientID = patientID
        # Accept either a date object or an ISO 'YYYY-MM-DD' string.
        # NOTE(review): 'basestring' exists only on Python 2 -- this generated
        # module appears to target Py2 (see also the .encode(ExternalEncoding)
        # calls in the export methods).
        if isinstance(patientBirthdate, basestring):
            initvalue_ = datetime_.datetime.strptime(patientBirthdate, '%Y-%m-%d').date()
        else:
            initvalue_ = patientBirthdate
        self.patientBirthdate = initvalue_
        self.patientGender = patientGender
        # warns (does not raise) if the gender fails the xsd pattern
        self.validate_patientGenderType(self.patientGender)
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one is set, otherwise this
        # class itself (generateDS subclass-override hook).
        if subjectInformationType.subclass:
            return subjectInformationType.subclass(*args_, **kwargs_)
        else:
            return subjectInformationType(*args_, **kwargs_)
    factory = staticmethod(factory)
def get_patientName(self): return self.patientName
def set_patientName(self, patientName): self.patientName = patientName
def get_patientWeight_kg(self): return self.patientWeight_kg
def set_patientWeight_kg(self, patientWeight_kg): self.patientWeight_kg = patientWeight_kg
def get_patientID(self): return self.patientID
def set_patientID(self, patientID): self.patientID = patientID
def get_patientBirthdate(self): return self.patientBirthdate
def set_patientBirthdate(self, patientBirthdate): self.patientBirthdate = patientBirthdate
def get_patientGender(self): return self.patientGender
def set_patientGender(self, patientGender): self.patientGender = patientGender
    def validate_patientGenderType(self, value):
        # Validate type patientGenderType, a restriction on xs:string.
        # Only warns (never raises) when the value fails the xsd pattern.
        if value is not None and Validate_simpletypes_:
            if not self.gds_validate_simple_patterns(
                    self.validate_patientGenderType_patterns_, value):
                warnings_.warn('Value "%s" does not match xsd pattern restrictions: %s' % (value.encode('utf-8'), self.validate_patientGenderType_patterns_, ))
    # xsd pattern: exactly one character, M, F or O
    validate_patientGenderType_patterns_ = [['^[MFO]$']]
def hasContent_(self):
if (
self.patientName is not None or
self.patientWeight_kg is not None or
self.patientID is not None or
self.patientBirthdate is not None or
self.patientGender is not None
):
return True
else:
return False
    def export(self, outfile, level, namespace_='', name_='subjectInformationType', namespacedef_='', pretty_print=True):
        """Write this object as an XML element to ``outfile`` at indent ``level``."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        # use the tag name this element was parsed under, if any
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='subjectInformationType')
        # self-close the tag when there are no child elements to write
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='subjectInformationType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='subjectInformationType'):
        # subjectInformationType defines no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='subjectInformationType', fromsubclass_=False, pretty_print=True):
        """Write each non-None child element in schema order."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.patientName is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%spatientName>%s</%spatientName>%s' % (namespace_, self.gds_format_string(quote_xml(self.patientName).encode(ExternalEncoding), input_name='patientName'), namespace_, eol_))
        if self.patientWeight_kg is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%spatientWeight_kg>%s</%spatientWeight_kg>%s' % (namespace_, self.gds_format_float(self.patientWeight_kg, input_name='patientWeight_kg'), namespace_, eol_))
        if self.patientID is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%spatientID>%s</%spatientID>%s' % (namespace_, self.gds_format_string(quote_xml(self.patientID).encode(ExternalEncoding), input_name='patientID'), namespace_, eol_))
        if self.patientBirthdate is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%spatientBirthdate>%s</%spatientBirthdate>%s' % (namespace_, self.gds_format_date(self.patientBirthdate, input_name='patientBirthdate'), namespace_, eol_))
        if self.patientGender is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%spatientGender>%s</%spatientGender>%s' % (namespace_, self.gds_format_string(quote_xml(self.patientGender).encode(ExternalEncoding), input_name='patientGender'), namespace_, eol_))
    def exportLiteral(self, outfile, level, name_='subjectInformationType'):
        """Write a Python-literal representation of this object to outfile."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        # No XML attributes on this type; stub for the generated literal API.
        pass
def exportLiteralChildren(self, outfile, level, name_):
if self.patientName is not None:
showIndent(outfile, level)
outfile.write('patientName=%s,\n' % quote_python(self.patientName).encode(ExternalEncoding))
if self.patientWeight_kg is not None:
showIndent(outfile, level)
outfile.write('patientWeight_kg=%f,\n' % self.patientWeight_kg)
if self.patientID is not None:
showIndent(outfile, level)
outfile.write('patientID=%s,\n' % quote_python(self.patientID).encode(ExternalEncoding))
if self.patientBirthdate is not None:
showIndent(outfile, level)
outfile.write('patientBirthdate=model_.GeneratedsSuper.gds_parse_date("%s"),\n' % self.gds_format_date(self.patientBirthdate, input_name='patientBirthdate'))
if self.patientGender is not None:
showIndent(outfile, level)
outfile.write('patientGender=%s,\n' % quote_python(self.patientGender).encode(ExternalEncoding))
    def build(self, node):
        """Populate this instance from an ElementTree node and return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # Strip any namespace prefix from the tag before dispatching.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # No XML attributes on this type; stub for the generated build API.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Parse one child element of the XML node into the matching field.

        String fields are validated with gds_validate_string; the weight
        must parse as a float (raises a parse error otherwise); the
        birthdate is parsed as a date; the gender is additionally checked
        against the patientGenderType enumeration.
        """
        if nodeName_ == 'patientName':
            patientName_ = child_.text
            patientName_ = self.gds_validate_string(patientName_, node, 'patientName')
            self.patientName = patientName_
        elif nodeName_ == 'patientWeight_kg':
            sval_ = child_.text
            try:
                fval_ = float(sval_)
            except (TypeError, ValueError) as exp:
                raise_parse_error(child_, 'requires float or double: %s' % exp)
            fval_ = self.gds_validate_float(fval_, node, 'patientWeight_kg')
            self.patientWeight_kg = fval_
        elif nodeName_ == 'patientID':
            patientID_ = child_.text
            patientID_ = self.gds_validate_string(patientID_, node, 'patientID')
            self.patientID = patientID_
        elif nodeName_ == 'patientBirthdate':
            sval_ = child_.text
            dval_ = self.gds_parse_date(sval_)
            self.patientBirthdate = dval_
        elif nodeName_ == 'patientGender':
            patientGender_ = child_.text
            patientGender_ = self.gds_validate_string(patientGender_, node, 'patientGender')
            self.patientGender = patientGender_
            # validate type patientGenderType
            self.validate_patientGenderType(self.patientGender)
# end class subjectInformationType
class studyInformationType(GeneratedsSuper):
    """Generated binding for the ``studyInformationType`` XML complex type.

    Holds study-level metadata (date, time, IDs, referring physician,
    description, instance UID).  This is generateDS-style generated code
    targeting Python 2 (note ``basestring`` and
    ``.encode(ExternalEncoding)``); the export/build methods mirror the
    schema, so edit with care.
    """
    # Set ``subclass`` to substitute a derived class in factory().
    subclass = None
    superclass = None
    def __init__(self, studyDate=None, studyTime=None, studyID=None, accessionNumber=None, referringPhysicianName=None, studyDescription=None, studyInstanceUID=None):
        self.original_tagname_ = None
        # studyDate/studyTime accept either a pre-parsed date/time object
        # or an ISO-formatted string ('%Y-%m-%d' / '%H:%M:%S').
        if isinstance(studyDate, basestring):
            initvalue_ = datetime_.datetime.strptime(studyDate, '%Y-%m-%d').date()
        else:
            initvalue_ = studyDate
        self.studyDate = initvalue_
        if isinstance(studyTime, basestring):
            initvalue_ = datetime_.datetime.strptime(studyTime, '%H:%M:%S').time()
        else:
            initvalue_ = studyTime
        self.studyTime = initvalue_
        self.studyID = studyID
        self.accessionNumber = accessionNumber
        self.referringPhysicianName = referringPhysicianName
        self.studyDescription = studyDescription
        self.studyInstanceUID = studyInstanceUID
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one is installed,
        # otherwise this class itself.
        if studyInformationType.subclass:
            return studyInformationType.subclass(*args_, **kwargs_)
        else:
            return studyInformationType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Generated accessor pairs; attributes may also be read/written directly.
    def get_studyDate(self): return self.studyDate
    def set_studyDate(self, studyDate): self.studyDate = studyDate
    def get_studyTime(self): return self.studyTime
    def set_studyTime(self, studyTime): self.studyTime = studyTime
    def get_studyID(self): return self.studyID
    def set_studyID(self, studyID): self.studyID = studyID
    def get_accessionNumber(self): return self.accessionNumber
    def set_accessionNumber(self, accessionNumber): self.accessionNumber = accessionNumber
    def get_referringPhysicianName(self): return self.referringPhysicianName
    def set_referringPhysicianName(self, referringPhysicianName): self.referringPhysicianName = referringPhysicianName
    def get_studyDescription(self): return self.studyDescription
    def set_studyDescription(self, studyDescription): self.studyDescription = studyDescription
    def get_studyInstanceUID(self): return self.studyInstanceUID
    def set_studyInstanceUID(self, studyInstanceUID): self.studyInstanceUID = studyInstanceUID
    def hasContent_(self):
        """Return True if any child element would be serialized."""
        if (
            self.studyDate is not None or
            self.studyTime is not None or
            self.studyID is not None or
            self.accessionNumber is not None or
            self.referringPhysicianName is not None or
            self.studyDescription is not None or
            self.studyInstanceUID is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='studyInformationType', namespacedef_='', pretty_print=True):
        """Serialize this object as an XML element to outfile."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            # Preserve the tag name the element was originally parsed from.
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='studyInformationType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='studyInformationType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # Empty content collapses to a self-closing tag.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='studyInformationType'):
        # No XML attributes on this type; stub for the generated export API.
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='studyInformationType', fromsubclass_=False, pretty_print=True):
        """Write each non-None study field as a child XML element."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.studyDate is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sstudyDate>%s</%sstudyDate>%s' % (namespace_, self.gds_format_date(self.studyDate, input_name='studyDate'), namespace_, eol_))
        if self.studyTime is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sstudyTime>%s</%sstudyTime>%s' % (namespace_, self.gds_format_time(self.studyTime, input_name='studyTime'), namespace_, eol_))
        if self.studyID is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sstudyID>%s</%sstudyID>%s' % (namespace_, self.gds_format_string(quote_xml(self.studyID).encode(ExternalEncoding), input_name='studyID'), namespace_, eol_))
        if self.accessionNumber is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%saccessionNumber>%s</%saccessionNumber>%s' % (namespace_, self.gds_format_integer(self.accessionNumber, input_name='accessionNumber'), namespace_, eol_))
        if self.referringPhysicianName is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sreferringPhysicianName>%s</%sreferringPhysicianName>%s' % (namespace_, self.gds_format_string(quote_xml(self.referringPhysicianName).encode(ExternalEncoding), input_name='referringPhysicianName'), namespace_, eol_))
        if self.studyDescription is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sstudyDescription>%s</%sstudyDescription>%s' % (namespace_, self.gds_format_string(quote_xml(self.studyDescription).encode(ExternalEncoding), input_name='studyDescription'), namespace_, eol_))
        if self.studyInstanceUID is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sstudyInstanceUID>%s</%sstudyInstanceUID>%s' % (namespace_, self.gds_format_string(quote_xml(self.studyInstanceUID).encode(ExternalEncoding), input_name='studyInstanceUID'), namespace_, eol_))
    def exportLiteral(self, outfile, level, name_='studyInformationType'):
        """Write a Python-literal representation of this object to outfile."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        # No XML attributes on this type; stub for the generated literal API.
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        """Write each non-None study field as a ``name=value,`` literal line."""
        if self.studyDate is not None:
            showIndent(outfile, level)
            # Dates/times are emitted as gds_parse_* calls so literals round-trip.
            outfile.write('studyDate=model_.GeneratedsSuper.gds_parse_date("%s"),\n' % self.gds_format_date(self.studyDate, input_name='studyDate'))
        if self.studyTime is not None:
            showIndent(outfile, level)
            outfile.write('studyTime=model_.GeneratedsSuper.gds_parse_time("%s"),\n' % self.gds_format_time(self.studyTime, input_name='studyTime'))
        if self.studyID is not None:
            showIndent(outfile, level)
            outfile.write('studyID=%s,\n' % quote_python(self.studyID).encode(ExternalEncoding))
        if self.accessionNumber is not None:
            showIndent(outfile, level)
            outfile.write('accessionNumber=%d,\n' % self.accessionNumber)
        if self.referringPhysicianName is not None:
            showIndent(outfile, level)
            outfile.write('referringPhysicianName=%s,\n' % quote_python(self.referringPhysicianName).encode(ExternalEncoding))
        if self.studyDescription is not None:
            showIndent(outfile, level)
            outfile.write('studyDescription=%s,\n' % quote_python(self.studyDescription).encode(ExternalEncoding))
        if self.studyInstanceUID is not None:
            showIndent(outfile, level)
            outfile.write('studyInstanceUID=%s,\n' % quote_python(self.studyInstanceUID).encode(ExternalEncoding))
    def build(self, node):
        """Populate this instance from an ElementTree node and return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # Strip any namespace prefix from the tag before dispatching.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # No XML attributes on this type; stub for the generated build API.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Parse one child element of the XML node into the matching field.

        String fields are validated with gds_validate_string; the
        accession number must parse as an int (raises a parse error
        otherwise); date and time fields are parsed with gds_parse_*.
        """
        if nodeName_ == 'studyDate':
            sval_ = child_.text
            dval_ = self.gds_parse_date(sval_)
            self.studyDate = dval_
        elif nodeName_ == 'studyTime':
            sval_ = child_.text
            dval_ = self.gds_parse_time(sval_)
            self.studyTime = dval_
        elif nodeName_ == 'studyID':
            studyID_ = child_.text
            studyID_ = self.gds_validate_string(studyID_, node, 'studyID')
            self.studyID = studyID_
        elif nodeName_ == 'accessionNumber':
            sval_ = child_.text
            try:
                ival_ = int(sval_)
            except (TypeError, ValueError) as exp:
                raise_parse_error(child_, 'requires integer: %s' % exp)
            ival_ = self.gds_validate_integer(ival_, node, 'accessionNumber')
            self.accessionNumber = ival_
        elif nodeName_ == 'referringPhysicianName':
            referringPhysicianName_ = child_.text
            referringPhysicianName_ = self.gds_validate_string(referringPhysicianName_, node, 'referringPhysicianName')
            self.referringPhysicianName = referringPhysicianName_
        elif nodeName_ == 'studyDescription':
            studyDescription_ = child_.text
            studyDescription_ = self.gds_validate_string(studyDescription_, node, 'studyDescription')
            self.studyDescription = studyDescription_
        elif nodeName_ == 'studyInstanceUID':
            studyInstanceUID_ = child_.text
            studyInstanceUID_ = self.gds_validate_string(studyInstanceUID_, node, 'studyInstanceUID')
            self.studyInstanceUID = studyInstanceUID_
# end class studyInformationType
class measurementInformationType(GeneratedsSuper):
subclass = None
superclass = None
| |
from dataclasses import dataclass, field
from typing import Dict, List, Optional
# Target XML namespace of the OASIS CIQ xNL 2.0 (extensible Name Language)
# schema; used by the generated dataclasses' Meta.namespace below.
X_NL_NAMESPACE = "urn:oasis:names:tc:ciq:xsdschema:xNL:2.0"
@dataclass
class Function:
    """Function of the Person defined.

    Example: Managing Director, CEO, Marketing Manager, etc.

    :ivar content: mixed wildcard content (text plus any elements).
    :ivar code: Indicates the name element code defined by postal
        standard groups like ECCMA, ADIS, UN/PROLIST for postal
        services.
    :ivar other_attributes: any attributes from foreign namespaces.
    """
    class Meta:
        # Binds this element to the xNL 2.0 schema namespace.
        namespace = X_NL_NAMESPACE

    content: List[object] = field(
        default_factory=list,
        metadata={
            "type": "Wildcard",
            "namespace": "##any",
            "mixed": True,
        }
    )
    code: Optional[str] = field(
        default=None,
        metadata={
            "name": "Code",
            "type": "Attribute",
            "required": True,
        }
    )
    other_attributes: Dict = field(
        default_factory=dict,
        metadata={
            "type": "Attributes",
            "namespace": "##other",
            # NOTE(review): "required" on an Attributes wildcard is unusual
            # for this kind of generated metadata -- confirm against the
            # xNL 2.0 schema before relying on it.
            "required": True,
        }
    )
@dataclass
class NameLineType:
    """Free-format line of a name, with optional classification attributes.

    :ivar content: mixed wildcard content (text plus any elements).
    :ivar type: Type of data defined as a free format text. Example:
        Former name, Nick name, Known as, etc. or anything else to help
        identify the line as part of the name.
    :ivar name_type: Clarifies the meaning of the element. Example:
        First Name can be Christian name, Given name, first name, etc.
    :ivar code: Indicates the name element code defined by postal
        standard groups like ECCMA, ADIS, UN/PROLIST for postal
        services.
    :ivar other_attributes: any attributes from foreign namespaces.
    """
    # NOTE(review): unlike Function, this type declares no ``class Meta``
    # namespace here; presumably the namespace comes from the referencing
    # element -- confirm against the schema.
    content: List[object] = field(
        default_factory=list,
        metadata={
            "type": "Wildcard",
            "namespace": "##any",
            "mixed": True,
        }
    )
    type: Optional[str] = field(
        default=None,
        metadata={
            "name": "Type",
            "type": "Attribute",
        }
    )
    name_type: Optional[str] = field(
        default=None,
        metadata={
            "name": "NameType",
            "type": "Attribute",
        }
    )
    code: Optional[str] = field(
        default=None,
        metadata={
            "name": "Code",
            "type": "Attribute",
        }
    )
    other_attributes: Dict = field(
        default_factory=dict,
        metadata={
            "type": "Attributes",
            "namespace": "##other",
        }
    )
@dataclass
class OrganisationNameDetails:
"""
A container for organisation name details.
:ivar name_line: Free format text that defines the organisation name
or parts of it.
:ivar organisation_name: Name of the organisation. Example: MSI
Business Solutions in "MSI Business Solutions Pty. Ltd" or the
whole name itself
:ivar organisation_type: Indicates the legal status of an
organisation. Example: Pty, Ltd, GmbH, etc. Pty. Ltd. in "XYZ
Pty. Ltd"
:ivar type: Type of Organisation Name. Example: Former name, Known
as, etc
:ivar name_details_key_ref: Reference to another NameDetails element
with no foreign key reinforcement. The referenced element may be
out of the document and the document is still valid.
:ivar other_attributes:
:ivar organisation_former_name: Name history for the organisation
:ivar organisation_known_as: Any other names the organisation can be
known under.
:ivar other_element: Use this to import/use/reference name elements
from other namespaces
"""
class Meta:
namespace = X_NL_NAMESPACE
name_line: List[NameLineType] = field(
default_factory=list,
metadata={
"name": "NameLine",
"type": "Element",
}
)
organisation_name: List["OrganisationNameDetails.OrganisationName"] = field(
default_factory=list,
metadata={
"name": "OrganisationName",
"type": "Element",
}
)
organisation_type: List["OrganisationNameDetails.OrganisationType"] = field(
default_factory=list,
metadata={
"name": "OrganisationType",
"type": "Element",
}
)
type: Optional[str] = field(
default=None,
metadata={
"name": "Type",
"type": "Attribute",
}
)
name_details_key_ref: Optional[str] = field(
default=None,
metadata={
"name": "NameDetailsKeyRef",
"type": "Attribute",
}
)
other_attributes: Dict = field(
default_factory=dict,
metadata={
"type": "Attributes",
"namespace": "##other",
}
)
organisation_former_name: List["OrganisationNameDetails.OrganisationFormerName"] = field(
default_factory=list,
metadata={
"name": "OrganisationFormerName",
"type": "Element",
}
)
organisation_known_as: List["OrganisationNameDetails.OrganisationKnownAs"] = field(
default_factory=list,
metadata={
"name": "OrganisationKnownAs",
"type": "Element",
}
)
other_element: List[object] = field(
default_factory=list,
metadata={
"type": "Wildcard",
"namespace": "##other",
}
)
@dataclass
class OrganisationFormerName:
"""
:ivar name_line: Free format text that defines the organisation
name or parts of it.
:ivar organisation_name: Name of the organisation. Example: MSI
Business Solutions in "MSI Business Solutions Pty. Ltd" or
the whole name itself
:ivar organisation_type: Indicates the legal status of an
organisation. Example: Pty, Ltd, GmbH, etc. Pty. Ltd. in
"XYZ Pty. Ltd"
:ivar type: Type of Organisation Name. Example: Former name,
Known as, etc
:ivar name_details_key_ref: Reference to another NameDetails
element with no foreign key reinforcement. The referenced
element may be out of the document and the document is still
valid.
:ivar other_attributes:
:ivar other_element: Use this to import/use/reference name
elements from other namespaces
:ivar valid_from: The first date when the name is valid.
Inclusive.
:ivar valid_to: The last date when the name is valid. Inclusive.
"""
name_line: List[NameLineType] = field(
default_factory=list,
metadata={
"name": "NameLine",
"type": "Element",
}
)
organisation_name: List["OrganisationNameDetails.OrganisationFormerName.OrganisationName"] = field(
default_factory=list,
metadata={
"name": "OrganisationName",
"type": "Element",
}
)
organisation_type: List["OrganisationNameDetails.OrganisationFormerName.OrganisationType"] = field(
default_factory=list,
metadata={
"name": "OrganisationType",
"type": "Element",
}
)
type: Optional[str] = field(
default=None,
metadata={
"name": "Type",
"type": "Attribute",
}
)
name_details_key_ref: Optional[str] = field(
default=None,
metadata={
"name": "NameDetailsKeyRef",
"type": "Attribute",
}
)
other_attributes: Dict = field(
default_factory=dict,
metadata={
"type": "Attributes",
"namespace": "##other",
}
)
other_element: List[object] = field(
default_factory=list,
metadata={
"type": "Wildcard",
"namespace": "##other",
}
)
valid_from: Optional[str] = field(
default=None,
metadata={
"name": "ValidFrom",
"type": "Attribute",
}
)
valid_to: Optional[str] = field(
default=None,
metadata={
"name": "ValidTo",
"type": "Attribute",
}
)
@dataclass
class OrganisationName:
"""
:ivar content:
:ivar type: Type of Organisation name. Example: Official,
Legal, Un-official, etc
:ivar name_type: Defines the name type of the Organisation
name. Example: Former name, new name, abbreviated name
etc.
:ivar code: Indicates the name element code defined by
postal standard groups like ECCMA, ADIS, UN/PROLIST for
postal services.
:ivar other_attributes:
"""
content: List[object] = field(
default_factory=list,
metadata={
"type": "Wildcard",
"namespace": "##any",
"mixed": True,
}
)
type: Optional[str] = field(
default=None,
metadata={
"name": "Type",
"type": "Attribute",
}
)
name_type: Optional[str] = field(
default=None,
metadata={
"name": "NameType",
"type": "Attribute",
}
)
code: Optional[str] = field(
default=None,
metadata={
"name": "Code",
"type": "Attribute",
}
)
other_attributes: Dict = field(
default_factory=dict,
metadata={
"type": "Attributes",
"namespace": "##other",
}
)
@dataclass
class OrganisationType:
"""
:ivar content:
:ivar type: Defines the Type of Organisation Type. Example:
Abbreviation, Legal Type, etc.
:ivar name_type: Defines the name type of Organisation Type.
Example: Private, Public, proprietary, etc.
:ivar code: Indicates the name element code defined by
postal standard groups like ECCMA, ADIS, UN/PROLIST for
postal services.
:ivar other_attributes:
"""
content: List[object] = field(
default_factory=list,
metadata={
"type": "Wildcard",
"namespace": "##any",
"mixed": True,
}
)
type: Optional[str] = field(
default=None,
metadata={
"name": "Type",
"type": "Attribute",
}
)
name_type: Optional[str] = field(
default=None,
metadata={
"name": "NameType",
"type": "Attribute",
}
)
code: Optional[str] = field(
default=None,
metadata={
"name": "Code",
"type": "Attribute",
}
)
other_attributes: Dict = field(
default_factory=dict,
metadata={
"type": "Attributes",
"namespace": "##other",
}
)
@dataclass
class OrganisationKnownAs:
"""
:ivar name_line: Free format text that defines the organisation
name or parts of it.
:ivar organisation_name: Name of the organisation. Example: MSI
Business Solutions in "MSI Business Solutions Pty. Ltd" or
the whole name itself
:ivar organisation_type: Indicates the legal status of an
organisation. Example: Pty, Ltd, GmbH, etc. Pty. Ltd. in
"XYZ Pty. Ltd"
:ivar type: Type of Organisation Name. Example: Former name,
Known as, etc
:ivar name_details_key_ref: Reference to another NameDetails
element with no foreign key reinforcement. The referenced
element may be out of the document and the document is still
valid.
:ivar other_attributes:
:ivar other_element: Use this to import/use/reference name
elements from other namespaces
:ivar valid_from: The first date when the name is valid.
Inclusive.
:ivar valid_to: The last date when the name is valid. Inclusive.
"""
name_line: List[NameLineType] = field(
default_factory=list,
metadata={
"name": "NameLine",
"type": "Element",
}
)
organisation_name: List["OrganisationNameDetails.OrganisationKnownAs.OrganisationName"] = field(
default_factory=list,
metadata={
"name": "OrganisationName",
"type": "Element",
}
)
organisation_type: List["OrganisationNameDetails.OrganisationKnownAs.OrganisationType"] = field(
default_factory=list,
metadata={
"name": "OrganisationType",
"type": "Element",
}
)
type: Optional[str] = field(
default=None,
metadata={
"name": "Type",
"type": "Attribute",
}
)
name_details_key_ref: Optional[str] = field(
default=None,
metadata={
"name": "NameDetailsKeyRef",
"type": "Attribute",
}
)
other_attributes: Dict = field(
default_factory=dict,
metadata={
"type": "Attributes",
"namespace": "##other",
}
)
other_element: List[object] = field(
default_factory=list,
metadata={
"type": "Wildcard",
"namespace": "##other",
}
)
valid_from: Optional[str] = field(
default=None,
metadata={
"name": "ValidFrom",
"type": "Attribute",
}
)
valid_to: Optional[str] = field(
default=None,
metadata={
"name": "ValidTo",
"type": "Attribute",
}
)
@dataclass
class OrganisationName:
"""
:ivar content:
:ivar type: Type of Organisation name. Example: Official,
Legal, Un-official, etc
:ivar name_type: Defines the name type of the Organisation
name. Example: Former name, new name, abbreviated name
etc.
:ivar code: Indicates the name element code defined by
postal standard groups like ECCMA, ADIS, UN/PROLIST for
postal services.
:ivar other_attributes:
"""
content: List[object] = field(
default_factory=list,
metadata={
"type": "Wildcard",
"namespace": "##any",
"mixed": True,
}
)
type: Optional[str] = field(
default=None,
metadata={
| |
skills1 = [
[("Thrown Weapon (Dart)", 1, SK)],
[("Thrown Weapon (Knife)", 1, SK)],
[("Thrown Weapon (Shuriken)", 1, SK)],
[("Throwing", 1, SK)],
[("Blowpipe", 1, SK)],
[("Sling", 1, SK)],
]
traits.extend(pick_from_list(skills1, 1))
melee_option = random.randrange(3)
if melee_option == 0:
skills2 = [
[("Knife", 4, SK)],
[("Axe/Mace", 4, SK)],
[("Jitte/Sai", 4, SK)],
[("Shortsword", 4, SK)],
[("Smallsword", 4, SK)],
[("Staff", 4, SK)],
[("Tonfa", 4, SK)],
[("Flail", 4, SK)],
[("Kusari", 4, SK)],
]
traits.extend(pick_from_list(skills2, 8))
elif melee_option == 1:
skills3 = [
[("Knife", 4, SK)],
[("Axe/Mace", 4, SK)],
[("Jitte/Sai", 4, SK)],
[("Shortsword", 4, SK)],
[("Smallsword", 4, SK)],
[("Staff", 4, SK)],
[("Tonfa", 4, SK)],
[("Flail", 4, SK)],
[("Kusari", 4, SK)],
]
traits.extend(pick_from_list(skills3, 4))
traits = [
(name, cost, trait_type)
for (name, cost, trait_type) in traits
if name != "Judo" and name != "Karate"
]
traits.append(("Judo", 4, SK))
traits.append(("Karate", 4, SK))
else:
traits = [
(name, cost, trait_type)
for (name, cost, trait_type) in traits
if name != "Judo" and name != "Karate"
]
if random.randrange(2) == 0:
traits.append(("Judo", 8, SK))
traits.append(("Karate", 4, SK))
else:
traits.append(("Judo", 4, SK))
traits.append(("Karate", 8, SK))
skills4 = [
[("Fast-Draw (any)", 1, SK)],
[("Climbing", 1, SK)],
[("First Aid", 1, SK)],
[("Gesture", 1, SK)],
[("Teaching", 1, SK)],
[("Hiking", 1, SK)],
[("Running", 1, SK)],
[("Intimidation", 1, SK)],
[("Observation", 1, SK)],
]
traits.extend(pick_from_list(skills4, 3))
pick_or_improve_skills_from_list(
special_skill_names, 14, traits, min_cost=2
)
# Prereq hack.
trait_names = set((trait[0] for trait in traits))
if "Flying Leap" in trait_names and "Power Blow" not in trait_names:
total_cost = 0
for (name, cost, trait_type) in traits:
if name == "Flying Leap":
total_cost += cost
traits.remove((name, cost, trait_type))
remaining_special_skill_names = list(
special_skill_names - trait_names - {"Flying Leap"}
)
name2 = random.choice(remaining_special_skill_names)
traits.append((name2, total_cost, SK))
return traits
def generate_scout() -> List[Tuple[str, int, TraitType]]:
    """Randomly generate a GURPS Scout character template.

    Returns a list of (trait name, point cost, trait type) tuples: a fixed
    base of attributes and advantages, plus random picks of advantages,
    disadvantages and skills made via pick_from_list /
    pick_or_improve_skills_from_list.

    NOTE: each pick_* call consumes the module-level random stream, so the
    order of calls below matters for reproducibility under a seeded RNG.
    """
    # Fixed template: attributes, secondary attributes, core advantages.
    traits = [
        ("ST 13", 30, PA),
        ("DX 14", 80, PA),
        ("IQ 11", 20, PA),
        ("HT 12", 20, PA),
        ("HP 13", 0, SA),
        ("Will 11", 0, SA),
        ("Per 14", 15, SA),
        ("FP 12", 0, SA),
        ("Basic Speed 7.0", 10, SA),
        ("Basic Move 7", 0, SA),
        ("Heroic Archer", 20, AD),
        ("Outdoorsman 2", 20, AD),
    ]
    # Spend 20 points on optional advantages.
    ads1 = [
        list_levels("ST +%d", 10, PA, 2),
        [("DX +1", 20, PA)],
        list_levels("HT +%d", 10, PA, 2),
        list_levels("Per +%d", 5, SA, 4),
        [("Basic Speed +1", 20, SA)],
        list_levels("Basic Move +%d", 5, SA, 3),
        [("Absolute Direction", 5, AD)],
        list_levels("Acute Vision %d", 2, AD, 5),
        [("Combat Reflexes", 15, AD)],
        [("Danger Sense", 15, AD)],
        [("Fit", 5, AD), ("Very Fit", 15, AD)],
        [("High Pain Threshold", 10, AD)],
        [("Luck", 15, AD)],
        list_levels("Night Vision %d", 1, AD, 9),
        list_levels("Outdoorsman %d", 10, AD, 2, min_level=3),
        [("Peripheral Vision", 15, AD)],
        [("Rapid Healing", 5, AD)],
        list_levels("Signature Gear %d", 1, AD, 10),
        [("Weapon Bond", 1, AD)],
        [("Weapon Master (Bow)", 20, AD)],
    ]
    traits.extend(pick_from_list(ads1, 20))
    # First disadvantage pool: -15 points.
    disads1 = [
        list_self_control_levels("Bloodlust", -10),
        [("Callous (12)", -5, DI)],
        list_self_control_levels("Greed", -15),
        list_self_control_levels("Honesty", -10),
        list_self_control_levels("Overconfidence", -5),
        [("Sense of Duty (Adventuring companions)", -5, DI)],
        [("Stubbornness", -5, DI)],
    ]
    traits.extend(pick_from_list(disads1, -15))
    # Second pool (includes the first) for another -35 points.
    disads2 = [
        [
            ("Code of Honor (Pirate's)", -5, DI),
            ("Code of Honor (Soldier's)", -10, DI),
        ],
        [("Intolerance (Urbanites)", -5, DI)],
        list_self_control_levels("Loner", -5),
        [("No Sense of Humor", -10, DI)],
        [("Odious Personal Habit (Unwashed bushwhacker)", -5, DI)],
        [("Paranoia", -10, DI)],
        list_self_control_levels("Phobia (Crowds)", -15),
        [("Social Stigma (Disowned)", -5, DI)],
        [("Vow (Never Sleep Indoors)", -10, DI)],
        [("Vow (Own no more than what can be carried)", -10, DI)],
    ]
    disads2.extend(disads1)
    traits.extend(pick_from_list(disads2, -35))
    # Mandatory skills; each inner list holds exactly one (name, cost, type).
    fixed_skills = [
        [("Bow", 16, SK)],
        [("Camouflage", 2, SK)],
        [("Fast-Draw (Arrow)", 1, SK)],
        [("Observation", 2, SK)],
        [("Tracking", 2, SK)],
        [("Climbing", 1, SK)],
        [("Stealth", 1, SK)],
        [("Gesture", 2, SK)],
        [("Cartography", 4, SK)],
        [("Shadowing", 4, SK)],
        [("Traps", 4, SK)],
        [("Mimicry (Bird Calls)", 2, SK)],
        [("Hiking", 2, SK)],
    ]
    for fixed_skill in fixed_skills:
        traits.append(fixed_skill[0])
    skills1 = [
        [
            ("Broadsword", 12, SK),
            ("Shortsword", 12, SK),
            ("Spear", 12, SK),
            ("Staff", 12, SK),
        ],
        [("Broadsword", 8, SK), ("Shortsword", 8, SK), ("Spear", 8, SK)],
        [("Shield", 4, SK)],
    ]
    skills2 = [[("Navigation (Land)", 1, SK)], [("Navigation (Sea)", 1, SK)]]
    skills3 = [
        [("Survival (Arctic)", 1, SK)],
        [("Survival (Desert)", 1, SK)],
        [("Survival (Island/Beach)", 1, SK)],
        [("Survival (Jungle)", 1, SK)],
        [("Survival (Mountain)", 1, SK)],
        [("Survival (Plains)", 1, SK)],
        [("Survival (Swampland)", 1, SK)],
        [("Survival (Woodlands)", 1, SK)],
    ]
    skills4 = [
        [("Brawling", 1, SK)],
        [("Fast-Draw (any other)", 1, SK)],
        [("Garrote", 1, SK)],
        [("Jumping", 1, SK)],
        [("Knife", 1, SK)],
        [("Knot-Tying", 1, SK)],
        [("Boating (Unpowered)", 1, SK)],
        [("Riding (Horse)", 1, SK)],
        [("Throwing", 1, SK)],
        [("Wrestling", 1, SK)],
        [("First Aid", 1, SK)],
        [("Seamanship", 1, SK)],
        [("Armoury (Missile Weapons)", 1, SK)],
        [("Prospecting", 1, SK)],
        [("Weather Sense", 1, SK)],
        [("Swimming", 1, SK)],
        [("Running", 1, SK)],
        [("Skiing", 1, SK)],
        [("Search", 1, SK)],
    ]
    # Collect every skill name seen above so leftover points can improve them.
    all_skills = set()
    for lst in [skills1, skills2, skills3, skills4, fixed_skills]:
        for lst2 in lst:
            for tup in lst2:
                all_skills.add(tup[0])
    traits.extend(pick_from_list(skills1, 12))
    traits.extend(pick_from_list(skills2, 1))
    traits.extend(pick_from_list(skills3, 1))
    pick_or_improve_skills_from_list(all_skills, 8, traits)
    return traits
def generate_swashbuckler() -> List[Tuple[str, int, TraitType]]:
    """Randomly generate a GURPS Swashbuckler character template.

    Returns a list of (trait name, point cost, trait type) tuples: a fixed
    base of attributes, advantages and skills, plus random picks of
    advantages, three nested disadvantage pools, and weapon/background
    skills.

    NOTE: each pick_* call consumes the module-level random stream, so the
    order of calls below matters for reproducibility under a seeded RNG.
    """
    # Fixed template: attributes, core advantages, baseline skills.
    traits = [
        ("ST 11", 10, PA),
        ("DX 15", 100, PA),
        ("IQ 10", 0, PA),
        ("HT 13", 30, PA),
        ("HP 11", 0, SA),
        ("Will 10", 0, SA),
        ("Per 10", 0, SA),
        ("FP 13", 0, SA),
        ("Basic Speed 7.0", 0, SA),
        ("Basic Move 7", 0, SA),
        ("Combat Reflexes", 15, AD),
        ("Enhanced Parry (Weapon of choice) 1", 5, AD),
        ("Luck", 15, AD),
        ("Weapon Bond (Any starting weapon)", 1, AD),
        ("Weapon Master (Weapon of choice) 1", 20, AD),
        ("Jumping", 1, SK),
        ("Fast-Draw (Knife)", 1, SK),
        ("Fast-Draw (Sword)", 1, SK),
        ("Acrobatics", 4, SK),
        ("Wrestling", 2, SK),
        ("Stealth", 1, SK),
        ("Carousing", 1, SK),
    ]
    # Spend 60 points on optional advantages.
    ads1 = [
        list_levels("ST +%d", 10, PA, 6),
        list_levels("DX +%d", 20, PA, 3),
        list_levels("Basic Speed +%d", 20, SA, 2),
        list_levels("Basic Move +%d", 5, SA, 3),
        [("Alcohol Tolerance", 1, AD)],
        [("Ambidexterity", 5, AD)],
        [
            ("Appearance: Attractive", 4, AD),
            ("Appearance: Handsome", 12, AD),
            ("Appearance: Very Handsome", 16, AD),
        ],
        list_levels("Charisma %d", 5, AD, 5),
        [("Daredevil", 15, AD)],
        [("Enhanced Dodge", 15, AD)],
        list_levels(
            "Enhanced Parry %d (Weapon of Choice)", 5, AD, 2, min_level=2
        ),
        [("Extra Attack 1", 25, AD)],
        [("No Hangover", 1, AD)],
        [("Perfect Balance", 15, AD)],
        [("Rapier Wit", 5, AD)],
        list_levels("Serendipity %d", 15, AD, 4),
        list_levels("Signature Gear %d", 1, AD, 10),
        list_levels("Striking ST %d", 5, AD, 2),
        [("Extraordinary Luck", 15, AD), ("Ridiculous Luck", 45, AD)],
    ]
    traits.extend(pick_from_list(ads1, 60))
    # Three nested disadvantage pools: -15, -15, then -20 points.
    # NOTE(review): "Wounded" appears in both disads1 and disads3, so the
    # merged pool lists it twice -- presumably pick_from_list tolerates
    # duplicates; confirm.
    disads1 = [
        [
            ("Code of Honor (Pirate's)", -5, DI),
            ("Code of Honor (Gentleman's)", -10, DI),
        ],
        list_self_control_levels(
            "Obsession (Become the best swordsman in the world)", -10
        ),
        [("Vow (Use only weapon of choice)", -5, DI)],
        [("Vow (Never resist a challenge to combat)", -10, DI)],
        [("Vow (Challenge every swordsman to combat)", -15, DI)],
        [("Vow (Never wear armor)", -15, DI)],
        [("Wounded", -5, DI)],
    ]
    traits.extend(pick_from_list(disads1, -15))
    disads2 = [
        list_self_control_levels("Impulsiveness", -10),
        list_self_control_levels("Overconfidence", -5),
        list_self_control_levels("Short Attention Span", -10),
        list_self_control_levels("Trickster", -15),
    ]
    disads2.extend(disads1)
    traits.extend(pick_from_list(disads2, -15))
    disads3 = [
        list_self_control_levels2("Chummy", -5, "Gregarious", -10),
        list_self_control_levels("Compulsive Carousing", -5),
        list_self_control_levels("Compulsive Spending", -5),
        list_self_control_levels("Greed", -15),
        list_self_control_levels("Jealousy", -10),
        list_self_control_levels("Lecherousness", -15),
        [("One Eye", -15, DI)],
        [("Sense of Duty (Adventuring companions)", -5, DI)],
        [("Wounded", -5, DI)],
    ]
    disads3.extend(disads2)
    traits.extend(pick_from_list(disads3, -20))
    # Skill picks: ranged option, primary sword style, unarmed, social, misc.
    skills1 = [[("Thrown Weapon (Knife)", 2, SK)], [("Throwing", 2, SK)]]
    traits.extend(pick_from_list(skills1, 2))
    skills2 = [
        [
            ("Broadsword", 20, SK),
            ("Rapier", 20, SK),
            ("Saber", 20, SK),
            ("Shortsword", 20, SK),
            ("Smallsword", 20, SK),
        ],
        [
            ("Broadsword", 16, SK),
            ("Rapier", 16, SK),
            ("Saber", 16, SK),
            ("Shortsword", 16, SK),
            ("Smallsword", 16, SK),
        ],
        [
            ("Broadsword", 12, SK),
            ("Rapier", 12, SK),
            ("Saber", 12, SK),
            ("Shortsword", 12, SK),
            ("Smallsword", 12, SK),
        ],
        [
            ("Shield (Buckler)", 8, SK),
            ("Cloak", 8, SK),
            ("Main-Gauche", 8, SK),
        ],
        [
            ("Shield (Buckler)", 4, SK),
            ("Cloak", 4, SK),
            ("Main-Gauche", 4, SK),
        ],
    ]
    traits.extend(pick_from_list(skills2, 20))
    skills3 = [[("Brawling", 2, SK)], [("Boxing", 2, SK)]]
    traits.extend(pick_from_list(skills3, 2))
    skills4 = [
        [("Savoir-Faire (High Society)", 2, SK)],
        [("Streetwise", 2, SK)],
    ]
    traits.extend(pick_from_list(skills4, 2))
    skills5 = [
        [("Fast-Draw (any other)", 1, SK)],
        [("Climbing", 1, SK)],
        [("First Aid", 1, SK)],
        [("Gesture", 1, SK)],
        [("Seamanship", 1, SK)],
        [("Connoisseur (any)", 1, SK)],
        [("Fast-Talk", 1, SK)],
        [("Gambling", 1, SK)],
        [("Hiking", 1, SK)],
        [("Sex Appeal", 1, SK)],
        [("Intimidation", 1, SK)],
        [("Scrounging", 1, SK)],
        [("Search", 1, SK)],
    ]
    traits.extend(pick_from_list(skills5, 7))
    return traits
def generate_thief() -> List[Tuple[str, int, TraitType]]:
traits = [
("ST 11", 10, PA),
("DX | |
# <gh_stars>0  (dataset/scraper artifact; kept as a comment so the module parses)
# Copyright (C) 2021 Open Source Robotics Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import ignition
from ignition.math import Angle, SphericalCoordinates, Vector3d
import math
class TestSphericalCoordinates(unittest.TestCase):
    """Unit tests for the ignition.math SphericalCoordinates Python bindings."""

    def test_constructor(self):
        """Covers default, surface-only, fully-specified and copy construction."""
        # Default surface type
        st = SphericalCoordinates.EARTH_WGS84

        # No arguments, default parameters
        sc = SphericalCoordinates()
        self.assertEqual(sc.surface(), st)
        self.assertEqual(sc.latitude_reference(), Angle())
        self.assertEqual(sc.longitude_reference(), Angle())
        self.assertEqual(sc.heading_offset(), Angle())
        self.assertAlmostEqual(sc.elevation_reference(), 0.0, delta=1e-6)

        # SurfaceType argument, default parameters
        sc = SphericalCoordinates(st)
        self.assertEqual(sc.surface(), st)
        self.assertEqual(sc.latitude_reference(), Angle())
        self.assertEqual(sc.longitude_reference(), Angle())
        self.assertEqual(sc.heading_offset(), Angle())
        self.assertAlmostEqual(sc.elevation_reference(), 0.0, delta=1e-6)

        # All arguments
        lat = Angle(0.3)
        lon = Angle(-1.2)
        heading = Angle(0.5)
        elev = 354.1
        sc = SphericalCoordinates(st, lat, lon, elev, heading)
        self.assertEqual(sc.surface(), st)
        self.assertEqual(sc.latitude_reference(), lat)
        self.assertEqual(sc.longitude_reference(), lon)
        self.assertEqual(sc.heading_offset(), heading)
        self.assertAlmostEqual(sc.elevation_reference(), elev, delta=1e-6)

        # Copy constructor
        sc2 = SphericalCoordinates(sc)
        self.assertEqual(sc, sc2)

    def test_convert(self):
        """String <-> SurfaceType conversion round-trips."""
        # Default surface type
        st = SphericalCoordinates.EARTH_WGS84
        self.assertEqual(SphericalCoordinates.convert("EARTH_WGS84"), st)
        # An unrecognized string falls back to EARTH_WGS84.
        self.assertEqual(SphericalCoordinates.EARTH_WGS84,
                         SphericalCoordinates.convert("OTHER-COORD"))
        self.assertEqual("EARTH_WGS84", SphericalCoordinates.convert(st))

    def test_set_functions(self):
        """Setters update each reference field independently."""
        # Default surface type
        st = SphericalCoordinates.EARTH_WGS84

        # Default parameters
        sc = SphericalCoordinates()
        self.assertEqual(sc.surface(), st)
        self.assertEqual(sc.latitude_reference(), Angle())
        self.assertEqual(sc.longitude_reference(), Angle())
        self.assertEqual(sc.heading_offset(), Angle())
        self.assertAlmostEqual(sc.elevation_reference(), 0.0, delta=1e-6)

        lat = Angle(0.3)
        lon = Angle(-1.2)
        heading = Angle(0.5)
        elev = 354.1
        sc.set_surface(st)
        sc.set_latitude_reference(lat)
        sc.set_longitude_reference(lon)
        sc.set_heading_offset(heading)
        sc.set_elevation_reference(elev)
        self.assertEqual(sc.surface(), st)
        self.assertEqual(sc.latitude_reference(), lat)
        self.assertEqual(sc.longitude_reference(), lon)
        self.assertEqual(sc.heading_offset(), heading)
        self.assertAlmostEqual(sc.elevation_reference(), elev, delta=1e-6)

    def test_coordinate_transforms(self):
        """Round-trips between local, global (ENU), spherical and ECEF frames."""
        # Default surface type
        st = SphericalCoordinates.EARTH_WGS84
        # Parameters
        lat = Angle(0.3)
        lon = Angle(-1.2)
        heading = Angle(Angle.HALF_PI)
        elev = 354.1
        sc = SphericalCoordinates(st, lat, lon, elev, heading)
        # Check GlobalFromLocal with heading offset of 90 degrees
        # Heading 0: X == East, Y == North, Z == Up
        # Heading 90: X == North, Y == West , Z == Up
        # local frame
        xyz = Vector3d()
        # east, north, up
        enu = Vector3d()
        xyz.set(1, 0, 0)
        enu = sc.global_from_local_velocity(xyz)
        self.assertAlmostEqual(enu.y(), xyz.x(), delta=1e-6)
        self.assertAlmostEqual(enu.x(), -xyz.y(), delta=1e-6)
        self.assertEqual(xyz, sc.local_from_global_velocity(enu))
        xyz.set(0, 1, 0)
        enu = sc.global_from_local_velocity(xyz)
        self.assertAlmostEqual(enu.y(), xyz.x(), delta=1e-6)
        self.assertAlmostEqual(enu.x(), -xyz.y(), delta=1e-6)
        self.assertEqual(xyz, sc.local_from_global_velocity(enu))
        xyz.set(1, -1, 0)
        enu = sc.global_from_local_velocity(xyz)
        self.assertAlmostEqual(enu.y(), xyz.x(), delta=1e-6)
        self.assertAlmostEqual(enu.x(), -xyz.y(), delta=1e-6)
        self.assertEqual(xyz, sc.local_from_global_velocity(enu))
        xyz.set(2243.52334, 556.35, 435.6553)
        enu = sc.global_from_local_velocity(xyz)
        self.assertAlmostEqual(enu.y(), xyz.x(), delta=1e-6)
        self.assertAlmostEqual(enu.x(), -xyz.y(), delta=1e-6)
        self.assertEqual(xyz, sc.local_from_global_velocity(enu))

        # Check SphericalFromLocal
        # local frame
        xyz = Vector3d()
        # spherical coordinates
        sph = Vector3d()
        # No offset
        xyz.set(0, 0, 0)
        sph = sc.spherical_from_local_position(xyz)
        # latitude
        self.assertAlmostEqual(sph.x(), lat.degree(), delta=1e-6)
        # longitude
        self.assertAlmostEqual(sph.y(), lon.degree(), delta=1e-6)
        # elevation
        self.assertAlmostEqual(sph.z(), elev, delta=1e-6)

        # 200 km offset in x (pi/2 heading offset means North). We use
        # SphericalFromLocal, which means that xyz is a linear movement on
        # a plane (not along the curvature of Earth). This will result in
        # a large height offset.
        xyz.set(2e5, 0, 0)
        sph = sc.spherical_from_local_position(xyz)
        # increase in latitude about 1.8 degrees
        self.assertAlmostEqual(sph.x(), lat.degree() + 1.8, delta=0.008)
        # no change in longitude
        # NOTE(review): the comment above mentions longitude, but no longitude
        # assertion follows — the next check is elevation. Confirm whether a
        # sph.y() check was dropped here.
        self.assertAlmostEqual(sph.z(), 3507.024791, delta=1e-6)
        xyz2 = sc.local_from_spherical_position(sph)
        self.assertEqual(xyz, xyz2)

        # Check position projection
        # WGS84 coordinate obtained from online mapping software
        # > gdaltransform -s_srs WGS84 -t_srs EPSG:4978
        # > latitude longitude altitude
        # > X Y Z
        tmp = Vector3d()
        osrf_s = Vector3d(37.3877349, -122.0651166, 32.0)
        osrf_e = Vector3d(-2693701.91434394, -4299942.14687992, 3851691.0393571)
        goog_s = Vector3d(37.4216719, -122.0821853, 30.0)
        # Local tangent plane coordinates (ENU = GLOBAL) coordinates of
        # Google when OSRF is taken as the origin:
        # > proj +ellps=WGS84 +proj=tmerc
        # +lat_0=37.3877349 +lon_0=-122.0651166 +k=1 +x_0=0 +y_0=0
        # > -122.0821853 37.4216719 (LON,LAT)
        # > -1510.88 3766.64 (EAST,NORTH)
        vec = Vector3d(-1510.88, 3766.64, -3.29)
        # Convert degrees to radians
        osrf_s.x(osrf_s.x() * 0.0174532925)
        osrf_s.y(osrf_s.y() * 0.0174532925)
        # Set the ORIGIN to be the Open Source Robotics Foundation
        sc2 = SphericalCoordinates(st, Angle(osrf_s.x()),
                                   Angle(osrf_s.y()), osrf_s.z(), Angle.ZERO)
        # Check that SPHERICAL -> ECEF works
        tmp = sc2.position_transform(osrf_s, SphericalCoordinates.SPHERICAL,
                                     SphericalCoordinates.ECEF)
        self.assertAlmostEqual(tmp.x(), osrf_e.x(), delta=8e-2)
        self.assertAlmostEqual(tmp.y(), osrf_e.y(), delta=8e-2)
        self.assertAlmostEqual(tmp.z(), osrf_e.z(), delta=1e-2)
        # Check that ECEF -> SPHERICAL works
        tmp = sc2.position_transform(tmp, SphericalCoordinates.ECEF, SphericalCoordinates.SPHERICAL)
        self.assertAlmostEqual(tmp.x(), osrf_s.x(), delta=1e-2)
        self.assertAlmostEqual(tmp.y(), osrf_s.y(), delta=1e-2)
        self.assertAlmostEqual(tmp.z(), osrf_s.z(), delta=1e-2)
        # Check that SPHERICAL -> LOCAL works
        tmp = sc2.local_from_spherical_position(goog_s)
        self.assertAlmostEqual(tmp.x(), vec.x(), delta=8e-2)
        self.assertAlmostEqual(tmp.y(), vec.y(), delta=8e-2)
        self.assertAlmostEqual(tmp.z(), vec.z(), delta=1e-2)
        # Check that SPHERICAL -> LOCAL -> SPHERICAL works
        tmp = sc2.spherical_from_local_position(tmp)
        self.assertAlmostEqual(tmp.x(), goog_s.x(), delta=8e-2)
        self.assertAlmostEqual(tmp.y(), goog_s.y(), delta=8e-2)
        self.assertAlmostEqual(tmp.z(), goog_s.z(), delta=1e-2)

        # Give no heading offset to confirm ENU frame
        lat = Angle(0.3)
        lon = Angle(-1.2)
        heading = Angle(0.0)
        elev = 354.1
        sc = SphericalCoordinates(st, lat, lon, elev, heading)
        # Check GlobalFromLocal with no heading offset
        # local frame
        xyz = Vector3d()
        # east, north, up
        enu = Vector3d()
        xyz.set(1, 0, 0)
        enu = sc.velocity_transform(xyz, SphericalCoordinates.LOCAL2, SphericalCoordinates.GLOBAL)
        self.assertEqual(xyz, enu)
        self.assertEqual(xyz, sc.local_from_global_velocity(enu))
        xyz.set(0, 1, 0)
        enu = sc.velocity_transform(xyz, SphericalCoordinates.LOCAL2, SphericalCoordinates.GLOBAL)
        self.assertEqual(xyz, enu)
        self.assertEqual(xyz, sc.local_from_global_velocity(enu))
        xyz.set(1, -1, 0)
        enu = sc.velocity_transform(xyz, SphericalCoordinates.LOCAL2, SphericalCoordinates.GLOBAL)
        self.assertEqual(xyz, enu)
        self.assertEqual(xyz, sc.local_from_global_velocity(enu))
        xyz.set(2243.52334, 556.35, 435.6553)
        enu = sc.velocity_transform(xyz, SphericalCoordinates.LOCAL2, SphericalCoordinates.GLOBAL)
        self.assertEqual(xyz, enu)
        self.assertEqual(xyz, sc.local_from_global_velocity(enu))

    def test_distance(self):
        """Great-circle distance between two known lat/lon pairs."""
        latA = Angle()
        longA = Angle()
        latB = Angle()
        longB = Angle()
        latA.set_degree(46.250944)
        longA.set_degree(-122.249972)
        latB.set_degree(46.124953)
        longB.set_degree(-122.251683)
        d = SphericalCoordinates.distance(latA, longA, latB, longB)
        # ~14 km expected; allow generous tolerance on the spheroid model.
        self.assertAlmostEqual(14002, d, delta=20)

    def test_bad_set_surface(self):
        """set_surface() stores even an out-of-range SurfaceType value."""
        sc = SphericalCoordinates()
        sc.set_surface(SphericalCoordinates.SurfaceType(2))
        self.assertEqual(sc.surface(), SphericalCoordinates.SurfaceType(2))

    def test_transform(self):
        """Identity velocity transform and an ECEF -> GLOBAL position transform."""
        sc = SphericalCoordinates()
        vel = Vector3d(1, 2, -4)
        # ECEF -> ECEF is the identity.
        result = sc.velocity_transform(
            vel,
            SphericalCoordinates.ECEF,
            SphericalCoordinates.ECEF)
        self.assertEqual(result, vel)
        pos = Vector3d(-1510.88, 2, -4)
        result = sc.position_transform(
            pos,
            SphericalCoordinates.ECEF,
            SphericalCoordinates.GLOBAL)
        self.assertAlmostEqual(result.x(), 2, delta=1e-6)
        self.assertAlmostEqual(result.y(), -4, delta=1e-6)
        self.assertAlmostEqual(result.z(), -6379647.8799999999, delta=1e-6)
        print('NEW POS[', result.x(), ' ', result.y(), ' ', result.z(), ']\n')

    def test_bad_coordinate_type(self):
        """Invalid CoordinateType values leave the input unchanged."""
        sc = SphericalCoordinates()
        pos = Vector3d(1, 2, -4)
        result = sc.position_transform(pos,
                                       SphericalCoordinates.CoordinateType(7),
                                       SphericalCoordinates.CoordinateType(6))
        self.assertEqual(result, pos)
        result = sc.position_transform(pos,
                                       SphericalCoordinates.CoordinateType(4),
                                       SphericalCoordinates.CoordinateType(6))
        self.assertEqual(result, pos)
        # SPHERICAL velocity transforms are unsupported in either direction.
        result = sc.velocity_transform(
            pos,
            SphericalCoordinates.SPHERICAL,
            SphericalCoordinates.ECEF)
        self.assertEqual(result, pos)
        result = sc.velocity_transform(
            pos,
            SphericalCoordinates.ECEF,
            SphericalCoordinates.SPHERICAL)
        self.assertEqual(result, pos)
        result = sc.velocity_transform(pos,
                                       SphericalCoordinates.CoordinateType(7),
                                       SphericalCoordinates.ECEF)
        self.assertEqual(result, pos)
        result = sc.velocity_transform(pos,
                                       SphericalCoordinates.ECEF,
                                       SphericalCoordinates.CoordinateType(7))
        self.assertEqual(result, pos)

    def test_equality_ops(self):
        """== and != compare every reference field."""
        # Default surface type
        st = SphericalCoordinates.EARTH_WGS84
        lat = Angle(0.3)
        lon = Angle(-1.2)
        heading = Angle(0.5)
        elev = 354.1
        sc1 = SphericalCoordinates(st, lat, lon, elev, heading)
        sc2 = SphericalCoordinates(st, lat, lon, elev, heading)
        self.assertTrue(sc1 == sc2)
        self.assertFalse(sc1 != sc2)
        # Each sc3..sc6 differs from sc1 in exactly one field.
        sc3 = SphericalCoordinates(st, Angle.ZERO, lon, elev, heading)
        self.assertFalse(sc1 == sc3)
        self.assertTrue(sc1 != sc3)
        sc4 = SphericalCoordinates(st, lat, Angle.ZERO, elev, heading)
        self.assertFalse(sc1 == sc4)
        self.assertTrue(sc1 != sc4)
        sc5 = SphericalCoordinates(st, lat, lon, elev + 1, heading)
        self.assertFalse(sc1 == sc5)
        self.assertTrue(sc1 != sc5)
        sc6 = SphericalCoordinates(st, lat, lon, elev, Angle.ZERO)
        self.assertFalse(sc1 == sc6)
        self.assertTrue(sc1 != sc6)

    # NOTE(review): method name has a typo ("assigment"); left as-is since
    # renaming would change the test identifier.
    def test_assigment_op(self):
        """Assignment produces an equal object."""
        # Default surface type
        st = SphericalCoordinates.EARTH_WGS84
        lat = Angle(0.3)
        lon = Angle(-1.2)
        heading = Angle(0.5)
        elev = 354.1
        sc1 = SphericalCoordinates(st, lat, lon, elev, heading)
        sc2 = sc1
        self.assertEqual(sc1, sc2)

    def test_no_heading(self):
        """With zero heading, local +X is East and +Y is North."""
        # Default heading
        st = SphericalCoordinates.EARTH_WGS84
        lat = Angle(-22.9 * math.pi / 180.0)
        lon = Angle(-43.2 * math.pi / 180.0)
        heading = Angle(0.0)
        elev = 0
        sc = SphericalCoordinates(st, lat, lon, elev, heading)
        # Origin matches input
        latLonAlt = sc.spherical_from_local_position(Vector3d(0, 0, 0))
        self.assertEqual(lat.degree(), latLonAlt.x())
        self.assertEqual(lon.degree(), latLonAlt.y())
        self.assertEqual(elev, latLonAlt.z())
        xyzOrigin = sc.local_from_spherical_position(latLonAlt)
        self.assertEqual(Vector3d.ZERO, xyzOrigin)
        # Check how different lat/lon affect the local position
        # Increase latitude == go North == go +Y
        xyz = sc.local_from_spherical_position(
            Vector3d(lat.degree() + 1.0, lon.degree(), elev))
        self.assertAlmostEqual(xyzOrigin.x(), xyz.x(), delta=1e-6)
        self.assertLess(xyzOrigin.y(), xyz.y())
        # Decrease latitude == go South == go -Y
        xyz = sc.local_from_spherical_position(
            Vector3d(lat.degree() - 1.0, lon.degree(), elev))
        self.assertAlmostEqual(xyzOrigin.x(), xyz.x(), delta=1e-6)
        self.assertGreater(xyzOrigin.y(), xyz.y())
        # Increase longitude == go East == go +X
        # Also move a bit -Y because this is the Southern Hemisphere
        xyz = sc.local_from_spherical_position(
            Vector3d(lat.degree(), lon.degree() + 1.0, elev))
        self.assertLess(xyzOrigin.x(), xyz.x())
        self.assertGreater(xyzOrigin.y(), xyz.y())
        # Decrease longitude == go West == go -X
        # Also move a bit -Y because this is the Southern Hemisphere
        xyz = sc.local_from_spherical_position(
            Vector3d(lat.degree(), lon.degree() - 1.0, elev))
        self.assertGreater(xyzOrigin.x(), xyz.x())
        self.assertGreater(xyzOrigin.y(), xyz.y())
        # Increase altitude
        xyz = sc.local_from_spherical_position(
            Vector3d(lat.degree(), lon.degree(), elev + 10.0))
        self.assertAlmostEqual(xyzOrigin.x(), xyz.x(), delta=1e-6)
        self.assertAlmostEqual(xyzOrigin.y(), xyz.y(), delta=1e-6)
        self.assertAlmostEqual(xyzOrigin.z() + 10.0, xyz.z(), delta=1e-6)
        # Decrease altitude
        xyz = sc.local_from_spherical_position(
            Vector3d(lat.degree(), lon.degree(), elev - 10.0))
        self.assertAlmostEqual(xyzOrigin.x(), xyz.x(), delta=1e-6)
        self.assertAlmostEqual(xyzOrigin.y(), xyz.y(), delta=1e-6)
        self.assertAlmostEqual(xyzOrigin.z() - 10.0, xyz.z(), delta=1e-6)
# | |
"""Tests for the uas_telemetry module."""
import datetime
from auvsi_suas.models.aerial_position import AerialPosition
from auvsi_suas.models.gps_position import GpsPosition
from auvsi_suas.models.mission_config import MissionConfig
from auvsi_suas.models.uas_telemetry import UasTelemetry
from auvsi_suas.models.waypoint import Waypoint
from auvsi_suas.proto.interop_admin_api_pb2 import WaypointEvaluation
from django.contrib.auth.models import User
from django.test import TestCase
from django.utils import timezone
class TestUasTelemetryBase(TestCase):
    """Base for the UasTelemetry tests.

    Creates a test user plus a minimal MissionConfig fixture, and provides
    helpers for building telemetry logs, waypoints, and rich comparison
    assertions used by the concrete test classes.
    """

    def setUp(self):
        """Create the test user and a minimal saved mission."""
        self.user = User.objects.create_user('testuser', '<EMAIL>',
                                             '<PASSWORD>')
        self.user.save()

        # Mission
        pos = GpsPosition()
        pos.latitude = 10
        pos.longitude = 100
        pos.save()
        apos = AerialPosition()
        apos.latitude = 10
        apos.longitude = 100
        apos.altitude_msl = 1000
        apos.save()
        wpt = Waypoint()
        wpt.latitude = 10
        wpt.longitude = 100
        wpt.altitude_msl = 1000
        wpt.order = 10
        wpt.save()
        self.mission = MissionConfig()
        self.mission.home_pos = pos
        self.mission.lost_comms_pos = pos
        self.mission.emergent_last_known_pos = pos
        self.mission.off_axis_odlc_pos = pos
        self.mission.map_center_pos = pos
        self.mission.map_height_ft = 1
        self.mission.air_drop_pos = pos
        self.mission.ugv_drive_pos = pos
        self.mission.save()
        self.mission.mission_waypoints.add(wpt)
        self.mission.search_grid_points.add(wpt)
        self.mission.save()

        # Reference time; per-log timestamps are offsets from this.
        self.now = timezone.now()

    def create_log_element(self, timestamp, lat, lon, alt, heading, user=None):
        """Create and save a single telemetry log.

        Args:
            timestamp: Offset in seconds from self.now.
            lat: Latitude in degrees.
            lon: Longitude in degrees.
            alt: Altitude MSL.
            heading: UAS heading in degrees.
            user: Owner of the log; defaults to self.user.
        Returns:
            The saved UasTelemetry object.
        """
        if user is None:
            user = self.user
        log = UasTelemetry(user=user,
                           latitude=lat,
                           longitude=lon,
                           altitude_msl=alt,
                           uas_heading=heading)
        log.save()
        # Second save: the timestamp is assigned after the initial save so
        # the requested value sticks. NOTE(review): presumably the model
        # auto-populates timestamp on first save — confirm against the
        # UasTelemetry model definition.
        log.timestamp = self.now + datetime.timedelta(seconds=timestamp)
        log.save()
        return log

    def create_uas_logs(self, entries, user=None):
        """Create a list of uas telemetry logs.

        Args:
            entries: List of (t, lat, lon, alt, heading) tuples for each entry.
            user: User to create logs for.
        Returns:
            List of UasTelemetry objects.
        """
        if user is None:
            user = self.user
        ret = []
        for (t, lat, lon, alt, head) in entries:
            ret.append(self.create_log_element(t, lat, lon, alt, head, user))
        return ret

    def waypoints_from_data(self, waypoints_data):
        """Converts tuples of lat/lon/alt to a waypoint."""
        waypoints = []
        for i, waypoint in enumerate(waypoints_data):
            (lat, lon, alt) = waypoint
            wpt = Waypoint()
            wpt.order = i
            wpt.latitude = lat
            wpt.longitude = lon
            wpt.altitude_msl = alt
            wpt.save()
            waypoints.append(wpt)
        return waypoints

    def assertTelemetryEqual(self, expect, got):
        """Assert two telemetry are equal."""
        msg = '%s != %s' % (expect, got)
        self.assertAlmostEqual(expect.latitude,
                               got.latitude,
                               places=6,
                               msg=msg)
        self.assertAlmostEqual(expect.longitude,
                               got.longitude,
                               places=6,
                               msg=msg)
        self.assertAlmostEqual(expect.altitude_msl,
                               got.altitude_msl,
                               places=3,
                               msg=msg)
        self.assertAlmostEqual(expect.uas_heading,
                               got.uas_heading,
                               places=3,
                               msg=msg)

    def assertTelemetriesEqual(self, expect, got):
        """Assert two lists of telemetry are equal."""
        # Fix: the docstring above was previously the malformed literal
        # "..." "" (a typo for triple quotes). Also, materialize both
        # arguments with list() so one-shot iterables can be compared.
        expect = list(expect)
        got = list(got)
        self.assertEqual(len(expect), len(got))
        for ix in range(len(expect)):
            self.assertTelemetryEqual(expect[ix], got[ix])

    def assertSatisfiedWaypoints(self, expect, got):
        """Assert two satisfied_waypoints return values are equal."""
        msg = '%s != %s' % (expect, got)
        self.assertEqual(len(expect), len(got), msg=msg)
        for i in range(len(expect)):
            e = expect[i]
            g = got[i]
            self.assertEqual(e.id, g.id, msg=msg)
            self.assertAlmostEqual(e.score_ratio,
                                   g.score_ratio,
                                   places=2,
                                   msg=msg)
            self.assertAlmostEqual(e.closest_for_scored_approach_ft,
                                   g.closest_for_scored_approach_ft,
                                   delta=5,
                                   msg=msg)
            self.assertAlmostEqual(e.closest_for_mission_ft,
                                   g.closest_for_mission_ft,
                                   delta=5,
                                   msg=msg)
class TestUasTelemetry(TestUasTelemetryBase):
    """Tests the UasTelemetry model."""

    def setUp(self):
        super(TestUasTelemetry, self).setUp()
        # Reference log that each test compares against.
        self.log = self.create_log_element(timestamp=0,
                                           lat=10,
                                           lon=100,
                                           alt=200,
                                           heading=90)

    def test_clean(self):
        """Model validation passes for a well-formed log."""
        self.log.full_clean()

    def test_duplicate_unequal(self):
        """duplicate() is False when any field differs."""
        # One variant per differing field: position, altitude, heading.
        variants = [
            dict(timestamp=0, lat=20, lon=200, alt=200, heading=90),
            dict(timestamp=0, lat=10, lon=100, alt=300, heading=90),
            dict(timestamp=0, lat=10, lon=100, alt=200, heading=10),
        ]
        for kwargs in variants:
            other = self.create_log_element(**kwargs)
            self.assertFalse(self.log.duplicate(other))

    def test_duplicate_equal(self):
        """duplicate() is True for identical telemetry."""
        twin = self.create_log_element(timestamp=0,
                                       lat=10,
                                       lon=100,
                                       alt=200,
                                       heading=90)
        self.assertTrue(self.log.duplicate(self.log))
        self.assertTrue(self.log.duplicate(twin))
class TestUasTelemetryFilter(TestUasTelemetryBase):
    """Tests the UasTelemetry dedupe() and filter_bad() helpers."""

    def setUp(self):
        super(TestUasTelemetryFilter, self).setUp()
        # Four distinct valid logs and one all-zero (bad) log.
        specs = [
            (10, 200, 200, 90),
            (20, 200, 200, 90),
            (30, 200, 200, 90),
            (40, 200, 200, 90),
            (0, 0, 0, 0),
        ]
        logs = [
            self.create_log_element(timestamp=0,
                                    lat=lat,
                                    lon=lon,
                                    alt=alt,
                                    heading=heading)
            for (lat, lon, alt, heading) in specs
        ]
        (self.log1, self.log2, self.log3, self.log4, self.log5) = logs

    def test_no_logs(self):
        """Tests empty log."""
        self.assertSequenceEqual(list(UasTelemetry.dedupe([])), [])

    def test_no_duplicates(self):
        """Tests no duplicates in list."""
        orig = [self.log1, self.log2, self.log3, self.log4]
        self.assertSequenceEqual(list(UasTelemetry.dedupe(orig)), orig)

    def test_boundary_duplicates(self):
        """Tests duplicates on the bounds of the list."""
        orig = [self.log1, self.log1, self.log2, self.log2, self.log2]
        self.assertSequenceEqual(list(UasTelemetry.dedupe(orig)),
                                 [self.log1, self.log2])

    def test_duplicates(self):
        """Tests interior duplicates are collapsed."""
        orig = [
            self.log1, self.log1, self.log2, self.log3, self.log3, self.log4,
            self.log4
        ]
        self.assertSequenceEqual(list(UasTelemetry.dedupe(orig)),
                                 [self.log1, self.log2, self.log3, self.log4])

    def test_filter_bad(self):
        """Tests filter_bad()."""
        self.assertSequenceEqual(
            list(UasTelemetry.filter_bad([self.log1, self.log5])),
            [self.log1])
class TestUasTelemetryInterpolate(TestUasTelemetryBase):
    """Tests the UasTelemetry interpolate()."""

    def _assert_interpolates(self, expect_entries, input_entries):
        """Check interpolate() of the input entries equals the expected ones."""
        expect = self.create_uas_logs(expect_entries)
        got = UasTelemetry.interpolate(self.create_uas_logs(input_entries))
        self.assertTelemetriesEqual(expect, got)

    def test_single_point(self):
        """Tests handles ranges of points."""
        entry = [
            (0, 38, -76, 100, 0),
        ]
        self._assert_interpolates(entry, entry)

    def test_position(self):
        """Tests the returned interpolated position."""
        self._assert_interpolates(
            [
                (0.0, 38, -76, 100, 0),
                (0.1, 39, -75, 105, 1),
                (0.2, 40, -74, 110, 2),
                (0.3, 41, -73, 115, 3),
                (0.4, 42, -72, 120, 4),
                (0.5, 43, -71, 130, 5),
                (0.6, 44, -70, 140, 6),
                (0.7, 45, -69, 150, 7),
            ],
            [
                (0.0, 38, -76, 100, 0),
                (0.2, 40, -74, 110, 2),
                (0.4, 42, -72, 120, 4),
                (0.7, 45, -69, 150, 7),
            ])

    def test_over_step(self):
        """Tests it doesn't interpolate when dt less than step."""
        entries = [
            (0.0, 38, -76, 100, 0),
            (0.1, 38, -76, 110, 0),
            (0.2, 38, -76, 120, 0),
        ]
        self._assert_interpolates(entries, entries)

    def test_over_max_gap(self):
        """Tests it doesn't interpolate when over the max gap."""
        entries = [
            (00, 38, -76, 100, 0),
            (10, 38, -76, 110, 0),
        ]
        self._assert_interpolates(entries, entries)
class TestUasTelemetryWaypoints(TestUasTelemetryBase):
def test_satisfied_waypoints(self):
"""Tests the evaluation of waypoints method."""
# Create mission config
gpos = GpsPosition()
gpos.latitude = 10
gpos.longitude = 10
gpos.save()
waypoints = self.waypoints_from_data([
(38, -76, 100),
(39, -77, 200),
(40, -78, 0),
])
# Only first is valid.
logs = self.create_uas_logs([
(0, 38, -76, 140, 0),
(1, 40, -78, 600, 0),
(2, 37, -75, 40, 0),
])
expect = [
WaypointEvaluation(id=0,
score_ratio=0.6,
closest_for_scored_approach_ft=40,
closest_for_mission_ft=40),
WaypointEvaluation(id=1, score_ratio=0,
closest_for_mission_ft=170),
WaypointEvaluation(id=2, score_ratio=0, closest_for_mission_ft=600)
]
self.assertSatisfiedWaypoints(
expect, UasTelemetry.satisfied_waypoints(gpos, waypoints, logs))
# First and last are valid.
logs = self.create_uas_logs([
(0, 38, -76, 140, 0),
(1, 40, -78, 600, 0),
(2, 40, -78, 40, 0),
])
expect = [
WaypointEvaluation(id=0,
score_ratio=0.6,
closest_for_scored_approach_ft=40,
closest_for_mission_ft=40),
WaypointEvaluation(id=1, score_ratio=0,
closest_for_mission_ft=170),
WaypointEvaluation(id=2,
score_ratio=0.6,
closest_for_scored_approach_ft=40,
closest_for_mission_ft=40)
]
self.assertSatisfiedWaypoints(
expect, UasTelemetry.satisfied_waypoints(gpos, waypoints, logs))
# Hit all.
logs = self.create_uas_logs([
(0, 38, -76, 140, 0),
(1, 39, -77, 180, 0),
(2, 40, -78, 40, 0),
])
expect = [
WaypointEvaluation(id=0,
score_ratio=0.6,
closest_for_scored_approach_ft=40,
closest_for_mission_ft=40),
WaypointEvaluation(id=1,
score_ratio=0.8,
closest_for_scored_approach_ft=20,
closest_for_mission_ft=20),
WaypointEvaluation(id=2,
score_ratio=0.6,
closest_for_scored_approach_ft=40,
closest_for_mission_ft=40)
]
self.assertSatisfiedWaypoints(
expect, UasTelemetry.satisfied_waypoints(gpos, waypoints, logs))
# Only hit the first waypoint on run one, hit all on run two.
logs = self.create_uas_logs([
(0, 38, -76, 140, 0),
(1, 40, -78, 600, 0),
(2, 37, -75, 40, 0),
# Run two:
(3, 38, -76, 140, 0),
(4, 39, -77, 180, 0),
(5, 40, -78, 40, 0),
])
expect = [
WaypointEvaluation(id=0,
score_ratio=0.6,
closest_for_scored_approach_ft=40,
closest_for_mission_ft=40),
WaypointEvaluation(id=1,
score_ratio=0.8,
closest_for_scored_approach_ft=20,
closest_for_mission_ft=20),
WaypointEvaluation(id=2,
score_ratio=0.6,
closest_for_scored_approach_ft=40,
closest_for_mission_ft=40)
]
self.assertSatisfiedWaypoints(
expect, UasTelemetry.satisfied_waypoints(gpos, waypoints, logs))
# Hit all on run one, only hit the first waypoint on run two.
logs = self.create_uas_logs([
(0, 38, -76, 140, 0),
(1, 39, -77, 180, 0),
(2, 40, -78, 40, 0),
# Run two:
(3, 38, -76, 140, 0),
(4, 40, -78, 600, 0),
(5, 37, -75, 40, 0)
])
expect = [
WaypointEvaluation(id=0,
score_ratio=0.6,
closest_for_scored_approach_ft=40,
closest_for_mission_ft=40),
WaypointEvaluation(id=1,
score_ratio=0.8,
closest_for_scored_approach_ft=20,
closest_for_mission_ft=20),
WaypointEvaluation(id=2,
score_ratio=0.6,
closest_for_scored_approach_ft=40,
closest_for_mission_ft=40)
]
self.assertSatisfiedWaypoints(
expect, UasTelemetry.satisfied_waypoints(gpos, waypoints, logs))
# Keep flying after hitting all waypoints.
logs = self.create_uas_logs([
(0, 38, -76, 140, 0),
(1, 39, -77, 180, 0),
(2, 40, -78, 40, 0),
(3, 30.1, -78.1, 100, 0),
])
expect = [
WaypointEvaluation(id=0,
score_ratio=0.6,
closest_for_scored_approach_ft=40,
closest_for_mission_ft=40),
WaypointEvaluation(id=1,
score_ratio=0.8,
closest_for_scored_approach_ft=20,
closest_for_mission_ft=20),
WaypointEvaluation(id=2,
score_ratio=0.6,
closest_for_scored_approach_ft=40,
closest_for_mission_ft=40)
]
self.assertSatisfiedWaypoints(
expect, UasTelemetry.satisfied_waypoints(gpos, waypoints, logs))
# Hit all in first run, but second is higher scoring.
logs = self.create_uas_logs([
(0, 38, -76, 140, 0),
(1, 39, -77, 180, 0),
(2, 40, -78, 60, 0),
# Run two:
(3, 38, -76, 100, 0),
(4, 39, -77, 200, 0),
(5, 40, -78, 110, 0)
])
expect = [
WaypointEvaluation(id=0,
score_ratio=1,
closest_for_scored_approach_ft=0,
closest_for_mission_ft=0),
WaypointEvaluation(id=1,
score_ratio=1,
closest_for_scored_approach_ft=0,
closest_for_mission_ft=0),
WaypointEvaluation(id=2, score_ratio=0, closest_for_mission_ft=60)
]
self.assertSatisfiedWaypoints(
expect, UasTelemetry.satisfied_waypoints(gpos, waypoints, logs))
# Restart waypoint path in the middle, use path in between points.
waypoints = self.waypoints_from_data([
(38, -76, 100),
(39, -77, 200),
(40, -78, 0),
])
logs = self.create_uas_logs([
(0, 38, -76, 140, 0), # Use
(1, 39, -77, 180, 0), # Use
# Restart:
(2, 38, -76, 70, 0),
(3, 39, -77, 150, 0),
(4, 40, -78, 10, 0), # Use
])
expect = [
WaypointEvaluation(id=0,
score_ratio=0.6,
closest_for_scored_approach_ft=40,
closest_for_mission_ft=30),
WaypointEvaluation(id=1,
score_ratio=0.8,
closest_for_scored_approach_ft=20,
closest_for_mission_ft=20),
WaypointEvaluation(id=2,
score_ratio=0.9,
closest_for_scored_approach_ft=10,
closest_for_mission_ft=10)
]
self.assertSatisfiedWaypoints(
expect, UasTelemetry.satisfied_waypoints(gpos, waypoints, logs))
# Sanity check waypoint scoring with interpolation.
waypoints = self.waypoints_from_data([
(38, -76, 70),
(38, -76, 110),
])
logs = self.create_uas_logs([
| |
import os
import numpy as np
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
from PIL import Image
def norm(img):
    """ Normalize an image down to a 'unit vector'.

    Returns a float32 array scaled to unit Euclidean norm. An all-zero
    input is returned as float32 zeros. (Fix: previously the zero branch
    returned the input with its original dtype, so the return dtype
    depended on the input contents.)
    """
    n = np.linalg.norm(img)
    if n == 0:
        # Avoid division by zero; still promote so the dtype is consistent.
        return img.astype(np.float32)
    return img.astype(np.float32) / n
def rgb2gray(img):
    """ Convert an RGB image to 8-bit grayscale values """
    # Per-channel luma weights for R, G, B (Rec. 709-style coefficients).
    weights = [0.2125, 0.7154, 0.0721]
    return np.dot(img, weights).astype(np.uint8)
def gray2rgb(img):
    """ Expand a 2-D grayscale image into a 3-channel (still gray) image """
    if len(img.shape) != 2:
        # Already has a channel axis; hand it back unchanged.
        return img
    return np.repeat(img[..., np.newaxis], 3, axis=2)
def rgb2hsv(img):
    """ Use matplotlib to convert rgb to hsv (TA allowed) """
    # Scale to [0, 1] for matplotlib, then back to the 0-255 byte range.
    scaled = img.astype(np.float32) / 255.
    return (rgb_to_hsv(scaled) * 255).astype(np.uint8)
def hsv2rgb(img):
    """ Use matplotlib to convert hsv to rgb (TA allowed) """
    # Scale to [0, 1] for matplotlib, then back to the 0-255 byte range.
    scaled = img.astype(np.float32) / 255.
    return (hsv_to_rgb(scaled) * 255).astype(np.uint8)
# from http://www.janeriksolem.net/2009/06/histogram-equalization-with-python-and.html
def histogram_equalization(img, bins=128, clip_limit=None):
    """ Use histogram equalization to balance contrast over the entire image """
    # hist has `bins` counts; hist_bins has bins+1 edges.
    hist, hist_bins = np.histogram(img.flatten(), bins)
    if clip_limit is not None: # Clip and redistribute (simplified)
        # clip_mask marks bins below the limit (candidates to receive mass).
        clip_mask = (hist < clip_limit)
        # NOTE(review): this "excess" sum also contributes -clip_limit for
        # every *under*-limit bin (where hist * (1 - clip_mask) is 0), and
        # divides by the count of under-limit bins (zero if none) — confirm
        # this simplification is intended vs. sum((hist - limit) over
        # clipped bins only).
        distr = np.sum(hist * (1 - clip_mask) - clip_limit) / np.sum(clip_mask)
        hist = np.clip(hist + distr * clip_mask, 0, clip_limit)
    # Cumulative distribution, normalized to the 0-255 output range.
    cdf = hist.cumsum()
    cdf = 255 * cdf / cdf[-1]
    # Map each pixel through the CDF (linear interpolation between bin edges).
    equalized = np.interp(img.flatten(), hist_bins[:-1], cdf)
    return equalized.reshape(img.shape)
# THIS IMPLEMENTATION IS INCORRECT, DO NOT USE
def CLAHE(img, clip_limit=2.0, tile_size=(8,8), bins=128):
    """ Balance contrast locally over an image using tiling approximation

    Applies clipped histogram equalization independently to each
    (u x v) tile. Unlike full CLAHE there is no bilinear interpolation
    between neighboring tiles, so tile seams may remain visible.
    """
    n, m = img.shape[:2]
    u, v = tile_size
    output = np.zeros(img.shape)
    for r in range(max(1, (n-1) // u + 1)): # Round up integer div
        for c in range(max(1, (m-1) // v + 1)):
            end_r = min(n, (r+1)*u)
            end_c = min(m, (c+1)*v)
            # Fix: the source tile's column slice previously used c*u (the
            # row tile size), reading the wrong region whenever u != v.
            output[r*u:end_r, c*v:end_c] = histogram_equalization(
                img[r*u:end_r, c*v:end_c], bins=bins, clip_limit=clip_limit)
    return output
def binary_dilation(img, iterations=1):
    """ Dilates a mask with a square structuring element """
    result = np.copy(img)
    rows, cols = img.shape[:2]
    # OR each pixel with its shifted neighbor, `iterations` times per
    # direction; the four passes together realize a square neighborhood.
    passes = (
        (np.s_[:, :cols - 1], np.s_[:, 1:]),   # move left
        (np.s_[:, 1:], np.s_[:, :cols - 1]),   # move right
        (np.s_[:rows - 1], np.s_[1:]),         # move up
        (np.s_[1:], np.s_[:rows - 1]),         # move down
    )
    for dst, src in passes:
        for _ in range(iterations):
            result[dst] |= result[src]
    return result
def binary_erosion(img, iterations=1):
    """ Erodes a mask with a square structuring element """
    result = np.copy(img)
    rows, cols = img.shape[:2]
    # AND each pixel with its shifted neighbor, `iterations` times per
    # direction; the four passes together realize a square neighborhood.
    passes = (
        (np.s_[:, :cols - 1], np.s_[:, 1:]),   # move left
        (np.s_[:, 1:], np.s_[:, :cols - 1]),   # move right
        (np.s_[:rows - 1], np.s_[1:]),         # move up
        (np.s_[1:], np.s_[:rows - 1]),         # move down
    )
    for dst, src in passes:
        for _ in range(iterations):
            result[dst] &= result[src]
    return result
def gray_dilation(img, iterations=1):
    """ Dilates a grayscale image with a square structuring element """
    # A color input is collapsed to one channel via per-pixel max.
    if len(img.shape) == 3:
        result = np.max(img, axis=2)
    else:
        result = np.copy(img)
    rows, cols = img.shape[:2]
    # Take the running max against a shifted copy, once per direction pass.
    passes = (
        (np.s_[:, :cols - 1], np.s_[:, 1:]),   # move left
        (np.s_[:, 1:], np.s_[:, :cols - 1]),   # move right
        (np.s_[:rows - 1], np.s_[1:]),         # move up
        (np.s_[1:], np.s_[:rows - 1]),         # move down
    )
    for dst, src in passes:
        for _ in range(iterations):
            np.maximum(result[dst], result[src], result[dst])
    return gray2rgb(result)
def gray_erosion(img, iterations=1):
    """ Erodes a grayscale image with a square structuring element """
    # A color input is collapsed to one channel via per-pixel max.
    if len(img.shape) == 3:
        result = np.max(img, axis=2)
    else:
        result = np.copy(img)
    rows, cols = img.shape[:2]
    # Take the running min against a shifted copy, once per direction pass.
    passes = (
        (np.s_[:, :cols - 1], np.s_[:, 1:]),   # move left
        (np.s_[:, 1:], np.s_[:, :cols - 1]),   # move right
        (np.s_[:rows - 1], np.s_[1:]),         # move up
        (np.s_[1:], np.s_[:rows - 1]),         # move down
    )
    for dst, src in passes:
        for _ in range(iterations):
            np.minimum(result[dst], result[src], result[dst])
    return gray2rgb(result)
def gray_opening(img, size=1):
    """ Computes the opening operation on a grayscale image """
    # Opening = erosion followed by dilation with the same element size.
    eroded = gray_erosion(img, iterations=size)
    return gray_dilation(eroded, iterations=size)
def gray_closing(img, size=1):
    """ Computes the closing operation on a grayscale image """
    # Closing = dilation followed by erosion with the same element size.
    dilated = gray_dilation(img, iterations=size)
    return gray_erosion(dilated, iterations=size)
def white_tophat(img, size=1):
    """ Applies a white-tophat transform to an image """
    # NOTE(review): with unsigned integer inputs this subtraction can wrap
    # around — confirm callers rely on img >= opening elementwise.
    opened = gray_opening(img, size=size)
    return img - opened
def black_tophat(img, size=1):
    """ Applies a black-tophat transform to an image """
    # NOTE(review): with unsigned integer inputs this subtraction can wrap
    # around — confirm callers rely on closing >= img elementwise.
    closed = gray_closing(img, size=size)
    return closed - img
def correlate(A, B, step=1):
    """ Correlates image B over image A. Assumes B is normalized """
    u, v = B.shape[:2]
    n, m = A.shape[:2]
    # Zero-pad A so the score map keeps A's spatial size (A must be 3-D).
    half_u = u // 2
    half_v = v // 2
    padded = np.pad(A, [(half_u, half_u), (half_v, half_v), (0, 0)],
                    mode='constant')
    scores = np.zeros((n, m))
    for r in range(0, n, step):
        for c in range(0, m, step):
            # Normalize each window; with unit-norm B the flattened dot
            # product is then a cosine-similarity score.
            window = norm(padded[r:r + u, c:c + v])
            scores[r:r + step, c:c + step] = np.vdot(window, B)
    return scores
def conv2d(img, filter):
    """ Convolves a grayscale image with a filter

    NOTE: the kernel is not flipped, so strictly this computes
    cross-correlation; the two coincide for symmetric kernels.
    """
    k1, k2 = filter.shape[:2]
    n, m = img.shape[:2]
    # Both kernel dimensions must be odd so the output stays centered.
    if k1 % 2 == 0 or k2 % 2 == 0:
        raise ValueError("Filter should have odd dimensions")
    # Zero-pad so the output has the same spatial size as the input.
    pad_r = k1 // 2
    pad_c = k2 // 2
    padded = np.pad(img, [(pad_r, pad_r), (pad_c, pad_c)], mode='constant')
    result = np.zeros(img.shape)
    for row in range(n):
        for col in range(m):
            patch = padded[row:row + k1, col:col + k2]
            result[row, col] = (filter * patch).sum()
    return result
def get_boxes(mask, radius=3, mode="square"):
    """ Extracts bounding boxes from a binary mask.

    Pixels closer than `radius` to each other are grouped into the same object.
    'mode' can be either "square" or "circular" for how neighbors are selected.
    The effect of this parameter is more apparent at the edges.

    :param mask: 2D array; nonzero pixels are treated as foreground
    :param radius: grouping distance between pixels of the same object
    :param mode: neighborhood shape, "square" or "circular"
    :return: list of [top, left, bottom, right] boxes (inclusive coordinates)
    :raises ValueError: for an unrecognized mode
    """
    # Generate the neighbor offsets first
    if mode == "square":
        neighbors = [(r, c) for r in range(-radius + 1, radius)
                            for c in range(-radius + 1, radius)]
    elif mode == "circular":
        # BUGFIX: the distance test was `r*r + c*r`, which is not the
        # Euclidean check the mode promises; use c*c.
        neighbors = [(r, c) for r in range(-radius + 1, radius)
                            for c in range(-radius + 1, radius)
                            if r * r + c * c <= radius * radius]
    else:
        raise ValueError("Unrecognized neighbor mode")
    # DFS to find all connected objects
    n, m = mask.shape[:2]
    # `np.bool` was removed in NumPy 1.24; the builtin bool works everywhere
    not_visited = mask.astype(bool)
    boxes = []
    while not_visited.any():
        # Start a new object at the first remaining foreground pixel
        sy, sx = np.argwhere(not_visited)[0]
        # A growable list replaces the fixed-size int16 queue, which could
        # silently overflow on dense masks (and clipped coords to int16 range)
        stack = [(int(sy), int(sx))]
        y1 = n; x1 = m; y2 = 0; x2 = 0  # initialize bounding box
        while stack:
            y, x = stack.pop()
            # Only propagate from in-bounds pixels that are still foreground
            if 0 <= y < n and 0 <= x < m and not_visited[y, x]:
                y1 = min(y1, y); x1 = min(x1, x)
                y2 = max(y2, y); x2 = max(x2, x)
                not_visited[y, x] = False  # stop future propagation
                stack.extend((y + dy, x + dx) for dy, dx in neighbors)
        # Save the bounding box of this object
        boxes.append([int(y1), int(x1), int(y2), int(x2)])
    return boxes
def draw_boxes(img, bounding_boxes):
    """ Finds and draws red-light bounding boxes, returns the new image.

    Boxes are (top, left, bottom, right); the input image is not modified.
    Slicing is half-open, so the bottom-right corner pixel itself stays unpainted.
    """
    out = np.copy(img)
    for top, left, bottom, right in bounding_boxes:
        # The four walls as (row index/slice, column index/slice) pairs
        walls = (
            (slice(top, bottom), left),    # left wall
            (slice(top, bottom), right),   # right wall
            (top, slice(left, right)),     # top wall
            (bottom, slice(left, right)),  # bottom wall
        )
        for rows, cols in walls:
            out[rows, cols, 0:2] = 90   # dim red and green for brightness
            out[rows, cols, 2] = 255    # saturate blue
    return out
def load_filters(lights_path):
    """Load every template image in `lights_path`, equalize and normalize it.

    Returns (filters, compound_filter): the list of normalized templates and a
    single image stacking the originals vertically (left-padded to equal width)
    for visualization.
    """
    orig_filters = []
    filters = []
    for fname in os.listdir(lights_path):
        raw = np.asarray(Image.open(os.path.join(lights_path, fname)))
        equalized = histogram_equalization(raw, clip_limit=2)
        orig_filters.append(equalized.astype(np.uint8))
        filters.append(norm(equalized))
    # Build the compound image: pad each template on the left to the widest
    # template, plus a 10px bottom margin, then stack vertically.
    widest = max(f.shape[1] for f in orig_filters)
    padded = [np.pad(f, [(0, 10), (widest - f.shape[1], 0), (0, 0)], mode='constant')
              for f in orig_filters]
    compound_filter = np.concatenate(padded, 0)
    return filters, compound_filter
# https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = "\r"):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
printEnd - Optional : | |
# This imports the "json" module from the Python standard library
# https://docs.python.org/3/library/json.html
import json
from time import sleep
from colour import Color
# This is outside the scope of beginner Python and VRC, but this is for
# something called "type-hinting" that makes Python code easier to debug
from typing import Any, Callable, Dict
# This imports the Paho MQTT library that we will use to communicate to
# other running processes
# https://github.com/eclipse/paho.mqtt.python
import paho.mqtt.client as mqtt
# This creates a new class that will contain multiple functions
# which are known as "methods"
class Sandbox():
# The "__init__" method of any class is special in Python. It's what runs when
# you create a class like `sandbox = Sandbox()`. In here, we usually put
# first-time initialization and setup code. The "self" argument is a magic
# argument that must be the first argument in any class method. This allows the code
# inside the method to access class information.
def __init__(self) -> None:
    """Set up the MQTT client, connection parameters, and the topic->handler map."""
    # Create a string attribute to hold the hostname/ip address of the MQTT server
    # we're going to connect to (an attribute is just a variable in a class).
    # Because we're running this code within a Docker Compose network,
    # using the container name of "mqtt" will work.
    self.mqtt_host = "mqtt"
    # Flag used by the drop logic; presumably 0 = not dropping — TODO confirm usage
    self.isdropping1=0
    # Create an integer attribute to hold the port number of the MQTT server
    # we're going to connect to. MQTT uses a default of port 1883, but we'll
    # add a zero, so as to not require administrator privileges from the host
    # operating system by using a low port number.
    self.mqtt_port = 18830
    # Create an attribute to hold an instance of the Paho MQTT client class
    self.mqtt_client = mqtt.Client()
    # This part is a little bit more complicated. Here, we're assigning the
    # attributes of the Paho MQTT client `on_connect` and `on_message` to handles
    # of methods in our Sandbox class, which are defined below.
    # This isn't *running* those methods, but rather creating a reference to them.
    # Once we start running the Paho MQTT client, this tells the client to execute
    # these methods after it establishes the connection, and after every message
    # it receives, respectively.
    self.mqtt_client.on_connect = self.on_connect
    self.mqtt_client.on_message = self.on_message
    # Create a string attribute to hold the commonly used prefix used in MQTT topics
    self.topic_prefix = "vrc"
    # Here, we're creating a dictionary of MQTT topic names to method handles
    # (as we discussed above). A dictionary is a data structure that allows use to
    # obtain values based on keys. Think of a dictionary of state names as keys
    # and their capitals as values. By using the state name as a key, you can easily
    # find the associated capital. However, this does not work in reverse. So here,
    # we're creating a dictionary of MQTT topics, and the methods we want to run
    # whenever a message arrives on that topic.
    self.topic_map: Dict[str, Callable[[dict], None]] = {
        # This is what is known as a "f-string". This allows you to easily inject
        # variables into a string without needing to combine lots of
        # strings together. Scroll down farther to see what `self.show_velocity` is.
        # https://realpython.com/python-f-strings/#f-strings-a-new-and-improved-way-to-format-strings-in-python
        f"{self.topic_prefix}/velocity": self.show_velocity,
        f"{self.topic_prefix}/mytestopen": self.open_servo,
        f"{self.topic_prefix}/mytestclose": self.close_servo,
        # NOTE(review): `visible_apriltag` is not visible in this part of the
        # class — presumably defined further down; confirm it exists.
        "vrc/apriltags/visible_tags": self.visible_apriltag,
    }
# Create a new method to effectively run everything.
def run(self) -> None:
    """Connect to the MQTT broker and block forever, dispatching messages."""
    # Connect the Paho MQTT client to the MQTT server with the given host and port
    # The 60 is a keep-alive timeout that defines how long in seconds
    # the connection should stay alive if connection is lost.
    self.mqtt_client.connect(host=self.mqtt_host, port=self.mqtt_port, keepalive=60)
    # This method of the Paho MQTT client tells it to start running in a loop
    # forever until it is stopped. This is a blocking function, so this line
    # will run forever until the entire program is stopped. That is why we've
    # setup the `on_message` callback you'll see below.
    self.mqtt_client.loop_forever()
# As we described above, this method runs after the Paho MQTT client has connected
# to the server. This is generally used to do any setup work after the connection
# and subscribe to topics.
def on_connect(self, client: mqtt.Client, userdata: Any, rc: int, properties: mqtt.Properties = None) -> None:
    """Runs once after connecting; subscribes to every topic under our prefix."""
    # Print the result code to the console for debugging purposes.
    print(f"Connected with result code {str(rc)}")
    # After the MQTT client has connected to the server, this line has the client
    # connect to all topics that begin with our common prefix. The "#" character
    # acts as a wildcard. If you only wanted to subscribe to certain topics,
    # you would run this method multiple times with the exact topics you wanted
    # each time, such as:
    # client.subscribe(f"{self.topic_prefix}/velocity")
    # client.subscribe(f"{self.topic_prefix}/location")
    client.subscribe(f"{self.topic_prefix}/#")
    # If you wanted to be more clever, you could also iterate through the topic map
    # in the `__init__` method, and subscribe to each topic in the keys.
    # For example:
    # for topic in self.topic_map.keys():
    #     client.subscribe(topic)
# As we described above, this method runs after any message on a topic
# that has been subscribed to has been received.
def on_message(self, client: mqtt.Client, userdata: Any, msg: mqtt.MQTTMessage) -> None:
    """Dispatch an incoming MQTT message to the handler registered in topic_map."""
    # Print the topic name and the message payload to the console
    # for debugging purposes.
    #print(f"{msg.topic}: f{str(msg.payload)}")
    # First, check if the topic of the message we've received is inside the topic
    # map we've created.
    if msg.topic in self.topic_map:
        print(f"{msg.topic}: f{str(msg.payload)}")
        # We can't send JSON (dictionary) data over MQTT, so we send it as an
        # encoded string. Here, we convert that encoded string back to
        # JSON information for convenience.
        payload = json.loads(msg.payload)
        # Lookup the method for the topic, and execute it
        # (with the parentheses) and pass it the payload of the message.
        self.topic_map[msg.topic](payload)
    # By not creating an `else` statement here, we effectively discard
    # any message that wasn't from a topic in our topic map.
# ================================================================================
# Now the training wheels come off! Write your custom message handlers here.
# Below is a very simple example to look at it. Ideally, you would want to
# have a message handler do something more useful than just printing to
# the console.
def show_velocity(self, data: dict) -> None:
    """Print the incoming velocity vector (m/s) from a velocity payload."""
    velocity = tuple(data[axis] for axis in ("vX", "vY", "vZ"))
    print(f"Velocity information: {velocity} m/s")
# ================================================================================
# Here is an example on how to publish a message to an MQTT topic to
# perform an action
def close_servo(self, data: dict) -> None:
    """Publish a 'close' command for servo 0 to the PCC servo topic.

    The incoming `data` payload is ignored; the command is fixed.
    """
    command = {"servo": 0, "action": "close"}
    # MQTT payloads are strings, so the dict is JSON-encoded before publishing
    self.mqtt_client.publish(
        topic=f"{self.topic_prefix}/pcc/set_servo_open_close",
        payload=json.dumps(command))
def open_servo(self, data: dict) -> None:
    """Publish an 'open' command for servo 0 to the PCC servo topic.

    The incoming `data` payload is ignored; the command is fixed.
    """
    command = {"servo": 0, "action": "open"}
    # MQTT payloads are strings, so the dict is JSON-encoded before publishing
    self.mqtt_client.publish(
        topic=f"{self.topic_prefix}/pcc/set_servo_open_close",
        payload=json.dumps(command))
def parse_hex_color(self, color):
    """Convert a colour.Color-like object into an (R, G, B) tuple of 0-255 ints."""
    return tuple(round(channel * 255) for channel in color.get_rgb())
def send_color_by_distance_mesg(self,distance):
#print(f"::::::Ready | |
:width: 100%
:align: left
Display of the NYC Metro Area, with extra annotations beyond what :py:meth:`display_fips <covid19_stats.engine.viz.display_fips>` can do.
Here are the arguments.
:param str msaname: the identifying name for the `MSA <msa_>`_, for example ``nyc``.
:param fig: the :py:class:`Figure <matplotlib.figure.Figure>` onto which to draw this :py:class:`GeoAxes <cartopy.mpl.geoaxes.GeoAxes>`.
:param bool doShow: if ``False``, then just display the figure. If ``True``, also save to a file, ``msa_<msaname>_counties.png``. Default is ``False``.
:rtype: :py:class:`GeoAxes <cartopy.mpl.geoaxes.GeoAxes>`
.. _msa: https://en.wikipedia.org/wiki/Metropolitan_statistical_area
"""
fig.set_size_inches([18,18])
#
data_msa = core.get_msa_data( msaname )
bdict = core.get_boundary_dict( data_msa[ 'fips' ] )
bbox = gis.calculate_total_bbox( chain.from_iterable( bdict.values( ) ) )
ax = create_and_draw_fromfig( fig, bbox , **kwargs)
fc = list( to_rgba( '#1f77b4' ) )
fc[-1] = 0.25
for fips in sorted( bdict ):
for shape in bdict[ fips ]:
poly = Polygon(
shape, closed = True,
edgecolor = 'k', linewidth = 2.0, linestyle = 'dashed',
facecolor = tuple(fc), alpha = 1.0, transform = ccrs.PlateCarree( ) )
ax.add_patch( poly )
lng_cent = shape[:,0].mean( )
lat_cent = shape[:,1].mean( )
ax.text(
lng_cent, lat_cent, fips, fontsize = 10, fontweight = 'bold', color = 'red',
transform = ccrs.PlateCarree( ) )
#
## now info on this MSA
ax.text( 0.01, 0.98, 'COUNTIES IN %s.' % data_msa[ 'region name' ], fontsize = 20, fontweight = 'bold',
transform = ax.transAxes, horizontalalignment = 'left', verticalalignment = 'top' )
if not doShow: return ax
#
canvas = FigureCanvasAgg( fig )
canvas.print_figure( 'msa_%s_counties.png' % msaname, bbox_inches = 'tight' )
autocrop_image.autocrop_image( 'msa_%s_counties.png' % msaname )
return ax
def plot_cases_or_deaths_bycounty(
inc_data, regionName, fig, type_disp = 'cases', days_from_beginning = 0,
doTitle = True, plot_artists = { },
poly_line_width = 1.0, legend_text_scaling = 1.0,
doSmarter = False, rows = 1, cols = 1, num = 1 ):
"""
The lower-level function that displays the status of COVID-19 cases or deaths given an incidident data :py:class:`dict`, ``inc_data``. It displays the status of cumulative COVID-19 cases or deaths, a specific number of days from the beginning, coloring the counties in that region according to the legend maximum, and places the resulting :py:class:`GeoAxes <cartopy.mpl.geoaxes.GeoAxes>` at a specific location in a :py:class:`Figure <matplotlib.figure.Figure>` grid of :py:class:`Axes <matplotlib.axes.Axes>` or :py:class:`GeoAxes <cartopy.mpl.geoaxes.GeoAxes>`.
Instead of returning a :py:class:`GeoAxes <cartopy.mpl.geoaxes.GeoAxes>`, this initializes a :py:class:`dict` of matplotlib objects, ``plot_artists``. In this way, subsequent plots, e.g. for different days after the beginnning, do not have to perform the relatively costly operation of recreating the :py:class:`GeoAxes <cartopy.mpl.geoaxes.GeoAxes>` and fully painting in the :py:class:`Polygon <matplotlib.patches.Polygon>` patches; instead, these :py:class:`Polygon <matplotlib.patches.Polygon>` patches are re-colored and necessary :py:class:`Text <matplotlib.text.Text>` artists' strings are changed.
.. _plot_artists_dict_discussion:
This :py:class:`dict`, ``plot_artists``, has the following keys,
* ``axes``: when initialized, the :py:class:`GeoAxes <cartopy.mpl.geoaxes.GeoAxes>` that consists of all counties, with COVID-19 cases or deaths, to display.
* ``sm``: the :py:class:`ScalarMappable <matplotlib.cm.ScalarMappable>` describing the coloration by value for each county.
Furthermore, it is easier to show rather than tell. :numref:`viz_plot_cases_or_deaths_bycounty_nyc` depicts both cumulative COVID-19 cases and deaths for the NYC metro area, 150 days after this metro's first COVID-19 incident.
.. _viz_plot_cases_or_deaths_bycounty_nyc:
.. figure:: /_static/viz/viz_plot_cases_or_deaths_bycounty_nyc.png
:width: 100%
:align: left
On the left, is the COVID-19 cumulative cases, and on the right, is the COVID-19 cumulative deaths, for the NYC metro area, 150 days after its first COVID-19 incident. The color limits for cases (left) is :math:`1.7\\times 10^6`, while the color limits for death (right) is :math:`5.6\\times 10^4`. We have chosen to display the titles over both plots. Color scaling is logarithmic.
Here are the arguments.
:param dict inc_data: the data for incidence of COVID-19 cases and deaths for a given geographical region. See :py:meth:`get_incident_data <covid19_stats.engine.core.get_incident_data>` for the format of the output data.
:param str regionName: the name of the region to display in title plots. For example, in :numref:`viz_plot_cases_or_deaths_bycounty_nyc`, this is ``NYC Metro Area``.
:param fig: the :py:class:`Figure <matplotlib.figure.Figure>` onto which to create a :py:class:`GeoAxes <cartopy.mpl.geoaxes.GeoAxes>` (stored into the ``plot_artists`` :py:class:`dict`) containing geographic features. Last three arguments -- ``rows``, ``cols``, and ``num`` -- describe the relative placement of the created :py:class:`GeoAxes <cartopy.mpl.geoaxes.GeoAxes>`. See :py:meth:`add_subplot <matplotlib.figure.Figure.add_subplot>` for those three arguments' meanings.
:param str type_disp: if ``cases``, then show cumulative COVID-19 cases. If ``deaths``, then show cumulative COVID-19 deaths. Can only be ``cases`` or ``deaths``.
:param int days_from_beginning: days after first incident of COVID-19 in this region. Must be :math:`\ge 0`.
:param bool doTitle: if ``True``, then display the title over the plot. Default is ``True``.
:param dict plot_artists: this contains the essential plotting objects for quicker re-display when plotting different days. Look at :ref:`this description <plot_artists_dict_discussion>`.
:param float poly_line_width: the line width of the counties to draw in the plot.
:param float legend_text_scaling: sometimes the text annotations showing the date, number of incident days, and cumulative deaths or cases is *too large*. This is a multiplier on that text's font size. Default is 1.0, but must be :math:`> 0`.
:param bool doSmarter: if ``False``, then make a plot tailored for small regions (relative to the size of the earth), such as states or MSA_\ s. If ``True``, then make a plot tailored for large regions such as the CONUS_. Default is ``False``.
:param int rows: the number of rows for axes in the :py:class:`Figure <matplotlib.figure.Figure>` grid. Must be :math:`\ge 1`, and by default is 1.
:param int cols: the number of columns for axes in the :py:class:`Figure <matplotlib.figure.Figure>` grid. Must be :math:`\ge 1`, and by default is 1.
:param int num: the plot number of the :py:class:`GeoAxes <cartopy.mpl.geoaxes.GeoAxes>` in this :py:class:`Figure <matplotlib.figure.Figure>` grid. Must be :math:`\ge 1` and :math:`\le`\ ``rows`` times ``columns``. Its default is 1. Look at :py:meth:`add_subplot <matplotlib.figure.Figure.add_subplot>` for its meaning.
.. _MSA: https://en.wikipedia.org/wiki/Metropolitan_statistical_area
.. _CONUS: https://en.wikipedia.org/wiki/Contiguous_United_States
"""
cases_dict = { 'cases' : 'cases', 'deaths' : 'death' }
assert( type_disp in cases_dict )
assert( days_from_beginning >= 0 )
assert( days_from_beginning <= inc_data[ 'last day' ] )
assert( legend_text_scaling > 0 )
key = cases_dict[ type_disp ]
#
## NOW CREATE BASEMAP HIGH REZ
## LAZY LOADING
boundaries = inc_data['boundaries']
if 'axes' not in plot_artists:
if not doSmarter:
ax = create_and_draw_fromfig( fig, inc_data[ 'bbox' ], rows = rows, cols = cols, num = num )
else: ax = create_and_draw_fromfig(
fig, inc_data[ 'bbox' ],
river_linewidth = 1.0, river_alpha = 0.15,
coast_linewidth = 1.0, coast_alpha = 0.25, mult_bounds_lat = 1.25,
rows = rows, cols = cols, num = num )
maxnum = max(
list(map(lambda fips: inc_data[ 'df' ][ '%s_%s' % ( type_disp, fips ) ].max( ),
inc_data[ 'fips' ] ) ) )
maxnum_colorbar = max(1.0, find_plausible_maxnum( maxnum ) )
plot_artists[ 'axes' ] = ax
plot_artists[ 'sm' ] = ScalarMappable( norm = LogNorm( 1.0, maxnum_colorbar ), cmap = 'jet' )
#
## after initialization
df_dfm = inc_data['df'][ inc_data['df']['days_from_beginning'] == days_from_beginning ].copy( )
ax = plot_artists[ 'axes' ]
sm = plot_artists[ 'sm' ]
for fips in sorted( boundaries ):
nums = df_dfm['%s_%s' % ( type_disp, fips )].max( )
if nums == 0: fc = ( 1.0, 1.0, 1.0, 0.0 )
else: fc = sm.to_rgba( nums )
art_key = '%s_polys_%s' % ( key, fips )
if art_key not in plot_artists:
plot_artists.setdefault( art_key, [ ] )
for shape in boundaries[ fips ]:
poly = Polygon(
shape, closed = True,
linewidth = poly_line_width, linestyle = 'dashed',
facecolor = fc, alpha = 0.4, transform = ccrs.PlateCarree( ) )
ax.add_patch( poly )
plot_artists[ art_key ].append( poly )
else:
for poly in plot_artists[ art_key ]:
poly.set_facecolor( fc )
poly.set_alpha( 0.4 )
#
## now add the colorbar associated with sm
if 'cb' not in plot_artists:
cb = my_colorbar( sm, ax, alpha = 0.8 )
cb.set_label( 'number of %s' % type_disp, fontsize = 18, fontweight = 'bold' )
plot_artists[ 'cb' ] = cb
#
## now put in the legend in upper left corner, fontsize = 14, weight = bold
## following info: date, days after beginning, number of cases
date_s = df_dfm.date.max().strftime( '%d %B %Y' )
num_tot = df_dfm[ key ].max( )
if '%s_text' | |
import shutil
import tempfile
class SSFile:
"""
Abstract base class for all the supported file types in ShapeShifter. Subclasses must implement reading a file to pandas and exporting a dataframe to the filetype
"""
def __init__(self, filePath, fileType):
    """
    Store the file's location and declared type, and detect gzip compression
    from the file extension.

    :param filePath: string path to the data file
    :param fileType: string identifier of the file format (e.g. 'tsv', 'csv')
    """
    self.filePath=filePath
    self.fileType=fileType
    # True when the path's final extension is '.gz' (see __is_gzipped)
    self.isGzipped= self.__is_gzipped()
def read_input_to_pandas(self, columnList=[], indexCol="Sample"):
    """
    Reads from a file into a Pandas data frame. File may be gzipped. Must be implemented by subclasses

    :param columnList: List of string column names to be read in. If blank, all columns will be read in
    :param indexCol: String name of the column representing the index of the data set
    :return: Pandas data frame with the requested data
    :raises NotImplementedError: always, in this base class
    """
    raise NotImplementedError("Reading from this file type is not currently supported.")
def export_filter_results(self, inputSSFile, column_list=[], query=None, transpose=False, include_all_columns=False,
gzip_results=False, index_col="Sample"):
"""
Filters and then exports data to a file
:param inputSSFile: SSFile object representing the file to be read and filtered
:param column_list: list of columns to include in the output. If blank, all columns will be included.
:param query: string representing the query or filter to apply to the data set
:param transpose: boolean indicating whether the results will be transposed
:param include_all_columns: boolean indicating whether to include all columns in the output. If True, overrides columnList
:param gzip_results: boolean indicating whether the resulting file will be gzipped
:param index_col: string name of the index column of the data set
"""
df = None
includeIndex = False
null = 'NA'
query, inputSSFile, df, includeIndex = self._prep_for_export(inputSSFile, column_list, query, transpose,
include_all_columns, df, includeIndex, index_col)
self.write_to_file(df, gzip_results, includeIndex, null)
def _prep_for_export(self, inputSSFile, columnList, query, transpose, includeAllColumns, df, includeIndex,
                     indexCol):
    """
    Prepares a file to be exported by checking query syntax, unzipping the input, filtering, and transposing the data. This function is used
    in every file type's export_filter_results function with the exception of SQLiteFile

    :param inputSSFile: SSFile containing the data to be filtered
    :param columnList: list of column names to be included in the output. If the list is empty, all columns will be included
    :param query: string representing the query or filter to be applied to the data set
    :param transpose: boolean indicating if the resulting data should be transposed
    :param includeAllColumns: boolean indicating to include all columns in the output. If True, it overrides columnList
    :param df: the Pandas data frame that will contain the results of the filters
    :param includeIndex: boolean that will store whether or not the output file will include the index column
    :param indexCol: string representing the name of the index column of the data set
    :return: updated query, inputSSFile, df, includeIndex. These updated values will be used by export_filter_results
    """
    # Rewrite "== None"/"!= None" comparisons into pandas-legal NaN tests
    if query != None:
        query = self._translate_null_query(query)
    df = inputSSFile._filter_data(columnList=columnList, query=query,
                                  includeAllColumns=includeAllColumns, indexCol=indexCol)
    if transpose:
        # Move the index column into the frame's index (when present) so the
        # transpose turns sample names into column headers
        df = df.set_index(indexCol) if indexCol in df.columns else df
        df = df.transpose()
        # A transposed frame keeps its labels in the index, so it must be written
        includeIndex = True
    #TODO: remove returning inputSSFile for every file type, it is no longer needed since gzip is taken care of elsewhere
    return query, inputSSFile, df, includeIndex
def factory(filePath, type=None):
    """
    Constructs the appropriate subclass object based on the type of file passed in

    :param filePath: string representing a file's path
    :param type: string representing the type of file; inferred from the
        extension when omitted (note: this parameter shadows the builtin `type`)
    :return: SSFile subclass object
    :raises Exception: if the file type cannot be determined or is unsupported
    """
    # Infer the file type from the extension when the caller does not specify one
    if type==None:
        type = SSFile.__determine_extension(filePath)
    if type.lower() == 'parquet': return ParquetFile.ParquetFile(filePath, type)
    elif type.lower() == 'tsv': return TSVFile.TSVFile(filePath,type)
    elif type.lower() == 'csv': return CSVFile.CSVFile(filePath,type)
    elif type.lower() == 'json': return JSONFile.JSONFile(filePath,type)
    elif type.lower() == 'excel': return ExcelFile.ExcelFile(filePath,type)
    elif type.lower() == 'hdf5': return HDF5File.HDF5File(filePath,type)
    elif type.lower() == 'pickle': return PickleFile.PickleFile(filePath,type)
    elif type.lower() == 'msgpack': return MsgPackFile.MsgPackFile(filePath, type)
    elif type.lower() == 'stata': return StataFile.StataFile(filePath,type)
    elif type.lower() == 'sqlite': return SQLiteFile.SQLiteFile(filePath,type)
    elif type.lower() == 'html': return HTMLFile.HTMLFile(filePath,type)
    elif type.lower() == 'arff': return ARFFFile.ARFFFile(filePath,type)
    elif type.lower() == 'gct': return GCTFile.GCTFile(filePath,type)
    elif type.lower() == 'jupyternotebook': return JupyterNotebookFile.JupyterNBFile(filePath,type)
    elif type.lower() == 'rmarkdown': return RMarkdownFile.RMarkdownFile(filePath,type)
    elif type.lower() == 'kallistotpm': return KallistoTPMFile.KallistoTPMFile(filePath,type)
    elif type.lower() == 'kallisto_est_counts': return Kallisto_est_counts_File.Kallisto_est_counts_File(filePath,type)
    elif type.lower() == 'salmontpm': return SalmonTPMFile.SalmonTPMFile(filePath, type)
    elif type.lower() == 'salmonnumreads': return SalmonNumReadsFile.SalmonNumReadsFile(filePath,type)
    else:
        # NOTE(review): this message omits several types the dispatch above
        # accepts (JupyterNotebook, RMarkdown, Kallisto*, Salmon*)
        raise Exception("File type not recognized. Supported file types include: TSV, CSV, Parquet, JSON, Excel, HDF5, Pickle, MsgPack, Stata, SQLite, HTML, ARFF, GCT")
# Pre-decorator idiom: rebind the plain function as a static method on the class
factory=staticmethod(factory)
def __determine_extension(fileName):
"""
Determines the file type of a given file based off its extension
:param fileName: Name of a file whose extension will be examined
:return: string representing the file type indicated by the file's extension
"""
extensions = fileName.rstrip("\n").split(".")
if len(extensions) > 1:
extension = extensions[len(extensions) - 1]
if extension == 'gz':
extension = extensions[len(extensions) - 2]
else:
extension = None
if extension == "tsv" or extension == "txt":
return 'tsv'
elif extension == "csv":
return 'csv'
elif extension == "json":
return 'json'
elif extension == "xlsx":
return 'excel'
elif extension == "hdf" or extension == "h5":
return 'hdf5'
elif extension == "pq":
return 'parquet'
elif extension == "mp":
return 'msgpack'
elif extension == "dta":
return 'stata'
elif extension == "pkl":
return 'pickle'
elif extension == "html":
return 'html'
elif extension == "db":
return 'sqlite'
elif extension == "arff":
return 'arff'
elif extension == "gct":
return 'gct'
elif extension == "ipynb":
return 'jupyternotebook'
elif extension == "rmd":
return 'rmarkdown'
else:
raise Exception("Error: Extension on " + fileName + " not recognized. Please use appropriate file extensions or explicitly specify file type.")
__determine_extension = staticmethod(__determine_extension)
def write_to_file(self, df, gzipResults=False, includeIndex=False, null='NA', indexCol="Sample", transpose=False):
    """
    Writes a Pandas data frame to a file. Must be implemented by subclasses

    :param df: Pandas data frame to be written to file
    :param gzipResults: boolean indicating whether the written file will be gzipped
    :param includeIndex: boolean indicating whether the index column should be written to the file
    :param null: string representing how null or None values should be represented in the output file
    :param indexCol: string name of the index column (interpretation is left to subclasses)
    :param transpose: whether the output should be transposed (interpretation is left to subclasses)
    :raises NotImplementedError: always, in this base class
    """
    raise NotImplementedError("Writing to this file type is not currently supported.")
def _update_index_col(self, df, indexCol="Sample"):
    """
    Function for internal use. If the given index column is not in the data frame, it will default to the first column name
    """
    # NOTE(review): this currently returns None on every path and never performs
    # the documented "default to the first column" fallback — it looks unfinished;
    # confirm the intended behavior before relying on it.
    if indexCol not in df.columns:
        return
def __is_gzipped(self):
"""
Function for internal use. Checks if a file is gzipped based on its extension
"""
extensions = self.filePath.rstrip("\n").split(".")
if extensions[len(extensions) - 1] == 'gz':
return True
return False
def _filter_data(self, columnList=[], query=None,
includeAllColumns=False, indexCol="Sample"):
"""
Filters a data set down according to queries and requested columns
:param columnList: List of string column names to include in the results. If blank, all columns will be included
:param query: String representing a query to be applied to the data set
:param includeAllColumns: boolean indicating whether all columns should be included. If true, overrides columnList
:param indexCol: string representing the name of the index column of the data set
:return: filtered Pandas data frame
"""
if includeAllColumns:
columnList = []
df = self.read_input_to_pandas(columnList, indexCol)
self.__report_if_missing_columns(df, [indexCol])
df = self.__replace_index(df, indexCol)
if query != None:
df = df.query(query)
return df
if len(columnList) == 0 and query == None:
df = self.read_input_to_pandas(columnList, indexCol)
self.__report_if_missing_columns(df, [indexCol])
df = self.__replace_index(df, indexCol)
return df
if query != None:
columnNamesFromQuery = self.__parse_column_names_from_query(query)
columnList = columnNamesFromQuery + columnList
if indexCol not in columnList:
columnList.insert(0, indexCol)
else:
columnList.insert(0, columnList.pop(columnList.index(indexCol)))
df = self.read_input_to_pandas(columnList, indexCol)
self.__report_if_missing_columns(df, columnList)
if query != None:
df = df.query(query)
return df
def _translate_null_query(self, query):
"""
For internal use only. Because pandas does not support querying for null values by "columnname == None", this function translates such queries into valid syntax
"""
regex1 = r"\S*\s*!=\s*None\s*"
regex2 = r"\S*\s*==\s*None\s*"
matchlist1 = re.findall(regex1, query, flags=0)
matchlist2 = re.findall(regex2, query, flags=0)
for match in matchlist1:
col = match.split("!=")[0].rstrip()
query = query.replace(match, col + "==" + col + " ")
for match in matchlist2:
col = match.split("==")[0].rstrip()
query = query.replace(match, col + "!=" + col + " ")
return query
def get_column_names(self) -> list:
"""
Retrieves all column names from a data set stored in a parquet file
:return: All column names
:rtype: list
"""
raise NotImplementedError("This method should | |
<filename>sdk/python/pulumi_azure/costmanagement/resource_group_export.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ResourceGroupExportArgs', 'ResourceGroupExport']
@pulumi.input_type
class ResourceGroupExportArgs:
    """
    The set of input arguments for constructing a ResourceGroupExport
    resource.

    Generated by the Pulumi Terraform Bridge (tfgen); keep the attribute
    names and ``pulumi.set``/``pulumi.get`` plumbing exactly as generated --
    ``@pulumi.input_type`` introspects this shape.
    """
    def __init__(__self__, *,
                 delivery_info: pulumi.Input['ResourceGroupExportDeliveryInfoArgs'],
                 query: pulumi.Input['ResourceGroupExportQueryArgs'],
                 recurrence_period_end: pulumi.Input[str],
                 recurrence_period_start: pulumi.Input[str],
                 recurrence_type: pulumi.Input[str],
                 resource_group_id: pulumi.Input[str],
                 active: Optional[pulumi.Input[bool]] = None,
                 name: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a ResourceGroupExport resource.
        :param pulumi.Input['ResourceGroupExportDeliveryInfoArgs'] delivery_info: A `delivery_info` block as defined below.
        :param pulumi.Input['ResourceGroupExportQueryArgs'] query: A `query` block as defined below.
        :param pulumi.Input[str] recurrence_period_end: The date the export will stop capturing information.
        :param pulumi.Input[str] recurrence_period_start: The date the export will start capturing information.
        :param pulumi.Input[str] recurrence_type: How often the requested information will be exported. Valid values include `Annually`, `Daily`, `Monthly`, `Weekly`.
        :param pulumi.Input[str] resource_group_id: The id of the resource group in which to export information.
        :param pulumi.Input[bool] active: Is the cost management export active? Default is `true`.
        :param pulumi.Input[str] name: Specifies the name of the Cost Management Export. Changing this forces a new resource to be created.
        """
        # required arguments are always stored
        pulumi.set(__self__, "delivery_info", delivery_info)
        pulumi.set(__self__, "query", query)
        pulumi.set(__self__, "recurrence_period_end", recurrence_period_end)
        pulumi.set(__self__, "recurrence_period_start", recurrence_period_start)
        pulumi.set(__self__, "recurrence_type", recurrence_type)
        pulumi.set(__self__, "resource_group_id", resource_group_id)
        # optional arguments are stored only when supplied
        if active is not None:
            pulumi.set(__self__, "active", active)
        if name is not None:
            pulumi.set(__self__, "name", name)
    @property
    @pulumi.getter(name="deliveryInfo")
    def delivery_info(self) -> pulumi.Input['ResourceGroupExportDeliveryInfoArgs']:
        """
        A `delivery_info` block as defined below.
        """
        return pulumi.get(self, "delivery_info")
    @delivery_info.setter
    def delivery_info(self, value: pulumi.Input['ResourceGroupExportDeliveryInfoArgs']):
        pulumi.set(self, "delivery_info", value)
    @property
    @pulumi.getter
    def query(self) -> pulumi.Input['ResourceGroupExportQueryArgs']:
        """
        A `query` block as defined below.
        """
        return pulumi.get(self, "query")
    @query.setter
    def query(self, value: pulumi.Input['ResourceGroupExportQueryArgs']):
        pulumi.set(self, "query", value)
    @property
    @pulumi.getter(name="recurrencePeriodEnd")
    def recurrence_period_end(self) -> pulumi.Input[str]:
        """
        The date the export will stop capturing information.
        """
        return pulumi.get(self, "recurrence_period_end")
    @recurrence_period_end.setter
    def recurrence_period_end(self, value: pulumi.Input[str]):
        pulumi.set(self, "recurrence_period_end", value)
    @property
    @pulumi.getter(name="recurrencePeriodStart")
    def recurrence_period_start(self) -> pulumi.Input[str]:
        """
        The date the export will start capturing information.
        """
        return pulumi.get(self, "recurrence_period_start")
    @recurrence_period_start.setter
    def recurrence_period_start(self, value: pulumi.Input[str]):
        pulumi.set(self, "recurrence_period_start", value)
    @property
    @pulumi.getter(name="recurrenceType")
    def recurrence_type(self) -> pulumi.Input[str]:
        """
        How often the requested information will be exported. Valid values include `Annually`, `Daily`, `Monthly`, `Weekly`.
        """
        return pulumi.get(self, "recurrence_type")
    @recurrence_type.setter
    def recurrence_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "recurrence_type", value)
    @property
    @pulumi.getter(name="resourceGroupId")
    def resource_group_id(self) -> pulumi.Input[str]:
        """
        The id of the resource group in which to export information.
        """
        return pulumi.get(self, "resource_group_id")
    @resource_group_id.setter
    def resource_group_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_id", value)
    @property
    @pulumi.getter
    def active(self) -> Optional[pulumi.Input[bool]]:
        """
        Is the cost management export active? Default is `true`.
        """
        return pulumi.get(self, "active")
    @active.setter
    def active(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "active", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the name of the Cost Management Export. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
@pulumi.input_type
class _ResourceGroupExportState:
    """
    State properties used for looking up and filtering ResourceGroupExport
    resources; every field is optional.

    Generated by the Pulumi Terraform Bridge (tfgen); keep the attribute
    names and ``pulumi.set``/``pulumi.get`` plumbing exactly as generated --
    ``@pulumi.input_type`` introspects this shape.
    """
    def __init__(__self__, *,
                 active: Optional[pulumi.Input[bool]] = None,
                 delivery_info: Optional[pulumi.Input['ResourceGroupExportDeliveryInfoArgs']] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 query: Optional[pulumi.Input['ResourceGroupExportQueryArgs']] = None,
                 recurrence_period_end: Optional[pulumi.Input[str]] = None,
                 recurrence_period_start: Optional[pulumi.Input[str]] = None,
                 recurrence_type: Optional[pulumi.Input[str]] = None,
                 resource_group_id: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering ResourceGroupExport resources.
        :param pulumi.Input[bool] active: Is the cost management export active? Default is `true`.
        :param pulumi.Input['ResourceGroupExportDeliveryInfoArgs'] delivery_info: A `delivery_info` block as defined below.
        :param pulumi.Input[str] name: Specifies the name of the Cost Management Export. Changing this forces a new resource to be created.
        :param pulumi.Input['ResourceGroupExportQueryArgs'] query: A `query` block as defined below.
        :param pulumi.Input[str] recurrence_period_end: The date the export will stop capturing information.
        :param pulumi.Input[str] recurrence_period_start: The date the export will start capturing information.
        :param pulumi.Input[str] recurrence_type: How often the requested information will be exported. Valid values include `Annually`, `Daily`, `Monthly`, `Weekly`.
        :param pulumi.Input[str] resource_group_id: The id of the resource group in which to export information.
        """
        # every field is optional; only store what the caller supplied
        if active is not None:
            pulumi.set(__self__, "active", active)
        if delivery_info is not None:
            pulumi.set(__self__, "delivery_info", delivery_info)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if query is not None:
            pulumi.set(__self__, "query", query)
        if recurrence_period_end is not None:
            pulumi.set(__self__, "recurrence_period_end", recurrence_period_end)
        if recurrence_period_start is not None:
            pulumi.set(__self__, "recurrence_period_start", recurrence_period_start)
        if recurrence_type is not None:
            pulumi.set(__self__, "recurrence_type", recurrence_type)
        if resource_group_id is not None:
            pulumi.set(__self__, "resource_group_id", resource_group_id)
    @property
    @pulumi.getter
    def active(self) -> Optional[pulumi.Input[bool]]:
        """
        Is the cost management export active? Default is `true`.
        """
        return pulumi.get(self, "active")
    @active.setter
    def active(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "active", value)
    @property
    @pulumi.getter(name="deliveryInfo")
    def delivery_info(self) -> Optional[pulumi.Input['ResourceGroupExportDeliveryInfoArgs']]:
        """
        A `delivery_info` block as defined below.
        """
        return pulumi.get(self, "delivery_info")
    @delivery_info.setter
    def delivery_info(self, value: Optional[pulumi.Input['ResourceGroupExportDeliveryInfoArgs']]):
        pulumi.set(self, "delivery_info", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the name of the Cost Management Export. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def query(self) -> Optional[pulumi.Input['ResourceGroupExportQueryArgs']]:
        """
        A `query` block as defined below.
        """
        return pulumi.get(self, "query")
    @query.setter
    def query(self, value: Optional[pulumi.Input['ResourceGroupExportQueryArgs']]):
        pulumi.set(self, "query", value)
    @property
    @pulumi.getter(name="recurrencePeriodEnd")
    def recurrence_period_end(self) -> Optional[pulumi.Input[str]]:
        """
        The date the export will stop capturing information.
        """
        return pulumi.get(self, "recurrence_period_end")
    @recurrence_period_end.setter
    def recurrence_period_end(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "recurrence_period_end", value)
    @property
    @pulumi.getter(name="recurrencePeriodStart")
    def recurrence_period_start(self) -> Optional[pulumi.Input[str]]:
        """
        The date the export will start capturing information.
        """
        return pulumi.get(self, "recurrence_period_start")
    @recurrence_period_start.setter
    def recurrence_period_start(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "recurrence_period_start", value)
    @property
    @pulumi.getter(name="recurrenceType")
    def recurrence_type(self) -> Optional[pulumi.Input[str]]:
        """
        How often the requested information will be exported. Valid values include `Annually`, `Daily`, `Monthly`, `Weekly`.
        """
        return pulumi.get(self, "recurrence_type")
    @recurrence_type.setter
    def recurrence_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "recurrence_type", value)
    @property
    @pulumi.getter(name="resourceGroupId")
    def resource_group_id(self) -> Optional[pulumi.Input[str]]:
        """
        The id of the resource group in which to export information.
        """
        return pulumi.get(self, "resource_group_id")
    @resource_group_id.setter
    def resource_group_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_group_id", value)
class ResourceGroupExport(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
active: Optional[pulumi.Input[bool]] = None,
delivery_info: Optional[pulumi.Input[pulumi.InputType['ResourceGroupExportDeliveryInfoArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
query: Optional[pulumi.Input[pulumi.InputType['ResourceGroupExportQueryArgs']]] = None,
recurrence_period_end: Optional[pulumi.Input[str]] = None,
recurrence_period_start: Optional[pulumi.Input[str]] = None,
recurrence_type: Optional[pulumi.Input[str]] = None,
resource_group_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages an Azure Cost Management Export for a Resource Group.
!> **Note:** The `costmanagement.ResourceGroupExport` resource has been deprecated in favour of the `core.ResourceGroupCostManagementExport` resource and will be removed in v3.0 of the Azure Provider.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_account = azure.storage.Account("exampleAccount",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
account_tier="Standard",
account_replication_type="LRS")
example_resource_group_export = azure.costmanagement.ResourceGroupExport("exampleResourceGroupExport",
resource_group_id=example_resource_group.id,
recurrence_type="Monthly",
recurrence_period_start="2020-08-18T00:00:00Z",
recurrence_period_end="2020-09-18T00:00:00Z",
delivery_info=azure.costmanagement.ResourceGroupExportDeliveryInfoArgs(
storage_account_id=example_account.id,
container_name="examplecontainer",
root_folder_path="/root/updated",
),
query=azure.costmanagement.ResourceGroupExportQueryArgs(
type="Usage",
time_frame="WeekToDate",
))
```
## Import
Cost Management Export for a Resource Group can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:costmanagement/resourceGroupExport:ResourceGroupExport example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example/providers/Microsoft.CostManagement/exports/example
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] active: Is the cost management export active? Default is `true`.
:param pulumi.Input[pulumi.InputType['ResourceGroupExportDeliveryInfoArgs']] delivery_info: A `delivery_info` block as defined below.
:param pulumi.Input[str] name: Specifies the name of the Cost Management Export. Changing this forces a new resource to be created.
:param pulumi.Input[pulumi.InputType['ResourceGroupExportQueryArgs']] query: A `query` block as defined below.
:param pulumi.Input[str] recurrence_period_end: The date the export will stop capturing information.
:param pulumi.Input[str] recurrence_period_start: The date the export will start capturing information.
:param pulumi.Input[str] recurrence_type: How often the requested information will be exported. Valid values include `Annually`, `Daily`, `Monthly`, `Weekly`.
:param pulumi.Input[str] resource_group_id: The id of the resource group in which to export information.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ResourceGroupExportArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages an Azure Cost Management Export for a Resource Group.
!> **Note:** The `costmanagement.ResourceGroupExport` resource has been deprecated in favour of the `core.ResourceGroupCostManagementExport` resource and will be removed in v3.0 of the Azure Provider.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_account = azure.storage.Account("exampleAccount",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
account_tier="Standard",
account_replication_type="LRS")
example_resource_group_export = azure.costmanagement.ResourceGroupExport("exampleResourceGroupExport",
resource_group_id=example_resource_group.id,
recurrence_type="Monthly",
recurrence_period_start="2020-08-18T00:00:00Z",
recurrence_period_end="2020-09-18T00:00:00Z",
delivery_info=azure.costmanagement.ResourceGroupExportDeliveryInfoArgs(
storage_account_id=example_account.id,
container_name="examplecontainer",
root_folder_path="/root/updated",
),
query=azure.costmanagement.ResourceGroupExportQueryArgs(
type="Usage",
time_frame="WeekToDate",
))
```
## Import
Cost Management Export for a Resource Group can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:costmanagement/resourceGroupExport:ResourceGroupExport example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example/providers/Microsoft.CostManagement/exports/example
| |
empty scans file created")
longitude = -self.telescope.long*180/math.pi # east logitude
latitude = self.telescope.lat*180/math.pi
height = self.telescope.elevation
self.location = astropy.coordinates.EarthLocation(lon=longitude,
lat=latitude,
height=height)
self.logger.debug("__init__: telescope location defined")
# signal properties
self.get_signals()
# observing mode
self.obsmode = obsmode
self.restfreq = restfreq
# JSON file on client host
#self.last_spectra_file = "/var/tmp/last_spectrum.json"
#self.backend.init_disk_monitors()
#self.backend.observer.start() <<<<<<<<<<<<<< not working; maybe not needed
#self.specQueue = queue.Queue()
self.specHandler = MC.ActionThread(self, self.integr_done,
name="specHandler")
#self.specHandler = threading.Thread(target=self.integr_done,
# name="specHandler")
self.specHandler.daemon = True
self.specHandler.start()
self.logger.debug("__init__: done")
    def initialize_FITS(self):
        """
        Initialize the in-memory FITS structures for this session.

        Creates a FITSfile for the telescope, an HDU list headed by the
        primary HDU, and an empty SDFITS binary table sized from
        ``self.dims``, then chooses a timestamped output file name under
        /var/tmp.
        """
        self.fitsfile = DSNFITS.FITSfile(self.telescope)
        # initialize a list of extensions; the first is the primary
        self.HDUs = [self.fitsfile.prihdu] # FITS extensions
        # initialize a SDFITS binary table
        # default to SAO configuration:
        # 32768 ch, one position, two pols, 50 integrations, 2 beams
        # (dims order matters: init_binary_table() reads self.dims)
        self.dims = [32768,1,1,2,50,2]
        self.bintabHDU = self.init_binary_table()
        self.logger.debug("initialize_FITS: empty binary table received")
        self.HDUs.append(self.bintabHDU)
        self.hdulist = pyfits.HDUList(self.HDUs)
        self.logger.debug("initialize_FITS: HDU list created")
        # file name is fixed for the session; save_FITS() overwrites it
        self.filename = "/var/tmp/test-"+str(time.time())+".fits"
    def init_binary_table(self, numscans=100, observer="Horiuchi"):
        """
        Build an empty SDFITS binary table HDU sized from ``self.dims``.

        Here 'observer' is used in the TAMS sense -- a person. Elsewhere
        'observer' is used in the ephem sense -- a location.

        @param numscans : number of (initially empty) rows in the table
        @param observer : person's name for the OBSERVER header keyword
        @return: pyfits.BinTableHDU with header and columns but no data
        """
        self.fitsfile.exthead = self.fitsfile.make_basic_header()
        # start the extension header
        self.fitsfile.exthead['projid'] = self.info['project']['name']
        self.fitsfile.exthead['observer'] = observer
        self.fitsfile.exthead['FRONTEND'] = self.equipment['FrontEnd'].name
        self.fitsfile.exthead['RECEIVER'] = self.equipment['Receiver'].name
        # convenient names for the table dimensions
        nchan = self.dims[0]
        nlong = self.dims[1]
        nlat = self.dims[2]
        npols = self.dims[3]
        nrecs = self.dims[4]
        nbeams = self.dims[5]
        # adjust for X frontend and receiver
        # NOTE(review): the log tag "make_SAO_table" looks like a stale
        # method name; the runtime string is left unchanged here
        self.logger.info("make_SAO_table: receiver is %s",
                         self.equipment['Receiver'])
        if type(self.equipment['Receiver']) == DSNrx.DSN_rx:
            nbeams = 1
            self.logger.debug("init_binary_table: DSN receivers have one beam")
        else:
            self.logger.debug("init_binary_table: receiver has %d beams", nbeams)
        # add the site data
        self.fitsfile.add_site_data(self.fitsfile.exthead)
        # make the basic columns that are always needed
        self.fitsfile.make_basic_columns()
        # add the backend data
        freqs = self.backend.freqs
        num_chan = len(freqs)
        # channel spacing; assumes freqs spans 0..freqs[-1] -- TODO confirm
        self.fitsfile.exthead['FREQRES'] = freqs[-1]/(num_chan-1)
        # pyfits header keys are case-insensitive, so 'BANDWIDt' is the
        # BANDWIDT keyword
        self.fitsfile.exthead['BANDWIDt'] = num_chan*self.fitsfile.exthead['FREQRES']
        self.fitsfile.get_hardware_metadata(self.backend)
        # add multi-dimensioned metadata
        self.fitsfile.add_time_dependent_columns(nrecs)
        # beam offsets
        self.fitsfile.make_offset_columns(numrecs=nrecs)
        # column for TSYS, one value for each IF; single-beam (DSN) data are
        # uncalibrated counts, two-beam data are in kelvin
        if nbeams == 1:
            tsys_dim = "(1,1,1,2,"+str(nrecs)+")"
            unit = "count"
        else:
            tsys_dim = "(1,1,1,2,"+str(nrecs)+",2)"
            unit = "K"
        self.logger.debug("init_binary_table: TSYS dim is %s", tsys_dim)
        self.fitsfile.columns.add_col(pyfits.Column(name='TSYS',
                                                    format=str(2*nrecs*2)+'D',
                                                    dim=tsys_dim,
                                                    unit=unit))
        # Add columns describing the data matrix
        # Note that refpix defaults to 0
        axis = 1; self.fitsfile.make_data_axis(self.fitsfile.exthead,
                                               self.fitsfile.columns, axis,
                                               nchan, 'FREQ-OBS', 'D', unit='Hz',
                                               comment="channel frequency in telescope frame")
        axis +=1; self.fitsfile.make_data_axis(self.fitsfile.exthead,
                                               self.fitsfile.columns, axis,
                                               nlong, 'RA---GLS', 'D', unit='deg',
                                               comment="RA J2000.0")
        axis +=1; self.fitsfile.make_data_axis(self.fitsfile.exthead,
                                               self.fitsfile.columns, axis,
                                               nlat, 'DEC--GLS','D', unit='deg',
                                               comment="decl. J2000")
        # Stokes axis
        # get the polarizations from the spectrometer input signals
        axis+=1; self.fitsfile.make_data_axis(self.fitsfile.exthead,
                                              self.fitsfile.columns, axis,
                                              npols, 'STOKES', 'I',
                                              comment="polarization code: -8 ... 4")
        # time axis
        axis+=1; self.fitsfile.make_data_axis(self.fitsfile.exthead,
                                              self.fitsfile.columns, axis,
                                              nrecs, 'TIME', 'E', unit='s',
                                              comment='Python time.time() value')
        # Beam axis (only present for multi-beam receivers)
        if nbeams > 1:
            axis+=1; self.fitsfile.make_data_axis(self.fitsfile.exthead,
                                                  self.fitsfile.columns, axis,
                                                  nbeams, 'BEAM', 'I',
                                                  comment="beam 1 or 2")
        # Make the DATA column; the format repeat count is the product of
        # all the MAXISn header values written by make_data_axis() above
        fmt_multiplier = self.fitsfile.exthead['MAXIS1']*self.fitsfile.exthead['MAXIS2']* \
                         self.fitsfile.exthead['MAXIS3']*self.fitsfile.exthead['MAXIS4']* \
                         self.fitsfile.exthead['MAXIS5']
        if nbeams > 1:
            fmt_multiplier *= self.fitsfile.exthead['MAXIS6']
        self.logger.debug("init_binary_table: format multiplier = %d", fmt_multiplier)
        dimsval = "("+str(self.fitsfile.exthead['MAXIS1'])+"," \
                     +str(self.fitsfile.exthead['MAXIS2'])+"," \
                     +str(self.fitsfile.exthead['MAXIS3'])+"," \
                     +str(self.fitsfile.exthead['MAXIS4'])+"," \
                     +str(self.fitsfile.exthead['MAXIS5'])+")"
        if nbeams > 1:
            dimsval = dimsval[:-1] + ","+str(self.fitsfile.exthead['MAXIS6'])+")"
        self.logger.debug("init_binary_table: computed scan shape: %s", dimsval)
        data_format = str(fmt_multiplier)+"E"
        self.logger.debug("init_binary_table: data_format = %s", data_format)
        self.fitsfile.columns.add_col(pyfits.Column(name='DATA', format=data_format,
                                                    dim=dimsval))
        self.logger.debug("init_binary_table: table columns created")
        # create the table extension
        FITSrec = pyfits.FITS_rec.from_columns(self.fitsfile.columns, nrows=numscans)
        self.logger.debug("init_binary_table: FITS record built")
        tabhdu = pyfits.BinTableHDU(data=FITSrec, header=self.fitsfile.exthead,
                                    name="SINGLE DISH")
        self.logger.debug("init__binary_table: empty table created")
        # empty table
        return tabhdu
    def save_FITS(self):
        """
        Save FITS HDU list to file

        The HDU structure used by this server is a pyfits.HDUList() structure
        made up from a list with a pyfits.BinTableHDU and a pyfits.PrimaryHDU()

        This creates a new HDU list structure from the attributes for only the
        rows with data. The file name is not changed so another call to this
        will overwrite the previous contents, but presumably with more rows.
        """
        self.logger.debug("save_FITS: copying primary HDU...")
        savePriHDU = self.fitsfile.prihdu
        # the highest SCAN number is taken as the count of filled rows;
        # NOTE(review): this assumes scan numbers map directly onto row
        # indices -- confirm against how rows are written
        lastRow = np.unique(self.bintabHDU.data[:]['SCAN'])[-1]
        self.logger.debug("save_FITS: bintable has %d rows; making new table", lastRow)
        t0 = time.time()
        saveRec = pyfits.FITS_rec.from_columns(self.fitsfile.columns, nrows=max(lastRow,1))
        # NOTE(review): range(1, lastRow) copies rows 1..lastRow-1, so row 0
        # is never copied -- check for an off-by-one here
        for row in range(1,lastRow):
            saveRec[row] = self.bintabHDU.data[row]
        saveBinTab = pyfits.BinTableHDU(data=saveRec,
                                        header=self.fitsfile.exthead,
                                        name="SINGLE DISH")
        saveHDUs = [savePriHDU, saveBinTab]
        hdulist = pyfits.HDUList(saveHDUs)
        self.logger.debug(
            "save_FITS: Took {:.3f} seconds to copy FITS data".format(
                time.time() - t0))
        t0 = time.time()
        hdulist.writeto(self.filename, overwrite=True)
        # drop references so the (potentially large) copies can be reclaimed
        del savePriHDU
        del saveRec
        del saveBinTab
        del saveHDUs
        del hdulist
        self.logger.debug("save_FITS: wrote FITS to %s in %s s", self.filename, time.time()-t0)
        #self.save_FITS.cb({"status": "saved to %s" % self.filename})
def _config_hw(self, context,
import_path=None,
config_args=None,
config_kwargs=None):
"""
Get the equipment details for the given code
"""
self.logger.debug("_config_hw: for %s", context)
self.logger.debug("_config_hw: config args: %s", config_args)
# get the hardware configuration
if context in self.configs:
# the usual way to get the configuration, accepting the defaults
self.logger.debug("_config_hw: standard configuration")
observatory, equipment = MCcfg.station_configuration(context,
**config_args)
else:
# if not a standard configuration, infer from context name like PSR014L
self.logger.debug("_config_hw: non-standard configuration")
activity = context[:4]
project = self.activitys_project(activity)
dss = int(context[4:6])
band = context[7]
now = time.gmtime()
timestr = "02d02d" % (now.tm_hour, now.tm_min)
observatory, equipment = std_configuration(None, project, dss,
now.tm_year, now.tm_yday, timestr, band)
# initialize the equipment confoguration
if import_path is not None:
# non-standard path to the configuration file
if config_args is None:
# over-riding positional configuration arguments
config_args = ()
if config_kwargs is None:
# over-riding keyword configuration arguments
config_kwargs = {}
self.configure(import_path, *config_args, **config_kwargs)
else:
self.observatory = observatory
self.equipment = equipment
self.logger.debug("_config_hw: observatory is %s", observatory)
for item in list(equipment.keys()):
self.logger.debug("_config_hw: %s is %s", item, equipment[item])
def _config_bore(self, boresight_manager_file_paths = None,
boresight_manager_kwargs = None):
"""
initialize the boresight manager
"""
if boresight_manager_file_paths is None:
# use default location of boresight files
file_paths, boresight_manager_file_paths = \
BSM.BoresightManager.detect_file_paths()
if boresight_manager_kwargs is None:
# overriding boresight manager keyword arguments
boresight_manager_kwargs = {}
self.boresight_manager = BSM.BoresightManager(
boresight_manager_file_paths,
**boresight_manager_kwargs
)
    def _init_info(self):
        """
        Initialize program parameters to defaults.

        Resets the ``info`` dictionary, which holds the current observing
        state: pointing, boresight, system-temperature calibration, tipping,
        and project/source bookkeeping.
        """
        tsys_cal_dir = data_dir + "tsys_cals/"
        self.info = {
            "info_save_dir": self.status_dir,
            "point": {
                "current_source": None
            },
            "boresight": {
                "running": False,
                "data_dir": DSScfg.tams_config.boresight_data_dir,
                "offset_el": 0.0,
                "offset_xel": 0.0
            },
            "tsys_calibration": {
                "running": False,
                # presumably per-IF scale factors for the system-temperature
                # calibration -- TODO document how these were measured
                'tsys_factors': [
                    999883083.3775496,
                    421958318.055633,
                    1374067124.697352,
                    705797017.1087824
                ],
                "data_dir": tsys_cal_dir,
                "date": None,
                "el": None
            },
            "tip": {
                "running": False,
                "data_dir": DSScfg.tams_config.tipping_data_dir,
            },
            "project": {
                "name": self.project,
                "source_dir": self.project_conf_path,
            },
            "sources": {},
            "verifiers": {},
            "calibrators": {}
        }
        self.logger.debug("_init_info({}): _info: {}".format(logtime(),self.info))
    def init_info(self, activity='TMS0'):
        """
        Set the project from the activity code and reset ``info`` to defaults.

        @param activity : four-character activity code, e.g. 'TMS0'
        """
        self.project = self.get_activitys_project(activity)
        self._init_info()
# ------------------ Start-up and shut-down methods -----------------------
@async_method
def set_info(self, path, val):
"""
Set some path within ``_info`` attribute in a thread safe manner
Examples:
>>> server.set_info(["project", "name"],"TAMS")
>>> server.set_info(["tip","running"],False)
Args:
path (list): "path", inside info property to value we want to set.
val (obj): desired value.
"""
self.logger.debug("set_info: called with path: {}, val: {}".format(path, val))
with self.lock:
info_copy = self.info
sub_path = self.info[path[0]]
self.logger.debug("set_info: subpath: {}".format(sub_path))
for p in path[1:-1]:
sub_path = sub_path[p]
self.logger.debug("set_info: subpath: {}".format(sub_path))
sub_path[path[-1]] = val
self.set_info.cb() # no response to client
@support.test.auto_test()
@async_method
def get_info(self, path=None):
"""
Get some path in ``_info`` attribute in thread safe attribute
If no path is provided, return entire ``_info`` attribute
Examples:
>>> server.get_info(["boresight","running"])
False
>>> server.get_info(["point","current_source"])
"0521-365"
Args:
path (list/None): Get some value from ``_info``, or some value from a
subdictionary of ``_info``.
Returns:
obj: Either value of dictionary/subditionary, or subdictionary itself.
"""
self.logger.debug("get_info: path: {}".format(path))
with self.lock:
if path is None:
self.get_info.cb(self.info) # send client the entire info dict
return self.info
sub_path = self.info[path[0]]
for p in path[1:]:
sub_path = sub_path[p]
self.get_info.cb(sub_path) # send client the requested data
return sub_path
@support.test.auto_test()
def save_info(self):
"""
Dump internal _info attribute to a file.
"""
# info file should go with others, not where the file was loaded from
# if not os.path.exists(self.info["info_save_dir"]):
# os.makedirs(self.info["info_save_dir"])
# self.logger.debug(
# "save_info: info_save_dir: {}".format(self.info["info_save_dir"]))
timestamp = datetime.datetime.utcnow().strftime("%Y-%j-%Hh%Mm%Ss")
save_file_name = "info_{}.json".format(timestamp)
self.info["info_save_dir"] = self.project_dir + "Status/" \
+ self.__class__.__name__ + "/"
save_file_path = os.path.join(self.info["info_save_dir"], save_file_name)
self.logger.info("save_info: Saving file {}".format(save_file_path))
t0 = time.time()
with open(save_file_path, "w") as f:
json.dump(self.info, f)
self.logger.debug(
"save_info({}): Took {:.3f} seconds to dump info".format(logtime(),
time.time() - t0))
@support.test.auto_test()
def load_info(self):
"""
Load in _info attribute from most recently dumped settings file.
This assumes a file name like "info_2020-269-22h39m15s.json"
Attribute ``info`` contains parameters for an ongoing | |
A WITH RING BELOW
'\u1e02': b'\xc7B', # LATIN CAPITAL LETTER B WITH DOT ABOVE
'\u1e03': b'\xc7b', # LATIN SMALL LETTER B WITH DOT ABOVE
'\u1e04': b'\xd6B', # LATIN CAPITAL LETTER B WITH DOT BELOW
'\u1e05': b'\xd6b', # LATIN SMALL LETTER B WITH DOT BELOW
'\u1e08': b'\xc2\xd0C', # LATIN CAPITAL LETTER C WITH CEDILLA AND ACUTE
'\u1e09': b'\xc2\xd0c', # LATIN SMALL LETTER C WITH CEDILLA AND ACUTE
'\u1e0a': b'\xc7D', # LATIN CAPITAL LETTER D WITH DOT ABOVE
'\u1e0b': b'\xc7d', # LATIN SMALL LETTER D WITH DOT ABOVE
'\u1e0c': b'\xd6D', # LATIN CAPITAL LETTER D WITH DOT BELOW
'\u1e0d': b'\xd6d', # LATIN SMALL LETTER D WITH DOT BELOW
'\u1e10': b'\xd0D', # LATIN CAPITAL LETTER D WITH CEDILLA
'\u1e11': b'\xd0d', # LATIN SMALL LETTER D WITH CEDILLA
'\u1e12': b'\xdbD', # LATIN CAPITAL LETTER D WITH CIRCUMFLEX BELOW
'\u1e13': b'\xdbd', # LATIN SMALL LETTER D WITH CIRCUMFLEX BELOW
'\u1e14': b'\xc1\xc5E', # LATIN CAPITAL LETTER E WITH MACRON AND GRAVE
'\u1e15': b'\xc1\xc5e', # LATIN SMALL LETTER E WITH MACRON AND GRAVE
'\u1e16': b'\xc2\xc5E', # LATIN CAPITAL LETTER E WITH MACRON AND ACUTE
'\u1e17': b'\xc2\xc5e', # LATIN SMALL LETTER E WITH MACRON AND ACUTE
'\u1e18': b'\xdbE', # LATIN CAPITAL LETTER E WITH CIRCUMFLEX BELOW
'\u1e19': b'\xdbe', # LATIN SMALL LETTER E WITH CIRCUMFLEX BELOW
'\u1e1c': b'\xc6\xd0E', # LATIN CAPITAL LETTER E WITH CEDILLA AND BREVE
'\u1e1d': b'\xc6\xd0e', # LATIN SMALL LETTER E WITH CEDILLA AND BREVE
'\u1e1e': b'\xc7F', # LATIN CAPITAL LETTER F WITH DOT ABOVE
'\u1e1f': b'\xc7f', # LATIN SMALL LETTER F WITH DOT ABOVE
'\u1e20': b'\xc5G', # LATIN CAPITAL LETTER G WITH MACRON
'\u1e21': b'\xc5g', # LATIN SMALL LETTER G WITH MACRON
'\u1e22': b'\xc7H', # LATIN CAPITAL LETTER H WITH DOT ABOVE
'\u1e23': b'\xc7h', # LATIN SMALL LETTER H WITH DOT ABOVE
'\u1e24': b'\xd6H', # LATIN CAPITAL LETTER H WITH DOT BELOW
'\u1e25': b'\xd6h', # LATIN SMALL LETTER H WITH DOT BELOW
'\u1e26': b'\xc8H', # LATIN CAPITAL LETTER H WITH DIAERESIS
'\u1e27': b'\xc8h', # LATIN SMALL LETTER H WITH DIAERESIS
'\u1e28': b'\xd0H', # LATIN CAPITAL LETTER H WITH CEDILLA
'\u1e29': b'\xd0h', # LATIN SMALL LETTER H WITH CEDILLA
'\u1e2a': b'\xd5H', # LATIN CAPITAL LETTER H WITH BREVE BELOW
'\u1e2b': b'\xd5h', # LATIN SMALL LETTER H WITH BREVE BELOW
'\u1e2e': b'\xc2\xc8I', # LATIN CAPITAL LETTER I WITH DIAERESIS AND ACUTE
'\u1e2f': b'\xc2\xc8i', # LATIN SMALL LETTER I WITH DIAERESIS AND ACUTE
'\u1e30': b'\xc2K', # LATIN CAPITAL LETTER K WITH ACUTE
'\u1e31': b'\xc2k', # LATIN SMALL LETTER K WITH ACUTE
'\u1e32': b'\xd6K', # LATIN CAPITAL LETTER K WITH DOT BELOW
'\u1e33': b'\xd6k', # LATIN SMALL LETTER K WITH DOT BELOW
'\u1e36': b'\xd6L', # LATIN CAPITAL LETTER L WITH DOT BELOW
'\u1e37': b'\xd6l', # LATIN SMALL LETTER L WITH DOT BELOW
'\u1e38': b'\xc5\xd6L', # LATIN CAPITAL LETTER L WITH DOT BELOW AND MACRON
'\u1e39': b'\xc5\xd6l', # LATIN SMALL LETTER L WITH DOT BELOW AND MACRON
'\u1e3c': b'\xdbL', # LATIN CAPITAL LETTER L WITH CIRCUMFLEX BELOW
'\u1e3d': b'\xdbl', # LATIN SMALL LETTER L WITH CIRCUMFLEX BELOW
'\u1e3e': b'\xc2M', # LATIN CAPITAL LETTER M WITH ACUTE
'\u1e3f': b'\xc2m', # LATIN SMALL LETTER M WITH ACUTE
'\u1e40': b'\xc7M', # LATIN CAPITAL LETTER M WITH DOT ABOVE
'\u1e41': b'\xc7m', # LATIN SMALL LETTER M WITH DOT ABOVE
'\u1e42': b'\xd6M', # LATIN CAPITAL LETTER M WITH DOT BELOW
'\u1e43': b'\xd6m', # LATIN SMALL LETTER M WITH DOT BELOW
'\u1e44': b'\xc7N', # LATIN CAPITAL LETTER N WITH DOT ABOVE
'\u1e45': b'\xc7n', # LATIN SMALL LETTER N WITH DOT ABOVE
'\u1e46': b'\xd6N', # LATIN CAPITAL LETTER N WITH DOT BELOW
'\u1e47': b'\xd6n', # LATIN SMALL LETTER N WITH DOT BELOW
'\u1e4a': b'\xdbN', # LATIN CAPITAL LETTER N WITH CIRCUMFLEX BELOW
'\u1e4b': b'\xdbn', # LATIN SMALL LETTER N WITH CIRCUMFLEX BELOW
'\u1e4c': b'\xc2\xc4O', # LATIN CAPITAL LETTER O WITH TILDE AND ACUTE
'\u1e4d': b'\xc2\xc4o', # LATIN SMALL LETTER O WITH TILDE AND ACUTE
'\u1e4e': b'\xc8\xc4O', # LATIN CAPITAL LETTER O WITH TILDE AND DIAERESIS
'\u1e4f': b'\xc8\xc4o', # LATIN SMALL LETTER O WITH TILDE AND DIAERESIS
'\u1e50': b'\xc1\xc5O', # LATIN CAPITAL LETTER O WITH MACRON AND GRAVE
'\u1e51': b'\xc1\xc5o', # LATIN SMALL LETTER O WITH MACRON AND GRAVE
'\u1e52': b'\xc2\xc5O', # LATIN CAPITAL LETTER O WITH MACRON AND ACUTE
'\u1e53': b'\xc2\xc5o', # LATIN SMALL LETTER O WITH MACRON AND ACUTE
'\u1e54': b'\xc2P', # LATIN CAPITAL LETTER P WITH ACUTE
'\u1e55': b'\xc2p', # LATIN SMALL LETTER P WITH ACUTE
'\u1e56': b'\xc7P', # LATIN CAPITAL LETTER P WITH DOT ABOVE
'\u1e57': b'\xc7p', # LATIN SMALL LETTER P WITH DOT ABOVE
'\u1e58': b'\xc7R', # LATIN CAPITAL LETTER R WITH DOT ABOVE
'\u1e59': b'\xc7r', # LATIN SMALL LETTER R WITH DOT ABOVE
'\u1e5a': b'\xd6R', # LATIN CAPITAL LETTER R WITH DOT BELOW
'\u1e5b': b'\xd6r', # LATIN SMALL LETTER R WITH DOT BELOW
'\u1e5c': b'\xc5\xd6R', # LATIN CAPITAL LETTER R WITH DOT BELOW AND MACRON
'\u1e5d': b'\xc5\xd6r', # LATIN SMALL LETTER R WITH DOT BELOW AND MACRON
'\u1e60': b'\xc7S', # LATIN CAPITAL LETTER S WITH DOT ABOVE
'\u1e61': b'\xc7s', # LATIN SMALL LETTER S WITH DOT ABOVE
'\u1e62': b'\xd6S', # LATIN CAPITAL LETTER S WITH DOT BELOW
'\u1e63': b'\xd6s', # LATIN SMALL LETTER S WITH DOT BELOW
'\u1e64': b'\xc7\xc2S', # LATIN CAPITAL LETTER S WITH ACUTE AND DOT ABOVE
'\u1e65': b'\xc7\xc2s', # LATIN SMALL LETTER S WITH ACUTE AND DOT ABOVE
'\u1e66': b'\xc7\xcfS', # LATIN CAPITAL LETTER S WITH CARON AND DOT ABOVE
'\u1e67': b'\xc7\xcfs', # LATIN SMALL LETTER S WITH CARON AND DOT ABOVE
'\u1e68': b'\xc7\xd6S', # LATIN CAPITAL LETTER S WITH DOT BELOW AND DOT ABOVE
'\u1e69': b'\xc7\xd6s', # LATIN SMALL LETTER S WITH DOT BELOW AND DOT ABOVE
'\u1e6a': b'\xc7T', # LATIN CAPITAL LETTER T WITH DOT ABOVE
'\u1e6b': b'\xc7t', # LATIN SMALL LETTER T WITH DOT ABOVE
'\u1e6c': b'\xd6T', # LATIN CAPITAL LETTER T WITH DOT BELOW
'\u1e6d': b'\xd6t', # LATIN SMALL LETTER T WITH DOT BELOW
'\u1e70': b'\xdbT', # LATIN CAPITAL LETTER T WITH CIRCUMFLEX BELOW
'\u1e71': b'\xdbt', # LATIN SMALL LETTER T WITH CIRCUMFLEX BELOW
'\u1e72': b'\xd7U', # LATIN CAPITAL LETTER U WITH DIAERESIS BELOW
'\u1e73': b'\xd7u', # LATIN SMALL LETTER U WITH DIAERESIS BELOW
'\u1e76': b'\xdbU', # LATIN CAPITAL LETTER U WITH CIRCUMFLEX BELOW
'\u1e77': b'\xdbu', # LATIN SMALL LETTER U WITH CIRCUMFLEX BELOW
'\u1e78': b'\xc2\xc4U', # LATIN CAPITAL LETTER U WITH TILDE AND ACUTE
'\u1e79': b'\xc2\xc4u', # LATIN SMALL LETTER U WITH TILDE AND ACUTE
'\u1e7a': b'\xc8\xc5U', # LATIN CAPITAL LETTER U WITH MACRON AND DIAERESIS
'\u1e7b': b'\xc8\xc5u', # LATIN SMALL LETTER U WITH MACRON AND DIAERESIS
'\u1e7c': b'\xc4V', # LATIN CAPITAL LETTER V WITH TILDE
'\u1e7d': b'\xc4v', # LATIN SMALL LETTER V WITH TILDE
'\u1e7e': b'\xd6V', # LATIN CAPITAL LETTER V WITH DOT BELOW
'\u1e7f': b'\xd6v', # LATIN SMALL LETTER V WITH DOT BELOW
'\u1e80': b'\xc1W', # LATIN CAPITAL LETTER W WITH GRAVE
'\u1e81': b'\xc1w', # LATIN SMALL LETTER W WITH GRAVE
'\u1e82': b'\xc2W', # LATIN CAPITAL LETTER W WITH ACUTE
'\u1e83': b'\xc2w', # LATIN SMALL LETTER W WITH ACUTE
'\u1e84': b'\xc8W', # LATIN CAPITAL LETTER W WITH DIAERESIS
'\u1e85': b'\xc8w', # LATIN SMALL LETTER W WITH DIAERESIS
'\u1e86': b'\xc7W', # LATIN CAPITAL LETTER W WITH DOT ABOVE
'\u1e87': b'\xc7w', # LATIN SMALL LETTER W WITH DOT ABOVE
'\u1e88': b'\xd6W', # LATIN CAPITAL LETTER W WITH DOT BELOW
'\u1e89': b'\xd6w', # LATIN SMALL LETTER W WITH DOT BELOW
'\u1e8a': b'\xc7X', # LATIN CAPITAL LETTER X WITH DOT ABOVE
'\u1e8b': b'\xc7x', # LATIN SMALL LETTER X WITH DOT ABOVE
'\u1e8c': b'\xc8X', # LATIN CAPITAL LETTER X WITH DIAERESIS
'\u1e8d': b'\xc8x', # LATIN SMALL LETTER X WITH DIAERESIS
'\u1e8e': b'\xc7Y', # LATIN CAPITAL LETTER Y WITH DOT ABOVE
'\u1e8f': b'\xc7y', # LATIN SMALL LETTER Y WITH DOT ABOVE
'\u1e90': b'\xc3Z', # LATIN CAPITAL LETTER Z WITH CIRCUMFLEX
'\u1e91': b'\xc3z', # LATIN SMALL LETTER Z WITH CIRCUMFLEX
'\u1e92': b'\xd6Z', # LATIN CAPITAL LETTER Z WITH DOT BELOW
'\u1e93': b'\xd6z', # LATIN SMALL LETTER Z WITH | |
<gh_stars>0
from SkateUtils.KeyPoseState import State
import numpy as np
import pydart2 as pydart
import math
import IKsolve_double_stance
import IKsolveGlobal
import dart_ik
import copy
import pickle
from fltk import *
from PyCommon.modules.GUI import hpSimpleViewer as hsv
from PyCommon.modules.Renderer import ysRenderer as yr
from os.path import join
render_vector = []
render_vector_origin = []
push_force = []
push_force_origin = []
blade_force = []
blade_force_origin = []
rd_footCenter = []
ik_on = True
# ik_on = False
class MyWorld(pydart.World):
def __init__(self, ):
pydart.World.__init__(self, 1.0 / 1000.0, './data/skel/cart_pole_blade_3dof_with_ground.skel')
# pydart.World.__init__(self, 1.0 / 1000.0, './data/skel/cart_pole_blade.skel')
# pydart.World.__init__(self, 1.0 / 2000.0, './data/skel/cart_pole.skel')
self.force = None
self.force_r = None
self.force_l = None
self.force_hip_r = None
self.duration = 0
self.skeletons[0].body('ground').set_friction_coeff(0.02)
self.ground_height = self.skeletons[0].body(0).to_world((0., 0.025, 0.))[1]
# self.ground_height = -0.5
# print("self.ground_height: ", self.ground_height)
skel = self.skeletons[2]
# print("mass: ", skel.m, "kg")
# print('[Joint]')
# for joint in skel.joints:
# print("\t" + str(joint))
# print("\t\tparent = " + str(joint.parent_bodynode))
# print("\t\tchild = " + str(joint.child_bodynode))
# print("\t\tdofs = " + str(joint.dofs))
# skel.joint("j_abdomen").set_position_upper_limit(10, 0.0)
# skel.joint("j_heel_left").set_position_upper_limit(0, 0.0)
# skel.joint("j_heel_left").set_position_lower_limit(0, -0.0)
# pelvis_pos_y = skel.dof_indices(["j_pelvis_pos_y"])
# pelvis_x = skel.dof_indices(["j_pelvis_rot_x"])
# pelvis = skel.dof_indices(["j_pelvis_rot_y", "j_pelvis_rot_z"])
upper_body = skel.dof_indices(["j_abdomen_x", "j_abdomen_y", "j_abdomen_z"])
spine = skel.dof_indices(["j_spine_x", "j_spine_y", "j_spine_z"])
right_leg = skel.dof_indices(["j_thigh_right_x", "j_thigh_right_y", "j_thigh_right_z", "j_shin_right_z"])
left_leg = skel.dof_indices(["j_thigh_left_x", "j_thigh_left_y", "j_thigh_left_z", "j_shin_left_z"])
knee = skel.dof_indices(["j_shin_left_x", "j_shin_right_x"])
arms = skel.dof_indices(["j_bicep_left_x", "j_bicep_right_x"])
arms_y = skel.dof_indices(["j_bicep_left_y", "j_bicep_right_y"])
arms_z = skel.dof_indices(["j_bicep_left_z", "j_bicep_right_z"])
foot = skel.dof_indices(["j_heel_left_x", "j_heel_left_y", "j_heel_left_z", "j_heel_right_x", "j_heel_right_y", "j_heel_right_z"])
leg_y = skel.dof_indices(["j_thigh_left_y", "j_thigh_right_y"])
elbows = skel.dof_indices(["j_forearm_left_x", "j_forearm_right_x"])
# blade = skel.dof_indices(["j_heel_right_2"])
# import pose angle info. from txt file
self.q_list = []
# self.load_poses(skel, 'data/mocap/jump_poses.txt')
# self.fn = 5
# self.load_poses(skel, 'data/mocap/skate_joint_3d_pos.txt')
# self.load_poses(skel, 'data/mocap/spin_joint_3d_pos.txt')
# self.load_poses(skel, 'data/mocap/crossover1_joint_3d_pos.txt')
# self.load_poses(skel, 'data/mocap/3turn_joint_3d_pos.txt')
# self.load_poses(skel, 'data/mocap/crossover_new_joint_3d_pos.txt')
# self.load_poses(skel, 'data/mocap/back_3turn_joint_3d_pos.txt')
# self.load_poses(skel, 'data/mocap/back_crossover_joint_3d_pos.txt')
# self.load_poses(skel, 'data/mocap/forward_gliding_joint_3d_pos.txt')
# Test Moving Camera result!
file_dir = 'data/mocap/movingcam/'
# fn = 'bc_small'
# fn = 'backflip_a'
# self.load_poses(skel, join(file_dir, fn+'_result_joint_3d_pos.txt'))
file_dir = 'data/mocap/'
fn = 'spiral1'
self.load_poses(skel, join(file_dir, fn+'_joint_3d_pos.txt'))
state_q = []
self.state_list = []
for i in range(self.fn):
state_q.append(self.q_list[i])
state_name = "pos"+str(i)
# print("name: ", state_name)
state_q[i][4] += 0.5
self.state_list.append(State(state_name, 0.3, 0.0, 0.0, state_q[i]))
for i in range(self.fn-1):
self.state_list[i].set_next(self.state_list[i+1])
# filename = 'hmr_skating_3turn.skkey'
# filename = 'hmr_skating_gliding.skkey'
# filename = 'sfv_'+fn + '.skkey'
filename = 'sfv_' + fn + '_global.skkey'
print("filename: ", filename)
with open(filename, 'wb') as f:
pickle.dump(self.state_list, f)
# self.load_poses(skel, 'data/mocap/jump_poses.txt')
#
# s00q = np.zeros(skel.ndofs)
# s00q[upper_body] = 0.0, -0., -0.1
# state00 = State("state00", 2.0, 0.0, 0.2, s00q)
#
# # s0 = self.q_list[0]
# # state0 = State("state0", 5.0, 0.0, 0.0, s0)
#
# s1 = self.q_list[1]
# state1 = State("state1", 1.0, 0.0, 0.0, s1)
#
# s2 = self.q_list[2]
# state2 = State("state2", 0.5, 0.0, 0.0, s2)
#
# s3 = self.q_list[3]
# state3 = State("state3", 0.5, 0.0, 0.0, s3)
#
# s4 = np.zeros(skel.ndofs)
# s4[left_leg] = -0.2, 0.0, -0.1, 0.
# s4[right_leg] = 0.2, 0.0, 0.2, -0.4
# # s4[arms] = -0.5, -0.5
# s4[arms_z] = 0.7, 0.7
# s4[elbows] = -2.0, 2.0
# s4[foot] = 0.0, 0.0, -0.5, 0.0, 0.0, 0.2
# state4 = State("state4", 0.5, 0.0, 0.0, s4)
#
# s5 = self.q_list[4]
# state5 = State("state5", 2.0, 0.0, 0.0, s5)
#
# s_terminal_q = np.zeros(skel.ndofs)
# state_t = State("state_t", 50., 2.0, 2.0, s_terminal_q)
#
# state00.set_next(state1)
# state1.set_next(state2)
# state2.set_next(state3)
# state3.set_next(state4)
# state4.set_next(state5)
# state5.set_next(s_terminal_q)
#
# self.state_list = [state00, state1, state2, state3, state4, state5, state_t]
# filename = 'hmr_skating_jump_test.skkey'
#
# with open(filename, 'wb') as f:
# pickle.dump(self.state_list, f)
state_num = len(self.state_list)
self.state_num = state_num
# print("state_num: ", state_num)
self.curr_state = self.state_list[0]
self.elapsedTime = 0.0
self.curr_state_index = 0
# print("backup angle: ", backup_q)
# print("cur angle: ", self.curr_state.angles)
if ik_on:
# self.ik = IKsolve_double_stance.IKsolver(skel, self.skeletons[3], self.dt)
self.ik = IKsolveGlobal.IKsolver(skel, self.skeletons[3], self.dt)
print('IK ON')
# print("after:", state00.angles)
# self.revise_pose(state00)
# self.revise_pose(state01)
# self.revise_pose(state012)
# self.revise_pose(state02)
# debug_rf1 = self.skeletons[3].body("h_blade_right").to_world([-0.1040 + 0.0216, -0.027 - 0.0216, 0.0])[1]
# debug_rf2 = self.skeletons[3].body("h_blade_right").to_world([-0.1040 - 0.0216, -0.027 - 0.0216, 0.0])[1]
# print("check: ", debug_rf1, debug_rf2)
# print('create controller OK')
self.skeletons[3].set_positions(self.curr_state.angles)
self.rd_contact_forces = []
self.rd_contact_positions = []
# print("dof: ", skel.ndofs)
# nonholonomic constraint initial setting
_th = 5. * math.pi / 180.
self.nh0 = pydart.constraints.NonHolonomicContactConstraint(skel.body('h_blade_right'), np.array((0.0216+0.104, -0.0216-0.027, 0.)))
self.nh1 = pydart.constraints.NonHolonomicContactConstraint(skel.body('h_blade_right'), np.array((0.0216-0.104, -0.0216-0.027, 0.)))
self.nh2 = pydart.constraints.NonHolonomicContactConstraint(skel.body('h_blade_left'), np.array((0.0216+0.104, -0.0216-0.027, 0.)))
self.nh3 = pydart.constraints.NonHolonomicContactConstraint(skel.body('h_blade_left'), np.array((0.0216-0.104, -0.0216-0.027, 0.)))
self.nh1.set_violation_angle_ignore_threshold(_th)
self.nh1.set_length_for_violation_ignore(0.208)
self.nh3.set_violation_angle_ignore_threshold(_th)
self.nh3.set_length_for_violation_ignore(0.208)
self.nh0.add_to_world()
self.nh1.add_to_world()
self.nh2.add_to_world()
self.nh3.add_to_world()
self.step_count = 0
def load_poses(self, skel, file_path):
fn = 0
# read 3d positions of each key pose frame from txt file
self.position_storage = []
# with open('data/mocap/skate_spin.txt') as f:
with open(file_path) as f:
# lines = f.read().splitlines()
for line in f:
q_temp = []
line = line.replace(' \n', '')
values = line.split(" ")
temp_pos = np.asarray([float(values[0]), -float(values[1]), -float(values[2])])
# temp_pos = temp_pos * 0.9 # scaling
# for a in values:
# q_temp.append(float(a))
self.position_storage.append(temp_pos)
fn = fn + 1
fn = int(fn / 19)
self.fn = fn
# print("frame num: ", fn)
self.ik = dart_ik.DartIk(skel)
fi = 0
for f in range(fn):
self.ik.clean_constraints()
# # self.ik.add_orientation_const('h_pelvis', np.asarray([0., 0., 0.]))
self.ik.add_joint_pos_const('j_heel_right', self.position_storage[fi])
self.ik.add_joint_pos_const('j_shin_right', self.position_storage[fi + 1])
self.ik.add_joint_pos_const('j_thigh_right', self.position_storage[fi + 2])
self.ik.add_joint_pos_const('j_thigh_left', self.position_storage[fi + 3])
self.ik.add_joint_pos_const('j_shin_left', self.position_storage[fi + 4])
self.ik.add_joint_pos_const('j_heel_left', self.position_storage[fi + 5])
self.ik.add_joint_pos_const('j_hand_right', self.position_storage[fi + 6])
self.ik.add_joint_pos_const('j_forearm_right', self.position_storage[fi + 7])
# self.ik.add_joint_pos_const('j_scapula_right', self.position_storage[fi + 8])
# self.ik.add_position_const('h_scapula_right', self.position_storage[fi + 8], [0.0, 0.397146, 0.169809])
self.ik.add_joint_pos_const('j_bicep_right', self.position_storage[fi + 8])
# self.ik.add_joint_pos_const('j_scapula_left', self.position_storage[fi + 9])
# self.ik.add_position_const('h_scapula_left', self.position_storage[fi+9], [0.0, 0.397146, -0.169809])
self.ik.add_joint_pos_const('j_bicep_left', self.position_storage[fi + 9])
self.ik.add_joint_pos_const('j_forearm_left', self.position_storage[fi + 10])
self.ik.add_joint_pos_const('j_hand_left', self.position_storage[fi + 11])
# self.ik.add_joint_pos_const('j_forearm_right', self.position_storage[fi + 6])
# self.ik.add_joint_pos_const('j_bicep_right', self.position_storage[fi + 7])
# self.ik.add_joint_pos_const('j_scapula_right', self.position_storage[fi + 8])
#
# self.ik.add_joint_pos_const('j_scapula_left', self.position_storage[fi + 9])
# self.ik.add_joint_pos_const('j_bicep_left', self.position_storage[fi + 10])
# self.ik.add_joint_pos_const('j_forearm_left', self.position_storage[fi + 11])
# self.ik.add_joint_pos_const('j_spine', self.position_storage[fi + 12])
# self.ik.add_joint_pos_const('j_head', self.position_storage[fi + 13])
self.ik.add_position_const('h_spine', self.position_storage[fi + 12], [0.0, -0.0908, 0.0])
self.ik.add_position_const('h_head', self.position_storage[fi + 13], [0.0, 0.07825, 0.0]) #head_up
self.ik.add_position_const('h_head', self.position_storage[fi + 14], [0.0508625, 0.0, 0.0]) #nose
self.ik.add_position_const('h_head', self.position_storage[fi + 17], [0., 0.0, -0.0626]) # left ear
self.ik.add_position_const('h_head', self.position_storage[fi + 18], [0., 0.0, 0.0626]) # right ear
# self.ik.add_position_const('h_blade_left', left_toe_pos, [-0.1040 + 0.0216, +0.80354016 - 0.85354016, 0.0])
# right_blade_pos = self.position_storage[fi] + np.array([0., -0.054, 0.])
# left_blade_pos = self.position_storage[fi+5] + np.array([0., -0.054, 0.])
#
# right_blade_pos = self.position_storage[fi] + np.array([0., -0.1, 0.])
# left_blade_pos = self.position_storage[fi + 5] + np.array([0., -0.2, 0.])
# self.ik.add_position_const('h_blade_right', right_blade_pos,
# [-0.1040 + 0.0216, 0.80354016 - 0.85354016, 0.0])
# self.ik.add_position_const('h_blade_right', right_blade_pos,
# [0.1040 + 0.0216, 0.80354016 - 0.85354016, 0.0])
#
# self.ik.add_position_const('h_blade_left', left_blade_pos,
# [0.1040 + 0.0216, 0.80354016 - 0.85354016, 0.0])
# self.ik.add_position_const('h_blade_left', left_blade_pos,
# [-0.1040 + 0.0216, 0.80354016 - 0.85354016, 0.0])
self.ik.solve()
q = skel.q
# q[0:3] = np.asarray([0., -0.785, 0.5])
# q[3:6] = np.asarray([0., 0.01, 0.])
# q[0:2] = np.asarray([0.0, 0.0])
# q[3:6] = np.asarray([0.0, 0.0, 0.0])
skel.set_positions(q)
self.q_list.append(skel.q)
fi += 19
def revise_pose(self, pose):
self.skeletons[3].set_positions(pose.angles)
ground_height = max(self.skeletons[3].body('h_blade_right').to_world((-0.104 + 0.0216, -0.027 - 0.0216, 0.))[1],
self.skeletons[3].body('h_blade_right').to_world((+0.104 + 0.0216, -0.027 - 0.0216, 0.))[1],
self.skeletons[3].body('h_blade_left').to_world((-0.104 + 0.0216, -0.027 - 0.0216, 0.))[1],
self.skeletons[3].body('h_blade_left').to_world((+0.104 + 0.0216, -0.027 - 0.0216, 0.))[1]
)
ik_res = copy.deepcopy(pose.angles)
self.ik.update_target(ground_height)
ik_res[6:] = self.ik.solve()
pose.angles = ik_res
def step(self):
# print("self.curr_state: ", self.curr_state.name)
if self.curr_state.name == "state_force":
self.force = np.array([-20.0, 0.0, 0.0])
# elif self.curr_state.name == "state2":
# self.force = np.array([0.0, 0.0, -10.0])
else:
self.force = None
if self.force is not None:
self.skeletons[2].body('h_pelvis').add_ext_force(self.force)
self.skeletons[3].set_positions(self.curr_state.angles)
if self.curr_state.dt < self.time() - self.elapsedTime:
# print("change the state!!!", self.curr_state_index)
self.curr_state_index = self.curr_state_index + 1
self.curr_state_index = self.curr_state_index % self.state_num
self.elapsedTime = self.time()
self.curr_state = self.state_list[self.curr_state_index]
# print("state_", self.curr_state_index)
# print(self.curr_state.angles)
# HP QP solve
lf_tangent_vec = np.array([1.0, 0.0, .0])
rf_tangent_vec = np.array([1.0, 0.0, .0])
# character_dir = copy.deepcopy(skel.com_velocity())
character_dir = skel.com_velocity()
# print(character_dir, skel.com_velocity())
character_dir[1] = 0
if np.linalg.norm(character_dir) != 0:
character_dir = character_dir / np.linalg.norm(character_dir)
centripetal_force_dir = np.cross([0.0, 1.0, 0.0], character_dir)
empty_list = []
# nonholonomic constraint
right_blade_front_point = self.skeletons[2].body("h_blade_right").to_world((0.0216+0.104, -0.0216-0.027, 0.))
right_blade_rear_point = self.skeletons[2].body("h_blade_right").to_world((0.0216-0.104, -0.0216-0.027, 0.))
if right_blade_front_point[1] < 0.005+self.ground_height and right_blade_rear_point[1] < 0.005+self.ground_height:
self.nh0.activate(True)
self.nh1.activate(True)
self.nh0.set_joint_pos(right_blade_front_point)
self.nh0.set_projected_vector(right_blade_front_point - right_blade_rear_point)
self.nh1.set_joint_pos(right_blade_rear_point)
self.nh1.set_projected_vector(right_blade_front_point - right_blade_rear_point)
else:
self.nh0.activate(False)
self.nh1.activate(False)
left_blade_front_point = self.skeletons[2].body("h_blade_left").to_world((0.0216+0.104, -0.0216-0.027, 0.))
left_blade_rear_point = self.skeletons[2].body("h_blade_left").to_world((0.0216-0.104, -0.0216-0.027, 0.))
if left_blade_front_point[1] < 0.005 +self.ground_height and left_blade_rear_point[1] < 0.005+self.ground_height:
| |
# (c) 2005 <NAME> and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# Mostly taken from PasteDeploy and stripped down for Galaxy
import inspect
import os
import re
import sys
import pkg_resources
from six import iteritems
from six.moves.urllib.parse import unquote
from galaxy.util.properties import NicerConfigParser
__all__ = ('loadapp', 'loadserver', 'loadfilter', 'appconfig')
# ---- from paste.deploy.compat --------------------------------------
"""Python 2<->3 compatibility module"""
def print_(template, *args, **kwargs):
template = str(template)
if args:
template = template % args
elif kwargs:
template = template % kwargs
sys.stdout.writelines(template)
if sys.version_info < (3, 0):
def reraise(t, e, tb):
exec('raise t, e, tb', dict(t=t, e=e, tb=tb))
else:
def reraise(t, e, tb):
exec('raise e from tb', dict(e=e, tb=tb))
# ---- from paste.deploy.util ----------------------------------------
def fix_type_error(exc_info, callable, varargs, kwargs):
"""
Given an exception, this will test if the exception was due to a
signature error, and annotate the error with better information if
so.
Usage::
try:
val = callable(*args, **kw)
except TypeError:
exc_info = fix_type_error(None, callable, args, kw)
raise exc_info[0], exc_info[1], exc_info[2]
"""
if exc_info is None:
exc_info = sys.exc_info()
if (exc_info[0] != TypeError or
str(exc_info[1]).find('arguments') == -1 or
getattr(exc_info[1], '_type_error_fixed', False)):
return exc_info
exc_info[1]._type_error_fixed = True
argspec = inspect.formatargspec(*inspect.getargspec(callable))
args = ', '.join(map(_short_repr, varargs))
if kwargs and args:
args += ', '
if kwargs:
kwargs = sorted(kwargs.keys())
args += ', '.join('%s=...' % n for n in kwargs)
gotspec = '(%s)' % args
msg = '%s; got %s, wanted %s' % (exc_info[1], gotspec, argspec)
exc_info[1].args = (msg,)
return exc_info
def _short_repr(v):
v = repr(v)
if len(v) > 12:
v = v[:8] + '...' + v[-4:]
return v
def fix_call(callable, *args, **kw):
"""
Call ``callable(*args, **kw)`` fixing any type errors that come out.
"""
try:
val = callable(*args, **kw)
except TypeError:
exc_info = fix_type_error(None, callable, args, kw)
reraise(*exc_info)
return val
def lookup_object(spec):
"""
Looks up a module or object from a some.module:func_name specification.
To just look up a module, omit the colon and everything after it.
"""
parts, target = spec.split(':') if ':' in spec else (spec, None)
module = __import__(parts)
for part in parts.split('.')[1:] + ([target] if target else []):
module = getattr(module, part)
return module
# ---- from paste.deploy.loadwsgi ------------------------------------
############################################################
# Utility functions
############################################################
def import_string(s):
return pkg_resources.EntryPoint.parse("x=" + s).load(False)
def _aslist(obj):
"""
Turn object into a list; lists and tuples are left as-is, None
becomes [], and everything else turns into a one-element list.
"""
if obj is None:
return []
elif isinstance(obj, (list, tuple)):
return obj
else:
return [obj]
def _flatten(lst):
"""
Flatten a nested list.
"""
if not isinstance(lst, (list, tuple)):
return [lst]
result = []
for item in lst:
result.extend(_flatten(item))
return result
############################################################
# Object types
############################################################
class _ObjectType(object):
name = None
egg_protocols = None
config_prefixes = None
def __init__(self):
# Normalize these variables:
self.egg_protocols = [_aslist(p) for p in _aslist(self.egg_protocols)]
self.config_prefixes = [_aslist(p) for p in _aslist(self.config_prefixes)]
def __repr__(self):
return '<%s protocols=%r prefixes=%r>' % (
self.name, self.egg_protocols, self.config_prefixes)
def invoke(self, context):
assert context.protocol in _flatten(self.egg_protocols)
return fix_call(context.object,
context.global_conf, **context.local_conf)
class _App(_ObjectType):
name = 'application'
egg_protocols = ['paste.app_factory', 'paste.composite_factory',
'paste.composit_factory']
config_prefixes = [['app', 'application'], ['composite', 'composit'],
'pipeline', 'filter-app']
def invoke(self, context):
if context.protocol in ('paste.composit_factory',
'paste.composite_factory'):
return fix_call(context.object,
context.loader, context.global_conf,
**context.local_conf)
elif context.protocol == 'paste.app_factory':
return fix_call(context.object, context.global_conf, **context.local_conf)
else:
assert 0, "Protocol %r unknown" % context.protocol
APP = _App()
class _Filter(_ObjectType):
name = 'filter'
egg_protocols = [['paste.filter_factory', 'paste.filter_app_factory']]
config_prefixes = ['filter']
def invoke(self, context):
if context.protocol == 'paste.filter_factory':
return fix_call(context.object,
context.global_conf, **context.local_conf)
elif context.protocol == 'paste.filter_app_factory':
def filter_wrapper(wsgi_app):
# This should be an object, so it has a nicer __repr__
return fix_call(context.object,
wsgi_app, context.global_conf,
**context.local_conf)
return filter_wrapper
else:
assert 0, "Protocol %r unknown" % context.protocol
FILTER = _Filter()
class _Server(_ObjectType):
name = 'server'
egg_protocols = [['paste.server_factory', 'paste.server_runner']]
config_prefixes = ['server']
def invoke(self, context):
if context.protocol == 'paste.server_factory':
return fix_call(context.object,
context.global_conf, **context.local_conf)
elif context.protocol == 'paste.server_runner':
def server_wrapper(wsgi_app):
# This should be an object, so it has a nicer __repr__
return fix_call(context.object,
wsgi_app, context.global_conf,
**context.local_conf)
return server_wrapper
else:
assert 0, "Protocol %r unknown" % context.protocol
SERVER = _Server()
# Virtual type: (@@: There's clearly something crufty here;
# this probably could be more elegant)
class _PipeLine(_ObjectType):
name = 'pipeline'
def invoke(self, context):
app = context.app_context.create()
filters = [c.create() for c in context.filter_contexts]
filters.reverse()
for filter_ in filters:
app = filter_(app)
return app
PIPELINE = _PipeLine()
class _FilterApp(_ObjectType):
name = 'filter_app'
def invoke(self, context):
next_app = context.next_context.create()
filter_ = context.filter_context.create()
return filter_(next_app)
FILTER_APP = _FilterApp()
class _FilterWith(_App):
name = 'filtered_with'
def invoke(self, context):
filter_ = context.filter_context.create()
filtered = context.next_context.create()
if context.next_context.object_type is APP:
return filter_(filtered)
else:
# filtering a filter
def composed(app):
return filter_(filtered(app))
return composed
FILTER_WITH = _FilterWith()
############################################################
# Loaders
############################################################
def loadapp(uri, name=None, **kw):
return loadobj(APP, uri, name=name, **kw)
def loadfilter(uri, name=None, **kw):
return loadobj(FILTER, uri, name=name, **kw)
def loadserver(uri, name=None, **kw):
return loadobj(SERVER, uri, name=name, **kw)
def appconfig(uri, name=None, relative_to=None, global_conf=None):
context = loadcontext(APP, uri, name=name,
relative_to=relative_to,
global_conf=global_conf)
return context.config()
_loaders = {}
def loadobj(object_type, uri, name=None, relative_to=None,
global_conf=None):
context = loadcontext(
object_type, uri, name=name, relative_to=relative_to,
global_conf=global_conf)
return context.create()
def loadcontext(object_type, uri, name=None, relative_to=None,
global_conf=None):
if '#' in uri:
if name is None:
uri, name = uri.split('#', 1)
else:
# @@: Ignore fragment or error?
uri = uri.split('#', 1)[0]
if name is None:
name = 'main'
if ':' not in uri:
raise LookupError("URI has no scheme: %r" % uri)
scheme, path = uri.split(':', 1)
scheme = scheme.lower()
if scheme not in _loaders:
raise LookupError(
"URI scheme not known: %r (from %s)"
% (scheme, ', '.join(_loaders.keys())))
return _loaders[scheme](
object_type,
uri, path, name=name, relative_to=relative_to,
global_conf=global_conf)
def _loadconfig(object_type, uri, path, name, relative_to,
global_conf):
isabs = os.path.isabs(path)
# De-Windowsify the paths:
path = path.replace('\\', '/')
if not isabs:
if not relative_to:
raise ValueError(
"Cannot resolve relative uri %r; no relative_to keyword "
"argument given" % uri)
relative_to = relative_to.replace('\\', '/')
if relative_to.endswith('/'):
path = relative_to + path
else:
path = relative_to + '/' + path
if path.startswith('///'):
path = path[2:]
path = unquote(path)
loader = ConfigLoader(path)
if global_conf:
loader.update_defaults(global_conf, overwrite=False)
return loader.get_context(object_type, name, global_conf)
_loaders['config'] = _loadconfig
def _loadegg(object_type, uri, spec, name, relative_to,
global_conf):
loader = EggLoader(spec)
return loader.get_context(object_type, name, global_conf)
_loaders['egg'] = _loadegg
def _loadfunc(object_type, uri, spec, name, relative_to,
global_conf):
loader = FuncLoader(spec)
return loader.get_context(object_type, name, global_conf)
_loaders['call'] = _loadfunc
############################################################
# Loaders
############################################################
class _Loader(object):
def get_app(self, name=None, global_conf=None):
return self.app_context(
name=name, global_conf=global_conf).create()
def get_filter(self, name=None, global_conf=None):
return self.filter_context(
name=name, global_conf=global_conf).create()
def get_server(self, name=None, global_conf=None):
return self.server_context(
name=name, global_conf=global_conf).create()
def app_context(self, name=None, global_conf=None):
return self.get_context(
APP, name=name, global_conf=global_conf)
def filter_context(self, name=None, global_conf=None):
return self.get_context(
FILTER, name=name, global_conf=global_conf)
def server_context(self, name=None, global_conf=None):
return self.get_context(
SERVER, name=name, global_conf=global_conf)
_absolute_re = re.compile(r'^[a-zA-Z]+:')
def absolute_name(self, name):
"""
Returns true if the name includes a scheme
"""
if name is None:
return False
return self._absolute_re.search(name)
class ConfigLoader(_Loader):
def __init__(self, filename):
self.filename = filename = filename.strip()
defaults = {
'here': os.path.dirname(os.path.abspath(filename)),
'__file__': os.path.abspath(filename)
}
self.parser = NicerConfigParser(filename, defaults=defaults)
self.parser.optionxform = str # Don't lower-case keys
with open(filename) as f:
self.parser.read_file(f)
def update_defaults(self, new_defaults, overwrite=True):
for key, value in iteritems(new_defaults):
if not overwrite and key in self.parser._defaults:
continue
self.parser._defaults[key] = value
def get_context(self, object_type, name=None, global_conf=None):
if self.absolute_name(name):
return loadcontext(object_type, name,
relative_to=os.path.dirname(self.filename),
global_conf=global_conf)
section = self.find_config_section(
object_type, name=name)
if global_conf is None:
global_conf = {}
else:
global_conf = global_conf.copy()
defaults = self.parser.defaults()
global_conf.update(defaults)
local_conf = {}
global_additions = {}
get_from_globals = {}
for option in self.parser.options(section):
if option.startswith('set '):
name = option[4:].strip()
global_additions[name] = global_conf[name] = (
self.parser.get(section, option))
elif option.startswith('get '):
name = option[4:].strip()
get_from_globals[name] = self.parser.get(section, option)
else:
if option in defaults:
# @@: It's a global option (?), so skip it
continue
local_conf[option] = self.parser.get(section, option)
for local_var, glob_var in get_from_globals.items():
local_conf[local_var] = global_conf[glob_var]
if object_type in (APP, FILTER) and 'filter-with' in local_conf:
filter_with = local_conf.pop('filter-with')
else:
filter_with = None
if 'require' in local_conf:
for spec in local_conf['require'].split():
pkg_resources.require(spec)
del local_conf['require']
if section.startswith('filter-app:'):
context = self._filter_app_context(
object_type, section, name=name,
global_conf=global_conf, local_conf=local_conf,
global_additions=global_additions)
elif section.startswith('pipeline:'):
context = self._pipeline_app_context(
object_type, section, name=name,
global_conf=global_conf, local_conf=local_conf,
global_additions=global_additions)
elif 'use' in local_conf:
context = self._context_from_use(
object_type, local_conf, global_conf, global_additions,
section)
else:
context = self._context_from_explicit(
object_type, local_conf, global_conf, global_additions,
section)
if filter_with is not None:
filter_with_context = LoaderContext(
obj=None,
object_type=FILTER_WITH,
protocol=None,
global_conf=global_conf, local_conf=local_conf,
loader=self)
filter_with_context.filter_context = self.filter_context(
name=filter_with, global_conf=global_conf)
filter_with_context.next_context = context
return filter_with_context
return context
| |
/ n)) \
if style == 'American' or i in specified else p * np.exp(-self.r * self.t / n)
return reduce(backward, range(1, n), options[option](fn(mc, 0), *strikes))
if option in options:
if exercise == 'barrier':
act, barrier, rebate = info
if not isinstance(barrier, float):
raise ValueError('Barrier must be float.')
if not isinstance(rebate, float):
raise ValueError('Rebate must be float.')
acts = {'up-out': lambda p: (p < barrier).all(), 'down-out': lambda p: (p > barrier).all(),
'up-in': lambda p: (p > barrier).any(), 'down-in': lambda p: (p < barrier).any()}
if style == 'European':
def cutoff(key, mc):
return np.exp(-self.r * self.t) * options[option](mc[-1], *strikes) if acts[key](mc) else rebate
elif style == 'American' or style == 'Bermudan':
def cutoff(key, mc):
return early_exercise(mc, lambda x, i: x[i]) if acts[key](mc) else rebate
else:
raise ValueError('Style type doest not exist.')
return np.fromiter((cutoff(act, self.monte_carlo(True, *setting)) for _ in range(times)),
dtype=np.float).mean()
elif style == 'European':
fns = {'float': lambda fn, mc: (mc[-1], fn(mc)), 'lookback': lambda mc: mc.min()
if (option.split('-')[0] == 'call') ^ (exercise == 'lookback-fixed') else mc.max()}
if exercise == 'vanilla':
p = np.fromiter((options[option](self.monte_carlo(False, *setting), *strikes)
for _ in range(times)), dtype=np.float).mean()
elif exercise == 'Asian-fixed':
p = np.fromiter((options[option](self.monte_carlo(True, *setting).mean(), *strikes)
for _ in range(times)), dtype=np.float).mean()
elif exercise == 'Asian-float':
p = np.fromiter((options[option](*fns['float'](lambda x: x.mean(), self.monte_carlo(True, *setting)))
for _ in range(times)), dtype=np.float).mean()
elif exercise == 'lookback-fixed':
p = np.fromiter((options[option](fns['lookback'](self.monte_carlo(True, *setting)), *strikes)
for _ in range(times)), dtype=np.float).mean()
elif exercise == 'lookback-float':
p = np.fromiter((options[option](*fns['float'](fns['lookback'], self.monte_carlo(True, *setting)))
for _ in range(times)), dtype=np.float).mean()
else:
raise ValueError('Exercise type does not exist.')
return np.exp(-self.r * self.t) * p
elif style == 'American' or style == 'Bermudan':
fns = {'vanilla': lambda x, i: x[i], 'Asian': lambda x, i: x[i:].mean(),
'lookback-fixed-call': lambda x, i: x[i:].max(), 'lookback-fixed-put': lambda x, i: x[i:].min(),
'lookback-float-call': lambda x, i: x[i:].min(), 'lookback-float-put': lambda x, i: x[i:].max()}
exercise, *ff = exercise.split('-')
_float = False
if len(ff):
_float = ff[0] == 'float'
if exercise == 'lookback':
exercise = '-'.join((exercise, *ff, option.split('-')[0]))
return np.fromiter((early_exercise(np.flip(self.monte_carlo(True, *setting)), fns[exercise],
_float) for _ in range(times)), dtype=np.float).mean()
else:
raise ValueError('Style type does not exist.')
else:
raise ValueError('Option type does not exist.')
class OrnsteinUhlenbeck(Stochastic):
""" Interest rate model, also known as Vasicek process """
def __init__(self, r, sigma, t, kappa, theta, s=0.0, q=0.0):
"""
Additional Parameters
---------------------
kappa: float
speed of mean reversion
theta: float
level of mean reversion
"""
super().__init__(s, r, sigma, t, q)
if not isinstance(kappa, float):
raise ValueError('Speed of mean reversion must be float.')
if not isinstance(theta, float):
raise ValueError('Level of mean reversion must be float.')
self.kappa = kappa
self.theta = theta
def simulate(self, n=100, pack=False):
""" dr = [kappa * (theta - r)] * dt + [sigma] * dW """
return super()._simulate((n, True, lambda x: self.kappa * (self.theta - x), lambda x, y: self.sigma), pack)
class CoxIntergellRoss(Stochastic):
""" Interest rate model """
def __init__(self, r, sigma, t, kappa, theta, s=0.0, q=0.0):
"""
Additional Parameters
---------------------
kappa: float
speed of mean reversion
theta: float
level of mean reversion
"""
super().__init__(s, r, sigma, t, q)
if not isinstance(kappa, float):
raise ValueError('Speed of mean reversion must be float.')
if not isinstance(theta, float):
raise ValueError('Level of mean reversion must be float.')
self.kappa = kappa
self.theta = theta
def simulate(self, n=100, pack=False, cutoff='truncate'):
""" dr = [kappa * (theta - r)] * dt + [sigma * sqrt(r)] * dW """
return super()._simulate((n, True, lambda r: self.kappa * (self.theta - r),
lambda r, sigma: self.sigma * np.sqrt(_modify(r, cutoff))), pack)
class CEV(Stochastic):
    def __init__(self, s, r, sigma, t, beta, q=0.0):
        """
        Additional Parameter
        --------------------
        beta: skewness of volatility surface
            between 0 and 1, inclusive, for stocks
            greater than 1 for commodity
        """
        super().__init__(s, r, sigma, t, q)
        # NOTE(review): the docstring mentions beta > 1 for commodities, but the
        # validation below rejects any beta outside [0, 1] — confirm intent.
        if not isinstance(beta, float) or beta < 0.0 or beta > 1.0:
            raise ValueError('Skewness of volatility surface must be float between 0 and 1.')
        self.beta = beta
def simulate(self, n=10000, pack=False):
""" ds = [r * s] * dt + [sigma * s ** beta] * dW """
return super()._simulate((n, False, lambda x: self.r * x, lambda x, y: self.sigma * x ** self.beta), pack)
def pde(self, ht, hs, pack, mul=2.0, grid='CN'):
"""
PDE scheme for option pricing
Parameters
----------
ht: positive float
time mesh
hs: positive float
price mesh
pack: tuple(str, str, float[, ...])
parameters for option pricing
option: str
option type selected from {'call', 'put', 'call-spread', 'put-spread'}
exercise: str
exercise type selected from {'European', 'American'}
strikes: float
mul: positive float
constant multiplier to determine upper bound of price in grid
mul > 1
grid: str
scheme type selected from {'EE', 'EI', 'CN'}
Euler Explicit / Euler Implicit / Crank Nicolson
Return
------
p: float
option price
"""
if not isinstance(ht, float) or ht <= 0.0:
raise ValueError('Time mesh must be positive float.')
if not isinstance(hs, float) or hs <= 0.0:
raise ValueError('Price mesh must be positive float.')
if not isinstance(mul, float) or mul <= 1.0:
raise ValueError('Constant multiplier must be float greater than 1.')
option, exercise, *strikes = pack
s_max = max(self.s, *strikes) * mul
if exercise not in ('European', 'American'):
raise ValueError('Exercise type must be selected from {European, American}.')
if grid not in ('EE', 'EI', 'CN'):
raise ValueError('Scheme type must be selected from {EE, EI, CN}.')
m = int(s_max / hs)
if option in options:
c = options[option](hs * np.array(range(1, m)), *strikes)
d = c.copy()
else:
raise ValueError('Option type must be selected from {}.')
def lower(x):
return (self.sigma ** 2 * x ** (2 * self.beta) * hs ** (2 * self.beta - 2) - self.r * x) * ht / 2
def mid(x):
return 1 - (self.sigma ** 2 * x ** (2 * self.beta) * hs ** (2 * self.beta - 2) + self.r) * ht
def upper(x):
return (self.sigma ** 2 * x ** (2 * self.beta) * hs ** (2 * self.beta - 2) + self.r * x) * ht / 2
md = np.array([mid(i) if grid == 'EE' else 2 - mid(i) if grid == 'EI' else 3 - mid(i) for i in range(1, m)])
ld = np.array([0 if i == m else lower(i) if grid == 'EE' else -lower(i) for i in range(2, m + 1)])
ud = np.array([0 if i == 0 else upper(i) if grid == 'EE' else -upper(i) for i in range(0, m - 1)])
diag = np.array([ud, md, ld])
if grid == 'CN':
diag2 = np.array([-ud, 4 - md, -ld])
def euler_explicit(v, i):
v = dia_matrix((diag, [1, 0, -1]), shape=(m - 1, m - 1)).dot(v)
v[0] += lower(1) * d[0] * np.exp(-self.r * (self.t - ht * i))
v[-1] += upper(m - 1) * d[-1] * np.exp(-self.r * (self.t - ht * i))
return v if exercise == 'European' else np.maximum(v, d * np.exp(-self.r * (self.t - ht * i)))
def euler_implicit(v, i):
v[0] += lower(1) * d[0] * np.exp(-self.r * (self.t - ht * i))
v[-1] += upper(m - 1) * d[-1] * np.exp(-self.r * (self.t - ht * i))
v = solve_banded((1, 1), diag, v, overwrite_b=True)
return v if exercise == 'European' else np.maximum(v, d * np.exp(-self.r * (self.t - ht * i)))
def crank_nicolson(v, i):
v = dia_matrix((diag2, [1, 0, -1]), shape=(m - 1, m - 1)).dot(v)
v[0] += lower(1) * d[0] * (
np.exp(-self.r * (self.t - ht * (i - 1))) + np.exp(-self.r * (self.t - ht * i)))
v[-1] += upper(m - 1) * d[-1] * (
np.exp(-self.r * (self.t - ht * (i - 1))) + np.exp(-self.r * (self.t - ht * i)))
v = solve_banded((1, 1), diag, v, overwrite_b=True)
return v if exercise == 'European' else np.maximum(v, d * np.exp(-self.r * (self.t - ht * i)))
n = int(self.t / ht)
if grid == 'EE':
c = reduce(euler_explicit, range(n, 0, -1), c)
elif grid == 'EI':
c = reduce(euler_implicit, range(n, 0, -1), c)
elif grid == 'CN':
c = reduce(crank_nicolson, range(n, 0, -1), c)
return c[int(self.s / hs)]
def calibrate(self, | |
expected = np.int32([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]
])
expected = expected[:, :, np.newaxis]
assert np.array_equal(segmap_aug.arr, expected)
def test_augment_segmentation_maps_multichannel_rot90(self):
segmap = ia.SegmentationMapsOnImage(
np.arange(0, 4*4).reshape((4, 4, 1)).astype(np.int32),
shape=(4, 4, 3)
)
aug = iaa.Rot90(1, keep_size=False)
segmaps_aug = aug.augment_segmentation_maps([segmap, segmap, segmap])
for i in range(3):
assert np.allclose(segmaps_aug[i].arr, np.rot90(segmap.arr, -1))
class TestAugmenter_draw_grid(unittest.TestCase):
    """Tests for Augmenter.draw_grid() with list/array inputs of 1-4 dims.

    The original tests repeated the same fixture construction in every
    method; it is factored out into ``_make_image``/``_expected_grid``.
    """

    def setUp(self):
        reseed()

    @classmethod
    def _make_image(cls):
        """Return the shared (3, 3, 3) uint8 test image."""
        image = np.zeros((3, 3, 3), dtype=np.uint8)
        image[0, 0, :] = 10
        image[0, 1, :] = 50
        image[1, 1, :] = 255
        return image

    @classmethod
    def _expected_grid(cls, image):
        """Return `image` tiled into a 2x2 grid (rows=2, cols=2)."""
        return np.vstack([
            np.hstack([image, image]),
            np.hstack([image, image])
        ])

    def test_draw_grid_list_of_3d_arrays(self):
        # list, shape (3, 3, 3)
        aug = _DummyAugmenter()
        image = self._make_image()
        grid = aug.draw_grid([image], rows=2, cols=2)
        assert np.array_equal(grid, self._expected_grid(image))

    def test_draw_grid_list_of_2d_arrays(self):
        # list, shape (3, 3) -- channel axis dropped on input,
        # expected output is broadcast back to 3 channels
        aug = _DummyAugmenter()
        image = self._make_image()
        grid = aug.draw_grid([image[..., 0]], rows=2, cols=2)
        grid_expected = np.tile(self._expected_grid(image[..., 0:1]), (1, 1, 3))
        assert np.array_equal(grid, grid_expected)

    def test_draw_grid_list_of_1d_arrays_fails(self):
        # list, shape (2,)
        aug = _DummyAugmenter()
        with self.assertRaises(Exception):
            _ = aug.draw_grid([np.zeros((2,), dtype=np.uint8)], rows=2, cols=2)

    def test_draw_grid_4d_array(self):
        # array, shape (1, 3, 3, 3)
        aug = _DummyAugmenter()
        image = self._make_image()
        grid = aug.draw_grid(np.uint8([image]), rows=2, cols=2)
        assert np.array_equal(grid, self._expected_grid(image))

    def test_draw_grid_3d_array(self):
        # array, shape (3, 3, 3)
        aug = _DummyAugmenter()
        image = self._make_image()
        grid = aug.draw_grid(image, rows=2, cols=2)
        assert np.array_equal(grid, self._expected_grid(image))

    def test_draw_grid_2d_array(self):
        # array, shape (3, 3)
        aug = _DummyAugmenter()
        image = self._make_image()
        grid = aug.draw_grid(image[..., 0], rows=2, cols=2)
        grid_expected = np.tile(self._expected_grid(image[..., 0:1]), (1, 1, 3))
        assert np.array_equal(grid, grid_expected)

    def test_draw_grid_1d_array(self):
        # array, shape (2,)
        aug = _DummyAugmenter()
        with self.assertRaises(Exception):
            _ = aug.draw_grid(np.zeros((2,), dtype=np.uint8), rows=2, cols=2)
@six.add_metaclass(ABCMeta)
class _TestAugmenter_augment_cbaois(object):
"""Class that is used to test augment_polygons() and augment_line_strings().
Originally this was only used for polygons and then made more flexible.
This is why some descriptions are still geared towards polygons.
Abbreviations:
cba = coordinate based augmentable, e.g. Polygon
cbaoi = coordinate based augmentable on image, e.g. PolygonsOnImage
"""
    def setUp(self):
        # Reset global RNG state so each test is reproducible in isolation.
        reseed()
    @abstractmethod
    def _augfunc(self, augmenter, *args, **kwargs):
        """Return augmenter.augment_*(...)."""
    @property
    @abstractmethod
    def _ObjClass(self):
        """Return Polygon, LineString or similar class."""
    @property
    @abstractmethod
    def _ObjOnImageClass(self):
        """Return PolygonsOnImage, LineStringsOnImage or similar class."""
    def _Obj(self, *args, **kwargs):
        """Construct a single CBA (e.g. a Polygon) of the subclass's type."""
        return self._ObjClass(*args, **kwargs)
    def _ObjOnImage(self, *args, **kwargs):
        """Construct a CBA-on-image container of the subclass's type."""
        return self._ObjOnImageClass(*args, **kwargs)
    def _compare_coords_of_cba(self, observed, expected, atol=1e-4, rtol=0):
        """Elementwise coordinate comparison with absolute tolerance."""
        return np.allclose(observed, expected, atol=atol, rtol=rtol)
def test_single_empty_instance(self):
# single instance of PolygonsOnImage with 0 polygons
aug = iaa.Rot90(1, keep_size=False)
cbaoi = self._ObjOnImage([], shape=(10, 11, 3))
cbaoi_aug = self._augfunc(aug, cbaoi)
assert isinstance(cbaoi_aug, self._ObjOnImageClass)
assert cbaoi_aug.empty
assert cbaoi_aug.shape == (11, 10, 3)
def test_list_of_single_empty_instance(self):
# list of PolygonsOnImage with 0 polygons
aug = iaa.Rot90(1, keep_size=False)
cbaoi = self._ObjOnImage([], shape=(10, 11, 3))
cbaois_aug = self._augfunc(aug, [cbaoi])
assert isinstance(cbaois_aug, list)
assert isinstance(cbaois_aug[0], self._ObjOnImageClass)
assert cbaois_aug[0].empty
assert cbaois_aug[0].shape == (11, 10, 3)
def test_two_cbaois_each_two_cbas(self):
# 2 PolygonsOnImage, each 2 polygons
aug = iaa.Rot90(1, keep_size=False)
cbaois = [
self._ObjOnImage(
[self._Obj([(0, 0), (5, 0), (5, 5)]),
self._Obj([(1, 1), (6, 1), (6, 6)])],
shape=(10, 10, 3)),
self._ObjOnImage(
[self._Obj([(2, 2), (7, 2), (7, 7)]),
self._Obj([(3, 3), (8, 3), (8, 8)])],
shape=(10, 10, 3)),
]
cbaois_aug = self._augfunc(aug, cbaois)
assert isinstance(cbaois_aug, list)
assert isinstance(cbaois_aug[0], self._ObjOnImageClass)
assert isinstance(cbaois_aug[0], self._ObjOnImageClass)
assert len(cbaois_aug[0].items) == 2
assert len(cbaois_aug[1].items) == 2
kp_offset = 0
assert self._compare_coords_of_cba(
cbaois_aug[0].items[0].coords,
[(10-0+kp_offset, 0), (10-0+kp_offset, 5), (10-5+kp_offset, 5)],
atol=1e-4, rtol=0
)
assert self._compare_coords_of_cba(
cbaois_aug[0].items[1].coords,
[(10-1+kp_offset, 1), (10-1+kp_offset, 6), (10-6+kp_offset, 6)],
atol=1e-4, rtol=0
)
assert self._compare_coords_of_cba(
cbaois_aug[1].items[0].coords,
[(10-2+kp_offset, 2), (10-2+kp_offset, 7), (10-7+kp_offset, 7)],
atol=1e-4, rtol=0
)
assert self._compare_coords_of_cba(
cbaois_aug[1].items[1].coords,
[(10-3+kp_offset, 3), (10-3+kp_offset, 8), (10-8+kp_offset, 8)],
atol=1e-4, rtol=0
)
assert cbaois_aug[0].shape == (10, 10, 3)
assert cbaois_aug[1].shape == (10, 10, 3)
def test_randomness_between_and_within_batches(self):
# test whether there is randomness within each batch and between
# batches
aug = iaa.Rot90((0, 3), keep_size=False)
cba = self._Obj([(0, 0), (5, 0), (5, 5)])
cbaoi = self._ObjOnImage(
[cba.deepcopy() for _ in sm.xrange(1)],
shape=(10, 11, 3)
)
cbaois = [cbaoi.deepcopy() for _ in sm.xrange(100)]
cbaois_aug1 = self._augfunc(aug, cbaois)
cbaois_aug2 = self._augfunc(aug, cbaois)
# --> different between runs
cbas1 = [cba
for cbaoi in cbaois_aug1
for cba in cbaoi.items]
cbas2 = [cba
for cbaoi in cbaois_aug2
for cba in cbaoi.items]
assert len(cbas1) == len(cbas2)
same = []
for cba1, cba2 in zip(cbas1, cbas2):
points1 = np.float32(cba1.coords)
points2 = np.float32(cba2.coords)
same.append(self._compare_coords_of_cba(points1, points2,
atol=1e-2, rtol=0))
assert not np.all(same)
# --> different between PolygonOnImages
same = []
points1 = np.float32([cba.coords
for cba
in cbaois_aug1[0].items])
for cba in cbaois_aug1[1:]:
points2 = np.float32([cba.coords
for cba
in cba.items])
same.append(self._compare_coords_of_cba(points1, points2,
atol=1e-2, rtol=0))
assert not np.all(same)
# --> different between polygons
points1 = set()
for cba in cbaois_aug1[0].items:
for point in cba.coords:
points1.add(tuple(
[int(point[0]*10), int(point[1]*10)]
))
assert len(points1) > 1
    def test_determinism(self):
        """A deterministic augmenter must still vary across the batch but
        produce identical results across repeated runs."""
        aug = iaa.Rot90((0, 3), keep_size=False)
        aug_det = aug.to_deterministic()
        cba = self._Obj([(0, 0), (5, 0), (5, 5)])
        cbaoi = self._ObjOnImage(
            [cba.deepcopy() for _ in sm.xrange(1)],
            shape=(10, 11, 3)
        )
        cbaois = [cbaoi.deepcopy() for _ in sm.xrange(100)]
        cbaois_aug1 = self._augfunc(aug_det, cbaois)
        cbaois_aug2 = self._augfunc(aug_det, cbaois)
        # --> different between PolygonsOnImages
        same = []
        points1 = np.float32([cba.coords
                              for cba
                              in cbaois_aug1[0].items])
        for cbaoi in cbaois_aug1[1:]:
            points2 = np.float32([cba.coords
                                  for cba
                                  in cbaoi.items])
            same.append(self._compare_coords_of_cba(points1, points2,
                                                    atol=1e-2, rtol=0))
        assert not np.all(same)
        # --> similar between augmentation runs
        cbas1 = [cba
                 for cbaoi in cbaois_aug1
                 for cba in cbaoi.items]
        cbas2 = [cba
                 for cbaoi in cbaois_aug2
                 for cba in cbaoi.items]
        assert len(cbas1) == len(cbas2)
        for cba1, cba2 in zip(cbas1, cbas2):
            points1 = np.float32(cba1.coords)
            points2 = np.float32(cba2.coords)
            assert self._compare_coords_of_cba(points1, points2,
                                               atol=1e-2, rtol=0)
    def test_aligned_with_images(self):
        """CBA augmentation must pick the same rotation as image augmentation
        for every sample in the batch."""
        aug = iaa.Rot90((0, 3), keep_size=False)
        aug_det = aug.to_deterministic()
        image = np.zeros((10, 20), dtype=np.uint8)
        image[5, :] = 255
        image[2:5, 10] = 255
        # Reference results for all four possible rotations.
        image_rots = [iaa.Rot90(k, keep_size=False).augment_image(image)
                      for k in [0, 1, 2, 3]]
        cba = self._Obj([(0, 0), (10, 0), (10, 20)])
        kp_offs = 0  # offset
        cbas_rots = [
            [(0, 0), (10, 0), (10, 20)],
            [(10-0+kp_offs, 0), (10-0+kp_offs, 10), (10-20+kp_offs, 10)],
            [(20-0+kp_offs, 10), (20-10+kp_offs, 10), (20-10+kp_offs, -10)],
            [(10-10+kp_offs, 20), (10-10+kp_offs, 10), (10-(-10)+kp_offs, 10)]
        ]
        cbaois = [self._ObjOnImage([cba], shape=image.shape)
                  for _ in sm.xrange(50)]
        images_aug = aug_det.augment_images([image] * 50)
        cbaois_aug = self._augfunc(aug_det, cbaois)
        seen = set()
        for image_aug, cbaoi_aug in zip(images_aug, cbaois_aug):
            found_image = False
            for img_rot_idx, img_rot in enumerate(image_rots):
                if (image_aug.shape == img_rot.shape
                        and np.allclose(image_aug, img_rot)):
                    found_image = True
                    break
            found_cba = False
            for poly_rot_idx, cba_rot in enumerate(cbas_rots):
                coords_observed = cbaoi_aug.items[0].coords
                if self._compare_coords_of_cba(coords_observed, cba_rot):
                    found_cba = True
                    break
            # NOTE: img_rot_idx/poly_rot_idx deliberately leak out of the
            # loops above; the found_* asserts guarantee they were set by a
            # matching iteration.
            assert found_image
            assert found_cba
            assert img_rot_idx == poly_rot_idx
            seen.add((img_rot_idx, poly_rot_idx))
        assert 2 <= len(seen) <= 4  # assert not always the same rot
def test_aligned_with_images_despite_empty_instances(self):
# Test if augmenting lists of e.g. PolygonsOnImage is still aligned
# with image augmentation when one e.g. PolygonsOnImage instance is
# empty (e.g. contains no polygons)
cba = self._Obj([(0, 0), (5, 0), (5, 5), (0, 5)])
cbaoi_lst = [
self._ObjOnImage([cba.deepcopy()], shape=(10, 20)),
self._ObjOnImage([cba.deepcopy()], shape=(10, 20)),
self._ObjOnImage([cba.shift(x=1)], shape=(10, 20)),
self._ObjOnImage([cba.deepcopy()], shape=(10, 20)),
self._ObjOnImage([cba.deepcopy()], shape=(10, 20)),
self._ObjOnImage([], shape=(1, 8)),
self._ObjOnImage([cba.deepcopy()], shape=(10, 20)),
self._ObjOnImage([cba.deepcopy()], shape=(10, 20)),
self._ObjOnImage([cba.shift(x=1)], shape=(10, 20)),
self._ObjOnImage([cba.deepcopy()], shape=(10, 20)),
self._ObjOnImage([cba.deepcopy()], shape=(10, 20))
]
image = np.zeros((10, 20), dtype=np.uint8)
image[0, 0] = 255
image[0, 5] = 255
image[5, 5] = 255
image[5, 0] = 255
images = np.tile(image[np.newaxis, :, :], (len(cbaoi_lst), 1, 1))
aug = iaa.Affine(translate_px={"x": (0, 8)}, order=0, mode="constant",
cval=0)
for _ in sm.xrange(10):
for is_list in [False, True]:
aug_det = aug.to_deterministic()
inputs = images
if is_list:
inputs = list(inputs)
images_aug = aug_det.augment_images(inputs)
cbaoi_aug_lst = self._augfunc(aug_det, cbaoi_lst)
if is_list:
images_aug = np.array(images_aug, dtype=np.uint8)
translations_imgs = np.argmax(images_aug[:, 0, :], axis=1)
translations_points = [
(cbaoi.items[0].coords[0][0] if not cbaoi.empty else None)
for cbaoi
in | |
# project_code/classes.py
import math
import enum
# Enum of all supported branch types, built with the functional Enum API
# (member values are auto-assigned 1..6 in declaration order).
BranchTypeEnum = enum.Enum(value='BranchTypeEnum',
                           names=('Line', 'Coupler', 'Transformer',
                                  'Transformer2W', 'Transformer3W3', 'Transformer3W2'))
class Branch:
    """A network branch (line, coupler or transformer) between two nodes."""

    # Running count of created branches; used to assign self.index.
    n_of_branches = 0
    IATL_max = 5000  # Threshold above which IATL are regarded as infinite.
    Sbase = 1.0  # MVA, for p.u conversion.

    def __init__(self, name_from, name_to, order, impedance, PATL, v_base,
                 branch_type, display_name):
        """Create a branch between nodes `name_from` and `name_to`.

        `order` distinguishes parallel branches between the same node pair;
        `impedance` is in p.u., `PATL` in MW, `v_base` in kV.
        """
        self.index = Branch.n_of_branches
        self.name_from = name_from
        self.name_to = name_to
        self.name_branch = name_from + " " + name_to + " " + order
        self.display_name = display_name
        self.order = order
        self.country = None
        # Node objects are attached later, after all nodes are created.
        self.node_from = None
        self.node_to = None
        self.ring = 99  # 99 means "ring not yet assigned"
        self.connected = False
        self.is_tie_line = False
        self.type = branch_type
        self.v_base = v_base  # in kV
        self.impedance = impedance  # should be given in p.u.
        # PATLs above the IATL-derived MW threshold are treated as infinite
        # and stored as 0 (sentinel).
        if PATL > Branch.IATL_max * math.sqrt(3) * self.v_base / 1000:
            self.PATL = 0
        else:
            self.PATL = PATL  # in MW
        self.PTDF = 1.0
        Branch.n_of_branches += 1

    def __str__(self):
        return f"Branch nr {self.index}: {self.type} '{self.name_branch}', "\
            f"impedance {self.impedance:.5f} pu, max power {self.PATL:.1f} MW, " \
            f"ring {self.ring}, is a tie line: {self.is_tie_line}"

    def __eq__(self, other):
        # Full attribute-dict comparison (Nodes compare by identity, so this
        # does not recurse).
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other

    def __lt__(self, other):
        # Sort branches alphabetically by their composite name.
        return self.name_branch < other.name_branch

    def set_country(self):
        """Derive the branch country from its end nodes; mark tie lines."""
        if (self.node_from is None) or (self.node_to is None):
            raise ValueError('Cannot set country for branch if there are not two '
                             'nodes connected to the branch.')
        if self.node_to.country != self.node_from.country:
            self.country = 'TIE'
            self.is_tie_line = True
            # A coupler crossing a border is reclassified as a line.
            if self.type == BranchTypeEnum.Coupler:
                self.type = BranchTypeEnum.Line
        else:
            self.country = self.node_from.country

    def is_branch_a_tie_line(self):
        """True if flagged as tie line or if either end is an X-node."""
        if self.is_tie_line:
            return True
        else:
            return self.node_from.is_x_node() or self.node_to.is_x_node()

    def connect_to_grid(self):
        """Mark branch and both its end nodes as connected (recursive flood)."""
        self.connected = True
        for node in [self.node_from, self.node_to]:
            node.connect_to_grid()

    def insert_in_control_area(self):
        """Propagate control-area membership through non-tie-line branches."""
        if not self.is_tie_line:
            for node in [self.node_from, self.node_to]:
                # print(f'Trying to insert node {node.index}: {node.name}')
                node.insert_in_control_area()

    def increase_ring(self, ring_idx):
        """Advance ring numbering from ring `ring_idx` across this branch."""
        self.update_ring_nr_for_connected_nodes(ring_idx)
        self.update_ring_nr_for_branch()

    def update_ring_nr_for_connected_nodes(self, ring_idx):
        """Assign ring numbers to unvisited (ring == 99) neighbor nodes.

        X-nodes stay in the current ring and the expansion continues through
        their other branches (recursively), so virtual border nodes do not
        consume a ring level.
        """
        for (node1, node2) in [(self.node_from, self.node_to), (self.node_to, self.node_from)]:
            if not (node1.ring == ring_idx and node2.ring == 99):
                continue
            if not node2.is_x_node():
                node2.ring = ring_idx + 1
            else:  # if x-node: set x-node to lower ring, and update branch from x-node to outside.
                node2.ring = ring_idx
                for branch in node2.branches:
                    branch.increase_ring(ring_idx)

    def update_ring_nr_for_branch(self):
        """A branch belongs to the lower ring of its two end nodes."""
        self.ring = min((self.node_from.ring, self.node_to.ring))

    def remove(self, all_branches, all_nodes):
        """Removes branch, and all nodes only connected to that branch"""
        # Case 1: if one of the nodes is only conn to branch, then calling the node remove function
        # will remove the node, the branch, and the branch from the branch list of the other node.
        if len(self.node_from.branches) == 1:
            self.node_from.remove(all_branches, all_nodes)
            return
        elif len(self.node_to.branches) == 1:
            self.node_to.remove(all_branches, all_nodes)
            return
        # Case 2: if not case 1, then just remove from branch list from both nodes and remove branch
        for node in (self.node_from, self.node_to):
            node.branches = [branch for branch in node.branches if branch != self]
        all_branches.remove(self)
        # NOTE(review): re-indexing after removal is deliberately disabled —
        # confirm indices are not relied upon downstream.
        # for i in range(len(all_branches)):
        #     all_branches[i].index = i
        # for i in range(len(all_nodes)):
        #     all_nodes[i].index = i

    @staticmethod
    def header():
        """Column header matching save_to_file_str()."""
        return "Index,Type,Name,Node From,Node To,Impedance_pu,PATL_MW,Ring,Tie-Line"

    def save_to_file_str(self):
        """One CSV line describing this branch (see header())."""
        return f"{self.index},{self.type},{self.name_branch},{self.name_from},{self.name_to}," \
            f"{self.impedance:.5f},{self.PATL:.1f},{self.ring},{self.is_tie_line}"

    def apply_couplers(self, dict_couplers, nodes=None):
        """Applies couplers.
        Logic: a branch X from A to B now has to go from A to C. How to do this:
        1: X needs to be removed from the branch list of B.
        2: the to_node of X needs to change from B to C (name and actual node)
        3: the branch list of C needs to be appended with X
        4: the branch name has to be re-established based on new nodes

        NOTE(review): `nodes` defaults to None but is iterated whenever a
        coupler mapping matches and the node is attached — confirm callers
        always pass the node list in that case.
        """
        if self.name_from in dict_couplers:
            if self.node_from is not None:
                old_node_from = [n for n in nodes if n.name == self.name_from][0]
                old_node_from.branches.remove(self)
                self.node_from = [n for n in nodes if n.name == dict_couplers[self.name_from]][0]
                self.node_from.branches.append(self)
            self.name_from = dict_couplers[self.name_from]
        if self.name_to in dict_couplers:
            if self.node_to is not None:
                old_node_to = [n for n in nodes if n.name == self.name_to][0]
                old_node_to.branches.remove(self)
                self.node_to = [n for n in nodes if n.name == dict_couplers[self.name_to]][0]
                self.node_to.branches.append(self)
            self.name_to = dict_couplers[self.name_to]
        self.name_branch = self.name_from + " " + self.name_to + " " + self.order
class Node:
    """A network node (busbar) holding its branches and generation units."""

    # Running count of created nodes; used to assign self.index.
    nbNodes = 0
    # Significant characters used to determined rings : 8, one ring per node; 7,
    # one ring per voltage level; 6, one ring per substation (all voltage)
    ring_chars = 7

    def __init__(self, name):
        self.index = Node.nbNodes
        self.country = None
        self.name = name
        self.branches = []
        self.generators = []
        self.ring = 99  # 99 means "ring not yet assigned"
        self.connected = False
        Node.nbNodes += 1

    def __str__(self):
        return f"Node {self.index}: '{self.name}', ring {self.ring}, " \
            f"connected: {self.connected}, branches {[elt.index for elt in self.branches]}"

    def insert_in_control_area(self):
        """Mark this node as ring 0 and propagate through its branches."""
        if self.ring == 99:
            self.ring = 0
            for branch in self.branches:
                # print(f'trying to insert branch {branch.index} to control area '
                #       f'({branch.type} {branch.name_branch})')
                branch.insert_in_control_area()

    def connect_to_grid(self):
        """Mark this node as connected and propagate through its branches."""
        if not self.connected:
            self.connected = True
            for branch in self.branches:
                branch.connect_to_grid()

    def is_x_node(self):
        """True for X-nodes (virtual cross-border nodes, country 'X')."""
        return self.country == 'X'

    def is_border(self):
        """True if at least one attached branch is a tie line."""
        # Simplified from `if len([...]) > 0: return True / else: return False`.
        return any(branch.is_branch_a_tie_line() for branch in self.branches)

    @staticmethod
    def header():
        """Column header matching save_to_file_str()."""
        return "Index,Name,Ring,Connected,Branches"

    def save_to_file_str(self):
        """One CSV-ish line describing this node (see header())."""
        return f"{self.index},{self.name},{self.ring},{self.connected}," \
            f"{[b.index for b in self.branches]}"

    def remove(self, all_branches, all_nodes):
        """Removes node. The following consequences:
        A: if you remove a node you should also remove all branches connected to that node:
            1: remove the actual branches
            2: remove the branches from the branch list that the opposite node has.
        B: remove the node itself.
        """
        for branch in self.branches:  # all branches in branch list of this node:
            all_branches.remove(branch)  # remove from the system-wide branch list
            other_node_on_branch = [node for node in [branch.node_from, branch.node_to]
                                    if node != self][0]
            other_node_on_branch.branches.remove(branch)  # remove from the branch list of opp. node
        all_nodes.remove(self)  # remove node from system-wide node list.

    def get_equivalent_node_name(self):
        """X-nodes keep their full name; others truncate to ring_chars."""
        if self.is_x_node():
            return self.name
        else:
            return self.name[:Node.ring_chars]
class GenerationUnit:
    """A generation unit attached (by name) to a grid node."""

    # Running count of created units; used to assign self.index.
    n_of_generators = 0

    def __init__(self, name, power, name_suffix):
        """Create a unit at node `name` with max `power` (MW).

        `name_suffix`, if non-empty, is appended to the node name to form a
        unique unit name.
        """
        self.index = GenerationUnit.n_of_generators
        self.node = None  # actual Node object, resolved later
        self.node_name = name
        # BUG FIX: was `name_suffix is not ''` — an identity comparison with a
        # string literal (SyntaxWarning on Python 3.8+) that only worked via
        # CPython interning; compare by value instead.
        if name_suffix != '':
            self.name = name + '_' + name_suffix
        else:
            self.name = name
        self.power = power
        self.country = ""
        self.connected = False
        GenerationUnit.n_of_generators += 1

    def __str__(self):
        return f"Generator nr {self.index}: '{self.name}', max power {self.power:.1f} MW"

    def apply_couplers(self, dict_of_couplers):
        """Redirect the unit to the coupled node name, if a mapping exists."""
        if self.node_name in dict_of_couplers:
            self.node_name = dict_of_couplers[self.node_name]
class Result_IF:
    """Container for the influence-factor (IF) results of one element R."""

    def __init__(self, eltR, IFN1, nIFN1, IFN2, nIFN2, eltI, eltT, eltIn, eltTn, LODFit, LODFti):
        """
        Generates a result with :
        -eltR the element whose influence is assessed
        -IFN1 : the N-1 IF
        -nIFN1 : the normalized N-1 IF
        -IFN2 : the N-2 IF (according to CSAM)
        -nIFN2 : the normalized N-2 IF
        -eltI : a contingency i for which IFN2 is reached
        -eltT : an element from the CA for which IFN2 is reached
        -eltIn : a contingency i for which nIFN2 is reached
        -eltTn : an element from the CA for which nIFN2 is reached
        -LODFit / LODFti : line outage distribution factors between i and t
        """
        self.eltR = eltR
        self.IFN1 = IFN1
        self.nIFN1 = nIFN1
        self.IFN2 = IFN2
        self.nIFN2 = nIFN2
        self.eltI = eltI
        self.eltT = eltT
        self.eltIn = eltIn
        self.eltTn = eltTn
        self.LODFit = LODFit
        self.LODFti = LODFti

    @staticmethod
    def header(country):
        """CSV header block (title row plus column names) for a result file."""
        sep = ','
        return f"List of R from {country} perspective" \
            f"{sep}{sep}{sep}{sep}{sep}{sep}{sep}{sep}{sep}{sep}{sep}{sep}\n\n" \
            f"R{sep}Voltage level [kV]{sep}Country{sep}" \
            f"Type{sep}Normalized IF{sep}IF{sep}" \
            f"PATL R [MW]{sep}Ring R{sep}" \
            f"I for norm.IF{sep}" \
            f"T for norm.IF{sep}" \
            f"I IF{sep}" \
            f"T IF{sep}" \
            f"PATL for T norm.IF [MW]\n"

    def __str__(self):
        # One CSV row matching the column order produced by header().
        sep = ','
        return f"{self.eltR.display_name}{sep}{self.eltR.v_base:.0f}{sep}{self.eltR.country}{sep}" \
            f"{self.eltR.type}{sep}{self.nIFN2:.4f}{sep}{self.IFN2:.4f}{sep}" \
            f"{self.eltR.PATL:.0f}{sep}{self.eltR.ring}{sep}" \
            f"{self.eltIn.display_name} {self.eltIn.v_base:.0f} {self.eltIn.country}{sep}" \
            f"{self.eltTn.display_name} {self.eltTn.v_base:.0f} {self.eltTn.country}{sep}" \
            f"{self.eltI.display_name} {self.eltI.v_base:.0f} {self.eltI.country}{sep}" \
            f"{self.eltT.display_name} {self.eltT.v_base:.0f} {self.eltT.country}{sep}" \
            f"{self.eltTn.PATL:.0f}\n"
class Result_IF_generators:
    def __init__(self, name, power, IF, IF_branches_i, IF_branches_t,
                 IF_norm, IF_norm_branches_i, IF_norm_branches_t):
        """
        Generates a result with :
        -name: the element whose influence is assessed
        -power: the generator power
        -IF: the influence factor for this generator
        -IF_branches_i: branches i that have this IF for this generator
        -IF_branches_t: branches t that have this IF for this generator
        -IF_norm: normalized influence factor
        -IF_norm_branches_i: branches i that have this norm IF for this generator
        -IF_norm_branches_t: branches t that have this norm IF for this generator
        """
        self.name = name
        self.power = power
        self.IF = IF
        self.IF_branches_i = IF_branches_i
        self.IF_branches_t = IF_branches_t
        self.IF_norm = IF_norm
        self.IF_norm_branches_i = IF_norm_branches_i
        self.IF_norm_branches_t = IF_norm_branches_t
@staticmethod
def header(country):
sep | |
# Created by <NAME>, <NAME> Scientific
# import modules
import xml.etree.cElementTree as etree
from .converter import register, ValueConverter
@register("ED0FB1D9-4E07-47E1-B96C-4013B9AFE534")
class MassSpectrumConverter(ValueConverter):
    """
    The pyeds.MassSpectrumConverter converts mass spectrum data from the
    original binary format into pyeds.MassSpectrum.
    """

    def Convert(self, value):
        """
        Converts binary spectrum data.

        Args:
            value: pyeds.Binary
                Binary data as stored in result file.

        Returns:
            pyeds.MassSpectrum or None
                Parsed spectrum.
        """
        # Unzip and parse only when there is actual data to convert.
        if value:
            return MassSpectrumParser().parse(value.Unzip())
        return None
class MassSpectrumParser(object):
"""
The pyeds.MassSpectrumParser is used to parse mass spectrum data from
original binary format into pyeds.MassSpectrum.
"""
def parse(self, xml):
"""
Parses given pattern XML.
Args:
xml: str
Spectrum XML.
Returns:
pyeds.MassSpectrum
Mass spectrum.
"""
# parse XML
tree = etree.fromstring(xml)
# retrieve spectrum header
header_elm = tree.find('Header')
header = self._retrieve_header(header_elm)
# retrieve scan event
event_elm = tree.find('ScanEvent')
event = self._retrieve_event(event_elm)
# retrieve precursor info
precursor_elm = tree.find('PrecursorInfo')
precursor = self._retrieve_precursor(precursor_elm)
# retrieve centroids data
peaks_elm = tree.find('PeakCentroids')
centroids = self._retrieve_centroids(peaks_elm)
# retrieve profile data
points_elm = tree.find('ProfilePoints')
profile = self._retrieve_profile(points_elm)
# free memory
tree.clear()
# create spectrum
spectrum = MassSpectrum()
spectrum.Header = header
spectrum.Event = event
spectrum.Precursor = precursor
spectrum.Centroids = centroids
spectrum.Profile = profile
return spectrum
def _retrieve_header(self, header_elm):
"""Retrieves spectrum header."""
# init header
header = ScanHeader()
# get header data
if header_elm is not None:
elm = header_elm.find('SpectrumID')
if elm is not None and elm.text:
header.SpectrumID = int(elm.text)
elm = header_elm.find('InstrumentName')
if elm is not None:
header.InstrumentName = elm.text
elm = header_elm.find('DataType')
if elm is not None:
header.DataType = elm.text
elm = header_elm.find('LowPosition')
if elm is not None and elm.text:
header.LowPosition = float(elm.text)
elm = header_elm.find('HighPosition')
if elm is not None and elm.text:
header.HighPosition = float(elm.text)
elm = header_elm.find('BasePeakPosition')
if elm is not None and elm.text:
header.BasePeakPosition = float(elm.text)
elm = header_elm.find('BasePeakIntensity')
if elm is not None and elm.text:
header.BasePeakIntensity = float(elm.text)
elm = header_elm.find('TotalIntensity')
if elm is not None and elm.text:
header.TotalIntensity = float(elm.text)
# retrieve identifiers
identifiers_elm = header_elm.find('SpectrumIdentifiers')
if identifiers_elm:
for identifier_elm in identifiers_elm.iter('SpectrumIdentifier'):
identifier = ScanIdentifier()
attr = identifier_elm.get('FileID', None)
if attr is not None and attr != "-1" and attr != "":
identifier.FileID = int(attr)
attr = identifier_elm.get('ScanNumber', None)
if attr is not None and attr != "":
identifier.ScanNumber = int(attr)
attr = identifier_elm.get('MasterScanNumber', None)
if attr is not None and attr != "-1" and attr != "":
identifier.MasterScanNumber = int(attr)
attr = identifier_elm.get('RetentionTime', None)
if attr is not None and attr != "":
identifier.RetentionTime = float(attr)
# add to header
header.SpectrumIdentifiers.append(identifier)
return header
    def _retrieve_event(self, event_elm):
        """Retrieves scan event data from the 'ScanEvent' element.

        Fields with numeric types are only set when the element has non-empty
        text; plain-text fields are copied as-is (possibly None). This
        per-field asymmetry matches the original format and is preserved.
        """
        # init event
        event = ScanEvent()
        # get scan event data
        if event_elm is not None:
            elm = event_elm.find('ActivationTypes')
            if elm is not None:
                event.ActivationTypes = elm.text
            # collect all activation energies (list of floats)
            energies_elm = event_elm.find('ActivationEnergies')
            if energies_elm is not None:
                event.ActivationEnergies = []
                for elm in energies_elm.iter('double'):
                    event.ActivationEnergies.append(float(elm.text))
            elm = event_elm.find('CompensationVoltage')
            if elm is not None and elm.text:
                event.CompensationVoltage = float(elm.text)
            elm = event_elm.find('IonizationSource')
            if elm is not None and elm.text:
                event.IonizationSource = elm.text
            elm = event_elm.find('IsMultiplexed')
            if elm is not None:
                # XML boolean: literal string 'true' means True
                event.IsMultiplexed = elm.text == 'true'
            elm = event_elm.find('IsolationMass')
            if elm is not None and elm.text:
                event.IsolationMass = float(elm.text)
            elm = event_elm.find('IsolationWidth')
            if elm is not None and elm.text:
                event.IsolationWidth = float(elm.text)
            elm = event_elm.find('IsolationOffset')
            if elm is not None and elm.text:
                event.IsolationOffset = float(elm.text)
            elm = event_elm.find('MassAnalyzer')
            if elm is not None:
                event.MassAnalyzer = elm.text
            elm = event_elm.find('MSOrder')
            if elm is not None:
                event.MSOrder = elm.text
            elm = event_elm.find('Polarity')
            if elm is not None:
                event.Polarity = elm.text
            elm = event_elm.find('ResolutionAtMass200')
            if elm is not None and elm.text:
                event.ResolutionAtMass200 = int(elm.text)
            elm = event_elm.find('ScanRate')
            if elm is not None:
                event.ScanRate = elm.text
            elm = event_elm.find('ScanType')
            if elm is not None:
                event.ScanType = elm.text
        return event
def _retrieve_precursor(self, precursor_elm):
"""Retrieves precursor data."""
# init precursor
precursor = PrecursorInfo()
# get precursor data
if precursor_elm is not None:
attr = precursor_elm.get('Charge', None)
if attr is not None and attr != "":
precursor.Charge = int(attr)
attr = precursor_elm.get('Intensity', None)
if attr is not None and attr != "":
precursor.Intensity = float(attr)
attr = precursor_elm.get('InstrumentDeterminedCharge', None)
if attr is not None and attr != "":
precursor.InstrumentDeterminedCharge = int(attr)
attr = precursor_elm.get('InstrumentDeterminedMonoisotopicMass', None)
if attr is not None and attr != "":
precursor.InstrumentDeterminedMonoisotopicMass = float(attr)
attr = precursor_elm.get('IonInjectTime', None)
if attr is not None and attr != "":
precursor.IonInjectTime = float(attr)
attr = precursor_elm.get('IsolationMass', None)
if attr is not None and attr != "":
precursor.IsolationMass = float(attr)
attr = precursor_elm.get('IsolationOffset', None)
if attr is not None and attr != "":
precursor.IsolationOffset = float(attr)
attr = precursor_elm.get('IsolationWidth', None)
if attr is not None and attr != "":
precursor.IsolationWidth = float(attr)
attr = precursor_elm.get('PercentIsolationInterference', None)
if attr is not None and attr != "":
precursor.PrecursorInterference = float(attr)
attr = precursor_elm.get('PrecursorMassOrigin', None)
if attr is not None and attr != "":
precursor.PrecursorMassOrigin = str(attr)
attr = precursor_elm.get('Resolution', None)
if attr is not None and attr != "":
precursor.Resolution = int(attr)
attr = precursor_elm.get('SignalToNoise', None)
if attr is not None and attr != "":
precursor.SignalToNoise = float(attr)
attr = precursor_elm.get('SinglyChargedMass', None)
if attr is not None and attr != "":
precursor.SinglyChargedMass = float(attr)
attr = precursor_elm.get('SpectrumNumber', None)
if attr is not None and attr != "":
precursor.SpectrumNumber = int(attr)
# get spectrum header
header_elm = precursor_elm.find('SpectrumHeader')
precursor.Header = self._retrieve_header(header_elm)
# get scan event
event_elm = precursor_elm.find('ScanEvent')
precursor.Event = self._retrieve_event(event_elm)
# get mono centroids
peaks_elm = precursor_elm.find('MonoisotopicPeakCentroids')
precursor.MonoisotopicPeakCentroids = self._retrieve_centroids(peaks_elm)
# get measured centroids
peaks_elm = precursor_elm.find('MeasuredMonoisotopicPeakCentroids')
precursor.MeasuredMonoisotopicPeakCentroids = self._retrieve_centroids(peaks_elm)
# get cluster centroids
peaks_elm = precursor_elm.find('IsotopeClusterPeakCentroids')
precursor.IsotopeClusterPeakCentroids = self._retrieve_centroids(peaks_elm)
return precursor
def _retrieve_centroids(self, peaks_elm):
"""Retrieves centroids data."""
# init centroids
centroids = []
# retrieve centroids
if peaks_elm is not None:
for peak_elm in peaks_elm.iter('Peak'):
centroid = Centroid()
centroid.MZ = float(peak_elm.get('X', 0))
centroid.Intensity = float(peak_elm.get('Y', 0))
centroid.Charge = int(peak_elm.get('Z', None))
centroid.SN = float(peak_elm.get('SN', None))
centroid.Resolution = float(peak_elm.get('R', None))
centroids.append(centroid)
return tuple(centroids)
def _retrieve_profile(self, points_elm):
"""Retrieves profile data."""
# init profile
profile = []
# retrieve profile
if points_elm is not None:
for point_elm in points_elm.iter('Pt'):
mz = float(point_elm.get('X', 0))
ai = float(point_elm.get('Y', 0))
profile.append((mz, ai))
return tuple(profile)
class MassSpectrum(object):
    """
    The pyeds.MassSpectrum is used to hold information about mass spectrum.

    Attributes:

        Header: pyeds.ScanHeader
            Contains the spectrum header information.

        Event: pyeds.ScanEvent
            Contains the scan event information.

        Precursor: pyeds.PrecursorInfo
            Contains the precursor information.

        Centroids: (pyeds.Centroid,)
            Collection of spectrum centroids.

        Profile: ((float, float),)
            Collection of profile points as ((mz, intensity),)
    """

    def __init__(self):
        """Initializes a new instance of MassSpectrum."""

        self.Header = None
        self.Event = None
        self.Precursor = None
        self.Centroids = None
        self.Profile = None

    def __str__(self):
        """Gets standard string representation."""

        return "%s %s" % (self.Header, self.Event)

    def __repr__(self):
        """Gets debug string representation."""

        return "%s(%s)" % (self.__class__.__name__, self.__str__())

    def __getattr__(self, name):
        """Tries to get unknown attribute from header or event."""

        # delegate to the header first, then to the event
        for holder in (self.Header, self.Event):
            if holder is not None and hasattr(holder, name):
                return getattr(holder, name)

        raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
class ScanHeader(object):
"""
The pyeds.ScanHeader is used to hold information from mass spectrum header.
"""
def __init__(self):
"""Initializes a new instance of ScanHeader."""
self.BasePeakIntensity = None
self.BasePeakPosition = None
self.DataType = None
self.HighPosition = None
self.InstrumentName = None
self.LowPosition = None
| |
os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
with open(path,'w+') as f:
f.write(data)
# -----------------------------------------------------------------------------
# Name: mat_check(node)
# Raises: N/A
# Returns: None
# Desc: Checks if material exist and creates it otherwise.
# -----------------------------------------------------------------------------
def mat_check(node):
    """Create the material file from the engine template when it is missing,
    then refresh its values via mat_update()."""
    path = path_mat(node)
    if not os.path.isfile(path):
        # locate the template shipped with the selected engine/method
        engine = node.evalParm('engine')
        smethod = method_str(node)[0]
        script_dir = os.path.dirname(os.path.realpath(__file__))
        template_path = os.path.join(script_dir, 'engines', engine, smethod + '.mat')
        with open(template_path, 'r') as template_file:
            template = template_file.read()
        # make sure the output directory exists before writing the copy
        target_dir = os.path.dirname(path)
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
        with open(path, 'w+') as out_file:
            out_file.write(template)
    mat_update(node)
# -----------------------------------------------------------------------------
# Name: mat_update(node)
# Raises: N/A
# Returns: None
# Desc: Updates material values.
# -----------------------------------------------------------------------------
def mat_update(node):
    """Rewrite the exported material file with the current VAT attributes.

    Reads bounds/packing attributes from the node's ``data/OUT_max_min``
    geometry and replaces the matching ``- _token: value`` line in the
    material file (the last occurrence of each token wins, as before).
    Creates the file via mat_check() when it does not exist yet.
    """
    path = path_mat(node)
    if not os.path.isfile(path):
        mat_check(node)
        return

    geo = node.node("data/OUT_max_min").geometry()
    # token -> replacement value (stringified geometry attribute)
    values = {
        '_numOfFrames': str(geo.attribValue("frange")),
        '_speed': str(geo.attribValue("speed")),
        '_posMax': str(geo.attribValue("bbx_max")),
        '_posMin': str(geo.attribValue("bbx_min")),
        '_scaleMax': str(geo.attribValue("scale_max")),
        '_scaleMin': str(geo.attribValue("scale_min")),
        '_pivMax': str(geo.attribValue("pivot_max")),
        '_pivMin': str(geo.attribValue("pivot_min")),
        '_doubleTex': str(geo.attribValue("bitDepthPack")),
        '_padPowTwo': str(geo.attribValue("padpowtwo")),
        '_textureSizeX': str(geo.attribValue("img_size1")),
        '_textureSizeY': str(geo.attribValue("img_size2")),
        '_paddedSizeX': str(geo.attribValue("pad_size1")),
        '_paddedSizeY': str(geo.attribValue("pad_size2")),
        '_packNorm': str(geo.attribValue("pack_norm")),
        '_packPscale': str(geo.attribValue("pack_pscale")),
        '_normData': str(geo.attribValue("normalize_data")),
        '_width': str(geo.attribValue("width_height1")),
        '_height': str(geo.attribValue("width_height2")),
    }

    with open(path) as mat_file:
        lines = mat_file.readlines()

    # remember the last line index where each token appears (-1 = not found)
    found = dict.fromkeys(values, -1)
    for index, line in enumerate(lines):
        for token in values:
            if token in line:
                found[token] = index

    for token, index in found.items():
        # BUG FIX: the original tested the token *string* against -1 (always
        # true), so a token missing from the file left its index at -1 and
        # the write went through list[-2], corrupting an unrelated line.
        if index != -1:
            lines[index] = ' - ' + token + ': ' + values[token] + '\n'

    with open(path, 'w') as mat_file:
        mat_file.write(''.join(lines))
# -----------------------------------------------------------------------------
# Name: data(node)
# Raises: N/A
# Returns: None
# Desc: Writes the realtime JSON data file for the node.
# -----------------------------------------------------------------------------
def data(node):
    """Write the realtime (JSON) attribute data file for the node.

    Any stale file is removed first and the target directory is created
    when missing.  Serialization failures are reported but not raised so
    the export pipeline keeps going.
    """
    path = path_data(node)
    directory = os.path.dirname(path)
    # remove file if exist
    try:
        os.remove(path)
    except OSError:
        pass
    # create directory if it does not exist
    if not os.path.exists(directory):
        os.makedirs(directory)
    payload = vat_attributes(node)
    try:
        with open(path, 'w') as f:
            json.dump(payload, f, indent=4, sort_keys=True)
    except Exception:  # FIX: bare 'except:' also trapped SystemExit/KeyboardInterrupt
        print("Did not write realtime data.")
    return
# UI Presets
# -----------------------------------------------------------------------------
# Name: preset(node)
# Raises: N/A
# Returns: None
# Desc: Performs the presets for each engine.
# -----------------------------------------------------------------------------
def preset(node):
    """Apply the engine/method preset module to the node parameters."""
    engine = node.evalParm('engine')
    method = node.evalParm('method')
    # start from defaults so presets never stack
    reset(node)
    module_name = 'hda.vertex_animation_textures.engines.%s.preset' % engine
    if pkgutil.find_loader(module_name) is not None:
        preset_module = importlib.import_module(module_name)
        preset_module.preset(node, method)
# -----------------------------------------------------------------------------
# Name: preset_path(node)
# Raises: N/A
# Returns: str or None
# Desc: Returns the filesystem path of the engine preset module.
# -----------------------------------------------------------------------------
def preset_path(node):
    """Return the filesystem path of the engine preset module, or None."""
    engine = node.evalParm('engine')
    method = node.evalParm('method')  # evaluated as in the original
    module_name = 'hda.vertex_animation_textures.engines.%s.preset' % engine
    path = None
    if pkgutil.find_loader(module_name) is not None:
        preset_module = importlib.import_module(module_name)
        path = os.path.abspath(preset_module.__file__)
        print(path)
    return path
# -----------------------------------------------------------------------------
# Name: reset(node)
# Raises: N/A
# Returns: None
# Desc: Reset all parameters
# -----------------------------------------------------------------------------
def reset(node):
    """Revert every preset-controlled parameter to its default value."""
    for parm_name in (
            'normalize_data',
            'enable_geo', 'path_geo',
            'enable_pos', 'path_pos',
            'enable_rot', 'path_rot',
            'enable_scale', 'path_scale',
            'enable_norm', 'path_norm',
            'enable_col', 'path_col',
            'enable_mat', 'path_mat',
            'enable_shader', 'path_shader',
            'reverse_norm',
            'convertcolorspace',
            'depth',
            'pack_norm', 'pack_pscale',
            'coord_pos', 'invert_pos',
            'coord_rot',
            'coord_col', 'invert_col',
            'target_polycount', 'target_texture_size',
            'scale',
            'shop_materialpath',
            'scale_max_min',
    ):
        node.parm(parm_name).revertToDefaults()
# UI Control Options
# -----------------------------------------------------------------------------
# Name: primcount(node)
# Raises: N/A
# Returns: None
# Desc: Detects the prim count based on the current frame.
# -----------------------------------------------------------------------------
def primcount(node):
    """Store the input geometry's polygon count on 'target_polycount'."""
    poly_node = node.node("data/IN")
    print(poly_node.path())
    count = poly_node.geometry().countPrimType('Poly')
    # leave the parameter untouched when no polygons are present
    if count != 0:
        parm = node.parm('target_polycount')
        parm.deleteAllKeyframes()
        parm.set(count)
# -----------------------------------------------------------------------------
# Name: _depth(node)
# Raises: N/A
# Returns: (int, str) numeric type id and its name
# Desc: Maps the 'depth' and 'usebwpoints' parameters to an image type.
# -----------------------------------------------------------------------------
def _depth(node):
#print(node.path())
depth = node.evalParm('depth')
usebwpoints = node.evalParm('usebwpoints')
ntype = 7
stype = 'float32'
if (depth == 0 ) and usebwpoints == 0 : #or depth == 'int8'
ntype = 0
stype = 'int8'
if (depth == 0 ) and usebwpoints == 1 : #or depth == 'int8'
ntype = 1
stype = 'int8bw'
if (depth == 1 ) and usebwpoints == 0 : #or depth == 'int16'
ntype = 2
stype = 'int16'
if (depth == 1 ) and usebwpoints == 1 : #or depth == 'int16'
ntype = 3
stype = 'int16bw'
if (depth == 2 ) and usebwpoints == 0 : #or depth == 'int32'
ntype = 4
stype = 'int32'
if (depth == 2 ) and usebwpoints == 1 : #or depth == 'int32'
ntype = 5
stype = 'int32bw'
if (depth == 3 ): #or depth == 'float16'
ntype = 6
stype = 'float16'
if (depth == 4 ): #or depth == 'float32'
ntype = 7
stype = 'float32'
return ntype, stype
# -----------------------------------------------------------------------------
# Name: _depth_uv(node)
# Raises: N/A
# Returns: (int, str) numeric type id and its name
# Desc: Maps the 'depth_uv' parameter to an image type.
# -----------------------------------------------------------------------------
def _depth_uv(node):
#print(node.path())
depth = node.evalParm('depth_uv')
ntype = 6
stype = 'float16'
if (depth == 0 ) : #or depth == 'int8'
ntype = 0
stype = 'int8'
if (depth == 1 ) : #or depth == 'int16'
ntype = 2
stype = 'int16'
if (depth == 2 ) : #or depth == 'int32'
ntype = 4
stype = 'int32'
if (depth == 3 ): #or depth == 'float16'
ntype = 6
stype = 'float16'
if (depth == 4 ): #or depth == 'float32'
ntype = 7
stype = 'float32'
return ntype, stype
# -----------------------------------------------------------------------------
# Name: list_engines(node)
# Raises: N/A
# Returns: list of menu token/label pairs
# Desc: Searches the scripts directory for engine folders.
# -----------------------------------------------------------------------------
def list_engines(node):
    """Return [token, label, ...] menu pairs for every engine folder.

    Scans the 'engines' directory next to this script; each subfolder name
    is emitted twice (Houdini menus take token/label pairs).  On any
    failure a placeholder pair is returned instead of raising.
    """
    try:
        script_dir = os.path.dirname(os.path.realpath(__file__))
        engines_dir = os.path.join(script_dir, 'engines')
        dirs = []
        for name in os.listdir(engines_dir):
            if os.path.isdir(os.path.join(engines_dir, name)):
                dirs += [name, name]
        return dirs
    except Exception:  # FIX: bare 'except:' also trapped SystemExit/KeyboardInterrupt
        return ["MissingEnginesInScripts", "MissingEnginesInScripts"]
# -----------------------------------------------------------------------------
# Name: debug_refresh(node)
# Raises: N/A
# Returns: None
# Desc: Reloads the debug mesh and clears the texture cache (best effort).
# -----------------------------------------------------------------------------
def debug_refresh(node):
    """Best-effort: force-reload the debug mesh and clear the texture cache."""
    try:
        node.node('debug/MESH').parm('reload').pressButton()
        hou.hscript("texcache -c")
    except Exception:  # FIX: bare 'except:' also trapped SystemExit/KeyboardInterrupt
        return
# -----------------------------------------------------------------------------
# Name: frame(node)
# Raises: N/A
# Returns: None
# | |
import numpy as np
import time
import ctypes
from hetu import cpu_links as cpu_op
from hetu import ndarray
from hetu.ndarray import numpyasdlarrayhandle
def save_to_file(data, file):
    """Append *data* to *file*, creating the file when missing.

    Uses a 'with' block so the handle is closed even if write() raises
    (the original leaked the handle on failure).
    """
    with open(file, 'a+') as f:
        f.write(data)
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
# Square-matrix edge sizes used by the benchmark loops below; the row of
# numbers above gives each size's index within the list.
ll = [10, 50, 100, 200, 300, 400, 500, 600, 700, 800, 900,
      1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000]
def test_boradcast_to():
    """Benchmark cpu broadcast_to against numpy and check the results agree."""
    for size in ll:
        src_shape = (size, size)
        dst_shape = (1000, size, size)
        x = np.random.uniform(-1, 1, src_shape).astype(np.float32)
        y = np.empty(dst_shape, dtype=np.float32)
        arr_x = numpyasdlarrayhandle(x)
        arr_y = numpyasdlarrayhandle(y)
        t0 = time.time()
        for _ in range(10):
            cpu_op.broadcast_to(arr_x, arr_y)
        t1 = time.time()
        for _ in range(10):
            expected = np.broadcast_to(x, dst_shape)
        t2 = time.time()
        print(size, " cpu:", t1 - t0, " ", "numpy:", t2 - t1)
        np.testing.assert_allclose(expected, y, rtol=1e-5)
# test_boradcast_to()
def test_reduce_sum_axis_zero():
    """Compare cpu reduce_sum_axis_zero with numpy on a small tensor."""
    for _ in range(len(ll)):
        in_shape = (2, 2, 2)
        out_shape = (2, 2)
        x = np.random.uniform(-1, 1, in_shape).astype(np.float32)
        y = np.empty(out_shape, dtype=np.float32)
        arr_x = numpyasdlarrayhandle(x)
        arr_y = numpyasdlarrayhandle(y)
        cpu_op.reduce_sum_axis_zero(arr_x, arr_y)
        np_y = np.sum(x, axis=0)
        print('x:', x)
        print('np_y:', np_y)
        print('y:', y)
# test_reduce_sum_axis_zero()
def test_average_pooling():
    """Validate cpu avg_pool / avg_pool_gradient against a numpy reference."""
    ctx = ndarray.cpu(0)

    def np_average_pooling(input, kernel_H, kernel_W, padding=0, stride=1):
        """Reference NCHW average pooling."""
        N, C, H, W = input.shape
        assert ((H + 2 * padding - kernel_H) % stride == 0)
        assert ((W + 2 * padding - kernel_W) % stride == 0)
        # FIX: floor division -- true division yields floats on Python 3 and
        # breaks np.zeros/range below
        pooled_H = (H + 2 * padding - kernel_H) // stride + 1
        pooled_W = (W + 2 * padding - kernel_W) // stride + 1
        pooled_layer = np.zeros(
            shape=(N, C, pooled_H, pooled_W), dtype=np.float32)
        pooling_size = kernel_H * kernel_W
        # FIX: xrange -> range (xrange does not exist on Python 3)
        for n in range(N):
            for c in range(C):
                for h in range(pooled_H):
                    for w in range(pooled_W):
                        hs = h * stride - padding
                        ws = w * stride - padding
                        # window end uses the *unclamped* start, then clamp
                        hend = min(hs + kernel_H, H)
                        wend = min(ws + kernel_W, W)
                        hs = max(hs, 0)
                        ws = max(ws, 0)
                        for i in range(hs, hend):
                            for j in range(ws, wend):
                                pooled_layer[n][c][h][w] += input[n][c][i][j]
                        pooled_layer[n][c][h][w] /= pooling_size
        return pooled_layer

    def np_average_pooling_gradient(gradient_y, kernel_H, kernel_W, padding=0, stride=1):
        """Reference gradient of NCHW average pooling."""
        N, C, pooled_H, pooled_W = gradient_y.shape
        H = (pooled_H - 1) * stride + kernel_H - 2 * padding
        W = (pooled_W - 1) * stride + kernel_W - 2 * padding
        gradient_x = np.zeros(shape=(N, C, H, W), dtype=np.float32)
        pooling_size = kernel_H * kernel_W
        # FIX: xrange -> range
        for n in range(N):
            for c in range(C):
                for h in range(pooled_H):
                    for w in range(pooled_W):
                        hs = h * stride - padding
                        ws = w * stride - padding
                        hend = min(hs + kernel_H, H)
                        wend = min(ws + kernel_W, W)
                        hs = max(hs, 0)
                        ws = max(ws, 0)
                        for i in range(hs, hend):
                            for j in range(ws, wend):
                                gradient_x[n][c][i][j] += gradient_y[n][c][h][w] / \
                                    pooling_size
        return gradient_x

    shapeX = (100, 3, 28, 28)
    shapeY = (100, 3, 24, 24)
    # input : x , output: y
    x = np.random.uniform(0, 10, size=shapeX).astype(np.float32)
    gradient_y = np.random.uniform(0, 10, size=shapeY).astype(np.float32)
    arr_x = numpyasdlarrayhandle(x)
    arr_gradient_y = numpyasdlarrayhandle(gradient_y)
    pool_layer = np.empty(shapeY, dtype=np.float32)
    gradient_x = np.empty(shapeX, dtype=np.float32)
    arr_pool_layer = numpyasdlarrayhandle(pool_layer)
    arr_gradient_x = numpyasdlarrayhandle(gradient_x)
    cpu_op.avg_pool(arr_x, 5, 5, arr_pool_layer)
    cpu_op.avg_pool_gradient(arr_gradient_y, 5, 5, arr_gradient_x)
    np_pool_layer = np_average_pooling(x, 5, 5)
    np_gradient_x = np_average_pooling_gradient(gradient_y, 5, 5)
    np.testing.assert_allclose(np_pool_layer, pool_layer, rtol=1e-5)
    np.testing.assert_allclose(np_gradient_x, gradient_x, rtol=1e-5)
# test_average_pooling()
def test_max_pooling():
    """Validate cpu max_pool / max_pool_gradient against a numpy reference."""
    ctx = ndarray.cpu(0)

    def np_max_pooling(input, kernel_H, kernel_W, padding=0, stride=1):
        """Reference NCHW max pooling."""
        N, C, H, W = input.shape
        assert ((H + 2 * padding - kernel_H) % stride == 0)
        assert ((W + 2 * padding - kernel_W) % stride == 0)
        # FIX: floor division -- true division yields floats on Python 3 and
        # breaks np.zeros/range below
        pooled_H = (H + 2 * padding - kernel_H) // stride + 1
        pooled_W = (W + 2 * padding - kernel_W) // stride + 1
        pooled_layer = np.zeros(
            shape=(N, C, pooled_H, pooled_W), dtype=np.float32)
        pooling_size = kernel_H * kernel_W
        for n in range(N):
            for c in range(C):
                for h in range(pooled_H):
                    for w in range(pooled_W):
                        hs = h * stride - padding
                        ws = w * stride - padding
                        # window end uses the *unclamped* start, then clamp
                        hend = min(hs + kernel_H, H)
                        wend = min(ws + kernel_W, W)
                        hs = max(hs, 0)
                        ws = max(ws, 0)
                        hargmax = hs
                        wargmax = ws
                        for i in range(hs, hend):
                            for j in range(ws, wend):
                                if input[n][c][i][j] > input[n][c][hargmax][wargmax]:
                                    hargmax = i
                                    wargmax = j
                        pooled_layer[n][c][h][w] = input[n][c][hargmax][wargmax]
        return pooled_layer

    def np_max_pooling_gradient(input, gradient_y, kernel_H, kernel_W, padding=0, stride=1):
        """Reference gradient of NCHW max pooling."""
        N, C, pooled_H, pooled_W = gradient_y.shape
        H = (pooled_H - 1) * stride + kernel_H - 2 * padding
        W = (pooled_W - 1) * stride + kernel_W - 2 * padding
        gradient_x = np.zeros(shape=(N, C, H, W), dtype=np.float32)
        pooling_size = kernel_H * kernel_W
        # FIX: xrange -> range (xrange does not exist on Python 3; the sibling
        # helper above already used range)
        for n in range(N):
            for c in range(C):
                for h in range(pooled_H):
                    for w in range(pooled_W):
                        hs = h * stride - padding
                        ws = w * stride - padding
                        hend = min(hs + kernel_H, H)
                        wend = min(ws + kernel_W, W)
                        hs = max(hs, 0)
                        ws = max(ws, 0)
                        hargmax = hs
                        wargmax = ws
                        for i in range(hs, hend):
                            for j in range(ws, wend):
                                if input[n][c][i][j] > input[n][c][hargmax][wargmax]:
                                    hargmax = i
                                    wargmax = j
                        gradient_x[n][c][hargmax][wargmax] += gradient_y[n][c][h][w]
        return gradient_x

    shapeX = (100, 3, 28, 28)
    shapeY = (100, 3, 14, 14)
    x = np.random.uniform(0, 10, size=shapeX).astype(np.float32)
    gradient_y = np.random.uniform(0, 10, size=shapeY).astype(np.float32)
    arr_x = numpyasdlarrayhandle(x)
    arr_gradient_y = numpyasdlarrayhandle(gradient_y)
    pool_layer = np.empty(shapeY, dtype=np.float32)
    gradient_x = np.empty(shapeX, dtype=np.float32)
    arr_pool_layer = numpyasdlarrayhandle(pool_layer)
    arr_gradient_x = numpyasdlarrayhandle(gradient_x)
    pool_layer1 = np.empty(shapeY, dtype=np.float32)
    gradient_x1 = np.empty(shapeX, dtype=np.float32)
    arr_pool_layer1 = numpyasdlarrayhandle(pool_layer1)
    arr_gradient_x1 = numpyasdlarrayhandle(gradient_x1)
    np_pool_layer = np_max_pooling(x, 2, 2, 0, 2)
    cpu_op.max_pool(arr_x, 2, 2, arr_pool_layer, 0, 2)
    # note: the numpy pooled output is fed in as the upstream gradient
    np_gradient_x = np_max_pooling_gradient(x, np_pool_layer, 2, 2, 0, 2)
    cpu_op.max_pool_gradient(arr_x, arr_pool_layer, 2,
                             2, arr_gradient_x1, 0, 2)
    np.testing.assert_allclose(np_pool_layer, pool_layer, rtol=1e-5)
    np.testing.assert_allclose(np_gradient_x, gradient_x1, rtol=1e-5)
# test_max_pooling()
def test_matrix_multiply():
    """Benchmark cpu matrix_multiply against np.dot and compare results."""
    for size in ll:
        shape = (size, size)
        x = np.random.uniform(-5, 5, size=shape).astype(np.float32)
        y = np.random.uniform(-5, 5, size=shape).astype(np.float32)
        z = np.zeros(shape, dtype=np.float32)
        t_start = time.time()
        # numpy
        for _ in range(10):
            c_np = np.dot(x, y)
        t_numpy = time.time()
        arr_x = numpyasdlarrayhandle(x)
        arr_y = numpyasdlarrayhandle(y)
        arr_z = numpyasdlarrayhandle(z)
        t_wrapped = time.time()
        for _ in range(1):
            cpu_op.matrix_multiply(
                arr_x, False,
                arr_y, False,
                arr_z)
        t_cpu = time.time()
        print(size, " cpu:", t_cpu - t_wrapped, " ", "numpy:", t_numpy - t_start)
        np.testing.assert_allclose(c_np, z, rtol=1e-5)
#
#
# test_matrix_multiply()
def test_matrix_elementwise_multiply_by_const():
    """Benchmark cpu scalar multiply against numpy broadcasting."""
    for size in ll:
        ctx = ndarray.cpu(0)  # created per iteration, exactly as before
        shape = (size, size)
        x = np.random.uniform(-1, 1, shape).astype(np.float32)
        arr_x = numpyasdlarrayhandle(x)
        scalar = 4.754545
        z = np.empty(shape, dtype=np.float32)
        arr_z = numpyasdlarrayhandle(z)
        t0 = time.time()
        for _ in range(10):
            cpu_op.matrix_elementwise_multiply_by_const(arr_x, scalar, arr_z)
        t1 = time.time()
        for _ in range(10):
            expected = x * scalar
        t2 = time.time()
        print(size, " cpu:", t1 - t0, " ", "numpy:", t2 - t1)
        np.testing.assert_allclose(expected, z, rtol=1e-5)
#
# test_matrix_elementwise_multiply_by_const()
def test_matrix_elementwise_add_by_const():
for i in range(len(ll)):
ctx = ndarray.cpu(0)
shape | |
<filename>music21/meter/core.py
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Name: meter.core.py
# Purpose: Component objects for meters
#
# Authors: <NAME>
# <NAME>
#
# Copyright: Copyright © 2009-2012, 2015, 2021 <NAME>
# and the music21 Project
# License: BSD, see license.txt
# -----------------------------------------------------------------------------
'''
This module defines two component objects for defining nested metrical structures:
:class:`~music21.meter.core.MeterTerminal` and :class:`~music21.meter.core.MeterSequence`.
'''
import copy
from typing import Optional
from music21 import prebase
from music21.common.numberTools import opFrac
from music21.common.objects import SlottedObjectMixin
from music21 import common
from music21 import duration
from music21 import environment
from music21.exceptions21 import MeterException
from music21.meter import tools
environLocal = environment.Environment('meter.core')
# -----------------------------------------------------------------------------
class MeterTerminal(prebase.ProtoM21Object, SlottedObjectMixin):
'''
A MeterTerminal is a nestable primitive of rhythmic division.
>>> a = meter.MeterTerminal('2/4')
>>> a.duration.quarterLength
2.0
>>> a = meter.MeterTerminal('3/8')
>>> a.duration.quarterLength
1.5
>>> a = meter.MeterTerminal('5/2')
>>> a.duration.quarterLength
10.0
'''
# CLASS VARIABLES #
__slots__ = (
'_denominator',
'_duration',
'_numerator',
'_overriddenDuration',
'_weight',
)
# INITIALIZER #
def __init__(self, slashNotation=None, weight=1):
self._duration = None
self._numerator = 0
self._denominator = 1
self._weight = None
self._overriddenDuration = None
if slashNotation is not None:
# assign directly to values, not properties, to avoid
# calling _ratioChanged more than necessary
values = tools.slashToTuple(slashNotation)
if values is not None: # if failed to parse
self._numerator = values.numerator
self._denominator = values.denominator
self._ratioChanged() # sets self._duration
# this will set the underlying weight attribute directly for data checking
# explicitly calling base class method to avoid problems
# in the derived class MeterSequence
self._weight = weight
# SPECIAL METHODS #
def __deepcopy__(self, memo=None):
'''
Helper method to copy.py's deepcopy function. Call it from there.
Defining a custom __deepcopy__ here is a performance boost,
particularly in not copying _duration, directly assigning _weight, and
other benefits.
'''
# call class to get a new, empty instance
new = self.__class__()
# for name in dir(self):
new._numerator = self._numerator
new._denominator = self._denominator
new._ratioChanged() # faster than copying dur
# new._duration = copy.deepcopy(self._duration, memo)
new._weight = self._weight # these are numbers
return new
    def _reprInternal(self):
        # the ratio string, presumably consumed by the repr machinery of
        # prebase.ProtoM21Object -- TODO confirm against prebase
        return str(self)
def __str__(self):
return str(int(self.numerator)) + '/' + str(int(self.denominator))
def ratioEqual(self, other):
'''
Compare the numerator and denominator of another object.
Note that these have to be exact matches; 3/4 is not the same as 6/8
>>> from music21 import meter
>>> a = meter.MeterTerminal('3/4')
>>> b = meter.MeterTerminal('6/4')
>>> c = meter.MeterTerminal('2/4')
>>> d = meter.MeterTerminal('3/4')
>>> a.ratioEqual(b)
False
>>> a.ratioEqual(c)
False
>>> a.ratioEqual(d)
True
'''
if other is None:
return False
if (other.numerator == self.numerator
and other.denominator == self.denominator):
return True
else:
return False
# -------------------------------------------------------------------------
def subdivideByCount(self, countRequest=None):
'''
returns a MeterSequence made up of taking this MeterTerminal and
subdividing it into the given number of parts. Each of those parts
is a MeterTerminal
>>> a = meter.MeterTerminal('3/4')
>>> b = a.subdivideByCount(3)
>>> b
<music21.meter.core.MeterSequence {1/4+1/4+1/4}>
>>> len(b)
3
>>> b[0]
<music21.meter.core.MeterTerminal 1/4>
What happens if we do this?
>>> a = meter.MeterTerminal('5/8')
>>> b = a.subdivideByCount(2)
>>> b
<music21.meter.core.MeterSequence {2/8+3/8}>
>>> len(b)
2
>>> b[0]
<music21.meter.core.MeterTerminal 2/8>
>>> b[1]
<music21.meter.core.MeterTerminal 3/8>
But what if you want to divide into 3/8+2/8 or something else?
for that, see the :meth:`~music21.meter.MeterSequence.load` method
of :class:`~music21.meter.MeterSequence`.
'''
# elevate to meter sequence
ms = MeterSequence()
# cannot set the weight of this MeterSequence w/o having offsets
# pass this MeterTerminal as an argument
# when subdividing, use autoWeight
ms.load(self, countRequest, autoWeight=True, targetWeight=self.weight)
return ms
def subdivideByList(self, numeratorList):
'''
Return a MeterSequence dividing this
MeterTerminal according to the numeratorList
>>> a = meter.MeterTerminal('3/4')
>>> b = a.subdivideByList([1, 1, 1])
>>> b
<music21.meter.core.MeterSequence {1/4+1/4+1/4}>
>>> len(b)
3
>>> b[0]
<music21.meter.core.MeterTerminal 1/4>
Unequal subdivisions work:
>>> c = a.subdivideByList([1, 2])
>>> c
<music21.meter.core.MeterSequence {1/4+2/4}>
>>> len(c)
2
>>> (c[0], c[1])
(<music21.meter.core.MeterTerminal 1/4>, <music21.meter.core.MeterTerminal 2/4>)
So does subdividing by strings
>>> c = a.subdivideByList(['2/4', '1/4'])
>>> len(c)
2
>>> (c[0], c[1])
(<music21.meter.core.MeterTerminal 2/4>, <music21.meter.core.MeterTerminal 1/4>)
See :meth:`~music21.meter.MeterSequence.partitionByList` method
of :class:`~music21.meter.MeterSequence` for more details.
'''
# elevate to meter sequence
ms = MeterSequence()
ms.load(self) # do not need to autoWeight here
ms.partitionByList(numeratorList) # this will split weight
return ms
def subdivideByOther(self, other: 'music21.meter.MeterSequence'):
'''
Return a MeterSequence based on another MeterSequence
>>> a = meter.MeterSequence('1/4+1/4+1/4')
>>> a
<music21.meter.core.MeterSequence {1/4+1/4+1/4}>
>>> b = meter.MeterSequence('3/8+3/8')
>>> a.subdivideByOther(b)
<music21.meter.core.MeterSequence {{3/8+3/8}}>
>>> terminal = meter.MeterTerminal('1/4')
>>> divider = meter.MeterSequence('1/8+1/8')
>>> terminal.subdivideByOther(divider)
<music21.meter.core.MeterSequence {{1/8+1/8}}>
'''
# elevate to meter sequence
ms = MeterSequence()
if other.duration.quarterLength != self.duration.quarterLength:
raise MeterException(f'cannot subdivide by other: {other}')
ms.load(other) # do not need to autoWeight here
# ms.partitionByOtherMeterSequence(other) # this will split weight
return ms
def subdivide(self, value):
'''
Subdivision takes a MeterTerminal and, making it into a collection of MeterTerminals,
Returns a MeterSequence.
This is different than a partitioning a MeterSequence in that this does not happen
in place and instead returns a new object.
If an integer is provided, assume it is a partition count
'''
if common.isListLike(value):
return self.subdivideByList(value)
elif isinstance(value, MeterSequence):
return self.subdivideByOther(value)
elif common.isNum(value):
return self.subdivideByCount(value)
else:
raise MeterException(f'cannot process partition argument {value}')
# -------------------------------------------------------------------------
# properties
    @property
    def weight(self):
        '''
        Return or set the weight of a MeterTerminal

        >>> a = meter.MeterTerminal('2/4')
        >>> a.weight = 0.5
        >>> a.weight
        0.5
        '''
        return self._weight

    @weight.setter
    def weight(self, value):
        # no validation is performed; the value is stored as-is
        self._weight = value
    def _getNumerator(self):
        # plain accessor; exposed through the `numerator` property below
        return self._numerator

    def _setNumerator(self, value):
        '''
        Set the numerator and refresh the cached duration.

        >>> a = meter.MeterTerminal('2/4')
        >>> a.duration.quarterLength
        2.0
        >>> a.numerator = 11
        >>> a.duration.quarterLength
        11.0
        '''
        self._numerator = value
        self._ratioChanged()

    numerator = property(_getNumerator, _setNumerator)
    def _getDenominator(self):
        # plain accessor; exposed through the `denominator` property below
        return self._denominator

    def _setDenominator(self, value):
        '''
        Set the denominator (validated against
        `tools.validDenominatorsSet`) and refresh the cached duration.

        >>> a = meter.MeterTerminal('2/4')
        >>> a.duration.quarterLength
        2.0
        >>> a.denominator = 8
        >>> a.duration.quarterLength
        1.0
        '''
        # use duration.typeFromNumDict?
        if value not in tools.validDenominatorsSet:
            raise MeterException(f'bad denominator value: {value}')
        self._denominator = value
        self._ratioChanged()

    denominator = property(_getDenominator, _setDenominator)
def _ratioChanged(self):
'''If ratio has been changed, call this to update duration
'''
# NOTE: this is a performance critical method and should only be
# called when necessary
if self.numerator is None or self.denominator is None:
self._duration = None
else:
self._duration = duration.Duration()
try:
self._duration.quarterLength = (
(4.0 * self.numerator) / self.denominator
)
except duration.DurationException:
environLocal.printDebug(
['DurationException encountered',
'numerator/denominator',
self.numerator,
self.denominator
]
)
self._duration = None
def _getDuration(self):
'''
duration gets or sets a duration value that
is equal in length of the terminal.
>>> a = meter.MeterTerminal()
>>> a.numerator = 3
>>> a.denominator = 8
>>> d = a.duration
>>> d.type
'quarter'
>>> d.dots
1
>>> d.quarterLength
1.5
'''
if self._overriddenDuration:
return self._overriddenDuration
else:
return self._duration
def _setDuration(self, value):
self._overriddenDuration = value
duration = property(_getDuration, _setDuration)
@property
def depth(self):
'''
Return how many levels deep this part is -- the depth of a terminal is always 1
'''
return 1
# -----------------------------------------------------------------------------
class MeterSequence(MeterTerminal):
'''
A meter sequence is a list of MeterTerminals, or other MeterSequences
'''
# CLASS VARIABLES #
__slots__ = (
'_levelListCache',
'_partition',
'parenthesis',
'summedNumerator',
)
# INITIALIZER #
def __init__(self, value=None, partitionRequest=None):
super().__init__()
self._numerator = None # rationalized
self._denominator = None # lowest common multiple
self._partition = [] # a list of terminals or MeterSequences
self._overriddenDuration = None
self._levelListCache = {}
# this attribute is only used in MeterTerminals, and note
# in MeterSequences; a MeterSequences weight is based solely
# on the sum of its components
# del self._weight -- no -- screws up pickling -- cannot del a slotted object
#: Bool stores whether this meter was provided as a summed numerator
self.summedNumerator = False
#: an optional parameter used only in meter display sequences.
#: needed in cases where a meter component is parenthetical
self.parenthesis = False
if value is not None:
self.load(value, partitionRequest)
# SPECIAL METHODS #
def __deepcopy__(self, memo=None):
'''Helper method to copy.py's deepcopy function. Call it from there.
Defining a custom __deepcopy__ here is a performance boost,
particularly in not copying _duration and other benefits.
Notably, self._levelListCache is not copied,
which may not be needed in the copy and may be large.
>>> from copy import deepcopy
>>> ms1 = meter.MeterSequence('4/4+3/8')
>>> ms2 = deepcopy(ms1)
>>> ms2
<music21.meter.core.MeterSequence {4/4+3/8}>
'''
# call class to get a new, empty instance
new = self.__class__()
# for | |
#!/usr/bin/python
import numpy as np
import argparse
import time
import cv2 as cv
import os
def runYOLODetection(args):
    """Run YOLO on args["image"] and return the image with detections drawn.

    Args:
        args: dict with keys "yolo" (model directory), "image" (input
            image path), "confidence" (minimum detection probability)
            and "threshold" (NMS threshold).

    Returns:
        The loaded BGR image with one labeled rectangle per detection.

    Raises:
        ValueError: if the input image cannot be read.
    """
    # load the fish class labels the YOLO model was trained on
    # (with-block fixes the file-handle leak of the original)
    labelsPath = os.path.sep.join([args["yolo"], "fish.names"])
    with open(labelsPath) as f:
        LABELS = f.read().strip().split("\n")

    # one deterministic color per class label
    np.random.seed(0)
    COLORS = np.random.randint(0, 255, size=(len(LABELS), 3),
                               dtype="uint8")

    # derive the paths to the YOLO weights and model configuration
    weightsPath = os.path.sep.join([args["yolo"], "fish.weights"])
    configPath = os.path.sep.join([args["yolo"], "fish_test.cfg"])

    # load the YOLO object detector trained on the fish dataset (1 class)
    print("[INFO] loading YOLO from disk ...")
    net = cv.dnn.readNetFromDarknet(configPath, weightsPath)

    # load input image and grab its spatial dimensions
    image = cv.imread(args["image"])
    if image is None:
        raise ValueError("could not read image: {}".format(args["image"]))
    (H, W) = image.shape[:2]

    # determine only the *output* layer names that we need from YOLO.
    # getUnconnectedOutLayers() returns Nx1 arrays before OpenCV 4.5.4
    # and flat arrays afterwards; flatten() handles both layouts.
    ln = net.getLayerNames()
    ln = [ln[i - 1] for i in np.asarray(net.getUnconnectedOutLayers()).flatten()]

    # construct a blob from the input image and then perform a forward
    # pass of the YOLO object detector, giving us our bounding boxes and
    # associated probabilities.
    # NOTE: (608, 608) is the training input size, but (416, 416)
    # empirically gives more accurate results here.
    blob = cv.dnn.blobFromImage(image, 1 / 255.0, (416, 416),
                                swapRB=True, crop=False)
    net.setInput(blob)
    start = time.time()
    layerOutputs = net.forward(ln)
    end = time.time()
    print("[INFO] YOLO took {:.6f} seconds.".format(end - start))

    boxes = []        # bounding boxes as [x, y, w, h]
    confidences = []  # detection probabilities
    classIDs = []     # predicted class indices
    for output in layerOutputs:
        for detection in output:
            # class scores follow the 4 box coords + objectness score
            scores = detection[5:]
            classID = np.argmax(scores)
            confidence = scores[classID]
            # filter out weak predictions by ensuring the detected
            # probability is greater than the minimum probability
            if confidence > args["confidence"]:
                # YOLO returns center (x, y) plus width/height scaled
                # relative to the image size; rescale to pixels
                box = detection[0:4] * np.array([W, H, W, H])
                (centerX, centerY, width, height) = box.astype("int")
                # derive the top-left corner from the center
                x = int(centerX - (width / 2))
                y = int(centerY - (height / 2))
                boxes.append([x, y, int(width), int(height)])
                confidences.append(float(confidence))
                classIDs.append(classID)

    # non-maxima suppression removes weak, overlapping boxes
    idxs = cv.dnn.NMSBoxes(boxes, confidences, args["confidence"],
                           args["threshold"])

    # ensure at least one detection exists before drawing
    if len(idxs) > 0:
        # np.asarray(...).flatten() tolerates both old (Nx1) and new
        # (flat) NMSBoxes return layouts
        for i in np.asarray(idxs).flatten():
            (x, y) = (boxes[i][0], boxes[i][1])
            (w, h) = (boxes[i][2], boxes[i][3])
            # draw a bounding box rectangle and label on the image
            color = [int(c) for c in COLORS[classIDs[i]]]
            cv.rectangle(image, (x, y), (x + w, y + h), color, 2)
            text = "{}: {:.4f}".format(LABELS[classIDs[i]], confidences[i])
            cv.putText(image, text, (x, y - 5), cv.FONT_HERSHEY_SIMPLEX,
                       0.5, color, 2)
    return image
def runYOLOBoundingBoxes(args):
    """Run YOLO detection and return the raw results without drawing.

    Args:
        args: dict with keys "yolo" (model directory), "image" (input
            image path), "confidence" (minimum detection probability)
            and "threshold" (NMS threshold).

    Returns:
        (image, boxes, idxs): the loaded BGR image, all candidate boxes
        as [x, y, w, h], and the indices kept by non-maxima suppression.

    Raises:
        ValueError: if the input image cannot be read.
    """
    # derive the paths to the YOLO weights and model configuration
    # (label/color setup of the original was dead code here and removed)
    weightsPath = os.path.sep.join([args["yolo"], "fish.weights"])
    configPath = os.path.sep.join([args["yolo"], "fish_test.cfg"])

    # load the YOLO object detector trained on the fish dataset (1 class)
    print("[INFO] loading YOLO from disk ...")
    net = cv.dnn.readNetFromDarknet(configPath, weightsPath)

    # load input image and grab its spatial dimensions
    image = cv.imread(args["image"])
    if image is None:
        raise ValueError("could not read image: {}".format(args["image"]))
    (H, W) = image.shape[:2]

    # determine only the *output* layer names that we need from YOLO.
    # getUnconnectedOutLayers() returns Nx1 arrays before OpenCV 4.5.4
    # and flat arrays afterwards; flatten() handles both layouts.
    ln = net.getLayerNames()
    ln = [ln[i - 1] for i in np.asarray(net.getUnconnectedOutLayers()).flatten()]

    # forward pass.  NOTE: (608, 608) is the training input size, but
    # (416, 416) empirically gives more accurate results here.
    blob = cv.dnn.blobFromImage(image, 1 / 255.0, (416, 416),
                                swapRB=True, crop=False)
    net.setInput(blob)
    start = time.time()
    layerOutputs = net.forward(ln)
    end = time.time()
    print("[INFO] YOLO took {:.6f} seconds.".format(end - start))

    boxes = []        # bounding boxes as [x, y, w, h]
    confidences = []  # detection probabilities
    for output in layerOutputs:
        for detection in output:
            # class scores follow the 4 box coords + objectness score
            scores = detection[5:]
            classID = np.argmax(scores)
            confidence = scores[classID]
            # filter out weak predictions by ensuring the detected
            # probability is greater than the minimum probability
            if confidence > args["confidence"]:
                # YOLO returns center (x, y) plus width/height scaled
                # relative to the image size; rescale to pixels
                box = detection[0:4] * np.array([W, H, W, H])
                (centerX, centerY, width, height) = box.astype("int")
                x = int(centerX - (width / 2))
                y = int(centerY - (height / 2))
                boxes.append([x, y, int(width), int(height)])
                confidences.append(float(confidence))

    # non-maxima suppression removes weak, overlapping boxes
    idxs = cv.dnn.NMSBoxes(boxes, confidences, args["confidence"],
                           args["threshold"])
    return image, boxes, idxs
def runYOLOBoundingBoxes_streamlit(image, yolopath, _confidence, _threshold):
# load my fish class labels that my YOLO model was trained on
labelsPath = os.path.sep.join([yolopath, "fish.names"])
LABELS = open(labelsPath).read().strip().split("\n")
# initialize a list of colors to represent each possible class label
np.random.seed(0)
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3),
dtype="uint8")
print(COLORS)
#COLORS = np.array([255, 0, 0], dtype="uint8")
# derive the paths to the YOLO weights and model configuration
weightsPath = os.path.sep.join([yolopath, "fish.weights"])
configPath = os.path.sep.join([yolopath, "fish_test.cfg"])
# load my YOLO object detector trained on my fish dataset (1 class)
print("[INFO] loading YOLO model ...")
net = cv.dnn.readNetFromDarknet(configPath, weightsPath)
# grab input image's spatial dimensions
(H, W) = image.shape[:2]
# determine only the *output* layer names that we need from YOLO
ln = net.getLayerNames()
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
# construct a blob from the input image and then perform a forward
# pass of the YOLO object detector, giving us our bounding boxes and
# associated probabilities
# NOTE: (608, 608) is my YOLO input image size. However, using
# (416, 416) results in much accutate result. Pretty interesting.
blob = cv.dnn.blobFromImage(image, 1 / 255.0, (416, 416),
swapRB=True, crop=False)
net.setInput(blob)
start = time.time()
layerOutputs = net.forward(ln)
end = time.time()
# show execution time information of YOLO
print("[INFO] YOLO took {:.6f} seconds.".format(end - start))
# initialize out lists of detected bounding boxes, confidences, and
# class IDs, respectively
boxes = []
confidences = []
classIDs = []
# loop over each of the layer outputs
for output in layerOutputs:
# loop over each of the detections
for detection in output:
# extract the class ID and confidence (i.e., probability) of
# the current object detection
scores = detection[5:]
classID = np.argmax(scores)
confidence = scores[classID]
# filter out weak predictions by ensuring the detected
# probability is greater then the minimum probability
| |
<reponame>falconsoft3d/fiscalberry
# -*- coding: UTF-8 -*-
import string
import types
import requests
import logging
import unicodedata
import escpos
from ComandoInterface import ComandoInterface, ComandoException, ValidationError, FiscalPrinterError, formatText
import time
import datetime
from math import ceil
class PrinterException(Exception):
    """Raised when the underlying printer driver reports an error."""
class EscPComandos(ComandoInterface):
# el traductor puede ser: TraductorFiscal o TraductorReceipt
# path al modulo de traductor que este comando necesita
traductorModule = "Traductores.TraductorReceipt"
DEFAULT_DRIVER = "ReceipDirectJet"
__preFillTrailer = None
def _sendCommand(self, comando, skipStatusErrors=False):
try:
ret = self.conector.sendCommand(comando, skipStatusErrors)
return ret
except PrinterException, e:
logging.getLogger().error("PrinterException: %s" % str(e))
raise ComandoException("Error de la impresora: %s.\nComando enviado: %s" % \
(str(e), commandString))
def printTexto(self, texto):
printer = self.conector.driver
printer.start()
printer.text(texto)
printer.cut("PART")
printer.end()
def print_mesa_mozo(self, setTrailer):
for key in setTrailer:
self.doble_alto_x_linea(key)
def openDrawer(self):
printer = self.conector.driver
printer.start()
printer.cashdraw(2)
printer.end()
    def printPedido(self, **kwargs):
        "Print a purchase order (pedido de compras) ticket."
        printer = self.conector.driver
        encabezado = kwargs.get("encabezado", None)
        items = kwargs.get("items", [])
        printer.start()
        printer.set("CENTER", "A", "A", 1, 1)
        # switch the printer into ESC P mode
        printer._raw(chr(0x1D) + chr(0xF9) + chr(0x35) + "1")
        # NOTE: dict.has_key() is Python 2 only
        if encabezado.has_key("es_pedido"):
            printer.text(u"Nuevo Pedido \n")
        else:
            printer.text(u"Nueva OC \n")
        printer.set("LEFT", "A", "A", 1, 1)
        fecha = datetime.datetime.strftime(datetime.datetime.now(), '%H:%M %x')
        # optional supplier data; string fields shorter than 2 chars are skipped
        if encabezado:
            if encabezado.has_key("nombre_proveedor"):
                printer.text(u"Proveedor: "+encabezado.get("nombre_proveedor") )
                printer.text("\n")
            if encabezado.has_key("cuit") and len(encabezado.get("cuit")) > 1:
                printer.text(u"CUIT: "+encabezado.get("cuit") )
                printer.text("\n")
            if encabezado.has_key("telefono") and len(encabezado.get("telefono")) > 1:
                printer.text(u"Telefono: "+encabezado.get("telefono") )
                printer.text("\n")
            if encabezado.has_key("email") and len(encabezado.get("email")) > 1:
                printer.text(u"E-mail: "+encabezado.get("email") )
                printer.text("\n")
            if encabezado.has_key("pedido_recepcionado"):
                if encabezado.get("pedido_recepcionado") == 1:
                    printer.text(u"Esta orden de compra ya ha sido recepcionada\n")
        printer.text(u"Fecha: %s \n\n\n" % fecha)
        printer.text(u"CANT\tDESCRIPCIÓN\n")
        printer.text("\n")
        tot_chars = 40  # NOTE(review): apparently the ticket width; unused below
        for item in items:
            printer.set("LEFT", "A", "A", 1, 1)
            desc = item.get('ds')[0:24]
            cant = float(item.get('qty'))
            unidad_de_medida = item.get('unidad_de_medida')
            observacion = item.get('observacion')
            # pad the description with tabs so columns line up
            cant_tabs = 3
            can_tabs_final = cant_tabs - ceil(len(desc) / 8)
            strTabs = desc.ljust(int(len(desc) + can_tabs_final), '\t')
            printer.text(u"%g%s%s\t%s\n" % (cant," ",unidad_de_medida, strTabs))
            # optional per-item observation in emphasized small type
            if observacion:
                printer.set("LEFT", "B", "B", 1, 1)
                printer.text(u"OBS: %s\n" % observacion)
        printer.text("\n")
        barcode = kwargs.get("barcode", None)
        if barcode:
            printer.barcode(str(barcode).rjust(8, "0"), 'EAN13')
        printer.set("CENTER", "A", "B", 2, 2)
        printer.cut("PART")
        # to restore ESC Bematech mode (kept for testing):
        # printer._raw(chr(0x1D) + chr(0xF9) + chr(0x35) + "0")
        # leave the printer in small, left-aligned type
        printer.set("LEFT", "A", "B", 1, 2)
        printer.end()
    def __printExtras(self, kwargs):
        "Print trailing extras: barcode, QR code and Mercado Pago QR."
        printer = self.conector.driver
        printer.set("CENTER", "A", "A", 1, 1)
        # EAN13 barcode, zero-padded to 8 digits
        barcode = kwargs.get("barcode", None)
        if barcode:
            printer.barcode(str(barcode).rjust(8, "0"), 'EAN13')
        # generic QR code
        qrcode = kwargs.get("qr", None)
        if qrcode:
            printer.qr(qrcode)
        # Mercado Pago payment QR preceded by usage instructions
        qrcodeml = kwargs.get("qr-mercadopago", None)
        if qrcodeml:
            printer.text(u'Pagá rápido con Mercado Pago\n')
            printer.text(u"1- Escaneá el código QR\n2- Ingresá el monto\n3- Seleccioná tipo de pago\n4- Listo!, ni hace falta que nos avises.\n")
            printer.qr(qrcodeml)
    def printFacturaElectronica(self, **kwargs):
        "Print an AFIP electronic invoice (Factura Electronica) ticket."
        encabezado = kwargs.get("encabezado", None)
        # download the barcode image before starting to print
        barcodeImage = requests.get(encabezado.get("barcode_url"), stream=True).raw
        items = kwargs.get("items", [])
        addAdditional = kwargs.get("addAdditional", None)
        setTrailer = kwargs.get("setTrailer", None)
        printer = self.conector.driver
        printer.start()
        # --- company header ---
        printer.set("LEFT", "A", "B", 2, 1)
        printer.text(encabezado.get("nombre_comercio")+"\n")
        printer.set("LEFT", "A", "A", 1, 1)
        printer.text(encabezado.get("razon_social")+"\n")
        printer.text("CUIT: "+encabezado.get("cuit_empresa")+"\n")
        if encabezado.get('ingresos_brutos'):
            printer.text("Ingresos Brutos: "+encabezado.get("ingresos_brutos")+"\n")
        printer.text("Inicio de actividades: "+encabezado.get("inicio_actividades")+"\n")
        printer.text(encabezado.get("domicilio_comercial")+"\n")
        printer.text(encabezado.get("tipo_responsable")+"\n")
        printer.set("CENTER", "A", "A", 1, 1)
        printer.text("----------------------------------------\n") # 40 dashes
        # --- invoice type and number ---
        printer.set("LEFT", "A", "B", 1, 1)
        printer.text(encabezado.get("tipo_comprobante")+" Nro. "+encabezado.get("numero_comprobante")+"\n")
        printer.text("Fecha "+encabezado.get("fecha_comprobante")+"\n")
        printer.set("CENTER", "A", "A", 1, 1)
        printer.text("----------------------------------------\n") # 40 dashes
        # --- customer block (or anonymous consumer) ---
        printer.set("LEFT", "A", "A", 1, 1)
        if encabezado.has_key("nombre_cliente"):
            nombre_cliente = "A "+encabezado.get("nombre_cliente")
            tipo_responsable_cliente = encabezado.get("tipo_responsable_cliente")
            documento_cliente = encabezado.get("nombre_tipo_documento")+": "+encabezado.get("documento_cliente")
            domicilio_cliente = encabezado.get("domicilio_cliente")
            printer.text(nombre_cliente+"\n")
            if documento_cliente:
                printer.text(documento_cliente+"\n")
            if tipo_responsable_cliente:
                printer.text(tipo_responsable_cliente+"\n")
            if domicilio_cliente:
                printer.text(domicilio_cliente+"\n")
        else:
            printer.text("A Consumidor Final \n")
        printer.set("CENTER", "A", "A", 1, 1)
        printer.text("----------------------------------------\n\n") # 40 dashes
        printer.set("LEFT", "A", "A", 1, 1)
        tot_neto = 0.0
        tot_iva = 0.0
        total = 0.0
        # "Factura A"/"NOTAS DE CREDITO A" show net price and VAT split out
        if encabezado.get("tipo_comprobante") == "Factura A" or encabezado.get("tipo_comprobante") == "NOTAS DE CREDITO A":
            printer.text(u"DESCRIPCIÓN\t\t(IVA) PRECIO NETO\n")
            printer.text("\n")
        for item in items:
            desc = item.get('ds')[0:24]
            cant = float(item.get('qty'))
            precio_unitario = float(item.get('importe'))
            precio_total = cant * precio_unitario
            # default VAT rate is 21% when the item does not specify one
            if item.get('alic_iva'):
                porcentaje_iva = float(item.get('alic_iva'))
            else:
                porcentaje_iva = 21.00
            if encabezado.get("tipo_comprobante") == "Factura A" or encabezado.get("tipo_comprobante") == "NOTAS DE CREDITO A":
                # Factura A: report net amount and VAT separately
                precio_unitario_neto = precio_unitario / ((porcentaje_iva + 100.0) / 100)
                precio_unitario_iva = precio_unitario - precio_unitario_neto
                precio_total_neto = cant * precio_unitario_neto
                precio_total_iva = cant * precio_unitario_iva
                tot_neto += precio_total_neto
                tot_iva += precio_total_iva
            else:
                # reuse the same variable names to avoid more ifs below
                precio_unitario_neto = precio_unitario
                precio_total_neto = precio_total
            total += precio_total
            # pad the description to a 19-char column followed by tabs
            cant_tabs = 3
            len_desc = len(desc)
            if len_desc > 19:
                desc = desc[:len_desc - (len_desc - 19)]
            if len_desc < 19:
                # NOTE(review): ljust(19 - len_desc) never pads because the
                # requested width is below len(desc); likely meant ljust(19)
                desc = desc.ljust(19 - len_desc)
            can_tabs_final = cant_tabs - ceil(len(desc) / 8)
            strTabs = desc.ljust(int(len(desc) + can_tabs_final), '\t')
            if encabezado.get("tipo_comprobante") == "Factura A":
                printer.text(" %g x $%g\n" % (cant, round(precio_unitario_neto, 4)))
                printer.text(strTabs+"(%g)\t$%g\n" % (round(porcentaje_iva, 2), round(precio_total_neto, 2)))
            else:
                printer.text("%g " % (cant))
                printer.text(strTabs+"(%g)\t$%g\n" % (round(porcentaje_iva, 2), round(precio_total_neto, 2)))
        printer.set("RIGHT", "A", "A", 1, 1)
        printer.text("\n")
        if addAdditional:
            # NOTE(review): this `!= or !=` condition is always True;
            # probably `and` was intended -- confirm against real tickets
            if encabezado.get("tipo_comprobante") != "Factura A" or encabezado.get("tipo_comprobante") != "NOTAS DE CREDITO A":
                # print subtotal before the discount line
                printer.text("Subtotal: $%g\n" % round(total, 2))
            # print the discount (or surcharge) line
            sAmount = float(addAdditional.get('amount', 0))
            total = total - sAmount
            printer.set("RIGHT", "A", "A", 1, 1)
            printer.text("%s $%g\n" % (addAdditional.get('description')[0:20], round(sAmount, 2)))
            if encabezado.get("tipo_comprobante") == "Factura A" or encabezado.get("tipo_comprobante") == "NOTAS DE CREDITO A":
                # recompute net and VAT after the discount.
                # NOTE(review): uses `porcentaje_iva` from the last loop
                # iteration; raises NameError when `items` is empty
                tot_neto = total / ((porcentaje_iva + 100.0) / 100)
                tot_iva = total - tot_neto
        if encabezado.get("tipo_comprobante") == "Factura A" or encabezado.get("tipo_comprobante") == "NOTAS DE CREDITO A":
            printer.text("Subtotal Neto: $%g\n" % (round(tot_neto, 2)))
            printer.text("Subtotal IVA: $%g\n" % (round(tot_iva, 2)))
        printer.text("\n")
        # print the final total in double size
        printer.set("RIGHT", "A", "A", 2, 2)
        printer.text(u"TOTAL: $%g\n" % round(total, 2))
        printer.text("\n")
        printer.set("LEFT", "B", "A", 1, 1)
        # credit notes require a signature block
        if encabezado.get("tipo_comprobante") == "NOTAS DE CREDITO A" or encabezado.get("tipo_comprobante") == "NOTAS DE CREDITO B":
            printer.text(u"Firma...................................................\n\n")
            printer.text(u"Aclaración..............................................\n")
        if self.__preFillTrailer:
            self._setTrailer(self.__preFillTrailer)
        if setTrailer:
            self._setTrailer(setTrailer)
        printer.set("LEFT", "A", "A", 1, 1)
        # BARCODE image downloaded from the URL at the top
        printer.image( barcodeImage )
        cae = encabezado.get("cae")
        caeVto = encabezado.get("cae_vto")
        printer.text(u"CAE: " + cae + " CAE VTO: " + caeVto +"\n\n")
        printer.image('afip.bmp');
        printer.text("Comprobante Autorizado \n")
        printer.set("CENTER", "B", "B", 1, 1)
        printer.text(u"Comprobante electrónico impreso por www.paxapos.com")
        printer.cut("PART")
        # to restore ESC Bematech mode (kept for testing):
        # printer._raw(chr(0x1D) + chr(0xF9) + chr(0x35) + "0")
        # leave the printer in small, left-aligned type
        printer.set("LEFT", "A", "B", 1, 2)
        printer.end()
    def printRemitoCorto(self, **kwargs):
        "Print a short remito (delivery note) ticket."
        printer = self.conector.driver
        encabezado = kwargs.get("encabezado", None)
        items = kwargs.get("items", [])
        addAdditional = kwargs.get("addAdditional", None)
        setTrailer = kwargs.get("setTrailer", None)
        printer.start()
        printer.set("CENTER", "A", "A", 1, 1)
        # optional timestamp header
        if encabezado.has_key("imprimir_fecha_remito"):
            fecha = datetime.datetime.strftime(datetime.datetime.now(), '%H:%M %x')
            printer.text(u"Fecha: %s" % fecha)
        printer.text(u"\nNO VALIDO COMO FACTURA\n")
        # optional customer block
        if encabezado:
            printer.set("LEFT", "A", "A", 1, 1)
            if encabezado.has_key("nombre_cliente"):
                printer.text(u'\nNombre Cliente: %s\n' % encabezado.get("nombre_cliente"))
            if encabezado.has_key("telefono"):
                printer.text(u'\nTelefono: %s\n' % encabezado.get("telefono"))
            if encabezado.has_key("domicilio_cliente"):
                printer.text(u'\nDomicilio: %s\n' % encabezado.get("domicilio_cliente"))
            printer.text(u"\n")
        tot_importe = 0.0
        for item in items:
            desc = item.get('ds')[0:20]
            cant = float(item.get('qty'))
            precio = cant * float(item.get('importe'))
            tot_importe += precio
            # pad the description with tabs so the price column lines up
            cant_tabs = 3
            can_tabs_final = cant_tabs - ceil(len(desc) / 8)
            strTabs = desc.ljust(int(len(desc) + can_tabs_final), '\t')
            printer.text("%g\t%s$%g\n" % (cant, strTabs, precio))
        printer.text("\n")
        if addAdditional:
            # print subtotal before the discount/surcharge line
            printer.set("RIGHT", "A", "A", 1, 1)
            printer.text("SUBTOTAL: $%g\n" % tot_importe)
            # print the discount (or surcharge) line
            sAmount = float(addAdditional.get('amount', 0))
            tot_importe = tot_importe - sAmount
            printer.set("RIGHT", "A", "A", 1, 1)
            printer.text("%s $%g\n" % (addAdditional.get('description'), sAmount))
        # print the final total in double size
        printer.set("RIGHT", "A", "A", 2, 2)
        printer.text(u"TOTAL: $%g\n" % tot_importe)
        printer.set("LEFT", "A", "A", 1, 1)
        if self.__preFillTrailer:
            self._setTrailer(self.__preFillTrailer)
        if setTrailer:
            self._setTrailer(setTrailer)
        self.__printExtras(kwargs)
        printer.cut("PART")
        # to restore ESC Bematech mode (kept for testing):
        # printer._raw(chr(0x1D) + chr(0xF9) + chr(0x35) + "0")
        # leave the printer in small, left-aligned type
        printer.set("LEFT", "A", "B", 1, 2)
        printer.end()
def printRemito(self, **kwargs):
"imprimir remito"
printer = self.conector.driver
encabezado = kwargs.get("encabezado", None)
items = kwargs.get("items", [])
addAdditional = kwargs.get("addAdditional", None)
setTrailer = kwargs.get("setTrailer", None)
printer.start()
printer.set("CENTER", "A", "A", 1, 1)
printer.set("CENTER", "A", "A", 1, 1)
if encabezado.has_key("imprimir_fecha_remito"):
fecha = datetime.datetime.strftime(datetime.datetime.now(), '%H:%M %x')
printer.text(u"Fecha: %s \n\n\n" % fecha)
printer.text(u"Verifique su cuenta por favor\n")
printer.text(u"NO VALIDO COMO FACTURA\n\n")
if encabezado:
printer.set("CENTER", "A", "A", 1, 2)
if encabezado.has_key("nombre_cliente"):
printer.text(u'\n%s\n' % encabezado.get("nombre_cliente"))
if encabezado.has_key("telefono"):
printer.text(u'\n%s\n' % encabezado.get("telefono"))
if encabezado.has_key("domicilio_cliente"):
printer.text(u'\n%s\n' % encabezado.get("domicilio_cliente"))
printer.text(u"\n")
printer.set("LEFT", "A", "A", 1, 1)
printer.text(u"CANT\tDESCRIPCIÓN\t\tPRECIO\n")
printer.text("\n")
tot_importe = 0.0
for item in items:
desc = item.get('ds')[0:20]
cant = float(item.get('qty'))
precio = cant * float(item.get('importe'))
tot_importe += | |
<filename>evaluation/ope.py
# ------------------------------------------------------------------------------
# CONFIDENTIAL AND PROPRIETARY.
#
# COPYRIGHT (c) 2020. <NAME>. ALL RIGHTS RESERVED.
#
# Unauthorized use or disclosure in any manner may result in disciplinary
# action up to and including termination of employment (in the case of
# employees), termination of an assignment or contract (in the case of
# contingent staff), and potential civil and criminal liability.
#
# For internal use only.
# ------------------------------------------------------------------------------
import os
import re
import numpy as np
import pandas as pd
def overlap_ratio(rect1, rect2):
    '''Compute the IoU between corresponding rows of two rect arrays.

    Args:
        rect1, rect2: 2-D arrays of N x [x, y, w, h]; 1-D inputs are
            treated as a single rect.
    Returns:
        1-D array of IoU values clipped to [0, 1].
    '''
    rect1 = np.atleast_2d(rect1)
    rect2 = np.atleast_2d(rect2)

    # intersection rectangle edges
    xa = np.maximum(rect1[:, 0], rect2[:, 0])
    xb = np.minimum(rect1[:, 0] + rect1[:, 2], rect2[:, 0] + rect2[:, 2])
    ya = np.maximum(rect1[:, 1], rect2[:, 1])
    yb = np.minimum(rect1[:, 1] + rect1[:, 3], rect2[:, 1] + rect2[:, 3])

    inter = np.maximum(0, xb - xa) * np.maximum(0, yb - ya)
    union = rect1[:, 2] * rect1[:, 3] + rect2[:, 2] * rect2[:, 3] - inter
    return np.clip(inter / union, 0, 1)
def success_overlap(gt_bb, result_bb, n_frame):
    '''Success curve: fraction of frames whose IoU exceeds each of the
    21 thresholds in [0, 1] (step 0.05).

    Frames whose ground-truth box is not fully positive are marked with
    IoU -1 and therefore never count as successes.
    '''
    thresholds = np.arange(0, 1.05, 0.05)
    iou = np.full(len(gt_bb), -1.0)
    valid = np.sum(gt_bb > 0, axis=1) == 4
    iou[valid] = overlap_ratio(gt_bb[valid], result_bb[valid])
    return np.array([np.sum(iou > t) / float(n_frame) for t in thresholds])
def success_error(gt_center, result_center, thresholds, n_frame):
    '''Precision curve: fraction of frames whose center error is within
    each pixel threshold.

    Frames whose ground-truth center is not fully positive get distance
    -1, which still satisfies every non-negative threshold by design of
    the original metric.
    '''
    dist = np.full(len(gt_center), -1.0)
    valid = np.sum(gt_center > 0, axis=1) == 2
    diff = gt_center[valid] - result_center[valid]
    dist[valid] = np.sqrt(np.sum(np.power(diff, 2), axis=1))
    return np.array([np.sum(dist <= t) / float(n_frame) for t in thresholds])
def convert_bb_to_center(bboxes):
    '''Convert N x [x, y, w, h] boxes to N x 2 center points.'''
    cx = bboxes[:, 0] + (bboxes[:, 2] - 1) / 2
    cy = bboxes[:, 1] + (bboxes[:, 3] - 1) / 2
    return np.array([cx, cy]).T
def convert_bb_to_norm_center(bboxes, gt_wh):
    '''Center points normalized by the ground-truth width/height.'''
    centers = convert_bb_to_center(bboxes)
    # epsilon avoids division by zero for degenerate gt sizes
    return centers / (gt_wh + 1e-16)
def get_trajectories(basedir, evaldir, tracker_name, category, video):
    """Load ground-truth and tracker-output boxes for one video.

    Args:
        basedir: dataset root; ground truth is read from
            <basedir>[/<category>]/<video>/groundtruth.txt.
        evaldir: root of tracker outputs, read from
            <evaldir>/<tracker_name>/<video>.txt.
        tracker_name: subdirectory name of the tracker under evaldir.
        category: optional dataset category folder, or None.
        video: video/sequence name.

    Returns:
        (all_gt_boxes, all_output_boxes): two equally long arrays of
        per-frame [x, y, w, h] float boxes.

    Raises:
        ValueError: when the prediction has fewer boxes than the ground
            truth in a way the fix-ups below cannot reconcile.
    """
    if category is not None:
        gt_file = os.path.join(basedir, category, video, 'groundtruth.txt')
    else:
        gt_file = os.path.join(basedir, video, 'groundtruth.txt')
    with open(gt_file) as f:
        all_gt_boxes_str = f.readlines()
    # ground truth is comma-separated
    all_gt_boxes = []
    for gt_box in all_gt_boxes_str:
        gt_box = np.array([float(index) for index in gt_box.split(',')])
        all_gt_boxes.append(gt_box)
    all_gt_boxes = np.array(all_gt_boxes)
    output_file = os.path.join(evaldir, tracker_name, video + '.txt')
    with open(output_file) as f:
        all_output_boxes_str = f.readlines()
    # tracker output may be comma- or tab-separated
    all_output_boxes = []
    for output_box in all_output_boxes_str:
        output_box = np.array([float(index) for index in re.split(',|\t', output_box)])
        all_output_boxes.append(output_box)
    all_output_boxes = np.array(all_output_boxes)
    # Length fix-ups so gt and output align.
    # NOTE(review): these offsets look empirically tuned for particular
    # sequences/trackers (e.g. 'monkey-17') -- confirm against the data.
    if video == 'monkey-17':
        all_output_boxes = all_output_boxes[:len(all_gt_boxes)]
    if len(all_gt_boxes) == 1 + len(all_output_boxes):
        # tracker omitted the first (initialization) frame; reuse the gt box
        all_output_boxes = np.concatenate([all_gt_boxes[0:1], all_output_boxes])
    if len(all_gt_boxes) == 2 + len(all_output_boxes):
        # pad the front with the gt box and drop the last gt frame
        all_output_boxes = np.concatenate([all_gt_boxes[0:1], all_output_boxes])
        all_gt_boxes = all_gt_boxes[:-1]
    if len(all_gt_boxes) == 3 + len(all_output_boxes):
        all_gt_boxes = all_gt_boxes[3:]
    elif len(all_gt_boxes) < len(all_output_boxes):
        # extra predictions beyond the gt length are discarded
        all_output_boxes = all_output_boxes[:len(all_gt_boxes)]
    elif len(all_gt_boxes) > len(all_output_boxes):
        raise ValueError('size of prediction and gt mismatch')
    return all_gt_boxes, all_output_boxes
def eval_success(tracker_name, basedir, evaldir, video_names, categories=None):
    """Compute the per-video success curves for one tracker.

    Returns:
        dict mapping video name -> success curve (21 IoU thresholds)
    """
    results = {}
    for idx, video in enumerate(video_names):
        category = categories[idx] if categories is not None else None
        gt_boxes, out_boxes = get_trajectories(basedir, evaldir, tracker_name,
                                               category, video)
        results[video] = success_overlap(gt_boxes, out_boxes, len(gt_boxes))
    return results
def eval_precision(tracker_name, basedir, evaldir, video_names, categories=None):
    """Compute the per-video precision (center-error) curve for one tracker.

    Args:
        tracker_name: tracker whose results are evaluated.
        basedir: ground-truth root directory.
        evaldir: tracker-results root directory.
        video_names: list of videos to evaluate.
        categories: optional per-video category list (LaSOT layout).

    Returns:
        Dict mapping video name -> precision curve over 0..50 pixel thresholds.
    """
    results = {}
    pixel_thresholds = np.arange(0, 51, 1)
    for idx, name in enumerate(video_names):
        cat = categories[idx] if categories is not None else None
        gt, pred = get_trajectories(basedir, evaldir, tracker_name, cat, name)
        results[name] = success_error(
            convert_bb_to_center(gt), convert_bb_to_center(pred),
            pixel_thresholds, len(gt))
    return results
def eval_norm_precision(tracker_name, basedir, evaldir, video_names, categories=None):
    """Compute the per-video normalized-precision curve for one tracker.

    Center errors are normalized by the ground-truth box size before
    thresholding, making the metric resolution independent.

    Args:
        tracker_name: tracker whose results are evaluated.
        basedir: ground-truth root directory.
        evaldir: tracker-results root directory.
        video_names: list of videos to evaluate.
        categories: optional per-video category list (LaSOT layout).

    Returns:
        Dict mapping video name -> normalized precision curve.
    """
    results = {}
    norm_thresholds = np.arange(0, 51, 1) / 100
    for idx, name in enumerate(video_names):
        cat = categories[idx] if categories is not None else None
        gt, pred = get_trajectories(basedir, evaldir, tracker_name, cat, name)
        # Both trajectories are normalized by the GT width/height (cols 2:4).
        gt_centers = convert_bb_to_norm_center(gt, gt[:, 2:4])
        pred_centers = convert_bb_to_norm_center(pred, gt[:, 2:4])
        results[name] = success_error(gt_centers, pred_centers, norm_thresholds, len(gt))
    return results
def success_overlap_iou(ious):
    """Turn a sequence of per-frame IoUs into a success curve.

    Args:
        ious: per-frame overlap ratios.

    Returns:
        Array of length 21: for each overlap threshold in [0, 1] (step 0.05),
        the fraction of frames whose IoU strictly exceeds it.
    """
    frame_count = float(len(ious))
    thresholds = np.arange(0, 1.05, 0.05)
    return np.array([np.sum(ious > t) / frame_count for t in thresholds])
def get_label_for_ablation_study(tracker):
    """Map an internal tracker identifier to its short plot/table label.

    Unknown identifiers are returned unchanged.
    """
    short_names = {
        'SIAMRPNPP_BASELINE': 'PP[B]',
        'SIAMRPNPP/HGLMM/FINALIZED': 'PP[H]',
        'SIAMRPNPP/GLOVE/FINALIZED': 'PP[G]',
        'SIAMRPNPP/HGLMM/NL_INIT': 'PP[NL]',
        'SIAMRPN_BASELINE': 'RPN[B]',
        'SIAMRPN/GLOVE/FINALIZED': 'RPN[G]',
        'SIAMRPN/HGLMM/FINALIZED': 'RPN[H]',
        'SiamFC_BASELINE': 'FC[B]',
        "dimp50_000": 'DiMP',
        "prdimp50_000": 'PrDiMP',
        "V3-BEST": 'PP[BERT]',
        "siamrcnn_lasot": 'SiamRCNN',
        "FENG_NL": "FENG[NL]",
    }
    return short_names.get(tracker, tracker)
def eval_lasot():
    """Evaluate DiMP trackers on the NL-consistent LaSOT subset.

    Prints, for each tracker, the success AUC, precision@20px, and
    normalized precision@0.20, followed by the full threshold tables.
    Paths and tracker names are hard-coded.
    """
    base_dir = '/research/fung/data/LaSOTBenchmark'
    testing_set = '/research/fung/repository/lang_tracking_vos/evaluation/nl-consistent-lasot.txt'
    tracker_base_dir = '/research/fung/results/cvpr_results/lasot'
    trackers = [
        "dimp50_000",
        "prdimp50_000",
    ]
    with open(testing_set, 'r') as f:
        testing_videos = f.readlines()
    # LaSOT video names are "<category>-<index>".
    categories = [v.split('-')[0] for v in testing_videos]
    testing_videos = [v.strip() for v in testing_videos]
    # Success (AUC) table.
    success_df = pd.DataFrame(np.arange(0, 1.05, 0.05), columns=['THRESHOLD'])
    for tracker in trackers:
        ret = eval_success(tracker, base_dir, tracker_base_dir, testing_videos, categories)
        curves = list(ret.values())
        success_df[get_label_for_ablation_study(tracker)] = np.mean(curves, axis=0)
        print(np.mean(curves))
    print(success_df.to_string(index=False))
    # Precision at pixel thresholds (index 20 == 20 px).
    precision_df = pd.DataFrame(np.arange(0, 51, 1), columns=['THRESHOLD'])
    for tracker in trackers:
        ret = eval_precision(tracker, base_dir, tracker_base_dir, testing_videos, categories)
        curve = np.mean(list(ret.values()), axis=0)
        precision_df[get_label_for_ablation_study(tracker)] = curve
        print(curve[20])
    print(precision_df.to_string(index=False))
    # Normalized precision (index 20 == 0.20 of box size).
    normed_precision_df = pd.DataFrame(np.arange(0, 51, 1) / 100., columns=['THRESHOLD'])
    for tracker in trackers:
        ret = eval_norm_precision(tracker, base_dir, tracker_base_dir, testing_videos, categories)
        curve = np.mean(list(ret.values()), axis=0)
        normed_precision_df[get_label_for_ablation_study(tracker)] = curve
        print(curve[20])
    print(normed_precision_df.to_string(index=False))
def eval_nl_lasot():
    """Evaluate the ablation trackers on the NL-consistent LaSOT subset.

    Prints, for each tracker, the success AUC, precision@20px, and
    normalized precision@0.20, followed by the full threshold tables.
    Paths and tracker names are hard-coded.
    """
    base_dir = '/research/fung/data/LaSOTBenchmark'
    tracker_base_dir = '/research/fung/eccv_results/lasot'
    testing_set = 'experiments/eccv/finalized/NL-CONSISTENT-LASOT'
    trackers = [
        # 'VITAL', 'MDNet', 'ATOM', 'ECO','MEEM',
        'SIAMRPNPP_BASELINE', 'SIAMRPNPP/HGLMM/FINALIZED',
        'SIAMRPNPP/GLOVE/FINALIZED',
        # 'SIAMRPNPP/HGLMM/NL_INIT',
        # 'SiamFC_BASELINE',
        'SIAMRPN_BASELINE', 'SIAMRPN/HGLMM/FINALIZED',
        'SIAMRPN/GLOVE/FINALIZED'
    ]
    with open(testing_set, 'r') as f:
        testing_videos = f.readlines()
    # LaSOT video names are "<category>-<index>".
    categories = [v.split('-')[0] for v in testing_videos]
    testing_videos = [v.strip() for v in testing_videos]
    # Success (AUC) table.
    success_df = pd.DataFrame(np.arange(0, 1.05, 0.05), columns=['THRESHOLD'])
    for tracker in trackers:
        ret = eval_success(tracker, base_dir, tracker_base_dir, testing_videos, categories)
        curves = list(ret.values())
        success_df[get_label_for_ablation_study(tracker)] = np.mean(curves, axis=0)
        print(np.mean(curves))
    print(success_df.to_string(index=False))
    # Precision at pixel thresholds (index 20 == 20 px).
    precision_df = pd.DataFrame(np.arange(0, 51, 1), columns=['THRESHOLD'])
    for tracker in trackers:
        ret = eval_precision(tracker, base_dir, tracker_base_dir, testing_videos, categories)
        curve = np.mean(list(ret.values()), axis=0)
        precision_df[get_label_for_ablation_study(tracker)] = curve
        print(curve[20])
    print(precision_df.to_string(index=False))
    # Normalized precision (index 20 == 0.20 of box size).
    normed_precision_df = pd.DataFrame(np.arange(0, 51, 1) / 100., columns=['THRESHOLD'])
    for tracker in trackers:
        ret = eval_norm_precision(tracker, base_dir, tracker_base_dir, testing_videos, categories)
        curve = np.mean(list(ret.values()), axis=0)
        normed_precision_df[get_label_for_ablation_study(tracker)] = curve
        print(curve[20])
    print(normed_precision_df.to_string(index=False))
def eval_otb():
    """Evaluate DiMP trackers on the OTB-sentences benchmark.

    Same report structure as the LaSOT drivers (success AUC,
    precision@20px, normalized precision@0.20), but OTB has no category
    subfolders so `categories` is omitted.  Paths are hard-coded.
    """
    base_dir = '/research/fung/data/otb_sentences/OTB_videos'
    tracker_base_dir = '/research/fung/cvpr_results/lasot'
    testing_set = '/research/fung/client1/research/tmwtt_v2/input_pipeline/otb_testing_set'
    trackers = [
        "dimp18_000",
        "prdimp18_000",
    ]
    with open(testing_set, 'r') as f:
        testing_videos = f.readlines()
    testing_videos = [v.strip() for v in testing_videos]
    # Success (AUC) table.
    success_df = pd.DataFrame(np.arange(0, 1.05, 0.05), columns=['THRESHOLD'])
    for tracker in trackers:
        ret = eval_success(tracker, base_dir, tracker_base_dir, testing_videos)
        curves = list(ret.values())
        success_df[get_label_for_ablation_study(tracker)] = np.mean(curves, axis=0)
        print(np.mean(curves))
    print(success_df.to_string(index=False))
    # Precision at pixel thresholds (index 20 == 20 px).
    precision_df = pd.DataFrame(np.arange(0, 51, 1), columns=['THRESHOLD'])
    for tracker in trackers:
        ret = eval_precision(tracker, base_dir, tracker_base_dir, testing_videos)
        curve = np.mean(list(ret.values()), axis=0)
        precision_df[get_label_for_ablation_study(tracker)] = curve
        print(curve[20])
    print(precision_df.to_string(index=False))
    # Normalized precision (index 20 == 0.20 of box size).
    normed_precision_df = pd.DataFrame(np.arange(0, 51, 1) / 100., columns=['THRESHOLD'])
    for tracker in trackers:
        ret = eval_norm_precision(tracker, base_dir, tracker_base_dir, testing_videos)
        curve = np.mean(list(ret.values()), axis=0)
        normed_precision_df[get_label_for_ablation_study(tracker)] = curve
        print(curve[20])
    print(normed_precision_df.to_string(index=False))
def lasot_per_video_iou():
    """Print the mean IoU of one tracker for every LaSOT test video.

    Output is one tab-separated "<video>\\t<mean IoU>" line per video.
    Paths and the tracker name are hard-coded.
    """
    base_dir = '/research/fung/data/LaSOTBenchmark'
    tracker_base_dir = '/research/fung/cvpr_results/lasot'
    testing_set = '/research/fung/client1/research/tmwtt_v2/input_pipeline/LaSOT_testing_set'
    # tracker = "prdimp18_000"
    tracker = "V3-BEST"
    with open(testing_set, 'r') as f:
        testing_videos = f.readlines()
    # LaSOT video names are "<category>-<index>".
    categories = [v.split('-')[0] for v in testing_videos]
    testing_videos = [v.strip() for v in testing_videos]
    for cat, video in zip(categories, testing_videos):
        gt, pred = get_trajectories(base_dir, tracker_base_dir, tracker, cat, video)
        print('%s\t%.2f' % (video, np.mean(overlap_ratio(gt, pred))))
def lasot_per_cat_success():
base_dir = '/research/fung/data/LaSOTBenchmark'
testing_set = '/research/fung/client1/research/tmwtt_v2/input_pipeline/LaSOT_testing_set'
tracker_base_dir = '/research/fung/cvpr_results/lasot'
trackers = [
"prdimp18_000",
"V3-BEST"
]
# tracker_base_dir = '/research/fung/cvpr_results/lasot'
# trackers = ['SiamRPN++_tracking_result']
with open(testing_set, 'r') as f:
testing_videos = f.readlines()
categories = [test_video.split('-')[0] for test_video in testing_videos]
testing_videos = [test_video.strip() for test_video in testing_videos]
ious = []
per_cat = {"tracker": []}
for c in categories:
per_cat[c] = []
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.