id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7 values |
|---|---|---|
/EPANETTOOLS-1.0.0.tar.gz/EPANETTOOLS-1.0.0/src/epanettools/epanet2.py |
# SWIG-generated import bootstrap: locate and load the compiled _epanet2
# extension module, handling both package-relative and top-level layouts.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info >= (2, 7, 0):
    # Python >= 2.7: use importlib, trying the package-relative name first.
    def swig_import_helper():
        import importlib
        pkg = __name__.rpartition('.')[0]
        mname = '.'.join((pkg, '_epanet2')).lstrip('.')
        try:
            return importlib.import_module(mname)
        except ImportError:
            # Fall back to a top-level module (e.g. when run outside the package).
            return importlib.import_module('_epanet2')
    _epanet2 = swig_import_helper()
    del swig_import_helper
elif _swig_python_version_info >= (2, 6, 0):
    # Python 2.6: use the legacy 'imp' module API instead of importlib.
    def swig_import_helper():
        from os.path import dirname
        import imp  # deprecated in Python 3; retained for 2.6 compatibility
        fp = None
        try:
            fp, pathname, description = imp.find_module('_epanet2', [dirname(__file__)])
        except ImportError:
            import _epanet2
            return _epanet2
        try:
            _mod = imp.load_module('_epanet2', fp, pathname, description)
        finally:
            # find_module returns an open file handle that must always be closed.
            if fp is not None:
                fp.close()
        return _mod
    _epanet2 = swig_import_helper()
    del swig_import_helper
else:
    # Very old Pythons: plain top-level import.
    import _epanet2
del _swig_python_version_info
try:
    _swig_property = property
except NameError:
    pass  # Python < 2.2 doesn't have 'property'.
try:
    # Python 3 exposes the builtin namespace as 'builtins'; alias it to the
    # Python 2 name so generated code can reference __builtin__ uniformly.
    import builtins as __builtin__
except ImportError:
    import __builtin__
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    """Setter dispatching to SWIG-registered property setters.

    With static=1 (the default) unknown attribute names are rejected,
    emulating non-dynamic (C++-like) classes.
    """
    if (name == "thisown"):
        # 'thisown' controls C-side memory ownership of the wrapped object.
        return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            # Store the low-level SWIG pointer object directly.
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name, None)
    if method:
        return method(self, value)
    if (not static):
        if _newclass:
            object.__setattr__(self, name, value)
        else:
            self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
    # Dynamic variant: falls through to ordinary attribute assignment when
    # no SWIG setter is registered (static=0).
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
    """Getter dispatching to SWIG-registered property getters."""
    if (name == "thisown"):
        return self.this.own()
    method = class_type.__swig_getmethods__.get(name, None)
    if method:
        return method(self)
    raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
    # Build a repr that includes the underlying C pointer when available;
    # fall back to an empty description if the proxy is not yet initialised.
    try:
        strthis = "proxy of " + self.this.__repr__()
    except __builtin__.Exception:
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
    # New-style classes are available (Python >= 2.2).
    _object = object
    _newclass = 1
except __builtin__.Exception:
    # Ancient Python: fall back to a classic-class placeholder.
    class _object:
        pass
    _newclass = 0
EN_ELEVATION = _epanet2.EN_ELEVATION
EN_BASEDEMAND = _epanet2.EN_BASEDEMAND
EN_PATTERN = _epanet2.EN_PATTERN
EN_EMITTER = _epanet2.EN_EMITTER
EN_INITQUAL = _epanet2.EN_INITQUAL
EN_SOURCEQUAL = _epanet2.EN_SOURCEQUAL
EN_SOURCEPAT = _epanet2.EN_SOURCEPAT
EN_SOURCETYPE = _epanet2.EN_SOURCETYPE
EN_TANKLEVEL = _epanet2.EN_TANKLEVEL
EN_DEMAND = _epanet2.EN_DEMAND
EN_HEAD = _epanet2.EN_HEAD
EN_PRESSURE = _epanet2.EN_PRESSURE
EN_QUALITY = _epanet2.EN_QUALITY
EN_SOURCEMASS = _epanet2.EN_SOURCEMASS
EN_INITVOLUME = _epanet2.EN_INITVOLUME
EN_MIXMODEL = _epanet2.EN_MIXMODEL
EN_MIXZONEVOL = _epanet2.EN_MIXZONEVOL
EN_TANKDIAM = _epanet2.EN_TANKDIAM
EN_MINVOLUME = _epanet2.EN_MINVOLUME
EN_VOLCURVE = _epanet2.EN_VOLCURVE
EN_MINLEVEL = _epanet2.EN_MINLEVEL
EN_MAXLEVEL = _epanet2.EN_MAXLEVEL
EN_MIXFRACTION = _epanet2.EN_MIXFRACTION
EN_TANK_KBULK = _epanet2.EN_TANK_KBULK
EN_DIAMETER = _epanet2.EN_DIAMETER
EN_LENGTH = _epanet2.EN_LENGTH
EN_ROUGHNESS = _epanet2.EN_ROUGHNESS
EN_MINORLOSS = _epanet2.EN_MINORLOSS
EN_INITSTATUS = _epanet2.EN_INITSTATUS
EN_INITSETTING = _epanet2.EN_INITSETTING
EN_KBULK = _epanet2.EN_KBULK
EN_KWALL = _epanet2.EN_KWALL
EN_FLOW = _epanet2.EN_FLOW
EN_VELOCITY = _epanet2.EN_VELOCITY
EN_HEADLOSS = _epanet2.EN_HEADLOSS
EN_STATUS = _epanet2.EN_STATUS
EN_SETTING = _epanet2.EN_SETTING
EN_ENERGY = _epanet2.EN_ENERGY
EN_DURATION = _epanet2.EN_DURATION
EN_HYDSTEP = _epanet2.EN_HYDSTEP
EN_QUALSTEP = _epanet2.EN_QUALSTEP
EN_PATTERNSTEP = _epanet2.EN_PATTERNSTEP
EN_PATTERNSTART = _epanet2.EN_PATTERNSTART
EN_REPORTSTEP = _epanet2.EN_REPORTSTEP
EN_REPORTSTART = _epanet2.EN_REPORTSTART
EN_RULESTEP = _epanet2.EN_RULESTEP
EN_STATISTIC = _epanet2.EN_STATISTIC
EN_PERIODS = _epanet2.EN_PERIODS
EN_NODECOUNT = _epanet2.EN_NODECOUNT
EN_TANKCOUNT = _epanet2.EN_TANKCOUNT
EN_LINKCOUNT = _epanet2.EN_LINKCOUNT
EN_PATCOUNT = _epanet2.EN_PATCOUNT
EN_CURVECOUNT = _epanet2.EN_CURVECOUNT
EN_CONTROLCOUNT = _epanet2.EN_CONTROLCOUNT
EN_JUNCTION = _epanet2.EN_JUNCTION
EN_RESERVOIR = _epanet2.EN_RESERVOIR
EN_TANK = _epanet2.EN_TANK
EN_CVPIPE = _epanet2.EN_CVPIPE
EN_PIPE = _epanet2.EN_PIPE
EN_PUMP = _epanet2.EN_PUMP
EN_PRV = _epanet2.EN_PRV
EN_PSV = _epanet2.EN_PSV
EN_PBV = _epanet2.EN_PBV
EN_FCV = _epanet2.EN_FCV
EN_TCV = _epanet2.EN_TCV
EN_GPV = _epanet2.EN_GPV
EN_NONE = _epanet2.EN_NONE
EN_CHEM = _epanet2.EN_CHEM
EN_AGE = _epanet2.EN_AGE
EN_TRACE = _epanet2.EN_TRACE
EN_CONCEN = _epanet2.EN_CONCEN
EN_MASS = _epanet2.EN_MASS
EN_SETPOINT = _epanet2.EN_SETPOINT
EN_FLOWPACED = _epanet2.EN_FLOWPACED
EN_CFS = _epanet2.EN_CFS
EN_GPM = _epanet2.EN_GPM
EN_MGD = _epanet2.EN_MGD
EN_IMGD = _epanet2.EN_IMGD
EN_AFD = _epanet2.EN_AFD
EN_LPS = _epanet2.EN_LPS
EN_LPM = _epanet2.EN_LPM
EN_MLD = _epanet2.EN_MLD
EN_CMH = _epanet2.EN_CMH
EN_CMD = _epanet2.EN_CMD
EN_TRIALS = _epanet2.EN_TRIALS
EN_ACCURACY = _epanet2.EN_ACCURACY
EN_TOLERANCE = _epanet2.EN_TOLERANCE
EN_EMITEXPON = _epanet2.EN_EMITEXPON
EN_DEMANDMULT = _epanet2.EN_DEMANDMULT
EN_LOWLEVEL = _epanet2.EN_LOWLEVEL
EN_HILEVEL = _epanet2.EN_HILEVEL
EN_TIMER = _epanet2.EN_TIMER
EN_TIMEOFDAY = _epanet2.EN_TIMEOFDAY
EN_AVERAGE = _epanet2.EN_AVERAGE
EN_MINIMUM = _epanet2.EN_MINIMUM
EN_MAXIMUM = _epanet2.EN_MAXIMUM
EN_RANGE = _epanet2.EN_RANGE
EN_MIX1 = _epanet2.EN_MIX1
EN_MIX2 = _epanet2.EN_MIX2
EN_FIFO = _epanet2.EN_FIFO
EN_LIFO = _epanet2.EN_LIFO
EN_NOSAVE = _epanet2.EN_NOSAVE
EN_SAVE = _epanet2.EN_SAVE
EN_INITFLOW = _epanet2.EN_INITFLOW
def ENepanet(arg1, arg2, arg3, arg4):
return _epanet2.ENepanet(arg1, arg2, arg3, arg4)
ENepanet = _epanet2.ENepanet
def ENopen(arg1, arg2, arg3):
return _epanet2.ENopen(arg1, arg2, arg3)
ENopen = _epanet2.ENopen
def ENsaveinpfile(arg1):
return _epanet2.ENsaveinpfile(arg1)
ENsaveinpfile = _epanet2.ENsaveinpfile
def ENclose():
return _epanet2.ENclose()
ENclose = _epanet2.ENclose
def ENsolveH():
return _epanet2.ENsolveH()
ENsolveH = _epanet2.ENsolveH
def ENsaveH():
return _epanet2.ENsaveH()
ENsaveH = _epanet2.ENsaveH
def ENopenH():
return _epanet2.ENopenH()
ENopenH = _epanet2.ENopenH
def ENinitH(arg1):
return _epanet2.ENinitH(arg1)
ENinitH = _epanet2.ENinitH
def ENrunH():
return _epanet2.ENrunH()
ENrunH = _epanet2.ENrunH
def ENnextH():
return _epanet2.ENnextH()
ENnextH = _epanet2.ENnextH
def ENcloseH():
return _epanet2.ENcloseH()
ENcloseH = _epanet2.ENcloseH
def ENsavehydfile(arg1):
return _epanet2.ENsavehydfile(arg1)
ENsavehydfile = _epanet2.ENsavehydfile
def ENusehydfile(arg1):
return _epanet2.ENusehydfile(arg1)
ENusehydfile = _epanet2.ENusehydfile
def ENsolveQ():
return _epanet2.ENsolveQ()
ENsolveQ = _epanet2.ENsolveQ
def ENopenQ():
return _epanet2.ENopenQ()
ENopenQ = _epanet2.ENopenQ
def ENinitQ(arg1):
return _epanet2.ENinitQ(arg1)
ENinitQ = _epanet2.ENinitQ
def ENrunQ():
return _epanet2.ENrunQ()
ENrunQ = _epanet2.ENrunQ
def ENnextQ():
return _epanet2.ENnextQ()
ENnextQ = _epanet2.ENnextQ
def ENstepQ():
return _epanet2.ENstepQ()
ENstepQ = _epanet2.ENstepQ
def ENcloseQ():
return _epanet2.ENcloseQ()
ENcloseQ = _epanet2.ENcloseQ
def ENwriteline(arg1):
return _epanet2.ENwriteline(arg1)
ENwriteline = _epanet2.ENwriteline
def ENreport():
return _epanet2.ENreport()
ENreport = _epanet2.ENreport
def ENresetreport():
return _epanet2.ENresetreport()
ENresetreport = _epanet2.ENresetreport
def ENsetreport(arg1):
return _epanet2.ENsetreport(arg1)
ENsetreport = _epanet2.ENsetreport
def ENgetcontrol(arg1):
return _epanet2.ENgetcontrol(arg1)
ENgetcontrol = _epanet2.ENgetcontrol
def ENgetcount(arg1):
return _epanet2.ENgetcount(arg1)
ENgetcount = _epanet2.ENgetcount
def ENgetoption(arg1):
return _epanet2.ENgetoption(arg1)
ENgetoption = _epanet2.ENgetoption
def ENgettimeparam(arg1):
return _epanet2.ENgettimeparam(arg1)
ENgettimeparam = _epanet2.ENgettimeparam
def ENgetflowunits():
return _epanet2.ENgetflowunits()
ENgetflowunits = _epanet2.ENgetflowunits
def ENgetpatternindex(arg1):
return _epanet2.ENgetpatternindex(arg1)
ENgetpatternindex = _epanet2.ENgetpatternindex
def ENgetpatternid(arg1):
return _epanet2.ENgetpatternid(arg1)
ENgetpatternid = _epanet2.ENgetpatternid
def ENgetpatternlen(arg1):
return _epanet2.ENgetpatternlen(arg1)
ENgetpatternlen = _epanet2.ENgetpatternlen
def ENgetpatternvalue(arg1, arg2):
return _epanet2.ENgetpatternvalue(arg1, arg2)
ENgetpatternvalue = _epanet2.ENgetpatternvalue
def ENgetqualtype():
return _epanet2.ENgetqualtype()
ENgetqualtype = _epanet2.ENgetqualtype
def ENgeterror(arg1, arg3):
return _epanet2.ENgeterror(arg1, arg3)
ENgeterror = _epanet2.ENgeterror
def ENgetnodeindex(arg1):
return _epanet2.ENgetnodeindex(arg1)
ENgetnodeindex = _epanet2.ENgetnodeindex
def ENgetnodeid(arg1):
return _epanet2.ENgetnodeid(arg1)
ENgetnodeid = _epanet2.ENgetnodeid
def ENgetnodetype(arg1):
return _epanet2.ENgetnodetype(arg1)
ENgetnodetype = _epanet2.ENgetnodetype
def ENgetnodevalue(arg1, arg2):
return _epanet2.ENgetnodevalue(arg1, arg2)
ENgetnodevalue = _epanet2.ENgetnodevalue
def ENgetlinkindex(arg1):
return _epanet2.ENgetlinkindex(arg1)
ENgetlinkindex = _epanet2.ENgetlinkindex
def ENgetlinkid(arg1):
return _epanet2.ENgetlinkid(arg1)
ENgetlinkid = _epanet2.ENgetlinkid
def ENgetlinktype(arg1):
return _epanet2.ENgetlinktype(arg1)
ENgetlinktype = _epanet2.ENgetlinktype
def ENgetlinknodes(arg1):
return _epanet2.ENgetlinknodes(arg1)
ENgetlinknodes = _epanet2.ENgetlinknodes
def ENgetlinkvalue(arg1, arg2):
return _epanet2.ENgetlinkvalue(arg1, arg2)
ENgetlinkvalue = _epanet2.ENgetlinkvalue
def ENgetversion():
return _epanet2.ENgetversion()
ENgetversion = _epanet2.ENgetversion
def ENsetcontrol(arg1, arg2, arg3, arg4, arg5, arg6):
return _epanet2.ENsetcontrol(arg1, arg2, arg3, arg4, arg5, arg6)
ENsetcontrol = _epanet2.ENsetcontrol
def ENsetnodevalue(arg1, arg2, arg3):
return _epanet2.ENsetnodevalue(arg1, arg2, arg3)
ENsetnodevalue = _epanet2.ENsetnodevalue
def ENsetlinkvalue(arg1, arg2, arg3):
return _epanet2.ENsetlinkvalue(arg1, arg2, arg3)
ENsetlinkvalue = _epanet2.ENsetlinkvalue
def ENaddpattern(arg1):
return _epanet2.ENaddpattern(arg1)
ENaddpattern = _epanet2.ENaddpattern
def ENsetpattern(arg1, floatarray, nfloats):
return _epanet2.ENsetpattern(arg1, floatarray, nfloats)
ENsetpattern = _epanet2.ENsetpattern
def ENsetpatternvalue(arg1, arg2, arg3):
return _epanet2.ENsetpatternvalue(arg1, arg2, arg3)
ENsetpatternvalue = _epanet2.ENsetpatternvalue
def ENsettimeparam(arg1, arg2):
return _epanet2.ENsettimeparam(arg1, arg2)
ENsettimeparam = _epanet2.ENsettimeparam
def ENsetoption(arg1, arg2):
return _epanet2.ENsetoption(arg1, arg2)
ENsetoption = _epanet2.ENsetoption
def ENsetstatusreport(arg1):
return _epanet2.ENsetstatusreport(arg1)
ENsetstatusreport = _epanet2.ENsetstatusreport
def ENsetqualtype(arg1, arg2, arg3, arg4):
return _epanet2.ENsetqualtype(arg1, arg2, arg3, arg4)
ENsetqualtype = _epanet2.ENsetqualtype
WRAPPER_ERROR_FILE_OPEN = _epanet2.WRAPPER_ERROR_FILE_OPEN
WRAPPER_ERROR_NOT_IMPLEMENTED = _epanet2.WRAPPER_ERROR_NOT_IMPLEMENTED
def ENsetpatterndim(index, dim):
return _epanet2.ENsetpatterndim(index, dim)
ENsetpatterndim = _epanet2.ENsetpatterndim
def ENsetpatterndim_wrap(index, dim):
return _epanet2.ENsetpatterndim_wrap(index, dim)
ENsetpatterndim_wrap = _epanet2.ENsetpatterndim_wrap
# This file is compatible with both classic and new-style classes.
cvar = _epanet2.cvar | PypiClean |
/Lowdown-0.2.1.tar.gz/Lowdown-0.2.1/doc/installing.rst | ..
:copyright: Copyright (c) 2014 ftrack
.. _installing:
**********
Installing
**********
.. highlight:: bash
Installation is simple with `pip <http://www.pip-installer.org/>`_::
pip install lowdown
Building from source
====================
You can also build manually from the source for more control. First obtain a
copy of the source by either downloading the
`zipball <https://bitbucket.org/ftrack/lowdown/get/master.zip>`_ or
cloning the public repository::
git clone git@bitbucket.org:ftrack/lowdown.git
Then you can build and install the package into your current Python
site-packages folder::
python setup.py install
Alternatively, just build locally and manage yourself::
python setup.py build
Building documentation from source
----------------------------------
To build the documentation from source::
python setup.py build_sphinx
Then view in your browser::
file:///path/to/lowdown/build/doc/html/index.html
Dependencies
============
* `Python <http://python.org>`_ >= 2.7, < 4
* `docutils <http://docutils.sourceforge.net/>`_ >= 0.12, < 1
* `arrow <http://crsmithdev.com/arrow/>`_ >= 0.4.4, < 1
Additional for building
-----------------------
* `Sphinx <http://sphinx-doc.org/>`_ >= 1.8.5, < 4
* `sphinx_rtd_theme <https://github.com/snide/sphinx_rtd_theme>`_ >= 0.1.6, < 1
| PypiClean |
/Js2Py-0.74.tar.gz/Js2Py-0.74/js2py/internals/prototypes/jsjson.py | from __future__ import unicode_literals
from ..conversions import *
from ..func_utils import *
from ..operations import strict_equality_op
import json
indent = ''  # shared indentation state for stringify()/jo()/ja()
# python 3 support: alias Python-2 names so the code runs on both.
import six
if six.PY3:
    basestring = str
    long = int
    xrange = range
    unicode = str
def parse(this, args):
    """JS ``JSON.parse``: parse a JSON string, optionally filtering the
    result through a *reviver* callable (ECMA-262 5.1, 15.12.2).

    Raises a JS SyntaxError (via MakeError) on invalid input.
    """
    text, reviver = get_arg(args, 0), get_arg(args, 1)
    s = to_string(text)
    try:
        unfiltered = json.loads(s)
    except Exception:  # narrowed from bare 'except:' so SystemExit /
        # KeyboardInterrupt still propagate instead of being reported as a
        # JSON syntax error.
        raise MakeError(
            'SyntaxError',
            'JSON.parse could not parse JSON string - Invalid syntax')
    # Convert the native Python structure into its JS object equivalent.
    unfiltered = to_js(unfiltered, args.space)
    if is_callable(reviver):
        # The reviver sees the parsed value as the '' property of a fresh
        # holder object, as the spec requires.
        root = args.space.ConstructObject({'': unfiltered})
        return walk(root, '', reviver)
    else:
        return unfiltered
def stringify(this, args):
    """JS ``JSON.stringify``: serialise *value*, honouring the optional
    *replacer* (function or array whitelist) and *space* (indentation)
    arguments (ECMA-262 5.1, 15.12.3).

    Uses the module-level ``indent`` global as running indentation state
    shared with jo()/ja().
    """
    global indent
    value, replacer, space = get_arg(args, 0), get_arg(args, 1), get_arg(
        args, 2)
    stack = set([])  # cycle detection: objects currently being serialised
    indent = ''
    property_list = replacer_function = undefined
    if is_object(replacer):
        if is_callable(replacer):
            replacer_function = replacer
        elif replacer.Class == 'Array':
            # An array replacer acts as a whitelist of property names.
            property_list = []
            for e in replacer:
                v = replacer[e]
                item = undefined
                typ = Type(v)
                if typ == 'Number':
                    item = to_string(v)
                elif typ == 'String':
                    item = v
                elif typ == 'Object':
                    # String/Number wrapper objects are unwrapped to strings.
                    if GetClass(v) in ('String', 'Number'):
                        item = to_string(v)
                # De-duplicate while preserving first-seen order.
                if not is_undefined(item) and item not in property_list:
                    property_list.append(item)
    if is_object(space):
        # Unwrap Number/String wrapper objects used as 'space'.
        if GetClass(space) == 'Number':
            space = to_number(space)
        elif GetClass(space) == 'String':
            space = to_string(space)
    if Type(space) == 'Number':
        # Numeric space: clamp to at most 10 spaces, never negative.
        space = min(10, to_int(space))
        gap = max(int(space), 0) * ' '
    elif Type(space) == 'String':
        # String space: use at most its first 10 characters.
        gap = space[:10]
    else:
        gap = ''
    # Serialise starting from a synthetic holder whose '' property is value.
    return Str('', args.space.ConstructObject({
        '': value
    }), replacer_function, property_list, gap, stack, space)
def Str(key, holder, replacer_function, property_list, gap, stack, space):
    """Serialise holder[key] to a JSON fragment string, or return JS
    ``undefined`` when the value is not serialisable (abstract operation
    Str, ECMA-262 5.1, 15.12.3)."""
    value = holder.get(key)
    if is_object(value):
        # Honour a custom toJSON() method if present.
        to_json = value.get('toJSON')
        if is_callable(to_json):
            value = to_json.call(value, (key, ))
    # NOTE(review): indentation was stripped by the source dump; per the
    # spec the replacer call belongs at this level, outside the is_object
    # branch above -- confirm against upstream.
    if not is_undefined(replacer_function):
        # A function replacer may substitute the value entirely.
        value = replacer_function.call(holder, (key, value))
    if is_object(value):
        # Unwrap primitive wrapper objects.
        if value.Class == 'String':
            value = to_string(value)
        elif value.Class == 'Number':
            value = to_number(value)
        elif value.Class == 'Boolean':
            value = to_boolean(value)
    typ = Type(value)
    if is_null(value):
        return 'null'
    elif typ == 'Boolean':
        return 'true' if value else 'false'
    elif typ == 'String':
        return Quote(value)
    elif typ == 'Number':
        # Finite numbers only; Infinity/NaN serialise as null.
        if not is_infinity(value):
            return to_string(value)
        return 'null'
    if is_object(value) and not is_callable(value):
        # Functions are not serialisable; arrays and plain objects recurse.
        if value.Class == 'Array':
            return ja(value, stack, gap, property_list, replacer_function,
                      space)
        else:
            return jo(value, stack, gap, property_list, replacer_function,
                      space)
    return undefined
def jo(value, stack, gap, property_list, replacer_function, space):
    """Serialise a JS object to a JSON object literal (abstract operation
    JO, ECMA-262 5.1, 15.12.3). Mutates the global ``indent`` while
    recursing and restores it before returning."""
    global indent
    if value in stack:
        raise MakeError('TypeError', 'Converting circular structure to JSON')
    stack.add(value)
    stepback = indent
    indent += gap
    if not is_undefined(property_list):
        # Array replacer supplied: serialise only the whitelisted keys.
        k = property_list
    else:
        # Otherwise take the object's own enumerable property names.
        k = [unicode(e) for e, d in value.own.items() if d.get('enumerable')]
    partial = []
    for p in k:
        str_p = Str(p, value, replacer_function, property_list, gap, stack,
                    space)
        if not is_undefined(str_p):
            member = json.dumps(p) + ':' + (
                ' ' if gap else
                '') + str_p  # todo not sure here - what space character?
            partial.append(member)
    if not partial:
        final = '{}'
    else:
        if not gap:
            final = '{%s}' % ','.join(partial)
        else:
            sep = ',\n' + indent
            properties = sep.join(partial)
            final = '{\n' + indent + properties + '\n' + stepback + '}'
    # Restore shared state for the caller.
    stack.remove(value)
    indent = stepback
    return final
def ja(value, stack, gap, property_list, replacer_function, space):
    """Serialise a JS array to a JSON array literal (abstract operation
    JA, ECMA-262 5.1, 15.12.3). Mutates the global ``indent`` while
    recursing and restores it before returning."""
    global indent
    if value in stack:
        raise MakeError('TypeError', 'Converting circular structure to JSON')
    stack.add(value)
    stepback = indent
    indent += gap
    partial = []
    length = js_arr_length(value)
    for index in xrange(length):
        index = unicode(index)
        str_index = Str(index, value, replacer_function, property_list, gap,
                        stack, space)
        if is_undefined(str_index):
            # Holes / unserialisable entries become null inside arrays.
            partial.append('null')
        else:
            partial.append(str_index)
    if not partial:
        final = '[]'
    else:
        if not gap:
            final = '[%s]' % ','.join(partial)
        else:
            sep = ',\n' + indent
            properties = sep.join(partial)
            final = '[\n' + indent + properties + '\n' + stepback + ']'
    # Restore shared state for the caller.
    stack.remove(value)
    indent = stepback
    return final
def Quote(string):
    """Return *string* as a double-quoted, escaped JSON string literal."""
    quoted = json.dumps(string)
    return quoted
def to_js(d, _args_space):
    # Convert a native Python structure (e.g. from json.loads) into the
    # equivalent JS object graph within the given object space.
    return convert_to_js_type(d, _args_space)
def walk(holder, name, reviver):
    """Recursively apply *reviver* to holder[name], children before parent
    (abstract operation Walk, ECMA-262 5.1, 15.12.2).

    Returns the reviver's result for this property; a JS ``undefined``
    result makes the caller delete the property.
    """
    val = holder.get(name)
    if GetClass(val) == 'Array':
        for i in xrange(js_arr_length(val)):
            i = unicode(i)
            new_element = walk(val, i, reviver)
            if is_undefined(new_element):
                val.delete(i)
            else:
                # BUGFIX: was 'new_element.put(i, new_element)', which wrote
                # the revived value onto itself instead of back into the
                # array (compare the object branch below).
                val.put(i, new_element)
    elif is_object(val):
        for key in [
                unicode(e) for e, d in val.own.items() if d.get('enumerable')
        ]:
            new_element = walk(val, key, reviver)
            if is_undefined(new_element):
                val.delete(key)
            else:
                val.put(key, new_element)
    return reviver.call(holder, (name, val))
/FitsGeo-1.0.0.tar.gz/FitsGeo-1.0.0/fitsgeo/const.py | import numpy as np
import vpython
def rgb_to_vector(r: float, g: float, b: float):
    """
    Make vpython.vector color from rgb values.

    :param r: red value 0-255
    :param g: green value 0-255
    :param b: blue value 0-255
    :return: vpython.vector with components normalised to 0-1
    """
    return vpython.vector(r/255, g/255, b/255)
# Math constants
PI = np.pi
# Define basic colors as constants
RED = vpython.color.red
LIME = vpython.color.green
BLUE = vpython.color.blue
BLACK = vpython.color.black
WHITE = vpython.color.white
CYAN = vpython.color.cyan
YELLOW = vpython.color.yellow
MAGENTA = vpython.color.magenta
ORANGE = vpython.color.orange
GAINSBORO = rgb_to_vector(220, 220, 220)
LIGHTGRAY = rgb_to_vector(211, 211, 211)
SILVER = rgb_to_vector(192, 192, 192)
GRAY = rgb_to_vector(169, 169, 169)
DARKGRAY = rgb_to_vector(128, 128, 128)
DIMGRAY = rgb_to_vector(105, 105, 105)
# 6 shades of gray
GRAY_SCALE = [GAINSBORO, LIGHTGRAY, SILVER, GRAY, DARKGRAY, DIMGRAY]
GREEN = rgb_to_vector(0, 128, 0)
OLIVE = rgb_to_vector(128, 128, 0)
BROWN = rgb_to_vector(139, 69, 19)
NAVY = rgb_to_vector(0, 0, 128)
TEAL = rgb_to_vector(0, 128, 128)
PURPLE = rgb_to_vector(128, 0, 128)
MAROON = rgb_to_vector(128, 0, 0)
CRIMSON = rgb_to_vector(220, 20, 60)
TOMATO = rgb_to_vector(255, 99, 71)
GOLD = rgb_to_vector(255, 215, 0)
CHOCOLATE = rgb_to_vector(210, 105, 30)
PERU = rgb_to_vector(205, 133, 63)
INDIGO = rgb_to_vector(75, 0, 130)
KHAKI = rgb_to_vector(240, 230, 140)
SIENNA = rgb_to_vector(160, 82, 45)
DARKRED = rgb_to_vector(139, 0, 0)
PINK = rgb_to_vector(219, 112, 147)
NAVAJOWHITE = rgb_to_vector(255, 222, 173)
DARKORANGE = rgb_to_vector(255, 140, 0)
SADDLEBROWN = rgb_to_vector(139, 69, 19)
DARKBROWN = rgb_to_vector(51, 25, 0)
DARKGOLDENROD = rgb_to_vector(184, 134, 11)
PASTELYELLOW = rgb_to_vector(255, 255, 153)
PASTELGREEN = rgb_to_vector(204, 255, 153)
YELLOWGREEN = rgb_to_vector(178, 255, 102)
DARKGREEN = rgb_to_vector(0, 102, 0)
MOSSGREEN = rgb_to_vector(0, 51, 0)
BLUEGREEN = rgb_to_vector(0, 255, 128)
PASTELCYAN = rgb_to_vector(153, 255, 255)
PASTELBLUE = rgb_to_vector(153, 204, 255)
CYANBLUE = rgb_to_vector(0, 102, 102)
DARKVIOLET = rgb_to_vector(148, 0, 211)
VIOLET = rgb_to_vector(238, 130, 238)
# NOTE(review): PASTELPURPLE has the same RGB as VIOLET, and the
# ANGEL_COLORS table below maps "pastelpurple" to INDIGO rather than to
# this constant -- looks like a copy-paste slip; confirm the intended RGB.
PASTELPURPLE = rgb_to_vector(238, 130, 238)
PASTELVIOLET = rgb_to_vector(204, 153, 255)
PASTELBROWN = rgb_to_vector(131, 105, 83)
# Dictionary with ANGEL colors in correspondence to VPython colors
ANGEL_COLORS = {
"white": WHITE,
"lightgray": LIGHTGRAY,
"gray": GRAY,
"darkgray": DARKGRAY,
"matblack": DIMGRAY,
"black": BLACK,
"darkred": DARKRED,
"red": RED,
"pink": PINK,
"pastelpink": NAVAJOWHITE,
"orange": DARKORANGE,
"brown": SADDLEBROWN,
"darkbrown": DARKBROWN,
"pastelbrown": PASTELBROWN,
"orangeyellow": GOLD,
"camel": OLIVE,
"pastelyellow": PASTELYELLOW,
"yellow": YELLOW,
"pastelgreen": PASTELGREEN,
"yellowgreen": YELLOWGREEN,
"green": GREEN,
"darkgreen": DARKGREEN,
"mossgreen": MOSSGREEN,
"bluegreen": BLUEGREEN,
"pastelcyan": PASTELCYAN,
"pastelblue": PASTELBLUE,
"cyan": CYAN,
"cyanblue": CYANBLUE,
"blue": BLUE,
"violet": DARKVIOLET,
"purple": PURPLE,
"magenta": MAGENTA,
"winered": MAROON,
"pastelmagenta": VIOLET,
"pastelpurple": INDIGO,
"pastelviolet": PASTELVIOLET
}
if __name__ == "__main__":
    # This module is a constants library; running it directly just prints
    # a usage hint.
    print(
        "--- Welcome to FitsGeo! ---\n" +
        "This is a module for FitsGeo!\nImport FitsGeo to use.")
/MergePythonSDK.ticketing-2.2.2-py3-none-any.whl/MergePythonSDK/accounting/model/link_token.py | import re # noqa: F401
import sys # noqa: F401
from typing import (
Optional,
Union,
List,
Dict,
)
from MergePythonSDK.shared.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
OpenApiModel,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from MergePythonSDK.shared.exceptions import ApiAttributeError
from MergePythonSDK.shared.model_utils import import_model_by_name
class LinkToken(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
defined_types = {
'link_token': (str,), # noqa: E501
'integration_name': (str,), # noqa: E501
'magic_link_url': (str, none_type,), # noqa: E501
}
return defined_types
@cached_property
def discriminator():
return None
attribute_map = {
'link_token': 'link_token', # noqa: E501
'integration_name': 'integration_name', # noqa: E501
'magic_link_url': 'magic_link_url', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, link_token, integration_name, *args, **kwargs): # noqa: E501
"""LinkToken - a model defined in OpenAPI
Args:
link_token (str):
integration_name (str):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
magic_link_url (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', True)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.link_token = link_token
self.integration_name = integration_name
self.magic_link_url = kwargs.get("magic_link_url", None)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, link_token, integration_name, *args, **kwargs): # noqa: E501
"""LinkToken - a model defined in OpenAPI
Args:
link_token (str):
integration_name (str):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
magic_link_url (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.link_token: Union[str] = link_token
self.integration_name: Union[str] = integration_name
self.magic_link_url: Union[str] = kwargs.get("magic_link_url", str()) | PypiClean |
/Minetorch-0.6.17.tar.gz/Minetorch-0.6.17/minetorch/spreadsheet.py | import functools
import logging
from concurrent.futures import ThreadPoolExecutor
from google.oauth2 import service_account
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from googleapiclient.http import MediaFileUpload
from .plugin import Plugin
pool = ThreadPoolExecutor(1)
logger = logging.getLogger(__name__)
def _async(fn):
@functools.wraps(fn)
def _func(*args, **kwargs):
def _inner(*args, **kwargs):
try:
fn(*args, **kwargs)
except Exception as e:
logger.warn(f"error occured while handle task {e}")
raise e
return pool.submit(_inner, *args, **kwargs)
return _func
class ColumnNotExistsError(Exception):
    """Raised when updating a spreadsheet column that was never created."""
    pass
class MinetorchSpreadsheet(Plugin):
    """Abstract spreadsheet plugin: one row per experiment, one column per
    tracked value. Concrete backends (e.g. GoogleSheet) implement the
    row/column creation and flushing primitives."""

    def __init__(self):
        super().__init__()
        self.columns = []  # column keys, in creation order
        self.cached_row_data = {}  # pending cell values, flushed in bulk

    def _create_experiment_row(self):
        """Create a row for this experiment"""
        raise NotImplementedError

    def _create_end_column_divider(self):
        raise NotImplementedError

    @_async
    def update(self, key, value):
        """Update value for a column.

        Runs on the background worker, so ColumnNotExistsError is raised
        inside the worker thread and only surfaces through the returned
        Future, not at the call site.
        """
        if (name := key) not in self.columns if False else key not in self.columns:
            raise ColumnNotExistsError
        if not isinstance(value, dict):
            # Normalise scalars to the {'raw': value} cell format.
            value = {"raw": value}
        self.cached_row_data[key] = value

    def flush(self):
        raise NotImplementedError

    def onready(self):
        """Called after all the columns are created"""
        raise NotImplementedError

    def create_column(self, key, title):
        """Create a column on the sheet."""
        # NOTE(review): 'title' is unused here; presumably subclasses
        # render it. The base implementation only records the key.
        self.columns.append(key)

    @property
    def experiment_row_name(self):
        # Metadata key identifying this experiment's row; self.code
        # presumably comes from the Plugin base class -- confirm.
        return f"_row_{self.code}"

    @property
    def title_row_name(self):
        return "_row_title"

    @property
    def end_column_name(self):
        return "_column_end_divider"

    @property
    def banner_row_name(self):
        return "_row_banner"
class GoogleSheet(MinetorchSpreadsheet):
def __init__(
        self, sheet_id, service_account_file, meta_prefix="", build_kwargs=None
):
    """Google Sheets backend.

    :param sheet_id: id of the target spreadsheet
    :param service_account_file: path to a Google service-account JSON key
    :param meta_prefix: namespace prefix for developer-metadata keys
    :param build_kwargs: extra kwargs for googleapiclient.discovery.build
    """
    super().__init__()
    if build_kwargs is None:
        build_kwargs = {}
    self.sheet_id = sheet_id
    self.meta_prefix = meta_prefix
    credentials = service_account.Credentials.from_service_account_file(
        service_account_file
    )
    service = build("sheets", "v4", credentials=credentials, **build_kwargs)
    self.sheet = service.spreadsheets()
    self.drive = build("drive", "v3", credentials=credentials)
    # Drive folder used for uploaded artifacts; _prepare_drive_directory is
    # defined outside this view.
    self.drive_folder_id = self._prepare_drive_directory()
def _meta(self, key):
    # Namespace a developer-metadata key with the configured prefix.
    return f"{self.meta_prefix}{key}"
def _num_to_letter(self, num):
    """Convert a 0-based column index to its A1-notation letters
    (0 -> 'A', 25 -> 'Z', 26 -> 'AA', ...)."""
    letters = []
    n = num + 1
    while n > 0:
        n, rem = divmod(n - 1, 26)
        letters.append(chr(rem + 65))
    return "".join(reversed(letters))
def _index_of(self, key):
    """Return the start index of the row/column tagged with developer
    metadata *key*, or False if no such dimension exists."""
    search = {
        "dataFilters": [
            {"developerMetadataLookup": {"metadataKey": self._meta(key)}}
        ]
    }
    result = (
        self.sheet.developerMetadata()
        .search(spreadsheetId=self.sheet_id, body=search)
        .execute()
    )
    # NOTE(review): len(result.items()) counts the response's top-level
    # keys, not the matches -- presumably an empty dict means "no match";
    # confirm against the Sheets API response shape.
    if len(result.items()) == 0:
        return False
    else:
        return result["matchedDeveloperMetadata"][0]["developerMetadata"][
            "location"
        ]["dimensionRange"]["startIndex"]
# Alias: existence check and index lookup share the same implementation.
_exists = _index_of
@_async
def reset_index(self):
self.banner_index = self._create_banner_dimension()
self.title_index = self._create_title_dimension()
self.endcol_index = self._create_end_column_divider()
self.experiment_row_index = self._insert_dimension(
self.experiment_row_name, self.title_index + 1, "ROWS"
)
@_async
def prepare(self):
self.reset_index()
self.update("code", self.code)
@property
def dark_bg(self):
return {
"red": 0.10980392156862745,
"green": 0.5686274509803921,
"blue": 0.6039215686274509,
}
@property
def white(self):
return {"red": 1.0, "green": 1.0, "blue": 1.0}
@property
def light_bg(self):
return {
"red": 0.9411764705882353,
"green": 1.0,
"blue": 0.9882352941176471,
}
def _create_banner_dimension(self):
return self._insert_dimension(self.banner_row_name, 0, "ROWS")
def _create_title_dimension(self):
return self._insert_dimension(self.title_row_name, 1, "ROWS")
def _create_end_column_divider(self):
icol = self._insert_dimension(self.end_column_name, 0, "COLUMNS")
requests = [
{
"repeatCell": {
"range": {
"sheetId": 0,
"startColumnIndex": icol,
"endColumnIndex": icol + 1,
"startRowIndex": self.title_index,
"endRowIndex": self.title_index + 1,
},
"cell": {
"userEnteredFormat": {
"backgroundColor": self.dark_bg,
"textFormat": {
"foregroundColor": self.white,
"bold": False,
"fontSize": 12,
},
"horizontalAlignment": "CENTER",
"verticalAlignment": "MIDDLE",
}
},
"fields": "*",
}
},
{
"updateDimensionProperties": {
"range": {
"sheetId": 0,
"dimension": "COLUMNS",
"startIndex": icol,
"endIndex": icol + 1,
},
"properties": {
"hiddenByUser": True,
},
"fields": "hiddenByUser",
}
},
]
self.sheet.batchUpdate(
spreadsheetId=self.sheet_id, body={"requests": requests}
).execute()
return icol
def _insert_dimension(self, row_name, index, dim, extra_request=None):
result = self._exists(row_name)
if result is not False:
return result
if extra_request is None:
extra_request = []
create_row_request = {
"insertDimension": {
"range": {
"sheetId": 0,
"dimension": dim,
"startIndex": index,
"endIndex": index + 1,
},
"inheritFromBefore": False,
}
}
assign_name_request = {
"createDeveloperMetadata": {
"developerMetadata": {
"metadataKey": self._meta(row_name),
"metadataValue": self._meta(row_name),
"location": {
"dimensionRange": {
"sheetId": 0,
"dimension": dim,
"startIndex": index,
"endIndex": index + 1,
}
},
"visibility": "DOCUMENT",
}
}
}
body = {
"requests": [create_row_request, assign_name_request, *extra_request],
}
self.sheet.batchUpdate(spreadsheetId=self.sheet_id, body=body).execute()
return self._exists(row_name)
@_async
def onready(self):
banner = """😀 Thanks for using Minetorch, if you found it's useful, please considering star the project at https://github.com/minetorch/minetorch.
👇 The cells generated by Minetorch should not be manually modified, but you can still change the size of them.
👉 The other cells are free to edit, Minetorch will never override them.
🧶 Happy tweaking !
"""
icol_end = self._index_of(self.end_column_name)
icol_start = self._index_of(self.end_column_name) - len(self.columns)
merge_cells = {
"mergeCells": {
"range": {
"sheetId": 0,
"startRowIndex": self.banner_index,
"endRowIndex": self.banner_index + 1,
"startColumnIndex": icol_start,
"endColumnIndex": icol_end,
},
"mergeType": "MERGE_ALL",
}
}
change_cell = {
"repeatCell": {
"range": {
"sheetId": 0,
"startRowIndex": self.banner_index,
"endRowIndex": self.banner_index + 1,
"startColumnIndex": icol_start,
"endColumnIndex": icol_end,
},
"cell": {"userEnteredFormat": {"wrapStrategy": "WRAP"}},
"fields": "*",
}
}
auto_resize = {
"autoResizeDimensions": {
"dimensions": {
"sheetId": 0,
"dimension": "ROWS",
"startIndex": self.banner_index,
"endIndex": self.banner_index + 1,
}
}
}
body = {"requests": [merge_cells, change_cell, auto_resize]}
self.sheet.batchUpdate(spreadsheetId=self.sheet_id, body=body).execute()
self._update_cells(
f"{self._num_to_letter(icol_start)}{self.banner_index + 1}", [banner]
)
@_async
def create_column(self, key, title, size=None):
super().create_column(key, title)
col_index = self._index_of(self.end_column_name)
change_cell_request = {
"repeatCell": {
"range": {
"sheetId": 0,
"startColumnIndex": col_index,
"endColumnIndex": col_index + 1,
"startRowIndex": self.experiment_row_index,
"endRowIndex": self.experiment_row_index + 1,
},
"cell": {
"userEnteredFormat": {
"backgroundColor": self.light_bg,
"horizontalAlignment": "CENTER",
"verticalAlignment": "MIDDLE",
}
},
"fields": "*",
},
}
assign_name_request = {
"createDeveloperMetadata": {
"developerMetadata": {
"metadataKey": self._meta("__minetorch_column__"),
"metadataValue": key,
"location": {
"dimensionRange": {
"sheetId": 0,
"dimension": "COLUMNS",
"startIndex": col_index,
"endIndex": col_index + 1,
}
},
"visibility": "DOCUMENT",
}
}
}
index = self._insert_dimension(
key, col_index, "COLUMNS", [change_cell_request, assign_name_request]
)
self._update_cells(
f"{self._num_to_letter(index)}{self.title_index + 1}", [title]
)
def _update_cells(self, a1, values):
value_range = {"range": a1, "majorDimension": "ROWS", "values": [values]}
try:
self.sheet.values().update(
spreadsheetId=self.sheet_id,
range=a1,
valueInputOption="USER_ENTERED",
body=value_range,
).execute()
except Exception as e:
self.logger.warn(f"Update sheet failed with {e}")
return
@_async
def flush(self):
irow = self._index_of(self.experiment_row_name)
column_indices = self._get_column_indices()
for key, value in self.cached_row_data.items():
raw_value = value.get("raw")
processor = value.get("processor")
if processor is None:
value = raw_value
else:
value = getattr(self, f"_process_{processor}")(key, raw_value)
icol = column_indices[key]
self._update_cells(f"{self._num_to_letter(icol)}{irow + 1}", [value])
self.cached_row_data = {}
def _process_upload_image(self, key, value, retry=True):
try:
image_id = self._upload_drive_image(key, value)
return f'=IMAGE("https://drive.google.com/uc?export=view&id={image_id}", 2)'
except HttpError as e:
if retry:
return self._process_upload_image(key, value)
raise e
def _process_repr(self, key, value):
return repr(value)
def _get_column_indices(self):
search = {
"dataFilters": [
{
"developerMetadataLookup": {
"metadataKey": self._meta("__minetorch_column__")
}
}
]
}
r = (
self.sheet.developerMetadata()
.search(spreadsheetId=self.sheet_id, body=search)
.execute()
)
result = {}
for item in r["matchedDeveloperMetadata"]:
column_key = item["developerMetadata"]["metadataValue"]
index = item["developerMetadata"]["location"]["dimensionRange"][
"startIndex"
]
result[column_key] = index
return result
def _upload_drive_image(self, key, value, retry=True):
try:
file_metadata = {"name": key, "parents": [self.drive_folder_id]}
media = MediaFileUpload(value, mimetype="image/png")
file = (
self.drive.files()
.create(body=file_metadata, media_body=media, fields="id")
.execute()
)
return file.get("id")
except HttpError as e:
if not retry:
raise e
self.drive_folder_id = self._prepare_drive_directory()
self._upload_drive_image(key, value, retry=False)
def _prepare_drive_directory(self):
try:
result = (
self.drive.files()
.list(q="name='minetorch_assets'", fields="files(id)")
.execute()
)
dir_id = result["files"][0]["id"]
except IndexError:
file_metadata = {
"name": "minetorch_assets",
"mimeType": "application/vnd.google-apps.folder",
}
file = self.drive.files().create(body=file_metadata, fields="id").execute()
dir_id = file.get("id")
self.drive.permissions().create(
fileId=dir_id, body={"role": "writer", "type": "anyone"}
).execute()
return dir_id
"""
if __name__ == '__main__':
from .miner import Miner
google_sheet = GoogleSheet('1pxkEJ7h3gH4LNleHSnhHA2jFn-lMbcPpVqgQCh_dF8I', './quickstart.json', 'pre_295')
google_sheet.code = 'test11'
google_sheet.reset_index()
google_sheet.create_column('code', 'Code of the experiment')
google_sheet.create_column('loss', 'Loss')
google_sheet.create_column('confusion_matrix', 'Confusion Matrix')
google_sheet.create_column('some_image', 'Some Image')
google_sheet.update('code', google_sheet.code)
google_sheet.flush()
import numpy as np
import time
res = np.random.rand(2, 2)
google_sheet.update('some_image', {'raw': './examples/alchemistic_directory/geass/graphs/accuracy.png', 'processor': 'upload_image'})
google_sheet.update('loss', 0.9)
google_sheet.update('confusion_matrix', repr(res))
google_sheet.flush()
google_sheet.onready()
""" | PypiClean |
/GenomicRanges-0.3.2-py3-none-any.whl/genomicranges/io/tiling.py | import math
from typing import MutableMapping, Optional, Union
import pandas as pd
# from ..GenomicRanges import GenomicRanges
from ..SeqInfo import SeqInfo
from ..utils import split_intervals
from .pdf import from_pandas
__author__ = "jkanche"
__copyright__ = "jkanche"
__license__ = "MIT"
def tile_genome(
seqlengths: Union[MutableMapping, SeqInfo],
n: Optional[int] = None,
width: Optional[int] = None,
) -> "GenomicRanges":
"""Create new genomic regions by partitioning a specified genome.
If ``n`` is provided, the region is split into ``n`` intervals. The last interval may
not contain the same 'width' as the other regions.
Alternatively, ``width`` may be provided for each interval. Similarly, the last region
may be less than ``width``.
Either ``n`` or ``width`` must be provided but not both.
Args:
seqlengths (Union[MutableMapping, SeqInfo]): Sequence lengths of each chromosome.
``seqlengths`` may be a dictionary, where keys specify the chromosome and the value is
thelength of each chromosome in the genome.
Alternatively, ``seqlengths`` may be an instance of
:py:class:`~genomicranges.SeqInfo.SeqInfo`.
n (int, optional): Number of intervals to split into.
Defaults to None, then 'width' of each interval is computed from ``seqlengths``.
width (int, optional): Width of each interval. Defaults to None.
Raises:
ValueError: Either ``n`` or ``width`` must be provided but not both.
Returns:
GenomicRanges: The genome with the tiled regions.
"""
if n is not None and width is not None:
raise ValueError("Both `n` or `width` are provided!")
seqlen_ = seqlengths
if isinstance(seqlengths, SeqInfo):
seqlen_ = seqlengths.seqlengths
all_intervals = []
for chrm, chrlen in seqlen_.items():
twidth = None
if n is not None:
twidth = math.ceil(chrlen / n)
elif width is not None:
twidth = width
all_intervals.extend(split_intervals(chrm, "*", 1, chrlen, twidth))
columns = ["seqnames", "strand", "starts", "ends"]
final_df = pd.DataFrame.from_records(all_intervals, columns=columns)
final_df = final_df.sort_values(["seqnames", "strand", "starts", "ends"])
return from_pandas(final_df) | PypiClean |
/LUBEAT-0.13.1-cp38-cp38-macosx_10_9_x86_64.whl/econml/sklearn_extensions/ensemble.py | from ..grf import RegressionForest
from ..utilities import deprecated
@deprecated("The SubsampledHonestForest class has been deprecated by the grf.RegressionForest class; "
"an upcoming release will remove support for the this class.")
def SubsampledHonestForest(n_estimators=100,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_decrease=0.,
subsample_fr='auto',
honest=True,
n_jobs=None,
random_state=None,
verbose=0,
warm_start=False):
"""
An implementation of a subsampled honest random forest regressor on top of an sklearn
regression tree. Implements subsampling and honesty as described in [3]_,
but uses a scikit-learn regression tree as a base. It provides confidence intervals based on ideas
described in [3]_ and [4]_
Parameters
----------
n_estimators : integer, optional (default=100)
The total number of trees in the forest. The forest consists of a
forest of sqrt(n_estimators) sub-forests, where each sub-forest
contains sqrt(n_estimators) trees.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of splitting samples required to split an internal node.
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` splitting samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression. After construction the tree is also pruned
so that there are at least min_samples_leaf estimation samples on
each leaf.
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
splitting samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided. After construction
the tree is pruned so that the fraction of the sum total weight
of the estimation samples contained in each leaf node is at
least min_weight_fraction_leaf
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a fraction and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, optional (default=0.)
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of split samples, ``N_t`` is the number of
split samples at the current node, ``N_t_L`` is the number of split samples in the
left child, and ``N_t_R`` is the number of split samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
subsample_fr : float or 'auto', optional (default='auto')
The fraction of the half-samples that are used on each tree. Each tree
will be built on subsample_fr * n_samples/2.
If 'auto', then the subsampling fraction is set to::
(n_samples/2)**(1-1/(2*n_features+2))/(n_samples/2)
which is sufficient to guarantee asympotitcally valid inference.
honest : boolean, optional (default=True)
Whether to use honest trees, i.e. half of the samples are used for
creating the tree structure and the other half for the estimation at
the leafs. If False, then all samples are used for both parts.
n_jobs : int or None, optional (default=None)
The number of jobs to run in parallel for both `fit` and `predict`.
`None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity when fitting and predicting.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest. See :term:`the Glossary <warm_start>`.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
subsample_fr_ : float
The chosen subsample ratio. Eache tree was trained on ``subsample_fr_ * n_samples / 2``
data points.
References
----------
.. [3] S. Athey, S. Wager, "Estimation and Inference of Heterogeneous Treatment Effects using Random Forests",
Journal of the American Statistical Association 113.523 (2018): 1228-1242.
.. [4] S. Athey, J. Tibshirani, and S. Wager, "Generalized random forests",
The Annals of Statistics, 47(2), 1148-1178, 2019.
"""
return RegressionForest(n_estimators=n_estimators,
criterion=criterion,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
min_impurity_decrease=min_impurity_decrease,
max_samples=.45 if subsample_fr == 'auto' else subsample_fr / 2,
honest=honest,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start) | PypiClean |
/Discode.py-1.1.1.tar.gz/Discode.py-1.1.1/discode/embeds.py | from typing import Union, Optional, List
from .colours import Colour, Color
class Embed:
r"""Represents a Discord embed. This part of a :class:`Message`.
Attributes
----------
title: Optional[str]
The title of the embed.
description: Optional[str] = None
The description of the embed.
colour: Optional[Union[:class:`Colour`, hex, int]] = None
The colour of the embed.
color: Optional[Union[:class:`Colour`, hex, int]] = colour
The colour of the embed.
fields: List[dict]
The list of all the fields of the embed.
"""
def __init__(
self,
title: Optional[str] = None,
description: Optional[str] = None,
colour: Optional[Union[Colour, Color, hex, int]] = None
):
self.title: str = title
self.description: str = description
if isinstance(colour, Colour):
self.colour = self.color= colour.value
else:
self.colour = self.color = colour
self.fields: List[dict] = []
self.footer: dict = None
def add_field(self, name: str, value: str, inline: bool = True) -> "Embed":
r"""Add a field to the embed object.
Returns
--------
:class:`Embed`
The embed object to which the field was added to.
"""
self.fields.append(
{
"name": str(name),
"value": str(value),
"inline": inline
}
)
return self
def set_footer(self, text: str = None, icon_url: str = None) -> "Embed":
r"""Set a footer for the embed.
Returns
--------
:class:`Embed`
The embed to which the field was added to.
"""
data = {}
if text:
data["text"] = str(text)
if icon_url:
data["icon_url"] = str(icon_url)
self.footer = data
return self
def get_payload(self) -> dict:
data = {}
if self.title:
data["title"] = self.title
if self.description:
data["description"] = self.description
if self.colour:
data["color"] = self.colour
if len(self.fields) > 0:
data["fields"] = self.fields
if self.footer:
data["footer"] = self.footer
return data
@classmethod
def from_json(cls: "Embed", data: dict) -> "Embed":
emb: "Embed" = cls(
title = data.pop("title", None),
description = data.pop("description", None),
colour = data.pop("color", None)
)
if "fields" in data:
for field in data.pop("fields", []):
emb.add_field(
name = field.pop("name", None),
value = field.pop("value", None),
inline = data.pop("inline", True)
)
if "footer" in data:
emb.set_footer(
text = data.pop("text", None),
icon_url = data.pop("icon_url", None)
)
return emb | PypiClean |
/OASYS1-SRW-SOLEIL-1.0.1.tar.gz/OASYS1-SRW-SOLEIL-1.0.1/orangecontrib/srw/soleil/widgets/light_sources/ow_soleil_srw_radiation.py | __author__ = 'labx'
import os, sys, numpy
from PyQt5.QtGui import QPalette, QColor, QFont
from PyQt5.QtWidgets import QMessageBox
from orangewidget import gui
from orangewidget.settings import Setting
from oasys.widgets import gui as oasysgui
from oasys.widgets import congruence
from oasys.util.oasys_util import EmittingStream
from syned.storage_ring.light_source import ElectronBeam
from wofrysrw.propagator.wavefront2D.srw_wavefront import WavefrontParameters, WavefrontPrecisionParameters
from wofrysrw.storage_ring.srw_light_source import SRWLightSource
from wofrysrw.storage_ring.srw_electron_beam import SRWElectronBeam
from wofrysrw.storage_ring.light_sources.srw_bending_magnet_light_source import SRWBendingMagnetLightSource
from wofrysrw.storage_ring.light_sources.srw_undulator_light_source import SRWUndulatorLightSource
from soleil.wofrysrw.storage_ring.light_sources.srw_infrared_light_source import SRWIRBendingMagnetLightSource
from orangecontrib.srw.util.srw_util import SRWPlot
from orangecontrib.srw.util.srw_objects import SRWData
from orangecontrib.srw.widgets.gui.ow_srw_wavefront_viewer import SRWWavefrontViewer
class OWSRWRadiation(SRWWavefrontViewer):
maintainer = "Luca Rebuffi"
maintainer_email = "luca.rebuffi(@at@)elettra.eu"
category = "Source"
keywords = ["data", "file", "load", "read"]
name = "Source Radiation"
description = "SRW Source: Radiation"
icon = "icons/radiation.png"
priority = 3
inputs = [("SRWData", SRWData, "receive_srw_data")]
want_main_area=1
source_name = "SRW Source"
electron_energy_in_GeV = 0.0
electron_energy_spread = 0.0
ring_current = 0.0
electron_beam_size_h = 0.0
electron_beam_size_v = 0.0
electron_beam_divergence_h = 0.0
electron_beam_divergence_v = 0.0
moment_x = 0.0
moment_y = 0.0
moment_z = 0.0
moment_xp = 0.0
moment_yp = 0.0
moment_xx = 0.0
moment_xxp = 0.0
moment_xpxp = 0.0
moment_yy = 0.0
moment_yyp = 0.0
moment_ypyp = 0.0
int_photon_energy_min = Setting(0.0)
int_photon_energy_max = Setting(0.0)
int_photon_energy_points=Setting(1)
int_h_slit_gap = Setting(0.0001)
int_v_slit_gap =Setting( 0.0001)
int_h_slit_points=Setting(100)
int_v_slit_points=Setting(100)
int_distance = Setting(1.0)
int_sr_method = Setting(1)
int_relative_precision = Setting(0.01)
int_start_integration_longitudinal_position = Setting(0.0)
int_end_integration_longitudinal_position = Setting(0.0)
int_number_of_points_for_trajectory_calculation = Setting(50000)
int_use_terminating_terms = Setting(1)
int_sampling_factor_for_adjusting_nx_ny = Setting(0.0)
calculated_total_power = 0.0
received_light_source = None
TABS_AREA_HEIGHT = 618
CONTROL_AREA_WIDTH = 405
def __init__(self, show_automatic_box=False):
super().__init__(show_automatic_box=show_automatic_box, show_view_box=False)
self.general_options_box.setVisible(False)
button_box = oasysgui.widgetBox(self.controlArea, "", addSpace=False, orientation="horizontal")
button = gui.button(button_box, self, "Calculate Radiation", callback=self.calculateRadiation)
font = QFont(button.font())
font.setBold(True)
button.setFont(font)
palette = QPalette(button.palette()) # make a copy of the palette
palette.setColor(QPalette.ButtonText, QColor('Dark Blue'))
button.setPalette(palette) # assign new palette
button.setFixedHeight(45)
button = gui.button(button_box, self, "Reset Fields", callback=self.callResetSettings)
font = QFont(button.font())
font.setItalic(True)
button.setFont(font)
palette = QPalette(button.palette()) # make a copy of the palette
palette.setColor(QPalette.ButtonText, QColor('Dark Red'))
button.setPalette(palette) # assign new palette
button.setFixedHeight(45)
button.setFixedWidth(150)
gui.separator(self.controlArea)
self.controlArea.setFixedWidth(self.CONTROL_AREA_WIDTH)
self.tabs_setting = oasysgui.tabWidget(self.controlArea)
self.tabs_setting.setFixedHeight(self.TABS_AREA_HEIGHT)
self.tabs_setting.setFixedWidth(self.CONTROL_AREA_WIDTH-5)
# INTENSITY/POWER -------------------------------------------
tab_convolution = oasysgui.createTabPage(self.tabs_setting, "Radiation")
int_box = oasysgui.widgetBox(tab_convolution, "Wavefront Parameters", addSpace=True, orientation="vertical")
oasysgui.lineEdit(int_box, self, "int_photon_energy_min", "Photon Energy Min [eV]", labelWidth=260, valueType=float, orientation="horizontal")
oasysgui.lineEdit(int_box, self, "int_photon_energy_max", "Photon Energy Max [eV]", labelWidth=260, valueType=float, orientation="horizontal")
oasysgui.lineEdit(int_box, self, "int_photon_energy_points", "Photon Energy Points", labelWidth=260, valueType=int, orientation="horizontal")
oasysgui.lineEdit(int_box, self, "int_h_slit_gap", "H Slit Gap [m]", labelWidth=260, valueType=float, orientation="horizontal")
oasysgui.lineEdit(int_box, self, "int_v_slit_gap", "V Slit Gap [m]", labelWidth=260, valueType=float, orientation="horizontal")
oasysgui.lineEdit(int_box, self, "int_h_slit_points", "H Slit Points", labelWidth=260, valueType=int, orientation="horizontal")
oasysgui.lineEdit(int_box, self, "int_v_slit_points", "V Slit Points", labelWidth=260, valueType=int, orientation="horizontal")
oasysgui.lineEdit(int_box, self, "int_distance", "Propagation Distance [m]", labelWidth=260, valueType=float, orientation="horizontal")
pre_box = oasysgui.widgetBox(tab_convolution, "Precision Parameters", addSpace=False, orientation="vertical")
tabs_precision = oasysgui.tabWidget(pre_box)
tab_prop = oasysgui.createTabPage(tabs_precision, "Propagation")
gui.comboBox(tab_prop, self, "int_sr_method", label="Calculation Method",
items=["Manual", "Auto"], labelWidth=260,
sendSelectedValue=False, orientation="horizontal")
oasysgui.lineEdit(tab_prop, self, "int_relative_precision", "Relative Precision", labelWidth=260, valueType=float, orientation="horizontal")
oasysgui.lineEdit(tab_prop, self, "int_start_integration_longitudinal_position", "Longitudinal pos. to start integration [m]", labelWidth=260, valueType=float, orientation="horizontal")
oasysgui.lineEdit(tab_prop, self, "int_end_integration_longitudinal_position", "Longitudinal pos. to finish integration [m]", labelWidth=260, valueType=float, orientation="horizontal")
oasysgui.lineEdit(tab_prop, self, "int_number_of_points_for_trajectory_calculation", "Number of points for trajectory calculation", labelWidth=260, valueType=int, orientation="horizontal")
gui.comboBox(tab_prop, self, "int_use_terminating_terms", label="Use \"terminating terms\"or not",
items=["No", "Yes"], labelWidth=260,
sendSelectedValue=False, orientation="horizontal")
oasysgui.lineEdit(tab_prop, self, "int_sampling_factor_for_adjusting_nx_ny", "Sampling factor for adjusting nx/ny", labelWidth=260, valueType=int, orientation="horizontal")
gui.rubber(self.controlArea)
def calculateRadiation(self):
if not self.received_light_source is None:
self.setStatusMessage("")
self.progressBarInit()
try:
self.checkFields()
srw_source = self.get_srw_source(self.get_electron_beam())
self.progressBarSet(10)
self.setStatusMessage("Running SRW")
sys.stdout = EmittingStream(textWritten=self.writeStdOut)
print(srw_source.get_electron_beam().get_electron_beam_geometrical_properties().to_info())
self.print_specific_infos(srw_source)
self.progressBarSet(20)
tickets = []
self.run_calculation_intensity_power(srw_source, tickets)
self.setStatusMessage("Plotting Results")
self.plot_results(tickets)
self.setStatusMessage("")
except Exception as exception:
QMessageBox.critical(self, "Error", str(exception), QMessageBox.Ok)
if self.IS_DEVELOP: raise exception
self.progressBarFinished()
def get_electron_beam(self):
received_electron_beam = self.received_light_source.get_electron_beam()
electron_beam = SRWElectronBeam(energy_in_GeV=received_electron_beam._energy_in_GeV,
energy_spread=received_electron_beam._energy_spread,
current=received_electron_beam._current)
electron_beam._moment_x = 0.0
electron_beam._moment_y = 0.0
electron_beam._moment_z = self.get_default_initial_z()
electron_beam._moment_xp = 0.0
electron_beam._moment_yp = 0.0
electron_beam._moment_xx = received_electron_beam._moment_xx
electron_beam._moment_xxp = received_electron_beam._moment_xxp
electron_beam._moment_xpxp = received_electron_beam._moment_xpxp
electron_beam._moment_yy = received_electron_beam._moment_yy
electron_beam._moment_yyp = received_electron_beam._moment_yyp
electron_beam._moment_ypyp = received_electron_beam._moment_ypyp
return electron_beam
def print_specific_infos(self, srw_source):
if isinstance(self.received_light_source, SRWUndulatorLightSource):
print("1st Harmonic Energy", srw_source.get_resonance_energy())
print(srw_source.get_photon_source_properties(harmonic=1).to_info())
def get_default_initial_z(self):
if isinstance(self.received_light_source, SRWBendingMagnetLightSource) or isinstance(self.received_light_source, SRWIRBendingMagnetLightSource):
return -0.5*self.received_light_source._magnetic_structure._length
elif isinstance(self.received_light_source, SRWUndulatorLightSource):
return -0.5*self.received_light_source._magnetic_structure._period_length*(self.received_light_source._magnetic_structure._number_of_periods + 4) # initial Longitudinal Coordinate (set before the ID)
def get_srw_source(self, electron_beam=ElectronBeam()):
if isinstance(self.received_light_source, SRWBendingMagnetLightSource) or isinstance(self.received_light_source, SRWIRBendingMagnetLightSource):
return SRWBendingMagnetLightSource(name=self.received_light_source._name,
electron_beam=electron_beam,
bending_magnet_magnetic_structure=self.received_light_source._magnetic_structure
)
elif isinstance(self.received_light_source, SRWUndulatorLightSource):
return SRWUndulatorLightSource(name=self.received_light_source._name,
electron_beam=electron_beam,
undulator_magnetic_structure=self.received_light_source._magnetic_structure
)
def getCalculatedTotalPowerString(self):
if self.calculated_total_power == 0:
return ""
else:
return "Total: " + str(int(self.calculated_total_power)) + " W"
def get_automatic_sr_method(self):
if isinstance(self.received_light_source, SRWBendingMagnetLightSource) or isinstance(self.received_light_source, SRWIRBendingMagnetLightSource):
return 2
elif isinstance(self.received_light_source, SRWUndulatorLightSource):
return 1
def get_minimum_propagation_distance(self):
return round(self.get_source_length()*1.01, 6)
def get_source_length(self):
if isinstance(self.received_light_source, SRWBendingMagnetLightSource) or isinstance(self.received_light_source, SRWIRBendingMagnetLightSource):
return self.received_light_source._magnetic_structure._length
elif isinstance(self.received_light_source, SRWUndulatorLightSource):
return self.received_light_source._magnetic_structure._period_length*self.received_light_source._magnetic_structure._number_of_periods
def checkFields(self):
# INTENSITY/POWER
congruence.checkStrictlyPositiveNumber(self.int_photon_energy_min, "Photon Energy Min")
congruence.checkStrictlyPositiveNumber(self.int_photon_energy_max, "Photon Energy Max")
congruence.checkGreaterOrEqualThan(self.int_photon_energy_max, self.int_photon_energy_min, "Photon Energy Max", "Photon Energy Min")
congruence.checkStrictlyPositiveNumber(self.int_photon_energy_points, "Photon Energy Points")
congruence.checkStrictlyPositiveNumber(self.int_h_slit_gap, "H Slit Gap")
congruence.checkStrictlyPositiveNumber(self.int_v_slit_gap, "V Slit Gap")
congruence.checkStrictlyPositiveNumber(self.int_h_slit_points, "H Slit Points")
congruence.checkStrictlyPositiveNumber(self.int_v_slit_points, "V Slit Points")
congruence.checkGreaterOrEqualThan(self.int_distance, self.get_minimum_propagation_distance(),
"Distance", "Minimum Distance out of the Source: " + str(self.get_minimum_propagation_distance()))
congruence.checkStrictlyPositiveNumber(self.int_relative_precision, "Propagation - Relative Precision")
congruence.checkStrictlyPositiveNumber(self.int_number_of_points_for_trajectory_calculation, "Propagation - Number of points for trajectory calculation")
congruence.checkPositiveNumber(self.int_sampling_factor_for_adjusting_nx_ny, " Propagation - Sampling Factor for adjusting nx/ny")
    def run_calculation_intensity_power(self, srw_source, tickets, progress_bar_value=30):
        """Compute intensity and power-density results for the received source.

        Builds a wavefront from the widget fields and appends three tickets
        to ``tickets``:

        1. single-electron intensity vs (E, X, Y),
        2. multi-electron intensity vs (E, X, Y),
        3. power density vs (X, Y), integrated over photon energy.

        X/Y coordinates are converted with a factor 1e3 (m -> mm, presumably)
        before being stored.  Also sets ``self.calculated_total_power`` [W]
        and advances the progress bar.
        """
        wf_parameters = WavefrontParameters(photon_energy_min = self.int_photon_energy_min,
                                            photon_energy_max = self.int_photon_energy_max,
                                            photon_energy_points=self.int_photon_energy_points,
                                            h_slit_gap = self.int_h_slit_gap,
                                            v_slit_gap = self.int_v_slit_gap,
                                            h_slit_points=self.int_h_slit_points,
                                            v_slit_points=self.int_v_slit_points,
                                            distance = self.int_distance,
                                            wavefront_precision_parameters=WavefrontPrecisionParameters(sr_method=0 if self.int_sr_method == 0 else self.get_automatic_sr_method(),
                                                                                                        relative_precision=self.int_relative_precision,
                                                                                                        start_integration_longitudinal_position=self.int_start_integration_longitudinal_position,
                                                                                                        end_integration_longitudinal_position=self.int_end_integration_longitudinal_position,
                                                                                                        number_of_points_for_trajectory_calculation=self.int_number_of_points_for_trajectory_calculation,
                                                                                                        use_terminating_terms=self.int_use_terminating_terms,
                                                                                                        sampling_factor_for_adjusting_nx_ny=self.int_sampling_factor_for_adjusting_nx_ny))

        srw_wavefront = srw_source.get_SRW_Wavefront(source_wavefront_parameters=wf_parameters)

        e, h, v, i_se = srw_wavefront.get_intensity(multi_electron=False)
        tickets.append((i_se, e, h*1e3, v*1e3))

        e, h, v, i_me = srw_wavefront.get_intensity(multi_electron=True)
        tickets.append((i_me, e, h*1e3, v*1e3))

        # photon-energy integration step; a single energy point gets unit weight
        if len(e) > 1: energy_step = e[1]-e[0]
        else: energy_step = 1.0

        import scipy.constants as codata

        # single-electron spectral intensity summed over energy; the factor
        # codata.e*1e3 presumably converts ph/s/.1%bw into W -- TODO(review):
        # confirm this conversion factor
        pd = i_se.sum(axis=0)*energy_step*codata.e*1e3

        self.calculated_total_power = SRWLightSource.get_total_power_from_power_density(h, v, pd)
        print("TOTAL POWER: ", self.calculated_total_power, " W")
        tickets.append(SRWPlot.get_ticket_2D(h, v, pd))

        self.progressBarSet(progress_bar_value + 10)
def getVariablesToPlot(self):
return [[1, 2], [1, 2], [1, 2]]
def getTitles(self, with_um=False):
if with_um: return ["Intensity SE vs E,X,Y [ph/s/.1%bw/mm\u00b2]",
"Intensity ME vs E,X,Y [ph/s/.1%bw/mm\u00b2]",
"Power Density vs X,Y [W/mm\u00b2]"]
else: return ["Intensity SE vs E,X,Y", "Intensity ME vs E,X,Y", "Power Density vs X,Y"]
def getXTitles(self):
return ["X [mm]", "X [mm]", "X [mm]"]
def getYTitles(self):
return ["Y [mm]", "Y [mm]", "Y [mm]"]
def getXUM(self):
return ["X [mm]", "X [mm]", "X [mm]"]
def getYUM(self):
return ["Y [mm]", "Y [mm]", "X [mm]"]
    def receive_srw_data(self, data):
        """Handle SRWData arriving on the widget's input port.

        Stores the light source (bending magnet, IR bending magnet or
        undulator only) and, when a wavefront is attached and the photon
        energy range has not been entered yet, pre-fills the intensity/power
        fields from the wavefront mesh.  Errors are shown in a message box
        instead of being raised (GUI slot).
        """
        if not data is None:
            try:
                if isinstance(data, SRWData):
                    self.received_light_source = data.get_srw_beamline().get_light_source()

                    if not (isinstance(self.received_light_source, SRWBendingMagnetLightSource) or isinstance(self.received_light_source, SRWUndulatorLightSource) or isinstance(self.received_light_source, SRWIRBendingMagnetLightSource)):
                        raise ValueError("This source is not supported")

                    received_wavefront = data.get_srw_wavefront()

                    if not received_wavefront is None:
                        # only auto-fill when the user has not set an energy range yet
                        if self.int_photon_energy_min == 0.0 and self.int_photon_energy_max == 0.0:
                            self.int_photon_energy_min = received_wavefront.mesh.eStart
                            self.int_photon_energy_max = received_wavefront.mesh.eFin
                            self.int_photon_energy_points=received_wavefront.mesh.ne
                        # mesh extents are assumed to be in m -- TODO(review): confirm units
                        self.int_h_slit_gap = received_wavefront.mesh.xFin - received_wavefront.mesh.xStart
                        self.int_v_slit_gap = received_wavefront.mesh.yFin - received_wavefront.mesh.yStart
                        self.int_h_slit_points=received_wavefront.mesh.nx
                        self.int_v_slit_points=received_wavefront.mesh.ny
                        self.int_distance = received_wavefront.mesh.zStart
                else:
                    raise ValueError("SRW data not correct")
            except Exception as exception:
                QMessageBox.critical(self, "Error", str(exception), QMessageBox.Ok)
import sys
class Token( object ):
    """One scanned token; tokens form a singly linked list via ``next``."""

    def __init__( self ):
        # kind / pos / col / line are numeric and all start at zero
        self.kind = self.pos = self.col = self.line = 0
        self.val  = u''     # matched lexeme
        self.next = None    # successor token (AW 2003-03-07: linked list)
class Position( object ):
    """A stretch of the source text (semantic action, resolver expression,
    ...) identified by start offset, length and starting column."""

    def __init__( self, buf, beg, len, col ):
        # the generated scanner relies on these exact types
        assert isinstance( buf, Buffer ) and isinstance( beg, int )
        assert isinstance( len, int ) and isinstance( col, int )
        self.buf = buf     # Buffer holding the source text
        self.beg = beg     # start offset relative to the beginning of the file
        self.len = len     # number of characters in the stretch
        self.col = col     # column number of the start position

    def getSubstring( self ):
        """Read this stretch back out of the buffer and return it."""
        return self.buf.readPosition( self )
class Buffer( object ):
    """Random-access character buffer over the complete source text.

    Characters returned by Read()/Peek() are masked to 8 bits; the sentinel
    Buffer.EOF (code point 256, one past the 8-bit range) marks end of input.
    NOTE: Python 2 module (uses ``unichr``).
    """

    EOF = u'\u0100'     # 256 -- one past any maskable 8-bit character

    def __init__( self, s ):
        self.buf    = s
        self.bufLen = len(s)
        self.pos    = 0
        self.lines  = s.splitlines( True )   # keep line endings

    def Read( self ):
        """Return the next character (8-bit masked) and advance, or EOF."""
        if self.pos < self.bufLen:
            result = unichr(ord(self.buf[self.pos]) & 0xff)   # mask out sign bits
            self.pos += 1
            return result
        else:
            return Buffer.EOF

    def ReadChars( self, numBytes=1 ):
        """Return the next numBytes characters verbatim and advance."""
        result = self.buf[ self.pos : self.pos + numBytes ]
        self.pos += numBytes
        return result

    def Peek( self ):
        """Return the next character (8-bit masked) WITHOUT advancing, or EOF."""
        if self.pos < self.bufLen:
            return unichr(ord(self.buf[self.pos]) & 0xff)     # mask out sign bits
        else:
            # BUGFIX: was `return Scanner.buffer.EOF`.  Scanner has no class
            # attribute `buffer` (it is only set per-instance in
            # Scanner.__init__), so peeking at end-of-input raised
            # AttributeError instead of returning the EOF sentinel.
            # Mirror Read() and return Buffer.EOF.
            return Buffer.EOF

    def getString( self, beg, end ):
        """Return the masked characters in [beg, end); restores the position."""
        s = ''
        oldPos = self.getPos( )
        self.setPos( beg )
        while beg < end:
            s += self.Read( )
            beg += 1
        self.setPos( oldPos )
        return s

    def getPos( self ):
        """Current read position."""
        return self.pos

    def setPos( self, value ):
        """Set the read position, clamped to [0, bufLen]."""
        if value < 0:
            self.pos = 0
        elif value >= self.bufLen:
            self.pos = self.bufLen
        else:
            self.pos = value

    def readPosition( self, pos ):
        """Return the text covered by a Position object."""
        assert isinstance( pos, Position )
        self.setPos( pos.beg )
        return self.ReadChars( pos.len )

    def __iter__( self ):
        """Iterate over the source lines (line endings preserved)."""
        return iter(self.lines)
class Scanner(object):
    """Table-driven DFA scanner generated by Coco/R (CocoPy).

    The whole input is tokenized eagerly in __init__ into a linked list of
    Token objects; Scan()/Peek() then merely walk that list.
    """
    EOL = u'\n'          # normalized end-of-line character
    eofSym = 0           # token kind used for end-of-file
    charSetSize = 256
    maxT = 10            # highest regular token kind
    noSym = 10           # kind reported for unrecognized input

    # DFA start-state table, indexed by character code 0..256: maps the first
    # character of a token to the automaton's start state (0 = no token can
    # start here; index 256 holds the -1 EOF sentinel).
    start = [
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    -1]

    def __init__( self, s ):
        """Tokenize the complete input string s into a linked Token list."""
        self.buffer = Buffer( unicode(s) ) # the buffer instance
        self.ch = u'\0' # current input character
        self.pos = -1 # column number of current character
        self.line = 1 # line number of current character
        self.lineStart = 0 # start position of current line
        self.oldEols = 0 # EOLs that appeared in a comment;
        self.NextCh( )
        self.ignore = set( ) # set of characters to be ignored by the scanner
        self.ignore.add( ord(' ') ) # blanks are always white space

        # fill token list
        self.tokens = Token( ) # the complete input token stream
        node = self.tokens

        node.next = self.NextToken( )
        node = node.next
        while node.kind != Scanner.eofSym:
            node.next = self.NextToken( )
            node = node.next

        # the EOF token points to itself so Scan() can never run off the end
        node.next = node
        node.val = u'EOF'
        self.t = self.tokens # current token
        self.pt = self.tokens # current peek token

    def NextCh( self ):
        """Advance self.ch / self.pos / self.line to the next input character."""
        if self.oldEols > 0:
            self.ch = Scanner.EOL
            self.oldEols -= 1
        else:
            self.ch = self.buffer.Read( )
            self.pos += 1
            # replace isolated '\r' by '\n' in order to make
            # eol handling uniform across Windows, Unix and Mac
            if (self.ch == u'\r') and (self.buffer.Peek() != u'\n'):
                self.ch = Scanner.EOL
        if self.ch == Scanner.EOL:
            self.line += 1
            self.lineStart = self.pos + 1

    def CheckLiteral( self ):
        # No keyword literals in this grammar: the generated check is a no-op.
        lit = self.t.val

    def NextToken( self ):
        """Scan and return the next token from the buffer."""
        while ord(self.ch) in self.ignore:
            self.NextCh( )

        self.t = Token( )
        self.t.pos = self.pos
        self.t.col = self.pos - self.lineStart + 1
        self.t.line = self.line
        state = self.start[ord(self.ch)]
        buf = u''
        buf += unicode(self.ch)
        self.NextCh()

        # run the DFA; every state in this grammar is accepting after one char
        done = False
        while not done:
            if state == -1:
                self.t.kind = Scanner.eofSym # NextCh already done
                done = True
            elif state == 0:
                self.t.kind = Scanner.noSym # NextCh already done
                done = True
            elif state == 1:
                self.t.kind = 1
                done = True
            elif state == 2:
                self.t.kind = 2
                done = True
            elif state == 3:
                self.t.kind = 3
                done = True
            elif state == 4:
                self.t.kind = 4
                done = True
            elif state == 5:
                self.t.kind = 5
                done = True
            elif state == 6:
                self.t.kind = 6
                done = True
            elif state == 7:
                self.t.kind = 7
                done = True
            elif state == 8:
                self.t.kind = 8
                done = True
            elif state == 9:
                self.t.kind = 9
                done = True

        self.t.val = buf
        return self.t

    def Scan( self ):
        """Consume and return the next token."""
        self.t = self.t.next
        self.pt = self.t.next
        return self.t

    def Peek( self ):
        """Return the next token without consuming it, skipping pragmas
        (kinds above maxT)."""
        self.pt = self.pt.next
        while self.pt.kind > self.maxT:
            self.pt = self.pt.next
        return self.pt

    def ResetPeek( self ):
        """Restart peeking at the current token."""
        self.pt = self.t
/LiBai-0.1.1.tar.gz/LiBai-0.1.1/libai/layers/mlp.py |
import oneflow as flow
from oneflow import nn
from libai.layers import Linear, build_activation
class MLP(nn.Module):
    """MLP

    MLP will take the input with h hidden state, project it to intermediate
    hidden dimension, perform gelu transformation, and project the
    state back into h hidden dimension.

    Arguments:
        hidden_size: size of each input and output sample.
        ffn_hidden_size: size of each intermediate sample.
        output_dropout_prob: Output dropout probability. Defaults to 0.0.
        init_method: method to initialize the first linear weight.
            Defaults to :func:`nn.init.xavier_normal_`.
        output_layer_init_method: method to initialize the second linear weight. If set to None,
            it will use ``init_method`` instead. Defaults to None.
        bias_gelu_fusion: If set to ``True``, it will fuse bias adding and elementwise
            gelu activation. Defaults to ``False``.
        bias_dropout_fusion: If set to ``True``, it will fuse bias adding and dropout.
            Defaults to ``False``.
        layer_idx: A layer_idx sign which determines the placement. It will be used in
            pipeline parallelism. Defaults to 0.
    """

    def __init__(
        self,
        hidden_size,
        ffn_hidden_size,
        output_dropout_prob=0.0,
        init_method=nn.init.xavier_normal_,
        output_layer_init_method=None,
        bias_gelu_fusion=False,
        bias_dropout_fusion=False,
        *,
        layer_idx=0,
    ):
        super().__init__()
        self.output_dropout_prob = output_dropout_prob
        self.bias_gelu_fusion = bias_gelu_fusion
        self.bias_dropout_fusion = bias_dropout_fusion

        # the second projection defaults to the same initializer as the first
        if output_layer_init_method is None:
            output_layer_init_method = init_method

        # Column-parallel h -> 4h projection.  When bias/gelu fusion is on,
        # the Linear defers its bias add (skip_bias_add=True) and returns
        # (output, bias) so forward() can feed the fused kernel.
        self.dense_h_to_4h = Linear(
            hidden_size,
            ffn_hidden_size,
            bias=True,
            parallel="col",
            skip_bias_add=bias_gelu_fusion,
            init_method=init_method,
            layer_idx=layer_idx,
        )

        if not bias_gelu_fusion:
            self.activation_func = build_activation("gelu")

        # Row-parallel 4h -> h projection; bias is deferred analogously when
        # bias/dropout fusion is on.
        self.dense_4h_to_h = Linear(
            ffn_hidden_size,
            hidden_size,
            bias=True,
            parallel="row",
            skip_bias_add=bias_dropout_fusion,
            init_method=output_layer_init_method,
            layer_idx=layer_idx,
        )

        if not bias_dropout_fusion:
            self.dropout = nn.Dropout(self.output_dropout_prob)

    def forward(self, hidden_states):
        """Apply h -> 4h linear, gelu, 4h -> h linear, then dropout."""
        intermediate = self.dense_h_to_4h(hidden_states)

        if self.bias_gelu_fusion:
            # dense_h_to_4h returned (output, bias): add the bias inside the
            # fused gelu kernel
            intermediate, bias = intermediate
            intermediate = flow._C.fused_bias_add_gelu(
                intermediate, bias, axis=intermediate.ndim - 1
            )
        else:
            intermediate = self.activation_func(intermediate)

        output = self.dense_4h_to_h(intermediate)
        if self.bias_dropout_fusion:
            # dense_4h_to_h returned (output, bias): add the bias inside the
            # fused dropout kernel
            output, bias = output
            output = flow._C.fused_bias_add_dropout(
                output, bias, p=self.output_dropout_prob, axis=output.ndim - 1
            )
        else:
            output = self.dropout(output)

        return output

    def extra_repr(self) -> str:
        return "bias_gelu_fusion={}, bias_dropout_fusion={}, dropout={}".format(
            self.bias_gelu_fusion, self.bias_dropout_fusion, self.output_dropout_prob
        )
/Booktype-1.5.tar.gz/Booktype-1.5/lib/booki/utils/log.py |
from booki.utils.json_wrapper import simplejson
from booki.editor import models
# logBookHistory
def logBookHistory(book = None, version = None, chapter = None, chapter_history = None, args = None, user = None, kind = 'unknown'):
    """
    Creates history record for book change.

    @type book: C{booki.editor.models.Book}
    @param book: Book object
    @type version: C{booki.editor.models.BookVersion}
    @param version: Book version object
    @type chapter: C{booki.editor.models.Chapter}
    @param chapter: Chapter object
    @type chapter_history: C{booki.editor.models.ChapterHistory}
    @param chapter_history: Chapter history object
    @type args: C{dict}
    @param args: Additional arguments (defaults to an empty dict)
    @type user: C{django.contrib.auth.models.User}
    @param user: User who did modifications
    @type kind: C{string}
    @param kind: What kind of modification was done
    @rtype: C{booki.editor.models.BookHistory}
    @return: The saved history record
    """
    # BUGFIX: `args` used to default to a shared mutable `{}` (classic Python
    # pitfall); use None as the default and substitute a fresh dict here.
    if args is None:
        args = {}

    # an unknown `kind` string is stored as 0
    history = models.BookHistory(book = book,
                                 version = version,
                                 chapter = chapter,
                                 chapter_history = chapter_history,
                                 args = simplejson.dumps(args),
                                 user = user,
                                 kind = models.HISTORY_CHOICES.get(kind, 0))
    history.save()

    # return the record for consistency with logChapterHistory
    return history
# logChapterHistory
def logChapterHistory(chapter = None, content = None, user = None, comment = '', revision = None):
    """
    Creates history record for chapter change.

    @type chapter: C{booki.editor.models.Chapter}
    @param chapter: Chapter object
    @type content: C{string}
    @param content: Old content
    @type user: C{django.contrib.auth.models.User}
    @param user: Booki user object
    @type comment: C{string}
    @param comment: Comment about this change
    @type revision: C{int}
    @param revision: Revision number for this change
    @rtype: C{booki.editor.models.ChapterHistory}
    @return: The saved history record
    """
    history = models.ChapterHistory(chapter = chapter,
                                    content = content,
                                    user = user,
                                    revision = revision,
                                    comment = comment)
    history.save()

    return history
def logError(msg, *args):
    """
    Logs error message to the "booki" logger.

    @type msg: C{string}
    @param msg: Error message (may contain %-style placeholders filled
        lazily from C{args})
    """
    import logging

    logging.getLogger("booki").error(msg, *args)
def logWarning(msg, *args):
    """
    Logs warning message to the "booki" logger.

    @type msg: C{string}
    @param msg: Warning message (may contain %-style placeholders filled
        lazily from C{args})
    """
    import logging

    logging.getLogger("booki").warning(msg, *args)
def printStack(*extra):
    """
    Logs the current exception traceback as an error, followed by any
    extra values supplied by the caller.
    """
    import traceback

    logError(traceback.format_exc())
    for item in extra:
        logError(item)
import re  # noqa: F401
import sys # noqa: F401
from typing import (
Optional,
Union,
List,
Dict,
)
from MergePythonSDK.shared.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
OpenApiModel,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from MergePythonSDK.shared.exceptions import ApiAttributeError
from MergePythonSDK.shared.model_utils import import_model_by_name
def lazy_import():
    """Resolve model imports at call time (avoiding circular imports between
    the generated model modules) and publish the classes via globals()."""
    from MergePythonSDK.accounting.model.currency_enum import CurrencyEnum
    from MergePythonSDK.accounting.model.journal_line import JournalLine
    from MergePythonSDK.shared.model.remote_data import RemoteData
    globals()['CurrencyEnum'] = CurrencyEnum
    globals()['JournalLine'] = JournalLine
    globals()['RemoteData'] = RemoteData
class JournalEntry(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
defined_types = {
'id': (str, none_type,), # noqa: E501
'remote_id': (str, none_type, none_type,), # noqa: E501
'remote_data': ([RemoteData], none_type, none_type,), # noqa: E501
'transaction_date': (datetime, none_type, none_type,), # noqa: E501
'remote_created_at': (datetime, none_type, none_type,), # noqa: E501
'remote_updated_at': (datetime, none_type, none_type,), # noqa: E501
'payments': ([str, none_type], none_type,), # noqa: E501
'memo': (str, none_type, none_type,), # noqa: E501
'currency': (CurrencyEnum, str, none_type,),
'lines': ([JournalLine], none_type,), # noqa: E501
'remote_was_deleted': (bool, none_type,), # noqa: E501
}
expands_types = {"lines": "JournalLine", "payments": "Payment"}
# update types with expands
for key, val in expands_types.items():
if key in defined_types.keys():
expands_model = import_model_by_name(val, "accounting")
if len(defined_types[key]) > 0 and isinstance(defined_types[key][0], list):
defined_types[key][0].insert(0, expands_model)
defined_types[key] = (*defined_types[key], expands_model)
return defined_types
@cached_property
def discriminator():
return None
attribute_map = {
'id': 'id', # noqa: E501
'remote_id': 'remote_id', # noqa: E501
'remote_data': 'remote_data', # noqa: E501
'transaction_date': 'transaction_date', # noqa: E501
'remote_created_at': 'remote_created_at', # noqa: E501
'remote_updated_at': 'remote_updated_at', # noqa: E501
'payments': 'payments', # noqa: E501
'memo': 'memo', # noqa: E501
'currency': 'currency', # noqa: E501
'lines': 'lines', # noqa: E501
'remote_was_deleted': 'remote_was_deleted', # noqa: E501
}
read_only_vars = {
'id', # noqa: E501
'remote_data', # noqa: E501
'lines', # noqa: E501
'remote_was_deleted', # noqa: E501
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""JournalEntry - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
id (str): [optional] # noqa: E501
remote_id (str, none_type): The third-party API ID of the matching object.. [optional] # noqa: E501
remote_data ([RemoteData], none_type): [optional] # noqa: E501
transaction_date (datetime, none_type): The journal entry's transaction date.. [optional] # noqa: E501
remote_created_at (datetime, none_type): When the third party's journal entry was created.. [optional] # noqa: E501
remote_updated_at (datetime, none_type): When the third party's journal entry was updated.. [optional] # noqa: E501
payments ([str, none_type]): Array of `Payment` object IDs.. [optional] # noqa: E501
memo (str, none_type): The journal entry's private note.. [optional] # noqa: E501
currency (bool, dict, float, int, list, str, none_type): The journal's currency.. [optional] # noqa: E501
lines ([JournalLine]): [optional] # noqa: E501
remote_was_deleted (bool): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', True)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.remote_id = kwargs.get("remote_id", None)
self.transaction_date = kwargs.get("transaction_date", None)
self.remote_created_at = kwargs.get("remote_created_at", None)
self.remote_updated_at = kwargs.get("remote_updated_at", None)
self.payments = kwargs.get("payments", None)
self.memo = kwargs.get("memo", None)
self.currency = kwargs.get("currency", None)
# Read only properties
self._id = kwargs.get("id", str())
self._remote_data = kwargs.get("remote_data", None)
self._lines = kwargs.get("lines", None)
self._remote_was_deleted = kwargs.get("remote_was_deleted", bool())
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""JournalEntry - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
id (str): [optional] # noqa: E501
remote_id (str, none_type): The third-party API ID of the matching object.. [optional] # noqa: E501
remote_data ([RemoteData], none_type): [optional] # noqa: E501
transaction_date (datetime, none_type): The journal entry's transaction date.. [optional] # noqa: E501
remote_created_at (datetime, none_type): When the third party's journal entry was created.. [optional] # noqa: E501
remote_updated_at (datetime, none_type): When the third party's journal entry was updated.. [optional] # noqa: E501
payments ([str, none_type]): Array of `Payment` object IDs.. [optional] # noqa: E501
memo (str, none_type): The journal entry's private note.. [optional] # noqa: E501
currency (bool, dict, float, int, list, str, none_type): The journal's currency.. [optional] # noqa: E501
lines ([JournalLine]): [optional] # noqa: E501
remote_was_deleted (bool): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.remote_id: Union[str, none_type] = kwargs.get("remote_id", None)
self.transaction_date: Union[datetime, none_type] = kwargs.get("transaction_date", None)
self.remote_created_at: Union[datetime, none_type] = kwargs.get("remote_created_at", None)
self.remote_updated_at: Union[datetime, none_type] = kwargs.get("remote_updated_at", None)
self.payments: Union[List[str, none_type]] = kwargs.get("payments", list())
self.memo: Union[str, none_type] = kwargs.get("memo", None)
self.currency: Union[bool, dict, float, int, list, str, none_type] = kwargs.get("currency", None)
# Read only properties
self._id: Union[str] = kwargs.get("id", str())
self._remote_data: Union[List["RemoteData"]] = kwargs.get("remote_data", None)
self._lines: Union[List["JournalLine"]] = kwargs.get("lines", None)
self._remote_was_deleted: Union[bool] = kwargs.get("remote_was_deleted", bool())
# Read only property getters
@property
def id(self):
return self._id
@property
def remote_data(self):
return self._remote_data
@property
def lines(self):
return self._lines
@property
def remote_was_deleted(self):
return self._remote_was_deleted | PypiClean |
/0x_contract_wrappers-2.0.0-py3-none-any.whl/zero_ex/contract_wrappers/i_validator/__init__.py |
# pylint: disable=too-many-arguments
import json
from typing import ( # pylint: disable=unused-import
Any,
List,
Optional,
Tuple,
Union,
)
from eth_utils import to_checksum_address
from mypy_extensions import TypedDict # pylint: disable=unused-import
from hexbytes import HexBytes
from web3 import Web3
from web3.contract import ContractFunction
from web3.datastructures import AttributeDict
from web3.providers.base import BaseProvider
from zero_ex.contract_wrappers.bases import ContractMethod, Validator
from zero_ex.contract_wrappers.tx_params import TxParams
# Try to import a custom validator class definition; if there isn't one,
# declare one that we can instantiate for the default argument to the
# constructor for IValidator below.
try:
    # both mypy and pylint complain about what we're doing here, but this
    # works just fine, so their messages have been disabled here.
    from . import (  # type: ignore # pylint: disable=import-self
        IValidatorValidator,
    )
except ImportError:

    class IValidatorValidator(  # type: ignore
        Validator
    ):
        """No-op input validator."""


# Optional request middleware; absence of the module simply means no
# middleware is injected in IValidator.__init__ below.
try:
    from .middleware import MIDDLEWARE  # type: ignore
except ImportError:
    pass
class IsValidSignatureMethod(ContractMethod):
    """Various interfaces to the isValidSignature method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Validator = None,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(
        self,
        _hash: Union[bytes, str],
        signer_address: str,
        signature: Union[bytes, str],
    ):
        """Validate the inputs to the isValidSignature method."""
        self.validator.assert_valid(
            method_name="isValidSignature",
            parameter_name="hash",
            argument_value=_hash,
        )
        self.validator.assert_valid(
            method_name="isValidSignature",
            parameter_name="signerAddress",
            argument_value=signer_address,
        )
        signer_address = self.validate_and_checksum_address(signer_address)
        self.validator.assert_valid(
            method_name="isValidSignature",
            parameter_name="signature",
            argument_value=signature,
        )
        return (_hash, signer_address, signature)

    def call(
        self,
        _hash: Union[bytes, str],
        signer_address: str,
        signature: Union[bytes, str],
        tx_params: Optional[TxParams] = None,
    ) -> Union[bytes, str]:
        """Execute underlying contract method via eth_call.

        Verifies that a signature is valid.

        :param hash: Message hash that is signed.
        :param signature: Proof of signing.
        :param signerAddress: Address that should have signed the given hash.
        :param tx_params: transaction parameters
        :returns: Magic bytes4 value if the signature is valid. Magic value is
            bytes4(keccak256("isValidValidatorSignature(address,bytes32,address,bytes)"))
        """
        (
            _hash,
            signer_address,
            signature,
        ) = self.validate_and_normalize_inputs(
            _hash, signer_address, signature
        )
        tx_params = super().normalize_tx_params(tx_params)
        returned = self._underlying_method(
            _hash, signer_address, signature
        ).call(tx_params.as_dict())
        # BUGFIX: previously `return Union[bytes, str](returned)`.
        # typing.Union aliases cannot be instantiated/called, so every
        # successful eth_call raised TypeError before the value could be
        # returned.  web3 has already decoded the bytes4 return value; pass
        # it through unchanged.  (The same fix should be applied to the
        # abi-gen template that produced this wrapper.)
        return returned

    def estimate_gas(
        self,
        _hash: Union[bytes, str],
        signer_address: str,
        signature: Union[bytes, str],
        tx_params: Optional[TxParams] = None,
    ) -> int:
        """Estimate gas consumption of method call."""
        (
            _hash,
            signer_address,
            signature,
        ) = self.validate_and_normalize_inputs(
            _hash, signer_address, signature
        )
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(
            _hash, signer_address, signature
        ).estimateGas(tx_params.as_dict())
# pylint: disable=too-many-public-methods,too-many-instance-attributes
class IValidator:
    """Wrapper class for IValidator Solidity contract.

    All method parameters of type `bytes`:code: should be encoded as UTF-8,
    which can be accomplished via `str.encode("utf_8")`:code:.
    """

    is_valid_signature: IsValidSignatureMethod
    """Constructor-initialized instance of
    :class:`IsValidSignatureMethod`.
    """

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        validator: IValidatorValidator = None,
    ):
        """Get an instance of wrapper for smart contract.

        :param web3_or_provider: Either an instance of `web3.Web3`:code: or
            `web3.providers.base.BaseProvider`:code:
        :param contract_address: where the contract has been deployed
        :param validator: for validation of method inputs.
        """
        # pylint: disable=too-many-statements
        self.contract_address = contract_address

        if not validator:
            validator = IValidatorValidator(web3_or_provider, contract_address)

        web3 = None
        if isinstance(web3_or_provider, BaseProvider):
            web3 = Web3(web3_or_provider)
        elif isinstance(web3_or_provider, Web3):
            web3 = web3_or_provider
        else:
            raise TypeError(
                "Expected parameter 'web3_or_provider' to be an instance of either"
                + " Web3 or BaseProvider"
            )

        # if any middleware was imported, inject it
        try:
            MIDDLEWARE
        except NameError:
            pass
        else:
            try:
                for middleware in MIDDLEWARE:
                    web3.middleware_onion.inject(
                        middleware["function"], layer=middleware["layer"],
                    )
            except ValueError as value_error:
                # NOTE(review): the args comparison below is effectively a
                # no-op -- every ValueError raised during injection is
                # swallowed, not only the duplicate-middleware one.  Verify
                # this is intentional.
                if value_error.args == (
                    "You can't add the same un-named instance twice",
                ):
                    pass

        self._web3_eth = web3.eth

        functions = self._web3_eth.contract(
            address=to_checksum_address(contract_address), abi=IValidator.abi()
        ).functions

        self.is_valid_signature = IsValidSignatureMethod(
            web3_or_provider,
            contract_address,
            functions.isValidSignature,
            validator,
        )

    @staticmethod
    def abi():
        """Return the ABI to the underlying contract."""
        return json.loads(
            '[{"constant":true,"inputs":[{"internalType":"bytes32","name":"hash","type":"bytes32"},{"internalType":"address","name":"signerAddress","type":"address"},{"internalType":"bytes","name":"signature","type":"bytes"}],"name":"isValidSignature","outputs":[{"internalType":"bytes4","name":"","type":"bytes4"}],"payable":false,"stateMutability":"view","type":"function"}]'  # noqa: E501 (line-too-long)
        )


# pylint: disable=too-many-lines
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
from six.moves import range
import numpy as np
try:
import scipy
# scipy.linalg cannot be accessed until explicitly imported
from scipy import linalg
# scipy.ndimage cannot be accessed until explicitly imported
except ImportError:
scipy = None
from .dataframe_iterator import DataFrameIterator
from .directory_iterator import DirectoryIterator
from .numpy_array_iterator import NumpyArrayIterator
from .affine_transformations import (apply_affine_transform,
apply_brightness_shift,
apply_channel_shift,
flip_axis)
class ImageDataGenerator(object):
"""Generate batches of tensor image data with real-time data augmentation.
The data will be looped over (in batches).
# Arguments
featurewise_center: Boolean.
Set input mean to 0 over the dataset, feature-wise.
samplewise_center: Boolean. Set each sample mean to 0.
featurewise_std_normalization: Boolean.
Divide inputs by std of the dataset, feature-wise.
samplewise_std_normalization: Boolean. Divide each input by its std.
zca_whitening: Boolean. Apply ZCA whitening.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
rotation_range: Int. Degree range for random rotations.
width_shift_range: Float, 1-D array-like or int
- float: fraction of total width, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-width_shift_range, +width_shift_range)`
- With `width_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `width_shift_range=[-1, 0, +1]`,
while with `width_shift_range=1.0` possible values are floats
in the interval `[-1.0, +1.0)`.
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-height_shift_range, +height_shift_range)`
- With `height_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `height_shift_range=[-1, 0, +1]`,
while with `height_shift_range=1.0` possible values are floats
in the interval `[-1.0, +1.0)`.
brightness_range: Tuple or list of two floats. Range for picking
a brightness shift value from.
shear_range: Float. Shear Intensity
(Shear angle in counter-clockwise direction in degrees)
zoom_range: Float or [lower, upper]. Range for random zoom.
If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
channel_shift_range: Float. Range for random channel shifts.
fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}.
Default is 'nearest'.
Points outside the boundaries of the input are filled
according to the given mode:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
cval: Float or Int.
Value used for points outside the boundaries
when `fill_mode = "constant"`.
horizontal_flip: Boolean. Randomly flip inputs horizontally.
vertical_flip: Boolean. Randomly flip inputs vertically.
rescale: rescaling factor. Defaults to None.
If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided
(after applying all other transformations).
preprocessing_function: function that will be applied on each input.
The function will run after the image is resized and augmented.
The function should take one argument:
one image (NumPy tensor with rank 3),
and should output a NumPy tensor with the same shape.
data_format: Image data format,
either "channels_first" or "channels_last".
"channels_last" mode means that the images should have shape
`(samples, height, width, channels)`,
"channels_first" mode means that the images should have shape
`(samples, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
validation_split: Float. Fraction of images reserved for validation
(strictly between 0 and 1).
interpolation_order: int, order to use for
the spline interpolation. Higher is slower.
dtype: Dtype to use for the generated arrays.
# Examples
Example of using `.flow(x, y)`:
```python
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(x_train)
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
steps_per_epoch=len(x_train) / 32, epochs=epochs)
# here's a more "manual" example
for e in range(epochs):
print('Epoch', e)
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
model.fit(x_batch, y_batch)
batches += 1
if batches >= len(x_train) / 32:
# we need to break the loop by hand because
# the generator loops indefinitely
break
```
Example of using `.flow_from_directory(directory)`:
```python
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
Example of transforming images and masks together.
```python
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50)
```
Example of using ```.flow_from_dataframe(dataframe, directory,
x_col, y_col)```:
```python
train_df = pandas.read_csv("./train.csv")
valid_df = pandas.read_csv("./valid.csv")
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_dataframe(
dataframe=train_df,
directory='data/train',
x_col="filename",
y_col="class",
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_dataframe(
dataframe=valid_df,
directory='data/validation',
x_col="filename",
y_col="class",
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0,
width_shift_range=0.,
height_shift_range=0.,
brightness_range=None,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format='channels_last',
validation_split=0.0,
interpolation_order=1,
dtype='float32'):
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
self.dtype = dtype
self.interpolation_order = interpolation_order
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError(
'`data_format` should be `"channels_last"` '
'(channel after row and column) or '
'`"channels_first"` (channel before row and column). '
'Received: %s' % data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
if validation_split and not 0 < validation_split < 1:
raise ValueError(
'`validation_split` must be strictly between 0 and 1. '
' Received: %s' % validation_split)
self._validation_split = validation_split
self.mean = None
self.std = None
self.principal_components = None
if isinstance(zoom_range, (float, int)):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif (len(zoom_range) == 2 and
all(isinstance(val, (float, int)) for val in zoom_range)):
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received: %s' % (zoom_range,))
if zca_whitening:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, which overrides '
'setting of `featurewise_center`.')
if featurewise_std_normalization:
self.featurewise_std_normalization = False
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening` '
'which overrides setting of'
'`featurewise_std_normalization`.')
if featurewise_std_normalization:
if not featurewise_center:
self.featurewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, '
'which overrides setting of '
'`featurewise_center`.')
if samplewise_std_normalization:
if not samplewise_center:
self.samplewise_center = True
warnings.warn('This ImageDataGenerator specifies '
'`samplewise_std_normalization`, '
'which overrides setting of '
'`samplewise_center`.')
if brightness_range is not None:
if (not isinstance(brightness_range, (tuple, list)) or
len(brightness_range) != 2):
raise ValueError(
'`brightness_range should be tuple or list of two floats. '
'Received: %s' % (brightness_range,))
self.brightness_range = brightness_range
def flow(self,
x,
y=None,
batch_size=32,
shuffle=True,
sample_weight=None,
seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
subset=None):
"""Takes data & label arrays, generates batches of augmented data.
# Arguments
x: Input data. NumPy array of rank 4 or a tuple.
If tuple, the first element
should contain the images and the second element
another NumPy array or a list of NumPy arrays
that gets passed to the output
without any modifications.
Can be used to feed the model miscellaneous data
along with the images.
In case of grayscale data, the channels axis of the image array
should have value 1, in case
of RGB data, it should have value 3, and in case
of RGBA data, it should have value 4.
y: Labels.
batch_size: Int (default: 32).
shuffle: Boolean (default: True).
sample_weight: Sample weights.
seed: Int (default: None).
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str (default: `''`).
Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
# Returns
An `Iterator` yielding tuples of `(x, y)`
where `x` is a NumPy array of image data
(in the case of a single image input) or a list
of NumPy arrays (in the case with
additional inputs) and `y` is a NumPy array
of corresponding labels. If 'sample_weight' is not None,
the yielded tuples are of the form `(x, y, sample_weight)`.
If `y` is None, only the NumPy array `x` is returned.
"""
return NumpyArrayIterator(
x,
y,
self,
batch_size=batch_size,
shuffle=shuffle,
sample_weight=sample_weight,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset,
dtype=self.dtype
)
def flow_from_directory(self,
directory,
target_size=(256, 256),
color_mode='rgb',
classes=None,
class_mode='categorical',
batch_size=32,
shuffle=True,
seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
"""Takes the path to a directory & generates batches of augmented data.
# Arguments
directory: string, path to the target directory.
It should contain one subdirectory per class.
Any PNG, JPG, BMP, PPM or TIF images
inside each of the subdirectories directory tree
will be included in the generator.
See [this script](
https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`,
default: `(256, 256)`.
The dimensions to which all images found will be resized.
color_mode: One of "grayscale", "rgb", "rgba". Default: "rgb".
Whether the images will be converted to
have 1, 3, or 4 channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None.
If not provided, the list of classes will be automatically
inferred from the subdirectory names/structure
under `directory`, where each subdirectory will
be treated as a different class
(and the order of the classes, which will map to the label
indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
- "binary" will be 1D binary labels,
"sparse" will be 1D integer labels,
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
shuffle: Whether to shuffle the data (default: True)
If set to False, sorts the data in alphanumeric order.
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify
a directory to which to save
the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: One of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to
resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`,
and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed,
`"box"` and `"hamming"` are also supported.
By default, `"nearest"` is used.
# Returns
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a NumPy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a NumPy array of corresponding labels.
"""
return DirectoryIterator(
directory,
self,
target_size=target_size,
color_mode=color_mode,
classes=classes,
class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size,
shuffle=shuffle,
seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation,
dtype=self.dtype
)
def flow_from_dataframe(self,
dataframe,
directory=None,
x_col="filename",
y_col="class",
weight_col=None,
target_size=(256, 256),
color_mode='rgb',
classes=None,
class_mode='categorical',
batch_size=32,
shuffle=True,
seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
subset=None,
interpolation='nearest',
validate_filenames=True,
**kwargs):
"""Takes the dataframe and the path to a directory
and generates batches of augmented/normalized data.
**A simple tutorial can be found **[here](
http://bit.ly/keras_flow_from_dataframe).
# Arguments
dataframe: Pandas dataframe containing the filepaths relative to
`directory` (or absolute paths if `directory` is None) of the
images in a string column. It should include other column/s
depending on the `class_mode`:
- if `class_mode` is `"categorical"` (default value) it must
include the `y_col` column with the class/es of each image.
Values in column can be string/list/tuple if a single class
or list/tuple if multiple classes.
- if `class_mode` is `"binary"` or `"sparse"` it must include
the given `y_col` column with class values as strings.
- if `class_mode` is `"raw"` or `"multi_output"` it should contain
the columns specified in `y_col`.
- if `class_mode` is `"input"` or `None` no extra column is needed.
directory: string, path to the directory to read images from. If `None`,
data in `x_col` column should be absolute paths.
x_col: string, column in `dataframe` that contains the filenames (or
absolute paths if `directory` is `None`).
y_col: string or list, column/s in `dataframe` that has the target data.
weight_col: string, column in `dataframe` that contains the sample
weights. Default: `None`.
target_size: tuple of integers `(height, width)`, default: `(256, 256)`.
The dimensions to which all images found will be resized.
color_mode: one of "grayscale", "rgb", "rgba". Default: "rgb".
Whether the images will be converted to have 1 or 3 color channels.
classes: optional list of classes (e.g. `['dogs', 'cats']`).
Default: None. If not provided, the list of classes will be
automatically inferred from the `y_col`,
which will map to the label indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: one of "binary", "categorical", "input", "multi_output",
"raw", sparse" or None. Default: "categorical".
Mode for yielding the targets:
- `"binary"`: 1D NumPy array of binary labels,
- `"categorical"`: 2D NumPy array of one-hot encoded labels.
Supports multi-label output.
- `"input"`: images identical to input images (mainly used to
work with autoencoders),
- `"multi_output"`: list with the values of the different columns,
- `"raw"`: NumPy array of values in `y_col` column(s),
- `"sparse"`: 1D NumPy array of integer labels,
- `None`, no targets are returned (the generator will only yield
batches of image data, which is useful to use in
`model.predict_generator()`).
batch_size: size of the batches of data (default: 32).
shuffle: whether to shuffle the data (default: True)
seed: optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: whether to follow symlinks inside class subdirectories
(default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`, and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed, `"box"` and
`"hamming"` are also supported. By default, `"nearest"` is used.
validate_filenames: Boolean, whether to validate image filenames in
`x_col`. If `True`, invalid images will be ignored. Disabling this
option can lead to speed-up in the execution of this function.
Default: `True`.
# Returns
A `DataFrameIterator` yielding tuples of `(x, y)`
where `x` is a NumPy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a NumPy array of corresponding labels.
"""
if 'has_ext' in kwargs:
warnings.warn('has_ext is deprecated, filenames in the dataframe have '
'to match the exact filenames in disk.',
DeprecationWarning)
if 'sort' in kwargs:
warnings.warn('sort is deprecated, batches will be created in the'
'same order than the filenames provided if shuffle'
'is set to False.', DeprecationWarning)
if class_mode == 'other':
warnings.warn('`class_mode` "other" is deprecated, please use '
'`class_mode` "raw".', DeprecationWarning)
class_mode = 'raw'
if 'drop_duplicates' in kwargs:
warnings.warn('drop_duplicates is deprecated, you can drop duplicates '
'by using the pandas.DataFrame.drop_duplicates method.',
DeprecationWarning)
return DataFrameIterator(
dataframe,
directory,
self,
x_col=x_col,
y_col=y_col,
weight_col=weight_col,
target_size=target_size,
color_mode=color_mode,
classes=classes,
class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size,
shuffle=shuffle,
seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset,
interpolation=interpolation,
validate_filenames=validate_filenames,
dtype=self.dtype
)
    def standardize(self, x):
        """Applies the normalization configuration in-place to a batch of inputs.
        `x` is changed in-place since the function is mainly used internally
        to standardize images and feed them to your network. If a copy of `x`
        would be created instead it would have a significant performance cost.
        If you want to apply this method without changing the input in-place
        you can call the method creating a copy before:
        standardize(np.copy(x))
        # Arguments
            x: Batch of inputs to be normalized.
        # Returns
            The inputs, normalized.
        """
        # NOTE: the order of the steps below is significant: user
        # preprocessing runs first, then rescaling, then sample-wise and
        # finally feature-wise normalizations.
        if self.preprocessing_function:
            x = self.preprocessing_function(x)
        if self.rescale:
            x *= self.rescale
        if self.samplewise_center:
            x -= np.mean(x, keepdims=True)
        if self.samplewise_std_normalization:
            # The 1e-6 epsilon guards against division by zero for
            # constant images.
            x /= (np.std(x, keepdims=True) + 1e-6)
        if self.featurewise_center:
            if self.mean is not None:
                x -= self.mean
            else:
                warnings.warn('This ImageDataGenerator specifies '
                              '`featurewise_center`, but it hasn\'t '
                              'been fit on any training data. Fit it '
                              'first by calling `.fit(numpy_data)`.')
        if self.featurewise_std_normalization:
            if self.std is not None:
                x /= (self.std + 1e-6)
            else:
                warnings.warn('This ImageDataGenerator specifies '
                              '`featurewise_std_normalization`, '
                              'but it hasn\'t '
                              'been fit on any training data. Fit it '
                              'first by calling `.fit(numpy_data)`.')
        if self.zca_whitening:
            if self.principal_components is not None:
                # Flatten each sample, project onto the ZCA components
                # computed by `fit()`, then restore the original shape.
                flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
                whitex = np.dot(flatx, self.principal_components)
                x = np.reshape(whitex, x.shape)
            else:
                warnings.warn('This ImageDataGenerator specifies '
                              '`zca_whitening`, but it hasn\'t '
                              'been fit on any training data. Fit it '
                              'first by calling `.fit(numpy_data)`.')
        return x
    def get_random_transform(self, img_shape, seed=None):
        """Generates random parameters for a transformation.
        # Arguments
            seed: Random seed.
            img_shape: Tuple of integers.
                Shape of the image that is transformed.
        # Returns
            A dictionary containing randomly chosen parameters describing the
            transformation.
        """
        # Axis indices for a single (unbatched) image.
        img_row_axis = self.row_axis - 1
        img_col_axis = self.col_axis - 1
        if seed is not None:
            np.random.seed(seed)
        # Rotation angle in degrees.
        if self.rotation_range:
            theta = np.random.uniform(
                -self.rotation_range,
                self.rotation_range)
        else:
            theta = 0
        # `tx` shifts along rows (height), `ty` along columns (width).
        if self.height_shift_range:
            try:  # 1-D array-like or int
                tx = np.random.choice(self.height_shift_range)
                tx *= np.random.choice([-1, 1])
            except ValueError:  # floating point
                tx = np.random.uniform(-self.height_shift_range,
                                       self.height_shift_range)
            if np.max(self.height_shift_range) < 1:
                # Ranges below 1 are fractions of the image dimension.
                tx *= img_shape[img_row_axis]
        else:
            tx = 0
        if self.width_shift_range:
            try:  # 1-D array-like or int
                ty = np.random.choice(self.width_shift_range)
                ty *= np.random.choice([-1, 1])
            except ValueError:  # floating point
                ty = np.random.uniform(-self.width_shift_range,
                                       self.width_shift_range)
            if np.max(self.width_shift_range) < 1:
                ty *= img_shape[img_col_axis]
        else:
            ty = 0
        if self.shear_range:
            shear = np.random.uniform(
                -self.shear_range,
                self.shear_range)
        else:
            shear = 0
        # Zoom factors along x and y; [1, 1] means "no zoom" and skips
        # the random draw entirely.
        if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
            zx, zy = 1, 1
        else:
            zx, zy = np.random.uniform(
                self.zoom_range[0],
                self.zoom_range[1],
                2)
        # Flips happen with probability 0.5 when the option is enabled.
        flip_horizontal = (np.random.random() < 0.5) * self.horizontal_flip
        flip_vertical = (np.random.random() < 0.5) * self.vertical_flip
        channel_shift_intensity = None
        if self.channel_shift_range != 0:
            channel_shift_intensity = np.random.uniform(-self.channel_shift_range,
                                                        self.channel_shift_range)
        brightness = None
        if self.brightness_range is not None:
            brightness = np.random.uniform(self.brightness_range[0],
                                           self.brightness_range[1])
        transform_parameters = {'theta': theta,
                                'tx': tx,
                                'ty': ty,
                                'shear': shear,
                                'zx': zx,
                                'zy': zy,
                                'flip_horizontal': flip_horizontal,
                                'flip_vertical': flip_vertical,
                                'channel_shift_intensity': channel_shift_intensity,
                                'brightness': brightness}
        return transform_parameters
def apply_transform(self, x, transform_parameters):
"""Applies a transformation to an image according to given parameters.
# Arguments
x: 3D tensor, single image.
transform_parameters: Dictionary with string - parameter pairs
describing the transformation.
Currently, the following parameters
from the dictionary are used:
- `'theta'`: Float. Rotation angle in degrees.
- `'tx'`: Float. Shift in the x direction.
- `'ty'`: Float. Shift in the y direction.
- `'shear'`: Float. Shear angle in degrees.
- `'zx'`: Float. Zoom in the x direction.
- `'zy'`: Float. Zoom in the y direction.
- `'flip_horizontal'`: Boolean. Horizontal flip.
- `'flip_vertical'`: Boolean. Vertical flip.
- `'channel_shift_intensity'`: Float. Channel shift intensity.
- `'brightness'`: Float. Brightness shift intensity.
# Returns
A transformed version of the input (same shape).
"""
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
x = apply_affine_transform(x, transform_parameters.get('theta', 0),
transform_parameters.get('tx', 0),
transform_parameters.get('ty', 0),
transform_parameters.get('shear', 0),
transform_parameters.get('zx', 1),
transform_parameters.get('zy', 1),
row_axis=img_row_axis,
col_axis=img_col_axis,
channel_axis=img_channel_axis,
fill_mode=self.fill_mode,
cval=self.cval,
order=self.interpolation_order)
if transform_parameters.get('channel_shift_intensity') is not None:
x = apply_channel_shift(x,
transform_parameters['channel_shift_intensity'],
img_channel_axis)
if transform_parameters.get('flip_horizontal', False):
x = flip_axis(x, img_col_axis)
if transform_parameters.get('flip_vertical', False):
x = flip_axis(x, img_row_axis)
if transform_parameters.get('brightness') is not None:
x = apply_brightness_shift(x, transform_parameters['brightness'])
return x
def random_transform(self, x, seed=None):
"""Applies a random transformation to an image.
# Arguments
x: 3D tensor, single image.
seed: Random seed.
# Returns
A randomly transformed version of the input (same shape).
"""
params = self.get_random_transform(x.shape, seed)
return self.apply_transform(x, params)
    def fit(self, x,
            augment=False,
            rounds=1,
            seed=None):
        """Fits the data generator to some sample data.
        This computes the internal data stats related to the
        data-dependent transformations, based on an array of sample data.
        Only required if `featurewise_center` or
        `featurewise_std_normalization` or `zca_whitening` are set to True.
        When `rescale` is set to a value, rescaling is applied to
        sample data before computing the internal data stats.
        # Arguments
            x: Sample data. Should have rank 4.
                In case of grayscale data,
                the channels axis should have value 1, in case
                of RGB data, it should have value 3, and in case
                of RGBA data, it should have value 4.
            augment: Boolean (default: False).
                Whether to fit on randomly augmented samples.
            rounds: Int (default: 1).
                If using data augmentation (`augment=True`),
                this is how many augmentation passes over the data to use.
            seed: Int (default: None). Random seed.
        """
        x = np.asarray(x, dtype=self.dtype)
        if x.ndim != 4:
            raise ValueError('Input to `.fit()` should have rank 4. '
                             'Got array with shape: ' + str(x.shape))
        # An unexpected channel count is only a warning: the caller may be
        # using an unusual data layout on purpose.
        if x.shape[self.channel_axis] not in {1, 3, 4}:
            warnings.warn(
                'Expected input to be images (as Numpy array) '
                'following the data format convention "' +
                self.data_format + '" (channels on axis ' +
                str(self.channel_axis) + '), i.e. expected '
                'either 1, 3 or 4 channels on axis ' +
                str(self.channel_axis) + '. '
                'However, it was passed an array with shape ' +
                str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +
                ' channels).')
        if seed is not None:
            np.random.seed(seed)
        # Work on a copy so the caller's array is never modified.
        x = np.copy(x)
        if self.rescale:
            x *= self.rescale
        if augment:
            # Build `rounds` randomly augmented copies of the data and
            # compute the statistics on those instead.
            ax = np.zeros(
                tuple([rounds * x.shape[0]] + list(x.shape)[1:]),
                dtype=self.dtype)
            for r in range(rounds):
                for i in range(x.shape[0]):
                    ax[i + r * x.shape[0]] = self.random_transform(x[i])
            x = ax
        if self.featurewise_center:
            self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
            # Reshape so the per-channel mean broadcasts over rows/cols.
            broadcast_shape = [1, 1, 1]
            broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
            self.mean = np.reshape(self.mean, broadcast_shape)
            x -= self.mean
        if self.featurewise_std_normalization:
            self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
            broadcast_shape = [1, 1, 1]
            broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
            self.std = np.reshape(self.std, broadcast_shape)
            # The 1e-6 epsilon guards against division by zero.
            x /= (self.std + 1e-6)
        if self.zca_whitening:
            if scipy is None:
                raise ImportError('Using zca_whitening requires SciPy. '
                                  'Install SciPy.')
            flat_x = np.reshape(
                x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
            # Covariance of the flattened samples; its SVD yields the ZCA
            # whitening matrix u * diag(1/sqrt(s + eps)) * u^T.
            sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
            u, s, _ = linalg.svd(sigma)
            s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)
            self.principal_components = (u * s_inv).dot(u.T)
from os import path
from typing import Any, Dict, List, Tuple, Union
from docutils import nodes
from docutils.nodes import Node
from sphinx.application import Sphinx
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.deprecation import RemovedInSphinx40Warning, deprecated_alias
from sphinx.environment.adapters.toctree import TocTree
from sphinx.locale import __
from sphinx.util import logging
from sphinx.util import progress_message
from sphinx.util.console import darkgreen # type: ignore
from sphinx.util.nodes import inline_all_toctrees
logger = logging.getLogger(__name__)
class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
    """
    A StandaloneHTMLBuilder subclass that puts the whole document tree on one
    HTML page.
    """
    name = 'singlehtml'
    epilog = __('The HTML page is in %(outdir)s.')

    copysource = False

    def get_outdated_docs(self) -> Union[str, List[str]]:  # type: ignore
        """Every build regenerates the single page, so all docs are outdated."""
        return 'all documents'

    def get_target_uri(self, docname: str, typ: str = None) -> str:
        """Return the URI used to link to *docname*.

        Known documents become anchors on the single master page; anything
        else is assumed to be an additional page with its own output file.
        """
        if docname not in self.env.all_docs:
            # chances are this is a html_additional_page
            return docname + self.out_suffix
        # all references are on the same page...
        master_page = self.config.master_doc + self.out_suffix
        return master_page + '#document-' + docname

    def get_relative_uri(self, from_: str, to: str, typ: str = None) -> str:
        # ignore source; everything lives on the one output page
        return self.get_target_uri(to, typ)

    def fix_refuris(self, tree: Node) -> None:
        """Fix refuris that carry a double anchor.

        When a refuri contains two ``#`` anchors, keep only the text from the
        second one and prefix it with the single page's file name.
        """
        fname = self.config.master_doc + self.out_suffix
        for refnode in tree.traverse(nodes.reference):
            if 'refuri' not in refnode:
                continue
            refuri = refnode['refuri']
            first_hash = refuri.find('#')
            if first_hash < 0:
                continue
            second_hash = refuri.find('#', first_hash + 1)
            if second_hash >= 0:
                refnode['refuri'] = fname + refuri[second_hash:]

    def _get_local_toctree(self, docname: str, collapse: bool = True, **kwargs: Any) -> str:
        """Render the local toctree of *docname* as an HTML fragment."""
        kwargs.setdefault('includehidden', False)
        toctree = TocTree(self.env).get_toctree_for(docname, self, collapse, **kwargs)
        if toctree is not None:
            self.fix_refuris(toctree)
        return self.render_partial(toctree)['fragment']

    def assemble_doctree(self) -> nodes.document:
        """Inline all toctrees into one doctree rooted at the master doc."""
        master = self.config.master_doc
        doctree = self.env.get_doctree(master)
        doctree = inline_all_toctrees(self, set(), master, doctree, darkgreen, [master])
        doctree['docname'] = master
        self.env.resolve_references(doctree, master, self)
        self.fix_refuris(doctree)
        return doctree

    def assemble_toc_secnumbers(self) -> Dict[str, Dict[str, Tuple[int, ...]]]:
        # Assemble toc_secnumbers to resolve section numbers on SingleHTML.
        # Merge all secnumbers to a single mapping.
        #
        # Note: current Sphinx has a refid conflict in singlehtml mode.
        # To avoid the problem, the key of each secnumber is replaced by a
        # "docname/refid" alias.
        #
        # There is related code in inline_all_toctrees() and
        # HTMLTranslator#add_secnumber().
        merged = {}  # type: Dict[str, Tuple[int, ...]]
        for docname, secnums in self.env.toc_secnumbers.items():
            for id, secnum in secnums.items():
                merged["%s/%s" % (docname, id)] = secnum
        return {self.config.master_doc: merged}

    def assemble_toc_fignumbers(self) -> Dict[str, Dict[str, Dict[str, Tuple[int, ...]]]]:
        # Assemble toc_fignumbers to resolve figure numbers on SingleHTML.
        # Merge all fignumbers to a single mapping.
        #
        # Note: current Sphinx has a refid conflict in singlehtml mode.
        # To avoid the problem, the key of each fignumber is replaced by a
        # "docname/figtype" alias.
        #
        # There is related code in inline_all_toctrees() and
        # HTMLTranslator#add_fignumber().
        merged = {}  # type: Dict[str, Dict[str, Tuple[int, ...]]]
        # e.g. {'foo': {'figure': {'id2': (2,), 'id1': (1,)}},
        #       'bar': {'figure': {'id1': (3,)}}}
        for docname, fignumlist in self.env.toc_fignumbers.items():
            for figtype, fignums in fignumlist.items():
                alias = "%s/%s" % (docname, figtype)
                merged.setdefault(alias, {}).update(fignums)
        return {self.config.master_doc: merged}

    def get_doc_context(self, docname: str, body: str, metatags: str) -> Dict:
        """Build the template context; the single page has no relation links."""
        toctree = TocTree(self.env).get_toctree_for(self.config.master_doc, self, False)
        # if there is no toctree, fall back to an empty (hidden) toc
        toc = ''
        display_toc = False
        if toctree:
            self.fix_refuris(toctree)
            toc = self.render_partial(toctree)['fragment']
            display_toc = True
        return {
            'parents': [],
            'prev': None,
            'next': None,
            'docstitle': None,
            'title': self.config.html_title,
            'meta': None,
            'body': body,
            'metatags': metatags,
            'rellinks': [],
            'sourcename': '',
            'toc': toc,
            'display_toc': display_toc,
        }

    def write(self, *ignored: Any) -> None:
        """Assemble all documents into one doctree and write the single page."""
        docnames = self.env.all_docs
        master = self.config.master_doc

        with progress_message(__('preparing documents')):
            self.prepare_writing(docnames)  # type: ignore

        with progress_message(__('assembling single document')):
            doctree = self.assemble_doctree()
            self.env.toc_secnumbers = self.assemble_toc_secnumbers()
            self.env.toc_fignumbers = self.assemble_toc_fignumbers()

        with progress_message(__('writing')):
            self.write_doc_serialized(master, doctree)
            self.write_doc(master, doctree)

    def finish(self) -> None:
        """Copy static assets and write auxiliary build outputs."""
        self.write_additional_files()
        self.copy_image_files()
        self.copy_download_files()
        self.copy_static_files()
        self.copy_extra_files()
        self.write_buildinfo()
        self.dump_inventory()

    @progress_message(__('writing additional files'))
    def write_additional_files(self) -> None:
        # no indices or search pages are supported
        # additional pages from conf.py
        for pagename, template in self.config.html_additional_pages.items():
            logger.info(' ' + pagename, nonl=True)
            self.handle_page(pagename, {}, template)

        if self.config.html_use_opensearch:
            logger.info(' opensearch', nonl=True)
            outfilename = path.join(self.outdir, '_static', 'opensearch.xml')
            self.handle_page('opensearch', {}, 'opensearch.xml', outfilename=outfilename)
# for compatibility
# Register a deprecated alias so the pre-move import location
# (``sphinx.builders.html.SingleFileHTMLBuilder``) keeps working,
# flagged with a RemovedInSphinx40Warning.
deprecated_alias('sphinx.builders.html',
                 {
                     'SingleFileHTMLBuilder': SingleFileHTMLBuilder,
                 },
                 RemovedInSphinx40Warning)
def setup(app: Sphinx) -> Dict[str, Any]:
    """Register the singlehtml builder and its configuration with Sphinx."""
    # Builds on the regular HTML builder machinery.
    app.setup_extension('sphinx.builders.html')
    app.add_builder(SingleFileHTMLBuilder)
    # By default, reuse the html_sidebars configuration value.
    app.add_config_value('singlehtml_sidebars', lambda self: self.html_sidebars, 'html')

    metadata = dict(
        version='builtin',
        parallel_read_safe=True,
        parallel_write_safe=True,
    )
    return metadata
/M5-0.3.2.tar.gz/M5-0.3.2/docs/tutorial/sim_console.md | M5 Simulator
============
The simulator is a helper application that makes it easy to see your app as it will appear on a real mobile device,
and to test and modify your app very easily.
When you run the simulator, you are actually just running your app, but the m5.simulator.js script re-frames your
app's main div inside an image of a device, and it adds an iframe to the page which shows the simulator console.
The advantage of "injecting" the simulator into your app in this way is that the browser debugger is talking
directly to your app's page. This means you can use all the normal browser debugger tools with your app.
The simulator console gives you these additional features:
* Remote debugging console. Works no matter where your app is running.
* Simple code editor
* Local storage browser
The simulator console is able to talk to your app remotely using Ajax calls. When you're running your
app in the browser, the console isn't super useful since it works the same as the Javascript console.
However, the remote console will also work if you run your app somewhere else, like on an actual phone
or in the iPhone simulator, as long as you're connected to your local server.
To try it, go look at where you ran the _m5 server_ command. It shows a line like this:
Access the app from your phone using: http://scottp-leopard.local:8000
Go open that link on your phone. Now, enter this in the simulator console:
    $('#jqt').hide()
You should see your app screen disappear on the phone.
| PypiClean |
/MJOLNIRGui-0.9.10.tar.gz/MJOLNIRGui-0.9.10/src/main/python/Views/Cut1DManager.py | import sys
sys.path.append('..')
try:
from MJOLNIRGui.src.main.python._tools import ProgressBarDecoratorArguments,loadUI
import MJOLNIRGui.src.main.python._tools as _GUItools
from MJOLNIRGui.src.main.python.DataModels import Cut1DModel
from MJOLNIRGui.src.main.python.MJOLNIR_Data import Gui1DCutObject
except ImportError:
from DataModels import Cut1DModel
from MJOLNIR_Data import Gui1DCutObject
from _tools import ProgressBarDecoratorArguments,loadUI
import _tools as _GUItools
from os import path
from PyQt5 import QtWidgets, uic, QtCore, QtGui
import numpy as np
import matplotlib.pyplot as plt
import itertools
def Cut1D_Delete1D_btn_function(self):
    """Delete the 1D cuts currently selected in the cut list view.

    Removes the selected entries from the model, notifies views of the layout
    change, clears the selection, refreshes button states, and reruns the
    state machine.
    """
    selected = self.ui.Cut1D_listView.selectedIndexes()
    model = self.Cut1DModel
    model.delete(selected)
    model.layoutChanged.emit()
    model.Cut1D_listView.clearSelection()
    self.update1DCutLabels()
    self.stateMachine.run()
def Cut1D_DoubleClick_Selection_function(self, index, *args, **kwargs):
    """Start inline editing of the double-clicked entry in the cut list."""
    list_view = self.ui.Cut1D_listView
    list_view.edit(index)
def setupCut1D(self):
    """Wire up the 1D-cut tab: button signals, list-view model/selection,
    and a right-click context menu on the cut list.

    ``self`` is the main gui window; all widgets live on ``self.ui``.
    """
    # Connect the tab's buttons to their handler functions (bound onto the
    # gui window by Cut1DManager.initCut1DManager).
    self.ui.Cut1D_plot_button.clicked.connect(self.Cut1D_plot_button_function)
    self.ui.Cut1D_Generate1D_button.clicked.connect(self.Cut1D_Generate1D_button_function)
    self.ui.Cut1D_Delete1D_btn.clicked.connect(self.Delete1D_button_function)
    self.ui.Cut1D_SetTitle_button.clicked.connect(self.Cut1D_SetTitle_button_function)
    # Model/view plumbing for the list of generated cuts.
    self.Cut1DModel = Cut1DModel(Cut1D_listView=self.ui.Cut1D_listView)
    self.ui.Cut1D_listView.setModel(self.Cut1DModel)
    self.Cut1DSelectionModel = self.ui.Cut1D_listView.selectionModel()
    self.Cut1DSelectionModel.selectionChanged.connect(self.selected1DCutChanged)
    # Double click starts inline renaming of a cut.
    self.ui.Cut1D_listView.doubleClicked.connect(self.Cut1D_DoubleClick_Selection_function)
    def contextMenu(view,event,gui):
        # Generate a context menu that opens on right click
        position = event.globalPos()
        idx = view.selectedIndexes()
        if len(idx)!=0:
            items = [gui.Cut1DModel.item(i) for i in idx]
            if event.type() == QtCore.QEvent.ContextMenu:
                menu = QtWidgets.QMenu()
                # Action text depends on single vs multiple selection.
                if len(idx)==1:
                    text = 'Plot cut'
                else:
                    text = 'Plot cuts individually'
                plot = QtWidgets.QAction(text)
                plot.setToolTip(text)
                plot.setStatusTip(plot.toolTip())
                # One figure per selected cut.
                plot.triggered.connect(lambda: [self.plotItem(it) for it in items])
                plot.setIcon(QtGui.QIcon(self.AppContext.get_resource('Icons/Own/plot.png')))
                delete = QtWidgets.QAction('Delete')
                delete.setToolTip('Delete cut(s)')
                delete.setStatusTip(delete.toolTip())
                delete.triggered.connect(lambda: gui.Cut1DModel.delete(idx))
                delete.setIcon(QtGui.QIcon(self.AppContext.get_resource('Icons/Own/cross-button.png')))
                menu.addAction(plot)
                if len(idx)>1: # multiple cuts selected at once
                    plotTogether = QtWidgets.QAction('Plot Together')
                    plotTogether.setToolTip('Plot cuts together')
                    # NOTE(review): this reuses plot.toolTip() rather than
                    # plotTogether.toolTip() — possibly unintended; confirm.
                    plotTogether.setStatusTip(plot.toolTip())
                    plotTogether.triggered.connect(self.Cut1D_cut1DPlotTogether)
                    plotTogether.setIcon(QtGui.QIcon(self.AppContext.get_resource('Icons/Own/plotMany.png')))
                    menu.addAction(plotTogether)
                menu.addAction(delete)
                return menu.exec_(position)
    # Replace the view's contextMenuEvent so right clicks open the menu above.
    self.ui.Cut1D_listView.contextMenuEvent = lambda event: contextMenu(self.ui.Cut1D_listView,event,self)
def selected1DCutChanged(self, *args, **kwargs):
    """Refresh the 1D-cut button states whenever the selection changes."""
    self.update1DCutLabels()
def update1DCutLabels(self):
    """Enable/disable the 1D-cut buttons based on model contents and
    the current selection, and load the first selected cut's settings."""
    # Export controls are only meaningful when at least one cut exists.
    has_cuts = self.Cut1DModel.rowCount() > 0
    for widget in (self.ui.Cut1D_Export1D_btn,
                   self.ui.Cut1D_ExporCSV_radioBtn,
                   self.ui.Cut1D_ExporUFIT_radioBtn):
        widget.setEnabled(has_cuts)
    indices = self.Cut1DModel.Cut1D_listView.selectedIndexes()
    # "Plot together" needs more than one selected cut.
    self.ui.Cut1D_plotTogether_btn.setEnabled(len(indices) > 1)
    if indices:
        # Restore the UI fields from the first selected cut.
        self.Cut1D_indexChanged(indices[0])
        self.ui.Cut1D_Delete1D_btn.setEnabled(True)
    else:
        self.ui.Cut1D_Delete1D_btn.setEnabled(False)
def cut1DPlotTogether(self):
    """Plot all currently selected 1D cuts onto one shared axis and add a
    legend.

    NOTE(review): assumes at least one cut is selected; the triggering
    button/menu entry is only enabled for multi-selections, so ``ax``
    should never stay ``None``.
    """
    indices = self.Cut1DModel.Cut1D_listView.selectedIndexes()
    ax = None
    for index in indices:
        item = self.Cut1DModel.item(index)
        ax = self.plotItem(item, ax=ax)
    ax.legend()
def extractCutParameters(self):
    """Collect the 1D-cut settings from the UI widgets.

    Returns a tuple ``(ds, q1, q2, width, minPixel, EMax, EMin, cutQ, rlu)``
    where ``q1``/``q2`` are 3-vectors (H, K, L) in RLU mode and 2-vectors
    (Qx, Qy) otherwise, and ``ds`` is the currently selected data set.
    """
    ui = self.ui
    rlu = ui.Cut1D_SelectUnits_RLU_radioButton.isChecked()
    cutQ = ui.Cut1D_SelectCut_Q_radioButton.isChecked()
    # Start point; the L component only exists in RLU mode.
    HStart = ui.Cut1D_HStart_lineEdit.text()
    KStart = ui.Cut1D_KStart_lineEdit.text()
    LStart = ui.Cut1D_LStart_lineEdit.text() if rlu else 0.0
    # End point is only meaningful for cuts along Q.
    if cutQ:
        HEnd = ui.Cut1D_HEnd_lineEdit.text()
        KEnd = ui.Cut1D_KEnd_lineEdit.text()
        LEnd = ui.Cut1D_LEnd_lineEdit.text() if rlu else 0.0
    else:
        HEnd = KEnd = LEnd = 0.0
    EMax = float(ui.Cut1D_EMax_lineEdit.text())
    EMin = float(ui.Cut1D_EMin_lineEdit.text())
    width = float(ui.Cut1D_Width_lineEdit.text())
    minPixel = float(ui.Cut1D_MinPixel_lineEdit.text())
    ds = self.DataSetModel.getCurrentDataSet()
    if rlu:
        q1 = np.array([HStart, KStart, LStart], dtype=float)
        q2 = np.array([HEnd, KEnd, LEnd], dtype=float)
    else:
        q1 = np.array([HStart, KStart], dtype=float)
        q2 = np.array([HEnd, KEnd], dtype=float)
    return ds, q1, q2, width, minPixel, EMax, EMin, cutQ, rlu
@ProgressBarDecoratorArguments(runningText='Save Cut1D to folder',completedText='Cut1D saved')
def Cut1D_Export1D_btn_function(self):
    """Export all 1D cuts, either as one CSV file per cut or as a single
    uFit session file, depending on the selected export radio button.

    The export file name comes from the name line edit; ``#`` in the name is
    replaced by each cut's title for CSV export. Returns ``False`` when the
    user cancels a file dialog.
    """
    asCSV = self.ui.Cut1D_ExporCSV_radioBtn.isChecked()
    saveString = self.ui.Cut1D_ExportName_lineEdit.text()
    if asCSV:
        saveFolder = QtWidgets.QFileDialog.getExistingDirectory(self, 'Save 1D cuts')
        # Bug fix: the original condition `not saveFolder is None or
        # saveFolder == ''` parsed as `(saveFolder is not None) or ...`, so a
        # cancelled dialog (empty string) still proceeded with the export.
        if saveFolder is not None and saveFolder != '':
            if not saveString.split('.')[-1] == 'csv':
                saveString = saveString+'.csv'
            if '#' not in saveString:  # Require file name to include a #
                saveString = saveString.replace('.csv', '_#.csv')
            # One file per cut, with '#' replaced by the cut's title.
            for cut in self.Cut1DModel.dataCuts1D:
                name = cut.name.replace(' ', '_')
                location = path.join(saveFolder, saveString.replace('#', name))
                cut.save(location)
        else:
            return False
    else:
        if not saveString.split('.')[-1] == 'ufit':
            saveString = saveString+'.ufit'
        location, _ = QtWidgets.QFileDialog.getSaveFileName(self, 'Save 1D cuts', saveString)
        if location is None or location == '':
            return False
        if not location.split('.')[-1] == 'ufit':
            location = location+'.ufit'
        self.Cut1D_Save_To_uFit(location)
def checker(q1, q2, width, minPixel, EMax, EMin, cutQ):
    """Validate 1D-cut parameters; return ``False`` if an error is detected.

    Each violated constraint pops up its own dialog message. ``q1``, ``q2``
    and ``cutQ`` are accepted for a uniform signature but not checked here.
    """
    problems = []
    if EMax < EMin:
        problems.append('1D Cut could not be made. EMax ({}) < EMin ({})!'.format(EMax, EMin))
    if width < 0:
        problems.append('1D Cut could not be made. Q width ({}) is negative!'.format(width))
    if minPixel < 0:
        problems.append('1D Cut could not be made. Min Pixel ({}) is negative!'.format(minPixel))
    for message in problems:
        _GUItools.dialog(text=message)
    return not problems
@ProgressBarDecoratorArguments(runningText='Plotting Cut1D',completedText='Plotting Done')
def Cut1D_plot_button_function(self):
    """Generate a new 1D cut and plot it in a fresh figure window."""
    self.Cut1D_Generate1D_button_function()
    newest_cut = self.Cut1DModel.dataCuts1D[-1]
    # plot() creates an axis without returning it, so fetch the current one.
    newest_cut.plot()
    ax = plt.gca()
    self.windows.append(ax.get_figure())
    self.Cut1D = ax
    return True
@ProgressBarDecoratorArguments(runningText='Cutting Cut1D',completedText='Cutting Done')
def Cut1D_Generate1D_button_function(self):
    """Perform a 1D cut (along Q or along E) on the current data set and
    append the result to the cut model as a Gui1DCutObject.

    Cut parameters come either from the UI widgets or, when set, from
    ``self.interactiveCut``. The ``parameters`` dict stores both the raw cut
    arguments and the widget values needed to restore the UI for this cut.
    Returns ``False`` when the data are not converted or validation fails.
    """
    # Cutting requires converted data.
    if not self.stateMachine.requireStateByName('Converted'):
        return False
    if self.interactiveCut is None:
        ds,q1,q2,width,minPixel,EMax,EMin,cutQ,rlu = extractCutParameters(self)
    else:
        # Parameters handed over from an interactive cut in a plot window.
        ds,q1,q2,width,minPixel,EMax,EMin,cutQ,rlu = self.interactiveCut
    # Validate limits before cutting; checker() pops up dialogs on failure.
    if checker(q1,q2,width,minPixel,EMax,EMin,cutQ) is False:
        return False
    try:
        if cutQ:
            pdData,bins = ds.cut1D(q1=q1,q2=q2,width=width,minPixel=minPixel,Emin=EMin,Emax=EMax,rlu=rlu,constantBins=True,ufit=False)
            parameters = {'q1':q1,'q2':q2,'EMin':EMin,'EMax':EMax,'rlu':rlu,'width':width,'constantBins':True,'minPixel':minPixel,'method':'cut1D','dataset':ds}
            # add parameters to correct edits, loop through q. If rlu sizes matches otherwise len(q) = 2 and padding with 0.0
            for q,field in itertools.zip_longest(q1,['Cut1D_HStart_lineEdit','Cut1D_KStart_lineEdit','Cut1D_LStart_lineEdit'],fillvalue=0.0):
                parameters[field] = q
            for q,field in itertools.zip_longest(q2,['Cut1D_HEnd_lineEdit','Cut1D_KEnd_lineEdit','Cut1D_LEnd_lineEdit'],fillvalue=0.0):
                parameters[field] = q
            # Widget values to restore when this cut is re-selected.
            parameters['Cut1D_EMax_lineEdit'] = parameters['EMax']
            parameters['Cut1D_EMin_lineEdit'] = parameters['EMin']
            parameters['Cut1D_Width_lineEdit'] = width
            parameters['Cut1D_MinPixel_lineEdit'] = minPixel
            parameters['Cut1D_SelectCut_Q_radioButton'] = True
            parameters['Cut1D_SelectCut_E_radioButton'] = False
            parameters['Cut1D_SelectUnits_RLU_radioButton'] = rlu
            parameters['Cut1D_SelectUnits_AA_radioButton'] = not rlu
        else: # else along E
            pdData,bins = ds.cut1DE(E1=EMin,E2=EMax,q=q1,rlu=rlu,width=width,constantBins=True, minPixel = minPixel,ufit=False)
            parameters = {'EMin':EMin,'EMax':EMax,'q1':q1,'q2':None,'rlu':rlu,'width':width,'constantBins':True,'minPixel':minPixel,'method':'cut1DE','dataset':ds}
            # Same q used for start and end fields in an energy cut.
            for q,field in itertools.zip_longest(q1,['Cut1D_HStart_lineEdit','Cut1D_KStart_lineEdit','Cut1D_LStart_lineEdit','Cut1D_HEnd_lineEdit','Cut1D_KEnd_lineEdit','Cut1D_LEnd_lineEdit'],fillvalue=0.0):
                parameters[field] = q
            parameters['Cut1D_EMax_lineEdit'] = parameters['EMax']
            parameters['Cut1D_EMin_lineEdit'] = parameters['EMin']
            parameters['Cut1D_Width_lineEdit'] = width
            parameters['Cut1D_MinPixel_lineEdit'] = minPixel
            parameters['Cut1D_SelectCut_Q_radioButton'] = False
            parameters['Cut1D_SelectCut_E_radioButton'] = True
            parameters['Cut1D_SelectUnits_RLU_radioButton'] = rlu
            parameters['Cut1D_SelectUnits_AA_radioButton'] = not rlu
        # Generate a Gui1DCutObject
        # Running counter used to give each cut a unique default title.
        if not hasattr(self,'cutNumber'):
            self.cutNumber = 1
        title = 'Cut {}'.format(self.cutNumber)
        parameters['Cut1D_SetTitle_lineEdit'] = title
        gui1DCut = Gui1DCutObject(name=title,parameters=parameters,pdData=pdData,bins=bins)
        self.cutNumber+=1
        self.Cut1DModel.append(gui1DCut)
    except AttributeError as e:
        # NOTE(review): re-raising makes the lines below unreachable; the
        # dialog fallback is commented out — looks like leftover debugging.
        raise e
        #_GUItools.dialog(text='1D Cut could not be made. Check the limits for the cut and try again!')
        return False
def Cut1D_SetTitle_button_function(self):
    """Apply the title line-edit text to the current 1D-cut axis.

    Falls back to the line edit's placeholder text when the field is empty,
    then redraws the figure. Does nothing if no 1D cut axis exists yet.
    """
    TitleText = self.ui.Cut1D_SetTitle_lineEdit.text()
    if TitleText == '':
        # Bug fix: QLineEdit has no ``getPlaceholderText``; the getter for
        # the ``placeholderText`` property is ``placeholderText()``.
        TitleText = self.ui.Cut1D_SetTitle_lineEdit.placeholderText()
    if hasattr(self, 'Cut1D'):
        self.Cut1D.set_title(TitleText)
        fig = self.Cut1D.get_figure()
        fig.canvas.draw()
def Cut1D_toggle_units_function(self):
    """Switch axis labels and L-field availability between RLU and AA units."""
    ui = self.ui
    rlu = ui.Cut1D_SelectUnits_RLU_radioButton.isChecked()
    # Axis labels: (H, K, L) in RLU mode, (Qx, Qy, N/A) in AA mode.
    labels = ('H', 'K', 'L') if rlu else ('Qx', 'Qy', 'N/A')
    ui.Cut1D_Hlabel.setText(labels[0])
    ui.Cut1D_Klabel.setText(labels[1])
    ui.Cut1D_Llabel.setText(labels[2])
    ui.Cut1D_LStart_lineEdit.setEnabled(rlu)
    if rlu:
        # The L end point is only relevant when cutting along Q.
        if ui.Cut1D_SelectCut_Q_radioButton.isChecked():
            ui.Cut1D_LEnd_lineEdit.setEnabled(True)
    else:
        ui.Cut1D_LEnd_lineEdit.setEnabled(False)
def Cut1D_toggle_CutDir_function(self):
    """Update labels and end-point fields when toggling between a cut along
    Q (start/stop points) and a cut along E (single point)."""
    ui = self.ui
    if ui.Cut1D_SelectCut_Q_radioButton.isChecked():  # cut along Q
        ui.Cut1D_StartLabel.setText('Start')
        ui.Cut1D_StopLabel.setText('Stop')
        for field in (ui.Cut1D_HEnd_lineEdit, ui.Cut1D_KEnd_lineEdit):
            field.setEnabled(True)
        ui.Cut1D_MinPixel_label.setText('Min Pixel [1/AA]')
        # The L end point only exists in RLU units.
        if ui.Cut1D_SelectUnits_RLU_radioButton.isChecked():
            ui.Cut1D_LEnd_lineEdit.setEnabled(True)
    else:  # cut along E: a single Q point, binning in energy
        ui.Cut1D_StartLabel.setText('Point')
        ui.Cut1D_StopLabel.setText('N/A')
        for field in (ui.Cut1D_HEnd_lineEdit, ui.Cut1D_KEnd_lineEdit,
                      ui.Cut1D_LEnd_lineEdit):
            field.setEnabled(False)
        ui.Cut1D_MinPixel_label.setText('Min Pixel [meV]')
def Cut1D_Save_To_uFit(self, saveFile):
    """Bundle all current 1D cuts into one uFit session file at *saveFile*.

    Does nothing when there are no cuts; returns ``False`` for an empty or
    missing file name. A missing ``.ufit`` extension is appended.
    """
    from ufit.gui.session import UfitSession
    from ufit.gui.scanitem import ScanDataItem
    if self.Cut1DModel.rowCount() == 0:
        return
    cuts = self.Cut1DModel.dataCuts1D
    # Use each cut's gui name as the uFit title.
    for cut in cuts:
        cut.ufit.meta['title'] = cut.name
    self.ufitsaveFile = saveFile
    session = UfitSession()
    session.add_items([ScanDataItem(cut.ufit) for cut in cuts])
    if saveFile is None or saveFile == '':
        return False
    if saveFile.split('.')[-1] != 'ufit':
        saveFile += '.ufit'
    session.set_filename(saveFile)
    session.save()
def plotItem(self, item, ax=None):
    """Plot a Gui1DCutObject and return its axis.

    When *ax* is ``None`` a new figure is created and registered in
    ``self.windows``; otherwise the cut is drawn into the given axis.
    """
    new_window = ax is None
    ax = item.plot(ax=ax)
    fig = ax.get_figure()
    fig.tight_layout()
    if new_window:
        self.windows.append(fig)
    return ax
#def Cut1D_Cut_SelectionChanged_function(self):
#self.guiWindow.View3D_indexChanged = lambda index: indexChanged(self.guiWindow,index)
def indexChanged(self, index):
    """Restore the UI widgets from the parameters stored with a 1D cut.

    Widget names in ``parameters`` containing ``radio``/``checkBox`` are set
    via ``setChecked``; names containing ``lineEdit`` via ``setText``.
    """
    cut1D = self.Cut1DModel.item(index)
    if not hasattr(cut1D, 'parameters'):
        return
    for setting, value in cut1D.parameters.items():
        if 'radio' in setting or 'checkBox' in setting:
            getattr(self.ui, setting).setChecked(value)
        elif 'lineEdit' in setting:
            getattr(self.ui, setting).setText(str(value))
def Cut1D_DataSet_selectionChanged_function(self):
    """Mirror the selected data set's name into the title placeholder text."""
    ds = self.DataSetModel.getCurrentDataSet()
    placeholder = '' if ds is None else ds.name
    self.ui.Cut1D_SetTitle_lineEdit.setPlaceholderText(placeholder)
# Load the Qt Designer .ui file once at import time; loadUI returns the
# (base class, form class) pair used for multiple inheritance below.
Cut1DManagerBase, Cut1DManagerForm = loadUI('Cut1D.ui')
class Cut1DManager(Cut1DManagerBase, Cut1DManagerForm):
    """Widget managing the 1D-cut tab.

    Binds the module-level handler functions onto the main gui window (so
    they run with the gui window as ``self``) and copies this widget's
    ``Cut1D*`` attributes into the main window's ``ui`` namespace.
    """
    def __init__(self, parent=None, guiWindow=None):
        super(Cut1DManager, self).__init__(parent)
        self.setupUi(self)
        self.guiWindow = guiWindow
        self.initCut1DManager()
    def initCut1DManager(self):
        """Attach all Cut1D handler functions to the main gui window."""
        self.guiWindow.interactiveCut = None # Used to pass in interactive cuts
        # Each lambda closes over guiWindow so the module-level functions
        # receive it as their ``self`` argument.
        self.guiWindow.Cut1D_plot_button_function = lambda: Cut1D_plot_button_function(self.guiWindow)
        self.guiWindow.Cut1D_Generate1D_button_function = lambda: Cut1D_Generate1D_button_function(self.guiWindow)
        self.guiWindow.Cut1D_SetTitle_button_function = lambda: Cut1D_SetTitle_button_function(self.guiWindow)
        self.guiWindow.setupCut1D = lambda: setupCut1D(self.guiWindow)
        self.guiWindow.Cut1D_indexChanged = lambda index: indexChanged(self.guiWindow,index)
        self.guiWindow.Cut1D_cut1DPlotTogether = lambda: cut1DPlotTogether(self.guiWindow)
        self.guiWindow.Cut1D_toggle_units_function = lambda: Cut1D_toggle_units_function(self.guiWindow)
        self.guiWindow.Cut1D_toggle_CutDir_function = lambda: Cut1D_toggle_CutDir_function(self.guiWindow)
        self.guiWindow.Cut1D_Save_To_uFit = lambda location: Cut1D_Save_To_uFit(self.guiWindow,location)
        self.guiWindow.Cut1D_DataSet_selectionChanged_function = lambda: Cut1D_DataSet_selectionChanged_function(self.guiWindow)
        self.guiWindow.plotItem = lambda item,ax=None: plotItem(self.guiWindow,item,ax)
        self.guiWindow.Cut1D_DoubleClick_Selection_function = lambda index:Cut1D_DoubleClick_Selection_function(self.guiWindow,index)
        self.guiWindow.Delete1D_button_function = lambda:Cut1D_Delete1D_btn_function(self.guiWindow)
        self.guiWindow.selected1DCutChanged = lambda : selected1DCutChanged(self.guiWindow)
        self.guiWindow.update1DCutLabels = lambda:update1DCutLabels(self.guiWindow)
        self.guiWindow.Cut1D_Export1D_btn_function = lambda:Cut1D_Export1D_btn_function(self.guiWindow)
        # Expose this widget's Cut1D* child widgets on the main window's ui
        # object so the handlers above can find them via self.ui.
        for key,value in self.__dict__.items():
            if 'Cut1D' in key:
                self.guiWindow.ui.__dict__[key] = value
    def setup(self):
        """Connect signals once the handlers and widgets are in place."""
        self.guiWindow.setupCut1D()
        self.guiWindow.ui.Cut1D_SelectUnits_RLU_radioButton.toggled.connect(self.guiWindow.Cut1D_toggle_units_function)
        self.guiWindow.ui.Cut1D_SelectCut_Q_radioButton.toggled.connect(self.guiWindow.Cut1D_toggle_CutDir_function)
        self.guiWindow.ui.Cut1D_Export1D_btn.clicked.connect(self.guiWindow.Cut1D_Export1D_btn_function)
        self.guiWindow.ui.Cut1D_SetTitle_lineEdit.returnPressed.connect(self.TitleChanged)
        # Keep the title placeholder in sync with the selected data set.
        self.guiWindow.DataSetSelectionModel.selectionChanged.connect(self.guiWindow.Cut1D_DataSet_selectionChanged_function)
        self.guiWindow.DataSetModel.dataChanged.connect(self.guiWindow.Cut1D_DataSet_selectionChanged_function)
        self.guiWindow.ui.Cut1D_plotTogether_btn.clicked.connect(self.guiWindow.Cut1D_cut1DPlotTogether)
    def TitleChanged(self):
        """Apply the typed title when Enter is pressed in the title field."""
        if self.guiWindow.ui.Cut1D_SetTitle_button.isEnabled():
            self.guiWindow.Cut1D_SetTitle_button_function()
/Auxjad-1.0.0.tar.gz/Auxjad-1.0.0/auxjad/core/Shuffler.py | import random
from typing import Any, Optional, Union
import abjad
from .. import get, mutate
class Shuffler:
r"""Takes an |abjad.Container| (or child class) and shuffles or rotates its
logical ties or pitches. When shuffling or rotating pitches only, tuplets
are supported, otherwise tuplets are not supported.
Basic usage:
Calling the object will output a shuffled selection of the input
container.
>>> container = abjad.Container(r"c'4 d'4 e'4 f'4")
>>> abjad.show(container)
.. docs::
{
c'4
d'4
e'4
f'4
}
.. figure:: ../_images/Shuffler-1oLQZeQNPo.png
>>> shuffler = auxjad.Shuffler(container)
>>> notes = shuffler()
>>> staff = abjad.Staff(notes)
>>> abjad.show(staff)
.. docs::
\new Staff
{
\time 4/4
d'4
c'4
f'4
e'4
}
.. figure:: ../_images/Shuffler-z2om98675v.png
>>> notes = shuffler()
>>> staff = abjad.Staff(notes)
>>> abjad.show(staff)
.. docs::
\new Staff
{
\time 4/4
c'4
e'4
d'4
f'4
}
.. figure:: ../_images/Shuffler-xu7sln4vt7n.png
To get the result of the last operation, use the property
:attr:`current_window`.
>>> notes = shuffler.current_window
>>> staff = abjad.Staff(notes)
>>> abjad.show(staff)
.. docs::
\new Staff
{
\time 4/4
c'4
e'4
d'4
f'4
}
.. figure:: ../_images/Shuffler-gphtpqn9jb.png
Calling the object outputs the same result as using the method
:meth:`shuffle`.
>>> notes = shuffler.shuffle()
>>> staff = abjad.Staff(notes)
>>> abjad.show(staff)
.. docs::
\new Staff
{
\time 4/4
e'4
f'4
c'4
d'4
}
.. figure:: ../_images/Shuffler-g965k0d03if.png
.. warning::
Unlike the other classes in Auxjad, the very first call of an instance
of this class will already process the initial container. To disable
this behaviour and output the initial container once before shuffling
or rotating it, initialise the class with the keyword argument
:attr:`process_on_first_call` set to ``False``.
>>> container = abjad.Container(r"c'4 d'4 e'4 f'4")
>>> shuffler = auxjad.Shuffler(container,
... process_on_first_call=False,
... )
>>> notes = shuffler()
>>> staff = abjad.Staff(notes)
>>> abjad.show(staff)
.. docs::
\new Staff
{
\time 4/4
c'4
d'4
e'4
f'4
}
.. figure:: ../_images/Shuffler-76039tn5b9k.png
:func:`len()`:
Applying the :func:`len()` function to the shuffler will return the
number of logical ties of :attr:`contents`.
>>> container = abjad.Container(r"c'4 d'4 e'4 f'4 ~ | f'2 g'2")
>>> shuffler = auxjad.Shuffler(container)
>>> len(shuffler)
5
Do note that consecutive rests are considered as a single logical tie,
so in the example below the :func:`len()` function returns ``5`` and
not ``6``. When shuffling or rotating logical ties, consecutive rests
are also shuffled and rotated together.
>>> container = abjad.Container(r"c'8. d'4 r8 r8. e'16 f'8.")
>>> shuffler = auxjad.Shuffler(container)
>>> len(shuffler)
5
Arguments and properties:
This class has many keyword arguments, all of which can be altered
after instantiation using properties with the same names as shown
below. Setting :attr:`pitch_only` to ``True`` will enable pitch mode;
by default, this class shuffles and rotates logical ties, but in pitch
mode only pitches are shuffled or rotated. By setting
:attr:`preserve_rest_position` to ``True`` the shuffle and rotation
operations will not change the position or duration of rests.
:attr:`disable_rewrite_meter` disables the
|abjad.Meter.rewrite_meter()| mutation which is applied to the
container after every call, and :attr:`omit_time_signatures` will
remove all time signatures from the output (both are ``False`` by
default). The properties :attr:`boundary_depth`,
:attr:`maximum_dot_count`, and :attr:`rewrite_tuplets` are passed as
arguments to |abjad.Meter.rewrite_meter()|, see its documentation
for more information. By default, calling the object will first return
the original container and subsequent calls will process it; set
:attr:`process_on_first_call` to ``True`` and the shuffling process
will be applied on the very first call.
>>> container = abjad.Container(
... r"\time 3/4 c'4 d'4 e'4 \time 2/4 f'4 g'4"
... )
>>> shuffler = auxjad.Shuffler(container,
... pitch_only=False,
... preserve_rest_position=True,
... disable_rewrite_meter=False,
... omit_time_signatures=True,
... boundary_depth=0,
... maximum_dot_count=1,
... rewrite_tuplets=False,
... process_on_first_call=True,
... swap_limit=3,
... )
>>> shuffler.pitch_only
False
>>> shuffler.preserve_rest_position
True
>>> shuffler.disable_rewrite_meter
False
>>> shuffler.omit_time_signatures
True
>>> shuffler.boundary_depth
0
>>> shuffler.maximum_dot_count
1
>>> shuffler.rewrite_tuplets
False
>>> shuffler.process_on_first_call
True
>>> shuffler.swap_limit
3
Use the properties below to change these values after initialisation.
>>> shuffler.pitch_only = True
>>> shuffler.preserve_rest_position = False
>>> shuffler.disable_rewrite_meter = True
>>> shuffler.omit_time_signatures = False
>>> shuffler.boundary_depth = 1
>>> shuffler.maximum_dot_count = 2
>>> shuffler.rewrite_tuplets = True
>>> shuffler.process_on_first_call = False
>>> shuffler.swap_limit = None
>>> shuffler.pitch_only
True
>>> shuffler.preserve_rest_position
True
>>> shuffler.disable_rewrite_meter
True
>>> shuffler.omit_time_signatures
False
>>> shuffler.boundary_depth
1
>>> shuffler.maximum_dot_count
2
>>> shuffler.rewrite_tuplets
True
>>> shuffler.process_on_first_call
False
>>> shuffler.swap_limit
None
:attr:`pitch_only`:
By default, the shuffling operation will shuffle logical ties:
>>> container = abjad.Container(r"c'8. d'4 r8 r8. e'16 f'8.")
>>> shuffler = auxjad.Shuffler(container)
>>> notes = shuffler()
>>> staff = abjad.Staff(notes)
>>> abjad.show(staff)
.. docs::
\new Staff
{
\time 4/4
r4
r16
e'16
f'8
~
f'16
d'8.
~
d'16
c'8.
}
.. figure:: ../_images/Shuffler-5j79m0wuxu.png
Setting :attr:`pitch_only` to ``True`` enables pitch mode, so only
pitches are shuffled (and not durations). Note how in the example below
the duration of each leaf is the same as the input container.
>>> container = abjad.Container(r"c'8. d'4 r8 r8. e'16 f'8.")
>>> shuffler = auxjad.Shuffler(container, pitch_only=True)
>>> notes = shuffler()
>>> staff = abjad.Staff(notes)
>>> abjad.show(staff)
.. docs::
\new Staff
{
\time 4/4
f'8.
r4
d'8
~
d'8.
~
c'16
e'8.
}
.. figure:: ../_images/Shuffler-f9jbzqkrkdf.png
.. note::
Altering the value of :attr:`pitch_only`: will replace the original
:attr:`contents`: with the contents of :attr:`current_window`. Note how
in the example below, the shuffled leaves in measure 3 comes from the
previous measure and not from the initial :attr:`contents`:.
>>> container = abjad.Container(r"c'4.. d'16 e'4. f'8")
>>> shuffler = auxjad.Shuffler(container, pitch_only=True)
>>> notes = shuffler.shuffle_n(2)
>>> staff = abjad.Staff(notes)
>>> shuffler.pitch_only = False
>>> notes = shuffler.shuffle_n(2)
>>> staff.append(notes)
>>> auxjad.mutate.remove_repeated_time_signatures(staff[:])
>>> abjad.show(staff)
.. docs::
\new Staff
{
\time 4/4
d'4..
c'16
f'4.
e'8
d'4..
f'16
c'4.
e'8
f'16
d'4..
e'8
c'4.
c'4.
d'8
~
d'4
~
d'16
e'8
f'16
}
.. figure:: ../_images/Shuffler-tyq8y6q8zr9.png
:attr:`swap_limit`:
The attribute :attr:`swap_limit` can be used to set the number of times
that pairs of elements are swapped on a single invocation of the
shuffling process. Set :attr:`swap_limit` to ``None`` to not limit the
shuffling process.
>>> container = abjad.Container(r"c'8 d'8 e'8 f'8 g'8 a'8 b'8 c''8")
>>> shuffler = auxjad.Shuffler(container,
... swap_limit=1,
... )
>>> notes = shuffler.shuffle_n(3)
>>> staff = abjad.Staff(notes)
>>> abjad.show(staff)
.. docs::
\new Staff
{
\time 4/4
c'8
d'8
e'8
g'8
f'8
a'8
b'8
c''8
c'8
c''8
e'8
g'8
f'8
a'8
b'8
d'8
c'8
c''8
e'8
g'8
b'8
a'8
f'8
d'8
}
.. figure:: ../_images/Shuffler-Nbo5S6wcfQ.png
:meth:`rotate`:
Besides shuffling, logical ties and pitches can also be rotated using
the :meth:`rotate` method. Similarly to shuffling, it can be applied to
logical ties or pitches only depending on the property
:attr:`pitch_only`.
>>> container = abjad.Container(
... r"\time 3/4 c'16 d'8. ~ d'4 e'4 r4 f'4 ~ f'8.. g'32"
... )
>>> shuffler = auxjad.Shuffler(container)
>>> notes = shuffler.rotate()
>>> staff = abjad.Staff(notes)
>>> abjad.show(staff)
.. docs::
\new Staff
{
\time 3/4
d'4..
e'16
~
e'8.
r16
r8.
f'16
~
f'4
~
f'8
~
f'32
g'32
c'16
}
.. figure:: ../_images/Shuffler-7vamgsxlr6.png
>>> container = abjad.Container(
... r"\time 3/4 c'16 d'8. ~ d'4 e'4 r4 f'4 ~ f'8.. g'32"
... )
>>> shuffler = auxjad.Shuffler(container, pitch_only=True)
>>> notes = shuffler.rotate()
>>> staff = abjad.Staff(notes)
>>> abjad.show(staff)
.. docs::
\new Staff
{
\time 3/4
d'16
e'8.
~
e'4
r4
f'4
g'4
~
g'8..
c'32
}
.. figure:: ../_images/Shuffler-89cx79bjji8.png
This method can also take the optional parameters ``n_rotations`` and
``anticlockwise``. The first is an :obj:`int` setting the number of
rotations applied to the material, and the second is a :obj:`bool`
setting the direction of the rotation (default ``False``).
>>> container = abjad.Container(
... r"\time 3/4 c'16 d'8. ~ d'4 e'4 r4 f'4 ~ f'8.. g'32"
... )
>>> shuffler = auxjad.Shuffler(container, pitch_only=True)
>>> notes = shuffler.rotate(n_rotations=2, anticlockwise=True)
>>> staff = abjad.Staff(notes)
>>> abjad.show(staff)
.. docs::
\new Staff
{
\time 3/4
f'16
g'8.
~
g'4
c'4
d'4
e'4
~
e'8..
r32
}
.. figure:: ../_images/Shuffler-g6v6wjm12ub.png
:attr:`preserve_rest_position`:
If :attr:`preserve_rest_position` is set to ``True``, the positions of
all rests will remain the same after either shuffling or rotation. In
pitch mode (when :attr:`pitch_only` is set to ``True``), this means
that only the pitched notes will be shuffled or rotated, while the
rests remain in the exact same place.
>>> container = abjad.Container(r"c'8. d'4 r8 r8. e'16 f'8.")
>>> shuffler = auxjad.Shuffler(container,
... pitch_only=True,
... preserve_rest_position=True,
... )
>>> notes = shuffler()
>>> staff = abjad.Staff(notes)
>>> abjad.show(staff)
.. docs::
\new Staff
{
\time 4/4
d'8.
f'4
r8
r8.
c'16
e'8.
}
.. figure:: ../_images/Shuffler-pmou83f7rlj.png
In logical ties mode, the rests will remain at the same index and will
have the same total duration as before, but their position in the
measure might vary since the duration of the pitched logical ties
preceding it might change.
>>> container = abjad.Container(r"c'8. d'4 r8 r8. e'16 f'8.")
>>> shuffler = auxjad.Shuffler(container, preserve_rest_position=True)
>>> notes = shuffler()
>>> staff = abjad.Staff(notes)
>>> abjad.show(staff)
.. docs::
\new Staff
{
\time 4/4
d'4
e'16
r8.
r8
f'8
~
f'16
c'8.
}
.. figure:: ../_images/Shuffler-7hbp2kdpqof.png
:attr:`disable_rewrite_meter`:
If :attr:`disable_rewrite_meter` is set to ``True``, then the automatic
behaviour of rewriting the leaves according to the meter is disabled.
>>> container = abjad.Container(r"c'4 d'8 e'8 f'2")
>>> shuffler = auxjad.Shuffler(container,
... disable_rewrite_meter=True,
... )
>>> notes = shuffler()
>>> staff = abjad.Staff(notes)
>>> abjad.show(staff)
.. docs::
\new Staff
{
\time 4/4
e'8
f'2
c'4
d'8
}
.. figure:: ../_images/Shuffler-tb78izpzvjp.png
:meth:`shuffle_n` and :meth:`rotate_n`:
To output several shuffled containers at once, use the methods
:meth:`shuffle_n` and :meth:`rotate_n`, inputting the desired number of
iterations. :meth:`rotate_n` can also take the optional arguments
``n_rotations`` and ``anticlockwise``, similarly to :meth:`rotate`.
>>> container = abjad.Container(r"c'4 d'8 e'4. f'8. g'16")
>>> shuffler = auxjad.Shuffler(container)
>>> notes = shuffler.shuffle_n(2)
>>> staff = abjad.Staff(notes)
>>> abjad.show(staff)
.. docs::
\new Staff
{
\time 4/4
d'8
g'16
c'16
~
c'8.
f'16
~
f'8
e'4.
g'16
f'8.
e'4
~
e'8
c'4
d'8
}
.. figure:: ../_images/Shuffler-vtia65lbk5.png
>>> container = abjad.Container(r"c'4 d'8 e'4. f'8. g'16")
>>> shuffler = auxjad.Shuffler(container)
>>> notes = shuffler.rotate_n(2)
>>> staff = abjad.Staff(notes)
>>> abjad.show(staff)
.. docs::
\new Staff
{
\time 4/4
d'8
e'4.
f'8.
g'16
c'4
e'4.
f'8
~
f'16
g'16
c'4
d'8
}
.. figure:: ../_images/Shuffler-3dqhy8eoiez.png
:attr:`omit_time_signatures`:
To disable time signatures altogether, initialise this class with the
keyword argument :attr:`omit_time_signatures` set to ``True`` (default
is ``False``), or change the :attr:`omit_time_signatures` property
after initialisation.
>>> container = abjad.Container(r"\time 3/4 c'16 d'4.. e'4 | r4 f'2")
>>> shuffler = auxjad.Shuffler(container,
... omit_time_signatures=True,
... )
>>> notes = shuffler()
>>> staff = abjad.Staff(notes)
>>> abjad.show(staff)
.. docs::
\new Staff
{
\time 4/4
d'4..
e'16
~
e'8.
f'16
~
f'4..
r16
r8.
c'16
}
.. figure:: ../_images/Shuffler-1v3lwhj430b.png
.. tip::
All methods that return an |abjad.Selection| will add an initial time
signature to it. The :meth:`shuffle_n` and :meth:`rotate_n` methods
automatically remove repeated time signatures. When joining selections
output by multiple method calls, use
|auxjad.mutate.remove_repeated_time_signatures()| on the whole
container after fusing the selections to remove any unnecessary time
signature changes.
Time signature changes:
This class handles time signature changes too:
>>> container = abjad.Container(
... r"\time 3/4 c'8. d'4 r8 r8. \time 2/4 e'16 f'4.."
... )
>>> shuffler = auxjad.Shuffler(container)
>>> notes = shuffler.shuffle_n(2)
>>> staff = abjad.Staff(notes)
>>> abjad.show(staff)
.. docs::
\new Staff
{
\time 3/4
e'16
d'8.
~
d'16
f'4..
\time 2/4
c'8.
r16
r4
\time 3/4
c'8.
f'16
~
f'4.
r8
\time 2/4
r8.
d'16
~
d'8.
e'16
}
.. figure:: ../_images/Shuffler-yx11u6o14v.png
Tuplet support:
Tuplets are supported when :attr:`pitch_only` is ``True`` (pitch-only
mode).
>>> container = abjad.Container(
... r"\time 5/4 r4 \times 2/3 {c'4 d'2} e'4. f'8"
... )
>>> shuffler = auxjad.Shuffler(container, pitch_only=True)
>>> notes = shuffler()
>>> staff = abjad.Staff(notes)
>>> abjad.show(staff)
.. docs::
\new Staff
{
\time 5/4
d'4
\times 2/3
{
f'4
c'2
}
e'4.
r8
}
.. figure:: ../_images/Shuffler-mjxubkel8y.png
.. error::
Tuplets are not supported when :attr:`pitch_only` is ``False`` (logical
tie mode). Using a container with tuplets and :attr:`pitch_only` set to
``False`` will raise a :exc:`ValueError` exception.
>>> container = abjad.Container(
...     r"\time 5/4 r4 \times 2/3 {c'4 d'2} e'4. f'8"
... )
>>> shuffler = auxjad.Shuffler(container)
>>> notes = shuffler()
ValueError: 'contents' contain one or more tuplets; tuplets are
currently supported only in pitch-only mode
Indicators:
This class can also handle dynamics and articulations.
>>> container = abjad.Container(
... r"<c' e' g'>4--\p d'8-. e'8-. f'4-^\f r4"
... )
>>> shuffler = auxjad.Shuffler(container)
>>> notes = shuffler.shuffle_n(3)
>>> staff = abjad.Staff(notes)
>>> abjad.show(staff)
.. docs::
\new Staff
{
\time 4/4
e'8
\p
- \staccato
d'8
- \staccato
f'4
\f
- \marcato
<c' e' g'>4
\p
- \tenuto
r4
r4
d'8
- \staccato
f'8
\f
- \marcato
~
f'8
<c' e' g'>4
\p
- \tenuto
e'8
- \staccato
f'4
\f
- \marcato
e'8
\p
- \staccato
<c' e' g'>8
- \tenuto
~
<c' e' g'>8
d'8
- \staccato
r4
}
.. figure:: ../_images/Shuffler-2ibui58pj8w.png
.. tip::
The functions |auxjad.mutate.remove_repeated_dynamics()| and
|auxjad.mutate.reposition_clefs()| can be used to clean the output
and remove repeated dynamics and unnecessary clef changes.
.. warning::
Do note that some elements that span multiple notes (such as ottava
indicators, manual beams, etc.) can become problematic when notes
containing them are split into two. As a rule of thumb, it is always
better to attach those to the music after the fading process has ended.
In the case of shuffling logical ties, slurs and hairpins can also
become a problem, since their start and end position can shift around.
Dynamics are shuffled together with their leaves, so the initial leaf
may lack a dynamic marking.
:attr:`contents`:
Use the property :attr:`contents` to get the input container upon which
the shuffler operates. Notice that :attr:`contents` remains invariant
after any shuffling or rotation operations (use :attr:`current_window`
for the transformed selection of music). :attr:`contents` can be used
to change the |abjad.Container| to be shuffled.
>>> container = abjad.Container(r"c'4 d'4 e'4 f'4")
>>> shuffler = auxjad.Shuffler(container)
>>> abjad.show(shuffler.contents)
.. docs::
{
c'4
d'4
e'4
f'4
}
.. figure:: ../_images/Shuffler-qsee7chymo.png
>>> shuffler()
>>> abjad.show(shuffler.contents)
.. docs::
{
c'4
d'4
e'4
f'4
}
.. figure:: ../_images/Shuffler-ii3fxe001ki.png
>>> shuffler.contents = abjad.Container(r"cs2 ds2")
>>> abjad.show(shuffler.contents)
.. docs::
{
cs2
ds2
}
.. figure:: ../_images/Shuffler-p2vd4mfvucp.png
Tweaking |abjad.Meter.rewrite_meter()|:
This function uses the default logical tie splitting algorithm from
|abjad.Meter.rewrite_meter()|.
>>> container = abjad.Container(r"c'4. d'8 e'2")
>>> shuffler = auxjad.Shuffler(container)
>>> notes = shuffler()
>>> staff = abjad.Staff(notes)
>>> abjad.show(staff)
.. docs::
\new Staff
{
\time 4/4
e'2
c'4.
d'8
}
.. figure:: ../_images/Shuffler-t4lsqxg18ab.png
Set :attr:`boundary_depth` to a different number to change its
behaviour.
>>> shuffler = auxjad.Shuffler(container,
... boundary_depth=1,
... )
>>> notes = shuffler()
>>> staff = abjad.Staff(notes)
>>> abjad.show(staff)
.. docs::
\new Staff
{
\time 4/4
e'2
c'4
~
c'8
d'8
}
.. figure:: ../_images/Shuffler-7na5znnhhwe.png
Other arguments available for tweaking the output of
|abjad.Meter.rewrite_meter()| are :attr:`maximum_dot_count` and
:attr:`rewrite_tuplets`, which work exactly as the identically named
arguments of |abjad.Meter.rewrite_meter()|.
This class also accepts the arguments ``fuse_across_groups_of_beats``,
``fuse_quadruple_meter``, ``fuse_triple_meter``, and
``extract_trivial_tuplets``, which are passed on to
|auxjad.mutate.prettify_rewrite_meter()| (the latter can be disabled
by setting ``prettify_rewrite_meter`` to ``False``). See the
documentation of this function for more details on these arguments.
:attr:`disable_rewrite_meter`:
By default, this class uses the |abjad.Meter.rewrite_meter()|
mutation.
>>> container = abjad.Container(r"c'4 d'8 e'8 f'2")
>>> shuffler = auxjad.Shuffler(container)
>>> notes = shuffler()
>>> staff = abjad.Staff(notes)
>>> abjad.show(staff)
.. docs::
\new Staff
{
\time 4/4
e'8
f'8
~
f'4.
c'8
~
c'8
d'8
}
.. figure:: ../_images/Shuffler-7cfnxx7shci.png
Set :attr:`disable_rewrite_meter` to ``True`` in order to disable this
behaviour.
>>> container = abjad.Container(r"c'4 d'8. e'16 f'2")
>>> abjad.show(container)
.. docs::
{
e'16
f'8.
~
f'4
~
f'16
c'8.
~
c'16
d'8.
}
.. figure:: ../_images/Shuffler-6gm4ev48j9k.png
>>> shuffler = auxjad.Shuffler(container,
... disable_rewrite_meter=True,
... )
>>> notes = shuffler()
>>> staff = abjad.Staff(notes)
>>> abjad.show(staff)
.. docs::
\new Staff
{
\time 4/4
e'16
f'2
c'4
d'8.
}
.. figure:: ../_images/Shuffler-xlr4x3bhj6n.png
Using as iterator:
The instances of this class can also be used as an iterator, which can
then be used in a for loop. Note that unlike the methods
:meth:`shuffle_n` and :meth:`rotate_n`, time signatures are added to
each window returned by the shuffler. Use the function
|auxjad.mutate.remove_repeated_time_signatures()| to clean the output
when using this class in this way. It is also important to note that a
``break`` statement is needed when using this class as an iterator. The
reason is that shuffling is a process that can happen indefinitely
(unlike some of the other classes in this library).
>>> container = abjad.Container(r"\time 3/4 c'4 d'4 e'4")
>>> shuffler = auxjad.Shuffler(container)
>>> staff = abjad.Staff()
>>> for window in shuffler:
... staff.append(window)
... if abjad.get.duration(staff) == abjad.Duration((9, 4)):
... break
>>> auxjad.mutate.remove_repeated_time_signatures(staff[:])
>>> abjad.show(staff)
.. docs::
\new Staff
{
\time 3/4
e'4
c'4
d'4
d'4
c'4
e'4
c'4
e'4
d'4
}
.. figure:: ../_images/Shuffler-3gyz7atvemx.png
"""
### CLASS VARIABLES ###
__slots__ = ('_contents',
'_pitch_only',
'_preserve_rest_position',
'_disable_rewrite_meter',
'_omit_time_signatures',
'_current_window',
'_logical_selections',
'_logical_selections_indeces',
'_pitches',
'_time_signatures',
'_is_first_window',
'_boundary_depth',
'_maximum_dot_count',
'_rewrite_tuplets',
'_process_on_first_call',
'_prettify_rewrite_meter',
'_extract_trivial_tuplets',
'_fuse_across_groups_of_beats',
'_fuse_quadruple_meter',
'_fuse_triple_meter',
'_swap_limit',
)
### INITIALISER ###
def __init__(self,
contents: abjad.Container,
*,
pitch_only: bool = False,
preserve_rest_position: bool = False,
disable_rewrite_meter: bool = False,
omit_time_signatures: bool = False,
boundary_depth: Optional[int] = None,
maximum_dot_count: Optional[int] = None,
rewrite_tuplets: bool = True,
process_on_first_call: bool = True,
prettify_rewrite_meter: bool = True,
extract_trivial_tuplets: bool = True,
fuse_across_groups_of_beats: bool = True,
fuse_quadruple_meter: bool = True,
fuse_triple_meter: bool = True,
swap_limit: Optional[int] = None,
) -> None:
r'Initialises self.'
self.contents = contents
self.pitch_only = pitch_only
self.preserve_rest_position = preserve_rest_position
self.disable_rewrite_meter = disable_rewrite_meter
self.omit_time_signatures = omit_time_signatures
self.boundary_depth = boundary_depth
self.maximum_dot_count = maximum_dot_count
self.rewrite_tuplets = rewrite_tuplets
self.prettify_rewrite_meter = prettify_rewrite_meter
self.extract_trivial_tuplets = extract_trivial_tuplets
self.fuse_across_groups_of_beats = fuse_across_groups_of_beats
self.fuse_quadruple_meter = fuse_quadruple_meter
self.fuse_triple_meter = fuse_triple_meter
self.process_on_first_call = process_on_first_call
self.swap_limit = swap_limit
self._is_first_window = True
### SPECIAL METHODS ###
def __repr__(self) -> str:
r'Returns interpreter representation of :attr:`contents`.'
return abjad.lilypond(self._contents)
def __len__(self) -> int:
r'Returns the number of logical ties of :attr:`contents`.'
return len(self._logical_selections)
def __call__(self) -> abjad.Selection:
r'Calls the shuffling process, returning an |abjad.Selection|'
return self.shuffle()
def __next__(self) -> abjad.Selection:
r"""Calls the shuffling process for one iteration, returning an
|abjad.Selection|.
"""
return self.__call__()
def __iter__(self) -> None:
r'Returns an iterator, allowing instances to be used as iterators.'
return self
### PUBLIC METHODS ###
def shuffle(self) -> abjad.Selection:
r'Shuffles logical ties or pitches of :attr:`contents`.'
if self._is_first_window and not self._process_on_first_call:
if not self._pitch_only:
self._rewrite_logical_selections()
return self.current_window
else:
self._rewrite_pitches()
return self.current_window
else:
if not self._pitch_only:
return self._shuffle_logical_selections()
else:
return self._shuffle_pitches()
def rotate(self,
*,
n_rotations: int = 1,
anticlockwise: bool = False,
) -> abjad.Selection:
r'Rotates logical ties or pitches of :attr:`contents`.'
if not isinstance(n_rotations, int):
raise TypeError("'n_rotations' must be 'int'")
if n_rotations < 1:
raise ValueError("'n_rotations' must be greater than zero")
if not isinstance(anticlockwise, bool):
raise TypeError("'anticlockwise' must be 'bool'")
if self._is_first_window and not self._process_on_first_call:
if not self._pitch_only:
self._rewrite_logical_selections()
return self.current_window
else:
self._rewrite_pitches()
return self.current_window
else:
if not self._pitch_only:
return self._rotate_logical_selections(
n_rotations=n_rotations,
anticlockwise=anticlockwise,
)
else:
return self._rotate_pitches(
n_rotations=n_rotations,
anticlockwise=anticlockwise,
)
def shuffle_n(self,
n: int,
) -> abjad.Selection:
r"""Goes through ``n`` iterations of the shuffling process and outputs
a single |abjad.Selection|.
"""
if not isinstance(n, int):
raise TypeError("argument must be 'int'")
if n < 1:
raise ValueError("argument must be greater than zero")
dummy_container = abjad.Container()
for _ in range(n):
dummy_container.append(self.__call__())
mutate.remove_repeated_time_signatures(dummy_container[:])
mutate.remove_repeated_dynamics(dummy_container[:])
output = dummy_container[:]
dummy_container[:] = []
return output
def rotate_n(self,
n: int,
*,
n_rotations: int = 1,
anticlockwise: bool = False,
) -> abjad.Selection:
r"""Goes through ``n`` iterations of the pitch shuffling process and
outputs a single |abjad.Selection|.
"""
if not isinstance(n, int):
raise TypeError("argument must be 'int'")
if n < 1:
raise ValueError("argument must be greater than zero")
dummy_container = abjad.Container()
for _ in range(n):
dummy_container.append(self.rotate(n_rotations=n_rotations,
anticlockwise=anticlockwise))
mutate.remove_repeated_time_signatures(dummy_container[:])
mutate.remove_repeated_dynamics(dummy_container[:])
output = dummy_container[:]
dummy_container[:] = []
return output
### PRIVATE METHODS ###
def _update_logical_selections(self) -> None:
r'Updates the selection of logical ties of :attr:`contents`.'
self._logical_selections = self._get_logical_selections(
self._contents
)
self._logical_selections_indeces = list(range(self.__len__()))
def _get_pitch_list(self) -> None:
r'Creates a :obj:`list` of all pitches in :attr:`contents`.'
self._pitches = []
for logical_selection in self._logical_selections:
leaf = logical_selection.leaves()[0]
if isinstance(leaf, abjad.Rest):
self._pitches.append(None)
elif isinstance(leaf, abjad.Note):
self._pitches.append(leaf.written_pitch)
elif isinstance(leaf, abjad.Chord):
self._pitches.append(leaf.written_pitches)
def _shuffle_list_preserving_rests(self,
input_list: list[Any],
) -> None:
r'Shuffles a :obj:`list` while keeping rest indeces unchanged.'
dummy_list = [input_list[i] for i in range(len(input_list))
if self._pitches[i] is not None]
self._random_shuffle(dummy_list)
self._replace_list_preserving_rests(dummy_list, input_list)
def _rotate_list_preserving_rests(self,
input_list: list[Any],
*,
n_rotations: int = 1,
anticlockwise: bool = False,
) -> None:
r'Rotates a :obj:`list` while keeping rest indeces unchanged.'
dummy_list = [input_list[i] for i in range(len(input_list))
if self._pitches[i] is not None]
self._rotate_list(dummy_list,
n_rotations=n_rotations,
anticlockwise=anticlockwise,
)
self._replace_list_preserving_rests(dummy_list, input_list)
def _random_shuffle(self,
input_list: list[Any],
) -> None:
r'Random shuffles a :obj:`list`.'
if self._swap_limit is None:
random.shuffle(input_list)
else:
for _ in range(self._swap_limit):
if len(input_list) > 1:
i, j = random.sample(range(len(input_list)), 2)
input_list[i], input_list[j] = input_list[j], input_list[i]
def _replace_list_preserving_rests(self,
input_list: list[Any],
destination_list: list[Any],
) -> None:
r'Substitutes back an altered :obj:`list` while preserving rests.'
counter = 0
for index, pitch in enumerate(self._pitches):
if pitch is not None:
destination_list[index] = input_list[counter]
counter += 1
def _shuffle_logical_selections(self) -> abjad.Selection:
r'Shuffles the logical ties of :attr:`contents`.'
if len(abjad.select(self._contents).tuplets()) > 0:
raise ValueError("'contents' contain one ore more tuplets; "
"tuplets are currently supported only in "
"pitch-only mode")
if not self._preserve_rest_position:
self._random_shuffle(self._logical_selections_indeces)
else:
self._shuffle_list_preserving_rests(
self._logical_selections_indeces
)
self._rewrite_logical_selections()
return self.current_window
def _shuffle_pitches(self) -> abjad.Selection:
r'Shuffles only the pitches of :attr:`contents`.'
if not self._preserve_rest_position:
self._random_shuffle(self._pitches)
else:
self._shuffle_list_preserving_rests(self._pitches)
self._rewrite_pitches()
return self.current_window
def _rotate_logical_selections(self,
*,
n_rotations: int = 1,
anticlockwise: bool = False,
) -> abjad.Selection:
r'Rotates the logical ties of :attr:`contents`.'
if len(abjad.select(self._contents).tuplets()) > 0:
raise ValueError("'contents' contain one ore more tuplets; "
"tuplets are currently supported only in "
"pitch-only mode")
if not self._preserve_rest_position:
self._rotate_list(self._logical_selections_indeces,
n_rotations=n_rotations,
anticlockwise=anticlockwise,
)
else:
self._rotate_list_preserving_rests(
self._logical_selections_indeces,
n_rotations=n_rotations,
anticlockwise=anticlockwise,
)
self._rewrite_logical_selections()
return self.current_window
def _rotate_pitches(self,
*,
n_rotations: int = 1,
anticlockwise: bool = False,
) -> abjad.Selection:
r'Rotates the pitches of :attr:`contents`.'
if not self._preserve_rest_position:
self._rotate_list(self._pitches,
n_rotations=n_rotations,
anticlockwise=anticlockwise,
)
else:
self._rotate_list_preserving_rests(self._pitches,
n_rotations=n_rotations,
anticlockwise=anticlockwise,
)
self._rewrite_pitches()
return self.current_window
def _rewrite_logical_selections(self) -> None:
r'Rewrites the logical selections of the current window.'
# writing dummy_container in shuffled order
dummy_container = abjad.Container()
logical_selections = self._get_logical_selections(
abjad.mutate.copy(self._contents)
)
self._force_dynamics(logical_selections)
for index in self._logical_selections_indeces:
logical_selection = logical_selections[index]
dummy_container.append(logical_selection.leaves())
# splitting leaves at measure line points
abjad.mutate.split(dummy_container[:],
[ts.duration for ts in self._time_signatures],
cyclic=True,
)
# attaching time signature structure
mutate.enforce_time_signature(
dummy_container,
self._time_signatures,
disable_rewrite_meter=True,
)
# handling dynamics and slurs
mutate.reposition_dynamics(dummy_container[:])
mutate.reposition_slurs(dummy_container[:])
# rewrite meter
if not self._disable_rewrite_meter:
mutate.auto_rewrite_meter(
dummy_container,
meter_list=self._time_signatures,
boundary_depth=self._boundary_depth,
maximum_dot_count=self._maximum_dot_count,
rewrite_tuplets=self._rewrite_tuplets,
prettify_rewrite_meter=self._prettify_rewrite_meter,
extract_trivial_tuplets=self._extract_trivial_tuplets,
fuse_across_groups_of_beats=self._fuse_across_groups_of_beats,
fuse_quadruple_meter=self._fuse_quadruple_meter,
fuse_triple_meter=self._fuse_triple_meter,
)
# output
self._is_first_window = False
self._current_window = dummy_container[:]
dummy_container[:] = []
def _rewrite_pitches(self) -> None:
r'Rewrites the pitches of the current window.'
dummy_container = abjad.Container(abjad.mutate.copy(self._contents[:]))
leaf_counter = 0
for pitch, logical_selection in zip(self._pitches,
self._logical_selections,
):
logical_tie = logical_selection.leaves()
for leaf in logical_tie:
if pitch is None:
new_leaf = abjad.Rest(leaf.written_duration)
elif isinstance(pitch, abjad.PitchSegment):
new_leaf = abjad.Chord(pitch, leaf.written_duration)
if (isinstance(leaf, abjad.Rest) and len(logical_tie) > 1
and leaf is not logical_tie[-1]):
abjad.attach(abjad.Tie(), new_leaf)
else:
new_leaf = abjad.Note(pitch, leaf.written_duration)
if (isinstance(leaf, abjad.Rest) and len(logical_tie) > 1
and leaf is not logical_tie[-1]):
abjad.attach(abjad.Tie(), new_leaf)
for indicator in abjad.get.indicators(leaf):
if (isinstance(indicator, (abjad.Tie, abjad.Articulation))
and pitch is None):
continue
if isinstance(indicator, abjad.TimeSignature):
abjad.attach(indicator, new_leaf)
else:
abjad.attach(indicator, new_leaf)
selection = abjad.select(dummy_container).leaf(leaf_counter)
abjad.mutate.replace(selection, new_leaf)
leaf_counter += 1
# attaching time signature structure
mutate.extract_trivial_tuplets(dummy_container[:])
mutate.enforce_time_signature(
dummy_container,
self._time_signatures,
disable_rewrite_meter=True,
)
# output
self._is_first_window = False
self._current_window = dummy_container[:]
dummy_container[:] = []
def _get_lilypond_format(self) -> str:
r'Returns interpreter representation of :attr:`contents`.'
return self.__repr__()
@staticmethod
def _get_logical_selections(container) -> abjad.Selection:
r'Updates the selection of logical ties of a container.'
def group_logical_ties(logical_tie):
if isinstance(logical_tie.head, abjad.Rest):
return True
else:
return logical_tie.head
logical_ties = abjad.select(container).logical_ties()
return logical_ties.group_by(group_logical_ties)
@staticmethod
def _remove_all_time_signatures(container) -> None:
r'Removes all time signatures of an |abjad.Container|.'
for leaf in abjad.select(container).leaves():
if abjad.get.effective(leaf, abjad.TimeSignature):
abjad.detach(abjad.TimeSignature, leaf)
@staticmethod
def _force_dynamics(container) -> None:
logical_ties = abjad.select(container).logical_ties()
for logical_tie in logical_ties[1:]:
if abjad.get.indicator(logical_tie[0], abjad.Dynamic) is None:
index = logical_ties.index(logical_tie)
previous_logical_tie = logical_ties[index - 1]
if (abjad.get.indicator(previous_logical_tie[0], abjad.Dynamic)
is not None):
abjad.attach(abjad.get.indicator(previous_logical_tie[0],
abjad.Dynamic,
),
logical_tie[0],
)
@staticmethod
def _rotate_list(input_list: list[Any],
*,
n_rotations: int = 1,
anticlockwise: bool = False,
) -> None:
r'Rotates a :obj:`list`.'
for _ in range(n_rotations):
if not anticlockwise:
element = input_list.pop(0)
input_list.append(element)
else:
element = input_list.pop(-1)
input_list.insert(0, element)
### PUBLIC PROPERTIES ###
@property
def contents(self) -> abjad.Container:
r'The |abjad.Container| to be shuffled.'
return abjad.mutate.copy(self._contents)
@contents.setter
def contents(self,
contents: abjad.Container,
) -> None:
if not isinstance(contents, abjad.Container):
raise TypeError("'contents' must be 'abjad.Container' or child "
"class")
if not abjad.select(contents).leaves().are_contiguous_logical_voice():
raise ValueError("'contents' must be contiguous logical voice")
if isinstance(contents, abjad.Score):
self._contents = abjad.mutate.copy(contents[0])
elif isinstance(contents, abjad.Tuplet):
self._contents = abjad.Container([abjad.mutate.copy(contents)])
else:
self._contents = abjad.mutate.copy(contents)
dummy_container = abjad.mutate.copy(contents)
self._current_window = dummy_container[:]
dummy_container[:] = []
self._update_logical_selections()
self._get_pitch_list()
self._time_signatures = get.time_signature_list(
self._contents,
do_not_use_none=True,
)
self._is_first_window = True
@property
def pitch_only(self) -> bool:
r"""When ``True``, only the pitches will be shuffled or rotated while
the durations remain the same.
"""
return self._pitch_only
@pitch_only.setter
def pitch_only(self,
pitch_only: bool,
) -> None:
if not isinstance(pitch_only, bool):
raise TypeError("'pitch_only' must be 'bool'")
self._pitch_only = pitch_only
# potentially new logical selections when shifting from pitch-only mode
# to logical selections mode
self._update_logical_selections()
self._get_pitch_list()
self._contents = abjad.Container(
abjad.mutate.copy(self._current_window)
)
@property
def preserve_rest_position(self) -> bool:
r"""When ``True``, shuffle operations will preserve rest positions and
durations.
"""
return self._preserve_rest_position
@preserve_rest_position.setter
def preserve_rest_position(self,
preserve_rest_position: bool,
) -> None:
if not isinstance(preserve_rest_position, bool):
raise TypeError("'preserve_rest_position' must be 'bool'")
self._preserve_rest_position = preserve_rest_position
@property
def disable_rewrite_meter(self) -> bool:
r"""When ``True``, the durations of the notes in the output will not be
rewritten by the |abjad.Meter.rewrite_meter()| mutation.
"""
return self._disable_rewrite_meter
@disable_rewrite_meter.setter
def disable_rewrite_meter(self,
disable_rewrite_meter: bool,
) -> None:
if not isinstance(disable_rewrite_meter, bool):
raise TypeError("'disable_rewrite_meter' must be 'bool'")
self._disable_rewrite_meter = disable_rewrite_meter
@property
def omit_time_signatures(self) -> bool:
r'When ``True``, the output will contain no time signatures.'
return self._omit_time_signatures
@omit_time_signatures.setter
def omit_time_signatures(self,
omit_time_signatures: bool,
) -> None:
if not isinstance(omit_time_signatures, bool):
raise TypeError("'omit_time_signatures' must be 'bool'")
self._omit_time_signatures = omit_time_signatures
@property
def boundary_depth(self) -> Union[int, None]:
r"""Sets the argument ``boundary_depth`` of
|abjad.Meter.rewrite_meter()|.
"""
return self._boundary_depth
@boundary_depth.setter
def boundary_depth(self,
boundary_depth: Optional[int],
) -> None:
if boundary_depth is not None:
if not isinstance(boundary_depth, int):
raise TypeError("'boundary_depth' must be 'int'")
self._boundary_depth = boundary_depth
@property
def maximum_dot_count(self) -> Union[int, None]:
r"""Sets the argument ``maximum_dot_count`` of
|abjad.Meter.rewrite_meter()|.
"""
return self._maximum_dot_count
@maximum_dot_count.setter
def maximum_dot_count(self,
maximum_dot_count: Optional[int],
) -> None:
if maximum_dot_count is not None:
if not isinstance(maximum_dot_count, int):
raise TypeError("'maximum_dot_count' must be 'int'")
self._maximum_dot_count = maximum_dot_count
@property
def rewrite_tuplets(self) -> bool:
r"""Sets the argument ``rewrite_tuplets`` of
|abjad.Meter.rewrite_meter()|.
"""
return self._rewrite_tuplets
@rewrite_tuplets.setter
def rewrite_tuplets(self,
rewrite_tuplets: bool,
) -> None:
if not isinstance(rewrite_tuplets, bool):
raise TypeError("'rewrite_tuplets' must be 'bool'")
self._rewrite_tuplets = rewrite_tuplets
@property
def prettify_rewrite_meter(self) -> bool:
r"""Used to enable or disable the mutation
|auxjad.mutate.prettify_rewrite_meter()| (default ``True``).
"""
return self._prettify_rewrite_meter
@prettify_rewrite_meter.setter
def prettify_rewrite_meter(self,
prettify_rewrite_meter: bool,
) -> None:
if not isinstance(prettify_rewrite_meter, bool):
raise TypeError("'prettify_rewrite_meter' must be 'bool'")
self._prettify_rewrite_meter = prettify_rewrite_meter
@property
def extract_trivial_tuplets(self) -> bool:
r"""Sets the argument ``extract_trivial_tuplets`` of
|auxjad.mutate.prettify_rewrite_meter()|.
"""
return self._extract_trivial_tuplets
@extract_trivial_tuplets.setter
def extract_trivial_tuplets(self,
extract_trivial_tuplets: bool,
) -> None:
if not isinstance(extract_trivial_tuplets, bool):
raise TypeError("'extract_trivial_tuplets' must be 'bool'")
self._extract_trivial_tuplets = extract_trivial_tuplets
@property
def fuse_across_groups_of_beats(self) -> bool:
r"""Sets the argument ``fuse_across_groups_of_beats`` of
|auxjad.mutate.prettify_rewrite_meter()|.
"""
return self._fuse_across_groups_of_beats
@fuse_across_groups_of_beats.setter
def fuse_across_groups_of_beats(self,
fuse_across_groups_of_beats: bool,
) -> None:
if not isinstance(fuse_across_groups_of_beats, bool):
raise TypeError("'fuse_across_groups_of_beats' must be 'bool'")
self._fuse_across_groups_of_beats = fuse_across_groups_of_beats
@property
def fuse_quadruple_meter(self) -> bool:
r"""Sets the argument ``fuse_quadruple_meter`` of
|auxjad.mutate.prettify_rewrite_meter()|.
"""
return self._fuse_quadruple_meter
@fuse_quadruple_meter.setter
def fuse_quadruple_meter(self,
fuse_quadruple_meter: bool,
) -> None:
if not isinstance(fuse_quadruple_meter, bool):
raise TypeError("'fuse_quadruple_meter' must be 'bool'")
self._fuse_quadruple_meter = fuse_quadruple_meter
@property
def fuse_triple_meter(self) -> bool:
r"""Sets the argument ``fuse_triple_meter`` of
|auxjad.mutate.prettify_rewrite_meter()|.
"""
return self._fuse_triple_meter
@fuse_triple_meter.setter
def fuse_triple_meter(self,
fuse_triple_meter: bool,
) -> None:
if not isinstance(fuse_triple_meter, bool):
raise TypeError("'fuse_triple_meter' must be 'bool'")
self._fuse_triple_meter = fuse_triple_meter
@property
def process_on_first_call(self) -> bool:
r"""If ``True`` then :attr:`contents` will be processed in the very
first call.
"""
return self._process_on_first_call
@process_on_first_call.setter
def process_on_first_call(self,
process_on_first_call: bool,
) -> None:
if not isinstance(process_on_first_call, bool):
raise TypeError("'process_on_first_call' must be 'bool'")
self._process_on_first_call = process_on_first_call
@property
def swap_limit(self) -> int:
r"""If :obj:`int` then it dictates how instances of random swapping of
pairs of elements are applied to :attr:`contents` in each iteration. If
``None`` then :attr:`contents` is completely shuffled at each
iteration.
"""
return self._swap_limit
@swap_limit.setter
def swap_limit(self,
swap_limit: Optional[int],
) -> None:
if swap_limit is not None:
if not isinstance(swap_limit, int):
raise TypeError("'swap_limit' must be 'int'")
if swap_limit < 1:
raise ValueError("'swap_limit' must be equal to or greater "
"than 1")
self._swap_limit = swap_limit
@property
def current_window(self) -> abjad.Selection:
r'Read-only property, returns the result of the last operation.'
current_window = abjad.mutate.copy(self._current_window)
if self._omit_time_signatures:
self._remove_all_time_signatures(current_window)
return current_window | PypiClean |
/IteratorDecorator-0.11.tar.gz/IteratorDecorator-0.11/README.rst | .. image:: https://travis-ci.org/stovorov/IteratorDecorator.svg?branch=master
:target: https://travis-ci.org/stovorov/IteratorDecorator
.. image:: https://codecov.io/gh/stovorov/IteratorDecorator/branch/master/graph/badge.svg
:target: https://codecov.io/gh/stovorov/IteratorDecorator
IteratorDecorator
=================
Implementing an iterator in Python is not complicated, but there is still room to make it even
easier. This small library adds an ``iter_attribute`` decorator which lets you quickly choose the
iterable attribute for which an iterator will be implemented.
Requirements
------------
Python3.5+
Example
-------
.. code:: python
from IteratorDecorator import iter_attribute
@iter_attribute('number')
class CaseClass:
def __init__(self):
self.number = [1, 2, 3, 4]
self.attr = ['attr1', 'attr2', 'attr3']
obj = CaseClass()
for num in obj:
print(num)
Installing
----------
In your virtualenv just call:
::
$ pip install IteratorDecorator
Warning
-------
When using PyCharm or mypy you'll probably see issues with the decorated class not being recognized
as an Iterator. That's an issue which I could not overcome yet; it is probably due to the fact that
the object is interpreted statically rather than dynamically. Mypy checks for method definitions in
the class code, which changes at runtime. Since ``__iter__`` and ``__next__`` are added dynamically,
mypy cannot find them defined in objects before an object of the class is created. Possible workarounds for this issue are:
1. Define ``__iter__`` method in class:
.. code:: python
@iter_attribute('attr')
class Test:
def __init__(self) -> None:
self.attr = [1, 2, 3]
def __iter__(self) -> 'Test':
return self
Actually it does not have to be "real" ``__iter__`` since it'll be replaced by decorator implementation, but
the definition is only needed for static checkers.
2. After creating the object, use a cast or an ``assert`` statement denoting that the particular
instance is a ``collections.Iterator``:
.. code:: python
    assert isinstance(my_object, collections.Iterator)
| PypiClean |
/KegBouncer-2.2.4.tar.gz/KegBouncer-2.2.4/changelog.rst | Changelog
=========
2.2.4 released 2019-03-25
#########################
* MAINT: Fix call to deprecated passlib function (fa8440f_)
.. _fa8440f: https://github.com/level12/keg-bouncer/commit/fa8440f
2.2.3 - 2017-04-04
##################
* Integrate helpful fields on user model (f48c745_)
.. _f48c745: https://github.com/level12/keg-bouncer/commit/f48c745
2.2.2 - 2016-12-15
##################
* Fix the docs on PyPi and add a LICENSE file
2.2.1 - 2016-12-14
##################
* MAINT: Remove accidentally bundled wheels from sdist (23fc3b2_)
.. _23fc3b2: https://github.com/level12/keg-bouncer/commit/23fc3b2
2.2.0 - 2016-12-14
##################
* SEC: Enforce minimum key length for tokens (d9ed567_)
* MAINT: Replace the PyCrypto with cryptography (0c36f53_)
* Merge pull request #19 from level12/gh-18-docker-ci-tests (a878053_)
* Merge pull request #15 from level12/fix-ci-python-3.4-installation (b76df21_)
* Merge addition of codecov.yaml configuration (51d0614_)
* Merge enhancement to keep password history ordered (126e2df_)
* Merge new design with password and login history mixins (f794644_)
.. _d9ed567: https://github.com/level12/keg-bouncer/commit/d9ed567
.. _0c36f53: https://github.com/level12/keg-bouncer/commit/0c36f53
.. _a878053: https://github.com/level12/keg-bouncer/commit/a878053
.. _b76df21: https://github.com/level12/keg-bouncer/commit/b76df21
.. _51d0614: https://github.com/level12/keg-bouncer/commit/51d0614
.. _126e2df: https://github.com/level12/keg-bouncer/commit/126e2df
.. _f794644: https://github.com/level12/keg-bouncer/commit/f794644
2.1.0
#####
* Dropped Python 3.4 support
2.0.0
#####
* Redesign API
* Add API for verifying passwords and keeping password history
* Add API for keeping login history
1.0.0
#####
* Support groups/bundles/permissions.
* Support alembic integration
| PypiClean |
/DI_engine-0.4.9-py3-none-any.whl/ding/reward_model/gail_irl_model.py | from typing import List, Dict, Any
import pickle
import random
from collections.abc import Iterable
from easydict import EasyDict
import torch
import torch.nn as nn
import torch.optim as optim
from ding.utils import REWARD_MODEL_REGISTRY
from .base_reward_model import BaseRewardModel
import torch.nn.functional as F
from functools import partial
def concat_state_action_pairs(iterator):
    """
    Overview:
        Concatenate state and action pairs from input.
    Arguments:
        - iterator (:obj:`Iterable`): Iterables with at least ``obs`` and ``action`` tensor keys.
    Returns:
        - res (:obj:`list` of :obj:`torch.Tensor`): One concatenated state-action tensor \
            per input item.
    """
    assert isinstance(iterator, Iterable)
    # Flatten obs to 1D so that 3d observations can be concatenated with actions,
    # and cast actions to float so discrete (integer) actions concatenate cleanly.
    return [
        torch.cat([item['obs'].flatten(), item['action'].float()], dim=-1)
        for item in iterator
    ]
def concat_state_action_pairs_one_hot(iterator, action_size: int):
    """
    Overview:
        Concatenate state and action pairs from input. Action values are one-hot encoded.
    Arguments:
        - iterator (:obj:`Iterable`): Iterables with at least ``obs`` and ``action`` tensor keys.
        - action_size (:obj:`int`): Size of the discrete action space, i.e. the length \
            of the one-hot encoding.
    Returns:
        - res (:obj:`list` of :obj:`torch.Tensor`): One concatenated state-action tensor \
            per input item.
    """
    assert isinstance(iterator, Iterable)
    res = []
    for item in iterator:
        # Flatten obs to 1D so that 3d observations can be concatenated with actions.
        state = item['obs'].flatten()
        # One-hot encode the (scalar) action value.
        one_hot = torch.Tensor([int(i == item['action']) for i in range(action_size)])
        res.append(torch.cat([state, one_hot], dim=-1))
    return res
class RewardModelNetwork(nn.Module):
    """
    Two-layer MLP discriminator: Linear -> Tanh -> Linear -> Sigmoid.
    The sigmoid keeps the output in (0, 1), so it can be read as a probability score
    for the input state-action pair.
    """

    def __init__(self, input_size: int, hidden_size: int, output_size: int) -> None:
        super(RewardModelNetwork, self).__init__()
        # Attribute names (l1, l2, a1, a2) are part of the state_dict layout; keep them stable.
        self.l1 = nn.Linear(input_size, hidden_size)
        self.l2 = nn.Linear(hidden_size, output_size)
        self.a1 = nn.Tanh()
        self.a2 = nn.Sigmoid()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Equivalent to applying l1, a1, l2, a2 in sequence.
        return self.a2(self.l2(self.a1(self.l1(x))))
class AtariRewardModelNetwork(nn.Module):
    """
    Convolutional discriminator for stacked-frame (Atari-style) observations.
    The input vector packs a flattened (4, 84, 84) observation followed by a one-hot
    encoded action; the network scores the pair with a sigmoid output in (0, 1).
    """

    def __init__(self, input_size: int, action_size: int) -> None:
        super(AtariRewardModelNetwork, self).__init__()
        # input_size is the observation shape as a list, e.g. [4, 84, 84].
        self.input_size = input_size
        self.action_size = action_size
        # Attribute names below are part of the state_dict layout; keep them stable.
        self.conv1 = nn.Conv2d(4, 16, 7, stride=3)
        self.conv2 = nn.Conv2d(16, 16, 5, stride=2)
        self.conv3 = nn.Conv2d(16, 16, 3, stride=1)
        self.conv4 = nn.Conv2d(16, 16, 3, stride=1)
        self.fc1 = nn.Linear(784, 64)
        # The action one-hot is concatenated onto the 64-dim feature vector.
        self.fc2 = nn.Linear(64 + self.action_size, 1)
        self.a = nn.Sigmoid()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x = [B, 4 x 84 x 84 + self.action_size]; the trailing action_size entries
        # hold the one-hot action, the rest the flattened observation.
        actions = x[:, -self.action_size:]  # [B, self.action_size]
        feat = x[:, :-self.action_size].reshape([-1] + self.input_size)  # [B, 4, 84, 84]
        for conv in (self.conv1, self.conv2, self.conv3, self.conv4):
            feat = F.leaky_relu(conv(feat))
        feat = F.leaky_relu(self.fc1(feat.reshape(-1, 784)))
        score = self.fc2(torch.cat([feat, actions], dim=-1))
        return self.a(score)
@REWARD_MODEL_REGISTRY.register('gail')
class GailRewardModel(BaseRewardModel):
    """
    Overview:
        The Gail reward model class (https://arxiv.org/abs/1606.03476)
    Interface:
        ``estimate``, ``train``, ``load_expert_data``, ``collect_data``, ``clear_data``, \
        ``__init__``, ``state_dict``, ``load_state_dict``, ``learn``
    Config:
        == ==================== ======== ============= =================================== =======================
        ID Symbol               Type     Default Value Description                         Other(Shape)
        == ==================== ======== ============= =================================== =======================
        1  ``type``             str      gail          | RL policy register name, refer    | this arg is optional,
                                                       | to registry ``POLICY_REGISTRY``   | a placeholder
        2  | ``expert_data_``   str      expert_data.  | Path to the expert dataset        | Should be a '.pkl'
           | ``path``                    .pkl          |                                   | file
        3  | ``learning_rate``  float    0.001         | The step size of gradient descent |
        4  | ``update_per_``    int      100           | Number of updates per collect     |
           | ``collect``                               |                                   |
        5  | ``batch_size``     int      64            | Training batch size               |
        6  | ``input_size``     int                    | Size of the input:                |
           |                                           | obs_dim + act_dim                 |
        7  | ``target_new_``    int      64            | Collect steps per iteration       |
           | ``data_count``                            |                                   |
        8  | ``hidden_size``    int      128           | Linear model hidden size          |
        9  | ``collect_count``  int      100000        | Expert dataset size               | One entry is a (s,a)
           |                                           |                                   | tuple
        10 | ``clear_buffer_``  int      1             | clear buffer per fixed iters      | make sure replay
           | ``per_iters``                             |                                   | buffer's data count
           |                                           |                                   | isn't too few.
           |                                           |                                   | (code work in entry)
        == ==================== ======== ============= =================================== =======================
    """
    config = dict(
        # (str) RL policy register name, refer to registry ``POLICY_REGISTRY``.
        type='gail',
        # (float) The step size of gradient descent.
        learning_rate=1e-3,
        # (int) How many updates(iterations) to train after collector's one collection.
        # Bigger "update_per_collect" means bigger off-policy.
        # collect data -> update policy-> collect data -> ...
        update_per_collect=100,
        # (int) How many samples in a training batch.
        batch_size=64,
        # (int) Size of the input: obs_dim + act_dim.
        input_size=4,
        # (int) Collect steps per iteration.
        target_new_data_count=64,
        # (int) Linear model hidden size.
        hidden_size=128,
        # (int) Expert dataset size.
        collect_count=100000,
        # (int) Clear buffer per fixed iters.
        clear_buffer_per_iters=1,
    )

    def __init__(self, config: EasyDict, device: str, tb_logger: 'SummaryWriter') -> None:  # noqa
        """
        Overview:
            Initialize ``self.`` See ``help(type(self))`` for accurate signature.
        Arguments:
            - cfg (:obj:`EasyDict`): Training config
            - device (:obj:`str`): Device usage, i.e. "cpu" or "cuda"
            - tb_logger (:obj:`SummaryWriter`): Logger, defaultly set as 'SummaryWriter' for model summary
        """
        super(GailRewardModel, self).__init__()
        self.cfg = config
        assert device in ["cpu", "cuda"] or "cuda" in device
        self.device = device
        self.tb_logger = tb_logger
        obs_shape = config.input_size
        if isinstance(obs_shape, int) or len(obs_shape) == 1:
            # Vector observations: simple MLP discriminator on concatenated (s, a).
            self.reward_model = RewardModelNetwork(config.input_size, config.hidden_size, 1)
            self.concat_state_action_pairs = concat_state_action_pairs
        elif len(obs_shape) == 3:
            # Image observations (e.g. Atari): conv discriminator with one-hot actions.
            action_shape = self.cfg.action_size
            self.reward_model = AtariRewardModelNetwork(config.input_size, action_shape)
            self.concat_state_action_pairs = partial(concat_state_action_pairs_one_hot, action_size=action_shape)
        # NOTE(review): obs shapes of other lengths (e.g. 2) fall through without creating
        # ``self.reward_model``, which would fail on the next line -- confirm this is intended.
        self.reward_model.to(self.device)
        self.expert_data = []
        self.train_data = []
        self.expert_data_loader = None
        self.opt = optim.Adam(self.reward_model.parameters(), config.learning_rate)
        self.train_iter = 0
        self.load_expert_data()

    def load_expert_data(self) -> None:
        """
        Overview:
            Getting the expert data from ``config.data_path`` attribute in self
        Effects:
            This is a side effect function which updates the expert data attribute \
            (i.e. ``self.expert_data``) with ``fn:concat_state_action_pairs``
        """
        # NOTE(review): this reads ``cfg.data_path``, which is not part of the default config
        # above -- presumably injected by the entry point; verify against the caller.
        with open(self.cfg.data_path + '/expert_data.pkl', 'rb') as f:
            self.expert_data_loader: list = pickle.load(f)
        self.expert_data = self.concat_state_action_pairs(self.expert_data_loader)

    def state_dict(self) -> Dict[str, Any]:
        """Return a serializable dict holding the discriminator weights under key 'model'."""
        return {
            'model': self.reward_model.state_dict(),
        }

    def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        """Load discriminator weights from a dict produced by ``state_dict``."""
        self.reward_model.load_state_dict(state_dict['model'])

    def learn(self, train_data: torch.Tensor, expert_data: torch.Tensor) -> float:
        """
        Overview:
            Helper function for ``train`` which calculates loss for train data and expert data.
        Arguments:
            - train_data (:obj:`torch.Tensor`): Data used for training
            - expert_data (:obj:`torch.Tensor`): Expert data
        Returns:
            - Combined loss calculated of reward model from using ``train_data`` and ``expert_data``.
        """
        # calculate loss, here are some hyper-param
        # The discriminator is pushed towards 1 on policy data and 0 on expert data:
        # this minimizes -(log D(policy) + log(1 - D(expert))); 1e-8 guards against log(0).
        out_1: torch.Tensor = self.reward_model(train_data)
        loss_1: torch.Tensor = torch.log(out_1 + 1e-8).mean()
        out_2: torch.Tensor = self.reward_model(expert_data)
        loss_2: torch.Tensor = torch.log(1 - out_2 + 1e-8).mean()
        # log(x) with 0<x<1 is negative, so to reduce this loss we have to minimize the opposite
        loss: torch.Tensor = -(loss_1 + loss_2)
        self.opt.zero_grad()
        loss.backward()
        self.opt.step()
        return loss.item()

    def train(self) -> None:
        """
        Overview:
            Training the Gail reward model. The training and expert data are randomly sampled with designated\
            batch size abstracted from the ``batch_size`` attribute in ``self.cfg`` and \
            correspondingly, the ``expert_data`` as well as ``train_data`` attributes initialized in ``self``.
        Effects:
            - This is a side effect function which updates the reward model and increment the train iteration count.
        """
        for _ in range(self.cfg.update_per_collect):
            sample_expert_data: list = random.sample(self.expert_data, self.cfg.batch_size)
            sample_train_data: list = random.sample(self.train_data, self.cfg.batch_size)
            sample_expert_data = torch.stack(sample_expert_data).to(self.device)
            sample_train_data = torch.stack(sample_train_data).to(self.device)
            loss = self.learn(sample_train_data, sample_expert_data)
            self.tb_logger.add_scalar('reward_model/gail_loss', loss, self.train_iter)
            self.train_iter += 1

    def estimate(self, data: list) -> List[Dict]:
        """
        Overview:
            Estimate reward by rewriting the reward key in each row of the data.
        Arguments:
            - data (:obj:`list`): the list of data used for estimation, with at least \
                 ``obs`` and ``action`` keys.
        Effects:
            - This is a side effect function which updates the reward values in place.
        """
        # NOTE: deepcopy reward part of data is very important,
        # otherwise the reward of data in the replay buffer will be incorrectly modified.
        train_data_augmented = self.reward_deepcopy(data)
        res = self.concat_state_action_pairs(train_data_augmented)
        res = torch.stack(res).to(self.device)
        with torch.no_grad():
            reward = self.reward_model(res).squeeze(-1).cpu()
        reward = torch.chunk(reward, reward.shape[0], dim=0)
        # GAIL reward is -log(D(s, a)): a low discriminator score yields a high reward.
        for item, rew in zip(train_data_augmented, reward):
            item['reward'] = -torch.log(rew + 1e-8)
        return train_data_augmented

    def collect_data(self, data: list) -> None:
        """
        Overview:
            Collecting training data formatted by ``fn:concat_state_action_pairs``.
        Arguments:
            - data (:obj:`Any`): Raw training data (e.g. some form of states, actions, obs, etc)
        Effects:
            - This is a side effect function which updates the data attribute in ``self``
        """
        self.train_data.extend(self.concat_state_action_pairs(data))

    def clear_data(self) -> None:
        """
        Overview:
            Clearing training data. \
            This is a side effect function which clears the data attribute in ``self``
        """
        self.train_data.clear()
/GalSim-2.4.11-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl/galsim/config/output.py |
import os
import logging
from .util import LoggerWrapper, UpdateNProc, CopyConfig, MultiProcess, SetupConfigRNG
from .util import RetryIO, SetDefaultExt
from .input import ProcessInput
from .extra import valid_extra_outputs, SetupExtraOutput, WriteExtraOutputs
from .extra import AddExtraOutputHDUs, CheckNoExtraOutputHDUs
from .value import ParseValue, CheckAllParams
from .image import BuildImage, GetNObjForImage
from ..errors import GalSimConfigError, GalSimConfigValueError
from ..utilities import ensure_dir
from ..fits import writeMulti
# This file handles building the output files according to the specifications in config['output'].
# This file includes the basic functionality, but it calls out to helper functions for the
# different types of output files. It includes the implementation of the default output type,
# 'Fits'. See output_multifits.py for 'MultiFits' and output_datacube.py for 'DataCube'.
# This module-level dict will store all the registered output types.
# See the RegisterOutputType function at the end of this file.
# The keys are the (string) names of the output types, and the values will be builder objects
# (instances of OutputBuilder or a subclass) that will perform the different stages of
# processing to construct and write the output file(s).
valid_output_types = {}
def BuildFiles(nfiles, config, file_num=0, logger=None, except_abort=False):
    """
    Build a number of output files as specified in config.

    Parameters:
        nfiles:        The number of files to build.
        config:        A configuration dict.
        file_num:      If given, the first file_num. [default: 0]
        logger:        If given, a logger object to log progress. [default: None]
        except_abort:  Whether to abort processing when a file raises an exception (True)
                       or just report errors and continue on (False). [default: False]

    Returns:
        the final config dict that was used.
    """
    logger = LoggerWrapper(logger)
    import time
    t1 = time.time()

    # The next line relies on getting errors when the rng is undefined. However, the default
    # rng is None, which is a valid thing to construct a Deviate object from. So for now,
    # set the rng to object() to make sure we get errors where we are expecting to.
    config['rng'] = object()

    # Process the input field for the first file. Often there are "safe" input items
    # that won't need to be reprocessed each time. So do them here once and keep them
    # in the config for all file_nums. This is more important if nproc != 1.
    ProcessInput(config, logger=logger, safe_only=True)

    jobs = []  # Will be a list of the kwargs to use for each job
    info = []  # Will be a list of (file_num, file_name) corresponding to each job.

    # Count from 0 to make sure image_num, etc. get counted right. We'll start actually
    # building the files at first_file_num.
    first_file_num = file_num
    file_num = 0
    image_num = 0
    obj_num = 0

    # Figure out how many processes we will use for building the files.
    if 'output' not in config: config['output'] = {}
    output = config['output']
    if nfiles > 1 and 'nproc' in output:
        nproc = ParseValue(output, 'nproc', config, int)[0]
        # Update this in case the config value is -1
        nproc = UpdateNProc(nproc, nfiles, config, logger)
    else:
        nproc = 1

    # We'll want a pristine version later to give to the workers.
    orig_config = CopyConfig(config)

    if 'timeout' in output:
        timeout = ParseValue(output, 'timeout', config, float)[0]
    else:
        timeout = 3600

    if nfiles == 0:
        logger.error("No files were made, since nfiles == 0.")
        return orig_config

    # First pass (serial): work out each file's name and its starting
    # (file_num, image_num, obj_num) so every job is self-contained for the workers.
    for k in range(nfiles + first_file_num):
        SetupConfigFileNum(config, file_num, image_num, obj_num, logger)
        builder = valid_output_types[output['type']]
        builder.setup(output, config, file_num, logger)
        # Process the input fields that might be relevant at file scope:
        ProcessInput(config, logger=logger, file_scope_only=True)
        # Get the number of objects in each image for this file.
        nobj = GetNObjForFile(config, file_num, image_num, logger=logger, approx=True)

        # The kwargs to pass to BuildFile
        kwargs = {
            'file_num' : file_num,
            'image_num' : image_num,
            'obj_num' : obj_num
        }

        if file_num >= first_file_num:
            # Get the file_name here, in case it needs to create directories, which is not
            # safe to do with multiple processes. (At least not without extra code in the
            # getFilename function...)
            file_name = builder.getFilename(output, config, logger)
            jobs.append(kwargs)
            info.append( (file_num, file_name) )

        # nobj is a list of nobj for each image in that file.
        # So len(nobj) = nimages and sum(nobj) is the total number of objects
        # This gets the values of image_num and obj_num ready for the next loop.
        file_num += 1
        image_num += len(nobj)
        obj_num += sum(nobj)

    # Callback run after each successful job: log the timing for the file.
    def done_func(logger, proc, k, result, t2):
        file_num, file_name = info[k]
        file_name2, t = result  # This is the t for which 0 means the file was skipped.
        if file_name2 != file_name: # pragma: no cover (I think this should never happen.)
            # NOTE(review): the %s placeholders are not interpolated here -- these are
            # logger-style args passed to an exception constructor; verify GalSimConfigError usage.
            raise GalSimConfigError("Files seem to be out of sync. %s != %s", file_name, file_name2)
        if t != 0 and logger:
            if proc is None: s0 = ''
            else: s0 = '%s: '%proc
            logger.warning(s0 + 'File %d = %s: time = %f sec', file_num, file_name, t)

    # Callback run when a job raises: report and either abort or continue per except_abort.
    def except_func(logger, proc, k, e, tr):
        file_num, file_name = info[k]
        if proc is None: s0 = ''
        else: s0 = '%s: '%proc
        logger.error(s0 + 'Exception caught for file %d = %s', file_num, file_name)
        if except_abort:
            logger.debug('%s',tr)
            logger.error('File %s not written.',file_name)
        else:
            logger.warning('%s',tr)
            logger.error('File %s not written! Continuing on...',file_name)

    # Convert to the tasks structure we need for MultiProcess
    # Each task is a list of (job, k) tuples. In this case, we only have one job per task.
    tasks = [ [ (job, k) ] for (k, job) in enumerate(jobs) ]

    results = MultiProcess(nproc, orig_config, BuildFile, tasks, 'file',
                           logger=logger, timeout=timeout,
                           done_func=done_func, except_func=except_func,
                           except_abort=except_abort)

    t2 = time.time()

    if len(results) == 0:
        nfiles_written = 0
    else:
        fnames, times = zip(*results)
        # A time of 0 marks a skipped file; only count files actually written.
        nfiles_written = sum([ t!=0 for t in times])

    if nfiles_written == 0:
        logger.error('No files were written. All were either skipped or had errors.')
    else:
        if nfiles_written > 1 and nproc != 1:
            logger.warning('Total time for %d files with %d processes = %f sec',
                           nfiles_written,nproc,t2-t1)
        logger.warning('Done building files')

    # Return the config used for the run - this may be useful since one can
    # save information here in e.g. custom output types
    return orig_config
output_ignore = [ 'nproc', 'timeout', 'skip', 'noclobber', 'retry_io' ]
def BuildFile(config, file_num=0, image_num=0, obj_num=0, logger=None):
    """
    Build an output file as specified in config.

    Parameters:
        config:     A configuration dict.
        file_num:   If given, the current file_num. [default: 0]
        image_num:  If given, the current image_num. [default: 0]
        obj_num:    If given, the current obj_num. [default: 0]
        logger:     If given, a logger object to log progress. [default: None]

    Returns:
        (file_name, t), a tuple of the file name and the time taken to build file
        Note: t==0 indicates that this file was skipped.
    """
    logger = LoggerWrapper(logger)
    import time
    t1 = time.time()

    SetupConfigFileNum(config, file_num, image_num, obj_num, logger)
    output = config['output']
    output_type = output['type']
    builder = valid_output_types[output_type]
    builder.setup(output, config, file_num, logger)

    # Put these values in the config dict so we won't have to run them again later if
    # we need them. e.g. ExtraOutput processing uses these.
    nobj = GetNObjForFile(config, file_num, image_num, logger=logger)
    nimages = len(nobj)
    config['nimages'] = nimages
    config['nobj'] = nobj
    logger.debug('file %d: BuildFile with type=%s to build %d images, starting with %d',
                 file_num,output_type,nimages,image_num)

    # Make sure the inputs and extra outputs are set up properly.
    ProcessInput(config, logger=logger)
    SetupExtraOutput(config, logger=logger)

    # Get the file name
    file_name = builder.getFilename(output, config, logger)

    # Check if we ought to skip this file
    if 'skip' in output and ParseValue(output, 'skip', config, bool)[0]:
        logger.warning('Skipping file %d = %s because output.skip = True',file_num,file_name)
        return file_name, 0  # Note: time=0 is the indicator that a file was skipped.
    if ('noclobber' in output
            and ParseValue(output, 'noclobber', config, bool)[0]
            and os.path.isfile(file_name)):
        logger.warning('Skipping file %d = %s because output.noclobber = True'
                       ' and file exists',file_num,file_name)
        return file_name, 0

    if logger.isEnabledFor(logging.DEBUG):
        logger.debug('file %d: file_name = %s',file_num,file_name)
    else:
        logger.warning('Start file %d = %s', file_num, file_name)

    # Build the images; extra-output keys are valid in 'output', so ignore them here.
    ignore = output_ignore + list(valid_extra_outputs)
    data = builder.buildImages(output, config, file_num, image_num, obj_num, ignore, logger)

    # If any images came back as None, then remove them, since they cannot be written.
    data = [ im for im in data if im is not None ]
    if len(data) == 0:
        logger.warning('Skipping file %d = %s because all images were None',file_num,file_name)
        return file_name, 0

    # Go back to file_num as the default index_key.
    config['index_key'] = 'file_num'

    data = builder.addExtraOutputHDUs(config, data, logger)

    if 'retry_io' in output:
        ntries = ParseValue(output,'retry_io',config,int)[0]
        # This is how many _re_-tries. Do at least 1, so ntries is 1 more than this.
        ntries = ntries + 1
    else:
        ntries = 1

    # Write out the data (retrying I/O failures up to ntries times).
    args = (data, file_name, output, config, logger)
    RetryIO(builder.writeFile, args, ntries, file_name, logger)
    logger.debug('file %d: Wrote %s to file %r',file_num,output_type,file_name)

    builder.writeExtraOutputs(config, data, logger)

    t2 = time.time()
    return file_name, t2-t1
def GetNFiles(config, logger=None):
    """
    Get the number of files that will be made, based on the information in the config dict.

    Parameters:
        config:     The configuration dict.
        logger:     If given, a logger object to log progress. [default: None]

    Returns:
        the number of files
    """
    output = config.get('output', {})
    output_type = output.get('type', 'Fits')
    builder = valid_output_types.get(output_type)
    if builder is None:
        raise GalSimConfigValueError("Invalid output.type.", output_type,
                                     list(valid_output_types.keys()))
    return builder.getNFiles(output, config, logger=logger)
def GetNImagesForFile(config, file_num, logger=None):
    """
    Get the number of images that will be made for the file number file_num, based on the
    information in the config dict.

    Parameters:
        config:     The configuration dict.
        file_num:   The current file number.
        logger:     If given, a logger object to log progress. [default: None]

    Returns:
        the number of images
    """
    output = config.get('output', {})
    output_type = output.get('type', 'Fits')
    builder = valid_output_types.get(output_type)
    if builder is None:
        raise GalSimConfigValueError("Invalid output.type.", output_type,
                                     list(valid_output_types.keys()))
    return builder.getNImages(output, config, file_num, logger=logger)
def GetNObjForFile(config, file_num, image_num, logger=None, approx=False):
    """
    Get the number of objects that will be made for each image built as part of the file file_num,
    which starts at image number image_num, based on the information in the config dict.

    Parameters:
        config:     The configuration dict.
        file_num:   The current file number.
        image_num:  The current image number.
        logger:     If given, a logger object to log progress. [default: None]
        approx:     Whether an approximate/overestimate is ok [default: False]

    Returns:
        a list of the number of objects in each image [ nobj0, nobj1, nobj2, ... ]
    """
    output = config.get('output', {})
    output_type = output.get('type', 'Fits')
    builder = valid_output_types.get(output_type)
    if builder is None:
        raise GalSimConfigValueError("Invalid output.type.", output_type,
                                     list(valid_output_types.keys()))
    return builder.getNObjPerImage(output, config, file_num, image_num,
                                   logger=logger, approx=approx)
def SetupConfigFileNum(config, file_num, image_num, obj_num, logger=None):
    """Do the basic setup of the config dict at the file processing level.

    Includes:
    - Set config['file_num'] = file_num
    - Set config['image_num'] = image_num
    - Set config['obj_num'] = obj_num
    - Set config['index_key'] = 'file_num'
    - Set config['start_image_num'] = image_num
    - Set config['start_obj_num'] = obj_num
    - Make sure config['output'] exists
    - Set default config['output']['type'] to 'Fits' if not specified
    - Check that the specified output type is valid.

    Parameters:
        config:     A configuration dict.
        file_num:   The current file_num.
        image_num:  The current image_num.
        obj_num:    The current obj_num.
        logger:     If given, a logger object to log progress. [default: None]
    """
    logger = LoggerWrapper(logger)
    config.update(
        file_num=file_num,
        start_obj_num=obj_num,
        start_image_num=image_num,
        image_num=image_num,
        obj_num=obj_num,
        index_key='file_num',
    )
    output = config.setdefault('output', {})
    output_type = output.setdefault('type', 'Fits')
    if output_type not in valid_output_types:
        raise GalSimConfigValueError("Invalid output.type.", output_type,
                                     list(valid_output_types.keys()))
class OutputBuilder:
"""A base class for building and writing the output objects.
The base class defines the call signatures of the methods that any derived class should follow.
It also includes the implementation of the default output type: Fits.
"""
# A class attribute that sub-classes may override.
default_ext = '.fits'
def setup(self, config, base, file_num, logger):
"""Do any necessary setup at the start of processing a file.
The base class just calls SetupConfigRNG, but this provides a hook for sub-classes to
do more things before any processing gets started on this file.
Parameters:
config: The configuration dict for the output type.
base: The base configuration dict.
file_num: The current file_num.
logger: If given, a logger object to log progress.
"""
seed = SetupConfigRNG(base, logger=logger)
logger.debug('file %d: seed = %d',file_num,seed)
def getFilename(self, config, base, logger):
"""Get the file_name for the current file being worked on.
Note that the base class defines a default extension = '.fits'.
This can be overridden by subclasses by changing the default_ext property.
Parameters:
config: The configuration dict for the output type.
base: The base configuration dict.
logger: If given, a logger object to log progress.
Returns:
the filename to build.
"""
if 'file_name' in config:
SetDefaultExt(config['file_name'], self.default_ext)
file_name = ParseValue(config, 'file_name', base, str)[0]
elif 'root' in base and self.default_ext is not None:
# If a file_name isn't specified, we use the name of the config file + '.fits'
file_name = base['root'] + self.default_ext
else:
raise GalSimConfigError(
"No file_name specified and unable to generate it automatically.")
# Prepend a dir to the beginning of the filename if requested.
if 'dir' in config:
dir = ParseValue(config, 'dir', base, str)[0]
file_name = os.path.join(dir,file_name)
ensure_dir(file_name)
return file_name
def buildImages(self, config, base, file_num, image_num, obj_num, ignore, logger):
"""Build the images for output.
In the base class, this function just calls BuildImage to build the single image to
put in the output file. So the returned list only has one item.
Parameters:
config: The configuration dict for the output field.
base: The base configuration dict.
file_num: The current file_num.
image_num: The current image_num.
obj_num: The current obj_num.
ignore: A list of parameters that are allowed to be in config that we can
ignore here. i.e. it won't be an error if they are present.
logger: If given, a logger object to log progress.
Returns:
a list of the images built
"""
# There are no extra parameters to get, so just check that there are no invalid parameters
# in the config dict.
ignore += [ 'file_name', 'dir', 'nfiles' ]
CheckAllParams(config, ignore=ignore)
image = BuildImage(base, image_num, obj_num, logger=logger)
return [ image ]
def getNFiles(self, config, base, logger=None):
"""Returns the number of files to be built.
In the base class, this is just output.nfiles.
Parameters:
config: The configuration dict for the output field.
base: The base configuration dict.
logger: If given, a logger object to log progress.
Returns:
the number of files to build.
"""
if 'nfiles' in config:
return ParseValue(config, 'nfiles', base, int)[0]
else:
return 1
def getNImages(self, config, base, file_num, logger=None):
"""Returns the number of images to be built for a given ``file_num``.
In the base class, we only build a single image, so it returns 1.
Parameters:
config: The configuration dict for the output field.
base: The base configuration dict.
file_num: The current file number.
logger: If given, a logger object to log progress.
Returns:
the number of images to build.
"""
return 1
def getNObjPerImage(self, config, base, file_num, image_num, logger=None, approx=False):
    """Return the object count for every image built as part of file
    ``file_num``, which starts at image number ``image_num``.

    Parameters:
        config:     The configuration dict.
        base:       The base configuration dict.
        file_num:   The current file number.
        image_num:  The current image number (the first one for this file).
        logger:     If given, a logger object to log progress.
        approx:     Whether an approximate/overestimate is ok [default: False]

    Returns:
        a list of the number of objects in each image [ nobj0, nobj1, ... ]
    """
    n_images = self.getNImages(config, base, file_num, logger=logger)
    counts = []
    for offset in range(n_images):
        counts.append(GetNObjForImage(base, image_num + offset,
                                      logger=logger, approx=approx))
    # Restore the current image number (the original comment notes that it
    # must be set back after the per-image queries above).
    base['image_num'] = image_num
    return counts
def canAddHdus(self):
    """Whether extra HDUs may be appended to the end of the data list.

    The base class always permits this.
    """
    return True
def addExtraOutputHDUs(self, config, data, logger):
    """Append any extra output items that belong in HDUs to the data list.

    Parameters:
        config:     The configuration dict for the output field.
        data:       The data to write.  Usually a list of images.
        logger:     If given, a logger object to log progress.

    Returns:
        data (possibly updated with additional items)
    """
    # Builders that forbid extra HDUs still need the config validated so a
    # stray extra-output request raises a helpful error.
    if not self.canAddHdus():
        CheckNoExtraOutputHDUs(config, config['output']['type'], logger)
        return data
    return AddExtraOutputHDUs(config, data, logger)
def writeFile(self, data, file_name, config, base, logger):
    """Write the data out as a multi-extension FITS file.

    Parameters:
        data:       The data to write.  Usually the list of images returned by
                    buildImages, possibly with extra HDUs tacked onto the end
                    by the extra output items.
        file_name:  The file_name to write to.
        config:     The configuration dict for the output field.
        base:       The base configuration dict.
        logger:     If given, a logger object to log progress.
    """
    writeMulti(data, file_name)
def writeExtraOutputs(self, config, data, logger):
    """Delegate to the module-level helper that writes any extra output
    items which produce their own files.

    Parameters:
        config:     The configuration dict for the output field.
        data:       The data to write.  Usually a list of images.
        logger:     If given, a logger object to log progress.
    """
    WriteExtraOutputs(config, data, logger)
def RegisterOutputType(output_type, builder):
    """Register an output type for use by the config apparatus.

    Parameters:
        output_type:    The name of the type in config['output'].
        builder:        A builder instance (OutputBuilder or a subclass
                        thereof) used for building and writing the output
                        file.
    """
    # Store the already-instantiated builder in the module-level registry,
    # keyed by its config type name.
    valid_output_types[output_type] = builder
# The base class is also the builder for type = Fits.
# BUGFIX: removed trailing "| PypiClean |" extraction residue that made this
# line a syntax error.
RegisterOutputType('Fits', OutputBuilder())
# mopidy_internetarchive/library.py
import collections
import logging
from mopidy import backend, models
from . import Extension, translator
logger = logging.getLogger(__name__)
class InternetArchiveLibraryProvider(backend.LibraryProvider):
    """Mopidy library provider that browses and searches archive.org items."""

    # The single root directory shown to Mopidy clients for this backend.
    root_directory = models.Ref.directory(
        uri=translator.uri(""), name="Internet Archive"
    )

    def __init__(self, config, backend):
        super().__init__(backend)
        # Extension configuration: which archive.org collections to expose,
        # and which file formats count as audio and as images.
        self.__collections = config["collections"]
        self.__audio_formats = config["audio_formats"]
        self.__image_formats = config["image_formats"]
        # Lucene-style filter clauses reused by browse() and search().
        self.__browse_filter = "(mediatype:collection OR format:(%s))" % (
            " OR ".join(map(translator.quote, config["audio_formats"]))
        )
        self.__browse_limit = config["browse_limit"]
        self.__browse_views = config["browse_views"]
        self.__search_filter = "format:(%s)" % (
            " OR ".join(map(translator.quote, config["audio_formats"]))
        )
        self.__search_limit = config["search_limit"]
        self.__search_order = config["search_order"]
        # Cache of top-level collection refs, keyed by identifier, in the
        # order the collections were configured.
        self.__directories = collections.OrderedDict()
        self.__lookup = {}  # track cache for faster lookup

    def browse(self, uri):
        # Dispatch on the parsed URI: a file ref has nothing to browse into;
        # collection + query gives a sorted view of that collection; a bare
        # identifier browses the item; otherwise show the configured roots.
        identifier, filename, query = translator.parse_uri(uri)
        if filename:
            return []
        elif identifier and query:
            return self.__browse_collection(identifier, **query)
        elif identifier:
            return self.__browse_item(identifier)
        else:
            return self.__browse_root()

    def get_images(self, uris):
        # map uris to item identifiers
        urimap = collections.defaultdict(list)
        for uri in uris:
            identifier, _, _ = translator.parse_uri(uri)
            if identifier:
                urimap[identifier].append(uri)
            else:
                logger.debug("Not retrieving images for %s", uri)
        # retrieve item images and map back to uris; one metadata fetch per
        # distinct item, shared by every URI that points at it
        results = {}
        for identifier, uris in urimap.items():
            try:
                item = self.backend.client.getitem(identifier)
            except Exception as e:
                logger.error("Error retrieving images for %s: %s", uris, e)
            else:
                results.update(dict.fromkeys(uris, self.__images(item)))
        return results

    def lookup(self, uri):
        try:
            return [self.__lookup[uri]]
        except KeyError:
            logger.debug("Lookup cache miss for %r", uri)
            identifier, filename, _ = translator.parse_uri(uri)
            if identifier:
                tracks = self.__tracks(self.backend.client.getitem(identifier))
                # NOTE: the cache is replaced wholesale, so it only ever holds
                # the tracks of the most recently looked-up item.
                self.__lookup = trackmap = {t.uri: t for t in tracks}
                return [trackmap[uri]] if filename else tracks
            else:
                return []

    def refresh(self, uri=None):
        # Drop both the HTTP client's response cache (if any) and the local
        # directory/track caches so the next access re-fetches everything.
        client = self.backend.client
        if client.cache:
            client.cache.clear()
        self.__directories.clear()
        self.__lookup.clear()

    def search(self, query=None, uris=None, exact=False):
        # sanitize uris
        uris = set(uris or [self.root_directory.uri])
        if self.root_directory.uri in uris:
            # Searching the root means searching every configured collection.
            uris.update(translator.uri(c) for c in self.__collections)
            uris.remove(self.root_directory.uri)
        # translate query
        try:
            qs = translator.query(query, uris, exact)
        except ValueError as e:
            logger.info("Not searching %s: %s", Extension.dist_name, e)
            return None
        else:
            logger.debug("Internet Archive query: %s" % qs)
        # fetch results
        result = self.backend.client.search(
            f"{qs} AND {self.__search_filter}",
            fields=["identifier", "title", "creator", "date"],
            rows=self.__search_limit,
            sort=self.__search_order,
        )
        logger.debug("Internet Archive result: %s" % list(result))
        return models.SearchResult(
            uri=translator.uri(q=result.query),
            albums=[translator.album(item) for item in result],
        )

    def __browse_collection(self, identifier, sort=("downloads desc",)):
        # One ref per matching item in the collection, most-downloaded first
        # by default.
        return [
            translator.ref(res)
            for res in self.backend.client.search(
                f"collection:{identifier} AND {self.__browse_filter}",
                fields=["identifier", "mediatype", "title", "creator"],
                rows=self.__browse_limit,
                sort=sort,
            )
        ]

    def __browse_item(self, identifier):
        # Known top-level collections get the configured sort views without a
        # metadata round-trip.
        if identifier in self.__directories:
            return self.__views(identifier)
        item = self.backend.client.getitem(identifier)
        if item["metadata"]["mediatype"] == "collection":
            return self.__views(identifier)
        tracks = self.__tracks(item)
        self.__lookup = {t.uri: t for t in tracks}  # cache tracks
        return [models.Ref.track(uri=t.uri, name=t.name) for t in tracks]

    def __browse_root(self):
        if not self.__directories:
            # Lazily resolve the configured collection identifiers to refs
            # with a single batched search.
            result = self.backend.client.search(
                "mediatype:collection AND identifier:(%s)"
                % (" OR ".join(self.__collections)),
                fields=["identifier", "mediatype", "title"],
            )
            objs = {obj["identifier"]: obj for obj in result}
            # Preserve the configured collection order in the cache.
            for identifier in self.__collections:
                try:
                    obj = objs[identifier]
                except KeyError as e:
                    logger.warning("Collection not found: %s", e)
                else:
                    self.__directories[identifier] = translator.ref(obj)
        return list(self.__directories.values())

    def __images(self, item):
        uri = self.backend.client.geturl  # get download URL for images
        return translator.images(item, self.__image_formats, uri)

    def __tracks(self, item, key=lambda t: (t.track_no or 0, t.uri)):
        # Tracks sorted by track number (missing numbers sort first), then by
        # URI as a stable tiebreaker.
        tracks = translator.tracks(item, self.__audio_formats)
        tracks.sort(key=key)
        return tracks

    def __views(self, identifier):
        # One directory ref per configured browse view (sort order -> label).
        refs = []
        for order, name in self.__browse_views.items():
            uri = translator.uri(identifier, sort=order)
            refs.append(models.Ref.directory(name=name, uri=uri))
        return refs
# neodroidagent/common/session_factory/horizontal/experiment.py
__author__ = "Christian Heider Nielsen"
__doc__ = r"""
Created on 19/01/2020
"""
import base64
import os
import pickle
import subprocess
import time
from pathlib import Path

from cloudpickle import cloudpickle

from neodroid.environments.droid_environment import UnityEnvironment
from neodroidagent.agents import SoftActorCriticAgent
from neodroidagent.common import CategoricalMLP
class Experiment:
    """Context manager that serializes experiment method calls and launches
    them as subprocess runs.

    NOTE(review): this class appears to be adapted from garage's experiment
    runner; the module never imports ``garage`` nor defines a module-level
    ``exp_count``, so parts of :meth:`run_experiment` depend on names that
    must come from elsewhere -- confirm against the upstream source.
    """

    def __init__(self, log_dir="", save_dir="", render_environment=False):
        # The parameters are currently unused; they are kept so the public
        # constructor signature stays stable.
        pass

    def __enter__(self):
        """Enter the runner context.

        Returns:
            This local runner.
        """
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Leave session."""

    def run_experiment(
        self,
        method_call=None,
        batch_tasks=None,
        exp_prefix="experiment",
        exp_name=None,
        log_dir=None,
        script="garage.experiment.experiment_wrapper",
        python_command="python",
        dry=False,
        env=None,
        variant=None,
        force_cpu=False,
        pre_commands=None,
        **kwargs,
    ):
        """Serialize the method call and run the experiment using the
        specified mode.

        Args:
            method_call (callable): A method call.
            batch_tasks (list[dict]): A batch of method calls.
            exp_prefix (str): Name prefix for the experiment.
            exp_name (str): Name of the experiment.
            log_dir (str): Log directory for the experiment.
            script (str): The name of the entrance point python script.
            python_command (str): Python command to run the experiment.
            dry (bool): Whether to do a dry-run, which only prints the
                commands without executing them.
            env (dict): Extra environment variables.
            variant (dict): If provided, should be a dictionary of parameters.
            force_cpu (bool): Whether to set all GPU devices invisible
                to force use CPU.
            pre_commands (str): Pre commands to run the experiment.

        Raises:
            Exception: If neither ``method_call`` nor ``batch_tasks`` is
                provided.
            ValueError: If any provided task is not callable.
        """
        if method_call is None and batch_tasks is None:
            raise Exception("Must provide at least either method_call or batch_tasks")
        # Every task (or the single method_call) must be callable so it can
        # be pickled and re-invoked by the wrapper script.
        for task in batch_tasks or [method_call]:
            if not callable(task):
                raise ValueError("batch_tasks should be callable")
        # ensure variant exists
        if variant is None:
            variant = dict()
        if batch_tasks is None:
            # Normalize the single method call into a one-element task batch.
            batch_tasks = [
                dict(
                    kwargs,
                    pre_commands=pre_commands,
                    method_call=method_call,
                    exp_name=exp_name,
                    log_dir=log_dir,
                    env=env,
                    variant=variant,
                )
            ]
        global exp_count
        try:
            exp_count
        except NameError:
            # BUGFIX: ``exp_count`` was never initialized at module level, so
            # the first increment below raised NameError.  Start counting at 0.
            exp_count = 0
        if force_cpu:
            os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
        for task in batch_tasks:
            # Serialize the callable so the wrapper script can rebuild it.
            call = task.pop("method_call")
            data = base64.b64encode(cloudpickle.dumps(call)).decode("utf-8")
            task["args_data"] = data
            exp_count += 1
            if task.get("exp_name", None) is None:
                task["exp_name"] = f"{exp_prefix}_{time.time()}_{exp_count:04n}"
            if task.get("log_dir", None) is None:
                task["log_dir"] = (
                    f"{Path.cwd() / 'data'}/local/{exp_prefix.replace('_', '-')}/"
                    f"{task['exp_name']}"
                )
            if task.get("variant", None) is not None:
                # The variant dict is pickled separately from the method call.
                variant = task.pop("variant")
                if "exp_name" not in variant:
                    variant["exp_name"] = task["exp_name"]
                task["variant_data"] = base64.b64encode(pickle.dumps(variant)).decode(
                    "utf-8"
                )
            elif "variant" in task:
                del task["variant"]
            task["env"] = task.get("env", dict()) or dict()
            task["env"]["GARAGE_FORCE_CPU"] = str(force_cpu)
        for task in batch_tasks:
            env = task.pop("env", None)
            # NOTE(review): ``garage`` is not imported in this module; this
            # call raises NameError unless it is injected elsewhere.
            command = garage.to_local_command(
                task, python_command=python_command, script=script
            )
            print(command)
            if dry:
                return
            try:
                if env is None:
                    env = dict()
                # BUGFIX: the original called ``os.subprocess.run``, which
                # raises AttributeError (``os`` has no ``subprocess``
                # attribute); call the subprocess module directly.
                subprocess.run(
                    command, shell=True, env=dict(os.environ, **env), check=True
                )
            except Exception as e:
                print(e)
                raise
if __name__ == "__main__":
    # Example/smoke-test wiring: build an environment, a categorical policy
    # and a SAC agent, then run a short training session.
    ENV = ""  # NOTE(review): empty environment name -- presumably filled in by hand.
    env = UnityEnvironment(env_name=ENV)
    policy = CategoricalMLP(
        input_shape=env.observation_space,
        output_shape=env.action_space,
        hidden_sizes=(32, 32),
    )
    agent = SoftActorCriticAgent(
        policy=policy, max_path_length=100, discount=0.99, max_kl_step=0.01
    )
    with Experiment(log_dir="", save_dir="", render_environment=False) as experiment:
        # NOTE(review): Experiment defines no setup()/train() methods in this
        # module, so these calls raise AttributeError -- confirm the intended
        # Experiment interface.
        experiment.setup(agent, env)
        experiment.train(n_epochs=100, batch_size=4000)
# galfitools/sky/Sky.py
import numpy as np
import sys
import os
import stat
import subprocess as sp
import os.path
from astropy.io import fits
import scipy
import scipy.special
import matplotlib.pyplot as plt
import argparse
# TODO: add sky box and sky ring options
# use maskds9 to obtain the pixels of the regions
def sky(imgname, maskimage, filereg):
    """Estimate the sky background inside a ds9 box region.

    Masked pixels are excluded, and the faintest and brightest 20% of the
    remaining pixels are clipped before computing the statistics.

    Parameters
    ----------
    imgname : str
        FITS image containing the data.
    maskimage : str
        FITS mask image; nonzero pixels are excluded from the estimate.
    filereg : str
        ds9 region file containing a box that delimits the sky area.

    Returns
    -------
    mean, sig : float
        Mean and standard deviation of the clipped sky pixels.
    """
    # BUGFIX: the original signature was annotated ``-> None`` although the
    # function returns ``(mean, sig)``; the wrong annotation was removed.
    # Box limits from the ds9 region file (1-based, inclusive).
    xlo, xhi, ylo, yhi = GetRegionDs9(filereg)

    # Use context managers so the FITS handles are always closed.
    with fits.open(imgname) as hdu:
        imgdat = hdu[0].data.astype(float)
    with fits.open(maskimage) as hdu:
        maskdat = hdu[0].data

    # Crop both arrays to the box (FITS pixel indices are 1-based).
    imgdat = imgdat[ylo - 1:yhi, xlo - 1:xhi]
    maskdat = np.array(maskdat[ylo - 1:yhi, xlo - 1:xhi], dtype=bool)

    # Keep only the unmasked pixels.
    img = imgdat[~maskdat]

    # Exclude the top and bottom 20% of the sorted pixel values.
    flatimg = img.flatten()
    flatimg.sort()
    tot = len(flatimg)
    top = round(.8 * tot)
    bot = round(.2 * tot)
    img2 = flatimg[bot:top]

    mean = img2.mean()
    sig = img2.std()
    return mean, sig
def rms(array):
    """Return the root-mean-square of *array*."""
    return np.sqrt(np.mean(np.square(array)))
def GetRegionDs9(filein):
    "Get the size (xmin,xmax,ymin,ymax) from ds9 region file "
    # If several box shapes are present, the last one wins; with no box at
    # all, the bounds stay at zero.
    xlo = xhi = ylo = yhi = 0
    headers = ("image", "physical", "global")
    with open(filein) as handle:
        for raw in handle:
            # Strip trailing whitespace and inline '#' comments, then skip
            # blank and header lines.
            text = raw.rstrip().split('#', 1)[0].rstrip()
            if not text:
                continue
            if text.split(' ')[0] in headers:
                continue
            shape, info = text.split('(')
            if shape != "box":
                continue
            # box(xpos, ypos, xlong, ylong, angle) -- angle is ignored.
            xpos, ypos, xlong, ylong, _ = info.split(',')
            xpos, ypos = float(xpos), float(ypos)
            xlong, ylong = float(xlong), float(ylong)
            # Convert center/size to rounded edge coordinates.
            xlo = int(round(xpos - xlong / 2))
            xhi = int(round(xpos + xlong / 2))
            ylo = int(round(ypos - ylong / 2))
            yhi = int(round(ypos + ylong / 2))
    return xlo, xhi, ylo, yhi
#############################################################################
######################### End of program ###################################
# ______________________________________________________________________
# /___/___/___/___/___/___/___/___/___/___/___/___/___/___/___/___/___/_/|
# |___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|__/|
# |_|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|/|
# |___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|__/|
# |_|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|/|
# |___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|__/|
# |_|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|/
##############################################################################
if __name__ == '__main__':
    # NOTE(review): ``main`` is not defined anywhere in this module, so
    # running the file directly raises NameError -- confirm the intended CLI
    # entry point.
    main()
# bicycledataprocessor/main.py
# built in imports
import os
import datetime
from math import pi
# dependencies
import numpy as np
from scipy import io
from scipy.integrate import cumtrapz
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
from tables import NoSuchNodeError
import dtk.process as process
from dtk.bicycle import front_contact, benchmark_to_moore
import bicycleparameters as bp
# local dependencies
from database import (get_row_num, get_cell, pad_with_zeros, run_id_string,
default_paths)
import signalprocessing as sigpro
from bdpexceptions import TimeShiftError
class Signal(np.ndarray):
    """
    A subclass of ndarray for collecting the data for a single signal in a
    run.

    Attributes
    ----------
    conversions : dictionary
        A mapping for unit conversions.
    name : str
        The name of the signal. Should be CamelCase.
    runid : str
        A five digit identification number associated with the
        trial this signal was collected from (e.g. '00104').
    sampleRate : float
        The sample rate in hertz of the signal.
    source : str
        The source of the data. This should be 'NI' for the
        National Instruments USB-6218 and 'VN' for the VN-100 IMU.
    units : str
        The physical units of the signal. These should be specified
        as lowercase complete words using only multiplication and
        division symbols (e.g. 'meter/second/second').
        Signal.conversions will show the available options.

    Methods
    -------
    plot()
        Plots the signal versus time and returns the line.
    frequency()
        Returns the frequency spectrum of the signal.
    time_derivative()
        Returns the time derivative of the signal.
    filter(frequency)
        Returns the low passed filter of the signal.
    truncate(tau)
        Interpolates and truncates the signal based on the time shift,
        `tau`, and the signal source.
    as_dictionary()
        Returns a dictionary of the metadata of the signal.
    convert_units(units)
        Returns a signal with different units. `conversions` specifies the
        available options.

    """

    # Basic unit conversions: key 'a->b' holds the factor that multiplies a
    # value in units `a` to give a value in units `b`.
    conversions = {'degree->radian': pi / 180.,
                   'degree/second->radian/second': pi / 180.,
                   'degree/second/second->radian/second/second': pi / 180.,
                   'inch*pound->newton*meter': 25.4 / 1000. * 4.44822162,
                   'pound->newton': 4.44822162,
                   'feet/second->meter/second': 12. * 2.54 / 100.,
                   # BUGFIX: this factor was `0.00254 * 12. / 5280. / 3600.`
                   # (~1.6e-6), which is not the mile/hour -> meter/second
                   # factor.  1 mph = 5280 ft * 0.3048 m/ft / 3600 s =
                   # 0.44704 m/s, written here consistently with the
                   # feet/second entry above.
                   'mile/hour->meter/second': 12. * 2.54 / 100. * 5280. / 3600.}

    def __new__(cls, inputArray, metadata):
        """
        Returns an instance of the Signal class with the additional signal
        data.

        Parameters
        ----------
        inputArray : ndarray, shape(n,)
            A one dimension array representing a single variable's time
            history.
        metadata : dictionary
            This dictionary contains the metadata for the signal.
                name : str
                    The name of the signal. Should be CamelCase.
                runid : str
                    A five digit identification number associated with the
                    trial this experiment was collected at (e.g. '00104').
                sampleRate : float
                    The sample rate in hertz of the signal.
                source : str
                    The source of the data. This should be 'NI' for the
                    National Instruments USB-6218 and 'VN' for the VN-100
                    IMU.
                units : str
                    The physical units of the signal (e.g.
                    'meter/second/second').

        Raises
        ------
        ValueError
            If `inputArray` is not a vector.

        """
        if len(inputArray.shape) > 1:
            raise ValueError('Signals must be arrays of one dimension.')
        # cast the input array into the Signal class
        obj = np.asarray(inputArray).view(cls)
        # add the metadata to the object
        obj.name = metadata['name']
        obj.runid = metadata['runid']
        obj.sampleRate = metadata['sampleRate']
        obj.source = metadata['source']
        obj.units = metadata['units']
        return obj

    def __array_finalize__(self, obj):
        # Called by numpy for every new view/copy; carry the metadata over
        # (it falls back to None when the source object has none, e.g. after
        # a ufunc call).
        if obj is None: return
        self.name = getattr(obj, 'name', None)
        self.runid = getattr(obj, 'runid', None)
        self.sampleRate = getattr(obj, 'sampleRate', None)
        self.source = getattr(obj, 'source', None)
        self.units = getattr(obj, 'units', None)

    def __array_wrap__(self, outputArray, context=None):
        # Metadata is not propagated through basic ufunc calls.  Anytime you
        # add, subtract, multiply, divide, etc, the following attributes are
        # not retained and must be reassigned by the caller.
        outputArray.name = None
        outputArray.source = None
        outputArray.units = None
        return np.ndarray.__array_wrap__(self, outputArray, context)

    def as_dictionary(self):
        '''Returns the signal metadata as a dictionary.'''
        data = {'runid': self.runid,
                'name': self.name,
                'units': self.units,
                'source': self.source,
                'sampleRate': self.sampleRate}
        return data

    def convert_units(self, units):
        """
        Returns a signal with the specified units.

        Parameters
        ----------
        units : str
            The units to convert the signal to. The mapping must be in the
            attribute `conversions`.

        Returns
        -------
        newSig : Signal
            The signal with the desired units.

        """
        if units == self.units:
            return self
        else:
            # Try the forward mapping first (multiply); fall back to the
            # reverse mapping (divide) before giving up.
            try:
                conversion = self.units + '->' + units
                newSig = self * self.conversions[conversion]
            except KeyError:
                try:
                    conversion = units + '->' + self.units
                    newSig = self / self.conversions[conversion]
                except KeyError:
                    raise KeyError(('Conversion from {0} to {1} is not ' +
                        'possible or not defined.').format(self.units, units))
            # make the new signal (arithmetic dropped the metadata)
            newSig = Signal(newSig, self.as_dictionary())
            newSig.units = units
            return newSig

    def filter(self, frequency):
        """Returns the signal filtered by a low pass Butterworth at the given
        frequency."""
        # Spline over any nans first so the filter sees a gap-free signal.
        filteredArray = process.butterworth(self.spline(), frequency,
                                            self.sampleRate)
        return Signal(filteredArray, self.as_dictionary())

    def frequency(self):
        """Returns the frequency content of the signal."""
        return process.freq_spectrum(self.spline(), self.sampleRate)

    def integrate(self, initialCondition=0., detrend=False):
        """Integrates the signal using the trapezoidal rule."""
        time = self.time()
        # integrate using trapz and adjust with the initial condition
        grated = np.hstack((0., cumtrapz(self, x=time))) + initialCondition
        # this tries to characterize the drift in the integrated signal. It
        # works well for signals from straight line tracking but not
        # necessarily for lane change.
        if detrend is True:
            def line(x, a, b, c):
                # Quadratic drift model (despite the name 'line').
                return a * x**2 + b * x + c
            popt, pcov = curve_fit(line, time, grated)
            grated = grated - line(time, popt[0], popt[1], popt[2])
        # Re-wrap as a Signal (hstack/subtraction return plain ndarrays).
        grated = Signal(grated, self.as_dictionary())
        grated.units = self.units + '*second'
        grated.name = self.name + 'Int'
        return grated

    def plot(self, show=True):
        """Plots and returns the signal versus time."""
        time = self.time()
        line = plt.plot(time, self)
        if show:
            plt.xlabel('Time [second]')
            plt.ylabel('{0} [{1}]'.format(self.name, self.units))
            plt.title('Signal plot during run {0}'.format(self.runid))
            plt.show()
        return line

    def spline(self):
        """Returns the signal with nans replaced by the results of a cubic
        spline."""
        splined = process.spline_over_nan(self.time(), self)
        return Signal(splined, self.as_dictionary())

    def subtract_mean(self):
        """Returns the mean subtracted data."""
        return Signal(process.subtract_mean(self), self.as_dictionary())

    def time(self):
        """Returns the time vector of the signal."""
        return sigpro.time_vector(len(self), self.sampleRate)

    def time_derivative(self):
        """Returns the time derivative of the signal."""
        # calculate the numerical time derivative
        dsdt = process.derivative(self.time(), self, method='combination')
        # map the metadata from self onto the derivative
        dsdt = Signal(dsdt, self.as_dictionary())
        dsdt.name = dsdt.name + 'Dot'
        dsdt.units = dsdt.units + '/second'
        return dsdt

    def truncate(self, tau):
        '''Returns the shifted and truncated signal based on the provided
        timeshift, tau.'''
        # truncate_data returns a plain ndarray, so re-wrap it as a Signal.
        return Signal(sigpro.truncate_data(self, tau), self.as_dictionary())
class RawSignal(Signal):
    """
    A subclass of Signal for collecting the data for a single raw signal in
    a run.

    Attributes
    ----------
    sensor : Sensor
        Each raw signal has a sensor associated with it. Most sensors contain
        calibration data for that sensor/signal.
    calibrationType : str
        Which calibration equation applies to this signal (as recorded in the
        signal table of the database).

    Notes
    -----
    This is a class for the signals that are the raw measurement outputs
    collected by the BicycleDAQ software and are already stored in the pytables
    database file.

    """

    def __new__(cls, runid, signalName, database):
        """
        Returns an instance of the RawSignal class with the additional signal
        metadata.

        Parameters
        ----------
        runid : str
            A five digit run identification number (e.g. '00104').
        signalName : str
            A CamelCase signal name that corresponds to the raw signals
            output by BicycleDAQ_.
        database : pytables object
            The hdf5 database for the instrumented bicycle.

        .. _BicycleDAQ: https://github.com/moorepants/BicycleDAQ

        """
        # get the tables
        rTab = database.root.runTable
        sTab = database.root.signalTable
        cTab = database.root.calibrationTable
        # get the row number for this particular run id
        rownum = get_row_num(runid, rTab)
        signal = database.getNode('/rawData/' + runid, name=signalName).read()
        # cast the input array into my subclass of ndarray
        obj = np.asarray(signal).view(cls)
        obj.runid = runid
        obj.timeStamp = matlab_date_to_object(get_cell(rTab, 'DateTime',
            rownum))
        # one matching row per signal name in the signal table
        obj.calibrationType, obj.units, obj.source = [(row['calibration'],
            row['units'], row['source'])
            for row in sTab.where('signal == signalName')][0]
        obj.name = signalName
        try:
            obj.sensor = Sensor(obj.name, cTab)
        except KeyError:
            # This just means that there was no sensor associated with that
            # signal for calibration purposes.
            pass
        # this assumes that the supply voltage for this signal is the same for
        # all sensor calibrations
        try:
            supplySource = [row['runSupplyVoltageSource']
                for row in cTab.where('name == signalName')][0]
            if supplySource == 'na':
                # fixed supply voltage recorded in the calibration table
                obj.supply = [row['runSupplyVoltage']
                    for row in cTab.where('name == signalName')][0]
            else:
                # supply voltage measured during the run itself
                obj.supply = database.getNode('/rawData/' + runid,
                    name=supplySource).read()
        except IndexError:
            # this signal does not have a supply voltage
            pass
        # get the appropriate sample rate
        if obj.source == 'NI':
            sampRateCol = 'NISampleRate'
        elif obj.source == 'VN':
            sampRateCol = 'VNavSampleRate'
        else:
            raise ValueError('{0} is not a valid source.'.format(obj.source))
        obj.sampleRate = rTab[rownum][rTab.colnames.index(sampRateCol)]
        return obj

    def __array_finalize__(self, obj):
        # Propagate metadata through numpy views/copies (None when absent).
        if obj is None: return
        self.calibrationType = getattr(obj, 'calibrationType', None)
        self.name = getattr(obj, 'name', None)
        self.runid = getattr(obj, 'runid', None)
        self.sampleRate = getattr(obj, 'sampleRate', None)
        self.sensor = getattr(obj, 'sensor', None)
        self.source = getattr(obj, 'source', None)
        self.units = getattr(obj, 'units', None)
        self.timeStamp = getattr(obj, 'timeStamp', None)

    def __array_wrap__(self, outputArray, context=None):
        # Metadata is not carried through basic ufunc calls (add, subtract,
        # multiply, divide, etc); the attributes below are reset to None.
        outputArray.calibrationType = None
        outputArray.name = None
        outputArray.sensor = None
        outputArray.source = None
        outputArray.units = None
        return np.ndarray.__array_wrap__(self, outputArray, context)

    def scale(self):
        """
        Returns the scaled signal based on the calibration data for the
        supplied date.

        Returns
        -------
        : ndarray (n,)
            Scaled signal.

        """
        try:
            self.calibrationType
        except AttributeError:
            raise AttributeError("Can't scale without the calibration type")
        # these will need to be changed once we start measuring them
        doNotScale = ['LeanPotentiometer',
                      'HipPotentiometer',
                      'TwistPotentiometer']
        if self.calibrationType in ['none', 'matrix'] or self.name in doNotScale:
            # nothing to do for uncalibrated/matrix-calibrated signals
            return self
        else:
            pass
        # pick the largest calibration date without surpassing the run date
        calibData = self.sensor.get_data_for_date(self.timeStamp)
        slope = calibData['slope']
        bias = calibData['bias']
        intercept = calibData['offset']
        calibrationSupplyVoltage = calibData['calibrationSupplyVoltage']
        if self.calibrationType == 'interceptStar':
            # this is for potentiometers, where the slope is ratiometric
            # and zero degrees is always zero volts
            calibratedSignal = (calibrationSupplyVoltage / self.supply *
                                slope * self + intercept)
        elif self.calibrationType == 'intercept':
            # this is the typical calibration that I use for all the
            # sensors that I calibrate myself
            calibratedSignal = (calibrationSupplyVoltage / self.supply *
                                (slope * self + intercept))
        elif self.calibrationType == 'bias':
            # this is for the accelerometers and rate gyros that are
            # "ratiometric", but I'm still not sure this is correct
            calibratedSignal = (slope * (self - self.supply /
                                calibrationSupplyVoltage * bias))
        else:
            # NOTE(review): StandardError is Python 2 only (this module also
            # uses Python 2 print statements elsewhere).
            raise StandardError("None of the calibration equations worked.")
        calibratedSignal.name = calibData['signal']
        calibratedSignal.units = calibData['units']
        calibratedSignal.source = self.source
        return calibratedSignal.view(Signal)

    def plot_scaled(self, show=True):
        '''Plots and returns the scaled signal versus time.'''
        time = self.time()
        scaled = self.scale()
        # NOTE(review): scale() returns a Signal array, so ``scaled[1]``,
        # ``scaled[2]`` and ``scaled[0]`` index individual samples rather
        # than (name, data, units) -- confirm whether a tuple return was
        # intended here.
        line = plt.plot(time, scaled[1])
        plt.xlabel('Time [s]')
        plt.ylabel(scaled[2])
        plt.title('{0} signal during run {1}'.format(scaled[0],
            str(self.runid)))
        if show:
            plt.show()
        return line
class Sensor():
    """This class is a container for calibration data for a sensor."""

    def __init__(self, name, calibrationTable):
        """
        Initializes this sensor class.

        Parameters
        ----------
        name : string
            The CamelCase name of the sensor (e.g. SteerTorqueSensor).
        calibrationTable : pyTables table object
            This is the calibration data table that contains all the data taken
            during calibrations.

        """
        self.name = name
        self._store_calibration_data(calibrationTable)

    def _store_calibration_data(self, calibrationTable):
        """
        Stores a dictionary of calibration data for the sensor for all
        calibration dates in the object.

        Parameters
        ----------
        calibrationTable : pyTables table object
            This is the calibration data table that contains all the data taken
            during calibrations.

        """
        # self.data maps calibrationID -> {column name: value} for every
        # calibration row that belongs to this sensor.
        self.data = {}
        for row in calibrationTable.iterrows():
            if self.name == row['name']:
                self.data[row['calibrationID']] = {}
                for col in calibrationTable.colnames:
                    self.data[row['calibrationID']][col] = row[col]
        if self.data == {}:
            # no calibration rows at all means the sensor name is unknown
            raise KeyError(('{0} is not a valid sensor ' +
                           'name').format(self.name))

    def get_data_for_date(self, runDate):
        """
        Returns the calibration data for the sensor for the most recent
        calibration relative to `runDate`.

        Parameters
        ----------
        runDate : datetime object
            This is the date of the run that the calibration data is needed
            for.

        Returns
        -------
        calibData : dictionary
            A dictionary containing the sensor calibration data for the
            calibration closest to but not past `runDate`.

        Notes
        -----
        This method will select the calibration data for the date closest to
        but not past `runDate`. **All calibrations must be taken before the
        runs.**

        """
        # make a list of calibration ids and time stamps
        # NOTE(review): dict.iteritems is Python 2 only (consistent with the
        # Python 2 print statements elsewhere in this module).
        dateIdPairs = [(k, matlab_date_to_object(v['timeStamp']))
                       for k, v in self.data.iteritems()]
        # sort the pairs with the most recent date first
        dateIdPairs.sort(key=lambda x: x[1], reverse=True)
        # go through the list and stop at the first (most recent) calibration
        # whose date does not exceed the run date
        for i, pair in enumerate(dateIdPairs):
            if runDate >= pair[1]:
                break
        return self.data[dateIdPairs[i][0]]
class Run():
"""The fluppin fundamental class for a run."""
def __init__(self, runid, dataset, pathToParameterData=None,
             forceRecalc=False, filterFreq=None, store=True):
    """Loads the raw and processed data for a run if available otherwise it
    generates the processed data from the raw data.

    Parameters
    ----------
    runid : int or str
        The run id should be an integer, e.g. 5, or a five digit string with
        leading zeros, e.g. '00005'.
    dataset : DataSet
        A DataSet object with at least some raw data.
    pathToParameterData : string, {'<path>', None}, optional
        The path to a data directory for the BicycleParameters package. It
        should contain the bicycles and riders used in the experiments.
    forceRecalc : boolean, optional, default = False
        If true then it will force a recalculation of all the processed
        data.
    filterFreq : float, optional, default = None
        If given, all of the processed signals will be low pass filtered
        with a second order Butterworth filter at this cutoff frequency.
        (The original docstring documented this parameter under the wrong
        name, ``filterSigs``.)
    store : boolean, optional, default = True
        If true the resulting task signals will be stored in the database.
    """
    if pathToParameterData is None:
        pathToParameterData = default_paths['pathToParameters']
    print "Initializing the run object."
    self.filterFreq = filterFreq
    dataset.open()
    dataTable = dataset.database.root.runTable
    signalTable = dataset.database.root.signalTable
    taskTable = dataset.database.root.taskTable
    # normalize to the five digit zero padded string form
    runid = run_id_string(runid)
    # get the row number for this particular run id
    rownum = get_row_num(runid, dataTable)
    # make some dictionaries to store all the data
    self.metadata = {}
    self.rawSignals = {}
    # make lists of the input and output signals
    rawDataCols = [x['signal'] for x in
                   signalTable.where("isRaw == True")]
    computedCols = [x['signal'] for x in
                    signalTable.where("isRaw == False")]
    # store the metadata for this run: every run table column that is not
    # a raw or computed signal column
    print "Loading metadata from the database."
    for col in dataTable.colnames:
        if col not in (rawDataCols + computedCols):
            self.metadata[col] = get_cell(dataTable, col, rownum)
    print "Loading the raw signals from the database."
    for col in rawDataCols:
        # rawDataCols includes all possible raw signals, but every run
        # doesn't have all the signals, so skip the ones that aren't there
        try:
            self.rawSignals[col] = RawSignal(runid, col, dataset.database)
        except NoSuchNodeError:
            pass
    if self.metadata['Rider'] != 'None':
        self.load_rider(pathToParameterData)
    self.bumpLength = 1.0  # 1 meter
    # Try to load the task signals if they've already been computed. If
    # they aren't in the database, the filter frequencies don't match or
    # forceRecalc is true, then compute them. This may save some time
    # when repeatedly loading runs for analysis.
    self.taskFromDatabase = False
    try:
        runGroup = dataset.database.root.taskData._f_getChild(runid)
    except NoSuchNodeError:
        forceRecalc = True
    else:
        # The filter frequency stored in the task table is either a nan
        # value or a valid float. If the stored filter frequency is not the
        # same as the one passed to Run, then a recalculation should be
        # forced.
        taskRowNum = get_row_num(runid, taskTable)
        storedFreq = taskTable.cols.FilterFrequency[taskRowNum]
        self.taskSignals = {}
        if filterFreq is None:
            newFilterFreq = np.nan
        else:
            newFilterFreq = filterFreq
        if np.isnan(newFilterFreq) and np.isnan(storedFreq):
            # both unfiltered: the stored task signals are usable as-is
            for node in runGroup._f_walkNodes():
                meta = {k: node._f_getAttr(k) for k in ['units', 'name',
                        'runid', 'sampleRate', 'source']}
                self.taskSignals[node.name] = Signal(node[:], meta)
            self.taskFromDatabase = True
        elif np.isnan(newFilterFreq) or np.isnan(storedFreq):
            # exactly one of the two is filtered: stored signals don't match
            forceRecalc = True
        else:
            if abs(storedFreq - filterFreq) < 1e-10:
                # same cutoff frequency: reuse the stored task signals
                for node in runGroup._f_walkNodes():
                    meta = {k: node._f_getAttr(k) for k in ['units', 'name',
                            'runid', 'sampleRate', 'source']}
                    self.taskSignals[node.name] = Signal(node[:], meta)
                self.taskFromDatabase = True
            else:
                forceRecalc = True
    dataset.close()
    if forceRecalc == True:
        # drop any partially loaded task signals before recomputing
        try:
            del self.taskSignals
        except AttributeError:
            pass
        self.process_raw_signals()
    # store the task signals in the database if they are newly computed
    # NOTE(review): dataset.close() was already called above; this
    # presumably relies on add_task_signals reopening the database --
    # confirm against the DataSet implementation.
    if (store == True and self.taskFromDatabase == False
            and self.topSig == 'task'):
        taskMeta = {
            'Duration':
                self.taskSignals['ForwardSpeed'].time()[-1],
            'FilterFrequency': self.filterFreq,
            'MeanSpeed': self.taskSignals['ForwardSpeed'].mean(),
            'RunID': self.metadata['RunID'],
            'StdSpeed': self.taskSignals['ForwardSpeed'].std(),
            'Tau': self.tau,
        }
        dataset.add_task_signals(self.taskSignals, taskMeta)
    # tell the user about the run
    print self
def process_raw_signals(self):
    """Processes the raw signals as far as possible and filters the
    result if a cutoff frequency was specified.

    Calibration is always performed; for instrumentation-only maneuvers
    the pipeline stops there, otherwise the time shift, truncated,
    computed and task signals are derived in order.
    """
    # print() function form is valid in both Python 2 and 3 for a single
    # argument; the rest of the class already uses it
    print("Computing signals from raw data.")
    self.calibrate_signals()
    # the following maneuvers should never be calculated beyond the
    # calibrated signals
    instrumentationManeuvers = ('Steer Dynamics Test',
                                'System Test',
                                'Static Calibration')
    if self.metadata['Maneuver'] not in instrumentationManeuvers:
        self.compute_time_shift()
        # 0.15 is the maximum allowed normalized RMS of the shifted
        # accelerometer residual, see check_time_shift
        self.check_time_shift(0.15)
        self.truncate_signals()
        self.compute_signals()
        self.task_signals()
    if self.filterFreq is not None:
        self.filter_top_signals(self.filterFreq)
def filter_top_signals(self, filterFreq):
    """Low pass filters the most processed set of signals in place.

    Which dictionary gets filtered is selected by ``self.topSig``;
    anything other than 'task', 'computed' or 'calibrated' is a no-op.
    """
    attributeNames = {'task': 'taskSignals',
                      'computed': 'computedSignals',
                      'calibrated': 'calibratedSignals'}
    attribute = attributeNames.get(self.topSig)
    if attribute is None:
        return
    print('Filtering the %s signals.' % self.topSig)
    signals = getattr(self, attribute)
    for name, signal in list(signals.items()):
        signals[name] = signal.filter(filterFreq)
def calibrate_signals(self):
    """Calibrates every raw signal for the run and stores the results in
    ``self.calibratedSignals``, keyed by the scaled signal's name."""
    self.calibratedSignals = dict(
        (scaled.name, scaled)
        for scaled in (raw.scale() for raw in self.rawSignals.values()))
    # the calibrated set is now the most processed set of signals
    self.topSig = 'calibrated'
def task_signals(self):
    """Computes the task specific signals.

    Extracts the task portion of the run from the computed signals and
    then derives the yaw angle and the wheel contact point rates and
    locations, leaving ``self.taskSignals`` as the most processed set.
    """
    print('Extracting the task portion from the data.')
    self.extract_task()
    # compute task specific variables; order matters: the yaw angle feeds
    # the contact rates, which feed the rear contact points, which feed
    # the front contact points
    self.compute_yaw_angle()
    self.compute_rear_wheel_contact_rates()
    self.compute_rear_wheel_contact_points()
    self.compute_front_wheel_contact_points()
    self.topSig = 'task'
def compute_signals(self):
    """Computes the task independent quantities and stores them in
    ``self.computedSignals``."""
    self.computedSignals = {}
    # these signals carry over from the truncated set unchanged, except
    # that the two angles are converted to radians
    passThrough = ('FiveVolts',
                   'PushButton',
                   'RearWheelRate',
                   'RollAngle',
                   'SteerAngle',
                   'ThreeVolts')
    angles = ('RollAngle', 'SteerAngle')
    for name in passThrough:
        signal = self.truncatedSignals[name]
        if name in angles:
            signal = signal.convert_units('radian')
        self.computedSignals[name] = signal
    # compute the quantities that aren't task specific
    self.compute_pull_force()
    self.compute_forward_speed()
    self.compute_steer_rate()
    self.compute_yaw_roll_pitch_rates()
    self.compute_steer_torque()
def truncate_signals(self):
    """Truncates every calibrated signal by the time shift ``self.tau``
    and splines it, storing the results in ``self.truncatedSignals``."""
    shift = self.tau
    self.truncatedSignals = dict(
        (name, signal.truncate(shift).spline())
        for name, signal in self.calibratedSignals.items())
    self.topSig = 'truncated'
def compute_time_shift(self):
    """Computes the time shift between the two data acquisition systems
    based on their vertical accelerometer signals and stores it in
    ``self.tau``."""
    # the NI accelerometer and the VN-100 vertical acceleration measure
    # the same motion, so the lag between them gives the shift
    self.tau = sigpro.find_timeshift(
        self.calibratedSignals['AccelerometerAccelerationY'],
        self.calibratedSignals['AccelerationZ'],
        self.metadata['NISampleRate'],
        self.metadata['Speed'], plotError=False)
def check_time_shift(self, maxNRMS):
    """Raises a TimeShiftError if the normalized root mean square of the
    shifted accelerometer residual is higher than ``maxNRMS``.

    Parameters
    ----------
    maxNRMS : float
        The maximum allowable normalized root mean square error.
    """
    # Check to make sure the signals were actually good fits by
    # calculating the normalized root mean square. If it isn't very
    # low, raise an error.
    niAcc = self.calibratedSignals['AccelerometerAccelerationY']
    vnAcc = self.calibratedSignals['AccelerationZ']
    vnAcc = vnAcc.truncate(self.tau).spline()
    niAcc = niAcc.truncate(self.tau).spline()
    # todo: this should probably check the rms of the mean subtracted data
    # because both accelerometers don't always give the same value, this
    # may work better with a filtered signal too
    # todo: this should probably be moved into the time shift code in the
    # signalprocessing model
    # NOTE(review): the residual is the SUM vnAcc + niAcc rather than a
    # difference; presumably the two sensors measure with opposite sign
    # conventions (verify_time_sync plots -AccelerometerAccelerationY
    # against AccelerationZ) -- confirm.
    nrms = np.sqrt(np.mean((vnAcc + niAcc)**2)) / (niAcc.max() - niAcc.min())
    if nrms > maxNRMS:
        raise TimeShiftError(('The normalized root mean square for this ' +
            'time shift is {}, which is greater '.format(str(nrms)) +
            'than the maximum allowed: {}'.format(str(maxNRMS))))
def compute_rear_wheel_contact_points(self):
    """Computes the location of the rear wheel contact point in the
    ground plane by integrating the contact point rates, and stores the
    results in ``self.taskSignals``.

    If the rate signals are not available the computation is skipped
    with a message instead of raising.
    """
    # get the rates
    try:
        latRate = self.taskSignals['LateralRearContactRate']
        lonRate = self.taskSignals['LongitudinalRearContactRate']
    except (AttributeError, KeyError):
        # KeyError added: a missing dictionary entry previously escaped
        # the AttributeError-only handler and crashed the run. The
        # message also previously said "The YawAngle was not computed",
        # a copy-paste from compute_yaw_angle.
        print('At least one of the rates are not available. ' +
              'The rear wheel contact points were not computed.')
    else:
        # convert to meters per second
        latRate = latRate.convert_units('meter/second')
        lonRate = lonRate.convert_units('meter/second')
        # integrate and try to account for the drift (laterally only)
        lat = latRate.integrate(detrend=True)
        lon = lonRate.integrate()
        # set the new name and units
        lat.name = 'LateralRearContact'
        lat.units = 'meter'
        lon.name = 'LongitudinalRearContact'
        lon.units = 'meter'
        # store in task signals
        self.taskSignals[lat.name] = lat
        self.taskSignals[lon.name] = lon
def compute_front_wheel_contact_points(self):
    """Calculates the front wheel contact points in the ground plane
    from the rear contact points, the yaw, roll and steer angles, and
    the bicycle geometry, storing them in ``self.taskSignals``.

    Note: unlike the other compute_* methods, missing task signals are
    not caught here; a KeyError propagates if any required signal is
    absent.
    """
    q1 = self.taskSignals['LongitudinalRearContact']
    q2 = self.taskSignals['LateralRearContact']
    q3 = self.taskSignals['YawAngle']
    q4 = self.taskSignals['RollAngle']
    q7 = self.taskSignals['SteerAngle']
    # convert the benchmark parameter set to the Moore parameterization
    p = benchmark_to_moore(self.bicycleRiderParameters)
    # vectorize so front_contact maps over the whole time series
    f = np.vectorize(front_contact)
    q9, q10 = f(q1, q2, q3, q4, q7, p['d1'], p['d2'], p['d3'], p['rr'],
                p['rf'])
    self.taskSignals['LongitudinalFrontContact'] = q9
    self.taskSignals['LateralFrontContact'] = q10
def compute_rear_wheel_contact_rates(self):
    """Calculates the rates of the rear wheel contact point in the
    ground plane and stores them in ``self.taskSignals``.

    If any needed signal or parameter is missing the computation is
    skipped with a message instead of raising.
    """
    try:
        yawAngle = self.taskSignals['YawAngle']
        rearWheelRate = self.taskSignals['RearWheelRate']
        rR = self.bicycleRiderParameters['rR']  # this should be in meters
    except (AttributeError, KeyError):
        # KeyError added: missing dictionary entries previously escaped
        # the AttributeError-only handler and crashed the run
        print('Either the yaw angle, rear wheel rate or ' +
              'front wheel radius is not available. The ' +
              'contact rates were not computed.')
    else:
        yawAngle = yawAngle.convert_units('radian')
        rearWheelRate = rearWheelRate.convert_units('radian/second')
        lon, lat = sigpro.rear_wheel_contact_rate(rR, rearWheelRate, yawAngle)
        lon.name = 'LongitudinalRearContactRate'
        lon.units = 'meter/second'
        self.taskSignals[lon.name] = lon
        lat.name = 'LateralRearContactRate'
        lat.units = 'meter/second'
        self.taskSignals[lat.name] = lat
def compute_yaw_angle(self):
    """Computes the yaw angle by integrating the yaw rate and stores it
    in ``self.taskSignals['YawAngle']``.

    If the yaw rate is not available the computation is skipped with a
    message instead of raising.
    """
    # get the yaw rate
    try:
        yawRate = self.taskSignals['YawRate']
    except (AttributeError, KeyError):
        # KeyError added: a missing dictionary entry previously escaped
        # the AttributeError-only handler and crashed the run
        print('YawRate is not available. The YawAngle was not computed.')
    else:
        # convert to radians per second
        yawRate = yawRate.convert_units('radian/second')
        # integrate and try to account for the drift
        yawAngle = yawRate.integrate(detrend=True)
        # set the new name and units
        yawAngle.name = 'YawAngle'
        yawAngle.units = 'radian'
        # store in task signals
        self.taskSignals['YawAngle'] = yawAngle
def compute_steer_torque(self, plot=False):
    """Computes the rider applied steer torque and stores it in
    ``self.computedSignals['SteerTorque']``.

    The measured steer tube torque is corrected for the dynamics of the
    handlebar assembly (inertia, viscous damping and Coulomb friction)
    using the frame and fork motion measurements.

    Parameters
    ----------
    plot : boolean, optional
        Default is False, but if True a plot of the torque components is
        generated, shown and returned.
    """
    # frame angular rate, angular acceleration and acceleration from the
    # rate gyros and accelerometers mounted on the frame
    frameAngRate = np.vstack((
        self.truncatedSignals['AngularRateX'],
        self.truncatedSignals['AngularRateY'],
        self.truncatedSignals['AngularRateZ']))
    frameAngAccel = np.vstack((
        self.truncatedSignals['AngularRateX'].time_derivative(),
        self.truncatedSignals['AngularRateY'].time_derivative(),
        self.truncatedSignals['AngularRateZ'].time_derivative()))
    frameAccel = np.vstack((
        self.truncatedSignals['AccelerationX'],
        self.truncatedSignals['AccelerationY'],
        self.truncatedSignals['AccelerationZ']))
    handlebarAngRate = self.truncatedSignals['ForkRate']
    handlebarAngAccel = self.truncatedSignals['ForkRate'].time_derivative()
    steerAngle = self.truncatedSignals['SteerAngle']
    steerColumnTorque = \
        self.truncatedSignals['SteerTubeTorque'].convert_units('newton*meter')
    handlebarMass = self.bicycleRiderParameters['mG']
    handlebarInertia = \
        self.bicycle.steer_assembly_moment_of_inertia(fork=False,
            wheel=False, nominal=True)
    # this is the distance from the handlebar center of mass to the
    # steer axis
    w = self.bicycleRiderParameters['w']
    c = self.bicycleRiderParameters['c']
    lam = self.bicycleRiderParameters['lam']
    xG = self.bicycleRiderParameters['xG']
    zG = self.bicycleRiderParameters['zG']
    handlebarCoM = np.array([xG, 0., zG])
    d = bp.geometry.distance_to_steer_axis(w, c, lam, handlebarCoM)
    # these are the distances from the point on the steer axis which is
    # aligned with the handlebar center of mass to the accelerometer on
    # the frame
    ds1 = self.bicycle.parameters['Measured']['ds1']
    ds3 = self.bicycle.parameters['Measured']['ds3']
    ds = np.array([ds1, 0., ds3])  # i measured these
    # damping and friction values come from Peter's work, I need to verify
    # them still
    damping = 0.3475
    friction = 0.0861
    components = sigpro.steer_torque_components(
        frameAngRate, frameAngAccel, frameAccel, handlebarAngRate,
        handlebarAngAccel, steerAngle, steerColumnTorque,
        handlebarMass, handlebarInertia, damping, friction, d, ds)
    steerTorque = sigpro.steer_torque(components)
    stDict = {'units': 'newton*meter',
              'name': 'SteerTorque',
              'runid': self.metadata['RunID'],
              'sampleRate': steerAngle.sampleRate,
              'source': 'NA'}
    self.computedSignals['SteerTorque'] = Signal(steerTorque, stDict)
    if plot is True:
        time = steerAngle.time()
        # total rate-of-angular-momentum and moment-arm contributions
        hdot = (components['Hdot1'] + components['Hdot2'] +
                components['Hdot3'] + components['Hdot4'])
        cross = (components['cross1'] + components['cross2'] +
                 components['cross3'])
        fig = plt.figure()
        frictionAx = fig.add_subplot(4, 1, 1)
        frictionAx.plot(time, components['viscous'],
                        time, components['coulomb'],
                        time, components['viscous'] + components['coulomb'])
        frictionAx.set_ylabel('Torque [N-m]')
        frictionAx.legend(('Viscous Friction', 'Coulomb Friction',
                           'Total Friction'))
        dynamicAx = fig.add_subplot(4, 1, 2)
        dynamicAx.plot(time, hdot, time, cross, time, hdot + cross)
        dynamicAx.set_ylabel('Torque [N-m]')
        dynamicAx.legend((r'Torque due to $\dot{H}$',
                          r'Torque due to $r \times m a$',
                          r'Total Dynamic Torque'))
        additionalAx = fig.add_subplot(4, 1, 3)
        additionalAx.plot(time, hdot + cross + components['viscous'] +
                          components['coulomb'],
                          label='Total Frictional and Dynamic Torque')
        additionalAx.set_ylabel('Torque [N-m]')
        additionalAx.legend()
        torqueAx = fig.add_subplot(4, 1, 4)
        torqueAx.plot(time, components['steerColumn'],
                      time, hdot + cross + components['viscous'] + components['coulomb'],
                      time, steerTorque)
        torqueAx.set_xlabel('Time [s]')
        torqueAx.set_ylabel('Torque [N-m]')
        torqueAx.legend(('Measured Torque', 'Frictional and Dynamic Torque',
                         'Rider Applied Torque'))
        plt.show()
        return fig
def compute_yaw_roll_pitch_rates(self):
    """Computes the yaw, roll and pitch rates of the bicycle frame from
    the body fixed rate gyro measurements and stores them in
    ``self.computedSignals``.

    If any needed signal or parameter is missing the computation is
    skipped with a message instead of raising.
    """
    try:
        omegaX = self.truncatedSignals['AngularRateX']
        omegaY = self.truncatedSignals['AngularRateY']
        omegaZ = self.truncatedSignals['AngularRateZ']
        rollAngle = self.truncatedSignals['RollAngle']
        lam = self.bicycleRiderParameters['lam']
    except (AttributeError, KeyError):
        # KeyError added: missing dictionary entries previously escaped
        # the AttributeError-only handler and crashed the run
        print('All needed signals are not available. ' +
              'Yaw, roll and pitch rates were not computed.')
    else:
        omegaX = omegaX.convert_units('radian/second')
        omegaY = omegaY.convert_units('radian/second')
        omegaZ = omegaZ.convert_units('radian/second')
        rollAngle = rollAngle.convert_units('radian')
        yr, rr, pr = sigpro.yaw_roll_pitch_rate(omegaX, omegaY, omegaZ, lam,
                                                rollAngle=rollAngle)
        yr.units = 'radian/second'
        yr.name = 'YawRate'
        rr.units = 'radian/second'
        rr.name = 'RollRate'
        pr.units = 'radian/second'
        pr.name = 'PitchRate'
        self.computedSignals['YawRate'] = yr
        self.computedSignals['RollRate'] = rr
        self.computedSignals['PitchRate'] = pr
def compute_steer_rate(self):
    """Calculates the steer rate from the frame and fork rates and
    stores it in ``self.computedSignals['SteerRate']``.

    If either input signal is missing the computation is skipped with a
    message instead of raising.
    """
    try:
        forkRate = self.truncatedSignals['ForkRate']
        omegaZ = self.truncatedSignals['AngularRateZ']
    except (AttributeError, KeyError):
        # KeyError added: a missing dictionary entry previously escaped
        # the AttributeError-only handler and crashed the run
        print('ForkRate or AngularRateZ is not available. ' +
              'SteerRate was not computed.')
    else:
        forkRate = forkRate.convert_units('radian/second')
        omegaZ = omegaZ.convert_units('radian/second')
        steerRate = sigpro.steer_rate(forkRate, omegaZ)
        steerRate.units = 'radian/second'
        steerRate.name = 'SteerRate'
        self.computedSignals['SteerRate'] = steerRate
def compute_forward_speed(self):
    """Calculates the magnitude of the main component of velocity of the
    center of the rear wheel and stores it in
    ``self.computedSignals['ForwardSpeed']`` in meters per second.

    If the wheel radius or wheel rate is missing the computation is
    skipped with a message instead of raising.
    """
    try:
        rR = self.bicycleRiderParameters['rR']
        rearWheelRate = self.truncatedSignals['RearWheelRate']
    except (AttributeError, KeyError):
        # KeyError added: missing dictionary entries previously escaped
        # the AttributeError-only handler and crashed the run; also
        # fixed the 'availabe' typo in the message
        print('rR or RearWheelRate is not available. ' +
              'ForwardSpeed was not computed.')
    else:
        rearWheelRate = rearWheelRate.convert_units('radian/second')
        # negated because a positive wheel rate corresponds to rolling
        # backwards in the measurement sign convention used here
        self.computedSignals['ForwardSpeed'] = -rR * rearWheelRate
        self.computedSignals['ForwardSpeed'].units = 'meter/second'
        self.computedSignals['ForwardSpeed'].name = 'ForwardSpeed'
def compute_pull_force(self):
    """Computes the pull force from the truncated pull force signal and
    stores it in ``self.computedSignals['PullForce']`` in newtons.

    If the signal is missing the computation is skipped with a message
    instead of raising.
    """
    try:
        pullForce = self.truncatedSignals['PullForce']
    except (AttributeError, KeyError):
        # KeyError added: a missing dictionary entry previously escaped
        # the AttributeError-only handler; also replaced the Python 2
        # only print statement with the function-call form
        print('PullForce was not available. PullForce was not computed.')
    else:
        pullForce = pullForce.convert_units('newton')
        pullForce.name = 'PullForce'
        pullForce.units = 'newton'
        self.computedSignals[pullForce.name] = pullForce
def __str__(self):
    """Returns a formatted, banner-framed summary of the run metadata."""
    divider = "=" * 79
    # NOTE(review): there is deliberately no space after 'Speed:' here;
    # the original template concatenated 'Speed:' and '{4}' without one,
    # and the output is preserved byte-for-byte.
    template = ('Run # {0}\nEnvironment: {1}\nRider: {2}\nBicycle: {3}'
                '\nSpeed:{4}\nManeuver: {5}\nNotes: {6}')
    fields = [self.metadata[key] for key in
              ('RunID', 'Environment', 'Rider', 'Bicycle', 'Speed',
               'Maneuver', 'Notes')]
    return '\n'.join([divider, template.format(*fields), divider])
def export(self, filetype, directory='exports'):
    """Exports the most processed set of signals to a file.

    Parameters
    ----------
    filetype : str
        The type of file to export the data to. Only 'mat' is currently
        implemented; 'csv' and 'pickle' raise NotImplementedError.
    directory : str, optional
        The directory the file is written to; it is created if needed.

    Raises
    ------
    NotImplementedError
        If ``filetype`` is anything other than 'mat'.
    """
    if filetype == 'mat':
        if not os.path.exists(directory):
            # print() function form replaces the Python 2 only statement
            print("Creating {0}".format(directory))
            os.makedirs(directory)
        exportData = {}
        exportData.update(self.metadata)
        # fall back through the signal sets from most to least processed
        try:
            exportData.update(self.taskSignals)
        except AttributeError:
            try:
                exportData.update(self.truncatedSignals)
            except AttributeError:
                exportData.update(self.calibratedSignals)
                print('Exported calibratedSignals to {}'.format(directory))
            else:
                print('Exported truncatedSignals to {}'.format(directory))
        else:
            print('Exported taskSignals to {}'.format(directory))
        filename = pad_with_zeros(str(self.metadata['RunID']), 5) + '.mat'
        io.savemat(os.path.join(directory, filename), exportData)
    else:
        raise NotImplementedError(('{0} method is not available' +
                                   ' yet.').format(filetype))
def extract_task(self):
    """Slices the computed signals such that data before the end of the
    bump is removed and unusable trailing data is removed, storing the
    result in ``self.taskSignals``.
    """
    # get the z acceleration from the VN-100, negated and low pass
    # filtered at 30 Hz so the bump is easy to locate
    acc = -self.truncatedSignals['AccelerometerAccelerationY'].filter(30.)
    # find the mean speed during the task (look at 200 samples in the
    # middle of the data)
    speed = self.computedSignals['ForwardSpeed']
    # NOTE(review): integer division is assumed for len(speed) / 2; under
    # Python 3 this would be a float and the slice would fail -- this
    # module targets Python 2.
    meanSpeed = speed[len(speed) / 2 - 100:len(speed) / 2 + 100].mean()
    wheelbase = self.bicycleRiderParameters['w']
    # find the bump
    indices = sigpro.find_bump(acc, acc.sampleRate, meanSpeed, wheelbase,
                               self.bumpLength)
    # if it is a pavilion run, then clip the end too
    # these are the runs that the length of track method of clipping
    # applies to
    straight = ['Track Straight Line With Disturbance',
                'Balance With Disturbance',
                'Balance',
                'Track Straight Line']
    if (self.metadata['Environment'] == 'Pavillion Floor' and
            self.metadata['Maneuver'] in straight):
        # this is based on the length of the track in the pavilion that we
        # measured on September 21st, 2011
        trackLength = 32. - wheelbase - self.bumpLength
        # NOTE(review): `end` is a float and is used as a slice bound
        # below -- confirm the Signal type accepts float indices.
        end = trackLength / meanSpeed * acc.sampleRate
        # i may need to clip the end based on the forward speed dropping
        # below certain threshold around the mean
    else:
        # if it isn't a pavilion run, don't clip the end
        end = -1
    self.taskSignals = {}
    for name, sig in self.computedSignals.items():
        self.taskSignals[name] = sig[indices[2]:end]
def load_rider(self, pathToParameterData):
    """Creates bicycle/rider attributes containing the physical
    parameters for the bicycle and rider of this run.

    Parameters
    ----------
    pathToParameterData : string
        The path to a data directory for the BicycleParameters package.

    Sets ``self.bicycle`` (a bp.Bicycle with the rider added) and
    ``self.bicycleRiderParameters`` (the benchmark parameter set with
    the uncertainties removed).
    """
    print("Loading the bicycle and rider data for " +
          "{} on {}".format(self.metadata['Rider'],
                            self.metadata['Bicycle']))
    # currently this isn't very generic, it only assumes that there was
    # Luke, Jason, and Charlie riding on the instrumented bicycle.
    rider = self.metadata['Rider']
    if rider == 'Charlie' or rider == 'Luke':
        # Charlie and Luke rode the bike in the same configuration
        bicycle = 'Rigidcl'
    elif rider == 'Jason':
        bicycle = 'Rigid'
    else:
        # NOTE: StandardError is Python 2 only; this module targets
        # Python 2 (see the print statements above)
        raise StandardError('There are no bicycle parameters ' +
                            'for {}'.format(rider))
    # force a recalculation (but not the period calcs, they take too long)
    self.bicycle = bp.Bicycle(bicycle, pathToData=pathToParameterData)
    try:
        self.bicycle.extras
    except AttributeError:
        pass
    else:
        self.bicycle.save_parameters()
    # force a recalculation of the human parameters
    self.bicycle.add_rider(rider)
    if self.bicycle.human is not None:
        self.bicycle.save_parameters()
    self.bicycleRiderParameters = \
        bp.io.remove_uncertainties(self.bicycle.parameters['Benchmark'])
def plot(self, *args, **kwargs):
    '''Returns a plot of the time series of various signals.

    Parameters
    ----------
    signalName : string
        These should be strings that correspond to the signals available in
        the computed data. If the first character of the string is `-` then
        the negative signal will be plotted. You can also scale the values
        by adding a value and an ``*`` such as ``'-10*RollRate'``. The
        negative sign always has to come first.
    signalType : string, optional
        This allows you to plot from the other signal types. Options are
        'task', 'computed', 'truncated', 'calibrated', 'raw'. The default
        is 'task'.
    '''
    if not kwargs:
        kwargs = {'signalType': 'task'}
    # build a name -> signal-dictionary mapping for whichever signal sets
    # have actually been computed so far
    mapping = {}
    for x in ['computed', 'truncated', 'calibrated', 'raw', 'task']:
        try:
            mapping[x] = getattr(self, x + 'Signals')
        except AttributeError:
            pass
    fig = plt.figure()
    ax = fig.add_axes([0.125, 0.125, 0.8, 0.7])
    leg = []
    for i, arg in enumerate(args):
        legName = arg
        sign = 1.
        # a negative sign is only allowed as the first character
        if '-' in arg and arg[0] != '-':
            raise ValueError('{} is incorrectly typed'.format(arg))
        elif '-' in arg and arg[0] == '-':
            arg = arg[1:]
            sign = -1.
        # if a multiplication factor is present
        if '*' in arg:
            mul, arg = arg.split('*')
        else:
            mul = 1.
        signal = sign * float(mul) * mapping[kwargs['signalType']][arg]
        ax.plot(signal.time(), signal)
        leg.append(legName + ' [' + mapping[kwargs['signalType']][arg].units + ']')
    ax.legend(leg)
    runid = pad_with_zeros(str(self.metadata['RunID']), 5)
    ax.set_title('Run: ' + runid + ', Rider: ' + self.metadata['Rider'] +
                 ', Speed: ' + str(self.metadata['Speed']) + 'm/s' + '\n' +
                 'Maneuver: ' + self.metadata['Maneuver'] +
                 ', Environment: ' + self.metadata['Environment'] + '\n' +
                 'Notes: ' + self.metadata['Notes'])
    ax.set_xlabel('Time [second]')
    ax.grid()
    return fig
def plot_wheel_contact(self, show=False):
    """Returns a plot of the rear and front wheel contact point traces
    in the ground plane.

    Parameters
    ----------
    show : boolean
        If true the plot will be displayed.

    Returns
    -------
    fig : matplotlib.Figure
    """
    q1 = self.taskSignals['LongitudinalRearContact']
    q2 = self.taskSignals['LateralRearContact']
    q9 = self.taskSignals['LongitudinalFrontContact']
    q10 = self.taskSignals['LateralFrontContact']
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    # both traces on the same axes: rear (q1, q2) and front (q9, q10)
    ax.plot(q1, q2, q9, q10)
    ax.set_xlabel('Distance [' + q1.units + ']')
    ax.set_ylabel('Distance [' + q2.units + ']')
    # the lateral deviation of interest is small, so fix the y range
    ax.set_ylim((-0.5, 0.5))
    rider = self.metadata['Rider']
    where = self.metadata['Environment']
    speed = '%1.2f' % self.taskSignals['ForwardSpeed'].mean()
    maneuver = self.metadata['Maneuver']
    ax.set_title(rider + ', ' + where + ', ' + maneuver + ' @ ' + speed + ' m/s')
    if show is True:
        fig.show()
    return fig
def verify_time_sync(self, show=True, saveDir=None):
    """Shows a plot of the acceleration signals that were used to
    synchronize the NI and VN data. If it doesn't show a good fit, then
    something is wrong.

    Parameters
    ----------
    show : boolean
        If true, the figure will be displayed.
    saveDir : str
        The path to a directory in which to save the figure; created if
        it does not exist.

    Returns
    -------
    fig : matplotlib.Figure
    """
    # truncated signals only exist for fully processed runs
    if self.topSig == 'calibrated':
        sigType = 'calibrated'
    else:
        sigType = 'truncated'
    # the NI signal is negated so the two accelerations overlay
    fig = self.plot('-AccelerometerAccelerationY', 'AccelerationZ',
                    signalType=sigType)
    ax = fig.axes[0]
    ax.set_xlim((0, 10))
    title = ax.get_title()
    ax.set_title(title + '\nSignal Type: ' + sigType)
    if saveDir is not None:
        if not os.path.exists(saveDir):
            # print() function form replaces the Python 2 only statement
            print("Creating {0}".format(saveDir))
            os.makedirs(saveDir)
        runid = run_id_string(self.metadata['RunID'])
        fig.savefig(os.path.join(saveDir, runid + '.png'))
    if show is True:
        fig.show()
    return fig
def video(self):
    '''Plays the video of the run with VLC, if one exists.'''
    # get the 5 digit string version of the run id
    runid = pad_with_zeros(str(self.metadata['RunID']), 5)
    viddir = os.path.join('..', 'Video')
    abspath = os.path.abspath(viddir)
    # check to see if there is a video for this run
    if (runid + '.mp4') in os.listdir(viddir):
        path = os.path.join(abspath, runid + '.mp4')
        # NOTE(review): the path is interpolated into a shell string; the
        # run id is internally generated, but subprocess with an argument
        # list would be safer than os.system here
        os.system('vlc "' + path + '"')
    else:
        # print() function form replaces the Python 2 only statement
        print("No video for this run")
def matlab_date_to_object(matDate):
    '''Returns a datetime object parsed from a Matlab `datestr()` output.

    Parameters
    ----------
    matDate : string
        String in the form '21-Mar-2011 14:45:54'.

    Returns
    -------
    datetime.datetime
        The equivalent Python datetime object.
    '''
    # the stray "| PypiClean" residue fused onto this line made the
    # original a syntax error; removed
    return datetime.datetime.strptime(matDate, '%d-%b-%Y %H:%M:%S')
/EOxServer-1.2.12-py3-none-any.whl/eoxserver/services/ows/wps/util.py |
from contextlib import closing
from logging import getLogger
try:
# available in Python 2.7+
from collections import OrderedDict
except ImportError:
from django.utils.datastructures import SortedDict as OrderedDict
from django.conf import settings
from django.utils.module_loading import import_string
from django.utils.six.moves.urllib import parse, request, error
from eoxserver.core.util.multiparttools import iterate as iterate_multipart
from eoxserver.services.ows.wps.config import (
DEFAULT_EOXS_PROCESSES, DEFAULT_EOXS_ASYNC_BACKENDS
)
from eoxserver.services.ows.wps.exceptions import NoSuchProcessError
def parse_named_parts(request):
    """Extract the named parts of a multi-part POST request.

    Returns a dictionary mapping each part's Content-Id header to the
    part's payload. Parts without a Content-Id header are skipped.
    Non-POST requests and non-multipart bodies yield an empty dictionary.
    """
    if request.method != 'POST':
        return {}
    content_type = request.META.get("CONTENT_TYPE", "")
    if not content_type.startswith("multipart"):
        return {}
    named = {}
    for headers, data in iterate_multipart(
            request.body, headers={"Content-Type": content_type}):
        content_id = headers.get("Content-Id")
        if content_id:
            named[content_id] = data
    return named
class InMemoryURLResolver(object):
    # pylint: disable=too-few-public-methods, no-self-use
    """Simple in-memory URL resolver.

    Resolves ``cid:`` references against a dictionary of multipart parts
    and ``http(s):`` references over the network, returning the
    referenced content as a data string.
    """

    def __init__(self, parts=None, logger=None):
        # mapping of content-id -> payload, used for cid: resolution
        self.parts = parts or {}
        self.logger = logger or getLogger(__name__)

    def __call__(self, href, body, headers):
        """Resolve a reference URL and return its payload."""
        self.logger.debug(
            "Resolving reference: %s%s", href, "" if body is None else " (POST)"
        )
        url = parse.urlparse(href)
        if url.scheme in ('http', 'https'):
            return self._resolve_http(href, body, headers)
        if url.scheme == "cid":
            return self._resolve_multipart(url.path)
        raise ValueError("Unsupported URL scheme %r!" % url.scheme)

    def _resolve_multipart(self, content_id):
        """Look up a multipart-related payload by its content id."""
        if content_id not in self.parts:
            raise ValueError("No part with content-id %r." % content_id)
        return self.parts[content_id]

    def _resolve_http(self, href, body=None, headers=None):
        """Perform the HTTP request and return the response body."""
        try:
            url_request = request.Request(href, body, dict(headers))
            with closing(request.urlopen(url_request)) as fobj:
                return fobj.read()
        except error.URLError as exc:
            raise ValueError(str(exc))
# Lazily populated module-level caches: filled on first access through
# get_processes() / get_async_backends() respectively.
PROCESSES = None
ASYNC_BACKENDS = None
def _setup_processes():
    """Instantiate the configured WPS process classes.

    Reads the list of dotted import paths from the ``EOXS_PROCESSES``
    setting (falling back to the built-in default list) and stores one
    instance of each class in the module-level ``PROCESSES`` cache.
    """
    global PROCESSES
    specifiers = getattr(
        settings, 'EOXS_PROCESSES',
        DEFAULT_EOXS_PROCESSES
    )
    PROCESSES = [
        import_string(identifier)()
        for identifier in specifiers
    ]
def _setup_async_backends():
    """Instantiate the configured asynchronous backend classes.

    Reads the list of dotted import paths from the ``EOXS_ASYNC_BACKENDS``
    setting (falling back to the built-in default list) and stores one
    instance of each class in the module-level ``ASYNC_BACKENDS`` cache.
    """
    global ASYNC_BACKENDS
    specifiers = getattr(
        settings, 'EOXS_ASYNC_BACKENDS',
        DEFAULT_EOXS_ASYNC_BACKENDS
    )
    ASYNC_BACKENDS = [
        import_string(identifier)()
        for identifier in specifiers
    ]
def get_processes():
    """Return the cached list of process instances, building it on
    first use."""
    if PROCESSES is None:
        _setup_processes()
    return PROCESSES
def get_process_by_identifier(identifier: str):
    """Return the registered process matching the given identifier.

    A process that does not declare an ``identifier`` attribute is
    matched by its class name instead. Raises ``NoSuchProcessError``
    when no registered process matches.
    """
    matches = (
        process for process in get_processes()
        if (getattr(process, 'identifier', None) or
            type(process).__name__) == identifier
    )
    found = next(matches, None)
    if found is None:
        raise NoSuchProcessError(identifier)
    return found
def get_async_backends():
    """Return the cached list of asynchronous backend instances,
    building it on first use."""
    if ASYNC_BACKENDS is None:
        _setup_async_backends()
    return ASYNC_BACKENDS
/BotEXBotBase-3.1.3.tar.gz/BotEXBotBase-3.1.3/discord/invite.py | from .utils import parse_time
from .mixins import Hashable
from .object import Object
class Invite(Hashable):
    """Represents a Discord :class:`Guild` or :class:`abc.GuildChannel` invite.

    Depending on the way this object was created, some of the attributes can
    have a value of ``None``.

    .. container:: operations

        .. describe:: x == y

            Checks if two invites are equal.

        .. describe:: x != y

            Checks if two invites are not equal.

        .. describe:: hash(x)

            Returns the invite hash.

        .. describe:: str(x)

            Returns the invite URL.

    Attributes
    -----------
    max_age: :class:`int`
        How long before the invite expires in seconds. A value of 0 indicates that it doesn't expire.
    code: :class:`str`
        The URL fragment used for the invite.
    guild: :class:`Guild`
        The guild the invite is for.
    revoked: :class:`bool`
        Indicates if the invite has been revoked.
    created_at: `datetime.datetime`
        A datetime object denoting the time the invite was created.
    temporary: :class:`bool`
        Indicates that the invite grants temporary membership.
        If True, members who joined via this invite will be kicked upon disconnect.
    uses: :class:`int`
        How many times the invite has been used.
    max_uses: :class:`int`
        How many times the invite can be used.
    inviter: :class:`User`
        The user who created the invite.
    channel: :class:`abc.GuildChannel`
        The channel the invite is for.
    """

    __slots__ = (
        "max_age",
        "code",
        "guild",
        "revoked",
        "created_at",
        "uses",
        "temporary",
        "max_uses",
        "inviter",
        "channel",
        "_state",
    )

    def __init__(self, *, state, data):
        self._state = state
        self.max_age = data.get("max_age")
        self.code = data.get("code")
        self.guild = data.get("guild")
        self.revoked = data.get("revoked")
        self.created_at = parse_time(data.get("created_at"))
        self.temporary = data.get("temporary")
        self.uses = data.get("uses")
        self.max_uses = data.get("max_uses")
        # the inviter payload is a raw user dict; materialize it through
        # the connection state's user cache when present
        inviter_data = data.get("inviter")
        self.inviter = None if inviter_data is None else self._state.store_user(inviter_data)
        self.channel = data.get("channel")

    @classmethod
    def from_incomplete(cls, *, state, data):
        """Builds an Invite from a partial payload, substituting Object
        placeholders for the guild/channel when they aren't cached."""
        guild_id = int(data["guild"]["id"])
        channel_id = int(data["channel"]["id"])
        guild = state._get_guild(guild_id)
        if guild is not None:
            channel = guild.get_channel(channel_id)
        else:
            guild = Object(id=guild_id)
            channel = Object(id=channel_id)
        guild.name = data["guild"]["name"]
        guild.splash = data["guild"]["splash"]
        guild.splash_url = ""
        if guild.splash:
            guild.splash_url = "https://cdn.discordapp.com/splashes/{0.id}/{0.splash}.jpg?size=2048".format(
                guild
            )
        channel.name = data["channel"]["name"]
        data["guild"] = guild
        data["channel"] = channel
        return cls(state=state, data=data)

    def __str__(self):
        return self.url

    def __repr__(self):
        return "<Invite code={0.code!r}>".format(self)

    def __hash__(self):
        return hash(self.code)

    @property
    def id(self):
        """Returns the proper code portion of the invite."""
        return self.code

    @property
    def url(self):
        """A property that retrieves the invite URL."""
        return "http://discord.gg/" + self.code

    async def delete(self, *, reason=None):
        """|coro|

        Revokes the instant invite.

        You must have the :attr:`~Permissions.manage_channels` permission to do this.

        Parameters
        -----------
        reason: Optional[str]
            The reason for deleting this invite. Shows up on the audit log.

        Raises
        -------
        Forbidden
            You do not have permissions to revoke invites.
        NotFound
            The invite is invalid or expired.
        HTTPException
            Revoking the invite failed.
        """
        # stray "| PypiClean" residue fused onto this line in the source
        # made it a syntax error; removed
        await self._state.http.delete_invite(self.code, reason=reason)
/NovalIDE-1.1.8-py3-none-any.whl/noval/python/interpreter/pythonpath.py | from noval import _
import tkinter as tk
from tkinter import ttk,messagebox
import noval.python.parser.utils as parserutils
import noval.python.interpreter.pythonpathmixin as pythonpathmixin
import noval.util.utils as utils
class PythonPathPanel(ttk.Frame, pythonpathmixin.PythonpathMixin):
    """Panel showing an interpreter's sys.path and PYTHONPATH entries in a tree."""

    def __init__(self, parent):
        ttk.Frame.__init__(self, parent)
        # NOTE(review): InitUI() (from the mixin) runs before _interpreter is
        # initialised — confirm it does not read self._interpreter.
        self.InitUI()
        self._interpreter = None

    def AppendSysPath(self, interpreter):
        """Rebuild the tree from the interpreter's sys.path + PYTHONPATH lists."""
        self._interpreter = interpreter
        self.treeview._clear_tree()
        if self._interpreter is not None:
            root_item = self.treeview.tree.insert("", "end", text=_("Path List"), image=self.LibraryIcon)
            path_list = interpreter.SysPathList + interpreter.PythonPathList
            for path in path_list:
                if path.strip() == "":
                    continue
                # process path contains chinese character (Python 2 only)
                if utils.is_py2():
                    path = self.ConvertPath(path)
                self.treeview.tree.insert(root_item, "end", text=path, image=self.LibraryIcon)
            self.treeview.tree.item(root_item, open=True)
        self.UpdateUI()

    def RemovePath(self):
        """Remove the selected entry; system (sys.path) entries are protected."""
        if self._interpreter is None:
            return
        selections = self.treeview.tree.selection()
        if not selections:
            return
        # The first child is the "Path List" root node, which cannot be removed.
        if selections[0] == self.treeview.tree.get_children()[0]:
            return
        path = self.treeview.tree.item(selections[0], "text")
        if parserutils.PathsContainPath(self._interpreter.SysPathList, path):
            messagebox.showerror(_("Error"), _("The Python System Path could not be removed"), parent=self)
            return
        pythonpathmixin.PythonpathMixin.RemovePath(self)

    def CheckPythonPathList(self):
        """Persist the edited PYTHONPATH onto the interpreter when it changed.

        Returns True when a change was detected (and stored).
        """
        python_path_list = self.GetPythonPathFromPathList()
        is_pythonpath_changed = self.IsPythonPathChanged(python_path_list)
        if is_pythonpath_changed:
            self._interpreter.PythonPathList = python_path_list
        return is_pythonpath_changed

    def IsPythonPathChanged(self, python_path_list):
        """Return True when *python_path_list* differs from the stored one."""
        if self._interpreter is None:
            return False
        if len(python_path_list) != len(self._interpreter.PythonPathList):
            return True
        for pythonpath in python_path_list:
            if not parserutils.PathsContainPath(self._interpreter.PythonPathList, pythonpath):
                return True
        return False

    def CheckPythonPath(self):
        return self.IsPythonPathChanged(self.GetPythonPathFromPathList())

    def GetPythonPathFromPathList(self):
        """Return tree entries that are not already part of the interpreter's sys.path."""
        if self._interpreter is None:
            return []
        python_path_list = []
        for path in self.GetPathList():
            # process path contains chinese character (Python 2 only); the
            # conditional expression also fixes a previously possible
            # unbound 'new_path' when neither version branch matched.
            new_path = self.ConvertPath(path) if utils.is_py2() else path
            if not parserutils.PathsContainPath(self._interpreter.SysPathList, new_path):
                python_path_list.append(new_path)
        return python_path_list

    def UpdateUI(self):
        """Enable the editing buttons only when an interpreter is selected."""
        state = tk.DISABLED if self._interpreter is None else "normal"
        self.add_path_btn["state"] = state
        self.add_file_btn["state"] = state
        self.remove_path_btn["state"] = state

    def destroy(self):
        # 'menu' / 'button_menu' are presumably created by the mixin's InitUI.
        # Fix: removed a stray "| PypiClean" artifact from the final statement.
        if self.menu is not None:
            self.menu.destroy()
        self.button_menu.destroy()
        ttk.Frame.destroy(self)
/Adafruit_Blinka-8.20.1-py3-none-any.whl/adafruit_blinka/microcontroller/nova/uart.py | """UART Class for Binho Nova"""
from adafruit_blinka.microcontroller.nova import Connection
class UART:
    """Custom UART Class for Binho Nova"""

    # Byte sequence that switches the Nova out of UART bridge mode.
    ESCAPE_SEQUENCE = "+++UART0"

    # pylint: disable=too-many-arguments,unused-argument
    def __init__(
        self,
        portid,
        baudrate=9600,
        bits=8,
        parity=None,
        stop=1,
        timeout=1000,
        read_buf_len=None,
        flow=None,
    ):
        """Configure port *portid* of the Nova as a UART bridge.

        ``flow`` control is unsupported and raises NotImplementedError.
        """
        self._nova = Connection.getInstance()
        self._id = portid
        self._baudrate = baudrate
        self._parity = parity
        self._bits = bits
        self._stop = stop
        self._timeout = timeout
        if flow is not None:  # default 0
            raise NotImplementedError(
                "Parameter '{}' unsupported on Binho Nova".format("flow")
            )
        self._nova.setOperationMode(self._id, "UART")
        self._nova.setBaudRateUART(self._id, baudrate)
        self._nova.setDataBitsUART(self._id, bits)
        self._nova.setParityUART(self._id, parity)
        self._nova.setStopBitsUART(self._id, stop)
        self._nova.setEscapeSequenceUART(self._id, UART.ESCAPE_SEQUENCE)
        self._nova.beginBridgeUART(self._id)
    # pylint: enable=too-many-arguments,unused-argument

    def __del__(self):
        """Close Nova on delete"""
        # NOTE(review): this closes the shared Connection singleton, which
        # would also affect other peripherals using it — confirm intended.
        self.deinit()
        self._nova.close()

    def deinit(self):
        """Deinitialize"""
        self._nova.writeBridgeUART(UART.ESCAPE_SEQUENCE)
        # NOTE(review): stopBridgeUART() receives the escape sequence here
        # while every other bridge call takes the port id — confirm argument.
        self._nova.stopBridgeUART(UART.ESCAPE_SEQUENCE)

    def read(self, nbytes=None):
        """Read *nbytes* from UART and return them as a bytearray."""
        if nbytes is None:
            return None
        data = bytearray()
        for _ in range(nbytes):
            data.append(ord(self._nova.readBridgeUART()))
        return data

    def readinto(self, buf, nbytes=None):
        """Read *nbytes* from UART, appending them to *buf*."""
        if nbytes is None:
            return None
        for _ in range(nbytes):
            buf.append(ord(self._nova.readBridgeUART()))
        return buf

    def readline(self):
        """Read a single line of data from UART (terminated by carriage return)."""
        out = self._nova.readBridgeUART()
        line = out
        while out != "\r":
            out = self._nova.readBridgeUART()
            line += out
        return line

    def write(self, buf):
        """Write data from the buffer to UART."""
        # Fix: removed a stray "| PypiClean" artifact from this return.
        return self._nova.writeBridgeUART(buf)
/DoorPi-2.4.1.8.tar.gz/DoorPi-2.4.1.8/doorpi/sipphone/from_linphone.py |
import logging
logger = logging.getLogger(__name__)
logger.debug("%s loaded", __name__)
import datetime
from AbstractBaseClass import SipphoneAbstractBaseClass, SIPPHONE_SECTION
import linphone as lin
from doorpi import DoorPi
from doorpi.sipphone.linphone_lib.CallBacks import LinphoneCallbacks
from doorpi.sipphone.linphone_lib.Player import LinphonePlayer
from doorpi.sipphone.linphone_lib.Recorder import LinphoneRecorder
from doorpi.media.CreateDialTone import generate_dial_tone
conf = DoorPi().config
def log_handler(level, msg):
    """Forward a linphone log line to the module logger, dropping noisy internals."""
    noise = (
        "pylinphone_Core_instance_method_iterate",
        "pylinphone_Core_get_current_call",
        "pylinphone_Call_from_native_ptr",
        ": keep alive sent to [",
    )
    if any(marker in msg for marker in noise):
        return
    # 'level' names a logger method ('debug', 'info', ...).
    getattr(logger, level)(msg)
# Only route linphone's native log output through Python at very verbose
# levels (below DEBUG), since the handler is called extremely often.
if logger.getEffectiveLevel() <= 5: lin.set_log_handler(log_handler)
# Factory entry point used by the DoorPi sipphone loader.
def get(*args, **kwargs): return LinPhone(*args, **kwargs)
class LinPhone(SipphoneAbstractBaseClass):
    """DoorPi sipphone implementation wrapping the python linphone bindings."""

    @property
    def name(self): return 'linphone wrapper'

    @property
    def lib(self): return self.__Lib

    @property
    def core(self): return self.__Lib

    @property
    def recorder(self): return self.__recorder
    __recorder = None

    @property
    def player(self): return self.__player
    __player = None

    @property
    def current_call(self): return self.core.current_call

    @property
    def video_devices(self):
        """Available camera names as [{'name': ...}]; [] on any error."""
        try:
            all_devices = []
            for video_device in self.core.video_devices:
                all_devices.append({
                    'name': video_device
                })
            return all_devices
        except Exception:
            return []

    @property
    def sound_devices(self):
        """Available sound devices with capture/playback flags; [] on error."""
        try:
            all_devices = []
            for sound_device in self.core.sound_devices:
                all_devices.append({
                    'name': sound_device,
                    'capture': self.core.sound_device_can_capture(sound_device),
                    'record': self.core.sound_device_can_playback(sound_device)
                })
            return all_devices
        except Exception as exp:
            logger.exception(exp)
            return []

    def _create_payload_enum(self, payloads):
        """Describe codec payloads as dicts (name/channels/bitrate/enable)."""
        try:
            all_codecs = []
            for codec in payloads:
                all_codecs.append({
                    'name': codec.mime_type,
                    'channels': codec.channels,
                    'bitrate': codec.normal_bitrate,
                    'enable': self.core.payload_type_enabled(codec)
                })
            return all_codecs
        except Exception as exp:
            logger.exception(exp)
            return []

    @property
    def video_codecs(self):
        return self._create_payload_enum(self.core.video_codecs)

    @property
    def sound_codecs(self):
        return self._create_payload_enum(self.core.audio_codecs)

    @property
    def current_call_duration(self):
        """Seconds since the current call started; 0 when no call is active."""
        if not self.current_call: return 0
        diff_start_and_now = datetime.datetime.utcnow() - self.__current_call_start_datetime
        return diff_start_and_now.total_seconds()

    @property
    def current_call_dump(self):
        """Snapshot of the running call for status pages; {} on any error."""
        try:
            return {
                'direction': 'incoming' if self.current_call.dir == 0 else 'outgoing',
                'remote_uri': self.current_call.remote_address_as_string,
                'total_time': self.current_call_duration,
                'level_incoming': self.current_call.record_volume,
                'level_outgoing': self.current_call.play_volume,
                'camera': self.current_call.camera_enabled
            }
        except Exception:
            return {}

    #TODO: Datetime from linphone CallLog.start_date is more then 30 sec different to python datetime.utcnow()?
    __current_call_start_datetime = datetime.datetime.utcnow()

    @property
    def base_config(self):
        """Default CallParams (record file + video enabled) for outgoing calls."""
        params = self.core.create_call_params(None)
        params.record_file = self.recorder.parsed_record_filename
        params.video_enabled = True
        return params

    def reset_call_start_datetime(self):
        self.__current_call_start_datetime = datetime.datetime.utcnow()
        logger.debug('reset current call start datetime to %s', self.__current_call_start_datetime)
        return self.__current_call_start_datetime

    def __init__(self, whitelist=None, *args, **kwargs):
        # Fix: default for 'whitelist' changed from the mutable 'list()' to
        # None (same effective value; the parameter is unused in this class).
        whitelist = [] if whitelist is None else whitelist
        logger.debug("__init__")
        DoorPi().event_handler.register_action('OnShutdown', self.destroy)
        DoorPi().event_handler.register_event('OnSipPhoneCreate', __name__)
        DoorPi().event_handler.register_event('OnSipPhoneStart', __name__)
        DoorPi().event_handler.register_event('OnSipPhoneDestroy', __name__)
        DoorPi().event_handler.register_event('OnSipPhoneRecorderCreate', __name__)
        DoorPi().event_handler.register_event('OnSipPhoneRecorderDestroy', __name__)
        DoorPi().event_handler.register_event('BeforeSipPhoneMakeCall', __name__)
        DoorPi().event_handler.register_event('OnSipPhoneMakeCall', __name__)
        DoorPi().event_handler.register_event('OnSipPhoneMakeCallFailed', __name__)
        DoorPi().event_handler.register_event('AfterSipPhoneMakeCall', __name__)
        DoorPi().event_handler.register_event('OnSipPhoneCallTimeoutNoResponse', __name__)
        DoorPi().event_handler.register_event('OnSipPhoneCallTimeoutMaxCalltime', __name__)
        DoorPi().event_handler.register_event('OnPlayerCreated', __name__)
        #http://pythonhosted.org/linphone/api_reference.html#linphone.Core.new
        self.callback = LinphoneCallbacks()
        config_path = None
        factory_config_path = None
        self.__Lib = lin.Core.new(
            self.callback.used_callbacks,
            config_path,
            factory_config_path
        )
        self.core.primary_contact = '%s <sip:doorpi@127.0.0.1>'%conf.get(SIPPHONE_SECTION, "identity", 'DoorPi')

    def start(self):
        """Apply configuration to the linphone core and register with SIP."""
        DoorPi().event_handler('OnSipPhoneCreate', __name__)
        self.core.max_calls = conf.get_int(SIPPHONE_SECTION, 'ua.max_calls', 2)
        self.core.echo_cancellation_enabled = conf.get_bool(SIPPHONE_SECTION, 'echo_cancellation_enabled', False)
        # set local listen ports, default: random
        self.core.sip_transports = lin.SipTransports(conf.get_int(SIPPHONE_SECTION, 'local_port', 5060), conf.get_int(SIPPHONE_SECTION, 'local_port', 5060), -1, -1)
        self.core.video_display_enabled = conf.get_bool(SIPPHONE_SECTION, 'video_display_enabled', False)
        self.core.stun_server = conf.get(SIPPHONE_SECTION, 'stun_server', '')
        firewall_policy = conf.get(SIPPHONE_SECTION, 'FirewallPolicy', 'PolicyNoFirewall')
        if firewall_policy == "PolicyNoFirewall": self.core.firewall_policy = lin.FirewallPolicy.PolicyNoFirewall
        elif firewall_policy == "PolicyUseNatAddress": self.core.firewall_policy = lin.FirewallPolicy.PolicyUseNatAddress
        elif firewall_policy == "PolicyUseStun": self.core.firewall_policy = lin.FirewallPolicy.PolicyUseStun
        elif firewall_policy == "PolicyUseIce": self.core.firewall_policy = lin.FirewallPolicy.PolicyUseIce
        elif firewall_policy == "PolicyUseUpnp": self.core.firewall_policy = lin.FirewallPolicy.PolicyUseUpnp
        else: self.core.firewall_policy = lin.FirewallPolicy.PolicyNoFirewall
        #http://pythonhosted.org/linphone/api_reference.html#linphone.Core.in_call_timeout
        #After this timeout period, the call is automatically hangup.
        self.core.in_call_timeout = conf.get_int(SIPPHONE_SECTION, 'max_call_time', 120)
        #http://pythonhosted.org/linphone/api_reference.html#linphone.Core.inc_timeout
        #If an incoming call isn't answered for this timeout period, it is automatically declined.
        self.core.inc_timeout = conf.get_int(SIPPHONE_SECTION, 'call_timeout', 15)
        self.__player = LinphonePlayer()
        self.core.ringback = self.player.player_filename
        self.__recorder = LinphoneRecorder()
        if len(self.core.sound_devices) == 0:
            logger.warning('no audio devices available')
        else:
            self.core.capture_device = conf.get(SIPPHONE_SECTION, 'capture_device', self.core.capture_device)
            self.core.playback_device = conf.get(SIPPHONE_SECTION, 'playback_device', self.core.playback_device)
            logger.info("found %s possible sounddevices:", len(self.core.sound_devices))
            logger.debug("|rec|play| name")
            logger.debug("------------------------------------")
            for sound_device in self.core.sound_devices:
                logger.debug("| %s | %s | %s",
                    'X' if self.core.sound_device_can_capture(sound_device) else 'O',
                    'X' if self.core.sound_device_can_playback(sound_device) else 'O',
                    sound_device
                )
            logger.debug("------------------------------------")
            logger.debug("using capture_device: %s", self.core.capture_device)
            logger.debug("using playback_device: %s", self.core.playback_device)
        # Only enable PCMU and PCMA audio codecs by default
        config_audio_codecs = conf.get_list(SIPPHONE_SECTION, 'audio_codecs', 'PCMA,PCMU')
        for codec in self.core.audio_codecs:
            if codec.mime_type in config_audio_codecs:
                logger.debug('enable audio codec %s', codec.mime_type)
                self.core.enable_payload_type(codec, True)
            else:
                logger.debug('disable audio codec %s', codec.mime_type)
                self.core.enable_payload_type(codec, False)
        if len(self.core.video_devices) == 0:
            self.core.video_capture_enabled = False
            logger.warning('no video devices available')
        else:
            logger.info("found %s possible videodevices:", len(self.core.video_devices))
            logger.debug("| name")
            logger.debug("------------------------------------")
            for video_device in self.core.video_devices:
                logger.debug("| %s ", video_device)
            logger.debug("------------------------------------")
            config_camera = conf.get(SIPPHONE_SECTION, 'video_device', self.core.video_devices[0])
            if config_camera not in self.core.video_devices:
                logger.warning('camera "%s" from config does not exist in possible video devices.', config_camera)
                logger.debug('switching to first possible video device "%s"', self.core.video_devices[0])
                config_camera = self.core.video_devices[0]
            self.core.video_capture_enabled = True
            self.core.video_device = config_camera
            self.core.preferred_video_size_by_name = conf.get(SIPPHONE_SECTION, 'video_size', 'vga')
            logger.debug("using video_device: %s", self.core.video_device)
        # Only enable VP8 video codec
        config_video_codecs = conf.get_list(SIPPHONE_SECTION, 'video_codecs', 'VP8')
        for codec in self.core.video_codecs:
            if codec.mime_type in config_video_codecs and self.core.video_capture_enabled:
                logger.debug('enable video codec %s', codec.mime_type)
                self.core.enable_payload_type(codec, True)
            else:
                logger.debug('disable video codec %s', codec.mime_type)
                self.core.enable_payload_type(codec, False)
        # Configure the SIP account
        server = conf.get(SIPPHONE_SECTION, "sipserver_server")
        username = conf.get(SIPPHONE_SECTION, "sipserver_username")
        password = conf.get(SIPPHONE_SECTION, "sipserver_password", username)
        realm = conf.get(SIPPHONE_SECTION, "sipserver_realm", server)
        if server and username and password:
            logger.info('using DoorPi with SIP-Server')
            proxy_cfg = self.core.create_proxy_config()
            proxy_cfg.identity_address = lin.Address.new("%s <sip:%s@%s>" % (
                conf.get(SIPPHONE_SECTION, "identity", 'DoorPi'), username, server)
            )
            proxy_cfg.server_addr = "sip:%s"%server
            proxy_cfg.register_enabled = True
            self.core.add_proxy_config(proxy_cfg)
            self.core.default_proxy_config = proxy_cfg
            auth_info = self.core.create_auth_info(username, None, password, None, None, realm)
            self.core.add_auth_info(auth_info)
        else:
            logger.info('using DoorPi without SIP-Server? Okay...')
            proxy_cfg = self.core.create_proxy_config()
            proxy_cfg.register_enabled = False
            self.core.add_proxy_config(proxy_cfg)
            self.core.default_proxy_config = proxy_cfg
        logger.debug('%s',self.core.proxy_config_list)
        logger.debug("start successfully")

    def destroy(self):
        """Terminate all calls and unregister this module's events."""
        logger.debug("destroy")
        self.core.terminate_all_calls()
        DoorPi().event_handler.fire_event_synchron('OnSipPhoneDestroy', __name__)
        DoorPi().event_handler.unregister_source(__name__, True)
        return

    def self_check(self, *args, **kwargs):
        """Drive the linphone main loop and enforce the two call timeouts."""
        if not self.core: return
        self.core.iterate()
        if not self.current_call: return
        if self.current_call.state < lin.CallState.Connected:
            # Not yet answered: enforce the no-response timeout.
            if self.current_call_duration > self.core.inc_timeout - 0.5:
                logger.info("call timeout - hangup current call after %s seconds (max. %s)", self.current_call_duration, self.core.inc_timeout)
                self.core.terminate_all_calls()
                DoorPi().event_handler('OnSipPhoneCallTimeoutNoResponse', __name__)
        else:
            # Answered: enforce the maximum call time.
            if int(self.current_call_duration) > self.core.in_call_timeout - 0.5:
                logger.info("max call time reached - hangup current call after %s seconds (max. %s)", self.current_call_duration, self.core.in_call_timeout)
                self.core.terminate_all_calls()
                DoorPi().event_handler('OnSipPhoneCallTimeoutMaxCalltime', __name__)

    def call(self, number):
        """Start (or toggle) a call to *number*; returns the current call object."""
        DoorPi().event_handler('BeforeSipPhoneMakeCall', __name__, {'number':number})
        logger.debug("call (%s)",str(number))
        if not self.current_call:
            logger.debug('no current call -> start new call')
            self.reset_call_start_datetime()
            if self.core.invite_with_params(number, self.base_config) is None:
                if DoorPi().event_handler.db.get_event_log_entries_count('OnSipPhoneMakeCallFailed') > 5:
                    logger.error('failed to execute call five times')
                else:
                    DoorPi().event_handler('OnSipPhoneMakeCallFailed', __name__, {'number':number})
                return None
            DoorPi().event_handler('OnSipPhoneMakeCall', __name__, {'number':number})
        elif number in self.current_call.remote_address.as_string_uri_only():
            # Same number pressed again: ignore quick double presses, hang up otherwise.
            if self.current_call_duration <= 2:
                logger.debug("same call %s again while call is running since %s seconds? -> skip",
                    self.core.current_call.remote_address.as_string_uri_only(),
                    self.current_call_duration
                )
            else:
                logger.debug("press twice with call duration > 1 second? Want to hangup current call? OK...")
                self.core.terminate_all_calls()
        else:
            logger.debug("new call needed? hangup old first...")
            self.core.terminate_all_calls()
            self.call(number)
        DoorPi().event_handler('AfterSipPhoneMakeCall', __name__, {'number':number})
        return self.current_call

    def is_admin_number(self, remote_uri):
        return self.callback.is_admin_number(remote_uri)

    def hangup(self):
        """Terminate the current call, if any."""
        # Fix: removed a stray "| PypiClean" artifact from the final log call.
        if self.current_call:
            logger.debug("Received hangup request, cancelling current call")
            self.core.terminate_call(self.current_call)
        else:
            logger.debug("Ignoring hangup request as there is no ongoing call")
/Helmholtz-0.2.0.tar.gz/Helmholtz-0.2.0/helmholtz/editor/management/commands/equipment_constraints.py | from copy import deepcopy
# --- Editor/navigator entity declarations for the 'equipment' application. ---
# Each entity dict maps a Django content type (app_label + model) to editor
# constraints (form class, navigator visibility, 'shunt' shortcut display)
# plus an ordering 'position' and optional child entities.
material_constraints = [
    {
        'displayed_in_navigator':True,
        'shunt':True,
        'form':'helmholtz.editor.forms.equipment.MaterialForm',
    }
]
material = {
    'content_type':{
        'app_label':'equipment',
        'model':'material'
    },
    'position':1,
    'constraints':material_constraints,
}
stereotaxic_type_constraints = {
    'form':'helmholtz.editor.forms.equipment.StereotaxicTypeForm',
    'displayed_in_navigator':True,
    'shunt':True
}
stereotaxic_type = {
    'content_type':{
        'app_label':'equipment',
        'model':'stereotaxictype'
    },
    'position':2,
    'constraints':[
        stereotaxic_type_constraints
    ],
}
base_equipment_type_constraints = {
    'form':'helmholtz.editor.forms.equipment.EquipmentTypeForm',
    'displayed_in_navigator':True,
    'shunt':True
}
base_equipment_type = {
    'content_type':{
        'app_label':'equipment',
        'model':'equipmenttype'
    },
    'position':1,
    'constraints':[
        base_equipment_type_constraints
    ]
}
# equipment.EquipmentType displayed with both its subclasses and base class;
# children reference the entities declared above.
equipment_type = {
    'content_type':{
        'app_label':'equipment',
        'model':'equipmenttype'
    },
    'position':2,
    'constraints':[
        {
            'displayed_in_navigator':True,
            'display_subclasses':True,
            'display_base_class':True,
        }
    ],
    'children':[
        {'entity':base_equipment_type},
        {'entity':stereotaxic_type},
    ]
}
# --- Device / recording point entities and the electrode equipment variants. ---
# 'tableconstraint' entries describe the editable table widget: row actions
# (A=add, D=delete, M=modify), header/expansion columns, width and pagination.
device_constraints = {
    'tableconstraint':{
        'actions':[
            {'name':'A'},
            {'name':'D'},
            {'name':'M'},
        ],
        'displayed_in_navigator':False,
        'form':'helmholtz.editor.forms.equipment.DeviceForm',
        'in_expansion':[
            {'field':{'identifier':'notes'}},
        ],
        'in_header':[
            {'field':{'identifier':'id'}},
            {'field':{'identifier':'label'}},
            {'field':{'identifier':'serial_or_id'}},
        ],
        'width':"650px",
        'pagination':50
    }
}
device = {
    'force':True,
    'content_type':{
        'app_label':'equipment',
        'model':'device'
    },
    'position':1,
    'constraints':[
        device_constraints
    ]
}
recording_point_constraints = {
    'tableconstraint':{
        'shunt':True,
        'actions':[
            {'name':'A'},
            {'name':'D'},
            {'name':'M'},
        ],
        'displayed_in_navigator':False,
        'form':'helmholtz.editor.forms.equipment.RecordingPointForm',
        'in_header':[
            {'field':{'identifier':'id'}},
            {'field':{'identifier':'label'}},
            {'field':{'identifier':'number'}},
        ],
        'width':"650px",
        'pagination':50
    }
}
recording_point = {
    'force':True,
    'content_type':{
        'app_label':'equipment',
        'model':'recordingpoint'
    },
    'position':1,
    'constraints':[
        recording_point_constraints
    ]
}
generic_constraints = {
    'tableconstraint':{
        'displayed_in_navigator':False,
        'form':'helmholtz.editor.forms.equipment.GenericEquipmentForm',
        'in_expansion':[
            {'field':{'identifier':'recordingpoint_set'}},
            {'field':{'identifier':'device_set'}},
        ],
        'in_header':[
            {'field':{'identifier':'id'}},
            {'field':{'identifier':'model'}},
            {'field':{'identifier':'manufacturer'}},
            {'field':{'identifier':'type'}},
            {'field':{'verbose_name':'recording points', 'identifier':'recordingpoint_set.count'}},
            {'field':{'verbose_name':'devices', 'identifier':'device_set.count'}},
        ],
        'width':"750px",
        'pagination':50
    }
}
# Generic equipment shares the 'device'/'recording_point' child entities by
# reference; the electrode variants below use deepcopy to get private copies.
generic = {
    'content_type':{
        'app_label':'equipment',
        'model':'genericequipment'
    },
    'position':1,
    'constraints':[
        generic_constraints
    ],
    'children':[
        {'entity':device},
        {'entity':recording_point}
    ]
}
sharp_constraints = {
    'tableconstraint':{
        'displayed_in_navigator':False,
        'form':'helmholtz.editor.forms.electrophysiology.SharpElectrodeForm',
        'in_expansion':[
            {'field':{'identifier':'recordingpoint_set'}},
            {'field':{'identifier':'device_set'}},
        ],
        'in_header':[
            {'field':{'identifier':'id'}},
            {'field':{'identifier':'model'}},
            {'field':{'identifier':'manufacturer'}},
            {'field':{'identifier':'material'}},
            {'field':{'identifier':'external_diameter'}},
            {'field':{'identifier':'internal_diameter'}},
            {'field':{'verbose_name':'recording points', 'identifier':'recordingpoint_set.count'}},
            {'field':{'verbose_name':'devices', 'identifier':'device_set.count'}},
        ],
        'width':"750px",
        'pagination':50
    }
}
sharp = {
    'content_type':{
        'app_label':'electrophysiology',
        'model':'sharpelectrode'
    },
    'position':1,
    'constraints':[
        sharp_constraints
    ],
    'children':[
        {'entity':deepcopy(device)},
        {'entity':deepcopy(recording_point)}
    ]
}
patch_constraints = {
    'tableconstraint':{
        'displayed_in_navigator':False,
        'form':'helmholtz.editor.forms.electrophysiology.PatchElectrodeForm',
        'in_expansion':[
            {'field':{'identifier':'recordingpoint_set'}},
            {'field':{'identifier':'device_set'}},
        ],
        'in_header':[
            {'field':{'identifier':'id'}},
            {'field':{'identifier':'model'}},
            {'field':{'identifier':'manufacturer'}},
            {'field':{'identifier':'material'}},
            {'field':{'identifier':'external_diameter'}},
            {'field':{'identifier':'internal_diameter'}},
            {'field':{'verbose_name':'recording points', 'identifier':'recordingpoint_set.count'}},
            {'field':{'verbose_name':'devices', 'identifier':'device_set.count'}},
        ],
        'width':"750px",
        'pagination':50
    }
}
patch = {
    'content_type':{
        'app_label':'electrophysiology',
        'model':'patchelectrode'
    },
    'position':1,
    'constraints':[
        patch_constraints
    ],
    'children':[
        {'entity':deepcopy(device)},
        {'entity':deepcopy(recording_point)}
    ]
}
solid_constraints = {
    'tableconstraint':{
        'displayed_in_navigator':False,
        'form':'helmholtz.editor.forms.electrophysiology.SolidElectrodeForm',
        'in_expansion':[
            {'field':{'identifier':'recordingpoint_set'}},
            {'field':{'identifier':'device_set'}},
        ],
        'in_header':[
            {'field':{'identifier':'id'}},
            {'field':{'identifier':'model'}},
            {'field':{'identifier':'manufacturer'}},
            {'field':{'identifier':'material'}},
            {'field':{'identifier':'external_diameter'}},
            {'field':{'verbose_name':'recording points', 'identifier':'recordingpoint_set.count'}},
            {'field':{'verbose_name':'devices', 'identifier':'device_set.count'}},
        ],
        'width':"750px",
        'pagination':50
    }
}
solid = {
    'content_type':{
        'app_label':'electrophysiology',
        'model':'solidelectrode'
    },
    'position':1,
    'constraints':[
        solid_constraints
    ],
    'children':[
        {'entity':deepcopy(device)},
        {'entity':deepcopy(recording_point)}
    ]
}
# Aggregate 'equipment' entity: shows all equipment subclasses except the
# electrode intermediates listed below; children are the concrete variants.
equipment = {
    'content_type':{
        'app_label':'equipment',
        'model':'equipment'
    },
    'position':3,
    'constraints':[
        {
            'displayed_in_navigator':True,
            'display_subclasses':True,
            'display_base_class':False,
            'excluded_subclasses':[
                {'app_label':'electrophysiology', 'model':'electrode'},
                {'app_label':'electrophysiology', 'model':'discelectrode'},
                {'app_label':'electrophysiology', 'model':'hollowelectrode'},
                {'app_label':'electrophysiology', 'model':'multielectrode'},
            ]
        }
    ],
    'children':[
        {'entity':generic},
        {'entity':solid},
        {'entity':sharp},
        {'entity':patch},
    ]
}
setup_constraints = {
    'tableconstraint':{
        'displayed_in_navigator':False,
        'shunt':True,
        'form':'helmholtz.editor.forms.equipment.SetupForm',
        'in_header':[
            {'field':{'identifier':'id'}},
            {'field':{'identifier':'label'}},
            {'field':{'identifier':'place'}},
            {'field':{'identifier':'room'}},
            {'field':{'identifier':'experiment_set.count', 'verbose_name':'experiments'}},
        ],
        'width':"500px",
        'pagination':50
    }
}
# Setup entity; the 'subsystem' child is intentionally kept disabled below.
setup = {
    'content_type':{
        'app_label':'equipment',
        'model':'setup'
    },
    'position':4,
    'constraints':[
        setup_constraints
    ],
#    'children':[
#        {'entity':subsystem},
#    ]
}
# NOTE(review): 'solid', 'sharp' and 'patch' are re-bound here to electrode
# *configuration* entities. The earlier equipment entities with the same names
# were already captured by reference inside 'equipment' above, so the
# shadowing is harmless at runtime but easy to misread.
solid = {
    'content_type':{
        'app_label':'electrophysiology',
        'model':'solidelectrodeconfiguration'
    },
    'position':5,
    'constraints':[
        {
            'tableconstraint':{
                'displayed_in_navigator':False,
                'shunt':False,
                'form':'helmholtz.editor.forms.electrophysiology.SolidElectrodeConfigurationForm',
                'in_header':[
                    {'field':{'identifier':'id'}},
                    {'field':{'identifier':'label'}},
                    {'field':{'identifier':'resistance'}},
                    {'field':{'identifier':'amplification'}},
                    {'field':{'identifier':'filtering'}},
                ],
                'in_expansion':[
                    {'field':{'identifier':'notes'}},
                ],
                'width':"700px",
                'pagination':50
            }
        }
    ]
}
sharp = {
    'content_type':{
        'app_label':'electrophysiology',
        'model':'sharpelectrodeconfiguration'
    },
    'position':5,
    'constraints':[
        {
            'tableconstraint':{
                'displayed_in_navigator':False,
                'shunt':False,
                'form':'helmholtz.editor.forms.electrophysiology.SharpElectrodeConfigurationForm',
                'in_header':[
                    {'field':{'identifier':'id'}},
                    {'field':{'identifier':'label'}},
                    {'field':{'identifier':'solution'}},
                    {'field':{'identifier':'resistance'}},
                    {'field':{'identifier':'amplification'}},
                    {'field':{'identifier':'filtering'}},
                ],
                'in_expansion':[
                    {'field':{'identifier':'notes'}},
                ],
                'width':"800px",
                'pagination':50
            }
        }
    ]
}
patch = {
    'content_type':{
        'app_label':'electrophysiology',
        'model':'patchelectrodeconfiguration'
    },
    'position':5,
    'constraints':[
        {
            'tableconstraint':{
                'displayed_in_navigator':False,
                'shunt':False,
                'form':'helmholtz.editor.forms.electrophysiology.PatchElectrodeConfigurationForm',
                'in_header':[
                    {'field':{'identifier':'id'}},
                    {'field':{'identifier':'label'}},
                    {'field':{'identifier':'solution'}},
                    {'field':{'identifier':'resistance'}},
                    {'field':{'identifier':'seal_resistance'}},
                    {'field':{'identifier':'contact_configuration'}},
                    {'field':{'identifier':'amplification'}},
                    {'field':{'identifier':'filtering'}},
                ],
                'in_expansion':[
                    {'field':{'identifier':'notes'}},
                ],
                'width':"900px",
                'pagination':50
            }
        }
    ]
}
# Aggregate device-configuration entity; children are the three electrode
# configuration entities re-bound above.
# Fix: removed a stray "| PypiClean" artifact after the closing brace.
device_configuration = {
    'content_type':{
        'app_label':'equipment',
        'model':'deviceconfiguration'
    },
    'position':5,
    'constraints':[
        {
            'displayed_in_navigator':True,
            'display_subclasses':True,
            'display_base_class':False,
            'excluded_subclasses':[
                {'app_label':'optical_imaging', 'model':'cameraconfiguration'},
                {'app_label':'optical_imaging', 'model':'vsdoptical'},
                {'app_label':'optical_imaging', 'model':'intrinsicoptical'},
                {'app_label':'electrophysiology', 'model':'electrodeconfiguration'},
                {'app_label':'electrophysiology', 'model':'discelectrodeconfiguration'},
                {'app_label':'electrophysiology', 'model':'hollowelectrodeconfiguration'},
                {'app_label':'electrophysiology', 'model':'eeg'},
                {'app_label':'electrophysiology', 'model':'ekg'},
            ]
        }
    ],
    'children':[
        {'entity':solid},
        {'entity':sharp},
        {'entity':patch},
    ]
}
# AHP
层次分析法（Analytic Hierarchy Process, AHP）
## How to use
Install:

`pip install AHP`

Use:
```python
from AHP import AHP
import numpy as np
# 准则重要性矩阵
criteria = np.array([[1, 2, 7, 5, 5],
[1 / 2, 1, 4, 3, 3],
[1 / 7, 1 / 4, 1, 1 / 2, 1 / 3],
[1 / 5, 1 / 3, 2, 1, 1],
[1 / 5, 1 / 3, 3, 1, 1]])
# 对每个准则,方案优劣排序
b1 = np.array([[1, 1 / 3, 1 / 8], [3, 1, 1 / 3], [8, 3, 1]])
b2 = np.array([[1, 2, 5], [1 / 2, 1, 2], [1 / 5, 1 / 2, 1]])
b3 = np.array([[1, 1, 3], [1, 1, 3], [1 / 3, 1 / 3, 1]])
b4 = np.array([[1, 3, 4], [1 / 3, 1, 1], [1 / 4, 1, 1]])
b5 = np.array([[1, 4, 1 / 2], [1 / 4, 1, 1 / 4], [2, 4, 1]])
b = [b1, b2, b3, b4, b5]
a = AHP(criteria, b).run()
```
打印:
```text
==========准则层==========
最大特征值5.072084,CR=0.014533,检验通过
准则层权重 = [0.47583538 0.26360349 0.0538146 0.09806829 0.10867824]
==========方案层==========
方案0 方案1 方案2 最大特征值 CR 一致性检验
准则0 0.081935 0.236341 0.681725 3.001542 8.564584e-04 True
准则1 0.595379 0.276350 0.128271 3.005535 3.075062e-03 True
准则2 0.428571 0.428571 0.142857 3.000000 -4.934325e-16 True
准则3 0.633708 0.191921 0.174371 3.009203 5.112618e-03 True
准则4 0.344545 0.108525 0.546931 3.053622 2.978976e-02 True
==========目标层==========
[[0.318586 0.23898522 0.44242878]]
最优选择是方案2
```
/Hikka_Pyro-2.0.66-py3-none-any.whl/pyrogram/connection/transport/tcp/tcp_abridged_o.py |
import logging
import os
from typing import Optional
import pyrogram
from pyrogram.crypto import aes
from .tcp import TCP
log = logging.getLogger(__name__)
class TCPAbridgedO(TCP):
    """Obfuscated "abridged" MTProto TCP transport (AES-CTR encrypted frames)."""

    # First-4-byte values that would make the handshake nonce look like HTTP
    # or another MTProto transport; such nonces are regenerated.
    RESERVED = (b"HEAD", b"POST", b"GET ", b"OPTI", b"\xee" * 4)

    def __init__(self, ipv6: bool, proxy: dict):
        super().__init__(ipv6, proxy)
        self.encrypt = None  # (key, iv, state) for the outgoing CTR stream
        self.decrypt = None  # (key, iv, state) for the incoming CTR stream

    async def connect(self, address: tuple):
        """Connect and perform the obfuscated-transport handshake."""
        await super().connect(address)
        # Pick a random 64-byte nonce that cannot be mistaken for another
        # transport header, then tag bytes 56..59 with the abridged marker.
        while True:
            nonce = bytearray(os.urandom(64))
            if bytes([nonce[0]]) != b"\xef" and nonce[:4] not in self.RESERVED and nonce[4:8] != b"\x00" * 4:
                nonce[56] = nonce[57] = nonce[58] = nonce[59] = 0xef
                break
        temp = bytearray(nonce[55:7:-1])  # reversed nonce yields the reverse-direction keys
        self.encrypt = (nonce[8:40], nonce[40:56], bytearray(1))
        self.decrypt = (temp[0:32], temp[32:48], bytearray(1))
        # The tail travels encrypted so the server can verify the key setup.
        nonce[56:64] = aes.ctr256_encrypt(nonce, *self.encrypt)[56:64]
        await super().send(nonce)

    async def send(self, data: bytes, *args):
        """Frame *data* with the abridged length header and encrypt it."""
        length = len(data) // 4
        data = (bytes([length]) if length <= 126 else b"\x7f" + length.to_bytes(3, "little")) + data
        payload = await self.loop.run_in_executor(pyrogram.crypto_executor, aes.ctr256_encrypt, data, *self.encrypt)
        await super().send(payload)

    async def recv(self, length: int = 0) -> Optional[bytes]:
        """Read and decrypt one abridged frame; None when the peer closed."""
        length = await super().recv(1)
        if length is None:
            return None
        length = aes.ctr256_decrypt(length, *self.decrypt)
        if length == b"\x7f":  # extended (3-byte little-endian) length header
            length = await super().recv(3)
            if length is None:
                return None
            length = aes.ctr256_decrypt(length, *self.decrypt)
        data = await super().recv(int.from_bytes(length, "little") * 4)
        if data is None:
            return None
        # Fix: removed a stray "| PypiClean" artifact from this return.
        return await self.loop.run_in_executor(pyrogram.crypto_executor, aes.ctr256_decrypt, data, *self.decrypt)
/MemeLib-0.1.5-py3-none-any.whl/memelib/api.py | import aiohttp
import random
import requests
from memelib.errors import *
from memelib._utils import _format
class DankMemeClient:
"""The client to get memes from"""
def __init__(
self,
use_reddit_for_memes: bool = True,
reddit_user_agent: str = "MemeLib",
return_embed: bool = False,
embed_color=None,
):
"""Initialize a client. The embed color must be on 0xFFFFFF format"""
self.memes = {"random": "meme()"}
self.meme_subreddits = ["/dankmemes", "/memes", "/wholesomememes"]
self.agent = reddit_user_agent
self.usereddit = use_reddit_for_memes
self.return_embed = return_embed
self.embed_color = embed_color
async def async_meme(self, subreddit=None):
if self.usereddit and subreddit:
async with aiohttp.ClientSession() as session:
async with session.get(
f"https://reddit.com/r/{subreddit}/random.json",
headers={"user-agent": self.agent},
) as r:
res = await r.json()
if r.status != 200:
if r.status == 429:
raise RateLimitError(
"Uh-oh, it looks like you were ratelimited! Try changing your user agent by passing it in the `DankMemeClient` call."
)
return None
elif r.status == 404:
raise SubredditNotFoundError(
"Reddit's API returned a 404 error. Make sure the subreddit that you passed does not include the `r/` in front of it."
)
return None
else:
raise RedditApiError(
f"Reddit's API returned status code {r.status_code}"
)
return None
data = {
"title": res[0]["data"]["children"][0]["data"]["title"],
"author": f"u/{res[0]['data']['children'][0]['data']['author']}",
"subreddit": res[0]["data"]["children"][0]["data"][
"subreddit_name_prefixed"
],
"upvotes": res[0]["data"]["children"][0]["data"]["ups"],
"comments": res[0]["data"]["children"][0]["data"]["num_comments"],
"img_url": res[0]["data"]["children"][0]["data"]["url"],
"post_url": f"https://reddit.com{res[0]['data']['children'][0]['data']['permalink']}",
}
if not self.return_embed:
return data
else:
return _format(data, self.embed_color)
elif self.usereddit and not subreddit:
subreddit = random.choice(self.meme_subreddits)
async with aiohttp.ClientSession() as session:
async with session.get(
f"https://reddit.com/r/{subreddit}/random.json",
headers={"user-agent": self.agent},
) as r:
res = await r.json()
data = {
"title": res[0]["data"]["children"][0]["data"]["title"],
"author": f"u/{res[0]['data']['children'][0]['data']['author']}",
"subreddit": res[0]["data"]["children"][0]["data"][
"subreddit_name_prefixed"
],
"upvotes": res[0]["data"]["children"][0]["data"]["ups"],
"comments": res[0]["data"]["children"][0]["data"]["num_comments"],
"img_url": res[0]["data"]["children"][0]["data"]["url"],
"post_url": f"https://reddit.com{res[0]['data']['children'][0]['data']['permalink']}",
}
if not self.return_embed:
return data
else:
return _format(data, self.embed_color)
elif not self.usereddit:
return "Still in progress"
raise SubredditNotFoundError("You didn't specify a subreddit")
def meme(self, subreddit=None):
if self.usereddit and subreddit:
r = requests.get(
"https://reddit.com/r/{subreddit}/random.json",
headers={"user-agent": self.agent},
)
res = r.json()
if r.status_code != 200:
if r.status_code == 429:
raise RateLimitError(
"Uh-oh, it looks like you were ratelimited! Try changing your user agent by passing it in the `DankMemeClient` call."
)
return None
elif r.status == 404:
raise SubredditNotFoundError(
"Reddit's API returned a 404 error. Make sure the subreddit that you passed does not include the `r/` in front of it."
)
return None
else:
raise RedditApiError(
f"Reddit's API returned status code {r.status_code}"
)
return None
data = {
"title": res[0]["data"]["children"][0]["data"]["title"],
"author": f"u/{res[0]['data']['children'][0]['data']['author']}",
"subreddit": res[0]["data"]["children"][0]["data"][
"subreddit_name_prefixed"
],
"upvotes": res[0]["data"]["children"][0]["data"]["ups"],
"comments": res[0]["data"]["children"][0]["data"]["num_comments"],
"img_url": res[0]["data"]["children"][0]["data"]["url"],
"post_url": f"https://reddit.com{res[0]['data']['children'][0]['data']['permalink']}",
}
if not self.return_embed:
return data
else:
return _format(data, self.embed_color)
elif self.usereddit and not subreddit:
subreddit = random.choice(self.meme_subreddits)
r = requests.get(
"https://reddit.com/r/{subreddit}/random.json",
headers={"user-agent": self.agent},
)
res = r.json()
data = {
"title": res[0]["data"]["children"][0]["data"]["title"],
"author": f"u/{res[0]['data']['children'][0]['data']['author']}",
"subreddit": res[0]["data"]["children"][0]["data"][
"subreddit_name_prefixed"
],
"upvotes": res[0]["data"]["children"][0]["data"]["ups"],
"comments": res[0]["data"]["children"][0]["data"]["num_comments"],
"img_url": res[0]["data"]["children"][0]["data"]["url"],
"post_url": f"https://reddit.com{res[0]['data']['children'][0]['data']['permalink']}",
}
if not self.return_embed:
return data
else:
return _format(data, self.embed_color)
elif not self.usereddit:
return "Still in progress"
raise SubredditNotFoundError("You didn't specify a subreddit") | PypiClean |
/MindsDB-23.8.3.0.tar.gz/MindsDB-23.8.3.0/mindsdb/utilities/fs.py | import os
import tempfile
import threading
import time
from pathlib import Path
from typing import Optional
import psutil
from appdirs import user_data_dir
def create_directory(path):
path = Path(path)
path.mkdir(mode=0o777, exist_ok=True, parents=True)
def get_root_path():
mindsdb_path = user_data_dir("mindsdb", "mindsdb")
return os.path.join(mindsdb_path, "var/")
def get_or_create_data_dir():
data_dir = user_data_dir("mindsdb", "mindsdb")
mindsdb_data_dir = os.path.join(data_dir, "var/")
if os.path.exists(mindsdb_data_dir) is False:
create_directory(mindsdb_data_dir)
try:
assert os.path.exists(mindsdb_data_dir)
assert os.access(mindsdb_data_dir, os.W_OK) is True
except Exception:
raise Exception(
"MindsDB storage directory does not exist and could not be created"
)
return mindsdb_data_dir
def create_dirs_recursive(path):
if isinstance(path, dict):
for p in path.values():
create_dirs_recursive(p)
elif isinstance(path, str):
create_directory(path)
else:
raise ValueError(f"Wrong path: {path}")
def _get_process_mark_id(unified: bool = False) -> str:
"""Creates a text that can be used to identify process+thread
Args:
unified: bool, if True then result will be same for same process+thread
Returns:
mark of process+thread
"""
mark = f"{os.getpid()}-{threading.get_native_id()}"
if unified is True:
return mark
return f"{mark}-{str(time.time()).replace('.', '')}"
def create_process_mark(folder="learn"):
mark = None
if os.name == "posix":
p = Path(tempfile.gettempdir()).joinpath(f"mindsdb/processes/{folder}/")
p.mkdir(parents=True, exist_ok=True)
mark = _get_process_mark_id()
p.joinpath(mark).touch()
return mark
def delete_process_mark(folder: str = "learn", mark: Optional[str] = None):
if mark is None:
mark = _get_process_mark_id()
if os.name == "posix":
p = (
Path(tempfile.gettempdir())
.joinpath(f"mindsdb/processes/{folder}/")
.joinpath(mark)
)
if p.exists():
p.unlink()
def clean_process_marks():
"""delete all existing processes marks"""
if os.name != "posix":
return
p = Path(tempfile.gettempdir()).joinpath("mindsdb/processes/")
if p.exists() is False:
return
for path in p.iterdir():
if path.is_dir() is False:
return
for file in path.iterdir():
file.unlink()
def clean_unlinked_process_marks():
"""delete marks that does not have corresponded processes/threads"""
if os.name != "posix":
return
p = Path(tempfile.gettempdir()).joinpath("mindsdb/processes/")
if p.exists() is False:
return
for path in p.iterdir():
if path.is_dir() is False:
return
for file in path.iterdir():
parts = file.name.split("-")
process_id = int(parts[0])
thread_id = int(parts[1])
try:
process = psutil.Process(process_id)
if process.status() in (psutil.STATUS_ZOMBIE, psutil.STATUS_DEAD):
raise psutil.NoSuchProcess(process_id)
threads = process.threads()
try:
next(t for t in threads if t.id == thread_id)
except StopIteration:
from mindsdb.utilities.log import get_log
get_log("main").warning(
f"We have mark for process/thread {process_id}/{thread_id} but it does not exists"
)
file.unlink()
except psutil.AccessDenied:
from mindsdb.utilities.log import get_log
get_log("main").warning(f"access to {process_id} denied")
continue
except psutil.NoSuchProcess:
from mindsdb.utilities.log import get_log
get_log("main").warning(
f"We have mark for process/thread {process_id}/{thread_id} but it does not exists"
)
file.unlink() | PypiClean |
/Euphorie-15.0.2.tar.gz/Euphorie-15.0.2/src/euphorie/client/resources/oira/script/chunks/45231.ddc8880b90bac9f028f5.min.js | "use strict";(self.webpackChunk_patternslib_patternslib=self.webpackChunk_patternslib_patternslib||[]).push([[45231],{71050:function(n,e,t){var o=t(87537),s=t.n(o),r=t(23645),l=t.n(r)()(s());l.push([n.id,".hljs{display:block;overflow-x:auto;padding:.5em;background:#1c1b19;color:#fce8c3}.hljs-strong,.hljs-emphasis{color:#918175}.hljs-bullet,.hljs-quote,.hljs-link,.hljs-number,.hljs-regexp,.hljs-literal{color:#ff5c8f}.hljs-code,.hljs-selector-class{color:#68a8e4}.hljs-emphasis{font-style:italic}.hljs-keyword,.hljs-selector-tag,.hljs-section,.hljs-attribute,.hljs-variable{color:#ef2f27}.hljs-name,.hljs-title{color:#fbb829}.hljs-type,.hljs-params{color:#0aaeb3}.hljs-string{color:#98bc37}.hljs-subst,.hljs-built_in,.hljs-builtin-name,.hljs-symbol,.hljs-selector-id,.hljs-selector-attr,.hljs-selector-pseudo,.hljs-template-tag,.hljs-template-variable,.hljs-addition{color:#c07abe}.hljs-comment,.hljs-deletion,.hljs-meta{color:#918175}","",{version:3,sources:["webpack://./node_modules/highlight.js/styles/srcery.css"],names:[],mappings:"AAOA,MACE,aAAA,CACA,eAAA,CACA,YAAA,CACA,kBAAA,CACA,aAAA,CAGF,4BAEE,aAAA,CAGF,4EAME,aAAA,CAGF,gCAEE,aAAA,CAGF,eACE,iBAAA,CAGF,8EAKE,aAAA,CAGF,uBAEE,aAAA,CAGF,wBAEE,aAAA,CAGF,aACE,aAAA,CAGF,iLAUE,aAAA,CAGF,wCAGE,aAAA",sourcesContent:["/*\nDescription: Srcery dark color scheme for highlight.js\nAuthor: Chen Bin <chen.bin@gmail.com>\nWebsite: https://srcery-colors.github.io/\nDate: 2020-04-06\n*/\n\n.hljs {\n display: block;\n overflow-x: auto;\n padding: 0.5em;\n background: #1C1B19;\n color: #FCE8C3;\n}\n\n.hljs-strong,\n.hljs-emphasis {\n color: #918175;\n}\n\n.hljs-bullet,\n.hljs-quote,\n.hljs-link,\n.hljs-number,\n.hljs-regexp,\n.hljs-literal {\n color: #FF5C8F;\n}\n\n.hljs-code,\n.hljs-selector-class {\n color: #68A8E4\n}\n\n.hljs-emphasis {\n font-style: 
italic;\n}\n\n.hljs-keyword,\n.hljs-selector-tag,\n.hljs-section,\n.hljs-attribute,\n.hljs-variable {\n color: #EF2F27;\n}\n\n.hljs-name,\n.hljs-title {\n color: #FBB829;\n}\n\n.hljs-type,\n.hljs-params {\n color: #0AAEB3;\n}\n\n.hljs-string {\n color: #98BC37;\n}\n\n.hljs-subst,\n.hljs-built_in,\n.hljs-builtin-name,\n.hljs-symbol,\n.hljs-selector-id,\n.hljs-selector-attr,\n.hljs-selector-pseudo,\n.hljs-template-tag,\n.hljs-template-variable,\n.hljs-addition {\n color: #C07ABE;\n}\n\n.hljs-comment,\n.hljs-deletion,\n.hljs-meta {\n color: #918175;\n}\n"],sourceRoot:""}]),e.Z=l},23645:function(n){n.exports=function(n){var e=[];return e.toString=function(){return this.map((function(e){var t="",o=void 0!==e[5];return e[4]&&(t+="@supports (".concat(e[4],") {")),e[2]&&(t+="@media ".concat(e[2]," {")),o&&(t+="@layer".concat(e[5].length>0?" ".concat(e[5]):""," {")),t+=n(e),o&&(t+="}"),e[2]&&(t+="}"),e[4]&&(t+="}"),t})).join("")},e.i=function(n,t,o,s,r){"string"==typeof n&&(n=[[null,n,void 0]]);var l={};if(o)for(var a=0;a<this.length;a++){var c=this[a][0];null!=c&&(l[c]=!0)}for(var i=0;i<n.length;i++){var u=[].concat(n[i]);o&&l[u[0]]||(void 0!==r&&(void 0===u[5]||(u[1]="@layer".concat(u[5].length>0?" 
".concat(u[5]):""," {").concat(u[1],"}")),u[5]=r),t&&(u[2]?(u[1]="@media ".concat(u[2]," {").concat(u[1],"}"),u[2]=t):u[2]=t),s&&(u[4]?(u[1]="@supports (".concat(u[4],") {").concat(u[1],"}"),u[4]=s):u[4]="".concat(s)),e.push(u))}},e}},87537:function(n){n.exports=function(n){var e=n[1],t=n[3];if(!t)return e;if("function"==typeof btoa){var o=btoa(unescape(encodeURIComponent(JSON.stringify(t)))),s="sourceMappingURL=data:application/json;charset=utf-8;base64,".concat(o),r="/*# ".concat(s," */");return[e].concat([r]).join("\n")}return[e].join("\n")}},45231:function(n,e,t){t.r(e);var o=t(93379),s=t.n(o),r=t(7795),l=t.n(r),a=t(3565),c=t.n(a),i=t(19216),u=t.n(i),h=t(44589),p=t.n(h),A=t(71050),f={};f.styleTagTransform=p(),f.setAttributes=c(),f.insert=function(n){var e=document.head.querySelectorAll("*")[0];e?document.head.insertBefore(n,e):document.head.append(n)},f.domAPI=l(),f.insertStyleElement=u();s()(A.Z,f);e.default=A.Z&&A.Z.locals?A.Z.locals:void 0},93379:function(n){var e=[];function t(n){for(var t=-1,o=0;o<e.length;o++)if(e[o].identifier===n){t=o;break}return t}function o(n,o){for(var r={},l=[],a=0;a<n.length;a++){var c=n[a],i=o.base?c[0]+o.base:c[0],u=r[i]||0,h="".concat(i," ").concat(u);r[i]=u+1;var p=t(h),A={css:c[1],media:c[2],sourceMap:c[3],supports:c[4],layer:c[5]};if(-1!==p)e[p].references++,e[p].updater(A);else{var f=s(A,o);o.byIndex=a,e.splice(a,0,{identifier:h,updater:f,references:1})}l.push(h)}return l}function s(n,e){var t=e.domAPI(e);t.update(n);return function(e){if(e){if(e.css===n.css&&e.media===n.media&&e.sourceMap===n.sourceMap&&e.supports===n.supports&&e.layer===n.layer)return;t.update(n=e)}else t.remove()}}n.exports=function(n,s){var r=o(n=n||[],s=s||{});return function(n){n=n||[];for(var l=0;l<r.length;l++){var a=t(r[l]);e[a].references--}for(var c=o(n,s),i=0;i<r.length;i++){var u=t(r[i]);0===e[u].references&&(e[u].updater(),e.splice(u,1))}r=c}}},19216:function(n){n.exports=function(n){var e=document.createElement("style");return 
n.setAttributes(e,n.attributes),n.insert(e,n.options),e}},3565:function(n,e,t){n.exports=function(n){var e=t.nc;e&&n.setAttribute("nonce",e)}},7795:function(n){n.exports=function(n){if("undefined"==typeof document)return{update:function(){},remove:function(){}};var e=n.insertStyleElement(n);return{update:function(t){!function(n,e,t){var o="";t.supports&&(o+="@supports (".concat(t.supports,") {")),t.media&&(o+="@media ".concat(t.media," {"));var s=void 0!==t.layer;s&&(o+="@layer".concat(t.layer.length>0?" ".concat(t.layer):""," {")),o+=t.css,s&&(o+="}"),t.media&&(o+="}"),t.supports&&(o+="}");var r=t.sourceMap;r&&"undefined"!=typeof btoa&&(o+="\n/*# sourceMappingURL=data:application/json;base64,".concat(btoa(unescape(encodeURIComponent(JSON.stringify(r))))," */")),e.styleTagTransform(o,n,e.options)}(e,n,t)},remove:function(){!function(n){if(null===n.parentNode)return!1;n.parentNode.removeChild(n)}(e)}}}},44589:function(n){n.exports=function(n,e){if(e.styleSheet)e.styleSheet.cssText=n;else{for(;e.firstChild;)e.removeChild(e.firstChild);e.appendChild(document.createTextNode(n))}}}}]);
//# sourceMappingURL=45231.ddc8880b90bac9f028f5.min.js.map | PypiClean |
/CrudeBHT-1.0.7-py3-none-any.whl/code/body.py | from math import sqrt
G = 6.673e-11 # gravitational constant
class Body:
# cartesian positions
rx: float
ry: float
# velocity components
vx: float
vy: float
# force components
fx: float = 0.0
fy: float = 0.0
mass: float
def __init__(self, rx: float, ry: float,
vx: float, vy: float,
mass: float):
self.rx = rx
self.ry = ry
self.vx = vx
self.vy = vy
self.mass = mass
def __str__(self):
return "{}, {}, {}, {}, {}".format(
self.rx, self.ry, self.vx, self.vy, self.mass
)
def update(self, dt: float):
"""
Update the velocity and position using a time step dt
"""
self.vx += dt * self.fx / self.mass
self.vy += dt * self.fy / self.mass
self.rx += dt * self.vx
self.ry += dt * self.vy
def distance_to(self, other_body: 'Body') -> float:
"""'
Return the distance between two bodies
"""
dx = self.rx - other_body.rx
dy = self.ry - other_body.ry
return sqrt(dx*dx + dy*dy)
def reset_force(self):
"""
Reset the force to 0 for the next iteration
"""
self.fx = 0.0
self.fy = 0.0
def accelerate(self, other_body: 'Body'):
"""
Compute the net force acting between this body and
other_body and add to the net force acting on this
body
"""
# These two checks are not in the original source
if self == other_body:
return
if other_body is None:
return
# softening parameter (just to avoid infinities)
eps = 3E4
dx = other_body.rx - self.rx
dy = other_body.ry - self.ry
dist = sqrt(dx*dx + dy*dy)
F = (G * self.mass * other_body.mass) / (dist*dist + eps*eps)
self.fx = self.fx + F * dx / dist
self.fy = self.fy + F * dy / dist
def is_in(self, quad: 'Quadrant'):
return quad.contains(self.rx, self.ry) | PypiClean |
/AMONG_py-0.0.3.4-py3-none-any.whl/AMONGpy/analysis.py | import csv, json, itertools
def get_recommended_project(exam_logs_json) :
'''
Student json
{
"name" : "이름",
"id" : "아이디",
"test" : [
{"answer" : [5, 4]},
{"answer" : [2, 4]}
]
}
Test json
[
{"name" : "시험 이름",
"number" : 10,
"problems" : [
{
"tags" : ["tag1", "tag2"],
"correctanswer" : 3
},
{
"tags" : ["tag1", "tag2"],
"correctanswer" : 2
}]},
{"name" : "시험 이름",
"number" : 15,
"problems" : [
{
"tags" : ["tag1", "tag2"],
"correctanswer" : 2
}
]}]
:param student_json:
:param test_json:
:return:
{
"name":"프로젝트 이름",
"tool":"사용교구(예. 아두이노, 앱인벤터, ...)"
"difficuly":0~20
}
'''
f = open('AMONGpy/projectKeyword.csv', 'r', encoding='utf-8')
rdr = csv.reader(f)
project_list = []
for idx, line in enumerate(rdr):
if idx >= 1:
project_list.append({"분류":line[0], "프로젝트명":line[1], "난이도":line[2], "키워드":line[3]})
print(project_list)
exam_logs_json = json.loads(exam_logs_json)
scores = [len([a for a, q in zip(e['answers'], e['exam']['questions']) if a['id'] == q['response']['answer']['id']]) / len(e['exam']['questions']) * 20.0 for e in exam_logs_json['exam_logs']]
print(scores)
all_tag = [q['tags'] for e in exam_logs_json['exam_logs'] for q in e['exam']['questions']]
all_tag = list(itertools.chain(*all_tag))
all_tag = [x['name'] for x in all_tag]
all_tag = list(set(all_tag))
all_tag = sorted(all_tag)
accs = [[q for a, q in zip(e['answers'], e['exam']['questions']) if a['id'] == q['response']['answer']['id']]
for e in exam_logs_json['exam_logs']]
for acc in accs :
for p in acc :
tag_list = []
for t in p['tags'] :
tag_list.append(t['name'])
p['tags'] = tag_list
tag_cor = []
for t in all_tag:
acc_scores = [sum([1.0 / len(p['tags']) for p in acc if t in p['tags']]) for i, acc in enumerate(accs)]
tag_cor.append((t, acc_scores))
tag_cor = sorted(tag_cor, key=lambda x : x[1])
print(tag_cor)
project_list = sorted(project_list, key=lambda x : abs(int(x["난이도"]) - scores[-1]))
print(project_list)
distlist = []
for i, p in enumerate(project_list) :
for j, t in enumerate(tag_cor) :
if p["키워드"] == t[0] :
distlist.append((p, i + j * 1.5))
distlist = sorted(distlist, key=lambda x : x[1])
print(distlist)
if len(distlist) == 0 :
print('There was any matched tag with the test and recommandable projects')
return None
most_recommended_project = distlist[0][0]
return json.dumps({"name":most_recommended_project["프로젝트명"], "tool":most_recommended_project["분류"], "difficulty":most_recommended_project["난이도"]})
if __name__ == "__main__" :
p = get_recommended_project('{'
'"id": 7,'
'"exam_logs": ['
'{'
'"id": 5,'
'"answers": ['
'{'
'"id": 3,'
'"text": "우진이가 잘했다"'
'},'
'{'
'"id": 6,'
'"text": "승의가 잘못했다"'
'}'
'],'
'"exam": {'
'"id": 4,'
'"questions": ['
'{'
'"id": 5,'
'"response": {'
'"id": 5,'
'"choices": ['
'{'
'"id": 2,'
'"text": "상준이가 잘했다"'
'},'
'{'
'"id": 3,'
'"text": "우진이가 잘했다"'
'},'
'{'
'"id": 4,'
'"text": "고러엄"'
'},'
'{'
'"id": 5,'
'"text": "안녕"'
'}'
'],'
'"answer": {'
'"id": 3,'
'"text": "우진이가 잘했다"'
'},'
'"polymorphic_ctype": 16,'
'"resourcetype": "UniqueAnswerResponse"'
'},'
'"context_block": {'
'"id": 1,'
'"blocks": []'
'},'
'"tags": ['
'{'
'"id": 2,'
'"name": "아두이노"'
'},'
'{'
'"id": 3,'
'"name": "자료수집"'
'}'
'],'
'"name": "1번문제"'
'},'
'{'
'"id": 6,'
'"response": {'
'"id": 6,'
'"choices": ['
'{'
'"id": 6,'
'"text": "승의가 잘못했다"'
'},'
'{'
'"id": 7,'
'"text": "승의가 잘했다"'
'}'
'],'
'"answer": {'
'"id": 7,'
'"text": "승의가 잘했다"'
'},'
'"polymorphic_ctype": 16,'
'"resourcetype": "UniqueAnswerResponse"'
'},'
'"context_block": {'
'"id": 2,'
'"blocks": ['
'{'
'"id": 7,'
'"text": "과연 상준이가 잘했을까? 우진이가 잘했을까?",'
'"polymorphic_ctype": 15,'
'"resourcetype": "TextBlock"'
'},'
'{'
'"id": 8,'
'"text": "과연 누가 잘했을까? 보기에서 잘 골라보자",'
'"polymorphic_ctype": 15,'
'"resourcetype": "TextBlock"'
'}'
']'
'},'
'"tags": ['
'{'
'"id": 2,'
'"name": "아두이노"'
'}'
'],'
'"name": "두번째 문제"'
'}'
'],'
'"name": "첫번째 시험"'
'}'
'}'
'],'
'"name": "홍승의"'
'}')
print(p) | PypiClean |
/Box2D-2.3.2.tar.gz/Box2D-2.3.2/examples/vertical_stack.py |
from .framework import (Framework, Keys, main)
from Box2D import (b2CircleShape, b2EdgeShape, b2FixtureDef, b2PolygonShape)
class VerticalStack (Framework):
name = "Vertical Stack"
description = ("Tests the stability of stacking circles and boxes\n"
"Press B to launch a horizontal bullet")
bullet = None
def __init__(self):
super(VerticalStack, self).__init__()
columns = 5
rows = 16
ground = self.world.CreateStaticBody(
shapes=[
b2EdgeShape(vertices=[(-40, 0), (40, 0)]),
b2EdgeShape(vertices=[(20, 0), (20, 20)]),
]
)
box = b2FixtureDef(
shape=b2PolygonShape(box=(0.5, 0.5)),
density=1,
friction=0.3)
circle = b2FixtureDef(
shape=b2CircleShape(radius=0.5),
density=1,
friction=0.3)
box_start = -10
box_space = 2.5
circle_start = 8
circle_space = 2.5
for j in range(columns):
for i in range(rows):
self.world.CreateDynamicBody(
fixtures=box,
position=(box_start + box_space * j, 0.752 + 1.54 * i)
)
self.world.CreateDynamicBody(
fixtures=circle,
position=(circle_start + circle_space *
j, 0.752 + 1.54 * i)
)
def Step(self, settings):
super(VerticalStack, self).Step(settings)
def Keyboard(self, key):
if key == Keys.K_b:
if self.bullet:
self.world.DestroyBody(self.bullet)
self.bullet = None
circle = b2FixtureDef(
shape=b2CircleShape(radius=0.25),
density=20,
restitution=0.05)
self.bullet = self.world.CreateDynamicBody(
position=(-31, 5),
bullet=True,
fixtures=circle,
linearVelocity=(400, 0),
)
if __name__ == "__main__":
main(VerticalStack) | PypiClean |
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/form/Uploader.js | require({cache:{"url:dojox/form/resources/Uploader.html":"<span class=\"dijit dijitReset dijitInline\"\n\t><span class=\"dijitReset dijitInline dijitButtonNode\"\n\t\tdojoAttachEvent=\"ondijitclick:_onClick\"\n\t\t><span class=\"dijitReset dijitStretch dijitButtonContents\"\n\t\t\tdojoAttachPoint=\"titleNode,focusNode\"\n\t\t\trole=\"button\" aria-labelledby=\"${id}_label\"\n\t\t\t><span class=\"dijitReset dijitInline dijitIcon\" dojoAttachPoint=\"iconNode\"></span\n\t\t\t><span class=\"dijitReset dijitToggleButtonIconChar\">●</span\n\t\t\t><span class=\"dijitReset dijitInline dijitButtonText\"\n\t\t\t\tid=\"${id}_label\"\n\t\t\t\tdojoAttachPoint=\"containerNode\"\n\t\t\t></span\n\t\t></span\n\t></span\n\t><!--no need to have this for Uploader \n\t<input ${!nameAttrSetting} type=\"${type}\" value=\"${value}\" class=\"dijitOffScreen\" tabIndex=\"-1\"\n\t\tdojoAttachPoint=\"valueNode\"\n/--></span>\n"}});
define("dojox/form/Uploader",["dojo/_base/kernel","dojo/_base/declare","dojo/_base/lang","dojo/_base/array","dojo/_base/connect","dojo/_base/window","dojo/dom-style","dojo/dom-geometry","dojo/dom-attr","dojo/dom-construct","dojo/dom-form","dijit","dijit/form/Button","dojox/form/uploader/Base","dojo/i18n!./nls/Uploader","dojo/text!./resources/Uploader.html"],function(_1,_2,_3,_4,_5,_6,_7,_8,_9,_a,_b,_c,_d,_e,_f,_10){
_1.experimental("dojox.form.Uploader");
_2("dojox.form.Uploader",[_e,_d],{uploadOnSelect:false,tabIndex:0,multiple:false,label:_f.label,url:"",name:"uploadedfile",flashFieldName:"",uploadType:"form",showInput:"",_nameIndex:0,templateString:_10,baseClass:"dijitUploader "+_d.prototype.baseClass,postMixInProperties:function(){
this._inputs=[];
this._cons=[];
this.inherited(arguments);
},buildRendering:function(){
console.warn("buildRendering",this.id);
this.inherited(arguments);
_7.set(this.domNode,{overflow:"hidden",position:"relative"});
this._buildDisplay();
_9.set(this.titleNode,"tabIndex",-1);
},_buildDisplay:function(){
if(this.showInput){
this.displayInput=dojo.create("input",{"class":"dijitUploadDisplayInput","tabIndex":-1,"autocomplete":"off"},this.containerNode,this.showInput);
this._attachPoints.push("displayInput");
this.connect(this,"onChange",function(_11){
var i=0,l=_11.length,f,r=[];
while((f=_11[i++])){
if(f&&f.name){
r.push(f.name);
}
}
this.displayInput.value=r.join(", ");
});
this.connect(this,"reset",function(){
this.displayInput.value="";
});
}
},startup:function(){
if(this._buildInitialized){
return;
}
this._buildInitialized=true;
this._getButtonStyle(this.domNode);
this._setButtonStyle();
this.inherited(arguments);
},onChange:function(_12){
},onBegin:function(_13){
},onProgress:function(_14){
},onComplete:function(_15){
this.reset();
},onCancel:function(){
},onAbort:function(){
},onError:function(_16){
},upload:function(_17){
},submit:function(_18){
_18=!!_18?_18.tagName?_18:this.getForm():this.getForm();
var _19=_b.toObject(_18);
this.upload(_19);
},reset:function(){
delete this._files;
this._disconnectButton();
_4.forEach(this._inputs,_a.destroy,dojo);
this._inputs=[];
this._nameIndex=0;
this._createInput();
},getFileList:function(){
var _1a=[];
if(this.supports("multiple")){
_4.forEach(this._files,function(f,i){
_1a.push({index:i,name:f.name,size:f.size,type:f.type});
},this);
}else{
_4.forEach(this._inputs,function(n,i){
if(n.value){
_1a.push({index:i,name:n.value.substring(n.value.lastIndexOf("\\")+1),size:0,type:n.value.substring(n.value.lastIndexOf(".")+1)});
}
},this);
}
return _1a;
},_getValueAttr:function(){
return this.getFileList();
},_setValueAttr:function(_1b){
console.error("Uploader value is read only");
},_setDisabledAttr:function(_1c){
if(this._disabled==_1c){
return;
}
this.inherited(arguments);
_7.set(this.inputNode,"display",_1c?"none":"");
},_getButtonStyle:function(_1d){
this.btnSize={w:_7.get(_1d,"width"),h:_7.get(_1d,"height")};
},_setButtonStyle:function(){
this.inputNodeFontSize=Math.max(2,Math.max(Math.ceil(this.btnSize.w/60),Math.ceil(this.btnSize.h/15)));
this._createInput();
},_createInput:function(){
if(this._inputs.length){
_7.set(this.inputNode,{top:"500px"});
this._disconnectButton();
this._nameIndex++;
}
var _1e;
if(this.supports("multiple")){
_1e=this.name+"s[]";
}else{
_1e=this.name+(this.multiple?this._nameIndex:"");
}
this.focusNode=this.inputNode=_a.create("input",{type:"file",name:_1e},this.domNode,"first");
if(this.supports("multiple")&&this.multiple){
_9.set(this.inputNode,"multiple",true);
}
this._inputs.push(this.inputNode);
_7.set(this.inputNode,{position:"absolute",fontSize:this.inputNodeFontSize+"em",top:"-3px",right:"-3px",opacity:0});
this._connectButton();
},_connectButton:function(){
this._cons.push(_5.connect(this.inputNode,"change",this,function(evt){
this._files=this.inputNode.files;
this.onChange(this.getFileList(evt));
if(!this.supports("multiple")&&this.multiple){
this._createInput();
}
}));
if(this.tabIndex>-1){
this.inputNode.tabIndex=this.tabIndex;
this._cons.push(_5.connect(this.inputNode,"focus",this,function(){
this.titleNode.style.outline="1px dashed #ccc";
}));
this._cons.push(_5.connect(this.inputNode,"blur",this,function(){
this.titleNode.style.outline="";
}));
}
},_disconnectButton:function(){
_4.forEach(this._cons,_5.disconnect);
this._cons.splice(0,this._cons.length);
}});
dojox.form.UploaderOrg=dojox.form.Uploader;
var _1f=[dojox.form.UploaderOrg];
dojox.form.addUploaderPlugin=function(_20){
_1f.push(_20);
_2("dojox.form.Uploader",_1f,{});
};
return dojox.form.Uploader;
}); | PypiClean |
/DJModels-0.0.6-py3-none-any.whl/djmodels/db/migrations/state.py | import copy
from collections import OrderedDict
from contextlib import contextmanager
from djmodels.apps import AppConfig
from djmodels.apps.registry import Apps, apps as global_apps
from djmodels.conf import settings
from djmodels.db import models
from djmodels.db.models.fields.proxy import OrderWrt
from djmodels.db.models.fields.related import RECURSIVE_RELATIONSHIP_CONSTANT
from djmodels.db.models.options import DEFAULT_NAMES, normalize_together
from djmodels.db.models.utils import make_model_tuple
from djmodels.utils.functional import cached_property
from djmodels.utils.module_loading import import_string
from djmodels.utils.version import get_docs_version
from .exceptions import InvalidBasesError
def _get_app_label_and_model_name(model, app_label=''):
if isinstance(model, str):
split = model.split('.', 1)
return tuple(split) if len(split) == 2 else (app_label, split[0])
else:
return model._meta.app_label, model._meta.model_name
def _get_related_models(m):
    """Return all models that have a direct relationship to the given model."""
    # Subclasses count as related: inheritance is itself a relationship.
    related = [cls for cls in m.__subclasses__() if issubclass(cls, models.Model)]
    # Models reached through concrete relational fields (FK / O2O / M2M),
    # plus the set of models those fields are attached to.
    models_with_relations = set()
    for field in m._meta.get_fields(include_parents=True, include_hidden=True):
        target = field.related_model if field.is_relation else None
        if target is not None and not isinstance(target, str):
            models_with_relations.add(field.model)
            related.append(target)
    # Reverse accessors of foreign keys to proxy models are attached to their
    # concrete proxied model.
    if m._meta.proxy and m in models_with_relations:
        related.append(m._meta.concrete_model)
    return related
def get_related_models_tuples(model):
    """
    Return a list of typical (app_label, model_name) tuples for all related
    models for the given model.
    """
    result = set()
    for related in _get_related_models(model):
        result.add((related._meta.app_label, related._meta.model_name))
    return result
def get_related_models_recursive(model):
    """
    Return all models that have a direct or indirect relationship
    to the given model.

    Relationships are either defined by explicit relational fields, like
    ForeignKey, ManyToManyField or OneToOneField, or by inheriting from another
    model (a superclass is related to its subclasses, but not vice versa). Note,
    however, that a model inheriting from a concrete model is also related to
    its superclass through the implicit *_ptr OneToOneField on the subclass.
    """
    # Breadth-first walk over the relationship graph using an explicit
    # worklist; models already seen are never expanded twice.
    seen = set()
    pending = _get_related_models(model)
    index = 0
    while index < len(pending):
        candidate = pending[index]
        index += 1
        key = (candidate._meta.app_label, candidate._meta.model_name)
        if key in seen:
            continue
        seen.add(key)
        pending.extend(_get_related_models(candidate))
    # The starting model itself is not part of the result.
    return seen - {(model._meta.app_label, model._meta.model_name)}
class ProjectState:
"""
Represent the entire project's overall state. This is the item that is
passed around - do it here rather than at the app level so that cross-app
FKs/etc. resolve properly.
"""
def __init__(self, models=None, real_apps=None):
self.models = models or {}
# Apps to include from main registry, usually unmigrated ones
self.real_apps = real_apps or []
self.is_delayed = False
def add_model(self, model_state):
app_label, model_name = model_state.app_label, model_state.name_lower
self.models[(app_label, model_name)] = model_state
if 'apps' in self.__dict__: # hasattr would cache the property
self.reload_model(app_label, model_name)
def remove_model(self, app_label, model_name):
del self.models[app_label, model_name]
if 'apps' in self.__dict__: # hasattr would cache the property
self.apps.unregister_model(app_label, model_name)
# Need to do this explicitly since unregister_model() doesn't clear
# the cache automatically (#24513)
self.apps.clear_cache()
    def _find_reload_model(self, app_label, model_name, delay=False):
        """
        Return the set of (app_label, model_name) pairs that must be
        re-rendered when the given model changes, including the model itself.

        With ``delay=True`` only directly related models are collected and
        the state is flagged as delayed; otherwise the full transitive
        closure of relationships is followed.
        """
        if delay:
            self.is_delayed = True
        related_models = set()
        try:
            old_model = self.apps.get_model(app_label, model_name)
        except LookupError:
            # Model not rendered yet; only outgoing references matter.
            pass
        else:
            # Get all relations to and from the old model before reloading,
            # as _meta.apps may change
            if delay:
                related_models = get_related_models_tuples(old_model)
            else:
                related_models = get_related_models_recursive(old_model)
        # Get all outgoing references from the model to be rendered
        model_state = self.models[(app_label, model_name)]
        # Directly related models are the models pointed to by ForeignKeys,
        # OneToOneFields, and ManyToManyFields.
        direct_related_models = set()
        for name, field in model_state.fields:
            if field.is_relation:
                if field.remote_field.model == RECURSIVE_RELATIONSHIP_CONSTANT:
                    # Self-referential relations add nothing new.
                    continue
                rel_app_label, rel_model_name = _get_app_label_and_model_name(field.related_model, app_label)
                direct_related_models.add((rel_app_label, rel_model_name.lower()))
        # For all direct related models recursively get all related models.
        related_models.update(direct_related_models)
        for rel_app_label, rel_model_name in direct_related_models:
            try:
                rel_model = self.apps.get_model(rel_app_label, rel_model_name)
            except LookupError:
                pass
            else:
                if delay:
                    related_models.update(get_related_models_tuples(rel_model))
                else:
                    related_models.update(get_related_models_recursive(rel_model))
        # Include the model itself
        related_models.add((app_label, model_name))
        return related_models
def reload_model(self, app_label, model_name, delay=False):
if 'apps' in self.__dict__: # hasattr would cache the property
related_models = self._find_reload_model(app_label, model_name, delay)
self._reload(related_models)
def reload_models(self, models, delay=True):
if 'apps' in self.__dict__: # hasattr would cache the property
related_models = set()
for app_label, model_name in models:
related_models.update(self._find_reload_model(app_label, model_name, delay))
self._reload(related_models)
    def _reload(self, related_models):
        """
        Unregister every model in `related_models` (a set of
        (app_label, model_name) pairs) and render them again from their
        model states.
        """
        # Unregister all related models
        with self.apps.bulk_update():
            for rel_app_label, rel_model_name in related_models:
                self.apps.unregister_model(rel_app_label, rel_model_name)
        states_to_be_rendered = []
        # Gather all models states of those models that will be rerendered.
        # This includes:
        # 1. All related models of unmigrated apps
        for model_state in self.apps.real_models:
            if (model_state.app_label, model_state.name_lower) in related_models:
                states_to_be_rendered.append(model_state)
        # 2. All related models of migrated apps
        for rel_app_label, rel_model_name in related_models:
            try:
                model_state = self.models[rel_app_label, rel_model_name]
            except KeyError:
                # Real (unmigrated) app models have no state entry here.
                pass
            else:
                states_to_be_rendered.append(model_state)
        # Render all models
        self.apps.render_multiple(states_to_be_rendered)
def clone(self):
"""Return an exact copy of this ProjectState."""
new_state = ProjectState(
models={k: v.clone() for k, v in self.models.items()},
real_apps=self.real_apps,
)
if 'apps' in self.__dict__:
new_state.apps = self.apps.clone()
new_state.is_delayed = self.is_delayed
return new_state
def clear_delayed_apps_cache(self):
if self.is_delayed and 'apps' in self.__dict__:
del self.__dict__['apps']
    @cached_property
    def apps(self):
        """Lazily rendered StateApps registry built from the model states."""
        return StateApps(self.real_apps, self.models)
    @property
    def concrete_apps(self):
        """Render (and cache) the registry with swappable models ignored."""
        # Assigning overrides the 'apps' cached_property value directly.
        self.apps = StateApps(self.real_apps, self.models, ignore_swappable=True)
        return self.apps
@classmethod
def from_apps(cls, apps):
"""Take an Apps and return a ProjectState matching it."""
app_models = {}
for model in apps.get_models(include_swapped=True):
model_state = ModelState.from_model(model)
app_models[(model_state.app_label, model_state.name_lower)] = model_state
return cls(app_models)
def __eq__(self, other):
return self.models == other.models and set(self.real_apps) == set(other.real_apps)
class AppConfigStub(AppConfig):
    """Stub of an AppConfig. Only provides a label and a dict of models."""
    # Not used, but required by AppConfig.__init__
    path = ''

    def __init__(self, label):
        self.label = label
        # App-label and app-name are not the same thing, so technically passing
        # in the label here is wrong. In practice, migrations don't care about
        # the app name, but we need something unique, and the label works fine.
        super().__init__(label, None)

    def import_models(self):
        # The base implementation imports a real models module; the stub
        # takes its models straight from the (fake) registry instead.
        self.models = self.apps.all_models[self.label]
class StateApps(Apps):
    """
    Subclass of the global Apps registry class to better handle dynamic model
    additions and removals.
    """
    def __init__(self, real_apps, models, ignore_swappable=False):
        # Any apps in self.real_apps should have all their models included
        # in the render. We don't use the original model instances as there
        # are some variables that refer to the Apps object.
        # FKs/M2Ms from real apps are also not included as they just
        # mess things up with partial states (due to lack of dependencies)
        self.real_models = []
        for app_label in real_apps:
            app = global_apps.get_app_config(app_label)
            for model in app.get_models():
                self.real_models.append(ModelState.from_model(model, exclude_rels=True))
        # Populate the app registry with a stub for each application.
        app_labels = {model_state.app_label for model_state in models.values()}
        app_configs = [AppConfigStub(label) for label in sorted(real_apps + list(app_labels))]
        super().__init__(app_configs)
        # The lock gets in the way of copying as implemented in clone(), which
        # is called whenever Django duplicates a StateApps before updating it.
        self._lock = None
        self.render_multiple(list(models.values()) + self.real_models)
        # There shouldn't be any operations pending at this point.
        from djmodels.core.checks.model_checks import _check_lazy_references
        ignore = {make_model_tuple(settings.AUTH_USER_MODEL)} if ignore_swappable else set()
        errors = _check_lazy_references(self, ignore=ignore)
        if errors:
            raise ValueError("\n".join(error.msg for error in errors))

    @contextmanager
    def bulk_update(self):
        # Avoid clearing each model's cache for each change. Instead, clear
        # all caches when we're finished updating the model instances.
        ready = self.ready
        self.ready = False
        try:
            yield
        finally:
            self.ready = ready
            self.clear_cache()

    def render_multiple(self, model_states):
        """Render every ModelState in `model_states` into this registry."""
        # We keep trying to render the models in a loop, ignoring invalid
        # base errors, until the size of the unrendered models doesn't
        # decrease by at least one, meaning there's a base dependency loop/
        # missing base.
        if not model_states:
            return
        # Prevent that all model caches are expired for each render.
        with self.bulk_update():
            unrendered_models = model_states
            while unrendered_models:
                new_unrendered_models = []
                for model in unrendered_models:
                    try:
                        model.render(self)
                    except InvalidBasesError:
                        # Base not rendered yet; retry on the next pass.
                        new_unrendered_models.append(model)
                if len(new_unrendered_models) == len(unrendered_models):
                    # No progress this pass: unresolvable base dependency.
                    raise InvalidBasesError(
                        "Cannot resolve bases for %r\nThis can happen if you are inheriting models from an "
                        "app with migrations (e.g. contrib.auth)\n in an app with no migrations; see "
                        "https://docs.djangoproject.com/en/%s/topics/migrations/#dependencies "
                        "for more" % (new_unrendered_models, get_docs_version())
                    )
                unrendered_models = new_unrendered_models

    def clone(self):
        """Return a clone of this registry."""
        clone = StateApps([], {})
        clone.all_models = copy.deepcopy(self.all_models)
        clone.app_configs = copy.deepcopy(self.app_configs)
        # Set the pointer to the correct app registry.
        for app_config in clone.app_configs.values():
            app_config.apps = clone
        # No need to actually clone them, they'll never change
        clone.real_models = self.real_models
        return clone

    def register_model(self, app_label, model):
        """Add `model` to the registry, creating a stub app config on demand."""
        self.all_models[app_label][model._meta.model_name] = model
        if app_label not in self.app_configs:
            self.app_configs[app_label] = AppConfigStub(app_label)
            self.app_configs[app_label].apps = self
            self.app_configs[app_label].models = OrderedDict()
        self.app_configs[app_label].models[model._meta.model_name] = model
        self.do_pending_operations(model)
        self.clear_cache()

    def unregister_model(self, app_label, model_name):
        """Best-effort removal; missing model or app config is ignored."""
        try:
            del self.all_models[app_label][model_name]
            del self.app_configs[app_label].models[model_name]
        except KeyError:
            pass
class ModelState:
    """
    Represent a Django Model. Don't use the actual Model class as it's not
    designed to have its options changed - instead, mutate this one and then
    render it into a Model as required.
    Note that while you are allowed to mutate .fields, you are not allowed
    to mutate the Field instances inside there themselves - you must instead
    assign new ones, as these are not detached during a clone.
    """
    def __init__(self, app_label, name, fields, options=None, bases=None, managers=None):
        self.app_label = app_label
        self.name = name
        self.fields = fields
        self.options = options or {}
        self.options.setdefault('indexes', [])
        self.options.setdefault('constraints', [])
        self.bases = bases or (models.Model,)
        self.managers = managers or []
        # Sanity-check that fields is NOT a dict. It must be ordered.
        if isinstance(self.fields, dict):
            raise ValueError("ModelState.fields cannot be a dict - it must be a list of 2-tuples.")
        for name, field in fields:
            # Sanity-check that fields are NOT already bound to a model.
            if hasattr(field, 'model'):
                raise ValueError(
                    'ModelState.fields cannot be bound to a model - "%s" is.' % name
                )
            # Sanity-check that relation fields are NOT referring to a model class.
            if field.is_relation and hasattr(field.related_model, '_meta'):
                raise ValueError(
                    'ModelState.fields cannot refer to a model class - "%s.to" does. '
                    'Use a string reference instead.' % name
                )
            if field.many_to_many and hasattr(field.remote_field.through, '_meta'):
                raise ValueError(
                    'ModelState.fields cannot refer to a model class - "%s.through" does. '
                    'Use a string reference instead.' % name
                )
        # Sanity-check that indexes have their name set.
        for index in self.options['indexes']:
            if not index.name:
                raise ValueError(
                    "Indexes passed to ModelState require a name attribute. "
                    "%r doesn't have one." % index
                )

    @cached_property
    def name_lower(self):
        """Lower-cased model name; used as the lookup key in model dicts."""
        return self.name.lower()

    @classmethod
    def from_model(cls, model, exclude_rels=False):
        """Given a model, return a ModelState representing it."""
        # Deconstruct the fields
        fields = []
        for field in model._meta.local_fields:
            if getattr(field, "remote_field", None) and exclude_rels:
                continue
            if isinstance(field, OrderWrt):
                # order_with_respect_to fields are re-created on render.
                continue
            name = field.name
            try:
                fields.append((name, field.clone()))
            except TypeError as e:
                raise TypeError("Couldn't reconstruct field %s on %s: %s" % (
                    name,
                    model._meta.label,
                    e,
                ))
        if not exclude_rels:
            for field in model._meta.local_many_to_many:
                name = field.name
                try:
                    fields.append((name, field.clone()))
                except TypeError as e:
                    raise TypeError("Couldn't reconstruct m2m field %s on %s: %s" % (
                        name,
                        model._meta.object_name,
                        e,
                    ))
        # Extract the options
        options = {}
        for name in DEFAULT_NAMES:
            # Ignore some special options
            if name in ["apps", "app_label"]:
                continue
            elif name in model._meta.original_attrs:
                if name == "unique_together":
                    ut = model._meta.original_attrs["unique_together"]
                    options[name] = set(normalize_together(ut))
                elif name == "index_together":
                    it = model._meta.original_attrs["index_together"]
                    options[name] = set(normalize_together(it))
                elif name == "indexes":
                    indexes = [idx.clone() for idx in model._meta.indexes]
                    for index in indexes:
                        if not index.name:
                            index.set_name_with_model(model)
                    options['indexes'] = indexes
                elif name == 'constraints':
                    options['constraints'] = [con.clone() for con in model._meta.constraints]
                else:
                    options[name] = model._meta.original_attrs[name]
        # If we're ignoring relationships, remove all field-listing model
        # options (that option basically just means "make a stub model")
        if exclude_rels:
            for key in ["unique_together", "index_together", "order_with_respect_to"]:
                if key in options:
                    del options[key]
        # Private fields are ignored, so remove options that refer to them.
        elif options.get('order_with_respect_to') in {field.name for field in model._meta.private_fields}:
            del options['order_with_respect_to']

        def flatten_bases(model):
            # Replace abstract bases by their own (concrete) bases, recursively.
            bases = []
            for base in model.__bases__:
                if hasattr(base, "_meta") and base._meta.abstract:
                    bases.extend(flatten_bases(base))
                else:
                    bases.append(base)
            return bases

        # We can't rely on __mro__ directly because we only want to flatten
        # abstract models and not the whole tree. However by recursing on
        # __bases__ we may end up with duplicates and ordering issues, we
        # therefore discard any duplicates and reorder the bases according
        # to their index in the MRO.
        flattened_bases = sorted(set(flatten_bases(model)), key=lambda x: model.__mro__.index(x))
        # Make our record
        bases = tuple(
            (
                base._meta.label_lower
                if hasattr(base, "_meta") else
                base
            )
            for base in flattened_bases
        )
        # Ensure at least one base inherits from models.Model
        if not any((isinstance(base, str) or issubclass(base, models.Model)) for base in bases):
            bases = (models.Model,)
        managers = []
        manager_names = set()
        default_manager_shim = None
        for manager in model._meta.managers:
            if manager.name in manager_names:
                # Skip overridden managers.
                continue
            elif manager.use_in_migrations:
                # Copy managers usable in migrations.
                new_manager = copy.copy(manager)
                new_manager._set_creation_counter()
            elif manager is model._base_manager or manager is model._default_manager:
                # Shim custom managers used as default and base managers.
                new_manager = models.Manager()
                new_manager.model = manager.model
                new_manager.name = manager.name
                if manager is model._default_manager:
                    default_manager_shim = new_manager
            else:
                continue
            manager_names.add(manager.name)
            managers.append((manager.name, new_manager))
        # Ignore a shimmed default manager called objects if it's the only one.
        if managers == [('objects', default_manager_shim)]:
            managers = []
        # Construct the new ModelState
        return cls(
            model._meta.app_label,
            model._meta.object_name,
            fields,
            options,
            bases,
            managers,
        )

    def construct_managers(self):
        """Deep-clone the managers using deconstruction."""
        # Sort all managers by their creation counter
        sorted_managers = sorted(self.managers, key=lambda v: v[1].creation_counter)
        for mgr_name, manager in sorted_managers:
            as_manager, manager_path, qs_path, args, kwargs = manager.deconstruct()
            if as_manager:
                qs_class = import_string(qs_path)
                yield mgr_name, qs_class.as_manager()
            else:
                manager_class = import_string(manager_path)
                yield mgr_name, manager_class(*args, **kwargs)

    def clone(self):
        """Return an exact copy of this ModelState."""
        return self.__class__(
            app_label=self.app_label,
            name=self.name,
            fields=list(self.fields),
            # Since options are shallow-copied here, operations such as
            # AddIndex must replace their option (e.g 'indexes') rather
            # than mutating it.
            options=dict(self.options),
            bases=self.bases,
            managers=list(self.managers),
        )

    def render(self, apps):
        """Create a Model object from our current state into the given apps."""
        # First, make a Meta object
        meta_contents = {'app_label': self.app_label, 'apps': apps, **self.options}
        meta = type("Meta", (), meta_contents)
        # Then, work out our bases
        try:
            bases = tuple(
                (apps.get_model(base) if isinstance(base, str) else base)
                for base in self.bases
            )
        except LookupError:
            raise InvalidBasesError("Cannot resolve one or more bases from %r" % (self.bases,))
        # Turn fields into a dict for the body, add other bits
        body = {name: field.clone() for name, field in self.fields}
        body['Meta'] = meta
        body['__module__'] = "__fake__"
        # Restore managers
        body.update(self.construct_managers())
        # Then, make a Model object (apps.register_model is called in __new__)
        return type(self.name, bases, body)

    def get_field_by_name(self, name):
        """Return the field named `name`; raise ValueError if absent."""
        for fname, field in self.fields:
            if fname == name:
                return field
        raise ValueError("No field called %s on model %s" % (name, self.name))

    def get_index_by_name(self, name):
        """Return the index named `name`; raise ValueError if absent."""
        for index in self.options['indexes']:
            if index.name == name:
                return index
        raise ValueError("No index named %s on model %s" % (name, self.name))

    def get_constraint_by_name(self, name):
        """Return the constraint named `name`; raise ValueError if absent."""
        for constraint in self.options['constraints']:
            if constraint.name == name:
                return constraint
        raise ValueError('No constraint named %s on model %s' % (name, self.name))

    def __repr__(self):
        return "<%s: '%s.%s'>" % (self.__class__.__name__, self.app_label, self.name)

    def __eq__(self, other):
        # Fields compare by name plus their deconstructed form minus the
        # redundant leading name element (names are compared via k1 == k2).
        return (
            (self.app_label == other.app_label) and
            (self.name == other.name) and
            (len(self.fields) == len(other.fields)) and
            all((k1 == k2 and (f1.deconstruct()[1:] == f2.deconstruct()[1:]))
                for (k1, f1), (k2, f2) in zip(self.fields, other.fields)) and
            (self.options == other.options) and
            (self.bases == other.bases) and
            (self.managers == other.managers)
        )
/BlueWhale3_SingleCell-1.3.5-py3-none-any.whl/orangecontrib/single_cell/widgets/owfilter.py | import sys
import enum
import math
import numbers
from contextlib import contextmanager
from types import SimpleNamespace
import typing
from typing import Optional, Sequence, Tuple, Dict, Callable, Union, Iterable
import numpy as np
from scipy import stats
from AnyQt.QtCore import Qt, QSize, QPointF, QRectF, QLineF, QTimer
from AnyQt.QtCore import pyqtSignal as Signal, pyqtSlot as Slot
from AnyQt.QtGui import (
QPainter, QPolygonF, QPainterPath, QPalette, QPen, QBrush, QColor,
QKeySequence
)
from AnyQt.QtWidgets import (
QLabel, QDoubleSpinBox, QGroupBox, QHBoxLayout, QAction,
QGraphicsPathItem, QGraphicsRectItem, QGraphicsItem, QFormLayout,
QApplication, QButtonGroup, QRadioButton, QCheckBox, QStackedWidget
)
import pyqtgraph as pg
import Orange.data
import Orange.widgets.utils.plot.owpalette
from Orange.widgets import widget, gui, settings
from orangecontrib.single_cell.i18n_config import *
def __(key):
    """Return the localized UI string for `key` in the owfilter catalog."""
    return i18n.t('single_cell.owfilter.' + key)
#: Filter type
Cells, Genes, Data = 0, 1, 2

#: Filter quality control measure (apply to Cell/Genes type only)
DetectionCount = 0  # number of genes/features with non-zero expression level
TotalCounts = 1  # total counts by cell/gene

#: Filter descriptions for various roles in UI, keyed by filter type
#: (short name, name, description)
FilterInfo = {
    Cells: (__("btn.cells"), __('label.cell'), __('label.cell_tip')),
    Genes: (__("btn.genes"), __('label.genes'), __('label.genes_tip')),
    Data: (__("btn.data"), __("label.data"), __('label.data_tip'))
}

# Quality control measure descriptions for UI: (name, tooltip)
MeasureInfo = {
    TotalCounts: (__('gbox.total_counts'),
                  __('gbox.total_counts_tip')),
    DetectionCount: (__("gbox.detection_count"),
                     __("gbox.detection_count_tip"))
}
# Plot scales
class Scale(enum.Enum):
    """Value/axis scale used for plotting and thresholding."""
    Linear = "Linear"
    Log1p = "Log1p"
def log1p(x, base=10.):
    """Return log(1 + x) in the given `base`, element-wise."""
    # Change of base: ln(1 + x) / ln(base).
    return np.log1p(x) / np.log(base)
def expm1(x, base=10.):
    """
    Inverse of `log1p`: return base ** x - 1, element-wise.

    The input is copied (and coerced to float) before scaling. The original
    implementation used ``np.asarray(x)`` followed by an in-place ``*=``,
    which silently mutated a caller-supplied float ndarray and raised on
    integer arrays due to the unsafe in-place cast.
    """
    x = np.asarray(x, dtype=float) * np.log(base)
    return np.expm1(x)
class ScatterPlotItem(pg.ScatterPlotItem):
    """ScatterPlotItem that applies the 'antialias'/'pxMode' render hints."""
    def paint(self, painter, *args):
        # Set smoothing hints from our options before delegating to the
        # base class paint.
        if self.opts["antialias"]:
            painter.setRenderHint(QPainter.Antialiasing, True)
        if self.opts["pxMode"]:
            painter.setRenderHint(QPainter.SmoothPixmapTransform, True)
        super().paint(painter, *args)
if typing.TYPE_CHECKING:
    # Static-typing aliases only; this branch is never executed at runtime.
    ArrayLike = Union[np.ndarray, np.generic, numbers.Number, Iterable]
    Transform = Callable[[ArrayLike], np.ndarray]
# filter state data class
class _FilterData:
    """Per-setup plot/filter state derived from the current input data."""
    #: The array used for filtering/ploting. Is some marginal statistic
    #: of the input.
    x = ...  # type: np.ndarray
    #: The transformed array x (log1p).
    #: NOTE: xt.size need not match x.size (non finite values can be omitted)
    xt = ...  # type: np.ndarray
    #: The transformation function mapping x to xt
    transform = ...  # type: Transform
    #: The inverse of `transform`,
    transform_inv = ...  # type: Transform
    #: min/max bounds of x
    xmin = xmax = ...  # type: float
    #: min/max bounds of xt
    xtmin = xtmax = ...  # type: float
class OWFilter(widget.OWWidget):
    name = __('name')
    icon = 'icons/Filter.svg'
    description = __("desc")
    priority = 210

    class Inputs:
        # Data table to be filtered.
        data = widget.Input('Data', Orange.data.Table, label=i18n.t("single_cell.common.data"))

    class Outputs:
        # Filtered data table.
        data = widget.Output('Data', Orange.data.Table, label=i18n.t("single_cell.common.data"))

    class Warning(widget.OWWidget.Warning):
        sampling_in_effect = widget.Msg(__("msg.sampling_in_effect"))

    class Error(widget.OWWidget.Error):
        invalid_range = widget.Msg(__('msg.invalid_range'))
        invalid_domain = widget.Msg(
            __('msg.invalid_domain')
        )

    #: Filter mode.
    #: Filter out rows/columns or 'zap' data values in range.
    Cells, Genes, Data = Cells, Genes, Data

    settings_version = 3
    #: The selected filter mode
    selected_filter_type = settings.Setting(Cells)  # type: int
    #: Selected filter statistics / QC measure indexed by filter_type
    selected_filter_metric = settings.Setting(TotalCounts)  # type: int
    #: Augment the violin plot with a dot plot (strip plot) of the (non-zero)
    #: measurement counts in Cells/Genes mode or data matrix values in Data
    #: mode.
    display_dotplot = settings.Setting(True)  # type: bool
    #: Is min/max range selection enable
    limit_lower_enabled = settings.Setting(True)  # type: bool
    limit_upper_enabled = settings.Setting(True)  # type: bool
    #: The lower and upper selection limit for each filter type
    thresholds = settings.Setting({
        (Cells, DetectionCount): (0, 2 ** 31 - 1),
        (Cells, TotalCounts): (0, 2 ** 31 - 1),
        (Genes, DetectionCount): (0, 2 ** 31 - 1),
        (Genes, TotalCounts): (0, 2 ** 31 - 1),
        (Data, -1): (0.0, 2.0 ** 31 - 1)
    })  # type: Dict[Tuple[int, int], Tuple[float, float]]
    #: Plot scale: 'Linear' or 'Log1p'
    scale = settings.Setting(Scale.Linear.name)  # type: str
    auto_commit = settings.Setting(True)  # type: bool
    def __init__(self):
        """Build the control area (filter type/metric, limits, plot options)
        and the central violin plot view."""
        super().__init__()
        self.data = None  # type: Optional[Orange.data.Table]
        self._state = None  # type: Optional[_FilterData]
        # Info box with a summary of the input/selection.
        box = gui.widgetBox(self.controlArea, __('box.info'))
        self._info = QLabel(box, wordWrap=True)
        self._info.setText(__('box.info_label'))
        box.layout().addWidget(self._info)
        # Filter type selection (Cells / Genes / Data radio buttons).
        box = gui.widgetBox(self.controlArea, __('box.filter_type'), spacing=-1)
        rbg = QButtonGroup(box, exclusive=True)
        layout = QHBoxLayout()
        layout.setContentsMargins(0, 0, 0, 0)
        for id_ in [Cells, Genes, Data]:
            name, _, tip = FilterInfo[id_]
            b = QRadioButton(
                name, toolTip=tip, checked=id_ == self.selected_filter_type
            )
            rbg.addButton(b, id_)
            layout.addWidget(b, stretch=10, alignment=Qt.AlignCenter)
        box.layout().addLayout(layout)
        rbg.buttonClicked[int].connect(self.set_filter_type)
        # QC measure combo (only meaningful for Cells/Genes filtering).
        self.filter_metric_cb = gui.comboBox(
            box, self, "selected_filter_metric", callback=self._update_metric,
            enabled=self.selected_filter_type != Data
        )
        for id_ in [DetectionCount, TotalCounts]:
            text, ttip = MeasureInfo[id_]
            self.filter_metric_cb.addItem(text)
            idx = self.filter_metric_cb.count() - 1
            self.filter_metric_cb.setItemData(idx, ttip, Qt.ToolTipRole)
        self.filter_metric_cb.setCurrentIndex(self.selected_filter_metric)
        # Min/max threshold spin boxes, one page per filter type in a
        # pair of QStackedWidgets (index == filter type).
        form = QFormLayout(
            labelAlignment=Qt.AlignLeft,
            formAlignment=Qt.AlignLeft,
            fieldGrowthPolicy=QFormLayout.AllNonFixedFieldsGrow
        )
        self._filter_box = box = gui.widgetBox(
            self.controlArea, __("box.filter"), orientation=form
        )  # type: QGroupBox
        self.threshold_stacks = (
            QStackedWidget(enabled=self.limit_lower_enabled),
            QStackedWidget(enabled=self.limit_upper_enabled),
        )
        finfo = np.finfo(np.float64)
        for filter_ in [Cells, Genes, Data]:
            if filter_ in {Cells, Genes}:
                minimum = 0.0
                ndecimals = 1
                metric = self.selected_filter_metric
            else:
                minimum = finfo.min
                ndecimals = 3
                metric = -1
            spinlower = QDoubleSpinBox(
                self, minimum=minimum, maximum=finfo.max, decimals=ndecimals,
                keyboardTracking=False,
            )
            spinupper = QDoubleSpinBox(
                self, minimum=minimum, maximum=finfo.max, decimals=ndecimals,
                keyboardTracking=False,
            )
            lower, upper = self.thresholds.get((filter_, metric), (0, 0))
            spinlower.setValue(lower)
            spinupper.setValue(upper)
            self.threshold_stacks[0].addWidget(spinlower)
            self.threshold_stacks[1].addWidget(spinupper)
            spinlower.valueChanged.connect(self._limitchanged)
            spinupper.valueChanged.connect(self._limitchanged)
        self.threshold_stacks[0].setCurrentIndex(self.selected_filter_type)
        self.threshold_stacks[1].setCurrentIndex(self.selected_filter_type)
        self.limit_lower_enabled_cb = cb = QCheckBox(
            __("btn.min"), checked=self.limit_lower_enabled
        )
        cb.toggled.connect(self.set_lower_limit_enabled)
        cb.setAttribute(Qt.WA_LayoutUsesWidgetRect, True)
        form.addRow(cb, self.threshold_stacks[0])
        self.limit_upper_enabled_cb = cb = QCheckBox(
            __("btn.max"), checked=self.limit_upper_enabled
        )
        cb.toggled.connect(self.set_upper_limit_enabled)
        cb.setAttribute(Qt.WA_LayoutUsesWidgetRect, True)
        form.addRow(cb, self.threshold_stacks[1])
        # Plot options (dot-plot overlay, log scale).
        box = gui.widgetBox(self.controlArea, __('box.plot_options'))
        self._showpoints = gui.checkBox(
            box, self, "display_dotplot", __('btn.data_points'),
            callback=self._update_dotplot
        )
        self.log_scale_cb = QCheckBox(
            __('btn.log_scale'), checked=self.scale == Scale.Log1p.name
        )
        self.log_scale_cb.toggled[bool].connect(
            lambda state:
            self.set_filter_scale(Scale.Log1p if state else Scale.Linear)
        )
        box.layout().addWidget(self.log_scale_cb)
        self.controlArea.layout().addStretch(10)
        gui.auto_commit(self.controlArea, self, "auto_commit", __("btn.commit"))
        # Central violin plot view.
        self._view = pg.GraphicsView()
        self._view.enableMouse(False)
        self._view.setAntialiasing(True)
        self._plot = plot = ViolinPlot()
        self._plot.setDataPointsVisible(self.display_dotplot)
        self._plot.setSelectionMode(
            (ViolinPlot.Low if self.limit_lower_enabled else 0) |
            (ViolinPlot.High if self.limit_upper_enabled else 0)
        )
        self._plot.setRange(QRectF(-1., 0., 2., 1.))
        self._plot.selectionEdited.connect(self._limitchanged_plot)
        self._view.setCentralWidget(self._plot)
        bottom = self._plot.getAxis("bottom")  # type: pg.AxisItem
        bottom.hide()
        plot.setMouseEnabled(False, False)
        plot.hideButtons()
        self.mainArea.layout().addWidget(self._view)
        # Coalescing commit timer
        self._committimer = QTimer(self, singleShot=True)
        self._committimer.timeout.connect(self.commit)
        self.addAction(
            QAction(__("btn.select_all"), self, shortcut=QKeySequence.SelectAll,
                    triggered=self._select_all)
        )
        self._setup_axes()
def sizeHint(self):
sh = super().sizeHint() # type: QSize
return sh.expandedTo(QSize(800, 600))
    def set_filter_type(self, type_):
        """Switch the active filter mode (Cells, Genes or Data)."""
        if self.selected_filter_type != type_:
            assert type_ in (Cells, Genes, Data), str(type_)
            self.selected_filter_type = type_
            # Show the spin-box pages for the newly selected mode.
            self.threshold_stacks[0].setCurrentIndex(type_)
            self.threshold_stacks[1].setCurrentIndex(type_)
            # The QC metric combo only applies to Cells/Genes filtering.
            self.filter_metric_cb.setEnabled(type_ != Data)
            self._setup_axes()
            if self.data is not None:
                self._setup(self.data, type_)
            self._schedule_commit()
    def filter_type(self):
        """Return the currently selected filter mode (Cells/Genes/Data)."""
        return self.selected_filter_type
def set_filter_scale(self, scale):
# type: (Scale) -> None
if self.scale != scale:
self.scale = scale.name
self.log_scale_cb.setChecked(scale == Scale.Log1p)
self._update_scale()
    def filter_scale(self):
        """Return the current scale setting as a Scale enum member."""
        return Scale[self.scale]
def _update_metric(self):
self._update_scale()
if self.data is not None:
self._setup(self.data, self.selected_filter_type, )
def set_upper_limit_enabled(self, enabled):
if enabled != self.limit_upper_enabled:
self.limit_upper_enabled = enabled
self.threshold_stacks[1].setEnabled(enabled)
self.limit_upper_enabled_cb.setChecked(enabled)
self._update_filter()
self._schedule_commit()
def set_lower_limit_enabled(self, enabled):
if enabled != self.limit_lower_enabled:
self.limit_lower_enabled = enabled
self.threshold_stacks[0].setEnabled(enabled)
self.limit_lower_enabled_cb.setChecked(enabled)
self._update_filter()
self._schedule_commit()
def _update_filter(self):
mode = 0
if self.limit_lower_enabled:
mode |= ViolinPlot.Low
if self.limit_upper_enabled:
mode |= ViolinPlot.High
self._plot.setSelectionMode(mode)
self._update_info()
self._schedule_commit()
def _is_filter_enabled(self):
return self.limit_lower_enabled or self.limit_upper_enabled
    @Inputs.data
    def set_data(self, data):
        # type: (Optional[Orange.data.Table]) -> None
        """Set the input data table, validating domain and value range."""
        self.clear()
        # Only all-continuous domains are supported.
        if data is not None and \
                any(type(v) is not Orange.data.ContinuousVariable
                    for v in data.domain.attributes):
            self.Error.invalid_domain()
            data = None
        # Counts must be non-negative.
        if data is not None and np.any(data.X < 0):
            self.Error.invalid_range()
            data = None
        self.data = data
        if data is not None:
            self._setup(data, self.filter_type())
        self.unconditional_commit()
    def clear(self):
        """Reset the widget to the initial no-input state."""
        self.data = None
        self._state = None
        self._plot.clear()
        # reset the plot range
        self._plot.setRange(QRectF(-1., 0., 2., 1.))
        self._update_info()
        self.Warning.clear()
        self.Error.clear()
    def _update_info(self):
        """Refresh the summary text in the info box (input size and the
        number of cells/genes currently passing the filter)."""
        text = []
        if self.data is None:
            text += [__('text.missing_data')]
        else:
            N, M = len(self.data), len(self.data.domain.attributes)
            text = []
            text += [__("text.info").format(N=N, M=M)]
            if self._is_filter_enabled() and \
                    self.filter_type() in [Cells, Genes]:
                # Count how many items fall inside the enabled limits.
                counts = self._state.x
                mask = np.ones(counts.shape, dtype=bool)
                if self.limit_lower_enabled:
                    mask &= self.limit_lower <= counts
                if self.limit_upper_enabled:
                    mask &= counts <= self.limit_upper
                n = np.count_nonzero(mask)
                subject = __("text.cell") if self.filter_type() == Cells else __("text.gene")
                if n == 0:
                    text += [__('text.out_filtered').format(subject)]
                else:
                    text += [__("text.select_subject").format(n, subject=subject)
                             ]
            else:
                text += [""]
        self._info.setText("\n".join(text))
def _select_all(self):
self.limit_lower = 0
self.limit_upper = 2 ** 31 - 1
self._limitchanged()
    def _setup_axes(self):
        """Update the plot title and left-axis label/log mode to match the
        current filter type, QC measure and scale."""
        # Setup the plot axes and title
        filter_type = self.filter_type()
        info = FilterInfo[filter_type]
        _, title, _, *_ = info
        if filter_type in [Cells, Genes]:
            measure = self.selected_filter_metric
        else:
            measure = None
        if filter_type == Cells and measure == TotalCounts:
            axis_label = __("label.total_counts_library")
        elif filter_type == Cells and measure == DetectionCount:
            axis_label = __('label.express_genes')
        elif filter_type == Genes and measure == TotalCounts:
            axis_label = __('label.total_counts')
        elif filter_type == Genes and measure == DetectionCount:
            # TODO: Too long
            axis_label = __("label.number_cells")
        elif filter_type == Data:
            axis_label = __("label.gene_expression")
        ax = self._plot.getAxis("left")
        if self.filter_scale() == Scale.Log1p:
            axis_label = __('label.log_scale').format(axis_label)
            ax.setLabel(axis_label)
            ax.setLogMode(True)
        else:
            ax.setLogMode(False)
            ax.setLabel(axis_label)
        # Reset the tick text area width
        ax.textWidth = 30
        ax.setWidth(None)
        self._plot.setTitle(title)
def _setup(self, data, filter_type):
self._plot.clear()
self._state = None
self._setup_axes()
span = -1.0 # data span
measure = self.selected_filter_metric if filter_type != Data else None
state = _FilterData()
if filter_type in [Cells, Genes]:
if filter_type == Cells:
axis = 1
else:
axis = 0
if measure == TotalCounts:
counts = np.nansum(data.X, axis=axis)
else:
mask = (data.X != 0) & (np.isfinite(data.X))
counts = np.count_nonzero(mask, axis=axis)
x = counts
self.Warning.sampling_in_effect.clear()
elif filter_type == Data:
x = data.X.ravel()
x = x[np.isfinite(x)]
x = x[x != 0]
MAX_DISPLAY_SIZE = 20000
if x.size > MAX_DISPLAY_SIZE:
self.Warning.sampling_in_effect(MAX_DISPLAY_SIZE, x.size)
# tails to preserve exactly
tails = 1
assert x.flags.owndata
x.partition(tails - 1)
xrest = x[tails:]
xrest.partition(xrest.size - tails)
x1, x2, x3 = x[:tails], x[tails:x.size - tails], x[x.size - tails:]
assert x1.size + x2.size + x3.size == x.size
x2 = np.random.RandomState(0x667).choice(
x2, size=MAX_DISPLAY_SIZE - 2 * tails, replace=False,
)
x = np.r_[x1, x2, x3]
else:
self.Warning.sampling_in_effect.clear()
else:
assert False
state.x = x
scale = self.filter_scale()
if scale == Scale.Log1p:
scale_transform = log1p
scale_transform_inv = expm1
else:
scale_transform = lambda x: x
scale_transform_inv = scale_transform
state.transform = scale_transform
state.transform_inv = scale_transform_inv
if x.size:
xmin, xmax = np.min(x), np.max(x)
else:
xmin = xmax = 0., 1.
state.xmin, state.xmax = xmin, xmax
xs = scale_transform(x)
xs = xs[np.isfinite(xs)]
state.xt = xs
if xs.size:
xsmin, xsmax = np.min(xs), np.max(xs)
# find effective xmin, xmax (valid in both original and transformed
# space
xmin_, xmax_ = scale_transform_inv([xsmin, xsmax])
xmin, xmax = max(xmin, xmin_), min(xmax, xmax_)
lower = np.clip(self.limit_lower, xmin, xmax)
upper = np.clip(self.limit_upper, xmin, xmax)
else:
xmin, xmax = 0., 1.
lower, upper = 0., 1.
state.xtmin, state.xtmax = xsmin, xsmax
spinlow = self.threshold_stacks[0].widget(filter_type)
spinhigh = self.threshold_stacks[1].widget(filter_type)
if filter_type == Data or measure == TotalCounts:
span = xmax - xmin
if span > 0:
ndecimals = max(4 - int(np.floor(np.log10(span))), 1)
else:
ndecimals = 1
else:
ndecimals = 1
# Round effective bounds (spin <=> plot cut lines)
lower = round(lower, ndecimals)
upper = round(upper, ndecimals)
if xs.size > 0:
# TODO: Need correction for lower bounded distribution (counts)
# Use reflection around 0, but gaussian_kde does not provide
# sufficient flexibility w.r.t bandwidth selection.
self._plot.setData(xs, 1000)
self._plot.setBoundary(*scale_transform([lower, upper]))
spinlow.setDecimals(ndecimals)
self.limit_lower = lower
spinhigh.setDecimals(ndecimals)
self.limit_upper = upper
self._state = state
self._update_info()
    def _update_dotplot(self):
        """Show or hide the strip-plot overlay of individual data points."""
        self._plot.setDataPointsVisible(self.display_dotplot)
def current_filter_thresholds(self):
if self.selected_filter_type in {Cells, Genes}:
metric = self.selected_filter_metric
else:
metric = -1
return self.thresholds[self.selected_filter_type, metric]
def set_current_filter_thesholds(self, lower, upper):
if self.selected_filter_type in {Cells, Genes}:
metric = self.selected_filter_metric
else:
metric = -1
self.thresholds[self.selected_filter_type, metric] = (lower, upper)
    def _update_scale(self):
        """Rebuild the axes (and the plot, if data is present) after a
        scale change."""
        self._setup_axes()
        if self.data is not None:
            self._setup(self.data, self.filter_type())
    @property
    def limit_lower(self):
        """Lower threshold for the active filter/metric (persisted setting)."""
        return self.current_filter_thresholds()[0]

    @limit_lower.setter
    def limit_lower(self, value):
        # Persist the new lower bound and mirror it in the spin box.
        _, upper = self.current_filter_thresholds()
        self.set_current_filter_thesholds(value, upper)
        stacklower, _ = self.threshold_stacks
        sb = stacklower.widget(self.selected_filter_type)
        # prevent changes due to spin box rounding
        sb.setValue(value)
    @property
    def limit_upper(self):
        # Upper threshold of the currently selected filter (view over
        # self.thresholds; see current_filter_thresholds).
        return self.current_filter_thresholds()[1]
    @limit_upper.setter
    def limit_upper(self, value):
        lower, _ = self.current_filter_thresholds()
        self.set_current_filter_thesholds(lower, value)
        _, stackupper = self.threshold_stacks
        sb = stackupper.widget(self.selected_filter_type)
        # Keep the spin box in sync with the stored threshold.
        sb.setValue(value)
    @Slot()
    def _limitchanged(self):
        # Low/high limit changed via the spin boxes
        stacklow, stackhigh = self.threshold_stacks
        filter_ = self.selected_filter_type
        lower = stacklow.widget(filter_).value()
        upper = stackhigh.widget(filter_).value()
        self.set_current_filter_thesholds(lower, upper)
        state = self._state
        if state is not None and state.x.size:
            # Clamp to the data range, then mirror the cut lines on the plot
            # in transformed (plot) coordinates.
            xmin, xmax = state.xmin, state.xmax
            lower = np.clip(lower, xmin, xmax)
            upper = np.clip(upper, xmin, xmax)
            lower, upper = state.transform([lower, upper])
            self._plot.setBoundary(lower, upper)
        # TODO: Only when the actual selection/filter mask changes
        self._schedule_commit()
        self._update_info()
    def _limitchanged_plot(self):
        # Low/high limit changed via the plot
        if self._state is not None:
            state = self._state
            newlower_, newupper_ = self._plot.boundary()
            # The plot reports boundaries in transformed space; map back.
            newlower, newupper = state.transform_inv([newlower_, newupper_])
            filter_ = self.selected_filter_type
            lower, upper = self.current_filter_thresholds()
            stacklow, stackhigh = self.threshold_stacks
            spin_lower = stacklow.widget(filter_)
            spin_upper = stackhigh.widget(filter_)
            # do rounding to match the spin box's precision
            if self.limit_lower_enabled:
                newlower = round(newlower, spin_lower.decimals())
            else:
                newlower = lower
            if self.limit_upper_enabled:
                newupper = round(newupper, spin_upper.decimals())
            else:
                newupper = upper
            if self.limit_lower_enabled and newlower != lower:
                self.limit_lower = newlower
            if self.limit_upper_enabled and newupper != upper:
                self.limit_upper = newupper
            # Snap the plot's cut lines to the rounded values.
            newlower_, newupper_ = state.transform([newlower, newupper])
            self._plot.setBoundary(newlower_, newupper_)
            # TODO: Only when the actual selection/filter mask changes
            self._schedule_commit()
            self._update_info()
    def _schedule_commit(self):
        # Debounce commits: restarting the timer coalesces rapid UI changes.
        self._committimer.start()
def commit(self):
self._committimer.stop()
data = self.data
if data is not None and self._is_filter_enabled():
if self.filter_type() in [Cells, Genes]:
state = self._state
assert state is not None
counts = state.x
cmax = self.limit_upper
cmin = self.limit_lower
mask = np.ones(counts.shape, dtype=bool)
if self.limit_lower_enabled:
mask &= cmin <= counts
if self.limit_upper_enabled:
mask &= counts <= cmax
if self.filter_type() == Cells:
assert counts.size == len(data)
data = data[mask]
else:
assert counts.size == len(data.domain.attributes)
atts = [v for v, m in zip(data.domain.attributes, mask)
if m]
data = data.from_table(
Orange.data.Domain(
atts, data.domain.class_vars, data.domain.metas
),
data
)
if (
len(data) == 0
or len(data.domain.variables) + len(data.domain.metas) == 0
):
data = None
elif self.filter_type() == Data:
dmin, dmax = self.limit_lower, self.limit_upper
data = data.copy()
assert data.X.base is None
mask = None
if self.limit_lower_enabled:
mask = data.X < dmin
if self.limit_upper_enabled:
if mask is not None:
mask |= data.X > dmax
else:
mask = data.X < dmax
data.X[mask] = 0.0
else:
assert False
self.Outputs.data.send(data)
    def onDeleteWidget(self):
        # Release plot/scene resources before the widget is destroyed.
        self.clear()
        self._plot.close()
        self._view.scene().clear()
        super().onDeleteWidget()
    @classmethod
    def migrate_settings(cls, settings, version):
        # v1 -> v2: a single global (limit_lower, limit_upper) pair became a
        # dict of per-(filter, metric) thresholds; seed every entry with the
        # old global pair.
        if (version is None or version < 2) and \
                ("limit_lower" in settings and "limit_upper" in settings):
            # v2 changed limit_lower, limit_upper to per filter limits stored
            # in a single dict
            lower = settings.pop("limit_lower")
            upper = settings.pop("limit_upper")
            settings["thresholds"] = {
                (Cells, TotalCounts): (lower, upper),
                (Cells, DetectionCount): (lower, upper),
                (Genes, TotalCounts): (lower, upper),
                (Genes, DetectionCount): (lower, upper),
                (Data, -1): (lower, upper),
            }
        # v2 -> v3: thresholds were keyed by filter only; re-key them by
        # (filter, metric), duplicating each filter's pair for both metrics.
        if version == 2:
            thresholds = settings["thresholds"]
            c = thresholds.pop(Cells)
            g = thresholds.pop(Genes)
            d = thresholds.pop(Data)
            thresholds = {
                (Cells, TotalCounts): c,
                (Cells, DetectionCount): c,
                (Genes, TotalCounts): g,
                (Genes, DetectionCount): g,
                (Data, -1): d,
            }
            settings["thresholds"] = thresholds
@contextmanager
def block_signals(qobj):
    """Context manager: block ``qobj``'s Qt signals for the managed scope.

    The previous blocking state is restored on exit, even on exceptions.
    """
    previous = qobj.blockSignals(True)
    try:
        yield
    finally:
        qobj.blockSignals(previous)
class AxisItem(pg.AxisItem):
    def logTickStrings(self, values, scale, spacing):
        # reimplemented
        # In log mode the tick values are base-10 exponents; render each tick
        # as 10**v in compact scientific notation (e.g. 2x10^3).
        values = [10 ** v for v in values]
        return [render_exp(v, 1) for v in values]
class ViolinPlot(pg.PlotItem):
    """
    A violin plot item with interactive data boundary selection.
    """
    #: Emitted when the selection boundary has changed
    selectionChanged = Signal()
    #: Emitted when the selection boundary has been edited by the user
    #: (by dragging the boundary lines)
    selectionEdited = Signal()
    #: Selection Flags
    NoSelection, Low, High = 0, 1, 2
    def __init__(self, *args, enableMenu=False, axisItems=None, **kwargs):
        if axisItems is None:
            axisItems = {}
        # Default every side to the custom AxisItem (log-aware tick labels).
        for position in ("left", 'right', 'top', 'bottom'):
            axisItems.setdefault(position, AxisItem(position))
        super().__init__(*args, enableMenu=enableMenu, axisItems=axisItems,
                         **kwargs)
        self.__data = None
        #: min/max cutoff line positions
        self.__min = 0
        self.__max = 0
        self.__dataPointsVisible = True
        self.__selectionEnabled = True
        self.__selectionMode = ViolinPlot.High | ViolinPlot.Low
        self._plotitems = None
    def setData(self, data, nsamples, sample_range=None, color=Qt.magenta):
        """
        Display `data`: fit a gaussian KDE sampled at `nsamples` points,
        draw the violin shape, a jittered point overlay, the two draggable
        selection lines and the translucent selection rectangle.
        """
        assert np.all(np.isfinite(data))
        if data.size > 0:
            xmin, xmax = np.min(data), np.max(data)
        else:
            xmin = xmax = 0.0
        if sample_range is None:
            # Pad the sampling interval by 2.5% of the data span on each side.
            xrange = xmax - xmin
            sample_min = xmin - xrange * 0.025
            sample_max = xmax + xrange * 0.025
        else:
            sample_min, sample_max = sample_range
        sample = np.linspace(sample_min, sample_max, nsamples)
        if data.size < 2:
            # KDE needs at least two points; fall back to a flat density.
            est = np.full(sample.size, 1. / sample.size, )
        else:
            try:
                density = stats.gaussian_kde(data)
                est = density.evaluate(sample)
            except np.linalg.LinAlgError:
                est = np.zeros(sample.size)
        item = QGraphicsPathItem(violin_shape(sample, est))
        color = QColor(color)
        color.setAlphaF(0.5)
        item.setBrush(QBrush(color))
        pen = QPen(self.palette().color(QPalette.Shadow))
        pen.setCosmetic(True)
        item.setPen(pen)
        est_max = np.max(est)
        # Deterministic horizontal jitter for the data-point overlay.
        x = np.random.RandomState(0xD06F00D).uniform(
            -est_max, est_max, size=data.size
        )
        dots = ScatterPlotItem(
            x=x, y=data, size=3,
        )
        dots.setVisible(self.__dataPointsVisible)
        pen = QPen(self.palette().color(QPalette.Shadow), 1)
        hoverPen = QPen(self.palette().color(QPalette.Highlight), 1.5)
        cmax = SelectionLine(
            angle=0, pos=xmax, movable=True, bounds=(sample_min, sample_max),
            pen=pen, hoverPen=hoverPen
        )
        cmin = SelectionLine(
            angle=0, pos=xmin, movable=True, bounds=(sample_min, sample_max),
            pen=pen, hoverPen=hoverPen
        )
        cmax.setCursor(Qt.SizeVerCursor)
        cmin.setCursor(Qt.SizeVerCursor)
        selection_item = QGraphicsRectItem(
            QRectF(-est_max, xmin, est_max * 2, xmax - xmin)
        )
        selection_item.setPen(QPen(Qt.NoPen))
        selection_item.setBrush(QColor(0, 250, 0, 50))
        def update_selection_rect():
            # Track the cut lines with the translucent selection rectangle;
            # a disabled side stretches to the viewbox edge instead.
            mode = self.__selectionMode
            p = selection_item.parentItem()  # type: Optional[QGraphicsItem]
            while p is not None and not isinstance(p, pg.ViewBox):
                p = p.parentItem()
            if p is not None:
                viewbox = p  # type: pg.ViewBox
            else:
                viewbox = None
            rect = selection_item.rect()  # type: QRectF
            if mode & ViolinPlot.High:
                rect.setTop(cmax.value())
            elif viewbox is not None:
                rect.setTop(viewbox.viewRect().bottom())
            else:
                rect.setTop(cmax.maxRange[1])
            if mode & ViolinPlot.Low:
                rect.setBottom(cmin.value())
            elif viewbox is not None:
                rect.setBottom(viewbox.viewRect().top())
            else:
                rect.setBottom(cmin.maxRange[0])
            selection_item.setRect(rect.normalized())
        cmax.sigPositionChanged.connect(update_selection_rect)
        cmin.sigPositionChanged.connect(update_selection_rect)
        cmax.visibleChanged.connect(update_selection_rect)
        cmin.visibleChanged.connect(update_selection_rect)
        def setupper(line):
            # Upper cut line moved: keep lower <= upper by dragging the
            # lower line down with it when necessary.
            ebound = self.__effectiveBoundary()
            elower, eupper = ebound
            mode = self.__selectionMode
            if not mode & ViolinPlot.High:
                return
            upper = line.value()
            lower = min(elower, upper)
            if lower != elower and mode & ViolinPlot.Low:
                self.__min = lower
                cmin.setValue(lower)
            if upper != eupper:
                self.__max = upper
            if ebound != self.__effectiveBoundary():
                self.selectionEdited.emit()
                self.selectionChanged.emit()
        def setlower(line):
            # Lower cut line moved: keep lower <= upper by dragging the
            # upper line up with it when necessary.
            ebound = self.__effectiveBoundary()
            elower, eupper = ebound
            mode = self.__selectionMode
            if not mode & ViolinPlot.Low:
                return
            lower = line.value()
            upper = max(eupper, lower)
            if upper != eupper and mode & ViolinPlot.High:
                self.__max = upper
                cmax.setValue(upper)
            if lower != elower:
                self.__min = lower
            if ebound != self.__effectiveBoundary():
                self.selectionEdited.emit()
                self.selectionChanged.emit()
        cmax.sigPositionChanged.connect(setupper)
        cmin.sigPositionChanged.connect(setlower)
        selmode = self.__selectionMode
        cmax.setVisible(selmode & ViolinPlot.High)
        cmin.setVisible(selmode & ViolinPlot.Low)
        selection_item.setVisible(selmode)
        self.addItem(dots)
        self.addItem(item)
        self.addItem(cmax)
        self.addItem(cmin)
        self.addItem(selection_item)
        self.setRange(
            QRectF(-est_max, np.min(sample), est_max * 2, np.ptp(sample))
        )
        self._plotitems = SimpleNamespace(
            pointsitem=dots,
            densityitem=item,
            cmax=cmax,
            cmin=cmin,
            selection_item=selection_item
        )
        self.__min = xmin
        self.__max = xmax
    def setDataPointsVisible(self, visible):
        """Show or hide the jittered data-point overlay."""
        self.__dataPointsVisible = visible
        if self._plotitems is not None:
            self._plotitems.pointsitem.setVisible(visible)
    def setSelectionMode(self, mode):
        """
        Set the selection mode (a combination of the Low/High flags),
        updating cut-line visibility and re-bounding the recorded values.
        """
        oldlower, oldupper = self.__effectiveBoundary()
        oldmode = self.__selectionMode
        mode = mode & 0b11
        if self.__selectionMode == mode:
            return
        self.__selectionMode = mode
        if self._plotitems is None:
            return
        cmin = self._plotitems.cmin
        cmax = self._plotitems.cmax
        selitem = self._plotitems.selection_item
        cmin.setVisible(mode & ViolinPlot.Low)
        cmax.setVisible(mode & ViolinPlot.High)
        selitem.setVisible(bool(mode))
        lower, upper = self.__effectiveBoundary()
        # The recorded values are not bounded by each other on gui interactions
        # when one is disabled. Rectify this now.
        if (oldmode ^ mode) & ViolinPlot.Low and mode & ViolinPlot.High:
            # Lower activated and High enabled
            lower = min(lower, upper)
        if (oldmode ^ mode) & ViolinPlot.High and mode & ViolinPlot.Low:
            # High activated and Low enabled
            upper = max(lower, upper)
        with block_signals(self):
            if lower != oldlower and mode & ViolinPlot.Low:
                cmin.setValue(lower)
            if upper != oldupper and mode & ViolinPlot.High:
                cmax.setValue(upper)
        self.selectionChanged.emit()
    def setBoundary(self, low, high):
        """
        Set the lower and upper selection boundary value.
        """
        changed = 0
        mode = self.__selectionMode
        if self.__min != low:
            self.__min = low
            changed |= mode & ViolinPlot.Low
        if self.__max != high:
            self.__max = high
            changed |= mode & ViolinPlot.High
        if changed:
            if self._plotitems:
                # Move the lines silently; emit a single selectionChanged.
                with block_signals(self):
                    if changed & ViolinPlot.Low:
                        self._plotitems.cmin.setValue(low)
                    if changed & ViolinPlot.High:
                        self._plotitems.cmax.setValue(high)
            self.selectionChanged.emit()
    def boundary(self):
        """
        Return the current lower and upper selection boundary values.
        """
        return self.__min, self.__max
    def __effectiveBoundary(self):
        # effective boundary, masked by selection mode
        low, high = -np.inf, np.inf
        if self.__selectionMode & ViolinPlot.Low:
            low = self.__min
        if self.__selectionMode & ViolinPlot.High:
            high = self.__max
        return low, high
    def clear(self):
        # Remove all items and drop the cached plot-item references.
        super().clear()
        self._plotitems = None
    def mouseDragEvent(self, event):
        """Left-drag on the plot area places/spans the selection lines."""
        mode = self.__selectionMode
        if mode != ViolinPlot.NoSelection and event.buttons() & Qt.LeftButton:
            start = event.buttonDownScenePos(Qt.LeftButton)  # type: QPointF
            pos = event.scenePos()  # type: QPointF
            cmin, cmax = self._plotitems.cmin, self._plotitems.cmax
            assert cmin.parentItem() is cmax.parentItem()
            # Work in the lines' parent coordinates (the viewbox).
            pos = self.mapToItem(cmin.parentItem(), pos)
            start = self.mapToItem(cmin.parentItem(), start)
            if mode & ViolinPlot.Low and mode & ViolinPlot.High:
                lower, upper = min(pos.y(), start.y()), max(pos.y(), start.y())
                cmin.setValue(lower)
                cmax.setValue(upper)
            elif mode & ViolinPlot.Low:
                lower = pos.y()
                cmin.setValue(lower)
            elif mode & ViolinPlot.High:
                upper = pos.y()
                cmax.setValue(upper)
            event.accept()
def violin_shape(x, p):
    # type: (Sequence[float], Sequence[float]) -> QPainterPath
    """Build a closed, vertically oriented violin outline.

    The density ``p`` traces the right-hand side and is mirrored on the
    left, with ``x`` running along the vertical direction.
    """
    pairs = list(zip(x, p))
    right_side = [QPointF(density, pos) for pos, density in pairs]
    left_side = [QPointF(-density, pos) for pos, density in reversed(pairs)]
    path = QPainterPath()
    path.addPolygon(QPolygonF(right_side + left_side))
    return path
class SelectionLine(pg.InfiniteLine):
    def paint(self, painter, option, widget=None):
        # Draw the line in device (pixel) coordinates so it stays crisp at a
        # constant width regardless of the view's current zoom/transform.
        brect = self.boundingRect()
        c = brect.center()
        line = QLineF(brect.left(), c.y(), brect.right(), c.y())
        t = painter.transform()
        line = t.map(line)
        painter.save()
        painter.resetTransform()
        painter.setPen(self.currentPen)
        painter.drawLine(line)
        painter.restore()
def render_exp(value, prec=2):
    # type: (float, int) -> str
    """Format *value* compactly as a power of ten (e.g. '2\u00D710\u00B3').

    Plain %G rendering is returned when the value has no exponent part;
    nan/inf fall back to repr().
    """
    if not math.isfinite(value):
        return repr(value)
    text = "{:.{prec}G}".format(value, prec=prec)
    if "E" not in text:
        return text
    frac_str, exp_str = text.split("E", 1)
    frac = float(frac_str)
    exp = int(exp_str)
    if exp == 0:
        return str(frac)
    if frac == 1.0:
        return "10{exp}".format(exp=_superscript(str(exp)))
    return "{frac:g}\u00D710{exp}".format(
        frac=frac, exp=_superscript(str(exp))
    )
def _superscript(string):
# type: (str) -> str
table = str.maketrans(
"0123456789+-",
"\u2070\u00B9\u00B2\u00B3\u2074\u2075\u2076\u2077\u2078\u2079"
"\u207A\u207B",
)
return string.translate(table)
def main(argv=None):  # pragma: no cover
    """Run the widget standalone; optional argv[1] is a data file to load."""
    app = QApplication(list(argv or sys.argv))
    argv = app.arguments()
    w = OWFilter()
    if len(argv) > 1:
        filename = argv[1]
        data = Orange.data.Table(filename)
    else:
        # No file given: synthesize a sparse-ish random table for a demo.
        X = np.random.exponential(size=(1000, 1050)) - 1
        X[X < 0] = 0
        data = Orange.data.Table.from_numpy(None, X)
    w.set_data(data)
    w.show()
    w.raise_()
    app.exec()
    w.saveSettings()
    w.onDeleteWidget()
if __name__ == "__main__": # pragma: no cover
sys.exit(main(sys.argv)) | PypiClean |
/EuroPython2006_PyQt4_Examples-1.0.zip/EuroPython2006_PyQt4_Examples-1.0/Custom Widgets/Qt Examples/charactermap.py |
#############################################################################
##
## Copyright (C) 2004-2006 Trolltech ASA. All rights reserved.
##
## This file is part of the example classes of the Qt Toolkit.
##
## This file may be used under the terms of the GNU General Public
## License version 2.0 as published by the Free Software Foundation
## and appearing in the file LICENSE.GPL included in the packaging of
## this file. Please review the following information to ensure GNU
## General Public Licensing requirements will be met:
## http://www.trolltech.com/products/qt/opensource.html
##
## If you are unsure which license is appropriate for your use, please
## review the following information:
## http://www.trolltech.com/products/qt/licensing.html or contact the
## sales department at sales@trolltech.com.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
#############################################################################
import sys
from PyQt4 import QtCore, QtGui
class CharacterWidget(QtGui.QWidget):
    """Scrollable 32-column grid of Unicode characters (PyQt4 example code).

    Emits characterSelected(QString) when a cell with a printable character
    is clicked.  Every cell is a fixed 24x24 px square.
    NOTE(review): this is Python 2 era code (QtCore.QString, `/` integer
    division in the cell arithmetic) — confirm the interpreter before reuse.
    """
    def __init__(self, parent = None):
        QtGui.QWidget.__init__(self, parent)
        self.displayFont = QtGui.QFont()
        # Code point of the last clicked cell; -1 means "nothing selected".
        self.lastKey = -1
        # Mouse tracking makes mouseMoveEvent fire without a pressed button,
        # so the code-point tooltip can follow the hover position.
        self.setMouseTracking(True)
    def updateFont(self, fontFamily):
        # Switch family but keep a fixed 16 px glyph size.
        self.displayFont.setFamily(fontFamily)
        self.displayFont.setPixelSize(16)
        self.update()
    def updateStyle(self, fontStyle):
        # Re-resolve the font for the new style, then re-apply the pixel size.
        fontDatabase = QtGui.QFontDatabase()
        self.displayFont = fontDatabase.font(self.displayFont.family(),
                fontStyle, 12)
        self.displayFont.setPixelSize(16)
        self.update()
    def sizeHint(self):
        # 32 columns wide; 65536/32 rows cover the whole BMP.
        return QtCore.QSize(32*24, (65536/32)*24)
    def mouseMoveEvent(self, event):
        # Show the code point under the cursor as a tooltip.
        widgetPosition = self.mapFromGlobal(event.globalPos())
        key = (widgetPosition.y()/24)*32 + widgetPosition.x()/24
        QtGui.QToolTip.showText(event.globalPos(), QtCore.QString.number(key), self)
    def mousePressEvent(self, event):
        # Left click selects the character under the cursor (if printable)
        # and announces it via the characterSelected signal.
        if event.button() == QtCore.Qt.LeftButton:
            self.lastKey = (event.y()/24)*32 + event.x()/24
            if QtCore.QChar(self.lastKey).category() != QtCore.QChar.NoCategory:
                self.emit(QtCore.SIGNAL("characterSelected(const QString &)"), QtCore.QString(QtCore.QChar(self.lastKey)))
            self.update()
        else:
            QtGui.QWidget.mousePressEvent(self, event)
    def paintEvent(self, event):
        painter = QtGui.QPainter()
        painter.begin(self)
        painter.fillRect(event.rect(), QtCore.Qt.white)
        painter.setFont(self.displayFont)
        # Repaint only the grid cells intersecting the dirty rectangle.
        redrawRect = event.rect()
        beginRow = redrawRect.top()/24
        endRow = redrawRect.bottom()/24
        beginColumn = redrawRect.left()/24
        endColumn = redrawRect.right()/24
        painter.setPen(QtCore.Qt.gray)
        for row in range(beginRow, endRow + 1):
            for column in range(beginColumn, endColumn + 1):
                painter.drawRect(column*24, row*24, 24, 24)
        fontMetrics = QtGui.QFontMetrics(self.displayFont)
        painter.setPen(QtCore.Qt.black)
        for row in range(beginRow, endRow + 1):
            for column in range(beginColumn, endColumn + 1):
                key = row*32 + column
                painter.setClipRect(column*24, row*24, 24, 24)
                # Highlight the currently selected cell in red.
                if key == self.lastKey:
                    painter.fillRect(column*24, row*24, 24, 24, QtCore.Qt.red)
                # Center the glyph horizontally; baseline at ascent + 4 px.
                painter.drawText(column*24 + 12 - fontMetrics.width(QtCore.QChar(key))/2,
                        row*24 + 4 + fontMetrics.ascent(),
                        QtCore.QString(QtCore.QChar(key)))
class MainWindow(QtGui.QMainWindow):
    """Character-map browser: font/style pickers over a CharacterWidget grid,
    plus a line edit and clipboard export (PyQt4 example code)."""
    def __init__(self, parent = None):
        QtGui.QMainWindow.__init__(self, parent)
        centralWidget = QtGui.QWidget()
        fontLabel = QtGui.QLabel(self.tr("Font:"))
        self.fontCombo = QtGui.QComboBox()
        styleLabel = QtGui.QLabel(self.tr("Style:"))
        self.styleCombo = QtGui.QComboBox()
        self.scrollArea = QtGui.QScrollArea()
        self.characterWidget = CharacterWidget()
        self.scrollArea.setWidget(self.characterWidget)
        self.findFonts()
        self.findStyles()
        self.lineEdit = QtGui.QLineEdit()
        clipboardButton = QtGui.QPushButton(self.tr("&To clipboard"))
        self.clipboard = QtGui.QApplication.clipboard()
        # Old-style PyQt4 signal/slot connections (string signatures).
        self.connect(self.fontCombo, QtCore.SIGNAL("activated(const QString &)"),
                self.findStyles)
        self.connect(self.fontCombo, QtCore.SIGNAL("activated(const QString &)"),
                self.characterWidget.updateFont)
        self.connect(self.styleCombo, QtCore.SIGNAL("activated(const QString &)"),
                self.characterWidget.updateStyle)
        self.connect(self.characterWidget, QtCore.SIGNAL("characterSelected(const QString &)"),
                self.insertCharacter)
        self.connect(clipboardButton, QtCore.SIGNAL("clicked()"), self.updateClipboard)
        # Top row: font/style pickers; bottom row: line edit + copy button.
        controlsLayout = QtGui.QHBoxLayout()
        controlsLayout.addWidget(fontLabel)
        controlsLayout.addWidget(self.fontCombo, 1)
        controlsLayout.addWidget(styleLabel)
        controlsLayout.addWidget(self.styleCombo, 1)
        controlsLayout.addStretch(1)
        lineLayout = QtGui.QHBoxLayout()
        lineLayout.addWidget(self.lineEdit, 1)
        lineLayout.addSpacing(12)
        lineLayout.addWidget(clipboardButton)
        centralLayout = QtGui.QVBoxLayout()
        centralLayout.addLayout(controlsLayout)
        centralLayout.addWidget(self.scrollArea, 1)
        centralLayout.addSpacing(4)
        centralLayout.addLayout(lineLayout)
        centralWidget.setLayout(centralLayout)
        self.setCentralWidget(centralWidget)
        self.setWindowTitle(self.tr("Character Map"))
    def findFonts(self):
        # Populate the font combo with every installed family.
        fontDatabase = QtGui.QFontDatabase()
        self.fontCombo.clear()
        for family in fontDatabase.families():
            self.fontCombo.addItem(family)
    def findStyles(self):
        # Refresh the style combo for the current family, preserving the
        # previously selected style when it is still available.
        fontDatabase = QtGui.QFontDatabase()
        currentItem = self.styleCombo.currentText()
        self.styleCombo.clear()
        for style in fontDatabase.styles(self.fontCombo.currentText()):
            self.styleCombo.addItem(style)
        index = self.styleCombo.findText(currentItem)
        if index == -1:
            self.styleCombo.setCurrentIndex(0)
        else:
            self.styleCombo.setCurrentIndex(index)
        self.characterWidget.updateStyle(self.styleCombo.currentText())
    def insertCharacter(self, character):
        # Append the clicked character to the line edit at the cursor.
        self.lineEdit.insert(character)
    def updateClipboard(self):
        # Copy to both the clipboard and the X11 primary selection.
        self.clipboard.setText(self.lineEdit.text(), QtGui.QClipboard.Clipboard)
        self.clipboard.setText(self.lineEdit.text(), QtGui.QClipboard.Selection)
if __name__ == "__main__":
    # Standard PyQt bootstrap: create the application, show the main window,
    # then hand control to the Qt event loop until it exits.
    app = QtGui.QApplication(sys.argv)
    window = MainWindow()
    window.show()
    sys.exit(app.exec_())
/NehorayRapid1-0.0.1-py3-none-any.whl/RapidBase/Utils/MISCELENEOUS.py | from RapidBase.Basic_Import_Libs import *
import collections
import collections.abc
import os

import numpy as np
def save_obj(obj, name):
    """Pickle *obj* to ``<name>.pkl`` using the highest protocol."""
    with open(name + '.pkl', 'wb') as stream:
        pickle.dump(obj, stream, pickle.HIGHEST_PROTOCOL)
def load_obj(name):
    """Load and return the object pickled at ``<name>.pkl``."""
    with open(name + '.pkl', 'rb') as stream:
        return pickle.load(stream)
def save_dict(obj, name):
    """Pickle *obj*, normalizing *name* so the target ends with one '.pkl'."""
    target = name.split('.pkl')[0] + '.pkl'
    with open(target, 'wb') as stream:
        pickle.dump(obj, stream, pickle.HIGHEST_PROTOCOL)
def load_dict(name):
    """Load and return a pickled object from the exact path *name*."""
    with open(name, 'rb') as stream:
        return pickle.load(stream)
class timer():
    """Stopwatch with an accumulator: tic/toc plus hold/release bookkeeping."""
    def __init__(self):
        # Accumulated (held) seconds; reference clock starts immediately.
        self.acc = 0
        self.tic()
    def tic(self):
        """Restart the reference time point."""
        self.t0 = time.time()
    def toc(self):
        """Seconds elapsed since the last tic()."""
        return time.time() - self.t0
    def hold(self):
        """Add the current elapsed time to the accumulator."""
        self.acc += self.toc()
    def release(self):
        """Return the accumulated time and zero the accumulator."""
        total = self.acc
        self.acc = 0
        return total
    def reset(self):
        """Zero the accumulator without touching the reference time."""
        self.acc = 0
def string_rjust(original_number, final_number_size):
    """Left-pad the stringified number with zeros up to the given width."""
    return str(original_number).rjust(final_number_size, '0')
def scientific_notation(input_number, number_of_digits_after_point=2):
    """Render *input_number* in scientific notation with the given precision."""
    return '{:.{digits}e}'.format(input_number, digits=number_of_digits_after_point)
def decimal_notation(input_number, number_of_digits_after_point=2):
    """Truncate *input_number* to N digits after the point; return as str."""
    scale = 10 ** number_of_digits_after_point
    # int() truncates toward zero, matching the original behaviour.
    truncated = int(input_number * scale) / scale
    return str(truncated)
def print_list(input_list):
    """Print each element of *input_list* on its own line."""
    for element in input_list:
        print(element)
def print_dict(input_dict):
    """Print every key/value pair of *input_dict* as 'key: value' lines."""
    for key, value in input_dict.items():
        print(key + ': ' + str(value))
def get_elements_from_list_by_indices(input_list, indices):
    """Return the elements of *input_list* at the given *indices*, in order."""
    return list(map(input_list.__getitem__, indices))
def get_random_start_stop_indices_for_crop(crop_size, max_number):
    """Pick a random crop window of length min(crop_size, max_number).

    The start index is drawn uniformly so the window fits in [0, max_number].
    """
    highest_start = max(0, max_number - crop_size)
    start_index = random.randint(0, highest_start)
    stop_index = start_index + min(crop_size, max_number)
    return start_index, stop_index
_all_ = ['as_variable', 'as_numpy', 'mark_volatile']  # NOTE(review): deliberately named _all_ rather than __all__ — the author found a real __all__ hid this module's other names from wildcard imports; confirm before renaming.
def as_variable(obj):
    """Recursively wrap tensors in ``torch.autograd.Variable``.

    Sequences map to lists and mappings to dicts with converted values;
    anything else is passed to ``Variable(...)`` directly.

    Fix: use ``collections.abc`` — the ``collections.Sequence`` /
    ``collections.Mapping`` aliases were removed in Python 3.10.
    """
    if isinstance(obj, Variable):
        return obj
    if isinstance(obj, collections.abc.Sequence):
        return [as_variable(v) for v in obj]
    elif isinstance(obj, collections.abc.Mapping):
        return {k: as_variable(v) for k, v in obj.items()}
    else:
        return Variable(obj)
def as_numpy(obj):
    """Recursively convert tensors/Variables (and plain values) to numpy.

    Sequences map to lists and mappings to dicts with converted values.

    Fixes: use ``collections.abc`` (the ``collections.Sequence`` alias was
    removed in Python 3.10) and treat str/bytes as scalars — a str is a
    Sequence of single-character strings, so the old code recursed forever.
    """
    if isinstance(obj, (str, bytes)):
        return np.array(obj)
    if isinstance(obj, collections.abc.Sequence):
        return [as_numpy(v) for v in obj]
    elif isinstance(obj, collections.abc.Mapping):
        return {k: as_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, Variable):
        return obj.data.cpu().numpy()
    elif torch.is_tensor(obj):
        return obj.cpu().numpy()
    else:
        return np.array(obj)
def mark_volatile(obj):
if torch.is_tensor(obj):
obj = Variable(obj)
if isinstance(obj, Variable):
obj.no_grad = True
return obj
elif isinstance(obj, collections.Mapping):
return {k: mark_volatile(o) for k, o in obj.items()}
elif isinstance(obj, collections.Sequence):
return [mark_volatile(o) for o in obj]
else:
return obj
def to_list_of_certain_size(input_number, number_of_elements):
    """Broadcast a scalar (or 1-element list/tuple) to the requested length.

    Longer lists/tuples are returned unchanged; note a 1-element tuple is
    repeated as a tuple, mirroring the original behaviour.  Exact-type
    checks (not isinstance) are kept on purpose.
    """
    if type(input_number) in (tuple, list):
        if len(input_number) == 1:
            return input_number * number_of_elements
        return input_number
    return [input_number] * number_of_elements
def to_tuple_of_certain_size(input_number, number_of_elements):
    """Broadcast a scalar (or 1-element sequence) to a tuple of given length.

    Longer lists/tuples are converted to a tuple as-is.  Exact-type checks
    (not isinstance) are kept on purpose.
    """
    if type(input_number) in (tuple, list):
        if len(input_number) == 1:
            return tuple(list(input_number) * number_of_elements)
        return tuple(input_number)
    return tuple([input_number] * number_of_elements)
def permute_tuple_indices(input_tuple, new_positions):
    """Return a tuple whose j-th element is input_tuple[new_positions[j]]."""
    return tuple(map(input_tuple.__getitem__, new_positions))
def permute_list_indices(input_tuple, new_positions):
    """Like permute_tuple_indices, but returns a list."""
    return list(map(input_tuple.__getitem__, new_positions))
def make_list_of_certain_size(n_layers, *args):
    """Normalize each positional arg to a list of length *n_layers*.

    Non-list args are repeated n_layers times; list args must already have
    that length (enforced with an assert carrying the original message).
    """
    normalized = []
    for arg in args:
        if type(arg) != list:
            normalized.append([arg] * n_layers)
        else:
            assert len(arg) == n_layers, str(arg) + ' must have the same length as n_layers'
            normalized.append(arg)
    return normalized
# def combined_several_checkpoints_of_different_parts_of_the_network_into_one():
# ### If Network has several parts, each coresponding to a different part of the network, like DVDNet, compine them to a single one: ###
# model_spatial_file = path_fix_path_for_linux(r'/home/mafat\PycharmProjects\IMOD\models\DVDNet\dvdnet/model_spatial.pth')
# model_temp_file = path_fix_path_for_linux(r'/home/mafat\PycharmProjects\IMOD\models\DVDNet\dvdnet/model_temp.pth')
# model_combined_final_save_name = path_fix_path_for_linux(r'/home/mafat\PycharmProjects\IMOD\models\DVDNet\dvdnet/model_combined.pth')
# state_spatial_dict = torch.load(model_spatial_file, map_location=torch.device('cuda'))
# state_temp_dict = torch.load(model_temp_file, map_location=torch.device('cuda'))
# state_spatial_dict = remove_dataparallel_wrapper(state_spatial_dict)
# state_temp_dict = remove_dataparallel_wrapper(state_temp_dict)
# model_DVDNet = DVDNet(temp_psz=5, mc_algo='DeepFlow')
# model_DVDNet.model_spatial.load_state_dict(state_spatial_dict)
# model_DVDNet.model_temporal.load_state_dict(state_temp_dict)
#
# dictionary_to_save = {}
# dictionary_to_save['model_state_dict'] = model_DVDNet.state_dict()
# dictionary_to_save['variables_dictionary'] = None
# dictionary_to_save['optimizer_state_dict'] = None
# dictionary_to_save['complete_network'] = None
# dictionary_to_save['complete_optimzier'] = None
# torch.save(dictionary_to_save, model_combined_final_save_name)
# combined_state_dict = torch.load(model_combined_final_save_name, map_location=torch.device('cuda'))
# model_DVDNet.load_state_dict(combined_state_dict['model_state_dict'])
class AverageMeter(object):
    """Computes and stores the weighted running average and current value."""
    def __init__(self):
        self.initialized = False
        self.val = None
        self.avg = None
        self.sum = None
        self.count = None
    def initialize(self, val, weight):
        """Seed every statistic from the first observation."""
        self.val = val
        self.avg = val
        self.sum = val * weight
        self.count = weight
        self.initialized = True
    def update(self, val, weight=1):
        """Fold in a new observation, initializing on first use."""
        if self.initialized:
            self.add(val, weight)
        else:
            self.initialize(val, weight)
    def add(self, val, weight):
        """Accumulate a weighted observation and refresh the running mean."""
        self.val = val
        self.sum += val * weight
        self.count += weight
        self.avg = self.sum / self.count
    def value(self):
        """Most recent value."""
        return self.val
    def average(self):
        """Weighted running mean over all updates so far."""
        return self.avg
class AverageMeter_Dict(object):
    """Dict of AverageMeters: tracks a running average per key."""
    def __init__(self):
        self.inner_dict = EasyDict()  # latest running average per key
        self.average_meters_dict = EasyDict()  # the AverageMeter behind each key
    def update_key(self, key, value):
        """Fold *value* into the meter for *key*, creating it on first use."""
        if key not in self.inner_dict.keys():
            self.average_meters_dict[key] = AverageMeter()
        self.average_meters_dict[key].update(value)
        # Mirror the running average both as an attribute and a dict entry.
        setattr(self, key, self.average_meters_dict[key].avg)
        self.inner_dict[key] = getattr(self, key)
    def update_dict(self, input_dict):
        """Update every key of *input_dict*; return the averages dict."""
        for key, value in input_dict.items():
            self.update_key(key, value)
        return self.inner_dict
    def keys(self):
        return self.inner_dict.keys()
    def items(self):
        return self.inner_dict.items()
    def __getitem__(self, key):
        return self.inner_dict[key]
class KeepValuesHistory_Dict(object):
    """Dict of per-key histories: each key maps to the list of all values seen."""
    def __init__(self):
        self.inner_dict = EasyDict()  # key -> list of every recorded value
        self.average_meters_dict = EasyDict()  # kept for interface parity; unused
    def update_key(self, key, value):
        """Append *value* to the history for *key*, creating it on first use."""
        if key not in self.inner_dict.keys():
            self.inner_dict[key] = []
        self.inner_dict[key].append(value)
        # Mirror the history list as an attribute as well.
        setattr(self, key, self.inner_dict[key])
    def update_dict(self, input_dict):
        """Record every key/value of *input_dict*; return the histories dict."""
        for key, value in input_dict.items():
            self.update_key(key, value)
        return self.inner_dict
    def keys(self):
        return self.inner_dict.keys()
    def items(self):
        return self.inner_dict.items()
    def __getitem__(self, key):
        return self.inner_dict[key]
def update_dict(main_dict, input_dict):
    """Return a copy of *main_dict* (as EasyDict) overridden by *input_dict*."""
    # update main dict with input dict
    merged = EasyDict(main_dict)
    merged.update(EasyDict() if input_dict is None else input_dict)
    return merged
def convert_range(old_mat, new_range):
    """Linearly rescale *old_mat* so its min/max map onto *new_range*."""
    new_min, new_max = new_range[0], new_range[1]
    old_min, old_max = old_mat.min(), old_mat.max()
    # Scale about the old minimum, then shift to the new minimum.
    scale = (new_max - new_min) / (old_max - old_min)
    return (old_mat - old_min) * scale + new_min
def get_random_number_in_range(min_num, max_num, array_size=(1)):
    """Uniform float32 samples in [min_num, max_num) with the given shape."""
    span = max_num - min_num
    return (np.random.random(array_size) * span + min_num).astype('float32')
import fnmatch
def string_match_pattern(input_string, input_pattern):
    """True if *input_string* matches the shell-style glob *input_pattern*."""
    # fnmatch is case-normalizing on case-insensitive filesystems.
    return fnmatch.fnmatch(input_string, input_pattern)
def path_make_path_if_none_exists(path):
    """Create *path* (including parents) unless something already exists there."""
    if not os.path.exists(path):
        os.makedirs(path)
def unique(ar, return_index=False, return_inverse=False, return_counts=False):
    """Return the sorted unique values of *ar* (flattened).

    Optionally also return first-occurrence indices, the inverse mapping
    (indices reconstructing *ar* from the unique values) and per-value
    counts, appended in that order as a tuple.

    Fix: the empty-input shortcut used ``np.bool``, an alias removed in
    NumPy 1.24; the builtin ``bool`` (same dtype) is used instead.
    """
    ar = np.asanyarray(ar).flatten()
    optional_indices = return_index or return_inverse
    optional_returns = optional_indices or return_counts
    if ar.size == 0:
        # Short-circuit for empty input, keeping the tuple shape consistent.
        if not optional_returns:
            ret = ar
        else:
            ret = (ar,)
            if return_index:
                ret += (np.empty(0, bool),)
            if return_inverse:
                ret += (np.empty(0, bool),)
            if return_counts:
                ret += (np.empty(0, np.intp),)
        return ret
    if optional_indices:
        # Stable sort so return_index reports *first* occurrences.
        perm = ar.argsort(kind='mergesort' if return_index else 'quicksort')
        aux = ar[perm]
    else:
        ar.sort()
        aux = ar
    # True wherever a run of equal values starts.
    flag = np.concatenate(([True], aux[1:] != aux[:-1]))
    if not optional_returns:
        ret = aux[flag]
    else:
        ret = (aux[flag],)
        if return_index:
            ret += (perm[flag],)
        if return_inverse:
            iflag = np.cumsum(flag) - 1
            inv_idx = np.empty(ar.shape, dtype=np.intp)
            inv_idx[perm] = iflag
            ret += (inv_idx,)
        if return_counts:
            idx = np.concatenate(np.nonzero(flag) + ([ar.size],))
            ret += (np.diff(idx),)
    return ret
def clean_up_filenames_list(input_list):
    """Remove every empty-list entry from *input_list*, in place.

    Returns the same (mutated) list object.

    Fix: the old do-while style loop indexed input_list[0] before checking
    the bound, raising IndexError on an empty input list.
    """
    i = 0
    while i < len(input_list):
        if input_list[i] == []:
            # Deleting shifts the tail left, so do not advance the index.
            del input_list[i]
        else:
            i += 1
    return input_list
import random
def get_random_color(pastel_factor = 0.5):
    """Random RGB triple in [0, 1], softened toward white by *pastel_factor*."""
    raw = [random.uniform(0, 1.0) for _ in [1, 2, 3]]
    return [(channel + pastel_factor) / (1.0 + pastel_factor) for channel in raw]
def color_distance(c1, c2):
    """L1 distance between two colors: sum of absolute channel differences."""
    return sum(abs(a - b) for a, b in zip(c1, c2))
def generate_new_color(existing_colors, pastel_factor = 0.5):
    """Draw up to 100 random pastel colors and keep the one whose nearest
    neighbour in *existing_colors* is farthest away (L1 distance).

    With no existing colors the very first draw is returned immediately.
    """
    best_color = None
    max_distance = None
    for _ in range(0, 100):
        candidate = get_random_color(pastel_factor = pastel_factor)
        if not existing_colors:
            return candidate
        nearest = min([color_distance(candidate, c) for c in existing_colors])
        # NOTE: a falsy (0) max_distance is treated like "unset", as before.
        if not max_distance or nearest > max_distance:
            max_distance = nearest
            best_color = candidate
    return best_color
def get_n_colors(number_of_colors=10, pastel_factor=0.6):
    """Generate mutually distinct pastel colors, scaled to the 0-255 range."""
    colors = []
    for _ in np.arange(number_of_colors):
        colors.append(generate_new_color(colors, pastel_factor=pastel_factor))
    return [list(np.array(color) * 255) for color in colors]
def create_folder_if_needed(folder_full_path):
    """Create the folder (and parents) if nothing exists at that path yet."""
    if not os.path.exists(folder_full_path):
        os.makedirs(folder_full_path)
def create_folder_if_doesnt_exist(folder_full_path):
    """Duplicate of create_folder_if_needed, kept for backward compatibility."""
    if not os.path.exists(folder_full_path):
        os.makedirs(folder_full_path)
def assign_attributes_from_dict(input_object, input_dict):
    """Set each key/value of *input_dict* as an attribute on *input_object*."""
    for key, value in input_dict.items():
        setattr(input_object, key, value)
def remove_dataparallel_wrapper(state_dict):
    r"""Converts a DataParallel model to a normal one by removing the "module."
    wrapper in the module dictionary
    Args:
        state_dict: a torch.nn.DataParallel state dictionary
    Returns:
        collections.OrderedDict with the 'module.' prefixes stripped.
    """
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        # Only strip the prefix when it is actually present; the old
        # unconditional key[7:] silently mangled keys of models that were
        # never wrapped in DataParallel.
        name = key[len('module.'):] if key.startswith('module.') else key
        new_state_dict[name] = value
    return new_state_dict
def create_empty_list_of_lists(number_of_elements):
    """Return *number_of_elements* independent empty lists (no aliasing)."""
    # np.arange is kept (rather than range) to accept float counts as before.
    return [[] for _ in np.arange(number_of_elements)]
/NeuralPlayground-0.0.7.tar.gz/NeuralPlayground-0.0.7/neuralplayground/agents/whittington_2020_extras/whittington_2020_analyse.py | import numpy as np
import torch
def performance(forward, model, environments):
    """
    Track prediction accuracy over walk, and calculate fraction of locations visited and actions taken to assess performance.
    Parameters
    ----------
    forward : list
        List of forward passes through the model, each containing the model input, the model output, and the model state.
    model : TEM
        The model that was used to generate the forward passes.
    environments : list
        List of environments that were used to generate the forward passes.
    Returns
    -------
    all_correct : list
        List of lists of booleans, indicating for each step whether the model predicted the observation correctly.
    all_location_frac : list
        List of lists of floats, indicating for each step the fraction of locations visited.
    all_action_frac : list
        List of lists of floats, indicating for each step the fraction of actions taken.
    """
    # Keep track of whether model prediction were correct, as well as the fraction of nodes/edges visited, across environments
    all_correct, all_location_frac, all_action_frac = [], [], []
    # Run through environments and monitor performance in each
    for env_i, env in enumerate(environments):
        # Keep track for each location whether it has been visited
        location_visited = np.full(env.n_locations, False)
        # And for each action in each location whether it has been taken
        action_taken = np.full((env.n_locations, model.hyper["n_actions"]), False)
        # Not all actions are available at every location (e.g. edges of grid world). Find how many actions can be taken
        action_available = np.full((env.n_locations, model.hyper["n_actions"]), False)
        for currLocation in env.locations:
            for currAction in currLocation["actions"]:
                # An action whose transition row sums to zero cannot be taken from this location
                if np.sum(currAction["transition"]) > 0:
                    if model.hyper["has_static_action"]:
                        # Action id 0 is the static (stand-still) action; it is excluded
                        # and the remaining action ids are shifted down by one.
                        if currAction["id"] > 0:
                            action_available[currLocation["id"], currAction["id"] - 1] = True
                    else:
                        action_available[currLocation["id"], currAction["id"]] = True
        # Make array to list whether the observation was predicted correctly or not
        correct = []
        # Make array that stores for each step the fraction of locations visited
        location_frac = []
        # And an array that stores for each step the fraction of actions taken
        action_frac = []
        # Run through iterations of forward pass to check when an action is taken for the first time
        for step in forward:
            # Update the states that have now been visited
            location_visited[step.g[env_i]["id"]] = True
            # ... And the actions that now have been taken
            if model.hyper["has_static_action"]:
                # Same id shift as above: stand-still (0) is excluded from the tally
                if step.a[env_i] > 0:
                    action_taken[step.g[env_i]["id"], step.a[env_i] - 1] = True
            else:
                action_taken[step.g[env_i]["id"], step.a[env_i]] = True
            # Compare the predicted observation (x_gen[2]) with the ground-truth observation for this step
            correct.append((torch.argmax(step.x_gen[2][env_i]) == torch.argmax(step.x[env_i])).numpy())
            # Add the fraction of locations visited for this step
            location_frac.append(np.sum(location_visited) / location_visited.size)
            # ... And also add the fraction of actions taken for this step
            action_frac.append(np.sum(action_taken) / np.sum(action_available))
        # Add performance and visitation fractions of this environment to performance list across environments
        all_correct.append(correct)
        all_location_frac.append(location_frac)
        all_action_frac.append(action_frac)
    # Return
    return all_correct, all_location_frac, all_action_frac
def location_accuracy(forward, model, environments):
    """
    Track prediction accuracy per location, after a transition towards the location.
    Parameters
    ----------
    forward : list
        List of forward passes through the model, each containing the model input, the model output, and the model state.
    model : TEM
        The model that was used to generate the forward passes.
    environments : list
        List of environments that were used to generate the forward passes.
    Returns
    -------
    accuracy_from : list
        List of lists of floats, indicating for each location the fraction of correct predictions made on the step
        after *leaving* (departing from) that location.
    accuracy_to : list
        List of lists of floats, indicating for each location the fraction of correct predictions made when
        *arriving at* that location.
    """
    # Keep track of whether model prediction were correct for each environment, separated by arrival and departure location
    accuracy_from, accuracy_to = [], []
    # Run through environments and monitor performance in each
    for env_i, env in enumerate(environments):
        # NOTE(review): here env is indexed (env[2] as the location count), whereas
        # performance() reads env.n_locations — presumably the two functions receive
        # environments in different formats; confirm against the caller.
        # Make array to list whether the observation was predicted correctly or not
        correct_from = [[] for _ in range(env[2])]
        correct_to = [[] for _ in range(env[2])]
        # Run through iterations of forward pass to check when an action is taken for the first time
        for step_i, step in enumerate(forward[1:]):
            # Since enumeration starts at forward[1:], forward[step_i] is the step *preceding* `step`
            # Prediction on arrival: sensory prediction when arriving at given node
            correct_to[step.g[env_i]["id"]].append(
                (torch.argmax(step.x_gen[2][env_i]) == torch.argmax(step.x[env_i])).numpy().tolist()
            )
            # Prediction on departure: the same prediction, filed under the location that was just left
            correct_from[forward[step_i].g[env_i]["id"]].append(
                (torch.argmax(step.x_gen[2][env_i]) == torch.argmax(step.x[env_i])).numpy().tolist()
            )
        # Add performance and visitation fractions of this environment to performance list across environments
        # (the conditional denominator guards against division by zero for never-visited locations)
        accuracy_from.append(
            [
                sum(correct_from_location) / (len(correct_from_location) if len(correct_from_location) > 0 else 1)
                for correct_from_location in correct_from
            ]
        )
        accuracy_to.append(
            [
                sum(correct_to_location) / (len(correct_to_location) if len(correct_to_location) > 0 else 1)
                for correct_to_location in correct_to
            ]
        )
    # Return
    return accuracy_from, accuracy_to
def location_occupation(forward, model, environments):
    """
    Track how often each location was visited during the walk.
    Parameters
    ----------
    forward : list
        List of forward passes through the model, each containing the model input, the model output, and the model state.
    model : TEM
        The model that was used to generate the forward passes.
    environments : list
        List of environments that were used to generate the forward passes.
    Returns
    -------
    occupation : list
        List of lists of integers, indicating for each location how often it was visited during the walk.
    """
    # One visit-count list per environment
    occupation = []
    for env_index, environment in enumerate(environments):
        # environment[2] holds the number of locations in this environment
        counts = [0] * environment[2]
        # Tally every step's location for this environment's batch entry
        for iteration in forward:
            counts[iteration.g[env_index]["id"]] += 1
        occupation.append(counts)
    # Return occupation of states during walk across environments
    return occupation
def zero_shot(forward, model, environments, include_stay_still=True):
    """
    Track whether the model can predict the observation correctly when it visits a location for the first time.
    Parameters
    ----------
    forward : list
        List of forward passes through the model, each containing the model input, the model output, and the model state.
    model : TEM
        The model that was used to generate the forward passes.
    environments : list
        List of environments that were used to generate the forward passes.
    include_stay_still : bool
        Whether to include standing still actions in the zero-shot inference analysis.
    Returns
    -------
    all_correct_zero_shot : list
        List of lists of booleans, indicating for each step whether the model predicted the observation correctly
        when visiting a location for the first time.
    """
    # Get the number of actions in this model (the static stand-still action, when present, adds one slot)
    n_actions = model.hyper["n_actions"] + model.hyper["has_static_action"]
    # Track for all opportunities for zero-shot inference if the predictions were correct across environments
    all_correct_zero_shot = []
    # Run through environments and check for zero-shot inference in each of them
    for env_i, env in enumerate(environments):
        # Keep track for each location whether it has been visited (env[2] is the location count)
        location_visited = np.full(env[2], False)
        # And for each action in each location whether it has been taken
        action_taken = np.full((env[2], n_actions), False)
        # Get the very first iteration
        prev_iter = forward[0]
        # Make list that for all opportunities for zero-shot inference tracks if the predictions were correct
        correct_zero_shot = []
        # Run through iterations of forward pass to check when an action is taken for the first time
        for step in forward[1:]:
            # Get the previous action and previous location
            prev_a, prev_g = prev_iter.a[env_i], prev_iter.g[env_i]["id"]
            # Setting prev_a to None excludes this stand-still transition from the analysis below
            if model.hyper["has_static_action"] and prev_a == 0 and not include_stay_still:
                prev_a = None
            # Mark the location of the previous iteration as visited
            location_visited[prev_g] = True
            # Zero shot inference occurs when the current location was visited, but the previous action wasn't taken before
            if location_visited[step.g[env_i]["id"]] and prev_a is not None and not action_taken[prev_g, prev_a]:
                # Find whether the prediction was correct
                correct_zero_shot.append((torch.argmax(step.x_gen[2][env_i]) == torch.argmax(step.x[env_i])).numpy())
            # Update the previous action as taken
            if prev_a is not None:
                action_taken[prev_g, prev_a] = True
            # And update the previous iteration to the current iteration
            prev_iter = step
        # Having gone through the full forward pass for one environment, add the zero-shot performance to the list of all
        all_correct_zero_shot.append(correct_zero_shot)
    # Return lists of success of zero-shot inference for all environments
    return all_correct_zero_shot
def compare_to_agents(forward, model, environments, include_stay_still=True):
    """
    Compare TEM performance to a 'node' and an 'edge' agent, that remember previous observations and guess others.
    Parameters
    ----------
    forward : list
        List of forward passes through the model, each containing the model input, the model output, and the model state.
    model : TEM
        The model that was used to generate the forward passes.
    environments : list
        List of environments that were used to generate the forward passes.
    include_stay_still : bool
        Whether to include standing still actions in the zero-shot inference analysis.
    Returns
    -------
    all_correct_model : list
        List of lists of booleans, indicating for each step whether the model predicted the observation correctly.
    all_correct_node : list
        List of lists of booleans, indicating for each step whether the node agent predicted the observation correctly.
    all_correct_edge : list
        List of lists of booleans, indicating for each step whether the edge agent predicted the observation correctly.
    """
    # Get the number of actions in this model (the static stand-still action, when present, adds one slot)
    n_actions = model.hyper["n_actions"] + model.hyper["has_static_action"]
    # Store for each environment for each step whether it was predicted correctly by the model, and by a perfect node and
    # perfect edge agent
    all_correct_model, all_correct_node, all_correct_edge = [], [], []
    # Run through environments and check for correct or incorrect prediction
    for env_i, env in enumerate(environments):
        # Keep track for each location whether it has been visited (env[2] is the location count)
        location_visited = np.full(env[2], False)
        # And for each action in each location whether it has been taken
        action_taken = np.full((env[2], n_actions), False)
        # Make array to list whether the observation was predicted correctly or not for the model
        correct_model = []
        # And the same for a node agent, that picks a random observation on first encounter of a node, and the correct
        # one every next time
        correct_node = []
        # And the same for an edge agent, that picks a random observation on first encounter of an edge, and the correct
        # one every next time
        correct_edge = []
        # Get the very first iteration
        prev_iter = forward[0]
        # Run through iterations of forward pass to check when an action is taken for the first time
        for step in forward[1:]:
            # Get the previous action and previous location
            prev_a, prev_g = prev_iter.a[env_i], prev_iter.g[env_i]["id"]
            # If the previous action was standing still: only count as valid transition standing still actions
            # are included as zero-shot inference
            if model.hyper["has_static_action"] and prev_a == 0 and not include_stay_still:
                prev_a = None
            # Mark the location of the previous iteration as visited
            location_visited[prev_g] = True
            # Update model prediction for this step
            correct_model.append((torch.argmax(step.x_gen[2][env_i]) == torch.argmax(step.x[env_i])).numpy())
            # Update node agent prediction for this step: correct when this state was visited before, otherwise chance
            # (chance = a uniform random guess among the n_x possible observations)
            correct_node.append(
                True
                if location_visited[step.g[env_i]["id"]]
                else np.random.randint(model.hyper["n_x"]) == torch.argmax(step.x[env_i]).numpy()
            )
            # Update edge agent prediction for this step: always correct if no action taken, correct when action leading
            # to this state was taken before, otherwise chance
            correct_edge.append(
                True
                if prev_a is None
                else True
                if action_taken[prev_g, prev_a]
                else np.random.randint(model.hyper["n_x"]) == torch.argmax(step.x[env_i]).numpy()
            )
            # Update the previous action as taken
            if prev_a is not None:
                action_taken[prev_g, prev_a] = True
            # And update the previous iteration to the current iteration
            prev_iter = step
        # Add the performance of model, node agent, and edge agent for this environment to list across environments
        all_correct_model.append(correct_model)
        all_correct_node.append(correct_node)
        all_correct_edge.append(correct_edge)
    # Return list of prediction success for all three agents across environments
    return all_correct_model, all_correct_node, all_correct_edge
def rate_map(forward, model, environments):
    """
    Calculate the firing rate of each cell in the model for each location in each environment.
    Parameters
    ----------
    forward : list
        List of forward passes through the model, each containing the model input, the model output, and the model state.
    model : TEM
        The model that was used to generate the forward passes.
    environments : list
        List of environments that were used to generate the forward passes.
    Returns
    -------
    all_g : list
        Abstract location (entorhinal / grid cell) representations: for each environment, for each frequency
        module, a [locations x cells] matrix of average firing rates.
    all_p : list
        Grounded location (hippocampal / place cell) representations: for each environment, for each frequency
        module, a [locations x cells] matrix of average firing rates.
    """
    # Store location x cell firing rate matrix for abstract and grounded location representation across environments
    all_g, all_p = [], []
    # Go through environments and collect firing rates in each
    for env_i, env in enumerate(environments):
        # Collect grounded location/hippocampal/place cell representation during walk: separate into frequency
        # modules, then locations (env[2] is the location count)
        p = [[[] for loc in range(env[2])] for f in range(model.hyper["n_f"])]
        # Collect abstract location/entorhinal/grid cell representation during walk: separate into frequency
        # modules, then locations
        g = [[[] for loc in range(env[2])] for f in range(model.hyper["n_f"])]
        # In each step, concatenate the representations to the appropriate list
        for step in forward:
            # Run through frequency modules and append the firing rates to the correct location list
            for f in range(model.hyper["n_f"]):
                g[f][step.g[env_i]["id"]].append(step.g_inf[f][env_i].detach().numpy())
                p[f][step.g[env_i]["id"]].append(step.p_inf[f][env_i].detach().numpy())
        # Now average across location visits to get a single representation vector for each location for each frequency
        for cells, n_cells in zip([p, g], [model.hyper["n_p"], model.hyper["n_g"]]):
            for f, frequency in enumerate(cells):
                # Average across visits of the each location, but only the second half of the visits so model
                # roughly knows the environment; unvisited locations get an all-zero vector
                for i, location in enumerate(frequency):
                    frequency[i] = (
                        sum(location[int(len(location) / 2) :]) / len(location[int(len(location) / 2) :])
                        if len(location[int(len(location) / 2) :]) > 0
                        else np.zeros(n_cells[f])
                    )
                # Then concatenate the locations to get a [locations x cells for this frequency] matrix
                cells[f] = np.stack(frequency, axis=0)
        # Append the final average representations of this environment to the list of representations across environments
        all_g.append(g)
        all_p.append(p)
    # Return list of locations x cells matrix of firing rates for each frequency module for each environment
    return all_g, all_p
def generate_input(environment, walk):
    """
    Generate model input from environment and walk.
    Parameters
    ----------
    environment : Environment
        Environment from which to generate the model input.
    walk : list or None
        List of [location, observation, action] steps. If None, a fresh walk
        is sampled from the environment.
    Returns
    -------
    walk : list
        The same walk object, with every step reshaped in place so it looks
        like a batch of size one.
    """
    # Without a supplied walk, sample one whose length scales with the number
    # of locations, so each location is likely to be visited at least once.
    if walk is None:
        walk = environment.generate_walks(environment.graph["n_locations"] * 100, 1)[0]
    # Reshape every step in place into a batch of size one.
    for step in walk:
        location, observation, action = step
        # Location and action become singleton lists; the 1D observation
        # vector becomes a 2D row vector.
        step[:] = [[location], observation.unsqueeze(dim=0), [action]]
    return walk
def smooth(a, wsz):
    """
    Smooth a 1D array with a centred moving average of window size *wsz*.
    Parameters
    ----------
    a : list
        1D array to be smoothed.
    wsz : int
        Window size to use for smoothing.
    Returns
    -------
    out : list
        Smoothed 1D array of the same length as the input.
    """
    # Full-window moving average over the interior of the array
    kernel = np.ones(wsz, dtype=int)
    body = np.convolve(a, kernel, "valid") / wsz
    # Shrinking windows of size 1, 3, 5, ... handle the array edges so the
    # output keeps the input's length
    edge_lengths = np.arange(1, wsz - 1, 2)
    head = np.cumsum(a[: wsz - 1])[::2] / edge_lengths
    tail = (np.cumsum(a[:-wsz:-1])[::2] / edge_lengths)[::-1]
    return np.concatenate((head, body, tail))
/Glint-0.2.0.zip/Glint-0.2.0/README.md | # Glint
[](https://travis-ci.org/mlowen/Glint)
Glint is a micro framework for command line applications, it creates the needed parameters that should be passed to the application based on the function signatures that it is supplied.
### Inspiration
The inspiration for Glint came from wanting to have a command driven cli app similar to how git works which I was unable to replicate with [argparse](http://docs.python.org/dev/library/argparse.html).
## Requirements
Glint requires Python 3.3 or higher to run, it has no other dependencies outside of the python core library.
## Installation
To install Glint once you have a copy run from the projects root directory:
```
python ./setup.py install
```
## Usage
All usage documentation for Glint can be found over at [read the docs](https://glint.readthedocs.org/).
## Future Plans
If you want to see what's coming up in the near future for Glint go visit the [huboard](http://huboard.com/mlowen/Glint/board) for the project.
## Contributing
The source for Glint can be found over at [Github](https://github.com/mlowen/Glint), if you want to contribute that would be a good place to start. If you are wanting to report a bug all of that is kept at github as well in the [issue tracker](https://github.com/mlowen/Glint/issues), the issues are also used to track upcoming work on Glint though features/issues that will be worked on before the next release can be more easily visualised over at [huboard](http://huboard.com/mlowen/Glint/board). If you want to keep a track of the status of the bleeding edge then you'll able to see the current state at [Travis](https://travis-ci.org/mlowen/Glint).
### Submitting changes
You've downloaded/cloned Glint and made some changes you'd like to have merged in, firstly awesome thanks hugely! There are a couple of guidelines around submitting changes:
* Changes will only be accepted via pull requests on github.
* Any code changes should have some unit tests, if it's a fix for a bug it won't be accepted unless it has tests.
* Again with any code changes if your pull request [breaks the build](https://travis-ci.org/mlowen/Glint/pull_requests) it won't be accepted.
### Running the tests
While Glint doesn't have any dependencies outside of the python core library, you will need to install [nose](https://nose.readthedocs.org/en/latest/) to run the tests. Once that is installed you have two options for running the tests; you can run either:
```
nosetests
```
or
```
python ./setup.py test
```
## License
Glint is available under the MIT license which is as follows:
Copyright © 2013 Michael Lowen
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | PypiClean |
/Js2Py-0.74.tar.gz/Js2Py-0.74/js2py/constructors/jsfloat32array.py |
from ..base import *
try:
import numpy
except:
pass
@Js
def Float32Array():
    """Emulation of the JavaScript ``Float32Array`` constructor.

    Dispatches on the type of the first argument: a number is treated as a
    length, a string as a sequence of characters, JS arrays / typed arrays /
    array buffers are copied element-wise, and a wrapped Python buffer is
    viewed (not copied) via ``numpy.frombuffer``.
    """
    TypedArray = (PyJsInt8Array, PyJsUint8Array, PyJsUint8ClampedArray,
                  PyJsInt16Array, PyJsUint16Array, PyJsInt32Array,
                  PyJsUint32Array, PyJsFloat32Array, PyJsFloat64Array)
    a = arguments[0]
    if isinstance(a, PyJsNumber):  # length
        length = a.to_uint32()
        if length != a.value:
            raise MakeError('RangeError', 'Invalid array length')
        temp = Js(numpy.full(length, 0, dtype=numpy.float32))
        temp.put('length', a)
        return temp
    elif isinstance(a, PyJsString):  # object (string)
        temp = Js(numpy.array(list(a.value), dtype=numpy.float32))
        temp.put('length', Js(len(list(a.value))))
        return temp
    elif isinstance(a, PyJsArray) or isinstance(a, TypedArray) or isinstance(
            a, PyJsArrayBuffer):  # object (Array, TypedArray)
        array = a.to_list()
        # This is a *float* typed array: keep fractional values. The previous
        # int() cast silently truncated every element to an integer.
        array = [(float(item.value) if item.value is not None else 0.0)
                 for item in array]
        temp = Js(numpy.array(array, dtype=numpy.float32))
        temp.put('length', Js(len(array)))
        return temp
    elif isinstance(a, PyObjectWrapper):  # object (ArrayBuffer, etc)
        # Byte-oriented construction from a buffer: the total byte length and
        # any start offset must be aligned to the 4-byte element size.
        if len(a.obj) % 4 != 0:
            raise MakeError(
                'RangeError',
                'Byte length of Float32Array should be a multiple of 4')
        if len(arguments) > 1:
            offset = int(arguments[1].value)
            if offset % 4 != 0:
                raise MakeError(
                    'RangeError',
                    'Start offset of Float32Array should be a multiple of 4')
        else:
            offset = 0
        if len(arguments) > 2:
            length = int(arguments[2].value)
        else:
            length = int((len(a.obj) - offset) / 4)
        array = numpy.frombuffer(
            a.obj, dtype=numpy.float32, count=length, offset=offset)
        temp = Js(array)
        temp.put('length', Js(length))
        temp.buff = array
        return temp
    # No (usable) argument: return an empty Float32Array of length 0.
    temp = Js(numpy.full(0, 0, dtype=numpy.float32))
    temp.put('length', Js(0))
    return temp
# Allow Float32Array to be used as a constructor (``new Float32Array(...)``)
# by routing construction through the same function.
Float32Array.create = Float32Array
# Function arity reported to JS code; set to 3 here (length, byteOffset-style
# signature) — presumably to match the spec'd TypedArray constructor length.
Float32Array.own['length']['value'] = Js(3)
# Wire up the constructor/prototype pair with the usual JS property
# attributes: the prototype reference itself is immutable ...
Float32Array.define_own_property(
    'prototype', {
        'value': Float32ArrayPrototype,
        'enumerable': False,
        'writable': False,
        'configurable': False
    })
# ... while the back-reference from the prototype to the constructor is
# writable and configurable, as in standard built-ins.
Float32ArrayPrototype.define_own_property(
    'constructor', {
        'value': Float32Array,
        'enumerable': False,
        'writable': True,
        'configurable': True
    })
# Each Float32Array element occupies 4 bytes.
Float32ArrayPrototype.define_own_property('BYTES_PER_ELEMENT', {
    'value': Js(4),
    'enumerable': False,
    'writable': False,
    'configurable': False
})
/AllanTools-2019.9.tar.gz/AllanTools-2019.9/allantools/dataset.py | from . import allantools
class Dataset(object):
    """ Dataset class for Allantools

    :Example:
        ::

            import numpy as np
            # Load random data
            a = allantools.Dataset(data=np.random.rand(1000))
            # compute mdev
            a.compute("mdev")
            print(a.out["stat"])

    compute() returns the result of the computation and also stores it in the
    object's ``out`` member.
    """

    def __init__(self, data=None, rate=1.0, data_type="phase", taus=None):
        """ Initialize object with input data

        Parameters
        ----------
        data: np.array
            Input data. Provide either phase or frequency (fractional,
            adimensional)
        rate: float
            The sampling rate for data, in Hz. Defaults to 1.0
        data_type: {'phase', 'freq'}
            Data type, i.e. phase or frequency. Defaults to "phase".
        taus: np.array
            Array of tau values, in seconds, for which to compute statistic.
            Optionally set taus=["all"|"octave"|"decade"] for automatic
            calculation of taus list

        Returns
        -------
        Dataset()
            A Dataset() instance
        """
        # input data dict
        self.inp = {"data": None,
                    "rate": None,
                    "data_type": None,
                    "taus": None}
        # output data dict, to be populated by compute()
        self.out = {"taus": None,
                    "stat": None,
                    "stat_err": None,
                    "stat_n": None,
                    "stat_unc": None,
                    "stat_id": None}
        self.inp["data"] = data
        self.inp["rate"] = rate
        self.inp["data_type"] = data_type
        self.inp["taus"] = taus

    def set_input(self, data,
                  rate=1.0, data_type="phase", taus=None):
        """ Optional method if you chose not to set inputs on init

        Parameters
        ----------
        data: np.array
            Input data. Provide either phase or frequency (fractional,
            adimensional)
        rate: float
            The sampling rate for data, in Hz. Defaults to 1.0
        data_type: {'phase', 'freq'}
            Data type, i.e. phase or frequency. Defaults to "phase".
        taus: np.array
            Array of tau values, in seconds, for which to compute statistic.
            Optionally set taus=["all"|"octave"|"decade"] for automatic
            calculation of taus list
        """
        self.inp["data"] = data
        self.inp["rate"] = rate
        self.inp["data_type"] = data_type
        self.inp["taus"] = taus

    def compute(self, function):
        """Evaluate the passed function with the supplied data.

        Stores result in self.out.

        Parameters
        ----------
        function: str
            Name of the :mod:`allantools` function to evaluate

        Returns
        -------
        result: dict
            The results of the calculation.
        """
        try:
            func = getattr(allantools, function)
        except AttributeError:
            raise AttributeError("function must be defined in allantools")
        # A few valid statistics do not follow the "*dev" naming scheme
        whitelisted = ["theo1", "mtie", "tierms"]
        if function[-3:] != "dev" and function not in whitelisted:
            # this should probably raise a custom exception type so
            # it's easier to distinguish from other bad things
            raise RuntimeError("function must be one of the 'dev' functions")
        result = func(self.inp["data"], rate=self.inp["rate"],
                      data_type=self.inp["data_type"], taus=self.inp["taus"])
        keys = ["taus", "stat", "stat_err", "stat_n"]
        result = {key: result[i] for i, key in enumerate(keys)}
        self.out = result.copy()
        self.out["stat_id"] = function
        return result

    def write_results(self, filename, digits=5, header_params=None):
        """ Output result to text

        Save calculation results to disk. Will overwrite any existing file.

        Parameters
        ----------
        filename: str
            Path to the output file
        digits: int
            Number of significant digits in output
        header_params: dict
            Arbitrary dict of params to be included in header

        Returns
        -------
        None
        """
        # Use a fresh dict per call: a mutable ({}) default argument would be
        # a single dict object shared across every call of this method.
        if header_params is None:
            header_params = {}
        with open(filename, 'w') as fp:
            fp.write("# Generated by Allantools {}\n".format(
                allantools.__version__))
            fp.write("# Input data type: {}\n".format(self.inp["data_type"]))
            fp.write("# Input data rate: {}\n".format(self.inp["rate"]))
            for key, val in header_params.items():
                fp.write("# {}: {}\n".format(key, val))
            # Fields
            fp.write(("{af:>5s} {tau:>{width}s} {n:>10s} {alpha:>5s} "
                      "{minsigma:>{width}} "
                      "{sigma:>{width}} "
                      "{maxsigma:>{width}} "
                      "\n").format(
                          af="AF",
                          tau="Tau",
                          n="N",
                          alpha="alpha",
                          minsigma="min_" + self.out["stat_id"],
                          sigma=self.out["stat_id"],
                          maxsigma="max_" + self.out["stat_id"],
                          width=digits + 5
                      )
                     )
            out_fmt = ("{af:5d} {tau:.{prec}e} {n:10d} {alpha:5s} "
                       "{minsigma:.{prec}e} "
                       "{sigma:.{prec}e} "
                       "{maxsigma:.{prec}e} "
                       "\n")
            for i in range(len(self.out["taus"])):
                fp.write(out_fmt.format(
                    af=int(self.out["taus"][i] / self.out["taus"][0]),
                    tau=self.out["taus"][i],
                    n=int(self.out["stat_n"][i]),
                    alpha="NaN",  # Not implemented yet
                    minsigma=self.out["stat"][i] - self.out["stat_err"][i]/2,
                    sigma=self.out["stat"][i],
                    maxsigma=(self.out["stat"][i] +
                              self.out["stat_err"][i]/2),
                    prec=digits-1,
                ))
/BigJob-0.64.5.tar.gz/BigJob-0.64.5/docs/source/tutorial/part2.rst | ##################
Simple Ensembles
##################
You might be wondering how to create your own BigJob script or how BigJob can be useful for your needs. Before delving into the remote job and data submission capabilities that BigJob has, it's important to understand the basics.
========================
Hands-On Job Submission
========================
The simplest usage of a pilot-job system is to submit multiple identical tasks collectively, i.e. as one big job! Such usage arises, for example to perform either a parameter sweep job or a set of ensemble simulation.
We will create an example which submits N jobs using BigJob. The jobs are all identical, except that they each record their number in their output. This type of run is called a "simple ensemble" run; it is very useful if you are running many jobs using the same executable (but perhaps different input files). Rather than submit each job individually to the queuing system and then wait for every job to become active and complete, you submit just one 'Big' job (called a Pilot) that reserves the number of cores needed to run all of your jobs. When this BigJob becomes active, your jobs are pulled by BigJob from the Redis server and executed.
Create a new file ``simple_ensembles.py`` and paste the following code:
.. literalinclude:: ../../../examples/tutorial/simple_ensembles.py
:language: python
------------------------
How to Edit The Examples
------------------------
Open the file ``simple_ensembles.py.`` There are two critical sections that must be filled in by the user. We will cover both in this tutorial.
Line 11 of this file says, "BEGIN REQUIRED PILOT SETUP." All of the variables in this section are required in order for your BigJob script to run. You might be wondering what values are valid for these variables; we have created a table to answer these questions.
`Click here to visit the table of valid parameters
<http://saga-project.github.io/BigJob/sphinxdoc/tutorial/table.html>`_.
We will go over the variables not listed in the table below.
REDIS_PWD and REDIS_URL correspond to the redis server that you set up during the installation process of this tutorial. If you are not running the redis on localhost, you must change the URL line. Please do not delete the ``redis://`` directive, as this tells BigJob that it is using a redis server.
USER_NAME: This does not appear in the table, because it corresponds to your individual username on the remote resource. For instance, if the machine you want to run BigJob on has a user account corresponding to jdoe, the USER_NAME field will be ``jdoe.``
WORKDIR: This field is required. It tells BigJob where to place your files. If you recall, in the installation guide for this tutorial, we created an "agent" directory in our home directory. If our home directory on the resource where we created the agent directory is ``/N/u/username``, then our BigJob working directory (WORKDIR) would be ``/N/u/username/agent``.
Special Note: When you see "N/A" in the table, you should fill in None in the simple_ensembles.py file. For example, if the machine you are running on does not have a PROJECT allocation, this line will look like:
``PROJECT = None # Add project / allocation / account to charge``
NUMBER_JOBS: This is the number of tasks you want to run. Note that this may be different than the PILOT_SIZE. You may want to run 32 executables of 2 cores each, i.e. NUMBER_JOBS=32, but PILOT_SIZE=64 (if all are to run at once). For the purposes of this tutorial, a good value for the NUMBER_JOBS = 4.
Line 44, "END REQUIRED PILOT SETUP," concludes this section.
Now, let's take a look at how these variables are used to make the "Pilot Compute Description (PCD)" on Lines 51-58. You do not have to modify the PCD at all, because it uses the variables we filled in during the REQUIRED PILOT SETUP section. This is just to show you how these variables are communicated to BigJob. The PCD describes all the parameters for the Pilot-Job.
.. code-block:: python
pilot_description = pilot.PilotComputeDescription()
pilot_description.service_url = "%s://%s@%s" % (SAGA_ADAPTOR,USER_NAME,HOSTNAME)
pilot_description.queue = QUEUE
pilot_description.project = PROJECT
pilot_description.number_of_processes = PILOT_SIZE
pilot_description.working_directory = WORKDIR
pilot_description.walltime = WALLTIME
pilot_description.processes_per_node = PROCESSES_PER_NODE
pilot_description.spmd_variation = SPMD_VARIATION
The second important section of this code will not require any modification for the tutorial, but we cover it here so you can see how to modify it for your own purposes.
Go to Line 68, "BEGIN USER DEFINED TASK DESCRIPTION." This is where you will define the jobs that the overall Pilot-Job will be executing. This normally includes the path to your specific executable and any inputs or environment setup that is required. It is copied below for discussion purposes:
.. code-block:: python
# -------- BEGIN USER DEFINED TASK DESCRIPTION --------- #
task_desc = pilot.ComputeUnitDescription()
task_desc.executable = '/bin/echo'
task_desc.arguments = ['I am task number $TASK_NO', ]
task_desc.environment = {'TASK_NO': i}
task_desc.spmd_variation = 'single' # Valid values are 'single' or 'mpi'
task_desc.number_of_processes = 1
task_desc.output = 'simple-ensemble-stdout.txt'
task_desc.error = 'simple-ensemble-stderr.txt'
# -------- END USER DEFINED TASK DESCRIPTION --------- #
Let's discuss the above example. We define our executable as "/bin/echo," the simple UNIX command that writes arguments to standard output. Next, we need to provide the arguments. In this case, "I am task number $TASK_NO," would correspond to typing ``/bin/echo 'I am task number $TASK_NO'`` on command line. ``$TASK_NO`` is an environment variable, so we will need to provide a value for it, as is done on the next line: ``{'TASK_NO': i}``. Note that this block of code is in a python for loop, therefore, i corresponds to what iteration we are on. The spmd_variation for a job can be either single or mpi. This refers to the way BigJob attempts to run your job (and also is dependent on the application you are running) - use MPI if your executable uses MPI, otherwise just use single. We then define number_of_processes for the task - this is the number of cores that the task requires (think, for example, of a 4-core MPI executable - that would mean 1 executable requires 4-cores, i.e. the number_of_processes = 4). Note that a spmd_variation of 'single' can also require multiple cores.
Lastly, we define the filenames for the tasks as output and error - these capture the stdout and stderr of running the task itself. These files will not capture BigJob behavior or error messages - those will be discussed later. The directory structure of BigJob will be discussed after we run this example.
-------------
Run the Code
-------------
Save the file and execute it **(make sure your virtualenv is activated):**
.. code-block:: bash
python simple_ensembles.py
The output should look something like this:
.. code-block:: none
* Submitted task 1 with id 8044 to localhost
Waiting for tasks to finish...
Terminating BigJob...
-----------------
Check the Output
-----------------
**Steps to interpreting your output:**
1.) We will find all the relevant BigJob output in the directory that we defined as "WORKDIR" in the above example.
``cd`` into your "working directory." In the examples on this tutorial, this would be the ``$HOME/agent`` directory.
2.) The agent directory contains a directory that is named after the Pilot Service. This directory begins with ``bj-####``, where #### is a unique identifier.
Example:
.. code-block:: bash
(python)-bash-3.2$ cd agent
(python)-bash-3.2$ ls
bj-bac51654-d96b-11e2-8773-a4badb0c3696
stdout-bj-bac51654-d96b-11e2-8773-a4badb0c3696-agent.txt
stderr-bj-bac51654-d96b-11e2-8773-a4badb0c3696-agent.txt
``cd`` into the ``bj-####`` directory.
3.) If you do an ``ls`` in this directory, you should see a number of directories that begin with ``sj-###`` where ### is a unique identifier. These sj-directories represent each task that ran. For example, if NUMBER_JOBS=4, then we ran 4 tasks in the above example, so we would expect 4 sub-job directories as follows:
.. code-block:: bash
(python)-bash-3.2$ cd bj-bac51654-d96b-11e2-8773-a4badb0c3696/
(python)-bash-3.2$ ls
sj-bb1dcfb0-d96b-11e2-8773-a4badb0c3696
sj-bb634946-d96b-11e2-8773-a4badb0c3696
sj-bb409194-d96b-11e2-8773-a4badb0c3696
sj-bb85fd9c-d96b-11e2-8773-a4badb0c3696
4.) Contained within each sub-job directory is a stdout.txt (which contains the output of your executable) and stderr.txt (which contains the output of standard error on the command line -- it will be blank if there were no errors when running your executable) file. Please note that if you defined different names for the "output" and "error" fields in your compute unit description, then the names stdout and stderr will vary.
Example:
.. code-block:: bash
(python)-bash-3.2$ cd sj-bb1dcfb0-d96b-11e2-8773-a4badb0c3696/
(python)-bash-3.2$ ls
simple-ensemble-stdout.txt
simple-ensemble-stderr.txt
5.) Open simple-ensemble-stdout.txt and note the output from your script.
----------------------
Logging and Debugging
----------------------
Since working with distributed systems is inherently complex and much of the
complexity is hidden within BigJob, it is necessary to do a lot of internal
logging. By default, logging output is disabled, but if something goes wrong or
if you're just curious, you can enable the logging output by setting the
environment variable ``BIGJOB_VERBOSE`` to a value between 1 (print only critical
messages) and 5 (print all messages). Give it a try with the above example:
.. code-block:: bash
BIGJOB_VERBOSE=5 python simple_ensembles.py
| PypiClean |
/emily_editor-0.9-py3-none-any.whl/src/emily0_9/html_texting.py | from . import texting
from guibits1_0 import type_checking2_0
# author R.N.Bosworth
# version 2 Mar 2023 15:15
"""
Contractor which allows the client to advance and retreat through HTML text,
as it appears on the screen,
ignoring tags and dealing with escaped code points.
An "HTML character" is either an escaped or non-escaped Unicode code point.
HTML tags are ignored by this contractor.
So
"<b>1 < 2</b>" is treated as "1 < 2"
Copyright (C) 2018,2021 R.N.Bosworth
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License (gpl.txt) for more details.
"""
"""
StartTag(Name) ::= < Name OptionalSpaces >
EndTag(Name) ::= < / Name OptionalSpaces >
EscapedHtmlCharacter ::= ( & a m p ;
| & g t ;
| & l t ;
| any Unicode code point except '&' (U+26), '>' (U+3E) and '<' (U+3C)
"""
# exposed procedures
# ------------------
def advance(t):
    """Move t's cursor one HTML character towards the end of the text.

    HTML tags are skipped entirely; an escaped code point (e.g. "&amp;")
    counts as a single position.

    pre:
      t = texting.Text whose cursor is to be advanced

    post:
      if t is at end of text, False has been returned
      otherwise,
        True has been returned
        t's cursor has been moved one position towards the end of the text,
        ignoring any HTML tags and treating escaped code points as one position

    test:
      t = None
      t = "<b><i>1<2</i></b>x<b></b>"
        cursor at (0,0)
        call advance as many times as necessary!
    """
    type_checking2_0.check_derivative(t,texting.Text)
    row = texting.cursor_line_offset(t)
    col = texting.cursor_code_point_offset(t)
    # probe for end-of-text: jump to the end and see whether we actually moved
    texting.set_cursor_end(t)
    if row == texting.cursor_line_offset(t) and col == texting.cursor_code_point_offset(t):
        return False
    texting.set_cursor(t,row,col)
    cp = texting.current_code_point(t)
    texting.advance(t)
    if cp == ord('<'):
        # skip the remainder of the tag, then advance over the
        # HTML character that follows it
        while texting.current_code_point(t) != ord('>'):
            texting.advance(t)
        texting.advance(t)
        # text cursor just past '>'
        return advance(t)
    elif cp == ord('&'):
        # step over the rest of the escaped code point
        while texting.current_code_point(t) != ord(';'):
            texting.advance(t)
        texting.advance(t)
        # text cursor just past ';'
        return True
    else:
        # cp is a LF, a SPACE or a NonSpaceChar; already advanced past it
        return True
def delete_after(t):
    """Delete the HTML character that follows t's cursor.

    Tags before that character are skipped; an escaped code point
    (e.g. "&amp;") is removed in its entirety.

    pre:
      t = texting.Text for which the HTML character following the cursor is to be deleted

    post:
      if t is at end of text, False has been returned
      otherwise,
        True has been returned
        the HTML character following t's cursor has been deleted

    test:
      t = None
      t = "<b><i>1<2</i></b>x<b></b>"
        cursor at (0,0)
        call delete_after as many times as necessary!
    """
    type_checking2_0.check_derivative(t,texting.Text)
    row = texting.cursor_line_offset(t)
    col = texting.cursor_code_point_offset(t)
    # probe for end-of-text: jump to the end and see whether we actually moved
    texting.set_cursor_end(t)
    if row == texting.cursor_line_offset(t) and col == texting.cursor_code_point_offset(t):
        return False
    texting.set_cursor(t,row,col)
    cp = texting.current_code_point(t)
    if cp == ord('<'):
        # skip (not delete) the whole tag, then delete the next HTML character
        texting.advance(t)
        while texting.current_code_point(t) != ord('>'):
            texting.advance(t)
        texting.advance(t)
        # text cursor just past '>'
        return delete_after(t)
    elif cp == ord('&'):
        # delete '&', the escape body, and the closing ';'
        texting.delete_after(t)
        while texting.current_code_point(t) != ord(';'):
            texting.delete_after(t)
        texting.delete_after(t)
        # escaped HTML character has been deleted
        return True
    else:
        # cp is a LF, a SPACE or a NonSpaceChar: delete it directly
        texting.delete_after(t)
        return True
def retreat(t):
    """
    pre:
      t = texting.Text whose cursor is to be retreated

    post:
      if t is at start of text, false has been returned
      otherwise,
        true has been returned
        t's cursor has been moved one position towards the start of the text,
        ignoring any HTML tags and treating escaped code points as one position

    test:
      t = None
      t = "x;<b><i>1<; ;2</i></b>y"
        cursor at end of text
        call retreat as many times as necessary!
      t = "<align>x"
        cursor at end of text
        call retreat as many times as necessary!
    """
    type_checking2_0.check_derivative(t,texting.Text)
    # at the very start of the text there is nothing to retreat over
    if texting.cursor_line_offset(t) == 0 and texting.cursor_code_point_offset(t) == 0:
        return False
    texting.retreat(t)
    if texting.current_code_point(t) == ord('>'):
        # cursor landed on the end of a tag: skip backwards over the whole
        # tag, then retreat over whatever precedes it
        texting.retreat(t)
        while texting.current_code_point(t) != ord('<'):
            texting.retreat(t)
        # text cursor just before '<'
        return retreat(t)
    if texting.current_code_point(t) == ord(';'):
        # a ';' may close an escaped code point, or be a literal semicolon;
        # remember its position so we can restore the cursor if it is literal
        semicolon_position = texting.cursor_code_point_offset(t)
        # in case it really is a semicolon
        texting.retreat(t)
        cp = texting.current_code_point(t)
        while cp != ord('&') and cp != ord(';') and cp != ord(' ') and cp != ord('>') and texting.cursor_code_point_offset(t) > 0:
            # to ensure termination
            texting.retreat(t)
            cp = texting.current_code_point(t)
        # text cursor EITHER just before '&' OR just before ';' OR just before ' ' OR
        # just before '>' OR at start of line
        # (note ';', ' ' and '>' not allowed in escaped code point)
        if cp != ord('&'):
            texting.set_cursor(t,texting.cursor_line_offset(t),semicolon_position)
            # just before actual ';', as no '&' found
        # just before previous code point, escaped or not
        return True
    # else current_code_point is a non-escaped HTML Character
    return True
/Ibidas-0.1.26.tar.gz/Ibidas-0.1.26/ibidas/itypes/rtypes.py | import platform
import copy
import numpy
import operator
from collections import defaultdict
from ..constants import *
from ..utils import util
from ..parser_objs import *
_delay_import_(globals(),"dimensions")
_delay_import_(globals(),"dimpaths")
_delay_import_(globals(),"casts")
_delay_import_(globals(),"type_attribute_freeze")
_delay_import_(globals(),"..utils.missing","Missing")
#}}}
#typename to type class dictionary
__typenames__ = {}
#children types classes for each parent type class
__typechildren__ = defaultdict(list)
from ibidas import constants
def addType(newtypecls):
    """Register the type class `newtypecls` in the module-wide type
    hierarchy: the name-lookup table and the parent->children map."""
    __typenames__[newtypecls.name] = newtypecls
    for parentcls in newtypecls.__bases__:
        __typechildren__[parentcls].append(newtypecls)
class Type(object):#{{{
    """Base type class. Represents the type of a data structure.

    Class attributes act as overridable defaults for all type objects:
    `name` is the parseable type name, `_dtype`/`_scalar` describe the
    numpy representation, `_defval` is the default value, and
    `_reqRPCcon` flags whether values require conversion for RPC transfer.
    """
    name = "?"
    _dtype = "object"
    _scalar = numpy.object
    _defval = None
    _reqRPCcon=True
    has_missing = True

    @classmethod
    def commonType(cls, type1, type2):
        """Returns a common supertype for type1 and type2"""
        return unknown

    def _requiresRPCconversion(self):
        # True when values of this type must be converted before RPC transport
        return self._reqRPCcon

    def toNumpy(self):
        """Returns numpy dtype compatible with this type

        :rtype: numpy dtype
        """
        return numpy.dtype(self._dtype)

    def toScalar(self):
        """Returns numpy scalar classs compatible with this type

        :rtype: numpy scalar class
        """
        return self._scalar

    def toDefval(self):
        """Returns default value."""
        return self._defval

    def toMissingval(self):
        # the Missing singleton stands in for an absent value
        return Missing

    def hasMissingValInfo(self):
        # True if a concrete missing value can be constructed for this type;
        # subclasses may raise RuntimeError from toMissingval (e.g. unknown shape)
        try:
            self.toMissingval()
            return True
        except RuntimeError:
            return False

    def hasMissing(self):
        # whether values of this type may be missing
        return self.has_missing

    def getName(self):
        """Returns base type name"""
        return self.name

    def _callSubtypes(self, methodname, *params, **kwds):
        """If has subtype, calls subtype with methodname and params

        :param methodname: Name of type method. Should return type object
        :param params: arguments for ``methodname``.
        :param kwds: Keyword arguments for ``methodname``
        """
        # base type has no subtypes: nothing to propagate
        return self

    #dim changes (propagated to subtypes; no-ops for scalar types)
    def _removeDepDim(self, pos, elem_specifier, has_missing=False):
        return self._callSubtypes("_removeDepDim",pos, elem_specifier, has_missing=has_missing)

    def _updateDepDim(self, pos, ndim):
        return self._callSubtypes("_updateDepDim",pos, ndim)

    def _insertDepDim(self, pos, ndim):
        return self._callSubtypes("_insertDepDim",pos, ndim)

    def _permuteDepDim(self, prevdims, permute_idxs):
        return self._callSubtypes("_permuteDepDim",prevdims, permute_idxs)

    #subtypes
    def getSubTypeNumber(self):
        """Returns number of subtypes of this type"""
        return 0

    def getSubType(self, subtype_id=0):
        """Returns subtype if this is a non-scalar type.
        Otherwise, raises TypeError. If subtype_id invalid, raised IndexError.

        :param subtype_id: id of subtype, default 0. Should be >= 0 and < :py:meth:`getSubTypeNumber`.
        :rtype: obj of class :py:class:`Type`"""
        return unknown

    #comparison: types form a partial order on specificity; a string operand
    #is first parsed into a type object via createType
    def __eq__(self, other):
        if(isinstance(other, basestring)):
            other = createType(other)
        return other.__class__ is self.__class__

    def __ne__(self, other):
        return not self.__eq__(other)

    def __le__(self, other):
        return self.__eq__(other)

    def __ge__(self, other):
        return True

    def __gt__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        return False

    def __hash__(self):
        return hash(self.__class__)

    def __repr__(self):
        return self.name

    def copy(self):
        """Returns copy of this type"""
        # base types are stateless, so sharing the instance is safe
        return self

    @classmethod
    def getDescendantTypes(cls):
        """Returns descendant type classes as list

        :rtype: list of descendant type classes"""
        # cached per class in _desc_types after the first call
        if(not hasattr(cls, "_desc_types")):
            desc_types = [tcls.getDescendantTypes()
                          for tcls in __typechildren__[cls]]
            #flatten list (concat each list in list)
            if(desc_types):
                desc_types = sum(desc_types, [])
            desc_types.append(cls)
            cls._desc_types = desc_types
        return cls._desc_types
class TypeUnknown(Type):
    """Unknown type represents the lack of information about
    the actual type.

    As this type has only one possible state,
    a module singleton is available through rtypes.unknown
    """
addType(TypeUnknown)#}}}

#unknown singleton: shared instance used wherever no type info is available
unknown = TypeUnknown()
class TypeAny(TypeUnknown):#{{{
    """Type which represents that any value is allowed"""
    name = "any"
    # NOTE(review): a plain lambda stored as a class attribute is a
    # descriptor in Python 2 -- accessing it through an instance yields a
    # bound method that receives `self` as its argument. Confirm callers of
    # getPythonType() expect this (identity conversion) behavior.
    _ptype = lambda x : x

    def __init__(self, has_missing=False):
        """
        Creates type object.

        :param has_missing: bool, optional
        """
        TypeUnknown.__init__(self)
        self.has_missing = has_missing

    @classmethod
    def commonType(cls, type1, type2):
        # common type allows missing values if either operand does
        return cls(type1.has_missing or type2.has_missing)

    def toNumpy(self):
        """Returns dtype of a numpy container which
        can hold this type efficiently.
        """
        if(self.has_missing):
            # object dtype so the Missing singleton can be stored
            return numpy.dtype(object)
        else:
            return numpy.dtype(self._dtype)

    def getSubType(self, subtype_id=0):
        """Returns subtype if this is a non-scalar type.
        Otherwise, raises TypeError. If subtype_id invalid, raised IndexError.

        :param subtype_id: id of subtype, default 0. Should be >= 0 and < :py:meth:`getSubTypeNumber`.
        :rtype: obj of class :py:class:`Type`"""
        raise TypeError, "Expected subtypeable type, but found " + str(self)

    def getPythonType(self):
        # python-level type/converter for values of this type
        return self._ptype

    # comparison: class hierarchy position plus has_missing define the
    # partial order (a missing-allowing type is more general)
    def __eq__(self, other):
        if(isinstance(other, basestring)):
            other = createType(other)
        return (self.__class__ is other.__class__ and
                self.has_missing is other.has_missing)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __le__(self, other):
        if(isinstance(other, basestring)):
            other = createType(other)
        if(other.__class__ == self.__class__):
            if(self.has_missing):
                return other.has_missing
            return True
        if(other.__class__ in self.__class__.__mro__):
            return True
        return False

    def __ge__(self, other):
        if(isinstance(other, basestring)):
            other = createType(other)
        if(other.__class__ == self.__class__):
            if(other.has_missing):
                return self.has_missing
            return True
        if(self.__class__ in other.__class__.__mro__):
            return True
        return False

    def __gt__(self, other):
        if(isinstance(other, basestring)):
            other = createType(other)
        if(other.__class__ == self.__class__):
            return self.has_missing and not other.has_missing
        if(self.__class__ in other.__class__.__mro__):
            return True
        return False

    def __lt__(self, other):
        if(isinstance(other, basestring)):
            other = createType(other)
        if(other.__class__ == self.__class__):
            return not self.has_missing and other.has_missing
        if(other.__class__ in self.__class__.__mro__):
            return True
        return False

    def __hash__(self):
        return hash(self.__class__) ^ hash(self.has_missing)

    def copy(self, **newattr):
        """Returns copy of this type"""
        return copy.copy(self)

    def setHasMissing(self, value):
        # returns self unchanged, or a copy with has_missing set to `value`
        if self.has_missing != value:
            res = self.copy()
            res.has_missing = value
        else:
            res = self
        return res

    def __repr__(self):
        res = self.name
        if(self.has_missing):
            res += "?"
        return res
addType(TypeAny)#}}}
class TypeTuple(TypeAny):#{{{
    """Tuple or record type, having multiple values of
    possibly different types"""
    name = "tuple"
    _ptype = tuple

    def __init__(self, has_missing=False, subtypes=(),
                       fieldnames=()):
        """
        Creates type object.

        :param has_missing: bool, optional
        :param subtypes: tuple(type objects, i.e. subclasses of TypeUnknown), optional
        :param fieldnames: tuple(strings), optional
        """
        assert isinstance(subtypes, tuple), \
            "The subtypes argument should be a tuple"
        assert all(isinstance(fieldname, basestring) for fieldname in fieldnames), \
            "Fieldnames should be strings"
        assert not fieldnames or len(fieldnames) == len(subtypes), \
            "Length of fieldnames should be equal to length of tuples (or empty"
        TypeAny.__init__(self, has_missing)
        self.subtypes = subtypes
        self.fieldnames = fieldnames

    @classmethod
    def commonType(cls, type1, type2):
        # without matching subtype lists fall back to an untyped tuple;
        # otherwise take the implicit common type per position
        if(not type1.subtypes or not type2.subtypes or
           len(type1.subtypes) != len(type2.subtypes)):
            return cls(type1.has_missing or type2.has_missing)
        else:
            subtypes = [casts.castImplicitCommonType(lstype, rstype)
                        for lstype, rstype in zip(type1.subtypes, type2.subtypes)]
            if(False in subtypes):
                # some position has no common type: no common tuple type
                return False
            res = cls(type1.has_missing or type2.has_missing, tuple(subtypes), type1.fieldnames)
            return res

    def toDefval(self):
        """Returns default value."""
        # cached on the instance after first computation
        if(not "_defval" in self.__dict__):
            self._defval = tuple((subtype.toDefval()
                                  for subtype in self.subtypes))
        return self._defval

    def getSubTypeNumber(self):
        """Returns number of subtypes of this type"""
        return len(self.subtypes)

    def getSubType(self, subtype_id=0):
        """Returns subtype if this is a non-scalar type.
        Otherwise, raises TypeError. If subtype_id invalid, raised IndexError.

        :param subtype_id: id of subtype, default 0. Should be >= 0 and < :py:meth:`getSubTypeNumber`.
        :rtype: obj of class :py:class:`Type`"""
        if(self.subtypes):
            return self.subtypes[subtype_id]
        else:
            return unknown

    # comparison: extends TypeAny's partial order with element-wise
    # comparison of the subtype tuples
    def __eq__(self, other):
        if(isinstance(other, basestring)):
            other = createType(other)
        return (self.__class__ is other.__class__ and
                self.has_missing is other.has_missing and
                self.subtypes == other.subtypes and
                self.fieldnames == other.fieldnames)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __le__(self, other):
        if(isinstance(other, basestring)):
            other = createType(other)
        if(other.__class__ == self.__class__):
            if(self.has_missing and not other.has_missing):
                return False
            if((not self.subtypes and other.subtypes) or (other.subtypes and
                len(self.subtypes) != len(other.subtypes))):
                return False
            if(not all([st <= ot for st, ot in
                        zip(self.subtypes, other.subtypes)])):
                return False
            return True
        if(other.__class__ in self.__class__.__mro__):
            return True
        return False

    def __ge__(self, other):
        if(isinstance(other, basestring)):
            other = createType(other)
        if(other.__class__ == self.__class__):
            if(other.has_missing and not self.has_missing):
                return False
            if((not other.subtypes and self.subtypes) or (self.subtypes and
                len(self.subtypes) != len(other.subtypes))):
                return False
            if(not all([st >= ot for st, ot in
                        zip(self.subtypes, other.subtypes)])):
                return False
            return True
        if(self.__class__ in other.__class__.__mro__):
            return True
        return False

    def __gt__(self, other):
        if(isinstance(other, basestring)):
            other = createType(other)
        if(other.__class__ == self.__class__):
            if(other.has_missing and not self.has_missing):
                return False
            if(not other.subtypes or (self.subtypes and
                len(self.subtypes) != len(other.subtypes))):
                return False
            if(not all([st > ot for st, ot in
                        zip(self.subtypes, other.subtypes)])):
                return False
            return True
        if(self.__class__ in other.__class__.__mro__):
            return True
        return False

    def __lt__(self, other):
        if(isinstance(other, basestring)):
            other = createType(other)
        if(other.__class__ == self.__class__):
            if(self.has_missing and not other.has_missing):
                return False
            if(not self.subtypes or (other.subtypes and
                len(self.subtypes) != len(other.subtypes))):
                return False
            if(not all([st < ot for st, ot in
                        zip(self.subtypes, other.subtypes)])):
                return False
            return True
        if(other.__class__ in self.__class__.__mro__):
            return True
        return False

    def __hash__(self):
        return (hash(self.__class__) ^
                hash(self.has_missing) ^
                hash(self.subtypes) ^
                hash(self.fieldnames))

    def _callSubtypes(self, methodname, *params, **kwds):
        # apply `methodname` to every subtype; only copy self if any
        # subtype object actually changed
        nsubtypes = tuple([getattr(subtype,methodname)(*params,**kwds) for subtype in self.subtypes])
        if(all([nsubtype is subtype for nsubtype,subtype in zip(nsubtypes,self.subtypes)])):
            nself = self
        else:
            nself = self.copy()
            nself.subtypes = nsubtypes
        return nself

    def __repr__(self):
        res = '('
        if(len(self.fieldnames) == len(self.subtypes)):
            res += ", ".join((fname + "=" + str(subtype)
                for fname, subtype in zip(self.fieldnames, self.subtypes)))
        else:
            res += ", ".join((str(subtype)
                for subtype in self.subtypes))
        res += ')'
        if(self.has_missing):
            res += "?"
        return res
addType(TypeTuple)#}}}
class TypeRecordDict(TypeTuple):#{{{
    """Record type backed by a dict; behaves as a tuple type but is
    rendered with curly braces."""
    name = "record_dict"
    _ptype = None

    def __repr__(self):
        # pair each fieldname with its subtype when counts match,
        # otherwise fall back to the bare subtype reprs
        if(len(self.fieldnames) == len(self.subtypes)):
            parts = [fname + "=" + str(subtype)
                     for fname, subtype in zip(self.fieldnames, self.subtypes)]
        else:
            parts = [str(subtype) for subtype in self.subtypes]
        res = '{' + ", ".join(parts) + '}'
        if(self.has_missing):
            res += "?"
        return res
addType(TypeRecordDict)#}}}
class TypeIndexDict(TypeTuple):#{{{
    """Dictionary (index) type, modeled as a two-field tuple type with a
    key subtype and a value subtype."""
    name = "index_dict"
    _ptype = None

    def __init__(self, has_missing=False, subtypes=(),
                       fieldnames=("key","value")):
        """
        Creates type object.

        :param has_missing: bool, optional
        :param subtypes: tuple of exactly two type objects
                         (key type, value type)
        :param fieldnames: tuple(strings), optional, defaults to
                           ("key", "value")
        """
        assert isinstance(subtypes, tuple), \
            "The subtypes argument should be a tuple"
        assert all(isinstance(fieldname, basestring) for fieldname in fieldnames), \
            "Fieldnames should be strings"
        # bugfix: assert message previously had an unbalanced parenthesis
        assert not fieldnames or len(fieldnames) == len(subtypes), \
            "Length of fieldnames should be equal to length of tuples (or empty)"
        assert len(subtypes) == 2, "IndexDict should have a key and value subtype"
        # intentionally skip TypeTuple.__init__: the checks above already
        # cover (and tighten) its preconditions
        TypeAny.__init__(self, has_missing)
        self.subtypes = subtypes
        self.fieldnames = fieldnames

    def __repr__(self):
        res = 'dict('
        if(len(self.fieldnames) == len(self.subtypes)):
            res += ": ".join((fname + "=" + str(subtype)
                for fname, subtype in zip(self.fieldnames, self.subtypes)))
        else:
            res += ": ".join((str(subtype)
                for subtype in self.subtypes))
        res += ')'
        if(self.has_missing):
            res += "?"
        return res
addType(TypeIndexDict)#}}}
#pylint: disable-msg=E1101
class TypeArray(TypeAny):#{{{
"""Type representing a collection of values,
possibly in an dimensional structure"""
name = "array"
_ptype = list
def __init__(self, has_missing=False, dims=(), \
subtypes=(unknown,)):
"""
Creates type object.
:param has_missing: bool, optional
:param dims: tuple(Dim's), optional
:param subtypes: tuple(type object, i.e. subclass of TypeUnknown), optional
A dimid is a unique identifier for a dimension, helping the
system to determine equality between dimensions. If there are
no similarities with dimensions in other types, should be left empty.
"""
assert isinstance(dims, dimpaths.DimPath), \
"Dims of an array should be a dimpath"
assert len(dims) == 1, "Array should have one dimension"
assert all([isinstance(dim, dimensions.Dim) for dim in dims]), \
"Dims tuple should contain Dim objects"
assert isinstance(subtypes, tuple) and len(subtypes) == 1, \
"One subtype should be set for array type"
has_missing = dims[0].has_missing or has_missing
self.subtypes = subtypes
self.dims = dims
TypeAny.__init__(self, has_missing)
@classmethod
def commonType(cls, type1, type2):
if(not type1.dims or not type2.dims or
len(type1.dims) != len(type2.dims)):
return TypeAny(type1.has_missing or type2.has_missins)
else:
subtypes = [casts.castImplicitCommonType(lstype, rstype)
for lstype, rstype in zip(type1.subtypes, type2.subtypes)]
if(False in subtypes):
return False
ndims = []
for ldim, rdim in zip(type1.dims, type2.dims):
ndims.append(ldim.merge(rdim))
dims = dimpaths.DimPath(*ndims)
res = cls(has_missing=type1.has_missing or type2.has_missing, dims=dims, subtypes=tuple(subtypes))
return res
def setHasMissing(self, value):
if value and not self.dims[0].dependent: #fixed dim, needs has_missing subtype if unpacked
s = self.subtypes[0].setHasMissing(value)
else:
s = self.subtypes[0]
if not self.has_missing == value or not s is self.subtypes[0]:
self = self.copy()
self.has_missing = value
self.subtypes = (s,)
return self
def toMissingval(self):
s = self.subtypes[0].toMissingval()
if self.dims[0].dependent:
res = numpy.array([],dtype=self.subtypes[0].toNumpy())
else:
if self.dims[0].shape == UNDEFINED:
raise RuntimeError, "Cannot determine shape for missing value. Please cast dim " + str(self.dims[0])
res = numpy.array([s] * self.dims[0].shape,dtype=self.toNumpy())
return res
def hasMissing(self):
return self.subtypes[0].hasMissing()
def toDefval(self):
"""Returns default value."""
subtype = self.subtypes[0]
shape = [dim.shape for dim in self.dims]
for pos, sdim in enumerate(shape):
if(sdim < 0):
shape[pos] = 0
res = numpy.empty(shape, dtype=subtype.toNumpy())
subdv = subtype.toDefval()
#workaround numpy problem
#(cannot set sequence for a range)
if(operator.isSequenceType(subdv)):
flatres = res.ravel()
for i in range(len(flatres)):
flatres[i] = subdv
else:
res[:] = subdv
return res
def getSubTypeNumber(self):
"""Returns number of subtypes of this type"""
return len(self.subtypes)
def getSubType(self, subtype_id=0):
"""Returns subtype if this is a non-scalar type.
Otherwise, raises TypeError. If subtype_id invalid, raised IndexError.
:param subtype_id: id of subtype, default 0. Should be >= 0 and < :py:meth:`getSubTypeNumber`.
:rtype: obj of class :py:class:`Type`"""
assert (subtype_id == 0), "Invalid subtype id given"
if(self.subtypes):
return self.subtypes[subtype_id]
else:
return unknown
def __eq__(self, other):
if(isinstance(other, basestring)):
other = createType(other)
return (self.__class__ is other.__class__ and
self.has_missing is other.has_missing and
self.subtypes == other.subtypes and
self.dims == other.dims)
def __ne__(self, other):
return not self.__eq__(other)
def __le__(self, other):
if(isinstance(other, basestring)):
other = createType(other)
if(other.__class__ == self.__class__):
if(self.has_missing and not other.has_missing):
return False
if((not self.subtypes and other.subtypes) or
len(self.subtypes) != len(other.subtypes)):
return False
if(not all([st <= ot for st, ot in
zip(self.subtypes, other.subtypes)])):
return False
return all([d1.shape == d2.shape for d1, d2 in
zip(self.dims, other.dims)])
if(other.__class__ in self.__class__.__mro__):
return True
return False
def __ge__(self, other):
if(isinstance(other, basestring)):
other = createType(other)
if(other.__class__ == self.__class__):
if(other.has_missing and not self.has_missing):
return False
if((not other.subtypes and self.subtypes) or
len(self.subtypes) != len(other.subtypes)):
return False
if(not all([st >= ot for st, ot in
zip(self.subtypes, other.subtypes)])):
return False
return all([d1.shape == d2.shape for d1, d2 in
zip(self.dims, other.dims)])
if(self.__class__ in other.__class__.__mro__):
return True
return False
def __gt__(self, other):
if(isinstance(other, basestring)):
other = createType(other)
if(other.__class__ == self.__class__):
if(other.has_missing and not self.has_missing):
return False
if(not other.subtypes or
len(self.subtypes) != len(other.subtypes)):
return False
if(not all([st > ot for st, ot in
zip(self.subtypes, other.subtypes)])):
return False
return all([d1.shape == d2.shape for d1, d2 in
zip(self.dims, other.dims)])
if(self.__class__ in other.__class__.__mro__):
return True
return False
def __lt__(self, other):
if(isinstance(other, basestring)):
other = createType(other)
if(other.__class__ == self.__class__):
if(self.has_missing and not other.has_missing):
return False
if(not self.subtypes or
len(self.subtypes) != len(other.subtypes)):
return False
if(not all([st < ot for st, ot in
zip(self.subtypes, other.subtypes)])):
return False
return all([d1.shape == d2.shape for d1, d2 in
zip(self.dims, other.dims)])
if(other.__class__ in self.__class__.__mro__):
return True
return False
def __hash__(self):
return (hash(self.__class__) ^
hash(self.has_missing) ^
hash(self.subtypes) ^
hash(self.dims))
def _callSubtypes(self, methodname, *params, **kwds):
nsubtypes = tuple([getattr(subtype,methodname)(*params,**kwds) for subtype in self.subtypes])
if(all([nsubtype is subtype for nsubtype,subtype in zip(nsubtypes,self.subtypes)])):
nself = self
else:
nself = self.copy()
nself.subtypes = nsubtypes
return nself
def _removeDepDim(self, pos, elem_specifier, has_missing=False):
nself = self._callSubtypes("_removeDepDim",pos - len(self.dims), elem_specifier, has_missing=has_missing)
ndims = self.dims.removeDim(pos,elem_specifier, has_missing=has_missing)
if(not ndims is self.dims):
if(self is nself):
nself = self.copy()
nself.dims = ndims
return nself
def _updateDepDim(self, pos, ndim):
nself = self._callSubtypes("_updateDepDim", pos - len(self.dims), ndim)
ndims = self.dims.updateDim(pos,ndim)
if(not ndims is self.dims):
if(self is nself):
nself = self.copy()
nself.dims = ndims
return nself
def _insertDepDim(self, pos, ndim):
nself = self._callSubtypes("_insertDepDim",pos - len(self.dims), ndim)
ndims = self.dims.insertDim(pos, ndim)
if(not ndims is self.dims):
if(self is nself):
nself = self.copy()
nself.dims = ndims
return nself
def _permuteDepDim(self, prevdims, permute_idxs):
nself = self._callSubtypes("_permuteDepDim", prevdims + self.dims, permute_idxs)
ndims = self.dims.permuteDims(permute_idxs, prevdims=prevdims)
if(not ndims is self.dims):
if(self is nself):
nself = self.copy()
nself.dims = ndims
return self
def __repr__(self, unpack_depth=0):
res = '[' + ",".join([str(dim) for dim in self.dims]) + ']'
if(self.has_missing):
res += "?"
if(unpack_depth > 1):
assert isinstance(self.subtypes, TypeArray), \
"Unpack depth of tuple is larger than number of " + \
"array subtypes. Found: " + str(self.subtypes[0])
res += '<' + self.subtypes[0].__repr__(unpack_depth - 1)
else:
res += ':' + str(self.subtypes[0])
return res
addType(TypeArray)#}}}
class TypeSet(TypeArray):#{{{
    """Set type: a single-dim collection with exactly one subtype."""
    name = "set"
    _ptype = set

    def __init__(self, has_missing=False, dims=(), subtypes=(unknown,)):
        assert (isinstance(dims, dimpaths.DimPath) and len(dims) == 1), \
            "Dimensions of a set should be a dimpath of size 1"
        assert subtypes and len(subtypes) == 1 and isinstance(subtypes, tuple), \
            "Number of subtypes should be 1"
        TypeArray.__init__(self, has_missing, dims, subtypes)

    def __repr__(self):
        parts = []
        if self.has_missing:
            parts.append("?")
        parts.append('{' + ",".join(str(dim) for dim in self.dims) + '}')
        parts.append('<' + str(self.subtypes[0]))
        return "".join(parts)

    def hasMissing(self):
        return self.has_missing

    def toMissingval(self):
        # An immutable empty set stands in for a missing value.
        return frozenset()
addType(TypeSet)#}}}
class TypeString(TypeArray):#{{{
    """String type (supports unicode strings).

    Modelled as a one-dimensional array of TypeChar.  Improvement: a block
    of garbled, commented-out dead code inside toNumpy() was removed.
    """
    name = "string"
    _dtype = "U"
    _defval = u""
    _ptype = unicode
    _reqRPCcon = False

    def __init__(self, has_missing=False, dims=()):
        assert (isinstance(dims, dimpaths.DimPath) and len(dims) == 1), \
            "Dimensions of a string should be a dimpath of size 1"
        TypeArray.__init__(self, has_missing, dims, (TypeChar(),))

    @classmethod
    def commonType(cls, type1, type2):
        """Common string type: widest known shape (UNDEFINED if either is
        undefined), missing allowed if either side allows it."""
        if (type1.dims[0].shape == UNDEFINED or
                type2.dims[0].shape == UNDEFINED):
            shape = UNDEFINED
        else:
            shape = max(type1.dims[0].shape, type2.dims[0].shape)
        dim = dimensions.Dim(shape)
        res = cls(has_missing=type1.has_missing or type2.has_missing,
                  dims=dimpaths.DimPath(dim))
        return res

    def toNumpy(self):
        """Returns dtype of a numpy container which can hold this type
        efficiently.  Strings are always stored as object arrays."""
        return numpy.dtype(object)

    def __eq__(self, other):
        return (self.__class__ is other.__class__ and
                self.has_missing is other.has_missing and
                self.dims[0].shape == other.dims[0].shape)

    def __hash__(self):
        # Mirrors __eq__: class, has_missing flag, and the string length.
        return (hash(self.__class__) ^
                hash(self.has_missing) ^
                hash(self.dims[0].shape))

    def toMissingval(self):
        return Missing

    def hasMissing(self):
        return self.has_missing

    def __repr__(self):
        res = self.name
        if self.has_missing:
            res += "?"
        if self.dims[0].shape >= 0:
            res += '[' + str(self.dims[0].shape) + ']'
        return res

    def toDefval(self):
        """Returns default value."""
        return ""
addType(TypeString)#}}}
class TypeBytes(TypeString):#{{{
    """String type (only byte/ascii characters)"""
    name = "bytes"
    _dtype = "S"    # numpy byte-string dtype character
    _defval = b""
    _ptype = bytes
addType(TypeBytes)#}}}
class TypeSequence(TypeBytes):#{{{
    """Sequence (DNA/protein) type (only byte/ascii characters)"""
    # Common base for the biological sequence types defined below.
    name = "sequence"
addType(TypeSequence)#}}}
class TypeDNASequence(TypeSequence):#{{{
    """DNA sequence type (only byte/ascii characters)"""
    name = "DNA"
addType(TypeDNASequence)#}}}
class TypeProteinSequence(TypeSequence):#{{{
    """Protein sequence type (only byte/ascii characters)"""
    name = "protein"
addType(TypeProteinSequence)#}}}
class TypePickle(TypeBytes):#{{{
    """Byte-string type; presumably holds pickled data (name-based
    assumption - TODO confirm against the serialization code)."""
    name = "pickle"
addType(TypePickle)#}}}
class TypeScalar(TypeAny):#{{{
    """Type representing atom-like values"""
    # Base of the char/slice/number branch of the hierarchy below.
    name = "scalar"
addType(TypeScalar)#}}}
class TypeChar(TypeScalar):#{{{
    """Type representing characters"""
    # Used as the element subtype of TypeString (see TypeString.__init__).
    name = "char"
addType(TypeChar)#}}}
class TypeSlice(TypeScalar):#{{{
    """Type representing slices"""
    name = "slice"
    _ptype = slice    # Python's builtin slice objects
addType(TypeSlice)#}}}
class TypeNumber(TypeScalar):#{{{
    """Type representing the number values"""
    name = "number"
    _defval = 0

    def __init__(self, has_missing=False):
        # Delegates to the parent; pins the signature to has_missing only.
        TypeScalar.__init__(self, has_missing)
addType(TypeNumber)#}}}
class TypeComplex(TypeNumber):#{{{
    """Complex number"""
    name = "complex"
    _defval = 0.0j

    def toNumpy(self):
        """Return the numpy dtype able to hold this type efficiently:
        object when missing values are possible, the native dtype otherwise."""
        chosen = object if self.has_missing else self._dtype
        return numpy.dtype(chosen)
addType(TypeComplex)#}}}
class TypeComplex128(TypeComplex):#{{{
    """Complex number, representable by machine doubles"""
    name = "complex128"
    _dtype = "complex128"            # numpy dtype name
    _scalar = numpy.complex128       # numpy scalar type
addType(TypeComplex128)#}}}
class TypeComplex64(TypeComplex128):#{{{
    """Complex number, representable by machine singles"""
    name = "complex64"
    _dtype = "complex64"
    _scalar = numpy.complex64
addType(TypeComplex64)#}}}
class TypeReal64(TypeComplex128):#{{{
    """Floating point number, representable by machine double"""
    name = "real64"
    _dtype = "float64"
    _scalar = numpy.float64
    _defval = 0.0
    _reqRPCcon=False
    _ptype = float
addType(TypeReal64)#}}}
class TypeReal32(TypeReal64, TypeComplex64):#{{{
    """Floating point number, representable by machine single"""
    # Multiple inheritance: a real32 is both a narrower real64 and a
    # narrower complex64 in this hierarchy.
    name = "real32"
    _dtype = "float32"
    _scalar = numpy.float32
addType(TypeReal32)#}}}
class TypeInteger(TypeReal32):#{{{
    """Integer number"""
    name = "long"
    _dtype = "object"
    _minmax = (-numpy.inf, numpy.inf)    # unbounded python long
    _ptype = int

    @classmethod
    def getMinValue(cls):
        """Returns minimum integer that can be stored
        with this type"""
        minimum, _ = cls._minmax
        return minimum

    @classmethod
    def getMaxValue(cls):
        """Returns maximum integer that can be stored
        with this type"""
        _, maximum = cls._minmax
        return maximum

    def toNumpy(self):
        """Return the numpy dtype able to hold this type efficiently:
        object when missing values are possible, the native dtype otherwise."""
        return numpy.dtype(object if self.has_missing else self._dtype)
addType(TypeInteger)#}}}
class TypeInt64(TypeInteger):#{{{
    """Integer number,
    range -9,223,372,036,854,775,808 to +9,223,372,036,854,775,807"""
    name = "int64"
    _minmax = (-2**63, 2**63-1)    # inclusive value range
    _dtype = "int64"
    _scalar = numpy.int64
addType(TypeInt64)#}}}
class TypeInt32(TypeInt64):#{{{
    """Integer number,
    range -2,147,483,648 to +2,147,483,647"""
    name = "int32"
    _minmax = (-2**31, 2**31-1)
    _dtype = "int32"
    _scalar = numpy.int32
addType(TypeInt32)#}}}
class TypeInt16(TypeInt32):#{{{
    """Integer number,
    range -32,768 to +32,767"""
    name = "int16"
    _minmax = (-2**15, 2**15-1)
    _dtype = "int16"
    _scalar = numpy.int16
addType(TypeInt16)#}}}
class TypeInt8(TypeInt16):#{{{
    """Integer number,
    range -128 to 127"""
    name = "int8"
    _minmax = (-2**7, 2**7-1)
    _dtype = "int8"
    _scalar = numpy.int8
addType(TypeInt8)#}}}
class TypeUnsignedInteger(TypeInteger):#{{{
    """Integer number (no negative integers)"""
    name = "ulong"
addType(TypeUnsignedInteger)#}}}
class TypeUInt64(TypeUnsignedInteger):#{{{
    """Integer number,
    range 0 to 18,446,744,073,709,551,615"""
    name = "uint64"
    _minmax = (0, 2**64-1)
    _dtype = "uint64"
    _scalar = numpy.uint64
addType(TypeUInt64)#}}}
class TypeUInt32(TypeUInt64, TypeInt64):#{{{
    """Integer number,
    range 0 to 4,294,967,295"""
    # Inherits from both a wider unsigned and a wider signed type: any
    # uint32 value also fits an int64.
    name = "uint32"
    _minmax = (0, 2**32-1)
    _dtype = "uint32"
    _scalar = numpy.uint32
addType(TypeUInt32)#}}}
class TypeUInt16(TypeUInt32, TypeInt32):#{{{
    """Integer number,
    range 0 to 65,535"""
    name = "uint16"
    _dtype = "uint16"
    _minmax = (0, 2**16-1)
    _scalar = numpy.uint16
addType(TypeUInt16)#}}}
class TypeUInt8(TypeUInt16, TypeInt16):#{{{
    """Integer number,
    range 0 to 255"""
    name = "uint8"
    _minmax = (0, 2**8-1)
    _dtype = "uint8"
    _scalar = numpy.uint8
addType(TypeUInt8)#}}}
class TypeBool(TypeUInt8, TypeInt8):#{{{
    """Integer number,
    range 0 to 1 (or False, True)"""
    name = "bool"
    _minmax = (0, 1)
    _dtype = "bool"
    # Fix: use numpy.bool_ (the numpy scalar type) rather than numpy.bool,
    # which was merely an alias of the builtin bool and is removed in
    # numpy >= 1.24; this also matches the numpy scalar types used for
    # _scalar by all sibling classes (numpy.int8, numpy.uint8, ...).
    _scalar = numpy.bool_
    _defval = False
    _reqRPCcon=True
    _ptype = bool
addType(TypeBool)#}}}
#maps numpy type to internal type
# NOTE(review): the unsigned numpy dtypes below map onto the *signed*
# internal types (uint8 -> TypeInt8, ..., uint64 -> TypeInt64) although
# TypeUInt8..TypeUInt64 exist above; large unsigned values do not fit the
# mapped signed range - confirm whether this is intentional.
__objtype_map__ = {
    numpy.dtype('bool'):TypeBool,
    numpy.dtype("object"):TypeAny,
    numpy.dtype("int8"):TypeInt8,
    numpy.dtype("int16"):TypeInt16,
    numpy.dtype("int32"):TypeInt32,
    numpy.dtype("int64"):TypeInt64,
    numpy.dtype("uint8"):TypeInt8,
    numpy.dtype("uint16"):TypeInt16,
    numpy.dtype("uint32"):TypeInt32,
    numpy.dtype("uint64"):TypeInt64,
    numpy.dtype("float32"):TypeReal32,
    numpy.dtype("float64"):TypeReal64,
    numpy.dtype("complex64"):TypeComplex64,
    numpy.dtype("complex128"):TypeComplex128}
# Parser state constants.
# NOTE(review): two groups of constants share values (e.g. INDIM ==
# NAME_FOUND_ARRAY == IN_SUBTYPE == 2); none are referenced in this part
# of the file - possibly leftovers from an older scanner implementation.
NO_NAME = 0
NAME_FOUND = 1
INDIM = 2
INSUBTYPES = 3
NAME_FOUND_ARRAY = 2
IN_SUBTYPE = 2
EXIT_SUBTYPE = 3
IN_DIM = 4
OUT_DIM = 5
IN_SUBTYPE_DIM = 6
def createType(name, dimpos=0, refdims=[], env={}):#{{{
    """Creates a type object from string representation.

    :param name: type description string, numpy dtype, or anything
        numpy.dtype() accepts.
    :param dimpos: number of enclosing dims the created dim depends on.
    :param refdims: known named dims the description may refer to.
    :param env: variables referenced by @name parameters in the string.
    :raises TypeError: for a numpy dtype with no internal equivalent.

    Examples:
    >>> createType("unicode")
    >>> createType("array(int64)[]")
    >>> createType("tuple(int64,unicode)")

    Improvements: the Python-2-only ``raise X, msg`` form was replaced by
    the ``raise X(msg)`` form (identical behavior, also valid syntax on
    Python 3), the itemsize arithmetic uses explicit floor division, and
    the duplicated 'S'-branch returns were merged.
    """
    if not isinstance(name, basestring):
        if not isinstance(name, numpy.dtype):
            name = numpy.dtype(name)
        if name.char == 'S':
            # Byte string: itemsize is the fixed character count; 0 means
            # the length is unknown.
            size = UNDEFINED if name.itemsize == 0 else name.itemsize
            dim = dimensions.Dim(size, (True,) * dimpos, False)
            return TypeBytes(dims=dimpaths.DimPath(dim))
        elif name.char == 'U':
            if name.itemsize == 0:
                dim = dimensions.Dim(UNDEFINED, (True,) * dimpos, False)
            else:
                # "test" has 4 characters, so its itemsize divided by 4
                # gives the bytes-per-character of this numpy build.
                usize = numpy.array("test").dtype.itemsize // 4
                dim = dimensions.Dim(name.itemsize // usize, (True,) * dimpos, False)
            return TypeString(dims=dimpaths.DimPath(dim))
        elif name in __objtype_map__:
            return __objtype_map__[name](False)
        else:
            raise TypeError("Unknown type description: " + str(name))
    return _createType(name, dimpos, refdims, env)#}}}
class TypeStringScanner(GenericScanner):#{{{
    # Tokenizer for the type-description mini-language.  The r'...'
    # docstrings of the t_* methods are not documentation: GenericScanner
    # (SPARK) reflects over them as the token regular expressions, so
    # their text must not be changed.
    def tokenize(self, input):
        # Collect tokens into self.rv via the t_* callbacks.
        self.rv = []
        GenericScanner.tokenize(self,input)
        return self.rv
    def t_whitespace(self,s):
        r' \s+ '
        pass
    def t_name(self,s):
        r' [a-zA-Z_][a-zA-Z_\d]* '
        t = Token(type='name',attr=s)
        self.rv.append(t)
    def t_integer(self,s):
        r' \d+ '
        t = Token(type="integer",attr=int(s))
        self.rv.append(t)
    def t_symbol(self,s):
        r' \= | \$ | \? | \. | \{ | \} | \< | \[ | \] | \( | \) | \, | \* | \~ | \! | \& | \# | \@ | \: | \^ | \| | \' '
        t = Token(type=s)
        self.rv.append(t)#}}}
class TypeStringParser(GenericParser):#{{{
    # Grammar of the type-description mini-language.  The docstrings of
    # the p_* methods are not documentation: GenericParser (SPARK) reads
    # them as the grammar productions, so their text must stay as-is.
    def __init__(self, start="typenest"):
        GenericParser.__init__(self, start)
    def p_param_1(self,args):
        ' param ::= @ name '
        return AST(type="param",kids=args[1:])
    def p_tmatcher(self, args):
        """
        tmatcher ::= typenest
        tmatcher ::= ^ typenest
        """
        # '^' marks a negated type matcher; keep the marker in the kids.
        if len(args) > 1:
            return AST(type="tmatcher",kids=args)
        else:
            return args[0]
    def p_tmatcher_list(self, args):
        """
        type_orlist ::= tmatcher
        type_orlist ::= type_orlist | tmatcher
        """
        return AST(type="type_orlist",kids=args[:1] + args[2:])
    def p_type_match_0(self, args):
        """
        type_andlist ::= type_orlist
        type_andlist ::= type_andlist & type_orlist
        """
        return AST(type="type_andlist", kids=args[:1] + args[2:])
    def p_type_andlist_1(self, args):
        """
        typenest ::= # type_andlist #
        """
        return AST(type="typenest", kids=args[1:2])
    def p_dmatcher(self, args):
        """
        dmatcher ::= nameddim
        dmatcher ::= ^ nameddim
        """
        if len(args) > 1:
            return AST(type="dmatcher",kids=args)
        else:
            return args[0]
    def p_dmatcher_list(self, args):
        """
        dim_orlist ::= dmatcher
        dim_orlist ::= dim_orlist | dmatcher
        """
        return AST(type="dim_orlist",kids=args[:1] + args[2:])
    def p_dim_match_0(self, args):
        """
        dim_andlist ::= dim_orlist
        dim_andlist ::= dim_andlist & dim_orlist
        """
        return AST(type="dim_andlist", kids=args[:1] + args[2:])
    def p_dim_andlist(self, args):
        """
        nameddim ::= # dim_andlist #
        """
        return AST(type="nameddim", kids=args[1:2])
    def p_param_call(self, args):
        """
        param ::= param ( typelist )
        param ::= param ( dimlist )
        """
        return AST(type="param_call", kids=[args[0], args[2]])
    def p_type_1(self,args):
        """
        type ::= param
        type ::= name
        type ::= ?
        """
        return AST(type="createtype",kids=args[:1])
    def p_type_2(self,args):
        """
        type ::= type $
        type ::= type ?
        type ::= type !
        """
        # '$'/'?' mark has_missing=True, '!' marks has_missing=False
        # (interpreted in rewriter pass 1).
        return AST(type="hasmissing", kids=args)
    def p_type_2b(self,args):
        """
        type ::= type '
        """
        return AST(type="strict", kids=args)
    def p_type_3(self,args):
        ' type ::= type [ dimlist ] '
        return AST(type="dims",kids=(args[0], args[2]))
    def p_type_3b(self,args):
        ' type ::= type [ ] '
        return AST(type="dims",kids=(args[0],))
    def p_type_4(self,args):
        ' type ::= type ( typelist ) '
        return AST(type="subtypes",kids=(args[0], args[2]))
    def p_type_6(self,args):
        ' type ::= [ dimlist ] '
        # Bare '[dims]' is shorthand for an array type.
        return AST(type="dims",kids=(
            AST(type="createtype", kids=(Token(type="name",attr="array"),)), args[1]))
    def p_type_7(self,args):
        ' type ::= { dimlist } '
        # Bare '{dims}' is shorthand for a set type.
        return AST(type="dims",kids=(
            AST(type="createtype", kids=(Token(type="name",attr="set"),)), args[1]))
    def p_type_8(self,args):
        '''
        typeelem ::= typenest
        typeelem ::= name = typenest
        '''
        if len(args) == 1:
            return args[0]
        else:
            return AST(type="typeelem",kids=(args[2], args[0]))
    def p_type_9(self,args):
        ' type ::= ( typelist ) '
        # Bare '(types)' is shorthand for a tuple type.
        return AST(type="subtypes",kids=(
            AST(type="createtype", kids=(Token(type="name",attr="tuple"),)), args[1]))
    def p_type_9b(self,args):
        ' type ::= { typelist } '
        return AST(type="subtypes",kids=(
            AST(type="createtype", kids=(Token(type="name",attr="record_dict"),)), args[1]))
    def p_typenest_2(self,args):
        # NOTE(review): the last rule below reads "typenset" (sic); as
        # written it defines an unused nonterminal instead of a fifth
        # "typenest" alternative.  Changing the docstring would change the
        # grammar, so it is flagged here rather than edited - confirm
        # whether "typenest ::= typenest : typenest" was intended.
        """
        typenest ::= type
        typenest ::= typenest < type
        typenest ::= typenest : type
        typenest ::= typenest < typenest
        typenset ::= typenest : typenest
        """
        return AST(type="typenest",kids=args)
    def p_var_1(self,args):
        '''
        var ::= .
        var ::= *
        '''
        return args[0]
    def p_varlist_1(self,args):
        '''
        varlist ::= var
        '''
        return AST(type="varlist",kids=args[:1])
    def p_varlist_2(self,args):
        '''
        varlist ::= varlist var
        '''
        return AST(type="varlist",kids=args)
    def p_dim_1(self,args):
        '''
        dim ::= integer
        dim ::= ~
        dim ::= varlist
        dim ::= param
        '''
        return AST(type="createdim",kids=args)
    def p_nameddim_1(self,args):
        ''' nameddim ::= name : dim '''
        return AST(type="namedim",kids=(args[2], args[0]))
    def p_nameddim_2(self,args):
        ''' nameddim ::= dim '''
        return args[0]
    def p_nameddim_3(self,args):
        ''' nameddim ::= name '''
        # A bare name inherits its dim properties from the annotations.
        return AST(type="namedim",kids=(
            AST(type="createdim", kids=(Token(type="inherit"),)), args[0]))
    def p_nameddim_4(self,args):
        '''
        nameddim ::= nameddim $
        nameddim ::= nameddim ?
        nameddim ::= nameddim !
        '''
        return AST(type="hasmissing",kids=args)
    def p_dim_6(self, args):
        """
        dimelem ::= nameddim
        dimelem ::= ?
        dimelem ::= ? ?
        """
        # '?' matches exactly one dim, '??' matches any number of dims
        # (used only by the match interpreter).
        if(len(args) > 1):
            ntoken = Token(type="??")
        else:
            ntoken = args[0]
        return ntoken
    def p_dimlist_1(self,args):
        '''
        dimlist ::= dimelem
        dimlist ::= dimlist , dimelem
        '''
        return AST(type="dimlist",kids=args[:1] + args[2:])
    def p_typelist_1(self,args):
        ''' typelist ::= typeelem
            typelist ::= typelist , typeelem '''
        return AST(type="typelist", kids=(args[:1] + args[2:]))#}}}
class TypeStringASTRewriterPass1(GenericASTRewriter):#{{{
    # First rewrite pass over the parsed type-string AST: flattens the
    # left-recursive list nodes, normalizes create* nodes to a fixed kids
    # layout, and collects annotations for named dims.
    def process(self, tree):
        # Returns (rewritten tree, {dim name: (has_missing, dependent, shape)}).
        self.dim_annot = {}
        return (self.postorder(tree), self.dim_annot)
    def collapse_list(listname):
        # Factory executed at class-creation time (note: no `self`
        # parameter); builds a rewrite method that flattens one level of a
        # left-recursive list node named `listname`.
        def func(self, node):
            if(len(node.kids) > 1):
                if node.kids[0].type == listname:
                    node.kids = tuple(node.kids[0].kids) + (node.kids[1],)
                return node
            else:
                return node.kids[0]
        return func
    n_type_orlist = collapse_list("type_orlist")
    n_dim_orlist = collapse_list("dim_orlist")
    n_type_andlist = collapse_list("type_andlist")
    n_dim_andlist = collapse_list("dim_andlist")
    n_typelist = collapse_list("typelist")
    def n_dimlist(self, node):
        # Flatten and record annotations for every dim element.
        if(node.kids[0].type == "dimlist"):
            node.kids = tuple(node.kids[0].kids) + (self.annotateDim(node.kids[1]),)
        else:
            node.kids = (self.annotateDim(node.kids[0]),)
        return node
    def n_varlist(self, node):
        # Flatten; each var becomes True ('.') or False ('*').
        if(node.kids[0].type == "varlist"):
            node.kids = tuple(node.kids[0].kids) + (self.processVar(node.kids[1]),)
        else:
            node.kids = (self.processVar(node.kids[0]),)
        return node
    def processVar(self,node):
        if(node.type == '.'):
            return True
        elif(node.type == '*'):
            return False
        else:
            raise RuntimeError, "Unexpected character as dim var"
    def n_typenest(self,node):
        # Flatten the nest and record, per step, whether '<' (packed) or
        # ':' (unpacked) joined it, in node.packed.
        if(len(node.kids) > 1):
            if node.kids[2].type == "typenest":
                node.packed = node.kids[0].packed + [node.kids[1].type == "<"] + node.kids[2].packed
                node.kids = tuple(node.kids[0].kids) + tuple(node.kids[2].kids)
            else:
                node.packed = node.kids[0].packed + [node.kids[1].type == "<"]
                node.kids = tuple(node.kids[0].kids) + (node.kids[2],)
        else:
            node.packed = []
            node.kids = (node.kids[0],)
        return node
    def n_createtype(self, node):
        node.kids = [node.kids[0], None, None, None, None, False] #hasmissing, name, dims, subtypes, strict
        return node
    def n_createdim(self, node):
        node.kids = [node.kids[0], None, None, False] #hasmissing, name, strict
        return node
    def n_hasmissing(self, node):
        # '?' and '$' set has_missing True, '!' sets it False; the flag is
        # stored into slot 1 of the normalized create* node.
        if(node.kids[0].type == "createtype"):
            node.kids[0].kids[1] = node.kids[1].type in ("?","$")
        elif(node.kids[0].type == "createdim"):
            node.kids[0].kids[1] = node.kids[1].type in ("?","$")
        else:
            raise RuntimeError, "Invalid AST!"
        return node.kids[0]
    def n_strict(self, node):
        if(node.kids[0].type == "createtype"):
            node.kids[0].kids[5] = True
        elif(node.kids[0].type == "createdim"):
            node.kids[0].kids[3] = True
        else:
            raise RuntimeError, "Invalid AST!"
        return node.kids[0]
    def n_dims(self, node):
        assert node.kids[0].type == "createtype", "Invalid AST!"
        if(len(node.kids) > 1):
            assert node.kids[1].type == "dimlist", "Invalid AST!"
            node.kids[0].kids[3] = node.kids[1]
        return node.kids[0]
    def n_subtypes(self, node):
        assert node.kids[0].type == "createtype", "Invalid AST!"
        assert node.kids[1].type == "typelist", "Invalid AST!"
        node.kids[0].kids[4] = node.kids[1]
        return node.kids[0]
    def n_namedim(self, node):
        if(node.kids[0].type == "createdim"):
            node.kids[0].kids[2] = node.kids[1].attr
        else:
            return node
        return node.kids[0]
    def n_typeelem(self, node):
        if(node.kids[0].type == "createtype"):
            node.kids[0].kids[2] = node.kids[1].attr
        else:
            return node
        return node.kids[0]
    def annotateDim(self,node):
        # Record (has_missing, dependent, shape) for a named dim, merging
        # with (and validating against) earlier occurrences of the name.
        if not node.type == "createdim":
            return node
        depshape, has_missing, name, strict = node.kids
        if(name is None):
            return node
        if(depshape.type == "varlist"):
            # Drop trailing non-dependent markers.
            dependent = depshape.kids
            while(dependent and dependent[-1] is False):
                dependent = dependent[:-1]
            dependent = tuple(dependent)
            shape = UNDEFINED
        elif(depshape.type == "integer"):
            dependent = tuple()
            shape = depshape.attr
        elif(depshape.type == "~"):
            dependent = "~"
            shape = UNDEFINED
        elif(depshape.type == "inherit"):
            dependent = None
            shape = None
        else:
            raise RuntimeError, "Invalid AST!"
        if(name in self.dim_annot):
            ahas_missing, adependent, ashape = self.dim_annot[name]
            if(not ashape is None):
                if(not shape is None and shape != ashape):
                    raise RuntimeError, "Similar named dim: " + name + " with different shape: " + str(shape) + ", " + str(ashape)
                shape = ashape
            if(not adependent is None):
                if(not dependent is None and dependent != adependent):
                    raise RuntimeError, "Similar named dim: " + name + " with different dependent dims: " + str(dependent) + ", " + str(adependent)
                dependent = adependent
            if(not ahas_missing is None):
                if(not has_missing is None and has_missing != ahas_missing):
                    raise RuntimeError, "Similar named dim: " + name + " with different has_missing flag: " + str(has_missing) + ", " + str(ahas_missing)
                has_missing = ahas_missing
        self.dim_annot[name] = (has_missing, dependent, shape)
        return node#}}}
class TypeStringASTRewriterPass2(GenericASTRewriter):#{{{
    """Second rewrite pass: folds a flat typenest node into a chain of
    nested createtype nodes, each becoming the single subtype (kids slot 4)
    of its predecessor."""
    def process(self, tree):
        return self.postorder(tree)

    def n_typenest(self, node):
        remaining = list(node.kids)
        while len(remaining) > 1:
            innermost = remaining.pop()
            assert innermost.type == "createtype", "Invalid AST!"
            assert remaining[-1].type == "createtype", "Invalid AST!"
            remaining[-1].kids[4] = AST(type="typelist", kids=(innermost,))
        return remaining[0]#}}}
class TypeStringASTInterpreter(object):#{{{
    # Walks the rewritten type-string AST and constructs the actual
    # type and dim objects.
    def __init__(self, dim_annot, refdims, env={}):
        self.env = env
        self.dim_annot = dim_annot
        # Known named dims, pre-seeded from refdims and extended lazily.
        self.dims = dict([(dim.name,dim) for dim in refdims])
    def visit(self, node, dimpos=0):
        # Dispatch on node.type to the matching n_<type> handler.
        name = 'n_' + str(node.type)
        if hasattr(self, name):
            func = getattr(self, name)
            return func(node, dimpos)
        else:
            raise RuntimeError, "Cannot find method to process: " + name
    def n_createtype(self, node, dimpos=0):
        # Build a type object; dimpos counts the enclosing dims seen so far.
        assert node.type == "createtype", "Cannot create type from this specification!"
        typetoken, has_missing, name, dimlist, subtypelist, strict = node.kids
        if has_missing is None:
            has_missing = False
        if(typetoken.type == '?'):
            typename = "?"
        else:
            typename = typetoken.attr
        if(typename not in __typenames__):
            raise RuntimeError, "Unknown type name: " + str(typename)
        typecls = __typenames__[typename]
        kwargs = {}
        if has_missing:
            kwargs['has_missing'] = True
        if(not dimlist is None):
            dims = dimpaths.DimPath(*[self.visit(dimnode, dimpos + pos) for pos, dimnode in enumerate(dimlist.kids)])
            dimpos += len(dims)
            kwargs['dims'] = dims
        elif(issubclass(typecls,TypeArray)):
            # Array type without explicit dims: default to one undefined,
            # fully dependent dim.
            dims = dimpaths.DimPath(dimensions.Dim(UNDEFINED,(True,) * dimpos))
            dimpos += 1
            kwargs['dims'] = dims
        if not subtypelist is None:
            subtypes = tuple([self.visit(subtypenode, dimpos) for subtypenode in subtypelist.kids])
            kwargs['subtypes'] = subtypes
            if(issubclass(typecls,TypeTuple)):
                # Derive field names from named elements; unnamed fields
                # become f0, f1, ...
                fieldnames = []
                for pos, subtypenode in enumerate(subtypelist.kids):
                    if subtypenode.type == "createtype":
                        name = subtypenode.kids[2]
                    elif subtypenode.type == "typeelem":
                        name = subtypenode.kids[1].attr
                    else:
                        name = None
                    if name is None:
                        fieldnames.append("f" + str(pos))
                    else:
                        fieldnames.append(util.valid_name(name))
                kwargs['fieldnames'] = tuple(fieldnames)
        if 'dims' in kwargs and len(kwargs['dims']) > 1:
            # Multiple dims: peel them into nested TypeArray objects,
            # innermost dim first.
            assert typecls == TypeArray, "Multidimensional types only allowed with arrays"
            dims = kwargs['dims'][::-1]
            kwargs['dims'] = dims[:1]
            subtype = typecls(**kwargs)
            for pos in xrange(1,len(dims)):
                subtype = TypeArray(dims=dims[pos:(pos+1)], subtypes=(subtype,))
            return subtype
        else:
            return typecls(**kwargs)
    def n_typeelem(self, node, dimpos):
        return self.visit(node.kids[0],dimpos)
    def n_param(self, node,dimpos):
        # '@name' looks up an already-constructed object in env.
        if not node.kids[0].attr in self.env:
            raise RuntimeError, "Cannot find variable '" + str(node.kids[0].attr) + "' in given variables"
        return self.env[node.kids[0].attr]
    def n_createdim(self, node, dimpos):
        # Build (or reuse, for named dims) a Dim object.
        assert node.type == "createdim", "Cannot create dim from this specification!"
        dimnode, has_missing, name, strict = node.kids
        if has_missing is None:
            has_missing = False
        if(dimnode.type == "varlist"):
            dependent = tuple(dimnode.kids)
            shape = UNDEFINED
        elif(dimnode.type == "integer"):
            dependent = tuple()
            shape = dimnode.attr
        elif(dimnode.type == "~"):
            # '~': variable shape, dependent on all enclosing dims.
            dependent = (True,) * dimpos
            shape = UNDEFINED
        elif(dimnode.type == "inherit"):
            dependent = None
            shape = None
        else:
            raise RuntimeError, "Invalid AST!"
        if(not name is None):
            assert name in self.dim_annot, "Unannotated named dim found!"
            if(not name in self.dims):
                has_missing,dependent,shape = self.dim_annot[name]
                if(dependent is None):
                    dependent = tuple()
                elif(dependent == "~"):
                    dependent = (True,) * dimpos
                if(shape is None):
                    shape = UNDEFINED
                self.dims[name] = dimensions.Dim(shape,dependent,has_missing, name=name)
            dim = self.dims[name]
            # Validate the reused dim against this occurrence.
            if(len(dim.dependent) > dimpos):
                raise RuntimeError, "Dim: " + name + " has too many dependent dims: " + str(len(dim.dependent)) + " (max: " + str(dimpos) + ")"
            if dim.shape != shape and not shape is None and not shape == UNDEFINED and not dim.shape == UNDEFINED:
                raise RuntimeError, "Dim: " + name + " dimension unequal to known dim"
            return dim
        return dimensions.Dim(shape,dependent,has_missing, name=name) #}}}
def _createType(name, dimpos=0, refdims=[], env={}):
    """Parse a type-description string and build the corresponding type:
    scan, parse, two rewrite passes, then interpretation."""
    tokens = TypeStringScanner().tokenize(name)
    tree = TypeStringParser().parse(tokens)
    tree, dim_annotation = TypeStringASTRewriterPass1().process(tree)
    tree = TypeStringASTRewriterPass2().process(tree)
    interpreter = TypeStringASTInterpreter(dim_annotation, refdims, env)
    return interpreter.visit(tree, dimpos)
class TypeStringMatchASTInterpreter(object):
    """Matches a parsed type-pattern AST against an actual type object.

    Every n_* handler returns a ``(matched, subtypes)`` pair, where
    ``matched`` is a bool and ``subtypes`` is a tuple of types that the
    next matcher in the nest is applied to (see ``visit``, which unpacks
    exactly two values).

    Bug fixes in this revision:
    * ``n_dim_andlist`` assigned to a misspelled local (``xdism``) and then
      read ``xdims`` -> NameError on every call.
    * ``n_param`` and one branch of ``n_createtype`` returned a bare
      ``False`` where callers unpack a 2-tuple -> TypeError on failure.
    * ``p_createtype`` raised when the packed type *was* "array", although
      the message states only array types are allowed; the guard is now
      inverted to match the message.
    * Python-2-only ``raise X, msg`` replaced by ``raise X(msg)`` (same
      behavior, also valid on Python 3); typos in error messages fixed.
    """
    def process(self, tree, dims, matchtype, env, dim_annotation):
        self.packdepth = 0
        self.error_message = ""
        self.env = env
        self.dim_annotations = dim_annotation
        res = self.n_typenest(tree, matchtype, dims)
        return (res, self.packdepth, self.error_message, self.env)

    def setError(self, message):
        self.error_message = message

    def unsetError(self):
        self.error_message = ""

    def visit(self, node, matchers):
        # Dispatch to n_<type>; `matchers` may be a single object or a
        # tuple of candidates (any match counts, subtype tuples are merged).
        name = 'n_' + str(node.type)
        if hasattr(self, name):
            func = getattr(self, name)
            if isinstance(matchers, tuple):
                mres = []
                for matcher in matchers:
                    mres.append(func(node, matcher))
                res = any([x for x, y in mres])
                ms = sum([y for x, y in mres], ())
            else:
                res, ms = func(node, matchers)
            return (res, ms)
        else:
            raise RuntimeError("Cannot find method to process: " + name)

    def paramvisit(self, node, dims):
        # Dispatch to p_<type>: handlers for packed (':') matchers.
        name = 'p_' + str(node.type)
        if hasattr(self, name):
            func = getattr(self, name)
            return func(node, dims)
        else:
            raise RuntimeError("Cannot find method to process: " + name)

    def n_typenest(self, node, matchtype, dims=None):
        assert node.type == "typenest", "Invalid AST!"
        packlist = node.packed + [False]
        # Packed matchers consume dims right-to-left first.
        for packed, kid in zip(packlist[::-1], node.kids[::-1]):
            if packed:
                res, dims = self.paramvisit(kid, dims)
                if res is False:
                    return (False, ())
            else:
                if self.packdepth > 0:
                    raise RuntimeError("Cannot intermix packed and non-packed types (unpacked: <, packed: :)")
                dims = None
        # Then the unpacked matchers walk the type left-to-right.
        matchtypes = matchtype
        for pos, (packed, matcher) in enumerate(zip(packlist, node.kids)):
            if packed:
                continue
            res, matchtypes = self.visit(matcher, matchtypes)
            if res is False:
                return (False, ())
        return (True, matchtypes)

    def subtype(self, matchtypes):
        # Step one level down: collect the single subtype of each matched
        # type (types without exactly one subtype are dropped).
        if isinstance(matchtypes, tuple):
            res = []
            for matchtype in matchtypes:
                if hasattr(matchtype, 'subtypes') and len(matchtype.subtypes) == 1:
                    res.append(matchtype.subtypes[0])
            res = tuple(res)
        else:
            if hasattr(matchtypes, 'subtypes') and len(matchtypes.subtypes) == 1:
                res = matchtypes.subtypes
            else:
                res = ()
        return res

    def p_createtype(self, node, dims):
        # Packed matcher: consumes its dims from the tail of `dims`.
        if not dims:
            self.setError("Asked to pack dimension, but no dimension found")
            return (False, None)
        # Fix: guard was '== "array"', raising for the only allowed type.
        if node.kids[0].attr != "array":
            raise RuntimeError("Only array type allowed in packed type matcher")
        dimlist = node.kids[3]
        if any([d.type == "??" for d in dimlist.kids]):
            raise RuntimeError("No variable dimension lists (??) allowed in packed type matcher")
        res, rdims = self.visit(dimlist, (dims[-len(dimlist):],))
        self.packdepth += len(dimlist)
        return (res, dims[:-len(dimlist)])

    def n_param(self, node, matchtype):
        # '@name': all occurrences of the variable must match equal types.
        name = node.kids[0].attr
        if name in self.env and self.env[name] != matchtype:
            self.setError("Matches of multiple recurrences of variable: " + name + " are not equal")
            # Fix: was a bare `return False`; visit() unpacks a 2-tuple.
            return (False, ())
        self.env[name] = matchtype
        return (True, self.subtype((matchtype,)))

    def n_type_orlist(self, node, matchtype):
        results = [self.visit(matcher, matchtype) for matcher in node.kids]
        res = any([x for x, m in results])
        ms = sum([m for x, m in results if x], ())
        if res is True:
            self.unsetError()
        return (res, ms)

    def n_type_andlist(self, node, matchtype):
        results = [self.visit(matcher, matchtype) for matcher in node.kids]
        res = all([x for x, m in results])
        ms = sum([m for x, m in results], ())
        return (res, ms)

    def n_tmatcher(self, node, matchtype):
        # '^' negates the nested matcher's verdict.
        if node.kids[0].type == "^":
            res, ms = self.visit(node.kids[1], matchtype)
            res = not res
            if res is False:
                self.setError("Type " + str(matchtype) + " matches negative constraint")
            else:
                self.unsetError()
            return (res, ms)
        else:
            return self.visit(node.kids[0], matchtype)

    def n_createtype(self, node, matchtype):
        typetoken, has_missing, name, dimlist, subtypelist, strict = node.kids
        if(typetoken.type == '?'):
            return (True, ())
        elif(typetoken.type == 'param'):
            res, ms = self.visit(typetoken, matchtype)
        else:
            typename = typetoken.attr
            if(typename not in __typenames__):
                raise RuntimeError("Unknown type name: " + str(typename))
            typecls = __typenames__[typename]
            if not isinstance(matchtype, typecls):
                self.setError("Type class mismatch for: " + str(matchtype) + ", expected: " + typecls.name)
                return (False, ())
        if not has_missing is None and not matchtype.has_missing is has_missing:
            self.setError("Type: " + str(matchtype) + " has incorrect has_missing state")
            return (False, ())
        if not dimlist is None:
            if not hasattr(matchtype, "dims"):
                self.setError("Required type with dimensions, but found: " + str(matchtype))
                return (False, ())
            res, md = self.visit(dimlist, (matchtype.dims,))
            if res is False:
                return (False, ())
        if not subtypelist is None:
            if not hasattr(matchtype, "subtypes"):
                self.setError("Required type with subtypes, but found: " + str(matchtype))
                return (False, ())
            if subtypelist.type == "typelist":
                if not len(subtypelist.kids) == len(matchtype.subtypes):
                    self.setError("Incorrect number of subtypes, expected: " + str(len(subtypelist.kids)) + " but found: " + str(len(matchtype.subtypes)))
                    return (False, ())
                for subtype, kid in zip(matchtype.subtypes, subtypelist.kids):
                    res, ms = self.visit(kid, subtype)
                    if res is False:
                        return (False, ())
            elif subtypelist.type == "createtype":
                if not len(matchtype.subtypes) == 1:
                    self.setError("Incorrect number of subtypes, expected 1 but found: " + str(len(matchtype.subtypes)))
                    # Fix: was a bare `return False`; visit() unpacks a 2-tuple.
                    return (False, ())
                res, ms = self.visit(subtypelist, matchtype.subtypes)
                if res is False:
                    return (False, ())
            else:
                raise RuntimeError("Unexpected subtypelist in AST")
        return (True, self.subtype(matchtype))

    def n_typeelem(self, node, matchtype):
        return self.visit(node.kids[0], matchtype)

    def n_createdim(self, node, dim):
        if not isinstance(dim, dimensions.Dim):
            self.setError("Expected dimension, but found none")
            return (False, ())
        dimnode, has_missing, name, strict = node.kids
        if not has_missing is None and not dim.has_missing is has_missing:
            self.setError("Dimension: " + str(dim) + " has incorrect has_missing state")
            return (False, ())
        if not name is None:
            # Same-named dims in one pattern must bind to the same dim.
            if isinstance(self.dim_annotations[name], dimensions.Dim):
                if not self.dim_annotations[name] == dim:
                    self.setError("Dimensions with same name: " + name + " in matcher do not match in type for " + str(self.dim_annotations[name]) + " and " + str(dim))
                    return (False, ())
            self.dim_annotations[name] = dim
        if(dimnode.type == "varlist"):
            dependent = tuple(dimnode.kids)
            while(dependent and dependent[-1] is False):
                dependent = dependent[:-1]
            if not dim.dependent == dependent:
                self.setError("Dimension " + str(dim) + " does have incorrect dependence structure")
                return (False, ())
        elif(dimnode.type == "integer"):
            if not dim.shape == dimnode.attr:
                self.setError("Dimension " + str(dim) + " does have incorrect shape (" + str(dim.shape) + " instead of " + str(dimnode.attr) + ")")
                return (False, ())
        elif(dimnode.type == "~"):
            if not dim.dependent:
                self.setError("Dimension " + str(dim) + " should be variable but is not")
                return (False, ())
        else:
            raise RuntimeError("Invalid AST!")
        return (True, ())

    def n_dimlist(self, node, dims):
        return self.match(node.kids, dims)

    def n_nameddim(self, node, dims):
        return self.visit(node.kids[0], (dims,))

    def n_dim_orlist(self, node, dims):
        xdims = (dims,)
        results = [self.visit(matcher, xdims) for matcher in node.kids]
        res = any([x for x, m in results])
        ms = sum([m for x, m in results if x], ())
        if res is True:
            self.unsetError()
        return (res, ms)

    def n_dim_andlist(self, node, dims):
        # Fix: was `xdism = (dims,)` (typo) followed by a read of `xdims`,
        # raising NameError whenever a dim '&' matcher was used.
        xdims = (dims,)
        results = [self.visit(matcher, xdims) for matcher in node.kids]
        res = all([x for x, m in results])
        ms = sum([m for x, m in results], ())
        return (res, ms)

    def match(self, dim_matchers, dims):
        # Recursively align the matcher list with the dim path; '?' eats
        # exactly one dim, '??' tries every possible split.
        if not dim_matchers and not dims:
            return (True, ())
        elif not dims:
            self.setError("Expected dimension, but found none")
            return (False, ())
        elif not dim_matchers:
            self.setError("Expected no dimension, but found one")
            return (False, ())
        if dim_matchers[0].type == "?":
            if len(dims) == 0:
                self.setError("Expected dimension, but found none")
                return (False, ())
            return self.match(dim_matchers[1:], dims[1:])
        elif dim_matchers[0].type == '??':
            mres = [self.match(dim_matchers[1:], dims[pos:]) for pos in xrange(len(dims) + 1)]
            res = any([x for x, y in mres])
            if res is True:
                self.unsetError()
            return (res, ())
        else:
            res, md = self.visit(dim_matchers[0], dims[0])
            if res is False:
                return (False, ())
            return self.match(dim_matchers[1:], dims[1:])
def matchType(name, matchtype, env=None, dims=None):
    """Match `matchtype` (and optionally `dims`) against the type-pattern
    string `name`; returns the match interpreter's result tuple."""
    tokens = TypeStringScanner().tokenize(name)
    tree = TypeStringParser().parse(tokens)
    tree, dim_annotation = TypeStringASTRewriterPass1().process(tree)
    if env is None:
        env = {}
    matcher = TypeStringMatchASTInterpreter()
    return matcher.process(tree, dims, matchtype, env, dim_annotation)
#### HELPER functions #########
def mostSpecializedTypesCls(typeclasses):
    """Takes list of typeclasses, and removes the
    classes that also have a subclass of themself in the list.

    Improvement: uses set().union(*...) instead of reduce(operator.__or__, ...),
    which raised a TypeError for an empty input list; an empty list now
    yields an empty result.
    """
    # Every strict base class of any listed class.
    basetypes = set().union(*[set(t.__mro__[1:]) for t in typeclasses])
    return [tcls for tcls in typeclasses if tcls not in basetypes]
def mostSpecializedTypes(typeobjs):
    """Takes list of typeobjects, and removes the
    objects that also have a subclass of themself in the list"""
    # Same strategy as mostSpecializedTypesCls, but keyed on each object's
    # class; see that function for why set().union replaces reduce.
    basetypes = set().union(*[set(t.__class__.__mro__[1:]) for t in typeobjs])
    return [tobj for tobj in typeobjs
            if tobj.__class__ not in basetypes]
### sets ###
# Convenience groupings over the type hierarchy, keyed off each root's
# descendants.
TypeAll = set(TypeAny.getDescendantTypes())
TypeNumbers = set(TypeNumber.getDescendantTypes())
TypeStrings = set(TypeString.getDescendantTypes()) - {TypePickle}
TypeArrays = set(TypeArray.getDescendantTypes())
TypeIntegers = set(TypeInteger.getDescendantTypes())
TypeReals = set(TypeReal64.getDescendantTypes()) - TypeIntegers
# Pointer-sized integer type for the host platform.
TypePlatformInt = TypeInt32 if platform.architecture()[0] == "32bit" else TypeInt64
__typenames__["int"] = TypePlatformInt
__typenames__['unknown'] = TypeUnknown
import asyncio
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from logging import getLogger
from math import ceil
from re import Pattern
from typing import Dict, Optional, Tuple
import aiohttp
logger = getLogger(__name__)
@dataclass(frozen=True)
class Path:
    """An immutable description of one rate-limited API route.

    Frozen so instances can safely serve as stable identity/keys.
    """
    name: str
    """The name of the path. This will be the value provided by :attr:`.Ratelimit.path`."""
    path_regex: Pattern
    """A compiled regex pattern matching the path, used when the path has a variable, such as ``/action/{id}``."""
    method: Optional[str] = None
    """The HTTP method for the path. Leave None if ratelimit applies to all methods."""
@dataclass()
class PathRatelimit:
    """An object that allows the request method to check the ratelimit before making a response."""
    path: Path
    """A :class:`~.Path` object."""
    ratelimit_amount: int
    """Analogous to :attr:`.Ratelimit.ratelimit_amount`"""
    ratelimit_time: int
    """The amount of time needed for the ratelimit to expire after the first use."""
    ratelimit_expires: datetime = field(default=datetime.min, init=False)
    """Analogous to :attr:`.Ratelimit.ratelimit_expires`"""
    ratelimit_used: int = field(default=0, init=False)
    """How many times the path has been called since the last ratelimit expire."""
    ratelimit_enqueued: int = field(default=0, init=False)
    """How many requests are currently sleeping. This is used to up the sleep time to prevent a number of requests
    more than double the ``ratelimit_time`` amount of requests."""

    def time_until_expire(self) -> timedelta:
        """Returns a :class:`datetime.timedelta` representing the amount of seconds for the ratelimit to expire."""
        # Negative once the expiry timestamp is already in the past.
        return self.ratelimit_expires - datetime.utcnow()

    def can_call(self, method: str) -> bool:
        """Returns whether or not this route can be used right now.

        :param method: The HTTP method being used.
        :type method: str
        :return: Whether or not this route can be used without ratelimit.
        :rtype: bool
        """
        if self.path.method == method or self.path.method is None:
            # Callable while quota remains, or once the expiry time has
            # already passed (time_until_expire() is then negative).
            return self.ratelimit_used < self.ratelimit_amount or self.time_until_expire() < timedelta(microseconds=-1)
        else:
            # The ratelimit is scoped to a different HTTP method.
            return True

    def expire(self):
        """Expire the ratelimit."""
        # Reset the window: no uses recorded, no pending expiry time.
        self.ratelimit_used = 0
        self.ratelimit_expires = datetime.min

    def update(self, response: aiohttp.ClientResponse):
        """Update the path's ratelimit based on the headers.

        :param response: The response object.
        :type response: aiohttp.ClientResponse
        """
        headers = response.headers
        if self.ratelimit_expires == datetime.min:
            # First tracked request of a window: start the expiry clock now.
            self.ratelimit_expires = datetime.utcnow() + timedelta(seconds=self.ratelimit_time)
        if headers.get("x-ratelimit-limit", ""):
            self.ratelimit_amount = int(headers["x-ratelimit-limit"])
        if headers.get("x-ratelimit-retry-after", ""):
            # Server-provided expiry wins, but only if it pushes the expiry
            # further into the future than our local estimate.
            new_ratelimit = datetime.utcfromtimestamp(int(headers["x-ratelimit-retry-after"]))
            if new_ratelimit > self.ratelimit_expires:
                self.ratelimit_expires = new_ratelimit
        if headers.get("x-ratelimit-remaining", ""):
            self.ratelimit_used = self.ratelimit_amount - int(headers["x-ratelimit-remaining"])
        else:
            # No header to sync against; count the call ourselves.
            self.ratelimit_used += 1
class Ratelimits:
    """An object holding all of the various ratelimits.

    :param ratelimits: The :class:`.PathRatelimit` objects to register.
    :type ratelimits: PathRatelimit
    """

    ratelimit_dictionary: Dict[Pattern, PathRatelimit]
    """A dictionary where the keys are regex patterns representing the paths and the values are
    :class:`~.PathRatelimit` objects."""

    def __init__(self, *ratelimits: PathRatelimit):
        self.ratelimit_dictionary = {}
        # _check_lock serializes ratelimit lookups; _enqueue_lock guards the
        # per-path counter of currently-sleeping requests.
        self._check_lock = asyncio.Lock()
        self._enqueue_lock = asyncio.Lock()
        for item in ratelimits:
            self.add(item)

    def add(self, obj: PathRatelimit):
        """Add a new ratelimit. If the path is the same as an existing path, it will be overwritten.

        :param obj: The new ratelimit object to add.
        :type obj: PathRatelimit
        """
        self.ratelimit_dictionary[obj.path.path_regex] = obj

    def remove(self, obj: PathRatelimit):
        """Remove a ratelimit.

        :param obj: The new ratelimit object to remove.
        :type obj: PathRatelimit
        """
        self.ratelimit_dictionary.pop(obj.path.path_regex)

    async def check(self, url: str, method: str) -> Tuple[float, Optional[PathRatelimit]]:
        """Check if a path is ratelimited.

        :param url: The path, starting with ``/``
        :type url: str
        :param method: The HTTP method being used.
        :type method: str
        :return: A number representing the amount of seconds before ratelimit expire or -1 if there is no need to
            ratelimit as well as the :class:`~.PathRatelimit` object if found.
        :rtype: Tuple[float, Optional[PathRatelimit]]
        """
        # We want to check with priority.
        # For example, ``/url/x/something`` matches both ``/url/{id}`` and ``/url/{id}/something``.
        # We want to match the second one because that is how API ratelimits work.
        ratelimit_obj = None
        obj_priority = 0  # Actually just the number of slashes in the regex
        async with self._check_lock:
            for regex, obj in self.ratelimit_dictionary.items():
                if regex.match(url):
                    count = regex.pattern.count("/")
                    if count > obj_priority:
                        obj_priority = count
                        ratelimit_obj = obj
            if ratelimit_obj is None:
                return -1, ratelimit_obj
            if ratelimit_obj.can_call(method):
                return -1, ratelimit_obj
            # NOTE(review): the second term is meant to stretch the wait when
            # many requests are already queued (see ratelimit_enqueued's
            # docstring); confirm the intended grouping of
            # ``ratelimit_enqueued // 60`` versus the trailing ``+ 1``.
            return (
                ceil(
                    ratelimit_obj.time_until_expire().total_seconds()
                    + (ratelimit_obj.ratelimit_time * ((ratelimit_obj.ratelimit_enqueued // 60)) + 1)
                ),
                ratelimit_obj,
            )

    async def sleep(self, url: str, method: str) -> Optional[PathRatelimit]:
        """Helper function that sleeps the amount of time returned by :meth:`.check`.

        :param url: The path, starting with ``/``
        :type url: str
        :param method: The HTTP method being used.
        :type method: str
        :return: The :class:`~.PathRatelimit` object if found
        :rtype: :class:`~.PathRatelimit`
        """
        time_to_sleep, return_val = await self.check(url, method)
        if return_val and time_to_sleep > 0:
            # Track how many coroutines are parked on this path so check()
            # can lengthen subsequent wait estimates.
            async with self._enqueue_lock:
                return_val.ratelimit_enqueued += 1
            logger.warning("Sleeping for %s seconds.", time_to_sleep)
            await asyncio.sleep(time_to_sleep)
            async with self._enqueue_lock:
                return_val.ratelimit_enqueued -= 1
        return return_val

    def __repr__(self) -> str:
        """Provide a string representation of the object.

        :return: The string representation
        :rtype: str
        """
        return f"{type(self).__name__}{self.ratelimit_dictionary!r}"
import base64
import codecs
import hashlib
import mimetypes
import os
from lektor.utils import deprecated
class FileContents:
    """Lazy accessor for a file's contents, hashes, size and mimetype.

    The md5, sha1 and sha384-based subresource-integrity values are all
    computed together, on first access, in a single chunked pass over the
    file.  Deprecated since Lektor 3.4.0 (see the decorator on ``__init__``).
    """

    @deprecated(name="FileContents", version="3.4.0")
    def __init__(self, filename):
        self.filename = filename
        # Hash caches; filled lazily by _ensure_hashes().
        self._md5 = None
        self._sha1 = None
        self._integrity = None
        # Fall back to a generic binary mimetype for unknown extensions.
        self._mimetype = mimetypes.guess_type(filename)[0] or "application/octet-stream"

    @property
    def sha1(self):
        """Hex digest of the file's SHA-1 (computed lazily)."""
        self._ensure_hashes()
        return self._sha1

    @property
    def md5(self):
        """Hex digest of the file's MD5 (computed lazily)."""
        self._ensure_hashes()
        return self._md5

    @property
    def integrity(self):
        """Subresource-integrity string (``sha384-<base64>``), computed lazily."""
        self._ensure_hashes()
        return self._integrity

    @property
    def mimetype(self):
        """Mimetype guessed from the filename at construction time."""
        return self._mimetype

    @property
    def bytes(self):
        """File size in bytes; 0 if the file cannot be stat'ed."""
        try:
            return os.stat(self.filename).st_size
        except (OSError, IOError):
            return 0

    def as_data_url(self, mediatype=None):
        """Return the contents as a base64-encoded ``data:`` URL."""
        if mediatype is None:
            mediatype = self.mimetype
        return "data:%s;base64,%s" % (
            mediatype,
            self.as_base64(),
        )

    def as_text(self):
        """Return the contents decoded as text (UTF-8 by default)."""
        with self.open() as f:
            return f.read()

    def as_bytes(self):
        """Return the raw contents as bytes."""
        with self.open("rb") as f:
            return f.read()

    def as_base64(self):
        # NOTE: returns ``bytes``, not ``str`` (b64encode's return type).
        return base64.b64encode(self.as_bytes())

    def open(self, mode="r", encoding=None):
        """Open the file; only ``"r"`` (text, UTF-8 default) and ``"rb"`` are allowed."""
        if mode == "rb":
            return open(self.filename, "rb")
        if mode != "r":
            raise TypeError("Can only open files for reading")
        return codecs.open(self.filename, encoding=encoding or "utf-8")

    def _ensure_hashes(self):
        # Compute md5, sha1 and the sha384 integrity value in one pass.
        if self._md5 is not None:
            return
        with self.open("rb") as f:
            md5 = hashlib.md5()
            sha1 = hashlib.sha1()
            sha384 = hashlib.sha384()
            while 1:
                chunk = f.read(16384)
                if not chunk:
                    break
                md5.update(chunk)
                sha1.update(chunk)
                sha384.update(chunk)
            self._md5 = md5.hexdigest()
            self._sha1 = sha1.hexdigest()
            self._integrity = "sha384-" + base64.b64encode(sha384.digest()).decode(
                "ascii"
            )

    def __repr__(self):
        return "<FileContents %r md5=%r>" % (
            self.filename,
            self.md5,
        )
from typing import List,Tuple
from os import listdir
from glob import glob
################################################################################
def get_catalog_sherlock(rootpath: str,
                         existing: str
                         ) -> Tuple[List[str], List[str], List[str],
                                    List[str], List[str], List[str],
                                    List[str], List[str], List[str]]:
    """
    Searches for DFT calculation folders under ``rootpath``.

    Directories named in the comma-separated ``existing`` string are skipped.
    For every remaining folder the DFT code is detected (Quantum Espresso /
    VASP / GPAW) and the relevant input/output artifacts are read into memory.

    :param rootpath: directory whose immediate children are calculation dirs
    :param existing: comma-separated directory names to skip
    :return: nine parallel lists — storage dirs, logfile paths, code names,
        log texts, pw.inp texts, POTCARs, POSCARs, KPOINTS and params.json
        texts.
    :raises AssertionError: if a folder lacks ``runtime.json``
    """
    def _slurp(path: str) -> str:
        # Every artifact below is consumed whole, as text.
        with open(path, 'r') as f:
            return f.read()

    ######################
    # Initialize Variables
    #---------------------
    filt = set(existing.split(","))
    files = set(listdir(rootpath))
    dirs, logfiles, codes, logs, pws, pots, pos, kpts, params = \
        ([] for _ in range(9))

    # Main Program
    #-------------
    for d in files - filt:
        stordir = rootpath + '/' + d  # renamed from ``dir``: don't shadow the builtin
        ds = listdir(stordir)
        assert 'runtime.json' in ds, 'Failed sanity check'
        # NOTE(review): this pattern globs *sibling* directories that share
        # this directory's name as a prefix (".../<d>*/pw.inp"); confirm
        # whether ``stordir + '/*/pw.inp'`` was intended.
        pwinps = glob(stordir + '*/pw.inp')

        # DFTcode-specific stuff
        #------------------------
        if len(pwinps) == 1:
            code = 'quantumespresso'
            logfile = stordir + '/log'
            pwinp = _slurp(pwinps[0])
            poscar, potcar, kptcar = '', '', ''
        elif 'OUTCAR' in ds:
            code = 'vasp'
            logfile = stordir + '/OUTCAR'
            pwinp = ''
            poscar = _slurp(stordir + '/POSCAR')
            potcar = _slurp(stordir + '/POTCAR')
            kptcar = _slurp(stordir + '/KPOINTS')
        else:
            code = 'gpaw'
            logfile = stordir + '/log'
            pwinp, poscar, potcar, kptcar = '', '', '', ''

        # Common attributes
        #------------------
        log = _slurp(logfile)
        paramjson = _slurp(stordir + '/params.json')

        # Store results
        #------------
        dirs.append(stordir)
        logfiles.append(logfile)
        codes.append(code)
        logs.append(log)
        pws.append(pwinp)
        pots.append(potcar)
        pos.append(poscar)
        kpts.append(kptcar)
        params.append(paramjson)
    return dirs, logfiles, codes, logs, pws, pots, pos, kpts, params
if __name__ == '__main__':
    # Ad-hoc CLI: scan the catalog directory given as argv[1], filtering
    # nothing out (empty ``existing`` string).
    import sys
    print(get_catalog_sherlock(sys.argv[1], ''))
/Braindecode-0.7.tar.gz/Braindecode-0.7/braindecode/models/usleep.py |
import numpy as np
import torch
from torch import nn
def _crop_tensors_to_match(x1, x2, axis=-1):
"""Crops two tensors to their lowest-common-dimension along an axis."""
dim_cropped = min(x1.shape[axis], x2.shape[axis])
x1_cropped = torch.index_select(
x1, dim=axis,
index=torch.arange(dim_cropped).to(device=x1.device)
)
x2_cropped = torch.index_select(
x2, dim=axis,
index=torch.arange(dim_cropped).to(device=x1.device)
)
return x1_cropped, x2_cropped
class _EncoderBlock(nn.Module):
    """Encoding block for a timeseries x of shape (B, C, T).

    Conv1d -> ELU -> BatchNorm, followed by temporal max-pooling.  The
    pre-pooling activation is returned alongside the pooled output so the
    decoder can consume it as a skip connection.
    """
    def __init__(self,
                 in_channels=2,
                 out_channels=2,
                 kernel_size=9,
                 downsample=2):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.downsample = downsample

        self.block_prepool = nn.Sequential(
            nn.Conv1d(in_channels=in_channels,
                      out_channels=out_channels,
                      kernel_size=kernel_size,
                      padding='same'),
            nn.ELU(),
            nn.BatchNorm1d(num_features=out_channels),
        )
        # NOTE(review): ConstantPad1d with an int pads *both* sides (+2), so
        # an odd-length input stays odd before the pool — confirm whether
        # one-sided padding (0, 1) was intended here.
        self.pad = nn.ConstantPad1d(padding=1, value=0)
        self.maxpool = nn.MaxPool1d(
            kernel_size=self.downsample, stride=self.downsample)

    def forward(self, x):
        x = self.block_prepool(x)
        residual = x  # full-temporal-resolution copy for the skip path
        if x.shape[-1] % 2:
            x = self.pad(x)
        x = self.maxpool(x)
        return x, residual
class _DecoderBlock(nn.Module):
    """Decoding block for a timeseries x of shape (B, C, T).

    Upsample -> Conv1d -> ELU -> BatchNorm, optionally concatenated with
    the matching encoder residual (skip connection) before a second
    Conv1d -> ELU -> BatchNorm stage.
    """
    def __init__(self,
                 in_channels=2,
                 out_channels=2,
                 kernel_size=9,
                 upsample=2,
                 with_skip_connection=True):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.upsample = upsample
        self.with_skip_connection = with_skip_connection

        self.block_preskip = nn.Sequential(
            nn.Upsample(scale_factor=upsample),
            nn.Conv1d(in_channels=in_channels,
                      out_channels=out_channels,
                      kernel_size=2,
                      padding='same'),
            nn.ELU(),
            nn.BatchNorm1d(num_features=out_channels),
        )
        self.block_postskip = nn.Sequential(
            # Input channel count doubles when the residual is concatenated.
            nn.Conv1d(
                in_channels=(
                    2 * out_channels if with_skip_connection else out_channels),
                out_channels=out_channels,
                kernel_size=kernel_size,
                padding='same'),
            nn.ELU(),
            nn.BatchNorm1d(num_features=out_channels),
        )

    def forward(self, x, residual):
        x = self.block_preskip(x)
        if self.with_skip_connection:
            x, residual = _crop_tensors_to_match(x, residual, axis=-1)  # in case of mismatch
            x = torch.cat([x, residual], axis=1)  # (B, 2 * C, T)
        x = self.block_postskip(x)
        return x
class USleep(nn.Module):
    """Sleep staging architecture from Perslev et al 2021.

    U-Net (autoencoder with skip connections) feature-extractor for sleep
    staging described in [1]_.

    For the encoder ('down'):
        -- the temporal dimension shrinks (via maxpooling in the time-domain)
        -- the spatial dimension expands (via more conv1d filters in the
           time-domain)

    For the decoder ('up'):
        -- the temporal dimension expands (via upsampling in the time-domain)
        -- the spatial dimension shrinks (via fewer conv1d filters in the
           time-domain)

    Both do so at exponential rates.

    Parameters
    ----------
    in_chans : int
        Number of EEG or EOG channels. Set to 2 in [1]_ (1 EEG, 1 EOG).
    sfreq : float
        EEG sampling frequency. Set to 128 in [1]_.
    depth : int
        Number of conv blocks in encoding layer (number of 2x2 max pools)
        Note: each block halves the spatial dimensions of the features.
    n_time_filters : int
        Initial number of convolutional filters. Set to 5 in [1]_.
    complexity_factor : float
        Multiplicative factor for number of channels at each layer of the U-Net.
        Set to 2 in [1]_.
    with_skip_connection : bool
        If True, use skip connections in decoder blocks.
    n_classes : int
        Number of classes. Set to 5.
    input_size_s : float
        Size of the input, in seconds. Set to 30 in [1]_.
    time_conv_size_s : float
        Size of the temporal convolution kernel, in seconds. Set to 9 / 128 in
        [1]_.
    ensure_odd_conv_size : bool
        If True and the size of the convolutional kernel is an even number, one
        will be added to it to ensure it is odd, so that the decoder blocks can
        work. This can be useful when using different sampling rates from 128
        or 100 Hz.
    apply_softmax : bool
        If True, apply softmax on output (e.g. when using nn.NLLLoss). Use
        False if using nn.CrossEntropyLoss.

    References
    ----------
    .. [1] Perslev M, Darkner S, Kempfner L, Nikolic M, Jennum PJ, Igel C.
        U-Sleep: resilient high-frequency sleep staging. npj Digit. Med. 4, 72 (2021).
        https://github.com/perslev/U-Time/blob/master/utime/models/usleep.py
    """
    def __init__(self,
                 in_chans=2,
                 sfreq=128,
                 depth=12,
                 n_time_filters=5,
                 complexity_factor=1.67,
                 with_skip_connection=True,
                 n_classes=5,
                 input_size_s=30,
                 time_conv_size_s=9 / 128,
                 ensure_odd_conv_size=False,
                 apply_softmax=False
                 ):
        super().__init__()

        self.in_chans = in_chans
        max_pool_size = 2  # Hardcoded to avoid dimensional errors
        time_conv_size = np.round(time_conv_size_s * sfreq).astype(int)
        if time_conv_size % 2 == 0:
            if ensure_odd_conv_size:
                time_conv_size += 1
            else:
                raise ValueError(
                    'time_conv_size must be an odd number to accomodate the '
                    'upsampling step in the decoder blocks.')

        # Convert between units: seconds to time-points (at sfreq)
        input_size = np.ceil(input_size_s * sfreq).astype(int)

        # Channel progression: each level multiplies the filter count by
        # sqrt(2), scaled by sqrt(complexity_factor).
        channels = [in_chans]
        n_filters = n_time_filters
        for _ in range(depth + 1):
            channels.append(int(n_filters * np.sqrt(complexity_factor)))
            n_filters = int(n_filters * np.sqrt(2))
        self.channels = channels

        # Instantiate encoder
        encoder = list()
        for idx in range(depth):
            encoder += [
                _EncoderBlock(in_channels=channels[idx],
                              out_channels=channels[idx + 1],
                              kernel_size=time_conv_size,
                              downsample=max_pool_size)
            ]
        self.encoder = nn.Sequential(*encoder)

        # Instantiate bottom (channels increase, temporal dim stays the same)
        self.bottom = nn.Sequential(
            nn.Conv1d(in_channels=channels[-2],
                      out_channels=channels[-1],
                      kernel_size=time_conv_size,
                      padding=(time_conv_size - 1) // 2),  # preserves dimension
            nn.ELU(),
            nn.BatchNorm1d(num_features=channels[-1]),
        )

        # Instantiate decoder
        decoder = list()
        channels_reverse = channels[::-1]
        for idx in range(depth):
            decoder += [
                _DecoderBlock(in_channels=channels_reverse[idx],
                              out_channels=channels_reverse[idx + 1],
                              kernel_size=time_conv_size,
                              upsample=max_pool_size,
                              with_skip_connection=with_skip_connection)
            ]
        self.decoder = nn.Sequential(*decoder)

        # The temporal dimension remains unchanged
        # (except through the AvgPooling which collapses it to 1)
        # The spatial dimension is preserved from the end of the UNet, and is mapped to n_classes
        self.clf = nn.Sequential(
            nn.Conv1d(
                in_channels=channels[1],
                out_channels=channels[1],
                kernel_size=1,
                stride=1,
                padding=0,
            ),  # output is (B, C, 1, S * T)
            nn.Tanh(),
            nn.AvgPool1d(input_size),  # output is (B, C, S)
            nn.Conv1d(
                in_channels=channels[1],
                out_channels=n_classes,
                kernel_size=1,
                stride=1,
                padding=0,
            ),  # output is (B, n_classes, S)
            nn.ELU(),
            nn.Conv1d(
                in_channels=n_classes,
                out_channels=n_classes,
                kernel_size=1,
                stride=1,
                padding=0,
            ),
            nn.Softmax(dim=1) if apply_softmax else nn.Identity(),
            # output is (B, n_classes, S)
        )

    def forward(self, x):
        """If input x has shape (B, S, C, T), return y_pred of shape (B, n_classes, S).
        If input x has shape (B, C, T), return y_pred of shape (B, n_classes).
        """
        # reshape input
        if x.ndim == 4:  # input x has shape (B, S, C, T)
            x = x.permute(0, 2, 1, 3)  # (B, C, S, T)
            x = x.flatten(start_dim=2)  # (B, C, S * T)

        # encoder
        residuals = []
        for down in self.encoder:
            x, res = down(x)
            residuals.append(res)

        # bottom
        x = self.bottom(x)

        # decoder
        residuals = residuals[::-1]  # flip order
        for up, res in zip(self.decoder, residuals):
            x = up(x, res)

        # classifier
        y_pred = self.clf(x)  # (B, n_classes, seq_length)

        if y_pred.shape[-1] == 1:  # seq_length of 1
            y_pred = y_pred[:, :, 0]

        return y_pred
/GxSphinx-1.0.0.tar.gz/GxSphinx-1.0.0/sphinx/search/non-minified-js/german-stemmer.js | var JSX = {};
(function (JSX) {
/**
* extends the class
*/
function $__jsx_extend(derivations, base) {
var ctor = function () {};
ctor.prototype = base.prototype;
var proto = new ctor();
for (var i in derivations) {
derivations[i].prototype = proto;
}
}
/**
* copies the implementations from source interface to target
*/
function $__jsx_merge_interface(target, source) {
for (var k in source.prototype)
if (source.prototype.hasOwnProperty(k))
target.prototype[k] = source.prototype[k];
}
/**
* defers the initialization of the property
*/
function $__jsx_lazy_init(obj, prop, func) {
function reset(obj, prop, value) {
delete obj[prop];
obj[prop] = value;
return value;
}
Object.defineProperty(obj, prop, {
get: function () {
return reset(obj, prop, func());
},
set: function (v) {
reset(obj, prop, v);
},
enumerable: true,
configurable: true
});
}
// 32-bit integer multiply: use the native Math.imul when available,
// otherwise emulate it with 16-bit halves.
var $__jsx_imul = Math.imul;
if (typeof $__jsx_imul === "undefined") {
	$__jsx_imul = function (a, b) {
		var ah  = (a >>> 16) & 0xffff;
		var al = a & 0xffff;
		var bh  = (b >>> 16) & 0xffff;
		var bl = b & 0xffff;
		return ((al * bl) + (((ah * bl + al * bh) << 16) >>> 0)|0);
	};
}

/**
 * fused int-ops with side-effects
 */
// Each helper updates o[p] in place, truncating to 32-bit via `| 0`.
function $__jsx_ipadd(o, p, r) {
	return o[p] = (o[p] + r) | 0;
}

function $__jsx_ipsub(o, p, r) {
	return o[p] = (o[p] - r) | 0;
}

function $__jsx_ipmul(o, p, r) {
	return o[p] = $__jsx_imul(o[p], r);
}

function $__jsx_ipdiv(o, p, r) {
	return o[p] = (o[p] / r) | 0;
}

function $__jsx_ipmod(o, p, r) {
	return o[p] = (o[p] % r) | 0;
}

function $__jsx_ippostinc(o, p) {
	var v = o[p];
	o[p] = (v + 1) | 0;
	return v;
}

function $__jsx_ippostdec(o, p) {
	var v = o[p];
	o[p] = (v - 1) | 0;
	return v;
}

/**
 * non-inlined version of Array#each
 */
function $__jsx_forEach(o, f) {
	var l = o.length;
	for (var i = 0; i < l; ++i)
		f(o[i]);
}
/*
 * global functions, renamed to avoid conflict with local variable names
 */
var $__jsx_parseInt = parseInt;
var $__jsx_parseFloat = parseFloat;
function $__jsx_isNaN(n) { return n !== n; }
var $__jsx_isFinite = isFinite;

var $__jsx_encodeURIComponent = encodeURIComponent;
var $__jsx_decodeURIComponent = decodeURIComponent;
var $__jsx_encodeURI = encodeURI;
var $__jsx_decodeURI = decodeURI;

var $__jsx_ObjectToString = Object.prototype.toString;
var $__jsx_ObjectHasOwnProperty = Object.prototype.hasOwnProperty;

/*
 * profiler object, initialized afterwards
 */
function $__jsx_profiler() {
}

/*
 * public interface to JSX code
 */
JSX.require = function (path) {
	var m = $__jsx_classMap[path];
	return m !== undefined ? m : null;
};

JSX.profilerIsRunning = function () {
	return $__jsx_profiler.getResults != null;
};

JSX.getProfileResults = function () {
	return ($__jsx_profiler.getResults || function () { return {}; })();
};

JSX.postProfileResults = function (url, cb) {
	if ($__jsx_profiler.postResults == null)
		throw new Error("profiler has not been turned on");
	return $__jsx_profiler.postResults(url, cb);
};

JSX.resetProfileResults = function () {
	if ($__jsx_profiler.resetResults == null)
		throw new Error("profiler has not been turned on");
	return $__jsx_profiler.resetResults();
};
JSX.DEBUG = false;
// Generator support: grab the host's GeneratorFunction when it exists,
// otherwise fall back to a stub constructor.
var GeneratorFunction$0 =
(function () {
	try {
		return Function('import {GeneratorFunction} from "std:iteration"; return GeneratorFunction')();
	} catch (e) {
		return function GeneratorFunction () {};
	}
})();
// Minimal iterator-protocol object driven by the compiled generator body
// (stored in __loop); next() resumes at state __next.
var __jsx_generator_object$0 =
(function () {
	function __jsx_generator_object() {
		this.__next = 0;
		this.__loop = null;
		this.__seed = null;
		this.__value = undefined;
		this.__status = 0;	// SUSPENDED: 0, ACTIVE: 1, DEAD: 2
	}

	__jsx_generator_object.prototype.next = function (seed) {
		switch (this.__status) {
		case 0:
			this.__status = 1;
			this.__seed = seed;

			// go next!
			this.__loop(this.__next);

			var done = false;
			if (this.__next != -1) {
				this.__status = 0;
			} else {
				this.__status = 2;
				done = true;
			}
			return { value: this.__value, done: done };
		case 1:
			throw new Error("Generator is already running");
		case 2:
			throw new Error("Generator is already finished");
		default:
			throw new Error("Unexpected generator internal state");
		}
	};

	return __jsx_generator_object;
}());
// Among: one entry of a Snowball stemmer's suffix table; Among$0 is the
// overload that also carries a conditional method and its receiver.
function Among(s, substring_i, result) {
	this.s_size = s.length;
	this.s = s;
	this.substring_i = substring_i;
	this.result = result;
	this.method = null;
	this.instance = null;
};

function Among$0(s, substring_i, result, method, instance) {
	this.s_size = s.length;
	this.s = s;
	this.substring_i = substring_i;
	this.result = result;
	this.method = method;
	this.instance = instance;
};

$__jsx_extend([Among, Among$0], Object);
function Stemmer() {
};

$__jsx_extend([Stemmer], Object);
// BaseStemmer holds the Snowball string-processing state: the current
// word plus cursor/limit window markers (bra/ket delimit the matched
// region).  Each prototype method is mirrored by a free function taking
// the receiver explicitly ($this) plus a static alias.
function BaseStemmer() {
	var current$0;
	var cursor$0;
	var limit$0;
	this.cache = ({  });
	current$0 = this.current = "";
	cursor$0 = this.cursor = 0;
	limit$0 = this.limit = current$0.length;
	this.limit_backward = 0;
	this.bra = cursor$0;
	this.ket = limit$0;
};

$__jsx_extend([BaseStemmer], Stemmer);
BaseStemmer.prototype.setCurrent$S = function (value) {
	var current$0;
	var cursor$0;
	var limit$0;
	current$0 = this.current = value;
	cursor$0 = this.cursor = 0;
	limit$0 = this.limit = current$0.length;
	this.limit_backward = 0;
	this.bra = cursor$0;
	this.ket = limit$0;
};

function BaseStemmer$setCurrent$LBaseStemmer$S($this, value) {
	var current$0;
	var cursor$0;
	var limit$0;
	current$0 = $this.current = value;
	cursor$0 = $this.cursor = 0;
	limit$0 = $this.limit = current$0.length;
	$this.limit_backward = 0;
	$this.bra = cursor$0;
	$this.ket = limit$0;
};

BaseStemmer.setCurrent$LBaseStemmer$S = BaseStemmer$setCurrent$LBaseStemmer$S;

BaseStemmer.prototype.getCurrent$ = function () {
	return this.current;
};

function BaseStemmer$getCurrent$LBaseStemmer$($this) {
	return $this.current;
};

BaseStemmer.getCurrent$LBaseStemmer$ = BaseStemmer$getCurrent$LBaseStemmer$;

BaseStemmer.prototype.copy_from$LBaseStemmer$ = function (other) {
	this.current = other.current;
	this.cursor = other.cursor;
	this.limit = other.limit;
	this.limit_backward = other.limit_backward;
	this.bra = other.bra;
	this.ket = other.ket;
};

function BaseStemmer$copy_from$LBaseStemmer$LBaseStemmer$($this, other) {
	$this.current = other.current;
	$this.cursor = other.cursor;
	$this.limit = other.limit;
	$this.limit_backward = other.limit_backward;
	$this.bra = other.bra;
	$this.ket = other.ket;
};

BaseStemmer.copy_from$LBaseStemmer$LBaseStemmer$ = BaseStemmer$copy_from$LBaseStemmer$LBaseStemmer$;
BaseStemmer.prototype.in_grouping$AIII = function (s, min, max) {
var ch;
var $__jsx_postinc_t;
if (this.cursor >= this.limit) {
return false;
}
ch = this.current.charCodeAt(this.cursor);
if (ch > max || ch < min) {
return false;
}
ch -= min;
if ((s[ch >>> 3] & 0x1 << (ch & 0x7)) === 0) {
return false;
}
($__jsx_postinc_t = this.cursor, this.cursor = ($__jsx_postinc_t + 1) | 0, $__jsx_postinc_t);
return true;
};
function BaseStemmer$in_grouping$LBaseStemmer$AIII($this, s, min, max) {
var ch;
var $__jsx_postinc_t;
if ($this.cursor >= $this.limit) {
return false;
}
ch = $this.current.charCodeAt($this.cursor);
if (ch > max || ch < min) {
return false;
}
ch -= min;
if ((s[ch >>> 3] & 0x1 << (ch & 0x7)) === 0) {
return false;
}
($__jsx_postinc_t = $this.cursor, $this.cursor = ($__jsx_postinc_t + 1) | 0, $__jsx_postinc_t);
return true;
};
BaseStemmer.in_grouping$LBaseStemmer$AIII = BaseStemmer$in_grouping$LBaseStemmer$AIII;
BaseStemmer.prototype.in_grouping_b$AIII = function (s, min, max) {
var ch;
var $__jsx_postinc_t;
if (this.cursor <= this.limit_backward) {
return false;
}
ch = this.current.charCodeAt(this.cursor - 1);
if (ch > max || ch < min) {
return false;
}
ch -= min;
if ((s[ch >>> 3] & 0x1 << (ch & 0x7)) === 0) {
return false;
}
($__jsx_postinc_t = this.cursor, this.cursor = ($__jsx_postinc_t - 1) | 0, $__jsx_postinc_t);
return true;
};
function BaseStemmer$in_grouping_b$LBaseStemmer$AIII($this, s, min, max) {
var ch;
var $__jsx_postinc_t;
if ($this.cursor <= $this.limit_backward) {
return false;
}
ch = $this.current.charCodeAt($this.cursor - 1);
if (ch > max || ch < min) {
return false;
}
ch -= min;
if ((s[ch >>> 3] & 0x1 << (ch & 0x7)) === 0) {
return false;
}
($__jsx_postinc_t = $this.cursor, $this.cursor = ($__jsx_postinc_t - 1) | 0, $__jsx_postinc_t);
return true;
};
BaseStemmer.in_grouping_b$LBaseStemmer$AIII = BaseStemmer$in_grouping_b$LBaseStemmer$AIII;
BaseStemmer.prototype.out_grouping$AIII = function (s, min, max) {
var ch;
var $__jsx_postinc_t;
if (this.cursor >= this.limit) {
return false;
}
ch = this.current.charCodeAt(this.cursor);
if (ch > max || ch < min) {
($__jsx_postinc_t = this.cursor, this.cursor = ($__jsx_postinc_t + 1) | 0, $__jsx_postinc_t);
return true;
}
ch -= min;
if ((s[ch >>> 3] & 0X1 << (ch & 0x7)) === 0) {
($__jsx_postinc_t = this.cursor, this.cursor = ($__jsx_postinc_t + 1) | 0, $__jsx_postinc_t);
return true;
}
return false;
};
function BaseStemmer$out_grouping$LBaseStemmer$AIII($this, s, min, max) {
var ch;
var $__jsx_postinc_t;
if ($this.cursor >= $this.limit) {
return false;
}
ch = $this.current.charCodeAt($this.cursor);
if (ch > max || ch < min) {
($__jsx_postinc_t = $this.cursor, $this.cursor = ($__jsx_postinc_t + 1) | 0, $__jsx_postinc_t);
return true;
}
ch -= min;
if ((s[ch >>> 3] & 0X1 << (ch & 0x7)) === 0) {
($__jsx_postinc_t = $this.cursor, $this.cursor = ($__jsx_postinc_t + 1) | 0, $__jsx_postinc_t);
return true;
}
return false;
};
BaseStemmer.out_grouping$LBaseStemmer$AIII = BaseStemmer$out_grouping$LBaseStemmer$AIII;
BaseStemmer.prototype.out_grouping_b$AIII = function (s, min, max) {
var ch;
var $__jsx_postinc_t;
if (this.cursor <= this.limit_backward) {
return false;
}
ch = this.current.charCodeAt(this.cursor - 1);
if (ch > max || ch < min) {
($__jsx_postinc_t = this.cursor, this.cursor = ($__jsx_postinc_t - 1) | 0, $__jsx_postinc_t);
return true;
}
ch -= min;
if ((s[ch >>> 3] & 0x1 << (ch & 0x7)) === 0) {
($__jsx_postinc_t = this.cursor, this.cursor = ($__jsx_postinc_t - 1) | 0, $__jsx_postinc_t);
return true;
}
return false;
};
function BaseStemmer$out_grouping_b$LBaseStemmer$AIII($this, s, min, max) {
var ch;
var $__jsx_postinc_t;
if ($this.cursor <= $this.limit_backward) {
return false;
}
ch = $this.current.charCodeAt($this.cursor - 1);
if (ch > max || ch < min) {
($__jsx_postinc_t = $this.cursor, $this.cursor = ($__jsx_postinc_t - 1) | 0, $__jsx_postinc_t);
return true;
}
ch -= min;
if ((s[ch >>> 3] & 0x1 << (ch & 0x7)) === 0) {
($__jsx_postinc_t = $this.cursor, $this.cursor = ($__jsx_postinc_t - 1) | 0, $__jsx_postinc_t);
return true;
}
return false;
};
BaseStemmer.out_grouping_b$LBaseStemmer$AIII = BaseStemmer$out_grouping_b$LBaseStemmer$AIII;
BaseStemmer.prototype.in_range$II = function (min, max) {
var ch;
var $__jsx_postinc_t;
if (this.cursor >= this.limit) {
return false;
}
ch = this.current.charCodeAt(this.cursor);
if (ch > max || ch < min) {
return false;
}
($__jsx_postinc_t = this.cursor, this.cursor = ($__jsx_postinc_t + 1) | 0, $__jsx_postinc_t);
return true;
};
function BaseStemmer$in_range$LBaseStemmer$II($this, min, max) {
var ch;
var $__jsx_postinc_t;
if ($this.cursor >= $this.limit) {
return false;
}
ch = $this.current.charCodeAt($this.cursor);
if (ch > max || ch < min) {
return false;
}
($__jsx_postinc_t = $this.cursor, $this.cursor = ($__jsx_postinc_t + 1) | 0, $__jsx_postinc_t);
return true;
};
BaseStemmer.in_range$LBaseStemmer$II = BaseStemmer$in_range$LBaseStemmer$II;
BaseStemmer.prototype.in_range_b$II = function (min, max) {
var ch;
var $__jsx_postinc_t;
if (this.cursor <= this.limit_backward) {
return false;
}
ch = this.current.charCodeAt(this.cursor - 1);
if (ch > max || ch < min) {
return false;
}
($__jsx_postinc_t = this.cursor, this.cursor = ($__jsx_postinc_t - 1) | 0, $__jsx_postinc_t);
return true;
};
function BaseStemmer$in_range_b$LBaseStemmer$II($this, min, max) {
var ch;
var $__jsx_postinc_t;
if ($this.cursor <= $this.limit_backward) {
return false;
}
ch = $this.current.charCodeAt($this.cursor - 1);
if (ch > max || ch < min) {
return false;
}
($__jsx_postinc_t = $this.cursor, $this.cursor = ($__jsx_postinc_t - 1) | 0, $__jsx_postinc_t);
return true;
};
BaseStemmer.in_range_b$LBaseStemmer$II = BaseStemmer$in_range_b$LBaseStemmer$II;
BaseStemmer.prototype.out_range$II = function (min, max) {
var ch;
var $__jsx_postinc_t;
if (this.cursor >= this.limit) {
return false;
}
ch = this.current.charCodeAt(this.cursor);
if (! (ch > max || ch < min)) {
return false;
}
($__jsx_postinc_t = this.cursor, this.cursor = ($__jsx_postinc_t + 1) | 0, $__jsx_postinc_t);
return true;
};
function BaseStemmer$out_range$LBaseStemmer$II($this, min, max) {
var ch;
var $__jsx_postinc_t;
if ($this.cursor >= $this.limit) {
return false;
}
ch = $this.current.charCodeAt($this.cursor);
if (! (ch > max || ch < min)) {
return false;
}
($__jsx_postinc_t = $this.cursor, $this.cursor = ($__jsx_postinc_t + 1) | 0, $__jsx_postinc_t);
return true;
};
BaseStemmer.out_range$LBaseStemmer$II = BaseStemmer$out_range$LBaseStemmer$II;
BaseStemmer.prototype.out_range_b$II = function (min, max) {
var ch;
var $__jsx_postinc_t;
if (this.cursor <= this.limit_backward) {
return false;
}
ch = this.current.charCodeAt(this.cursor - 1);
if (! (ch > max || ch < min)) {
return false;
}
($__jsx_postinc_t = this.cursor, this.cursor = ($__jsx_postinc_t - 1) | 0, $__jsx_postinc_t);
return true;
};
function BaseStemmer$out_range_b$LBaseStemmer$II($this, min, max) {
var ch;
var $__jsx_postinc_t;
if ($this.cursor <= $this.limit_backward) {
return false;
}
ch = $this.current.charCodeAt($this.cursor - 1);
if (! (ch > max || ch < min)) {
return false;
}
($__jsx_postinc_t = $this.cursor, $this.cursor = ($__jsx_postinc_t - 1) | 0, $__jsx_postinc_t);
return true;
};
BaseStemmer.out_range_b$LBaseStemmer$II = BaseStemmer$out_range_b$LBaseStemmer$II;
// Match the literal s (of length s_size) starting at the cursor; on
// success the cursor moves past the match.
BaseStemmer.prototype.eq_s$IS = function (s_size, s) {
    if (((this.limit - this.cursor) | 0) < s_size) {
        return false;
    }
    var start = this.cursor;
    if (this.current.slice(start, (start + s_size) | 0) !== s) {
        return false;
    }
    this.cursor = (this.cursor + s_size) | 0;
    return true;
};
// Static form of eq_s$IS: forward literal match at the cursor.
function BaseStemmer$eq_s$LBaseStemmer$IS($this, s_size, s) {
    if ((($this.limit - $this.cursor) | 0) < s_size) {
        return false;
    }
    var start = $this.cursor;
    if ($this.current.slice(start, (start + s_size) | 0) !== s) {
        return false;
    }
    $this.cursor = ($this.cursor + s_size) | 0;
    return true;
}
BaseStemmer.eq_s$LBaseStemmer$IS = BaseStemmer$eq_s$LBaseStemmer$IS;
// Backward variant of eq_s$IS: match the literal s ENDING at the cursor;
// on success the cursor moves back before the match.
BaseStemmer.prototype.eq_s_b$IS = function (s_size, s) {
    if (((this.cursor - this.limit_backward) | 0) < s_size) {
        return false;
    }
    var end = this.cursor;
    if (this.current.slice((end - s_size) | 0, end) !== s) {
        return false;
    }
    this.cursor = (this.cursor - s_size) | 0;
    return true;
};
// Static form of eq_s_b$IS: backward literal match ending at the cursor.
function BaseStemmer$eq_s_b$LBaseStemmer$IS($this, s_size, s) {
    if ((($this.cursor - $this.limit_backward) | 0) < s_size) {
        return false;
    }
    var end = $this.cursor;
    if ($this.current.slice((end - s_size) | 0, end) !== s) {
        return false;
    }
    $this.cursor = ($this.cursor - s_size) | 0;
    return true;
}
BaseStemmer.eq_s_b$LBaseStemmer$IS = BaseStemmer$eq_s_b$LBaseStemmer$IS;
// eq_v / eq_v_b: convenience wrappers around eq_s / eq_s_b that take the
// match length from the string itself.
BaseStemmer.prototype.eq_v$S = function (s) {
    return BaseStemmer$eq_s$LBaseStemmer$IS(this, s.length, s);
};
function BaseStemmer$eq_v$LBaseStemmer$S($this, s) {
    return BaseStemmer$eq_s$LBaseStemmer$IS($this, s.length, s);
};
BaseStemmer.eq_v$LBaseStemmer$S = BaseStemmer$eq_v$LBaseStemmer$S;
// Backward variant of eq_v.
BaseStemmer.prototype.eq_v_b$S = function (s) {
    return BaseStemmer$eq_s_b$LBaseStemmer$IS(this, s.length, s);
};
function BaseStemmer$eq_v_b$LBaseStemmer$S($this, s) {
    return BaseStemmer$eq_s_b$LBaseStemmer$IS($this, s.length, s);
};
BaseStemmer.eq_v_b$LBaseStemmer$S = BaseStemmer$eq_v_b$LBaseStemmer$S;
// Binary-search the Among table v (v_size entries, sorted by string) for
// the longest entry matching the text forward from the cursor.
// Returns the matched entry's result code, or 0 when nothing matches; on
// a match the cursor is left just past the matched substring.
BaseStemmer.prototype.find_among$ALAmong$I = function (v, v_size) {
    var i;
    var j;
    var c;
    var l;
    var common_i;
    var common_j;
    var first_key_inspected;
    var k;
    var diff;
    var common;
    var w;
    var i2;
    var res;
    i = 0;
    j = v_size;
    c = this.cursor;
    l = this.limit;
    common_i = 0;
    common_j = 0;
    first_key_inspected = false;
    // Phase 1: bisect [i, j), tracking how many leading characters are
    // already known to match below (common_i) / above (common_j) the pivot.
    while (true) {
        k = i + (j - i >>> 1);
        diff = 0;
        common = (common_i < common_j ? common_i : common_j);
        w = v[k];
        for (i2 = common; i2 < w.s_size; i2++) {
            if (c + common === l) {
                // Ran off the end of the text: candidate compares greater.
                diff = -1;
                break;
            }
            diff = this.current.charCodeAt(c + common) - w.s.charCodeAt(i2);
            if (diff !== 0) {
                break;
            }
            common++;
        }
        if (diff < 0) {
            j = k;
            common_j = common;
        } else {
            i = k;
            common_i = common;
        }
        if (j - i <= 1) {
            if (i > 0) {
                break;
            }
            if (j === i) {
                break;
            }
            if (first_key_inspected) {
                break;
            }
            // Give slot 0 one extra probe before giving up.
            first_key_inspected = true;
        }
    }
    // Phase 2: follow the substring_i chain to the longest fully-matched
    // candidate, honouring each entry's optional guard method.
    while (true) {
        w = v[i];
        if (common_i >= w.s_size) {
            this.cursor = (c + w.s_size | 0);
            if (w.method == null) {
                return w.result;
            }
            // NOTE(review): the forward search invokes the guard as
            // w.method(w.instance) while find_among_b passes the stemmer
            // itself; this mirrors the generated code as-is -- confirm
            // against the JSX/Snowball runtime before changing.
            res = w.method(w.instance);
            this.cursor = (c + w.s_size | 0);
            if (res) {
                return w.result;
            }
        }
        i = w.substring_i;
        if (i < 0) {
            return 0;
        }
    }
    return -1; // unreachable: the loop above always returns
};
// Static form of find_among$ALAmong$I: longest forward match of the text
// at $this.cursor against the sorted Among table v; returns the entry's
// result code or 0, advancing the cursor past a successful match.
function BaseStemmer$find_among$LBaseStemmer$ALAmong$I($this, v, v_size) {
    var i;
    var j;
    var c;
    var l;
    var common_i;
    var common_j;
    var first_key_inspected;
    var k;
    var diff;
    var common;
    var w;
    var i2;
    var res;
    i = 0;
    j = v_size;
    c = $this.cursor;
    l = $this.limit;
    common_i = 0;
    common_j = 0;
    first_key_inspected = false;
    // Phase 1: binary search with shared-prefix tracking (see the
    // prototype version above for details).
    while (true) {
        k = i + (j - i >>> 1);
        diff = 0;
        common = (common_i < common_j ? common_i : common_j);
        w = v[k];
        for (i2 = common; i2 < w.s_size; i2++) {
            if (c + common === l) {
                diff = -1;
                break;
            }
            diff = $this.current.charCodeAt(c + common) - w.s.charCodeAt(i2);
            if (diff !== 0) {
                break;
            }
            common++;
        }
        if (diff < 0) {
            j = k;
            common_j = common;
        } else {
            i = k;
            common_i = common;
        }
        if (j - i <= 1) {
            if (i > 0) {
                break;
            }
            if (j === i) {
                break;
            }
            if (first_key_inspected) {
                break;
            }
            first_key_inspected = true;
        }
    }
    // Phase 2: walk the substring_i chain, applying optional guards.
    while (true) {
        w = v[i];
        if (common_i >= w.s_size) {
            $this.cursor = (c + w.s_size | 0);
            if (w.method == null) {
                return w.result;
            }
            // NOTE(review): guard called with w.instance here, with the
            // stemmer in find_among_b -- mirrors the generated code.
            res = w.method(w.instance);
            $this.cursor = (c + w.s_size | 0);
            if (res) {
                return w.result;
            }
        }
        i = w.substring_i;
        if (i < 0) {
            return 0;
        }
    }
    return -1; // unreachable
};
BaseStemmer.find_among$LBaseStemmer$ALAmong$I = BaseStemmer$find_among$LBaseStemmer$ALAmong$I;
// Backward counterpart of find_among: longest match of the table's
// strings ENDING at the cursor (characters compared right-to-left down to
// limit_backward). Returns the entry's result code or 0; on a match the
// cursor moves back before the matched suffix.
BaseStemmer.prototype.find_among_b$ALAmong$I = function (v, v_size) {
    var i;
    var j;
    var c;
    var lb;
    var common_i;
    var common_j;
    var first_key_inspected;
    var k;
    var diff;
    var common;
    var w;
    var i2;
    var res;
    i = 0;
    j = v_size;
    c = this.cursor;
    lb = this.limit_backward;
    common_i = 0;
    common_j = 0;
    first_key_inspected = false;
    // Phase 1: bisect with shared-suffix tracking. NOTE(review): signed
    // shift (>>) here vs unsigned (>>>) in the forward version -- mirrors
    // the generated code; equivalent for the non-negative sizes used.
    while (true) {
        k = i + (j - i >> 1);
        diff = 0;
        common = (common_i < common_j ? common_i : common_j);
        w = v[k];
        // Compare the candidate string from its last character backwards.
        for (i2 = w.s_size - 1 - common; i2 >= 0; i2--) {
            if (c - common === lb) {
                diff = -1;
                break;
            }
            diff = this.current.charCodeAt(c - 1 - common) - w.s.charCodeAt(i2);
            if (diff !== 0) {
                break;
            }
            common++;
        }
        if (diff < 0) {
            j = k;
            common_j = common;
        } else {
            i = k;
            common_i = common;
        }
        if (j - i <= 1) {
            if (i > 0) {
                break;
            }
            if (j === i) {
                break;
            }
            if (first_key_inspected) {
                break;
            }
            first_key_inspected = true;
        }
    }
    // Phase 2: substring_i chain with optional guard methods (called with
    // the stemmer itself in the backward direction).
    while (true) {
        w = v[i];
        if (common_i >= w.s_size) {
            this.cursor = (c - w.s_size | 0);
            if (w.method == null) {
                return w.result;
            }
            res = w.method(this);
            this.cursor = (c - w.s_size | 0);
            if (res) {
                return w.result;
            }
        }
        i = w.substring_i;
        if (i < 0) {
            return 0;
        }
    }
    return -1; // unreachable
};
// Static form of find_among_b$ALAmong$I: longest backward match ending at
// $this.cursor; see the prototype version above for the algorithm notes.
function BaseStemmer$find_among_b$LBaseStemmer$ALAmong$I($this, v, v_size) {
    var i;
    var j;
    var c;
    var lb;
    var common_i;
    var common_j;
    var first_key_inspected;
    var k;
    var diff;
    var common;
    var w;
    var i2;
    var res;
    i = 0;
    j = v_size;
    c = $this.cursor;
    lb = $this.limit_backward;
    common_i = 0;
    common_j = 0;
    first_key_inspected = false;
    while (true) {
        k = i + (j - i >> 1);
        diff = 0;
        common = (common_i < common_j ? common_i : common_j);
        w = v[k];
        for (i2 = w.s_size - 1 - common; i2 >= 0; i2--) {
            if (c - common === lb) {
                diff = -1;
                break;
            }
            diff = $this.current.charCodeAt(c - 1 - common) - w.s.charCodeAt(i2);
            if (diff !== 0) {
                break;
            }
            common++;
        }
        if (diff < 0) {
            j = k;
            common_j = common;
        } else {
            i = k;
            common_i = common;
        }
        if (j - i <= 1) {
            if (i > 0) {
                break;
            }
            if (j === i) {
                break;
            }
            if (first_key_inspected) {
                break;
            }
            first_key_inspected = true;
        }
    }
    while (true) {
        w = v[i];
        if (common_i >= w.s_size) {
            $this.cursor = (c - w.s_size | 0);
            if (w.method == null) {
                return w.result;
            }
            res = w.method($this);
            $this.cursor = (c - w.s_size | 0);
            if (res) {
                return w.result;
            }
        }
        i = w.substring_i;
        if (i < 0) {
            return 0;
        }
    }
    return -1; // unreachable
};
BaseStemmer.find_among_b$LBaseStemmer$ALAmong$I = BaseStemmer$find_among_b$LBaseStemmer$ALAmong$I;
// Replace current[c_bra, c_ket) with s, keeping limit and cursor
// consistent with the edit. Returns the signed length change.
BaseStemmer.prototype.replace_s$IIS = function (c_bra, c_ket, s) {
    var removed = (c_ket - c_bra) | 0;
    var delta = (s.length - removed) | 0;
    this.current = this.current.slice(0, c_bra) + s + this.current.slice(c_ket);
    this.limit = (this.limit + delta) | 0;
    if (this.cursor >= c_ket) {
        // Cursor was past the edit: shift it by the length change.
        this.cursor = (this.cursor + delta) | 0;
    } else if (this.cursor > c_bra) {
        // Cursor was inside the edited span: clamp to its start.
        this.cursor = c_bra;
    }
    return delta | 0;
};
// Static form of replace_s$IIS: splice s over [c_bra, c_ket), adjust
// limit/cursor, and return the signed length change.
function BaseStemmer$replace_s$LBaseStemmer$IIS($this, c_bra, c_ket, s) {
    var removed = (c_ket - c_bra) | 0;
    var delta = (s.length - removed) | 0;
    $this.current = $this.current.slice(0, c_bra) + s + $this.current.slice(c_ket);
    $this.limit = ($this.limit + delta) | 0;
    if ($this.cursor >= c_ket) {
        $this.cursor = ($this.cursor + delta) | 0;
    } else if ($this.cursor > c_bra) {
        $this.cursor = c_bra;
    }
    return delta | 0;
}
BaseStemmer.replace_s$LBaseStemmer$IIS = BaseStemmer$replace_s$LBaseStemmer$IIS;
// Validate 0 <= bra <= ket <= limit <= current.length before slicing.
BaseStemmer.prototype.slice_check$ = function () {
    var b = this.bra;
    var k = this.ket;
    var l = this.limit;
    return b >= 0 && b <= k && k <= l && l <= this.current.length;
};
// Static form of slice_check$: 0 <= bra <= ket <= limit <= length.
function BaseStemmer$slice_check$LBaseStemmer$($this) {
    var b = $this.bra;
    var k = $this.ket;
    var l = $this.limit;
    return b >= 0 && b <= k && k <= l && l <= $this.current.length;
}
BaseStemmer.slice_check$LBaseStemmer$ = BaseStemmer$slice_check$LBaseStemmer$;
// Replace the [bra, ket) slice with s when the bounds are valid; return
// whether the replacement happened.
BaseStemmer.prototype.slice_from$S = function (s) {
    var b = this.bra;
    var k = this.ket;
    var l = this.limit;
    var ok = b >= 0 && b <= k && k <= l && l <= this.current.length;
    if (ok) {
        BaseStemmer$replace_s$LBaseStemmer$IIS(this, this.bra, this.ket, s);
    }
    return ok;
};
// Static form of slice_from$S: replace [bra, ket) with s when the slice
// bounds pass the slice_check invariant; return success.
function BaseStemmer$slice_from$LBaseStemmer$S($this, s) {
    var b = $this.bra;
    var k = $this.ket;
    var l = $this.limit;
    var ok = b >= 0 && b <= k && k <= l && l <= $this.current.length;
    if (ok) {
        BaseStemmer$replace_s$LBaseStemmer$IIS($this, $this.bra, $this.ket, s);
    }
    return ok;
}
BaseStemmer.slice_from$LBaseStemmer$S = BaseStemmer$slice_from$LBaseStemmer$S;
// slice_del: delete the [bra, ket) slice (slice_from with "").
BaseStemmer.prototype.slice_del$ = function () {
    return BaseStemmer$slice_from$LBaseStemmer$S(this, "");
};
function BaseStemmer$slice_del$LBaseStemmer$($this) {
    return BaseStemmer$slice_from$LBaseStemmer$S($this, "");
};
BaseStemmer.slice_del$LBaseStemmer$ = BaseStemmer$slice_del$LBaseStemmer$;
// Splice s over [c_bra, c_ket) and shift the bra/ket markers when the
// edit happened at or before them.
BaseStemmer.prototype.insert$IIS = function (c_bra, c_ket, s) {
    var delta = BaseStemmer$replace_s$LBaseStemmer$IIS(this, c_bra, c_ket, s);
    if (c_bra <= this.bra) {
        this.bra = (this.bra + delta) | 0;
    }
    if (c_bra <= this.ket) {
        this.ket = (this.ket + delta) | 0;
    }
};
// Static form of insert$IIS: replace [c_bra, c_ket) with s, shifting
// bra/ket by the length change when the edit precedes them.
function BaseStemmer$insert$LBaseStemmer$IIS($this, c_bra, c_ket, s) {
    var delta = BaseStemmer$replace_s$LBaseStemmer$IIS($this, c_bra, c_ket, s);
    if (c_bra <= $this.bra) {
        $this.bra = ($this.bra + delta) | 0;
    }
    if (c_bra <= $this.ket) {
        $this.ket = ($this.ket + delta) | 0;
    }
}
BaseStemmer.insert$LBaseStemmer$IIS = BaseStemmer$insert$LBaseStemmer$IIS;
// Return the current [bra, ket) slice, or '' when the bounds are invalid.
// The s parameter is unused (generated signature, kept for compatibility).
BaseStemmer.prototype.slice_to$S = function (s) {
    var b = this.bra;
    var k = this.ket;
    var l = this.limit;
    if (b >= 0 && b <= k && k <= l && l <= this.current.length) {
        return this.current.slice(this.bra, this.ket);
    }
    return '';
};
// Static form of slice_to$S: return current[bra, ket) when the bounds
// pass the slice_check invariant, otherwise ''. s is unused.
function BaseStemmer$slice_to$LBaseStemmer$S($this, s) {
    var b = $this.bra;
    var k = $this.ket;
    var l = $this.limit;
    if (b >= 0 && b <= k && k <= l && l <= $this.current.length) {
        return $this.current.slice($this.bra, $this.ket);
    }
    return '';
}
BaseStemmer.slice_to$LBaseStemmer$S = BaseStemmer$slice_to$LBaseStemmer$S;
// assign_to: return the analysed prefix current[0, limit); the s argument
// is unused (generated signature).
BaseStemmer.prototype.assign_to$S = function (s) {
    return this.current.slice(0, this.limit);
};
function BaseStemmer$assign_to$LBaseStemmer$S($this, s) {
    return $this.current.slice(0, $this.limit);
};
BaseStemmer.assign_to$LBaseStemmer$S = BaseStemmer$assign_to$LBaseStemmer$S;
// Base stem$ is a no-op; concrete stemmers override it.
BaseStemmer.prototype.stem$ = function () {
    return false;
};
// Stem a single word, memoising the result in this.cache.
// The cache key is '.' + word -- presumably to avoid colliding with
// inherited Object.prototype property names; confirm against callers.
BaseStemmer.prototype.stemWord$S = function (word) {
    var result;
    var current$0;
    var cursor$0;
    var limit$0;
    result = this.cache['.' + word];
    if (result == null) {
        // Reset the stemmer state for a fresh word, then run stem$().
        current$0 = this.current = word;
        cursor$0 = this.cursor = 0;
        limit$0 = this.limit = current$0.length;
        this.limit_backward = 0;
        this.bra = cursor$0;
        this.ket = limit$0;
        this.stem$();
        result = this.current;
        this.cache['.' + word] = result;
    }
    return result;
};
BaseStemmer.prototype.stemWord = BaseStemmer.prototype.stemWord$S;
// Stem every word in *words*, returning the results in input order.
// The original duplicated stemWord$S's cache-lookup / state-reset / stem$
// sequence verbatim; delegating to stemWord$S keeps that logic in one
// place with identical behaviour (same cache, same per-word call order).
BaseStemmer.prototype.stemWords$AS = function (words) {
    var results;
    var i;
    results = [ ];
    for (i = 0; i < words.length; i++) {
        results.push(this.stemWord$S(words[i]));
    }
    return results;
};
BaseStemmer.prototype.stemWords = BaseStemmer.prototype.stemWords$AS;
// German Snowball stemmer (machine-generated). I_x, I_p1 and I_p2 are the
// integer region markers used by the algorithm (see r_mark_regions).
function GermanStemmer() {
    BaseStemmer.call(this);
    this.I_x = 0;
    this.I_p2 = 0;
    this.I_p1 = 0;
};
$__jsx_extend([GermanStemmer], BaseStemmer);
// Copy the German-specific region markers, then the shared base state.
GermanStemmer.prototype.copy_from$LGermanStemmer$ = function (other) {
    this.I_x = other.I_x;
    this.I_p2 = other.I_p2;
    this.I_p1 = other.I_p1;
    BaseStemmer$copy_from$LBaseStemmer$LBaseStemmer$(this, other);
};
GermanStemmer.prototype.copy_from = GermanStemmer.prototype.copy_from$LGermanStemmer$;
// Prelude pass: (1) rewrite every "\u00DF" (sharp s) to "ss"; (2) re-scan
// and upper-case 'u'/'y' to 'U'/'Y' when they follow a vowel and precede
// a vowel, so later passes treat them as consonants.
// The lab/golab/replab label-loops are the JSX compiler's encoding of
// Snowball's backtracking blocks: `labN: while (labN === true)` runs its
// body exactly once and `break labN` abandons that attempt, restoring the
// saved cursor (v_1, v_2, ...).
GermanStemmer.prototype.r_prelude$ = function () {
    var v_1;
    var v_2;
    var v_3;
    var v_4;
    var v_5;
    var v_6;
    var lab1;
    var lab2;
    var lab3;
    var lab5;
    var lab7;
    var lab8;
    var lab9;
    var cursor$0;
    var cursor$1;
    var $__jsx_postinc_t;
    v_1 = this.cursor;
    // Pass 1: replace each sharp s with "ss", scanning left to right.
    replab0:
    while (true) {
        v_2 = this.cursor;
        lab1 = true;
        lab1:
        while (lab1 === true) {
            lab1 = false;
            lab2 = true;
            lab2:
            while (lab2 === true) {
                lab2 = false;
                v_3 = this.cursor;
                lab3 = true;
                lab3:
                while (lab3 === true) {
                    lab3 = false;
                    this.bra = this.cursor;
                    if (! BaseStemmer$eq_s$LBaseStemmer$IS(this, 1, "\u00DF")) {
                        break lab3;
                    }
                    this.ket = this.cursor;
                    if (! BaseStemmer$slice_from$LBaseStemmer$S(this, "ss")) {
                        return false;
                    }
                    break lab2;
                }
                // No match here: step one character forward and retry.
                cursor$0 = this.cursor = v_3;
                if (cursor$0 >= this.limit) {
                    break lab1;
                }
                ($__jsx_postinc_t = this.cursor, this.cursor = ($__jsx_postinc_t + 1) | 0, $__jsx_postinc_t);
            }
            continue replab0;
        }
        this.cursor = v_2;
        break replab0;
    }
    this.cursor = v_1;
    // Pass 2: mark intervocalic u/y as U/Y.
    replab4:
    while (true) {
        v_4 = this.cursor;
        lab5 = true;
        lab5:
        while (lab5 === true) {
            lab5 = false;
            golab6:
            while (true) {
                v_5 = this.cursor;
                lab7 = true;
                lab7:
                while (lab7 === true) {
                    lab7 = false;
                    if (! BaseStemmer$in_grouping$LBaseStemmer$AIII(this, GermanStemmer.g_v, 97, 252)) {
                        break lab7;
                    }
                    this.bra = this.cursor;
                    lab8 = true;
                    lab8:
                    while (lab8 === true) {
                        lab8 = false;
                        v_6 = this.cursor;
                        lab9 = true;
                        lab9:
                        while (lab9 === true) {
                            lab9 = false;
                            if (! BaseStemmer$eq_s$LBaseStemmer$IS(this, 1, "u")) {
                                break lab9;
                            }
                            this.ket = this.cursor;
                            if (! BaseStemmer$in_grouping$LBaseStemmer$AIII(this, GermanStemmer.g_v, 97, 252)) {
                                break lab9;
                            }
                            if (! BaseStemmer$slice_from$LBaseStemmer$S(this, "U")) {
                                return false;
                            }
                            break lab8;
                        }
                        // "u" attempt failed: restore and try "y" instead.
                        this.cursor = v_6;
                        if (! BaseStemmer$eq_s$LBaseStemmer$IS(this, 1, "y")) {
                            break lab7;
                        }
                        this.ket = this.cursor;
                        if (! BaseStemmer$in_grouping$LBaseStemmer$AIII(this, GermanStemmer.g_v, 97, 252)) {
                            break lab7;
                        }
                        if (! BaseStemmer$slice_from$LBaseStemmer$S(this, "Y")) {
                            return false;
                        }
                    }
                    this.cursor = v_5;
                    break golab6;
                }
                cursor$1 = this.cursor = v_5;
                if (cursor$1 >= this.limit) {
                    break lab5;
                }
                ($__jsx_postinc_t = this.cursor, this.cursor = ($__jsx_postinc_t + 1) | 0, $__jsx_postinc_t);
            }
            continue replab4;
        }
        this.cursor = v_4;
        break replab4;
    }
    return true;
};
GermanStemmer.prototype.r_prelude = GermanStemmer.prototype.r_prelude$;
// Static form of r_prelude$: sharp-s to "ss", then intervocalic u/y to
// U/Y. See the prototype version above for notes on the label-loop idiom.
function GermanStemmer$r_prelude$LGermanStemmer$($this) {
    var v_1;
    var v_2;
    var v_3;
    var v_4;
    var v_5;
    var v_6;
    var lab1;
    var lab2;
    var lab3;
    var lab5;
    var lab7;
    var lab8;
    var lab9;
    var cursor$0;
    var cursor$1;
    var $__jsx_postinc_t;
    v_1 = $this.cursor;
    // Pass 1: "\u00DF" -> "ss".
    replab0:
    while (true) {
        v_2 = $this.cursor;
        lab1 = true;
        lab1:
        while (lab1 === true) {
            lab1 = false;
            lab2 = true;
            lab2:
            while (lab2 === true) {
                lab2 = false;
                v_3 = $this.cursor;
                lab3 = true;
                lab3:
                while (lab3 === true) {
                    lab3 = false;
                    $this.bra = $this.cursor;
                    if (! BaseStemmer$eq_s$LBaseStemmer$IS($this, 1, "\u00DF")) {
                        break lab3;
                    }
                    $this.ket = $this.cursor;
                    if (! BaseStemmer$slice_from$LBaseStemmer$S($this, "ss")) {
                        return false;
                    }
                    break lab2;
                }
                cursor$0 = $this.cursor = v_3;
                if (cursor$0 >= $this.limit) {
                    break lab1;
                }
                ($__jsx_postinc_t = $this.cursor, $this.cursor = ($__jsx_postinc_t + 1) | 0, $__jsx_postinc_t);
            }
            continue replab0;
        }
        $this.cursor = v_2;
        break replab0;
    }
    $this.cursor = v_1;
    // Pass 2: intervocalic u/y -> U/Y.
    replab4:
    while (true) {
        v_4 = $this.cursor;
        lab5 = true;
        lab5:
        while (lab5 === true) {
            lab5 = false;
            golab6:
            while (true) {
                v_5 = $this.cursor;
                lab7 = true;
                lab7:
                while (lab7 === true) {
                    lab7 = false;
                    if (! BaseStemmer$in_grouping$LBaseStemmer$AIII($this, GermanStemmer.g_v, 97, 252)) {
                        break lab7;
                    }
                    $this.bra = $this.cursor;
                    lab8 = true;
                    lab8:
                    while (lab8 === true) {
                        lab8 = false;
                        v_6 = $this.cursor;
                        lab9 = true;
                        lab9:
                        while (lab9 === true) {
                            lab9 = false;
                            if (! BaseStemmer$eq_s$LBaseStemmer$IS($this, 1, "u")) {
                                break lab9;
                            }
                            $this.ket = $this.cursor;
                            if (! BaseStemmer$in_grouping$LBaseStemmer$AIII($this, GermanStemmer.g_v, 97, 252)) {
                                break lab9;
                            }
                            if (! BaseStemmer$slice_from$LBaseStemmer$S($this, "U")) {
                                return false;
                            }
                            break lab8;
                        }
                        $this.cursor = v_6;
                        if (! BaseStemmer$eq_s$LBaseStemmer$IS($this, 1, "y")) {
                            break lab7;
                        }
                        $this.ket = $this.cursor;
                        if (! BaseStemmer$in_grouping$LBaseStemmer$AIII($this, GermanStemmer.g_v, 97, 252)) {
                            break lab7;
                        }
                        if (! BaseStemmer$slice_from$LBaseStemmer$S($this, "Y")) {
                            return false;
                        }
                    }
                    $this.cursor = v_5;
                    break golab6;
                }
                cursor$1 = $this.cursor = v_5;
                if (cursor$1 >= $this.limit) {
                    break lab5;
                }
                ($__jsx_postinc_t = $this.cursor, $this.cursor = ($__jsx_postinc_t + 1) | 0, $__jsx_postinc_t);
            }
            continue replab4;
        }
        $this.cursor = v_4;
        break replab4;
    }
    return true;
};
GermanStemmer.r_prelude$LGermanStemmer$ = GermanStemmer$r_prelude$LGermanStemmer$;
// Compute the Snowball regions: I_p1 (start of R1) is the position after
// the first non-vowel that follows a vowel, clamped to be no earlier than
// I_x = cursor + 3; I_p2 (start of R2) repeats the same vowel/non-vowel
// scan starting from I_p1. Both default to the limit via the early
// `return false` paths when the pattern is not found.
GermanStemmer.prototype.r_mark_regions$ = function () {
    var v_1;
    var c;
    var lab1;
    var lab3;
    var lab4;
    var lab6;
    var lab8;
    var limit$0;
    var cursor$0;
    var cursor$1;
    var $__jsx_postinc_t;
    this.I_p1 = limit$0 = this.limit;
    this.I_p2 = limit$0;
    v_1 = cursor$0 = this.cursor;
    // I_x = three characters past the current cursor (minimum R1 start).
    c = (cursor$0 + 3 | 0);
    if (0 > c || c > limit$0) {
        return false;
    }
    cursor$1 = this.cursor = c;
    this.I_x = cursor$1;
    this.cursor = v_1;
    // Scan to the first vowel.
    golab0:
    while (true) {
        lab1 = true;
        lab1:
        while (lab1 === true) {
            lab1 = false;
            if (! BaseStemmer$in_grouping$LBaseStemmer$AIII(this, GermanStemmer.g_v, 97, 252)) {
                break lab1;
            }
            break golab0;
        }
        if (this.cursor >= this.limit) {
            return false;
        }
        ($__jsx_postinc_t = this.cursor, this.cursor = ($__jsx_postinc_t + 1) | 0, $__jsx_postinc_t);
    }
    // Then to the first following non-vowel: R1 starts after it.
    golab2:
    while (true) {
        lab3 = true;
        lab3:
        while (lab3 === true) {
            lab3 = false;
            if (! BaseStemmer$out_grouping$LBaseStemmer$AIII(this, GermanStemmer.g_v, 97, 252)) {
                break lab3;
            }
            break golab2;
        }
        if (this.cursor >= this.limit) {
            return false;
        }
        ($__jsx_postinc_t = this.cursor, this.cursor = ($__jsx_postinc_t + 1) | 0, $__jsx_postinc_t);
    }
    this.I_p1 = this.cursor;
    // Clamp R1 so it never starts before I_x.
    lab4 = true;
    lab4:
    while (lab4 === true) {
        lab4 = false;
        if (! (this.I_p1 < this.I_x)) {
            break lab4;
        }
        this.I_p1 = this.I_x;
    }
    // Repeat the vowel / non-vowel scan for R2.
    golab5:
    while (true) {
        lab6 = true;
        lab6:
        while (lab6 === true) {
            lab6 = false;
            if (! BaseStemmer$in_grouping$LBaseStemmer$AIII(this, GermanStemmer.g_v, 97, 252)) {
                break lab6;
            }
            break golab5;
        }
        if (this.cursor >= this.limit) {
            return false;
        }
        ($__jsx_postinc_t = this.cursor, this.cursor = ($__jsx_postinc_t + 1) | 0, $__jsx_postinc_t);
    }
    golab7:
    while (true) {
        lab8 = true;
        lab8:
        while (lab8 === true) {
            lab8 = false;
            if (! BaseStemmer$out_grouping$LBaseStemmer$AIII(this, GermanStemmer.g_v, 97, 252)) {
                break lab8;
            }
            break golab7;
        }
        if (this.cursor >= this.limit) {
            return false;
        }
        ($__jsx_postinc_t = this.cursor, this.cursor = ($__jsx_postinc_t + 1) | 0, $__jsx_postinc_t);
    }
    this.I_p2 = this.cursor;
    return true;
};
GermanStemmer.prototype.r_mark_regions = GermanStemmer.prototype.r_mark_regions$;
// Static form of r_mark_regions$: compute I_x, I_p1 (R1, clamped to I_x)
// and I_p2 (R2). See the prototype version above for details.
function GermanStemmer$r_mark_regions$LGermanStemmer$($this) {
    var v_1;
    var c;
    var lab1;
    var lab3;
    var lab4;
    var lab6;
    var lab8;
    var limit$0;
    var cursor$0;
    var cursor$1;
    var $__jsx_postinc_t;
    $this.I_p1 = limit$0 = $this.limit;
    $this.I_p2 = limit$0;
    v_1 = cursor$0 = $this.cursor;
    c = (cursor$0 + 3 | 0);
    if (0 > c || c > limit$0) {
        return false;
    }
    cursor$1 = $this.cursor = c;
    $this.I_x = cursor$1;
    $this.cursor = v_1;
    // First vowel.
    golab0:
    while (true) {
        lab1 = true;
        lab1:
        while (lab1 === true) {
            lab1 = false;
            if (! BaseStemmer$in_grouping$LBaseStemmer$AIII($this, GermanStemmer.g_v, 97, 252)) {
                break lab1;
            }
            break golab0;
        }
        if ($this.cursor >= $this.limit) {
            return false;
        }
        ($__jsx_postinc_t = $this.cursor, $this.cursor = ($__jsx_postinc_t + 1) | 0, $__jsx_postinc_t);
    }
    // Following non-vowel: R1 starts after it.
    golab2:
    while (true) {
        lab3 = true;
        lab3:
        while (lab3 === true) {
            lab3 = false;
            if (! BaseStemmer$out_grouping$LBaseStemmer$AIII($this, GermanStemmer.g_v, 97, 252)) {
                break lab3;
            }
            break golab2;
        }
        if ($this.cursor >= $this.limit) {
            return false;
        }
        ($__jsx_postinc_t = $this.cursor, $this.cursor = ($__jsx_postinc_t + 1) | 0, $__jsx_postinc_t);
    }
    $this.I_p1 = $this.cursor;
    // Clamp R1 to I_x.
    lab4 = true;
    lab4:
    while (lab4 === true) {
        lab4 = false;
        if (! ($this.I_p1 < $this.I_x)) {
            break lab4;
        }
        $this.I_p1 = $this.I_x;
    }
    // Same scan again for R2.
    golab5:
    while (true) {
        lab6 = true;
        lab6:
        while (lab6 === true) {
            lab6 = false;
            if (! BaseStemmer$in_grouping$LBaseStemmer$AIII($this, GermanStemmer.g_v, 97, 252)) {
                break lab6;
            }
            break golab5;
        }
        if ($this.cursor >= $this.limit) {
            return false;
        }
        ($__jsx_postinc_t = $this.cursor, $this.cursor = ($__jsx_postinc_t + 1) | 0, $__jsx_postinc_t);
    }
    golab7:
    while (true) {
        lab8 = true;
        lab8:
        while (lab8 === true) {
            lab8 = false;
            if (! BaseStemmer$out_grouping$LBaseStemmer$AIII($this, GermanStemmer.g_v, 97, 252)) {
                break lab8;
            }
            break golab7;
        }
        if ($this.cursor >= $this.limit) {
            return false;
        }
        ($__jsx_postinc_t = $this.cursor, $this.cursor = ($__jsx_postinc_t + 1) | 0, $__jsx_postinc_t);
    }
    $this.I_p2 = $this.cursor;
    return true;
};
GermanStemmer.r_mark_regions$LGermanStemmer$ = GermanStemmer$r_mark_regions$LGermanStemmer$;
// Postlude pass: scan forward with among-table a_0 (6 entries, defined
// elsewhere in this file) and rewrite matches -- cases 1-5 substitute
// "y", "u", "a", "o", "u" respectively (presumably undoing the prelude's
// U/Y marking and folding umlauted vowels; the table contents are not
// visible in this chunk), while case 6 just advances one character.
GermanStemmer.prototype.r_postlude$ = function () {
    var among_var;
    var v_1;
    var lab1;
    var $__jsx_postinc_t;
    replab0:
    while (true) {
        v_1 = this.cursor;
        lab1 = true;
        lab1:
        while (lab1 === true) {
            lab1 = false;
            this.bra = this.cursor;
            among_var = BaseStemmer$find_among$LBaseStemmer$ALAmong$I(this, GermanStemmer.a_0, 6);
            if (among_var === 0) {
                break lab1;
            }
            this.ket = this.cursor;
            switch (among_var) {
            case 0:
                break lab1;
            case 1:
                if (! BaseStemmer$slice_from$LBaseStemmer$S(this, "y")) {
                    return false;
                }
                break;
            case 2:
                if (! BaseStemmer$slice_from$LBaseStemmer$S(this, "u")) {
                    return false;
                }
                break;
            case 3:
                if (! BaseStemmer$slice_from$LBaseStemmer$S(this, "a")) {
                    return false;
                }
                break;
            case 4:
                if (! BaseStemmer$slice_from$LBaseStemmer$S(this, "o")) {
                    return false;
                }
                break;
            case 5:
                if (! BaseStemmer$slice_from$LBaseStemmer$S(this, "u")) {
                    return false;
                }
                break;
            case 6:
                // No rewrite: skip a single character.
                if (this.cursor >= this.limit) {
                    break lab1;
                }
                ($__jsx_postinc_t = this.cursor, this.cursor = ($__jsx_postinc_t + 1) | 0, $__jsx_postinc_t);
                break;
            }
            continue replab0;
        }
        this.cursor = v_1;
        break replab0;
    }
    return true;
};
GermanStemmer.prototype.r_postlude = GermanStemmer.prototype.r_postlude$;
// Static form of r_postlude$: rewrite a_0 matches across the whole word.
// See the prototype version above for the per-case notes.
function GermanStemmer$r_postlude$LGermanStemmer$($this) {
    var among_var;
    var v_1;
    var lab1;
    var $__jsx_postinc_t;
    replab0:
    while (true) {
        v_1 = $this.cursor;
        lab1 = true;
        lab1:
        while (lab1 === true) {
            lab1 = false;
            $this.bra = $this.cursor;
            among_var = BaseStemmer$find_among$LBaseStemmer$ALAmong$I($this, GermanStemmer.a_0, 6);
            if (among_var === 0) {
                break lab1;
            }
            $this.ket = $this.cursor;
            switch (among_var) {
            case 0:
                break lab1;
            case 1:
                if (! BaseStemmer$slice_from$LBaseStemmer$S($this, "y")) {
                    return false;
                }
                break;
            case 2:
                if (! BaseStemmer$slice_from$LBaseStemmer$S($this, "u")) {
                    return false;
                }
                break;
            case 3:
                if (! BaseStemmer$slice_from$LBaseStemmer$S($this, "a")) {
                    return false;
                }
                break;
            case 4:
                if (! BaseStemmer$slice_from$LBaseStemmer$S($this, "o")) {
                    return false;
                }
                break;
            case 5:
                if (! BaseStemmer$slice_from$LBaseStemmer$S($this, "u")) {
                    return false;
                }
                break;
            case 6:
                // No rewrite: skip one character.
                if ($this.cursor >= $this.limit) {
                    break lab1;
                }
                ($__jsx_postinc_t = $this.cursor, $this.cursor = ($__jsx_postinc_t + 1) | 0, $__jsx_postinc_t);
                break;
            }
            continue replab0;
        }
        $this.cursor = v_1;
        break replab0;
    }
    return true;
};
GermanStemmer.r_postlude$LGermanStemmer$ = GermanStemmer$r_postlude$LGermanStemmer$;
// r_R1: true iff the cursor is inside region R1 (at or after I_p1).
GermanStemmer.prototype.r_R1$ = function () {
    return this.I_p1 <= this.cursor;
};
GermanStemmer.prototype.r_R1 = GermanStemmer.prototype.r_R1$;
// Static r_R1: cursor is inside region R1 (at or after I_p1).
function GermanStemmer$r_R1$LGermanStemmer$($this) {
    return $this.I_p1 <= $this.cursor;
}
GermanStemmer.r_R1$LGermanStemmer$ = GermanStemmer$r_R1$LGermanStemmer$;
// r_R2: true iff the cursor is inside region R2 (at or after I_p2).
GermanStemmer.prototype.r_R2$ = function () {
    return this.I_p2 <= this.cursor;
};
GermanStemmer.prototype.r_R2 = GermanStemmer.prototype.r_R2$;
// Static r_R2: cursor is inside region R2 (at or after I_p2).
function GermanStemmer$r_R2$LGermanStemmer$($this) {
    return $this.I_p2 <= $this.cursor;
}
GermanStemmer.r_R2$LGermanStemmer$ = GermanStemmer$r_R2$LGermanStemmer$;
// Standard suffix removal, working backwards from the end of the word in
// three passes over among-tables a_1, a_2 and a_4 (table contents are
// defined elsewhere in this file and not visible in this chunk).
// Each pass saves the distance from the limit (v_1, v_3, v_4) and
// restores it afterwards; the `! (! (I_pN <= cursor) ? false : true)`
// tests are the compiler's inlining of r_R1 / r_R2.
GermanStemmer.prototype.r_standard_suffix$ = function () {
    var among_var;
    var v_1;
    var v_2;
    var v_3;
    var v_4;
    var v_5;
    var v_6;
    var v_7;
    var v_8;
    var v_9;
    var v_10;
    var lab0;
    var lab1;
    var lab2;
    var c;
    var lab3;
    var lab4;
    var lab5;
    var lab6;
    var lab7;
    var lab8;
    var lab9;
    var lab10;
    var cursor$0;
    var cursor$1;
    var limit$0;
    var cursor$2;
    var cursor$3;
    var cursor$4;
    var cursor$5;
    var cursor$6;
    var cursor$7;
    var limit$1;
    var cursor$8;
    // Pass 1: a_1 suffixes, valid only in R1.
    v_1 = ((this.limit - this.cursor) | 0);
    lab0 = true;
    lab0:
    while (lab0 === true) {
        lab0 = false;
        this.ket = this.cursor;
        among_var = BaseStemmer$find_among_b$LBaseStemmer$ALAmong$I(this, GermanStemmer.a_1, 7);
        if (among_var === 0) {
            break lab0;
        }
        this.bra = cursor$0 = this.cursor;
        // Inlined r_R1: fail unless the match lies in R1.
        if (! (! (this.I_p1 <= cursor$0) ? false : true)) {
            break lab0;
        }
        switch (among_var) {
        case 0:
            break lab0;
        case 1:
            if (! BaseStemmer$slice_from$LBaseStemmer$S(this, "")) {
                return false;
            }
            break;
        case 2:
            if (! BaseStemmer$slice_from$LBaseStemmer$S(this, "")) {
                return false;
            }
            // Optionally also delete a trailing "s" of a "nis" stem.
            v_2 = ((this.limit - this.cursor) | 0);
            lab1 = true;
            lab1:
            while (lab1 === true) {
                lab1 = false;
                this.ket = this.cursor;
                if (! BaseStemmer$eq_s_b$LBaseStemmer$IS(this, 1, "s")) {
                    this.cursor = ((this.limit - v_2) | 0);
                    break lab1;
                }
                this.bra = this.cursor;
                if (! BaseStemmer$eq_s_b$LBaseStemmer$IS(this, 3, "nis")) {
                    this.cursor = ((this.limit - v_2) | 0);
                    break lab1;
                }
                if (! BaseStemmer$slice_from$LBaseStemmer$S(this, "")) {
                    return false;
                }
            }
            break;
        case 3:
            // "s" suffix: only after a valid s-ending consonant.
            if (! BaseStemmer$in_grouping_b$LBaseStemmer$AIII(this, GermanStemmer.g_s_ending, 98, 116)) {
                break lab0;
            }
            if (! BaseStemmer$slice_from$LBaseStemmer$S(this, "")) {
                return false;
            }
            break;
        }
    }
    // Pass 2: a_2 suffixes, valid only in R1.
    cursor$2 = this.cursor = (((limit$0 = this.limit) - v_1) | 0);
    v_3 = ((limit$0 - cursor$2) | 0);
    lab2 = true;
    lab2:
    while (lab2 === true) {
        lab2 = false;
        this.ket = this.cursor;
        among_var = BaseStemmer$find_among_b$LBaseStemmer$ALAmong$I(this, GermanStemmer.a_2, 4);
        if (among_var === 0) {
            break lab2;
        }
        this.bra = cursor$1 = this.cursor;
        // Inlined r_R1.
        if (! (! (this.I_p1 <= cursor$1) ? false : true)) {
            break lab2;
        }
        switch (among_var) {
        case 0:
            break lab2;
        case 1:
            if (! BaseStemmer$slice_from$LBaseStemmer$S(this, "")) {
                return false;
            }
            break;
        case 2:
            // "st": needs an st-ending consonant with >= 3 chars before it.
            if (! BaseStemmer$in_grouping_b$LBaseStemmer$AIII(this, GermanStemmer.g_st_ending, 98, 116)) {
                break lab2;
            }
            c = (this.cursor - 3 | 0);
            if (this.limit_backward > c || c > this.limit) {
                break lab2;
            }
            this.cursor = c;
            if (! BaseStemmer$slice_from$LBaseStemmer$S(this, "")) {
                return false;
            }
            break;
        }
    }
    // Pass 3: derivational a_4 suffixes, valid only in R2.
    cursor$8 = this.cursor = (((limit$1 = this.limit) - v_3) | 0);
    v_4 = ((limit$1 - cursor$8) | 0);
    lab3 = true;
    lab3:
    while (lab3 === true) {
        lab3 = false;
        this.ket = this.cursor;
        among_var = BaseStemmer$find_among_b$LBaseStemmer$ALAmong$I(this, GermanStemmer.a_4, 8);
        if (among_var === 0) {
            break lab3;
        }
        this.bra = cursor$3 = this.cursor;
        // Inlined r_R2.
        if (! (! (this.I_p2 <= cursor$3) ? false : true)) {
            break lab3;
        }
        switch (among_var) {
        case 0:
            break lab3;
        case 1:
            if (! BaseStemmer$slice_from$LBaseStemmer$S(this, "")) {
                return false;
            }
            // Then delete a preceding "ig" in R2, unless an "e" precedes it.
            v_5 = ((this.limit - this.cursor) | 0);
            lab4 = true;
            lab4:
            while (lab4 === true) {
                lab4 = false;
                this.ket = this.cursor;
                if (! BaseStemmer$eq_s_b$LBaseStemmer$IS(this, 2, "ig")) {
                    this.cursor = ((this.limit - v_5) | 0);
                    break lab4;
                }
                this.bra = cursor$4 = this.cursor;
                v_6 = ((this.limit - cursor$4) | 0);
                lab5 = true;
                lab5:
                while (lab5 === true) {
                    lab5 = false;
                    if (! BaseStemmer$eq_s_b$LBaseStemmer$IS(this, 1, "e")) {
                        break lab5;
                    }
                    this.cursor = ((this.limit - v_5) | 0);
                    break lab4;
                }
                cursor$5 = this.cursor = ((this.limit - v_6) | 0);
                // Inlined r_R2 on the "ig".
                if (! (! (this.I_p2 <= cursor$5) ? false : true)) {
                    this.cursor = ((this.limit - v_5) | 0);
                    break lab4;
                }
                if (! BaseStemmer$slice_from$LBaseStemmer$S(this, "")) {
                    return false;
                }
            }
            break;
        case 2:
            // Not after "e".
            v_7 = ((this.limit - this.cursor) | 0);
            lab6 = true;
            lab6:
            while (lab6 === true) {
                lab6 = false;
                if (! BaseStemmer$eq_s_b$LBaseStemmer$IS(this, 1, "e")) {
                    break lab6;
                }
                break lab3;
            }
            this.cursor = ((this.limit - v_7) | 0);
            if (! BaseStemmer$slice_from$LBaseStemmer$S(this, "")) {
                return false;
            }
            break;
        case 3:
            if (! BaseStemmer$slice_from$LBaseStemmer$S(this, "")) {
                return false;
            }
            // Then optionally delete a preceding "er" or "en" in R1.
            v_8 = ((this.limit - this.cursor) | 0);
            lab7 = true;
            lab7:
            while (lab7 === true) {
                lab7 = false;
                this.ket = this.cursor;
                lab8 = true;
                lab8:
                while (lab8 === true) {
                    lab8 = false;
                    v_9 = ((this.limit - this.cursor) | 0);
                    lab9 = true;
                    lab9:
                    while (lab9 === true) {
                        lab9 = false;
                        if (! BaseStemmer$eq_s_b$LBaseStemmer$IS(this, 2, "er")) {
                            break lab9;
                        }
                        break lab8;
                    }
                    this.cursor = ((this.limit - v_9) | 0);
                    if (! BaseStemmer$eq_s_b$LBaseStemmer$IS(this, 2, "en")) {
                        this.cursor = ((this.limit - v_8) | 0);
                        break lab7;
                    }
                }
                this.bra = cursor$6 = this.cursor;
                // Inlined r_R1.
                if (! (! (this.I_p1 <= cursor$6) ? false : true)) {
                    this.cursor = ((this.limit - v_8) | 0);
                    break lab7;
                }
                if (! BaseStemmer$slice_from$LBaseStemmer$S(this, "")) {
                    return false;
                }
            }
            break;
        case 4:
            if (! BaseStemmer$slice_from$LBaseStemmer$S(this, "")) {
                return false;
            }
            // Then optionally delete a preceding a_3 suffix in R2.
            v_10 = ((this.limit - this.cursor) | 0);
            lab10 = true;
            lab10:
            while (lab10 === true) {
                lab10 = false;
                this.ket = this.cursor;
                among_var = BaseStemmer$find_among_b$LBaseStemmer$ALAmong$I(this, GermanStemmer.a_3, 2);
                if (among_var === 0) {
                    this.cursor = ((this.limit - v_10) | 0);
                    break lab10;
                }
                this.bra = cursor$7 = this.cursor;
                // Inlined r_R2.
                if (! (! (this.I_p2 <= cursor$7) ? false : true)) {
                    this.cursor = ((this.limit - v_10) | 0);
                    break lab10;
                }
                switch (among_var) {
                case 0:
                    this.cursor = ((this.limit - v_10) | 0);
                    break lab10;
                case 1:
                    if (! BaseStemmer$slice_from$LBaseStemmer$S(this, "")) {
                        return false;
                    }
                    break;
                }
            }
            break;
        }
    }
    this.cursor = ((this.limit - v_4) | 0);
    return true;
};
GermanStemmer.prototype.r_standard_suffix = GermanStemmer.prototype.r_standard_suffix$;
// Machine-generated (JSX/Snowball) backward-mode rule: strips standard German
// suffixes in three passes (tables a_1, a_2, a_4), guarded by the region
// markers I_p1/I_p2. The labelled `while (labN === true)` loops are the
// compiler's encoding of Snowball's one-shot "do/or" backtracking: each body
// runs at most once, `break labN` aborts that attempt, and the cursor is
// restored from the saved v_* offsets (stored as distances from $this.limit).
// NOTE(review): statement order and cursor bookkeeping are load-bearing;
// regenerate from the Snowball source rather than hand-editing the logic.
function GermanStemmer$r_standard_suffix$LGermanStemmer$($this) {
var among_var;
var v_1;
var v_2;
var v_3;
var v_4;
var v_5;
var v_6;
var v_7;
var v_8;
var v_9;
var v_10;
var lab0;
var lab1;
var lab2;
var c;
var lab3;
var lab4;
var lab5;
var lab6;
var lab7;
var lab8;
var lab9;
var lab10;
var cursor$0;
var cursor$1;
var limit$0;
var cursor$2;
var cursor$3;
var cursor$4;
var cursor$5;
var cursor$6;
var cursor$7;
var limit$1;
var cursor$8;
// Pass 1: table a_1 suffixes, accepted only inside region R1 (I_p1 <= cursor).
v_1 = (($this.limit - $this.cursor) | 0);
lab0 = true;
lab0:
while (lab0 === true) {
lab0 = false;
$this.ket = $this.cursor;
among_var = BaseStemmer$find_among_b$LBaseStemmer$ALAmong$I($this, GermanStemmer.a_1, 7);
if (among_var === 0) {
break lab0;
}
$this.bra = cursor$0 = $this.cursor;
if (! (! ($this.I_p1 <= cursor$0) ? false : true)) {
break lab0;
}
switch (among_var) {
case 0:
break lab0;
case 1:
if (! BaseStemmer$slice_from$LBaseStemmer$S($this, "")) {
return false;
}
break;
case 2:
if (! BaseStemmer$slice_from$LBaseStemmer$S($this, "")) {
return false;
}
// Optional follow-up: also delete a trailing "s" when preceded by "nis".
v_2 = (($this.limit - $this.cursor) | 0);
lab1 = true;
lab1:
while (lab1 === true) {
lab1 = false;
$this.ket = $this.cursor;
if (! BaseStemmer$eq_s_b$LBaseStemmer$IS($this, 1, "s")) {
$this.cursor = (($this.limit - v_2) | 0);
break lab1;
}
$this.bra = $this.cursor;
if (! BaseStemmer$eq_s_b$LBaseStemmer$IS($this, 3, "nis")) {
$this.cursor = (($this.limit - v_2) | 0);
break lab1;
}
if (! BaseStemmer$slice_from$LBaseStemmer$S($this, "")) {
return false;
}
}
break;
case 3:
if (! BaseStemmer$in_grouping_b$LBaseStemmer$AIII($this, GermanStemmer.g_s_ending, 98, 116)) {
break lab0;
}
if (! BaseStemmer$slice_from$LBaseStemmer$S($this, "")) {
return false;
}
break;
}
}
// Pass 2: table a_2 suffixes, again restricted to R1.
cursor$2 = $this.cursor = (((limit$0 = $this.limit) - v_1) | 0);
v_3 = ((limit$0 - cursor$2) | 0);
lab2 = true;
lab2:
while (lab2 === true) {
lab2 = false;
$this.ket = $this.cursor;
among_var = BaseStemmer$find_among_b$LBaseStemmer$ALAmong$I($this, GermanStemmer.a_2, 4);
if (among_var === 0) {
break lab2;
}
$this.bra = cursor$1 = $this.cursor;
if (! (! ($this.I_p1 <= cursor$1) ? false : true)) {
break lab2;
}
switch (among_var) {
case 0:
break lab2;
case 1:
if (! BaseStemmer$slice_from$LBaseStemmer$S($this, "")) {
return false;
}
break;
case 2:
if (! BaseStemmer$in_grouping_b$LBaseStemmer$AIII($this, GermanStemmer.g_st_ending, 98, 116)) {
break lab2;
}
// Require at least three more characters before the matched suffix.
c = ($this.cursor - 3 | 0);
if ($this.limit_backward > c || c > $this.limit) {
break lab2;
}
$this.cursor = c;
if (! BaseStemmer$slice_from$LBaseStemmer$S($this, "")) {
return false;
}
break;
}
}
// Pass 3: derivational suffixes from table a_4, restricted to R2 (I_p2).
cursor$8 = $this.cursor = (((limit$1 = $this.limit) - v_3) | 0);
v_4 = ((limit$1 - cursor$8) | 0);
lab3 = true;
lab3:
while (lab3 === true) {
lab3 = false;
$this.ket = $this.cursor;
among_var = BaseStemmer$find_among_b$LBaseStemmer$ALAmong$I($this, GermanStemmer.a_4, 8);
if (among_var === 0) {
break lab3;
}
$this.bra = cursor$3 = $this.cursor;
if (! (! ($this.I_p2 <= cursor$3) ? false : true)) {
break lab3;
}
switch (among_var) {
case 0:
break lab3;
case 1:
if (! BaseStemmer$slice_from$LBaseStemmer$S($this, "")) {
return false;
}
// Also drop a preceding "ig" in R2, unless it follows an "e".
v_5 = (($this.limit - $this.cursor) | 0);
lab4 = true;
lab4:
while (lab4 === true) {
lab4 = false;
$this.ket = $this.cursor;
if (! BaseStemmer$eq_s_b$LBaseStemmer$IS($this, 2, "ig")) {
$this.cursor = (($this.limit - v_5) | 0);
break lab4;
}
$this.bra = cursor$4 = $this.cursor;
v_6 = (($this.limit - cursor$4) | 0);
lab5 = true;
lab5:
while (lab5 === true) {
lab5 = false;
if (! BaseStemmer$eq_s_b$LBaseStemmer$IS($this, 1, "e")) {
break lab5;
}
$this.cursor = (($this.limit - v_5) | 0);
break lab4;
}
cursor$5 = $this.cursor = (($this.limit - v_6) | 0);
if (! (! ($this.I_p2 <= cursor$5) ? false : true)) {
$this.cursor = (($this.limit - v_5) | 0);
break lab4;
}
if (! BaseStemmer$slice_from$LBaseStemmer$S($this, "")) {
return false;
}
}
break;
case 2:
// Delete only if NOT preceded by "e".
v_7 = (($this.limit - $this.cursor) | 0);
lab6 = true;
lab6:
while (lab6 === true) {
lab6 = false;
if (! BaseStemmer$eq_s_b$LBaseStemmer$IS($this, 1, "e")) {
break lab6;
}
break lab3;
}
$this.cursor = (($this.limit - v_7) | 0);
if (! BaseStemmer$slice_from$LBaseStemmer$S($this, "")) {
return false;
}
break;
case 3:
if (! BaseStemmer$slice_from$LBaseStemmer$S($this, "")) {
return false;
}
// Also drop a preceding "er" or "en" in R1.
v_8 = (($this.limit - $this.cursor) | 0);
lab7 = true;
lab7:
while (lab7 === true) {
lab7 = false;
$this.ket = $this.cursor;
lab8 = true;
lab8:
while (lab8 === true) {
lab8 = false;
v_9 = (($this.limit - $this.cursor) | 0);
lab9 = true;
lab9:
while (lab9 === true) {
lab9 = false;
if (! BaseStemmer$eq_s_b$LBaseStemmer$IS($this, 2, "er")) {
break lab9;
}
break lab8;
}
$this.cursor = (($this.limit - v_9) | 0);
if (! BaseStemmer$eq_s_b$LBaseStemmer$IS($this, 2, "en")) {
$this.cursor = (($this.limit - v_8) | 0);
break lab7;
}
}
$this.bra = cursor$6 = $this.cursor;
if (! (! ($this.I_p1 <= cursor$6) ? false : true)) {
$this.cursor = (($this.limit - v_8) | 0);
break lab7;
}
if (! BaseStemmer$slice_from$LBaseStemmer$S($this, "")) {
return false;
}
}
break;
case 4:
if (! BaseStemmer$slice_from$LBaseStemmer$S($this, "")) {
return false;
}
// Also drop a preceding a_3 suffix ("ig"/"lich") in R2.
v_10 = (($this.limit - $this.cursor) | 0);
lab10 = true;
lab10:
while (lab10 === true) {
lab10 = false;
$this.ket = $this.cursor;
among_var = BaseStemmer$find_among_b$LBaseStemmer$ALAmong$I($this, GermanStemmer.a_3, 2);
if (among_var === 0) {
$this.cursor = (($this.limit - v_10) | 0);
break lab10;
}
$this.bra = cursor$7 = $this.cursor;
if (! (! ($this.I_p2 <= cursor$7) ? false : true)) {
$this.cursor = (($this.limit - v_10) | 0);
break lab10;
}
switch (among_var) {
case 0:
$this.cursor = (($this.limit - v_10) | 0);
break lab10;
case 1:
if (! BaseStemmer$slice_from$LBaseStemmer$S($this, "")) {
return false;
}
break;
}
}
break;
}
}
$this.cursor = (($this.limit - v_4) | 0);
return true;
};
// Alias: static entry point for the standard-suffix rule.
GermanStemmer.r_standard_suffix$LGermanStemmer$ = GermanStemmer$r_standard_suffix$LGermanStemmer$;
// Full stemming pipeline. Each rule routine is attempted exactly once and
// its boolean result is deliberately ignored (the generated one-shot
// `lab: while` wrappers in the original did nothing else); the cursor is
// saved and restored around every call.
GermanStemmer.prototype.stem$ = function () {
    var start = this.cursor;
    GermanStemmer$r_prelude$LGermanStemmer$(this);
    this.cursor = start;
    GermanStemmer$r_mark_regions$LGermanStemmer$(this);
    this.cursor = start;
    // Switch to backward mode: suffix rules scan from the end of the word.
    this.limit_backward = start;
    this.cursor = this.limit;
    GermanStemmer$r_standard_suffix$LGermanStemmer$(this);
    // Back to forward mode for the postlude, restoring the cursor afterwards.
    var resume = this.limit_backward;
    this.cursor = resume;
    GermanStemmer$r_postlude$LGermanStemmer$(this);
    this.cursor = resume;
    return true;
};
GermanStemmer.prototype.stem = GermanStemmer.prototype.stem$;
// Equality is purely type-based: every GermanStemmer compares equal.
GermanStemmer.prototype.equals$X = function (other) {
    return (other instanceof GermanStemmer);
};
GermanStemmer.prototype.equals = GermanStemmer.prototype.equals$X;
// Static variant; the bound instance ($this) does not affect the result.
function GermanStemmer$equals$LGermanStemmer$X($this, other) {
    return (other instanceof GermanStemmer);
};
GermanStemmer.equals$LGermanStemmer$X = GermanStemmer$equals$LGermanStemmer$X;
// Java-style String.hashCode of the class name, folded into the signed
// 32-bit range at every step (`| 0` is the same int32 truncation the
// original achieved with `hash & hash`).
GermanStemmer.prototype.hashCode$ = function () {
    var name = "GermanStemmer";
    var h = 0;
    for (var idx = 0; idx < name.length; idx++) {
        // h = h * 31 + charCode, kept in int32.
        h = ((h << 5) - h + name.charCodeAt(idx)) | 0;
    }
    return (h | 0);
};
GermanStemmer.prototype.hashCode = GermanStemmer.prototype.hashCode$;
// Static variant; $this is unused because the hash depends only on the
// class name literal.
function GermanStemmer$hashCode$LGermanStemmer$($this) {
    var name = "GermanStemmer";
    var h = 0;
    for (var idx = 0; idx < name.length; idx++) {
        h = ((h << 5) - h + name.charCodeAt(idx)) | 0;
    }
    return (h | 0);
};
GermanStemmer.hashCode$LGermanStemmer$ = GermanStemmer$hashCode$LGermanStemmer$;
// Serialization marker mirrored from the Java implementation.
GermanStemmer.serialVersionUID = 1;
$__jsx_lazy_init(GermanStemmer, "methodObject", function () {
return new GermanStemmer();
});
// a_0: umlaut / marked-vowel substitution table (used by the prelude rules).
$__jsx_lazy_init(GermanStemmer, "a_0", function () {
return [ new Among("", -1, 6), new Among("U", 0, 2), new Among("Y", 0, 1), new Among("\u00E4", 0, 3), new Among("\u00F6", 0, 4), new Among("\u00FC", 0, 5) ];
});
// a_1: pass-1 suffixes of r_standard_suffix (e/em/en/ern/er/s/es).
$__jsx_lazy_init(GermanStemmer, "a_1", function () {
return [ new Among("e", -1, 2), new Among("em", -1, 1), new Among("en", -1, 2), new Among("ern", -1, 1), new Among("er", -1, 1), new Among("s", -1, 3), new Among("es", 5, 2) ];
});
// a_2: pass-2 suffixes (en/er/st/est).
$__jsx_lazy_init(GermanStemmer, "a_2", function () {
return [ new Among("en", -1, 1), new Among("er", -1, 1), new Among("st", -1, 2), new Among("est", 2, 1) ];
});
// a_3: follow-up suffixes (ig/lich) checked in pass-3 case 4.
$__jsx_lazy_init(GermanStemmer, "a_3", function () {
return [ new Among("ig", -1, 1), new Among("lich", -1, 1) ];
});
// a_4: pass-3 derivational suffixes.
$__jsx_lazy_init(GermanStemmer, "a_4", function () {
return [ new Among("end", -1, 1), new Among("ig", -1, 2), new Among("ung", -1, 1), new Among("lich", -1, 3), new Among("isch", -1, 2), new Among("ik", -1, 2), new Among("heit", -1, 3), new Among("keit", -1, 4) ];
});
// Character-group bitmaps in Snowball's packed "grouping" encoding.
GermanStemmer.g_v = [ 17, 65, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 32, 8 ];
GermanStemmer.g_s_ending = [ 117, 30, 5 ];
GermanStemmer.g_st_ending = [ 117, 30, 4 ];
// Module map consumed by JSX.require below to resolve the compiled classes.
var $__jsx_classMap = {
"src/among.jsx": {
Among: Among,
Among$SII: Among,
Among$SIIF$LBaseStemmer$B$LBaseStemmer$: Among$0
},
"src/stemmer.jsx": {
Stemmer: Stemmer,
Stemmer$: Stemmer
},
"src/base-stemmer.jsx": {
BaseStemmer: BaseStemmer,
BaseStemmer$: BaseStemmer
},
"src/german-stemmer.jsx": {
GermanStemmer: GermanStemmer,
GermanStemmer$: GermanStemmer
}
};
})(JSX);
// Re-export the compiled classes from the module map into the outer scope.
var Among = JSX.require("src/among.jsx").Among;
var Among$SII = JSX.require("src/among.jsx").Among$SII;
var Stemmer = JSX.require("src/stemmer.jsx").Stemmer;
var BaseStemmer = JSX.require("src/base-stemmer.jsx").BaseStemmer;
var GermanStemmer = JSX.require("src/german-stemmer.jsx").GermanStemmer; | PypiClean |
/BIT_framework-0.0.2-py3-none-any.whl/BIT_DL/pytorch/run/metric/summary.py | import sys
import weakref
from collections import deque
from typing import Any, Deque, Optional, Sequence
import numpy as np
from torch.optim.optimizer import Optimizer
from BIT_DL.pytorch.run.metric.base_metric import StreamingMetric
# Public names exported by this module.
__all__ = [
    "Average",
    "AveragePerplexity",
    "RunningAverage",
    "LR",
]
class Average(StreamingMetric[float, float]):
    r"""Streaming arithmetic mean of a single predicted value.

    Only predicted values are consumed; labels are ignored. The metric
    value is an unbounded :class:`float`. Lower values are considered
    better unless ``higher_is_better`` is overridden.

    Keyword Args:
        pred_name (str): Key under which the model reports the value in its
            returned dictionary. Defaults to ``"loss"``.
        higher_is_better (bool, optional): Overrides the class-level
            :attr:`higher_is_better` flag. Defaults to `False`.
    """
    higher_is_better = False
    requires_label = False

    # Running total of every value seen since the last reset().
    sum: float

    def __init__(self, *, pred_name: str = "loss",
                 higher_is_better: bool = False):
        # Delegation kept so this class's keyword defaults (not the base
        # class's) are what callers get when they omit the keywords.
        super().__init__(pred_name=pred_name, higher_is_better=higher_is_better)

    def reset(self) -> None:
        super().reset()
        self.sum = 0.0

    def add(self, predicted: Sequence[float], _) -> None:
        self.count = self.count + len(predicted)
        self.sum = self.sum + sum(predicted)

    def value(self) -> float:
        # Guard against division by zero before anything has been added.
        return self.sum / self.count if self.count else 0.0
class AveragePerplexity(Average):
    """Streaming average of ``exp(value)`` over the predicted values.

    Each raw value is exponentiated (``np.exp`` is applied to the whole
    sequence at once) before being folded into the running mean — i.e. the
    inputs are presumably log-space losses; TODO confirm with callers.
    """
    # TODO: Create a `WeightedAverage` class that takes `(value, weight)`
    # and subclass that instead.
    higher_is_better = False

    def add(self, predicted: Sequence[float], _) -> None:
        # np.exp returns an ndarray; Average.add only needs len() and sum().
        super().add(np.exp(predicted), _)
class RunningAverage(StreamingMetric[float, float]):
    r"""The running average of a specific predicted value, i.e., the average
    computed over the most recent :attr:`queue_size` values.

    Running average is a :class:`~texar.torch.run.metric.StreamingMetric`,
    requires only predicted values. Running average values are unbounded
    :class:`float` numbers. By default, lower values are better, but the
    behavior can be configured.

    Keyword Args:
        queue_size (int): Size of the queue to keep history values. The running
            average is computed over the most recent :attr:`queue_size` values.
        pred_name (str): Name of the predicted value. This will be used as the
            key to the dictionary returned by the model. Defaults to ``"loss"``.
        higher_is_better (bool, optional): If specified, the
            :attr:`higher_is_better` attribute for the instance is overwritten
            by the specified value.

    Raises:
        ValueError: If ``queue_size`` is not a positive integer.
    """
    higher_is_better = False
    requires_label = False

    # Most recent values, oldest first; never longer than `queue_size`.
    history: Deque[float]
    # Sum of the values currently in `history`, maintained incrementally.
    sum: float

    def __init__(self, queue_size: int, *, pred_name: str = "loss",
                 higher_is_better: bool = False):
        super().__init__(pred_name=pred_name, higher_is_better=higher_is_better)
        if not isinstance(queue_size, int) or queue_size <= 0:
            # BUG FIX: message previously read "position integer".
            raise ValueError("'queue_size' must be a positive integer")
        self.queue_size = queue_size

    def reset(self) -> None:
        super().reset()
        self.sum = 0.0
        self.history = deque()

    def add(self, predicted: Sequence[float], _) -> None:
        # NOTE(review): unlike Average.add, self.count is not updated here;
        # value() relies on len(self.history) instead — confirm count is
        # unused for this metric before depending on it.
        if len(predicted) >= self.queue_size:
            # The new batch alone fills the window: keep only its tail.
            self.history = deque(predicted[-self.queue_size:])
            self.sum = sum(self.history)
        else:
            # Evict just enough old values to make room for the new batch.
            # (range() over a non-positive number is a no-op.)
            overflow = len(predicted) - (self.queue_size - len(self.history))
            for _ in range(overflow):
                self.sum -= self.history.popleft()
            self.sum += sum(predicted)
            self.history.extend(predicted)

    def value(self) -> float:
        if len(self.history) == 0:
            return 0.0
        return self.sum / len(self.history)
class LR(StreamingMetric[Any, float]):
    r"""The learning rate (LR) of the given optimizer. This is not exactly a
    metric, but rather a convenience object to print learning rates in log.

    LR is a :class:`~texar.torch.run.metric.StreamingMetric`, requires neither
    predicted values nor labels. LR values are unbounded :class:`float` numbers,
    with no clear definition of "better". Comparison of two learning rates are
    not meaningful.

    Keyword Args:
        optimizer: The optimizer instance.
        param_group (int, optional): Index of the parameter group to obtain the
            learning rate from. Defaults to 0. You don't need to specify this if
            the optimizer contains only one parameter group (e.g., constructed
            using :python:`optim_class(model.parameters())`.
    """
    requires_pred = False
    requires_label = False

    def __init__(self, optimizer: Optimizer, param_group: int = 0):
        super().__init__(pred_name=None)
        # Weak reference: the metric must not keep the optimizer (and thus
        # the model parameters) alive by itself.
        self.optimizer = weakref.ref(optimizer)
        self.group = param_group

    def add(self, _, __):
        # Nothing to accumulate — value() reads live state from the optimizer.
        pass

    def value(self) -> float:
        # Dereference the weakref; assumes the optimizer still exists
        # (a dead reference would raise here) — TODO confirm lifetime.
        return self.optimizer().param_groups[self.group]['lr']  # type: ignore

    def better(self, cur: float, prev: float) -> Optional[bool]:
        # pylint: disable=unused-argument
        # Always return `None` to indicate values are uncomparable.
        return None

    def __getstate__(self):
        # There's no point in pickling an `LR` metric; just ignore it.
        return None

    def __getnewargs__(self):
        # But when unpickling, we need to make sure we can construct something.
        # This requires passing a dummy `optimizer` to which a weakref can be
        # constructed. In this case, we use an arbitrary built-in class.
        return (int,)
/CrossMap-0.6.6-py3-none-any.whl/cmmodule/wig_reader.py | import sys
import bx.wiggle
from bx.bbi.bigwig_file import BigWigFile
import numpy
import collections
from itertools import groupby
from operator import itemgetter
from cmmodule import BED
def wig_to_bgr2(pos2val):
    '''pos2val is dictionary: position: value. position is 0 based '''
    # value -> list of (shifted) coordinates at which it occurs.
    v2p = collections.defaultdict(list)
    # point_num = 1
    # count = 0
    coord = min(pos2val.keys())
    # coorindate range to value, bedgraph. #[start]=[len,value]
    range2p = {}
    # NOTE(review): iterating .values() assumes the dict was built with
    # contiguous keys inserted in ascending order starting at min(key) —
    # confirm against the callers that construct pos2val. Zero values are
    # skipped (no interval emitted for them).
    for v in list(pos2val.values()):
        coord += 1
        if v != 0:
            v2p[v].append(coord)
    for v in v2p:
        # Group consecutive coordinates: index-minus-value is constant
        # within a run (classic itertools.groupby idiom).
        for k, g in groupby(enumerate(v2p[v]), lambda i_x: i_x[0]-i_x[1]):
            for l in [list(map(itemgetter(1), g))]:
                range2p[l[0]-1] = [len(l), v]
    for i in sorted(range2p):
        # Yields (start, end, value); the -1 offsets mirror wig_to_bgr1's
        # convention — presumably BED-style coordinates, TODO confirm.
        yield((i-1, i + range2p[i][0]-1, range2p[i][1]))
def wig_to_bgr1(pos2val):
    '''Run-length encode a {position: value} dict into bedgraph-style tuples.

    pos2val is dictionary: position: value. position is 0 based.
    Yields (start, end, value) for each maximal run of equal values over
    ascending positions, using the same coordinate offsets as before
    (start = first_position - 1, end = last_position).
    '''
    point_num = 1          # length of the current run of equal values
    count = 0              # number of positions processed so far
    up_bound = 0           # 1 past the last position of the current run
    score = 0              # value of the current run
    for pos in sorted(pos2val):
        count += 1
        if count == 1:  # initialize run state; runs only once
            up_bound = pos + 1
            score = pos2val[pos]
            continue
        if pos2val[pos] == score:
            point_num += 1
            up_bound = pos + 1
        else:
            yield((up_bound - point_num - 1, up_bound - 1, score))
            score = pos2val[pos]
            up_bound = pos + 1
            point_num = 1
    if count:
        # BUG FIX: the final run was previously never emitted — the loop
        # only yielded when the value changed, so the trailing segment
        # (or the whole output for a constant-valued input) was dropped.
        yield((up_bound - point_num - 1, up_bound - 1, score))
def wig_reader(infile, chrom_sizes=None, informat='wiggle', bin_size=2000):
    '''Iterate (chrom, start, end, score) intervals from a signal file.

    infile: either a wiggle or bigwig format file
    chrom_sizes: dict of chrom_name -> size; only needed if format is bigwig
    informat: either 'wiggle' or 'bigwig' (case-insensitive)
    bin_size: tiling window size used when scanning a bigwig file
    return: chrom, position (0-based), value
    '''
    if informat.upper() == 'WIGGLE':
        point_num = 1  # unused in this branch — leftover from the bigwig code
        # count = 0
        for chrom, start, end, strand, score in bx.wiggle.IntervalReader(infile):
            yield (chrom, start, end, score)
    elif informat.upper() == 'BIGWIG':
        bw_obj = BigWigFile(file=open(infile))
        # Scan each chromosome in fixed-size windows.
        for chr_name, chr_size in list(chrom_sizes.items()):
            for chrom, st, end in BED.tillingBed(
                    chrName=chr_name, chrSize=chr_size, stepSize=bin_size):
                sig_list = bw_obj.get_as_array(chrom, st, end)
                if sig_list is None:
                    continue
                sig_list = numpy.nan_to_num(sig_list)
                if numpy.sum(sig_list) == 0:
                    # Skip windows with no signal at all.
                    continue
                low_bound = st
                point_num = 1
                score = sig_list[0]
                # Run-length encode the per-base signal of this window.
                for value in (sig_list[1:]):
                    if value == score:
                        point_num += 1
                    else:
                        yield(
                            (chrom, low_bound, low_bound + point_num, score)
                        )
                        score = value
                        low_bound = low_bound + point_num
                        point_num = 1
                # NOTE(review): the final run of each window is never
                # yielded — looks like a dropped-last-interval bug; confirm
                # against CrossMap's expected output before changing.
    else:
        raise Exception("Unknown format. Must be 'wiggle' or 'bigwig'")
if __name__ == '__main__':
    # CLI smoke test: dump the intervals of a wiggle file as TSV.
    for a in wig_reader(infile=sys.argv[1], informat='wiggle'):
        print('\t'.join(map(str, a)))
/GALFITools-1.0.0.tar.gz/GALFITools-1.0.0/CHANGELOG.rst | =========
Changelog
=========
Version 0.1
===========
- Feature A added
- FIX: nasty bug #1729 fixed
- add your changes here!
Version 0.15.2
===============
GALFITools serves as a comprehensive
library designed to enhance the functionality
of GALFIT, a powerful tool for galaxy
modeling. With GALFITools, you can achieve the following:
- Generate mask images, calculate sky backgrounds, and extract stars from your images.
- Perform essential computations such as deriving effective radius, Sersic index, slope, FWHM, and more.
- Conduct photometry analysis using DS9 regions.
- Create various visual representations including galaxy images, models, and residual images.
- Construct detailed surface brightness profiles.
- Estimate Multi-Gaussian Expansion (MGE) for optimal GALFIT utilization.
- Generate simulated galaxy data images to refine your modeling techniques.
| PypiClean |
/INGInious-0.8.7.tar.gz/INGInious-0.8.7/inginious/frontend/pages/course_admin/task_edit.py |
""" Pages that allow editing of tasks """
import io
import json
import logging
import tempfile
from collections import OrderedDict
from zipfile import ZipFile

import bson
import flask
from flask import redirect
from werkzeug.exceptions import NotFound

from inginious.frontend.tasks import _migrate_from_v_0_6
from inginious.frontend.accessible_time import AccessibleTime
from inginious.frontend.pages.course_admin.utils import INGIniousAdminPage
from inginious.common.base import dict_from_prefix, id_checker
from inginious.common.exceptions import TaskNotFoundException
from inginious.frontend.pages.course_admin.task_edit_file import CourseTaskFiles
from inginious.frontend.tasks import Task
class CourseEditTask(INGIniousAdminPage):
    """Admin page for editing (and deleting) a single task of a course.

    GET renders the edit form; POST validates the submitted form, updates
    the task descriptor and optionally replaces the task files from an
    uploaded zip archive.
    """
    _logger = logging.getLogger("inginious.webapp.task_edit")

    def GET_AUTH(self, courseid, taskid):  # pylint: disable=arguments-differ
        """Render the task-edit form.

        Raises:
            NotFound: if the task id is invalid or the task does not exist.
        """
        if not id_checker(taskid):
            raise NotFound(description=_("Invalid task id"))

        course, __ = self.get_course_and_check_rights(courseid, allow_all_staff=False)

        try:
            task_data = self.task_factory.get_task_descriptor_content(courseid, taskid)
        except TaskNotFoundException:
            raise NotFound()

        # Ensure retrocompatibility
        task_data = _migrate_from_v_0_6(task_data)

        environment_types = self.environment_types
        environments = self.environments

        current_filetype = None
        try:
            current_filetype = self.task_factory.get_task_descriptor_extension(courseid, taskid)
        except Exception:
            # Best effort: fall back to None if the extension cannot be read.
            pass
        available_filetypes = self.task_factory.get_available_task_file_extensions()

        # Let plugins contribute extra tabs to the editor UI.
        additional_tabs = self.plugin_manager.call_hook('task_editor_tab', course=course, taskid=taskid,
                                                        task_data=task_data, template_helper=self.template_helper)

        return self.template_helper.render("course_admin/task_edit.html", course=course, taskid=taskid,
                                           problem_types=self.task_factory.get_problem_types(), task_data=task_data,
                                           environment_types=environment_types, environments=environments,
                                           problemdata=json.dumps(task_data.get('problems', {})),
                                           contains_is_html=self.contains_is_html(task_data),
                                           current_filetype=current_filetype,
                                           available_filetypes=available_filetypes, AccessibleTime=AccessibleTime,
                                           file_list=CourseTaskFiles.get_task_filelist(self.task_factory, courseid, taskid),
                                           additional_tabs=additional_tabs)

    @classmethod
    def contains_is_html(cls, data):
        """Detect (recursively) if the task data has at least one "xyzIsHTML" key."""
        for key, val in data.items():
            if isinstance(key, str) and key.endswith("IsHTML"):
                return True
            if isinstance(val, (OrderedDict, dict)) and cls.contains_is_html(val):
                return True
        return False

    def parse_problem(self, problem_content):
        """Parse a single subproblem dict via its registered problem type.

        Mutates ``problem_content`` by removing the internal ``@order`` key.
        """
        del problem_content["@order"]
        return self.task_factory.get_problem_types().get(problem_content["type"]).parse_problem(problem_content)

    def wipe_task(self, courseid, taskid):
        """Wipe the data associated to the taskid from DB."""
        submissions = self.database.submissions.find({"courseid": courseid, "taskid": taskid})
        for submission in submissions:
            for key in ["input", "archive"]:
                # isinstance instead of type() ==: also robust to subclasses.
                if key in submission and isinstance(submission[key], bson.objectid.ObjectId):
                    self.submission_manager.get_gridfs().delete(submission[key])

        self.database.user_tasks.delete_many({"courseid": courseid, "taskid": taskid})
        self.database.submissions.delete_many({"courseid": courseid, "taskid": taskid})

        self._logger.info("Task %s/%s wiped.", courseid, taskid)

    def POST_AUTH(self, courseid, taskid):  # pylint: disable=arguments-differ
        """Validate the submitted edit form and save the task.

        Returns either a redirect (on delete) or a JSON status payload.

        Raises:
            NotFound: if the course/task id is invalid.
        """
        if not id_checker(taskid) or not id_checker(courseid):
            raise NotFound(description=_("Invalid course/task id"))

        course, __ = self.get_course_and_check_rights(courseid, allow_all_staff=False)
        data = flask.request.form.copy()
        data["task_file"] = flask.request.files.get("task_file")

        # Delete task ?
        if "delete" in data:
            toc = course.get_task_dispenser().get_dispenser_data()
            toc.remove_task(taskid)
            self.course_factory.update_course_descriptor_element(courseid, 'toc', toc.to_structure())
            self.task_factory.delete_task(courseid, taskid)
            # NOTE(review): form values are strings, so a present-but-"false"
            # "wipe" field would still be truthy here; presumably the
            # checkbox is only submitted when checked — confirm in template.
            if data.get("wipe", False):
                self.wipe_task(courseid, taskid)
            return redirect(self.app.get_homepath() + "/admin/"+courseid+"/tasks")

        # Else, parse content
        try:
            try:
                # None (no upload) or an unreadable file both end up as None.
                task_zip = data.get("task_file").read()
            except Exception:
                task_zip = None
            del data["task_file"]

            problems = dict_from_prefix("problem", data)
            environment_type = data.get("environment_type", "")
            environment_parameters = dict_from_prefix("envparams", data).get(environment_type, {})
            environment_id = dict_from_prefix("environment_id", data).get(environment_type, "")

            # Strip prefixed/internal form fields from the descriptor data.
            data = {key: val for key, val in data.items() if
                    not key.startswith("problem")
                    and not key.startswith("envparams")
                    and not key.startswith("environment_id")
                    and not key.startswith("/")
                    and not key == "@action"}

            data["environment_id"] = environment_id  # we do this after having removed all the environment_id[something] entries

            # Determines the task filetype
            if data["@filetype"] not in self.task_factory.get_available_task_file_extensions():
                return json.dumps({"status": "error", "message": _("Invalid file type: {}").format(str(data["@filetype"]))})
            file_ext = data["@filetype"]
            del data["@filetype"]

            # Parse and order the problems (also deletes @order from the result)
            if problems is None:
                data["problems"] = OrderedDict([])
            else:
                data["problems"] = OrderedDict([(key, self.parse_problem(val))
                                                for key, val in sorted(iter(problems.items()), key=lambda x: int(x[1]['@order']))])

            # Categories: comma-separated list; each must be a known course tag.
            course_tags = course.get_tags()
            data['categories'] = [cat for cat in map(str.strip, data['categories'].split(',')) if cat]
            for category in data['categories']:
                if category not in course_tags:
                    return json.dumps({"status": "error", "message": _("Unknown category tag.")})

            # Task environment parameters
            data["environment_parameters"] = environment_parameters

            # Weight
            try:
                data["weight"] = float(data["weight"])
            except Exception:
                return json.dumps({"status": "error", "message": _("Grade weight must be a floating-point number")})
            if data["weight"] < 0:
                return json.dumps({"status": "error", "message": _("Grade weight must be positive!")})

            # Groups (checkbox -> bool)
            if "groups" in data:
                data["groups"] = True if data["groups"] == "true" else False

            # Submission storage: 0 means "store all".
            if "store_all" in data:
                try:
                    stored_submissions = data["stored_submissions"]
                    data["stored_submissions"] = 0 if data["store_all"] == "true" else int(stored_submissions)
                except Exception:
                    return json.dumps(
                        {"status": "error", "message": _("The number of stored submission must be an integer!")})
                if data["store_all"] == "false" and data["stored_submissions"] <= 0:
                    return json.dumps({"status": "error", "message": _("The number of stored submission must be positive!")})
                del data['store_all']

            # Submission limits: -1 encodes "unlimited" for amount/period.
            if "submission_limit" in data:
                if data["submission_limit"] == "none":
                    result = {"amount": -1, "period": -1}
                elif data["submission_limit"] == "hard":
                    try:
                        result = {"amount": int(data["submission_limit_hard"]), "period": -1}
                    except Exception:
                        return json.dumps({"status": "error", "message": _("Invalid submission limit!")})
                else:
                    try:
                        result = {"amount": int(data["submission_limit_soft_0"]), "period": int(data["submission_limit_soft_1"])}
                        if result['period'] < 0:
                            return json.dumps({"status": "error", "message": _("The soft limit period must be positive!")})
                    except Exception:
                        return json.dumps({"status": "error", "message": _("Invalid submission limit!")})

                if data['submission_limit'] != 'none' and result['amount'] < 0:
                    return json.dumps({"status": "error", "message": _("The submission limit must be positive!")})

                del data["submission_limit_hard"]
                del data["submission_limit_soft_0"]
                del data["submission_limit_soft_1"]
                data["submission_limit"] = result

            # Accessible: True / False / "start/soft_end/end" custom window.
            if data["accessible"] == "custom":
                data["accessible"] = "{}/{}/{}".format(data["accessible_start"], data["accessible_soft_end"], data["accessible_end"])
            elif data["accessible"] == "true":
                data["accessible"] = True
            else:
                data["accessible"] = False
            del data["accessible_start"]
            del data["accessible_end"]
            del data["accessible_soft_end"]
            try:
                # Validate only; the string/bool form is what gets stored.
                AccessibleTime(data["accessible"])
            except Exception as message:
                return json.dumps({"status": "error", "message": _("Invalid task accessibility ({})").format(message)})

            # Random inputs
            try:
                data['input_random'] = int(data['input_random'])
            except Exception:
                return json.dumps({"status": "error", "message": _("The number of random inputs must be an integer!")})
            if data['input_random'] < 0:
                return json.dumps({"status": "error", "message": _("The number of random inputs must be positive!")})

            # Checkboxes
            if data.get("responseIsHTML"):
                data["responseIsHTML"] = True

            # Network grading
            data["network_grading"] = "network_grading" in data
        except Exception as message:
            return json.dumps({"status": "error", "message": _("Your browser returned an invalid form ({})").format(message)})

        # Get the course
        try:
            course = self.course_factory.get_course(courseid)
        except Exception:
            return json.dumps({"status": "error", "message": _("Error while reading course's informations")})

        # Get original data
        try:
            orig_data = self.task_factory.get_task_descriptor_content(courseid, taskid)
            data["order"] = orig_data["order"]
        except Exception:
            # New task or unreadable descriptor: no order to preserve.
            pass

        task_fs = self.task_factory.get_task_fs(courseid, taskid)
        task_fs.ensure_exists()

        # Call plugins and return the first error
        plugin_results = self.plugin_manager.call_hook('task_editor_submit', course=course, taskid=taskid,
                                                       task_data=data, task_fs=task_fs)

        # Retrieve the first non-null element
        error = next(filter(None, plugin_results), None)
        if error is not None:
            return error

        try:
            # Dry-run construction to validate the new descriptor.
            Task(course, taskid, data, self.course_factory.get_fs(), self.plugin_manager, self.task_factory.get_problem_types())
        except Exception as message:
            return json.dumps({"status": "error", "message": _("Invalid data: {}").format(str(message))})

        if task_zip:
            try:
                # BUG FIX: task_zip is raw bytes from FileStorage.read();
                # ZipFile needs a path or a file-like object, so wrap the
                # bytes in a BytesIO (previously every zip upload failed
                # here with "Cannot read zip file").
                task_archive = ZipFile(io.BytesIO(task_zip))
            except Exception:
                return json.dumps({"status": "error", "message": _("Cannot read zip file. Files were not modified")})

            with tempfile.TemporaryDirectory() as tmpdirname:
                try:
                    task_archive.extractall(tmpdirname)
                except Exception:
                    return json.dumps(
                        {"status": "error", "message": _("There was a problem while extracting the zip archive. Some files may have been modified")})
                task_fs.copy_to(tmpdirname)

        self.task_factory.delete_all_possible_task_files(courseid, taskid)
        self.task_factory.update_task_descriptor_content(courseid, taskid, data, force_extension=file_ext)

        return json.dumps({"status": "ok"})
/Flask_OAuthlib-0.9.6-py3-none-any.whl/flask_oauthlib/contrib/cache.py |
from cachelib import NullCache, SimpleCache, FileSystemCache
from cachelib import MemcachedCache, RedisCache
class Cache(object):
    """Configurable cache backend for Flask-OAuthlib.

    Reads its settings from the Flask app config, instantiates the matching
    :mod:`cachelib` backend (``'simple'`` -> :meth:`_simple`, etc.) and
    registers it under ``app.extensions['<prefix>_cache']``. Unknown
    attribute access is delegated to the wrapped cache object, so a
    :class:`Cache` can be used wherever a cachelib cache is expected.
    """

    def __init__(self, app, config_prefix='OAUTHLIB', **kwargs):
        self.config_prefix = config_prefix
        self.config = app.config

        # Dispatch on the configured cache type: 'simple' -> self._simple, ...
        cache_type = '_%s' % self._config('type')
        kwargs.update(dict(
            default_timeout=self._config('DEFAULT_TIMEOUT', 100)
        ))

        try:
            self.cache = getattr(self, cache_type)(**kwargs)
        except AttributeError:
            raise RuntimeError(
                '`%s` is not a valid cache type!' % cache_type
            )
        app.extensions[config_prefix.lower() + '_cache'] = self.cache

    def __getattr__(self, key):
        # Fall through to the wrapped cachelib instance for unknown names.
        try:
            return object.__getattribute__(self, key)
        except AttributeError:
            try:
                return getattr(self.cache, key)
            except AttributeError:
                raise AttributeError('No such attribute: %s' % key)

    def _config(self, key, default='error'):
        """Look up ``key`` first with the instance prefix, then with the
        generic ``CACHE_`` prefix. The string ``'error'`` is used as a
        sentinel default meaning the setting is required."""
        key = key.upper()
        prior = '%s_CACHE_%s' % (self.config_prefix, key)
        if prior in self.config:
            return self.config[prior]
        fallback = 'CACHE_%s' % key
        if fallback in self.config:
            return self.config[fallback]
        if default == 'error':
            raise RuntimeError('%s is missing.' % prior)
        return default

    def _null(self, **kwargs):
        """Returns a :class:`NullCache` instance"""
        return NullCache()

    def _simple(self, **kwargs):
        """Returns a :class:`SimpleCache` instance

        .. warning::

            This cache system might not be thread safe. Use with caution.
        """
        kwargs.update(dict(threshold=self._config('threshold', 500)))
        return SimpleCache(**kwargs)

    def _memcache(self, **kwargs):
        """Returns a :class:`MemcachedCache` instance"""
        kwargs.update(dict(
            servers=self._config('MEMCACHED_SERVERS', None),
            key_prefix=self._config('key_prefix', None),
        ))
        return MemcachedCache(**kwargs)

    def _redis(self, **kwargs):
        """Returns a :class:`RedisCache` instance"""
        kwargs.update(dict(
            host=self._config('REDIS_HOST', 'localhost'),
            port=self._config('REDIS_PORT', 6379),
            password=self._config('REDIS_PASSWORD', None),
            db=self._config('REDIS_DB', 0),
            key_prefix=self._config('KEY_PREFIX', None),
        ))
        return RedisCache(**kwargs)

    def _filesystem(self, **kwargs):
        """Returns a :class:`FileSystemCache` instance"""
        kwargs.update(dict(
            threshold=self._config('threshold', 500),
        ))
        return FileSystemCache(self._config('dir', None), **kwargs)
/BittyTax-0.5.1.tar.gz/BittyTax-0.5.1/src/bittytax/conv/parsers/circle.py |
from ...config import config
from ..dataparser import DataParser
from ..exceptions import UnexpectedTypeError
from ..out_record import TransactionOutRecord
WALLET = "Circle"
def _strip_amount(amount):
    """Return the numeric part of a Circle amount string.

    Circle exports amounts like ``"£1,234.56 GBP"``: strip any leading
    currency symbol, then drop everything after the first space (the
    currency-code suffix).
    """
    return amount.strip("£€$").split(" ")[0]


def parse_circle(data_row, parser, **_kwargs):
    """Convert one row of a Circle CSV export into a TransactionOutRecord.

    Sets ``data_row.timestamp`` and ``data_row.t_record`` based on the row's
    "Transaction Type" column; raises UnexpectedTypeError for unknown types.
    """
    row_dict = data_row.row_dict
    data_row.timestamp = DataParser.parse_timestamp(row_dict["Date"])

    t_type = row_dict["Transaction Type"]
    if t_type in (
        "deposit",
        "internal_switch_currency",
        "switch_currency",
    ):
        # Currency conversions (and fiat deposits that convert) are trades.
        data_row.t_record = TransactionOutRecord(
            TransactionOutRecord.TYPE_TRADE,
            data_row.timestamp,
            buy_quantity=_strip_amount(row_dict["To Amount"]),
            buy_asset=row_dict["To Currency"],
            sell_quantity=_strip_amount(row_dict["From Amount"]),
            sell_asset=row_dict["From Currency"],
            wallet=WALLET,
        )
    elif t_type == "spend":
        data_row.t_record = TransactionOutRecord(
            TransactionOutRecord.TYPE_WITHDRAWAL,
            data_row.timestamp,
            sell_quantity=_strip_amount(row_dict["From Amount"]),
            sell_asset=row_dict["From Currency"],
            # Only record a fiat valuation when it is in the configured
            # local currency (note: value keeps any thousands separators,
            # only the symbol is stripped — matching the quantity fields
            # would also drop the currency-code suffix).
            sell_value=row_dict["To Amount"].strip("£€$")
            if row_dict["To Currency"] == config.ccy
            else None,
            wallet=WALLET,
        )
    elif t_type == "receive":
        data_row.t_record = TransactionOutRecord(
            TransactionOutRecord.TYPE_DEPOSIT,
            data_row.timestamp,
            buy_quantity=_strip_amount(row_dict["To Amount"]),
            buy_asset=row_dict["To Currency"],
            buy_value=row_dict["From Amount"].strip("£€$")
            if row_dict["From Currency"] == config.ccy
            else None,
            wallet=WALLET,
        )
    elif t_type == "fork":
        # Chain forks are treated as zero-cost airdrops.
        data_row.t_record = TransactionOutRecord(
            TransactionOutRecord.TYPE_AIRDROP,
            data_row.timestamp,
            buy_quantity=_strip_amount(row_dict["To Amount"]),
            buy_asset=row_dict["To Currency"],
            buy_value=0,
            wallet=WALLET,
        )
    else:
        raise UnexpectedTypeError(
            parser.in_header.index("Transaction Type"),
            "Transaction Type",
            t_type,
        )
# Register the Circle CSV format with the conversion framework. Files are
# matched on this exact header row and routed to parse_circle above.
DataParser(
    DataParser.TYPE_EXCHANGE,
    "Circle",
    [
        "Date",
        "Reference ID",
        "Transaction Type",
        "From Account",
        "To Account",
        "From Amount",
        "From Currency",
        "To Amount",
        "To Currency",
        "Status",
    ],
    worksheet_name="Circle",
    row_handler=parse_circle,
)
/MkNxGn_Essentials-0.1.40.tar.gz/MkNxGn_Essentials-0.1.40/essentials/socket_ops/__init__.py | import struct, socket, threading, json, os, pickle
from essentials import tokening
import essentials
import copy
import time
from hashlib import sha1
import base64
import array
# Emitted once at import time: this module is kept only for backwards
# compatibility; new code should use socket_ops_v2.
print("THIS MODULE IS DEPRECATED. PLEASE USE SOCKET_OPS_V2")
# Sentinels describing how a connected peer talks to us: another Python
# process using the length-prefixed pickle protocol below, or a browser
# speaking the WebSocket protocol.
PYTHONIC = "python based"
WEB_BASED = "web based"
def SocketDownload(sock, data, usage=None):
    """Receive one length-prefixed, pickled message.

    Protocol: a 4-byte big-endian unsigned length header (``">L"``) followed
    by that many bytes of pickle data. Any bytes already buffered by the
    caller are passed in as *data*; leftover bytes after the frame are
    returned so the caller can carry them into the next call.

    :param sock: socket to read from (only used if *data* is incomplete).
    :param data: previously buffered bytes.
    :param usage: optional Data_Storage-style counter with an ``add(n)``
        method; updated with the frame size when provided.
    :returns: ``(message, remaining_bytes)`` on success, or ``None`` if the
        frame could not be unpickled.
    :raises ConnectionError: on any socket/framing failure.
    """
    try:
        header_size = struct.calcsize(">L")
        # Read until the 4-byte length header is complete.
        while len(data) < header_size:
            data += sock.recv(4096)
        msg_size = struct.unpack(">L", data[:header_size])[0]
        data = data[header_size:]
        # Read until the whole payload is buffered.
        while len(data) < msg_size:
            data += sock.recv(4096)
        frame_data = data[:msg_size]
        data = data[msg_size:]
        # Bug fix: usage defaults to None (and Socket_Connector calls this
        # without one), so the counter must only be updated when supplied —
        # previously this raised and surfaced as a bogus ConnectionError.
        if usage is not None:
            usage.add(len(frame_data))
        try:
            xData = pickle.loads(frame_data, fix_imports=True, encoding="bytes")
            return xData, data
        except Exception:
            # Preserve legacy behaviour: log and fall through (returns None).
            print("EOF Error Caught.")
    except Exception:
        raise ConnectionError("Connection Error")
def SocketUpload(sock, data):
    """Pickle *data* and send it as one length-prefixed frame.

    The wire format is a 4-byte big-endian length header (``">L"``)
    followed by protocol-0 pickle bytes; the matching reader is
    :func:`SocketDownload`. Any failure is surfaced as ConnectionError.
    """
    try:
        payload = pickle.dumps(data, 0)
        header = struct.pack(">L", len(payload))
        sock.sendall(header + payload)
    except:
        raise ConnectionError("Connection Error")
def SocketUpload_WebBased(sock, data):
    """Send *data* to a WebSocket peer as a single unmasked text frame.

    Builds a minimal RFC 6455 frame: byte 0 is FIN + text opcode (0x81),
    byte 1 is the payload length. Only payloads shorter than 126 bytes are
    representable with this header form; longer payloads would need the
    extended-length encodings, which this legacy helper does not implement.

    :raises ConnectionError: on any failure, wrapping the original error.
    """
    try:
        if type(data) != type(b""):
            print("WARNING: Web Sockets allow byte like data. Make sure your data is encoded next time.")
            # Bug fix: the encoded result was previously discarded
            # (``data.encode()`` without assignment), so str payloads
            # crashed in bytearray() below and surfaced as ConnectionError.
            data = data.encode()
        resp = bytearray([0b10000001, len(data)])
        resp.extend(bytearray(data))
        sock.send(resp)
    except Exception as e:
        raise ConnectionError("Connection Error: " + str(e))
def HostServer(HOST, PORT, connections=5, SO_REUSEADDR=True):
    """Create, bind and start listening on a TCP server socket.

    A ``PORT`` environment variable, when set, overrides the *PORT*
    argument (PaaS-style port injection). Returns the listening socket.
    """
    PORT = int(os.getenv('PORT', PORT))
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow quick rebinding of the address after a restart.
    if SO_REUSEADDR == True:
        server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server.bind((HOST, PORT))
    server.listen(connections)
    return server
def ConnectorSocket(HOST, PORT):
    """Open and return a TCP client connection to ``(HOST, PORT)``."""
    conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    conn.connect((HOST, PORT))
    return conn
def WebSocket_Decode_Message(data):
    """Unmask one client-to-server WebSocket text frame and return its payload.

    Expects a single RFC 6455 frame with FIN set, text opcode (0x1), the
    mask bit set, and a 7-bit payload length (< 126); the 4-byte masking
    key follows the 2-byte header. Returns the decoded UTF-8 string, or
    ``""`` for an empty payload.
    """
    frame = bytearray(data)
    if len(frame) < 6:
        raise Exception("Error reading data")
    # Header sanity checks: FIN bit, text opcode, mask bit.
    assert (frame[0] >> 7) & 0x1 == 0x1
    assert frame[0] & 0xF == 0x1
    assert (frame[1] >> 7) & 0x1 == 0x1
    length = frame[1] & 0x7F
    if length > 0:
        key = frame[2:6]
        payload = frame[6:6 + length]
        # XOR each payload byte with the masking key, cycling every 4 bytes.
        unmasked = bytearray(b ^ key[i % 4] for i, b in enumerate(payload))
        return unmasked.decode("utf-8")
    return ""
class Transfer_Record(object):
    """Per-connection traffic tally, one counter for each direction."""

    def __init__(self):
        # Independent Data_Storage counters for outbound and inbound bytes.
        # NOTE: the misspelled attribute name "recieved" is part of the
        # public interface (used by Socket_Server_Client) and is kept as-is.
        self.sent = Data_Storage()
        self.recieved = Data_Storage()
class Data_Storage(object):
    """Running tally of transferred data.

    Tracks the total number of payload bytes recorded and how many times
    ``add()`` was called.
    """

    def __init__(self):
        self.bytes = 0    # total payload bytes recorded so far
        self.commits = 0  # number of add() calls

    def add(self, count, type="b"):
        """Record a transfer of *count* bytes.

        Bug fix: previously incremented ``bytes`` by 1 regardless of
        *count*, which made the byte total equal the call count and the
        megabytes/gigabyte properties meaningless. The *type* parameter is
        unused and kept only for backwards compatibility with old callers.
        """
        self.bytes += count
        self.commits += 1

    @property
    def megabytes(self):
        # Decimal (SI) megabytes, matching the original conversion factor.
        return self.bytes * 0.000001

    @property
    def gigabyte(self):
        # Decimal (SI) gigabytes, derived from the megabytes property.
        return self.megabytes * 0.001
class Socket_Server_Host:
    """TCP server that accepts connections and wraps each in a Socket_Server_Client."""

    def __init__(self, HOST, PORT, on_connection_open, on_data_recv, on_question, on_connection_close=False, daemon=True, autorun=True, connections=5, SO_REUSEADDR=True, heart_beats=True, heart_beat_wait=20):
        """Host your own socket server to allow connections to this computer.

        Parameters
        ----------
        HOST (:obj:`str`): IP address to bind this server to.
        PORT (:obj:`int`): Port to host this server on (a ``PORT`` env var overrides it, see HostServer).
        on_connection_open (:obj:`def`): Called with the new Socket_Server_Client on each connection.
        on_data_recv (:obj:`def`): Called as ``f(data, client)`` when data arrives from a connection.
        on_question (:obj:`def`): Called with a Socket_Question when a peer uses ask().
        on_connection_close (:obj:`def`, optional): Called with the client when a connection closes.
        daemon (:obj:`bool`, optional): Whether the accept thread dies with the main program.
        autorun (:obj:`bool`, optional): Start listening immediately from __init__.
        connections (:obj:`int`, optional): listen() backlog size (used when autorun is True).
        heart_beats (:obj:`bool`, optional): Send periodic keep-alive frames to Python clients.
        heart_beat_wait (:obj:`int`, optional): Seconds between keep-alive frames.

        Attributes
        ----------
        running (:obj:`bool`): Whether the server is still running.
        connections (:obj:`dict`): Maps connection IDs to Socket_Server_Client objects.
        on_connection_open / on_connection_close / on_data_recv: the callbacks
            above; may be overwritten, but existing clients keep the old values.
        """
        self.on_connection_open = on_connection_open
        self.on_connection_close = on_connection_close
        self.on_data_recv = on_data_recv
        self.HOST = HOST
        self.PORT = PORT
        self.heart_beats = heart_beats
        self.heart_beat_wait = heart_beat_wait
        self.connections = {}
        self.on_question = on_question
        self.running = False
        if autorun:
            self.Run(connections, daemon, SO_REUSEADDR)

    @property
    def connection_count(self):
        # Number of currently tracked client connections.
        return len(self.connections)

    def Run(self, connections=5, daemon=True, SO_REUSEADDR=True):
        """Bind the listening socket and start the background accept thread.

        Kept separate from __init__ so the server can be shut down,
        reconfigured and restarted.

        Parameters
        ----------
        connections (:obj:`int`): listen() backlog size.

        :rtype: None
        """
        self.server = HostServer(self.HOST, self.PORT, connections, SO_REUSEADDR)
        self.running = True
        self.broker = threading.Thread(target=self.ConnectionBroker, daemon=daemon)
        self.broker.start()

    def ConnectionBroker(self):
        """Background accept loop; wraps each new socket in a client object.

        Internal — runs on the thread started by Run().

        :rtype: None
        """
        while self.running:
            try:
                conn, addr = self.server.accept()
                # A Shutdown() may have happened while blocked in accept().
                if self.running == False:
                    conn.close()
                    return
                conID = tokening.CreateToken(12, self.connections)
                connector = Socket_Server_Client(conn, addr, conID, self.on_data_recv, on_question=self.on_question, on_close=self.close_connection, Heart_Beat=self.heart_beats, Heart_Beat_Wait=self.heart_beat_wait)
                self.connections[conID] = connector
                self.on_connection_open(connector)
                time.sleep(0.05)
            except Exception as e:
                # Any accept failure stops the server and is re-raised on
                # the broker thread.
                self.running = False
                raise e

    def close_connection(self, connection):
        """Internal close hook: notify the owner, then forget the connection.

        :rtype: None
        """
        try:
            self.on_connection_close(connection)
        except:
            # on_connection_close may be False (unset) or may raise.
            pass
        del self.connections[connection.conID]

    def Shutdown(self):
        """Shut down the server and close all connections.

        :rtype: None
        """
        self.running = False
        # Snapshot the keys: each shutdown() mutates self.connections.
        keys = list(self.connections.keys())
        for con in keys:
            try:
                self.connections[con].shutdown()
            except:
                pass
        self.connections = {}
        try:
            self.server.close()
        except:
            pass

    def CloseConnection(self, conID):
        """Shortcut to close one connection by ID.

        Equivalent to ``self.connections[conID].shutdown()``.

        :rtype: None
        """
        self.connections[conID].shutdown()
class Socket_Server_Client:
    """Server-side handle for one accepted connection.

    On construction a sniffer thread inspects the first bytes from the peer
    to decide whether it is a Python client (length-prefixed pickle frames)
    or a browser WebSocket client, completes the WebSocket handshake when
    needed, then starts the receive loop (and, for Python clients, the
    keep-alive loop).
    """

    def __init__(self, sock, addr, conID, on_data, on_question, on_close, Heart_Beat=True, Heart_Beat_Wait=20):
        """Client record created by Socket_Server_Host for each connection.

        :param sock: the accepted socket.
        :param addr: peer address from accept().
        :param conID: unique token identifying this connection.
        :param on_data: called as ``f(data, self)`` for ordinary messages.
        :param on_question: called with a Socket_Question for ask() frames.
        :param on_close: called with ``self`` when the connection shuts down.
        :param Heart_Beat: send periodic keep-alive frames (Python clients only).
        :param Heart_Beat_Wait: seconds between keep-alive frames.
        """
        self.socket = sock
        self.addr = addr
        self.conID = conID
        self.on_data = on_data
        self.on_close = on_close
        self.running = True
        self.meta = {}              # free-form storage for the embedding app
        self.recv_data = b""        # receive buffer carried between frames
        self.data_usage = Transfer_Record()
        self.on_question = on_question
        self.__ask_list__ = {}      # pending ask() tokens -> response frames
        self.created = essentials.TimeStamp()
        self.heart_beat_wait = Heart_Beat_Wait
        threading.Thread(target=self.__detect_client_type__, args=[Heart_Beat]).start()

    def __detect_client_type__(self, Heart_Beat):
        """Sniff the first bytes to classify the peer, then start the workers."""
        # Short timeout: read whatever the peer sends first; a timeout ends
        # the sniffing phase.
        self.socket.settimeout(1)
        while True:
            try:
                # Bug fix: this previously read ``socket.recv(1)`` — the
                # *module*, not the connection — so the sniff loop always
                # broke immediately with an empty buffer and every peer was
                # classified as PYTHONIC without reading its handshake.
                self.recv_data += self.socket.recv(1)
            except:
                break
        if b"permessage-deflate" in self.recv_data:
            # Browser client: perform the RFC 6455 opening handshake.
            self.client_type = WEB_BASED
            GUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
            msg = self.recv_data.decode("utf-8")
            vals = msg.replace("\r", "").split("\n")
            headers = {}
            for item in vals:
                if item != "" and ":" in item:
                    headers[item.split(":")[0]] = item.split(": ")[1]
            self.web_based_headers = headers
            key = headers['Sec-WebSocket-Key']
            # Sec-WebSocket-Accept = base64(sha1(key + GUID)).
            sha1f = sha1()
            sha1f.update(key.encode('utf-8') + GUID.encode('utf-8'))
            response_key = base64.b64encode(sha1f.digest()).decode('utf-8')
            websocket_answer = (
                'HTTP/1.1 101 Switching Protocols',
                'Upgrade: websocket',
                'Connection: Upgrade',
                'Sec-WebSocket-Accept: {key}\r\n\r\n',
            )
            response = '\r\n'.join(websocket_answer).format(key=response_key)
            self.socket.send(response.encode('utf-8'))
        else:
            self.client_type = PYTHONIC
        threading.Thread(target=self.__data_rev__, daemon=True).start()
        if Heart_Beat == True and self.client_type == PYTHONIC:
            # Back to blocking reads for the framed pickle protocol.
            self.socket.settimeout(None)
            threading.Thread(target=self.__heart_beat__, daemon=True).start()

    def __heart_beat__(self):
        """Periodically send a keep-alive frame until the connection closes."""
        while self.running:
            self.send({"heart_beat_function": True})
            time.sleep(self.heart_beat_wait)

    def shutdown(self):
        """Shut down this connection and run the on_close event.

        Also removes it from the owning server via on_close.

        :rtype: None
        """
        try:
            self.on_close(self)
        except:
            pass
        self.running = False
        try:
            self.socket.shutdown(socket.SHUT_RDWR)
        except:
            pass
        try:
            self.socket.close()
        except:
            pass

    def send(self, data):
        """Send *data* to the remote connection.

        Python peers get a pickled frame; WebSocket peers get a text frame.
        Any send failure closes the connection.

        :rtype: None
        """
        if self.running == False:
            raise ConnectionResetError("No Connection")
        if self.client_type == PYTHONIC:
            try:
                SocketUpload(self.socket, data)
            except:
                self.shutdown()
        elif self.client_type == WEB_BASED:
            try:
                SocketUpload_WebBased(self.socket, data)
            except:
                self.shutdown()

    def ask(self, data, timeout=5):
        """Send *data* as a question and block until the peer answers.

        :param timeout: seconds to wait before raising TimeoutError.
        :returns: the peer's answer payload (Python clients only).
        """
        if self.client_type == WEB_BASED:
            print("WARNING: ask for Web Based Clients is not currently supported.")
            return False
        tok = essentials.CreateToken(20, self.__ask_list__)
        self.__ask_list__[tok] = False
        self.send({"function_ask_question": tok, "data": data})
        # Busy-wait: __data_rev__ fills __ask_list__[tok] when the response
        # frame arrives.
        while self.__ask_list__[tok] == False:
            time.sleep(0.01)
            timeout -= 0.01
            if timeout <= 0:
                raise TimeoutError("No response within time.")
        copyed = copy.deepcopy(self.__ask_list__[tok])
        del self.__ask_list__[tok]
        return copyed['data']

    def __data_rev__(self):
        """Background receive loop; dispatches frames to the right callback.

        Internal — started by __detect_client_type__.

        :rtype: None
        """
        if self.client_type == PYTHONIC:
            while self.running:
                try:
                    data, temp = SocketDownload(self.socket, self.recv_data, self.data_usage.recieved)
                    self.recv_data = temp
                except:
                    self.shutdown()
                    return
                if type(data) == type({}) and 'heart_beat_function' in data:
                    # Keep-alive frames are swallowed silently.
                    pass
                elif type(data) == type({}) and 'function_ask_response' in data:
                    # Answer to one of our ask() calls: park it for ask().
                    self.__ask_list__[data['function_ask_response']] = data
                elif type(data) == type({}) and 'function_ask_question' in data:
                    threading.Thread(target=self.on_question, args=[Socket_Question(data['data'], self, data['function_ask_question'])], daemon=True).start()
                else:
                    threading.Thread(target=self.on_data, args=[data, self], daemon=True).start()
                time.sleep(0.05)
        elif self.client_type == WEB_BASED:
            # NOTE(review): the socket still has the 1 s timeout from the
            # sniffing phase here; reads rely on it to terminate — confirm
            # before changing the timeout handling.
            while self.running:
                msg = b""
                conti = True
                while conti:
                    buffer = b""
                    while b"\n" not in buffer:
                        try:
                            buffer += self.socket.recv(1)
                        except:
                            conti = False
                            break
                    msg += buffer
                if msg != b"":
                    self.data_usage.recieved.add(len(msg))
                    threading.Thread(target=self.on_data, args=[WebSocket_Decode_Message(msg), self], daemon=True).start()
class Socket_Question(object):
    """A question received from a remote peer, with a handle to reply."""

    def __init__(self, data, client, tok):
        self.data = data             # payload the peer sent with the question
        self.questioner = client     # connection object exposing .send()
        self.__answer_token__ = tok  # correlation token expected in the reply

    def answer(self, data):
        """Send *data* back to the asker, tagged with the original token."""
        reply = {"function_ask_response": self.__answer_token__, "data": data}
        self.questioner.send(reply)
class Socket_Connector:
    """Client side of the socket protocol: connects out to a Socket_Server_Host."""

    def __init__(self, HOST, PORT, on_data_recv, on_question, on_connection_close, Heart_Beat=True, Heart_Beat_Wait=10, legacy=False, legacy_buffer_size=1024):
        """Connect to a socket server hosted on another computer.

        Parameters
        ----------
        HOST (:obj:`str`): The server's IP address.
        PORT (:obj:`int`): The server's port.
        on_data_recv (:obj:`def`): Called with each ordinary message received.
        on_question (:obj:`def`): Called with a Socket_Question when the server asks.
        on_connection_close (:obj:`def`): Called with ``self`` when the connection closes.
        Heart_Beat (:obj:`bool`, optional): Send periodic keep-alive frames.
        Heart_Beat_Wait (:obj:`int`, optional): Seconds between keep-alives.
        legacy (:obj:`bool`, optional): Raw-bytes mode — no framing/pickling.
        legacy_buffer_size (:obj:`int`, optional): recv() size in legacy mode.

        Attributes
        ----------
        running (:obj:`bool`): Whether the connection is still up.
        on_connection_close / on_data_recv: the callbacks above; may be overwritten.
        """
        self.running = True
        self.HOST = HOST
        self.legacy = legacy
        self.legacy_buffer_size = legacy_buffer_size
        self.PORT = PORT
        self.recv_data = b""       # receive buffer carried between frames
        self.__ask_list__ = {}     # pending ask() tokens -> response frames
        self.on_question = on_question
        self.on_connection_close = on_connection_close
        self.socket = ConnectorSocket(HOST, PORT)
        self.on_data_recv = on_data_recv
        threading.Thread(target=self.__data_rev__, daemon=True).start()
        if Heart_Beat == True:
            self.heart_beat_wait = Heart_Beat_Wait
            threading.Thread(target=self.__heart_beat__, daemon=True).start()

    def __heart_beat__(self):
        """Periodically send a keep-alive frame until the connection closes."""
        while self.running:
            self.send({"heart_beat_function": True})
            time.sleep(self.heart_beat_wait)

    def ask(self, data, timeout=5):
        """Send *data* as a question and block until the server answers.

        :param timeout: seconds to wait before raising TimeoutError.
        :returns: the server's answer payload (None in legacy mode).
        """
        if self.legacy:
            print("Can't ask questions to legacy connections")
            return
        tok = essentials.CreateToken(20, self.__ask_list__)
        self.__ask_list__[tok] = False
        self.send({"function_ask_question": tok, "data": data})
        # Busy-wait: __data_rev__ fills __ask_list__[tok] on response.
        while self.__ask_list__[tok] == False:
            time.sleep(0.01)
            timeout -= 0.01
            if timeout <= 0:
                raise TimeoutError("No response within time.")
        copyed = copy.deepcopy(self.__ask_list__[tok])
        del self.__ask_list__[tok]
        return copyed['data']

    def send(self, data):
        """Send data to the remote connection.

        Legacy mode sends raw bytes; otherwise a pickled frame. Failures
        are logged and close the connection.

        :rtype: None
        """
        if self.running == False:
            raise ConnectionResetError("No Connection")
        try:
            if self.legacy:
                self.socket.sendall(data)
            else:
                SocketUpload(self.socket, data)
        except Exception as e:
            print(e)
            self.shutdown()

    def shutdown(self):
        """Shut down this connection and run the on_close event.

        :rtype: None
        """
        self.running = False
        self.on_connection_close(self)
        # (Removed a leftover debug print("SD") that polluted stdout on
        # every disconnect.)
        try:
            self.socket.shutdown(socket.SHUT_RDWR)
        except:
            pass
        try:
            self.socket.close()
        except:
            pass

    def __data_rev__(self):
        """Background receive loop; dispatches frames to the right callback.

        Internal — started from __init__.

        :rtype: None
        """
        while self.running:
            if self.legacy:
                # Raw passthrough: hand every chunk straight to the callback.
                self.on_data_recv(self.socket.recv(self.legacy_buffer_size))
            else:
                try:
                    data, temp = SocketDownload(self.socket, self.recv_data)
                    self.recv_data = temp
                except:
                    self.shutdown()
                    return
                if type(data) == type({}) and 'heart_beat_function' in data:
                    # Keep-alive frames are swallowed silently.
                    pass
                elif type(data) == type({}) and 'function_ask_response' in data:
                    self.__ask_list__[data['function_ask_response']] = data
                elif type(data) == type({}) and 'function_ask_question' in data:
                    self.on_question(Socket_Question(data['data'], self, data['function_ask_question']))
                else:
                    self.on_data_recv(data)
"""
The following is intended to record package loads.
Nothing about your person, location, or IP Address is recorded.
This task:
Runs in the background,
Keeps a maximum open time of 3 seconds,
Won't run if there is no internet.
Won't keep your program running if your program finishes before it does.
Boosts my moral to keep this package free and up to date.
This specific placement is to determine how many programs are using this script.
If you wish to not be apart of this program, please delete these next lines or change true to false.
"""
# Import-time, best-effort usage ping on a daemon thread (see notice above).
# It is opt-out: flip the condition to False to disable it entirely.
if True:
    try:
        import threading
        def bg():
            try:
                import requests
                # 3-second cap so a dead endpoint cannot stall importers.
                response = requests.get("https://analyticscom.mknxgn.pro/rpg/mknxgn_essentials_SOP_V1", timeout=3)
                # If you ever feel like deleting this, uncomment the line below...
                #print(response.text)
            except:
                pass
        threading.Thread(target=bg, daemon=True).start()
    except:
        pass
/OctoBot-Tentacles-Manager-2.9.4.tar.gz/OctoBot-Tentacles-Manager-2.9.4/octobot_tentacles_manager/uploaders/nexus_uploader.py | import os
import aiohttp
import octobot_tentacles_manager.uploaders.uploader as uploader
class NexusUploader(uploader.Uploader):
    """Uploads files and folders to a Nexus repository via HTTP PUT.

    Credentials and the base URL are read from the NEXUS_USERNAME,
    NEXUS_PASSWORD and NEXUS_URL environment variables.
    """

    ENV_NEXUS_USERNAME = "NEXUS_USERNAME"
    ENV_NEXUS_PASSWORD = "NEXUS_PASSWORD"
    ENV_NEXUS_URL = "NEXUS_URL"
    # HTTP statuses Nexus returns for a successful PUT.
    NEXUS_EXPECTED_RESPONSE_STATUS = [200, 201]

    def __init__(self):
        super().__init__()
        self.aiohttp_session: aiohttp.ClientSession = None
        self.nexus_username: str = os.getenv(NexusUploader.ENV_NEXUS_USERNAME, None)
        self.nexus_password: str = os.getenv(NexusUploader.ENV_NEXUS_PASSWORD, None)
        self.nexus_url: str = os.getenv(NexusUploader.ENV_NEXUS_URL, None)
        if None in [self.nexus_username, self.nexus_password, self.nexus_url]:
            raise TypeError("Some nexus environment variables are missing, "
                            "please ensure that NEXUS_USERNAME, NEXUS_PASSWORD and NEXUS_URL are defined.")

    async def upload_file(self, upload_path: str, file_path: str, destination_file_name: str = None) -> int:
        """
        Upload file on nexus wrapper
        :param upload_path: the upload path, the internal path after self.nexus_url
        :param file_path: the file local path
        :param destination_file_name: the file name on nexus (optional : default file_path basename)
        :return: the result of _upload (0 on success, 1 on failure)
        """
        dest_file_name: str = destination_file_name if destination_file_name is not None else os.path.basename(
            file_path)
        upload_file_url: str = f"{self.nexus_url}/{upload_path}/{dest_file_name}"
        self.logger.info(f"Uploading {file_path} to nexus at {upload_file_url}...")
        return await self._upload(file_url_on_nexus=upload_file_url, local_file_path=file_path)

    async def upload_folder(self, upload_path: str, folder_path: str, destination_folder_name: str = None) -> int:
        """
        Upload folder content on nexus wrapper
        :param upload_path: the upload path, the internal path after self.nexus_url
        :param folder_path: the folder local path
        :param destination_folder_name: the folder name on nexus (optional : default folder_path basename)
        :return: the sum of all of _upload returns (the number of failed uploads)
        """
        error_count: int = 0
        dest_folder_name: str = destination_folder_name \
            if destination_folder_name is not None else os.path.basename(folder_path)
        upload_folder_url: str = f"{self.nexus_url}/{upload_path}/{dest_folder_name}"
        # NOTE(review): os.listdir also yields sub-directories; uploading one
        # will fail and be counted as an error — confirm folders are flat.
        for file_path in os.listdir(folder_path):
            upload_file_url = f"{upload_folder_url}/{file_path}"
            self.logger.debug(f"Uploading {file_path} to nexus at {upload_file_url}...")
            error_count += await self._upload(file_url_on_nexus=upload_file_url,
                                              local_file_path=os.path.join(folder_path, file_path))
        return error_count

    async def create_session(self) -> None:
        """
        Create aiohttp session if necessary
        :return: None
        """
        if self.aiohttp_session is None:
            self.aiohttp_session = aiohttp.ClientSession()

    async def close_session(self) -> None:
        """
        Close aiohttp session if necessary
        :return: None
        """
        if self.aiohttp_session is not None:
            await self.aiohttp_session.close()

    async def _upload(self, file_url_on_nexus: str, local_file_path: str) -> int:
        """
        Upload a file on nexus
        :param file_url_on_nexus: the complete upload url
        :param local_file_path: the local file path
        :return: 0 if upload succeed else 1
        """
        await self.create_session()
        with open(local_file_path, 'rb') as file_content:
            # Fix: use the response as an async context manager so the
            # underlying connection is always released back to the session
            # pool — previously the success path never released it.
            async with self.aiohttp_session.request(
                    'put',
                    file_url_on_nexus,
                    data=file_content,
                    auth=aiohttp.BasicAuth(self.nexus_username,
                                           self.nexus_password)) as response:
                if response.status not in NexusUploader.NEXUS_EXPECTED_RESPONSE_STATUS:
                    self.logger.error(f"Failed to upload file on nexus "
                                      f"(status code {response.status}) : {await response.text()}")
                    return 1
        return 0
/FamcyDev-0.3.71-py3-none-any.whl/Famcy/bower_components/bootstrap-table/src/locale/bootstrap-table-da-DK.js | $.fn.bootstrapTable.locales['da-DK'] = $.fn.bootstrapTable.locales['da'] = {
formatCopyRows () {
return 'Copy Rows'
},
formatPrint () {
return 'Print'
},
formatLoadingMessage () {
return 'Indlæser, vent venligst'
},
formatRecordsPerPage (pageNumber) {
return `${pageNumber} poster pr side`
},
formatShowingRows (pageFrom, pageTo, totalRows, totalNotFiltered) {
if (totalNotFiltered !== undefined && totalNotFiltered > 0 && totalNotFiltered > totalRows) {
return `Viser ${pageFrom} til ${pageTo} af ${totalRows} række${(totalRows > 1) ? 'r' : ''} (filtered from ${totalNotFiltered} total rows)`
}
return `Viser ${pageFrom} til ${pageTo} af ${totalRows} række${(totalRows > 1) ? 'r' : ''}`
},
formatSRPaginationPreText () {
return 'previous page'
},
formatSRPaginationPageText (page) {
return `to page ${page}`
},
formatSRPaginationNextText () {
return 'next page'
},
formatDetailPagination (totalRows) {
return `Viser ${totalRows} række${(totalRows > 1) ? 'r' : ''}`
},
formatClearSearch () {
return 'Ryd filtre'
},
formatSearch () {
return 'Søg'
},
formatNoMatches () {
return 'Ingen poster fundet'
},
formatPaginationSwitch () {
return 'Skjul/vis nummerering'
},
formatPaginationSwitchDown () {
return 'Show pagination'
},
formatPaginationSwitchUp () {
return 'Hide pagination'
},
formatRefresh () {
return 'Opdater'
},
formatToggle () {
return 'Skift'
},
formatToggleOn () {
return 'Show card view'
},
formatToggleOff () {
return 'Hide card view'
},
formatColumns () {
return 'Kolonner'
},
formatColumnsToggleAll () {
return 'Toggle all'
},
formatFullscreen () {
return 'Fullscreen'
},
formatAllRows () {
return 'Alle'
},
formatAutoRefresh () {
return 'Auto Refresh'
},
formatExport () {
return 'Eksporter'
},
formatJumpTo () {
return 'GO'
},
formatAdvancedSearch () {
return 'Advanced search'
},
formatAdvancedCloseButton () {
return 'Close'
},
formatFilterControlSwitch () {
return 'Hide/Show controls'
},
formatFilterControlSwitchHide () {
return 'Hide controls'
},
formatFilterControlSwitchShow () {
return 'Show controls'
}
}
$.extend($.fn.bootstrapTable.defaults, $.fn.bootstrapTable.locales['da-DK']) | PypiClean |
/Mathics3-6.0.2.tar.gz/Mathics3-6.0.2/mathics/builtin/atomic/atomic.py | from mathics.builtin.base import Builtin, Test
from mathics.core.atoms import Atom
class AtomQ(Test):
    # NOTE(review): the docstring below doubles as user documentation AND as
    # doctest input for the Mathics test suite (the '>>' / '#>' lines are
    # executed) — do not edit it without re-running those tests.
    """
    <url>:WMA link:https://reference.wolfram.com/language/ref/AtomQ.html</url>
    <dl>
      <dt>'AtomQ[$expr$]'
      <dd>returns 'True' if $expr$ is an expression which cannot be divided into \
        subexpressions, or 'False' otherwise.
        An expression that cannot be divided into subparts is called called an "atom".
    </dl>
    Strings and expressions that produce strings are atoms:
    >> Map[AtomQ, {"x", "x" <> "y", StringReverse["live"]}]
     = {True, True, True}
    Numeric literals are atoms:
    >> Map[AtomQ, {2, 2.1, 1/2, 2 + I, 2^^101}]
     = {True, True, True, True, True}
    So are Mathematical Constants:
    >> Map[AtomQ, {Pi, E, I, Degree}]
     = {True, True, True, True}
    A 'Symbol' not bound to a value is an atom too:
    >> AtomQ[x]
     = True
    On the other hand, expressions with more than one 'Part' after evaluation, even those resulting in numeric values, aren't atoms:
    >> AtomQ[2 + Pi]
     = False
    Similarly any compound 'Expression', even lists of literals, aren't atoms:
    >> Map[AtomQ, {{}, {1}, {2, 3, 4}}]
     = {False, False, False}
    Note that evaluation or the binding of "x" to an expression is taken into account:
    >> x = 2 + Pi; AtomQ[x]
     = False
    Again, note that the expression evaluation to a number occurs before 'AtomQ' evaluated:
    >> AtomQ[2 + 3.1415]
     = True
    #> Clear[x]
    """

    summary_text = "test whether an expression is an atom"

    def test(self, expr) -> bool:
        # An expression is atomic iff it is an instance of the core Atom base
        # class (covers Integer, Real, String, Symbol, ...). The Test base
        # class turns this predicate into the AtomQ builtin.
        return isinstance(expr, Atom)
class Head(Builtin):
    # NOTE(review): the class docstring is user documentation plus executable
    # doctests ('>>' lines) for the Mathics test suite — edit with care.
    """
    <url>:WMA link:https://reference.wolfram.com/language/ref/Head.html</url>
    <dl>
      <dt>'Head[$expr$]'
      <dd>returns the head of the expression or atom $expr$.
    </dl>

    >> Head[a * b]
     = Times
    >> Head[6]
     = Integer
    >> Head[x]
     = Symbol
    """

    summary_text = "find the head of any expression, including an atom"

    def eval(self, expr, evaluation):
        # The docstring below is NOT documentation: Mathics parses it as the
        # pattern this eval rule matches. It must stay exactly "Head[expr_]".
        "Head[expr_]"
        return expr.get_head()
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dijit/place.js.uncompressed.js | define("dijit/place", [
"dojo/_base/array", // array.forEach array.map array.some
"dojo/dom-geometry", // domGeometry.getMarginBox domGeometry.position
"dojo/dom-style", // domStyle.getComputedStyle
"dojo/_base/kernel", // kernel.deprecated
"dojo/_base/window", // win.body
"dojo/window", // winUtils.getBox
"." // dijit (defining dijit.place to match API doc)
], function(array, domGeometry, domStyle, kernel, win, winUtils, dijit){
// module:
// dijit/place
// summary:
// Code to place a popup relative to another node
function _place(/*DomNode*/ node, choices, layoutNode, aroundNodeCoords){
// summary:
// Given a list of spots to put node, put it at the first spot where it fits,
// of if it doesn't fit anywhere then the place with the least overflow
// choices: Array
// Array of elements like: {corner: 'TL', pos: {x: 10, y: 20} }
// Above example says to put the top-left corner of the node at (10,20)
// layoutNode: Function(node, aroundNodeCorner, nodeCorner, size)
// for things like tooltip, they are displayed differently (and have different dimensions)
// based on their orientation relative to the parent. This adjusts the popup based on orientation.
// It also passes in the available size for the popup, which is useful for tooltips to
// tell them that their width is limited to a certain amount. layoutNode() may return a value expressing
// how much the popup had to be modified to fit into the available space. This is used to determine
// what the best placement is.
// aroundNodeCoords: Object
// Size of aroundNode, ex: {w: 200, h: 50}
// get {x: 10, y: 10, w: 100, h:100} type obj representing position of
// viewport over document
var view = winUtils.getBox();
// This won't work if the node is inside a <div style="position: relative">,
// so reattach it to win.doc.body. (Otherwise, the positioning will be wrong
// and also it might get cutoff)
if(!node.parentNode || String(node.parentNode.tagName).toLowerCase() != "body"){
win.body().appendChild(node);
}
var best = null;
array.some(choices, function(choice){
var corner = choice.corner;
var pos = choice.pos;
var overflow = 0;
// calculate amount of space available given specified position of node
var spaceAvailable = {
w: {
'L': view.l + view.w - pos.x,
'R': pos.x - view.l,
'M': view.w
}[corner.charAt(1)],
h: {
'T': view.t + view.h - pos.y,
'B': pos.y - view.t,
'M': view.h
}[corner.charAt(0)]
};
// configure node to be displayed in given position relative to button
// (need to do this in order to get an accurate size for the node, because
// a tooltip's size changes based on position, due to triangle)
if(layoutNode){
var res = layoutNode(node, choice.aroundCorner, corner, spaceAvailable, aroundNodeCoords);
overflow = typeof res == "undefined" ? 0 : res;
}
// get node's size
var style = node.style;
var oldDisplay = style.display;
var oldVis = style.visibility;
if(style.display == "none"){
style.visibility = "hidden";
style.display = "";
}
var mb = domGeometry. getMarginBox(node);
style.display = oldDisplay;
style.visibility = oldVis;
// coordinates and size of node with specified corner placed at pos,
// and clipped by viewport
var
startXpos = {
'L': pos.x,
'R': pos.x - mb.w,
'M': Math.max(view.l, Math.min(view.l + view.w, pos.x + (mb.w >> 1)) - mb.w) // M orientation is more flexible
}[corner.charAt(1)],
startYpos = {
'T': pos.y,
'B': pos.y - mb.h,
'M': Math.max(view.t, Math.min(view.t + view.h, pos.y + (mb.h >> 1)) - mb.h)
}[corner.charAt(0)],
startX = Math.max(view.l, startXpos),
startY = Math.max(view.t, startYpos),
endX = Math.min(view.l + view.w, startXpos + mb.w),
endY = Math.min(view.t + view.h, startYpos + mb.h),
width = endX - startX,
height = endY - startY;
overflow += (mb.w - width) + (mb.h - height);
if(best == null || overflow < best.overflow){
best = {
corner: corner,
aroundCorner: choice.aroundCorner,
x: startX,
y: startY,
w: width,
h: height,
overflow: overflow,
spaceAvailable: spaceAvailable
};
}
return !overflow;
});
// In case the best position is not the last one we checked, need to call
// layoutNode() again.
if(best.overflow && layoutNode){
layoutNode(node, best.aroundCorner, best.corner, best.spaceAvailable, aroundNodeCoords);
}
// And then position the node. Do this last, after the layoutNode() above
// has sized the node, due to browser quirks when the viewport is scrolled
// (specifically that a Tooltip will shrink to fit as though the window was
// scrolled to the left).
//
// In RTL mode, set style.right rather than style.left so in the common case,
// window resizes move the popup along with the aroundNode.
var l = domGeometry.isBodyLtr(),
s = node.style;
s.top = best.y + "px";
s[l ? "left" : "right"] = (l ? best.x : view.w - best.x - best.w) + "px";
s[l ? "right" : "left"] = "auto"; // needed for FF or else tooltip goes to far left
return best;
}
/*=====
dijit.place.__Position = function(){
// x: Integer
// horizontal coordinate in pixels, relative to document body
// y: Integer
// vertical coordinate in pixels, relative to document body
this.x = x;
this.y = y;
};
=====*/
/*=====
dijit.place.__Rectangle = function(){
// x: Integer
// horizontal offset in pixels, relative to document body
// y: Integer
// vertical offset in pixels, relative to document body
// w: Integer
// width in pixels. Can also be specified as "width" for backwards-compatibility.
// h: Integer
// height in pixels. Can also be specified as "height" from backwards-compatibility.
this.x = x;
this.y = y;
this.w = w;
this.h = h;
};
=====*/
return (dijit.place = {
// summary:
// Code to place a DOMNode relative to another DOMNode.
// Load using require(["dijit/place"], function(place){ ... }).
at: function(node, pos, corners, padding){
    // summary:
    //      Positions one of the node's corners at specified position
    //      such that node is fully visible in viewport.
    // description:
    //      NOTE: node is assumed to be absolutely or relatively positioned.
    // node: DOMNode
    //      The node to position
    // pos: dijit.place.__Position
    //      Object like {x: 10, y: 20}
    // corners: String[]
    //      Array of Strings representing order to try corners in, like ["TR", "BL"].
    //      Possible values are:
    //      * "BL" - bottom left
    //      * "BR" - bottom right
    //      * "TL" - top left
    //      * "TR" - top right
    // padding: dijit.place.__Position?
    //      optional param to set padding, to put some buffer around the element you want to position.
    // example:
    //      Try to place node's top right corner at (10,20).
    //      If that makes node go (partially) off screen, then try placing
    //      bottom left corner at (10,20).
    //  |   place(node, {x: 10, y: 20}, ["TR", "BL"])
    var choices = array.map(corners, function(corner){
        var c = { corner: corner, pos: {x:pos.x,y:pos.y} };
        if(padding){
            // Shift the target point inward: pad rightwards/downwards of a
            // left ("L") / top ("T") corner, and mirror the sign for
            // right/bottom corners, so the buffer always grows away from
            // the node.
            c.pos.x += corner.charAt(1) == 'L' ? padding.x : -padding.x;
            c.pos.y += corner.charAt(0) == 'T' ? padding.y : -padding.y;
        }
        return c;
    });
    return _place(node, choices);
},
around: function(
    /*DomNode*/ node,
    /*DomNode || dijit.place.__Rectangle*/ anchor,
    /*String[]*/ positions,
    /*Boolean*/ leftToRight,
    /*Function?*/ layoutNode){
    // summary:
    //      Position node adjacent or kitty-corner to anchor
    //      such that it's fully visible in viewport.
    //
    // description:
    //      Place node such that corner of node touches a corner of
    //      aroundNode, and that node is fully visible.
    //
    // anchor:
    //      Either a DOMNode or a __Rectangle (object with x, y, w, h).
    //
    // positions:
    //      Ordered list of positions to try matching up.
    //      * before: places drop down to the left of the anchor node/widget, or to the right in the case
    //          of RTL scripts like Hebrew and Arabic; aligns either the top of the drop down
    //          with the top of the anchor, or the bottom of the drop down with bottom of the anchor.
    //      * after: places drop down to the right of the anchor node/widget, or to the left in the case
    //          of RTL scripts like Hebrew and Arabic; aligns either the top of the drop down
    //          with the top of the anchor, or the bottom of the drop down with bottom of the anchor.
    //      * before-centered: centers drop down to the left of the anchor node/widget, or to the right
    //          in the case of RTL scripts like Hebrew and Arabic
    //      * after-centered: centers drop down to the right of the anchor node/widget, or to the left
    //          in the case of RTL scripts like Hebrew and Arabic
    //      * above-centered: drop down is centered above anchor node
    //      * above: drop down goes above anchor node, left sides aligned
    //      * above-alt: drop down goes above anchor node, right sides aligned
    //      * below-centered: drop down is centered below anchor node
    //      * below: drop down goes below anchor node, left sides aligned
    //      * below-alt: drop down goes below anchor node, right sides aligned
    //
    // layoutNode: Function(node, aroundNodeCorner, nodeCorner)
    //      For things like tooltip, they are displayed differently (and have different dimensions)
    //      based on their orientation relative to the parent. This adjusts the popup based on orientation.
    //
    // leftToRight:
    //      True if widget is LTR, false if widget is RTL. Affects the behavior of "above" and "below"
    //      positions slightly.
    //
    // example:
    //  |   placeAroundNode(node, aroundNode, {'BL':'TL', 'TR':'BR'});
    //      (legacy corner-map syntax, still accepted via the default case below)
    //      This will try to position node such that node's top-left corner is at the same position
    //      as the bottom left corner of the aroundNode (ie, put node below
    //      aroundNode, with left edges aligned). If that fails it will try to put
    //      the bottom-right corner of node where the top right corner of aroundNode is
    //      (ie, put node above aroundNode, with right edges aligned)
    //
    // if around is a DOMNode (or DOMNode id), convert to coordinates
    var aroundNodePos = (typeof anchor == "string" || "offsetWidth" in anchor)
        ? domGeometry.position(anchor, true)
        : anchor;
    // Adjust anchor positioning for the case that a parent node has overflow
    // hidden, therefore causing the anchor not to be completely visible.
    if(anchor.parentNode){
        var parent = anchor.parentNode;
        while(parent && parent.nodeType == 1 && parent.nodeName != "BODY"){ //ignoring the body will help performance
            var parentPos = domGeometry.position(parent, true);
            var parentStyleOverflow = domStyle.getComputedStyle(parent).overflow;
            if(parentStyleOverflow == "hidden" || parentStyleOverflow == "auto" || parentStyleOverflow == "scroll"){
                // clip the anchor rectangle to the clipping/scrolling parent
                var bottomYCoord = Math.min(aroundNodePos.y + aroundNodePos.h, parentPos.y + parentPos.h);
                var rightXCoord = Math.min(aroundNodePos.x + aroundNodePos.w, parentPos.x + parentPos.w);
                aroundNodePos.x = Math.max(aroundNodePos.x, parentPos.x);
                aroundNodePos.y = Math.max(aroundNodePos.y, parentPos.y);
                aroundNodePos.h = bottomYCoord - aroundNodePos.y;
                aroundNodePos.w = rightXCoord - aroundNodePos.x;
            }
            parent = parent.parentNode;
        }
    }
    var x = aroundNodePos.x,
        y = aroundNodePos.y,
        width = "w" in aroundNodePos ? aroundNodePos.w : (aroundNodePos.w = aroundNodePos.width),
        height = "h" in aroundNodePos ? aroundNodePos.h : (kernel.deprecated("place.around: dijit.place.__Rectangle: { x:"+x+", y:"+y+", height:"+aroundNodePos.height+", width:"+width+" } has been deprecated.  Please use { x:"+x+", y:"+y+", h:"+aroundNodePos.height+", w:"+width+" }", "", "2.0"), aroundNodePos.h = aroundNodePos.height);
    // Convert positions arguments into choices argument for _place()
    var choices = [];
    function push(aroundCorner, corner){
        choices.push({
            aroundCorner: aroundCorner,
            corner: corner,
            pos: {
                // 'L'/'R'/'M' (middle) select the x of the anchor corner,
                // 'T'/'B'/'M' the y; ">> 1" is a fast integer halving.
                x: {
                    'L': x,
                    'R': x + width,
                    'M': x + (width >> 1)
                }[aroundCorner.charAt(1)],
                y: {
                    'T': y,
                    'B': y + height,
                    'M': y + (height >> 1)
                }[aroundCorner.charAt(0)]
            }
        })
    }
    array.forEach(positions, function(pos){
        var ltr = leftToRight;
        switch(pos){
            case "above-centered":
                push("TM", "BM");
                break;
            case "below-centered":
                push("BM", "TM");
                break;
            case "after-centered":
                ltr = !ltr;
                // fall through
            case "before-centered":
                push(ltr ? "ML" : "MR", ltr ? "MR" : "ML");
                break;
            case "after":
                ltr = !ltr;
                // fall through
            case "before":
                push(ltr ? "TL" : "TR", ltr ? "TR" : "TL");
                push(ltr ? "BL" : "BR", ltr ? "BR" : "BL");
                break;
            case "below-alt":
                ltr = !ltr;
                // fall through
            case "below":
                // first try to align left borders, next try to align right borders (or reverse for RTL mode)
                push(ltr ? "BL" : "BR", ltr ? "TL" : "TR");
                push(ltr ? "BR" : "BL", ltr ? "TR" : "TL");
                break;
            case "above-alt":
                ltr = !ltr;
                // fall through
            case "above":
                // first try to align left borders, next try to align right borders (or reverse for RTL mode)
                push(ltr ? "TL" : "TR", ltr ? "BL" : "BR");
                push(ltr ? "TR" : "TL", ltr ? "BR" : "BL");
                break;
            default:
                // To assist dijit/_base/place, accept arguments of type {aroundCorner: "BL", corner: "TL"}.
                // Not meant to be used directly.
                push(pos.aroundCorner, pos.corner);
        }
    });
    var position = _place(node, choices, layoutNode, {w: width, h: height});
    position.aroundNodePos = aroundNodePos;
    return position;
}
});
}); | PypiClean |
/IOT-Analytics-0.0.2.tar.gz/IOT-Analytics-0.0.2/readme.md | # IOT Analytics
**Analytics for your robot or IOT device**
[](https://pypi.python.org/pypi/iot-analytics/)
[](https://travis-ci.org/gunthercox/iot-analytics)
[](https://coveralls.io/github/gunthercox/iot-analytics?branch=master)
This is a python module designed to provide the tools and resources needed to gather analytics for real-world objects and events. Analytics for online sites and services make it possible for developers to improve workflows and optimize the performance of web pages. The same techniques can be applied to tangible objects and events.
There are many examples of programmers using services such as [Google Analytics](https://analytics.google.com) to track everything from doors opening in a home to trips to the store. Check out [this great blog post](http://nicomiceli.com/tracking-your-home-with-google-analytics/) by Nico Miceli for an example.
## Installation
```
pip install iot_analytics
```
# Project design
This module has two main components
1. Recording data
2. Data analysis

# Recording data
**Supported Endpoints**
- [Google Analytics](https://developers.google.com/analytics/devguides/collection/protocol/v1/devguide)
- IOT Analytics - A custom solution built into this project that you can host yourself
# Data analysis
Analytics is the discovery and communication of meaningful patterns in data. It is not possible for humans to easily extract meaning from a collection of billions of database entries. The goal of the data analysis portion of this project is to provide tools that make it easier to view and process data in a way that makes data features and trends more apparent.
# Apps
This project includes an `apps` module which adds support for integration
with the [Zorg](https://github.com/zorg/zorg) robotics framework.
# Roadmap
- Add data analysis features for hosted storage
- Integration with [Phant](https://data.sparkfun.com)
- Integration with [Intel IOT Analytics](https://dashboard.us.enableiot.com)
| PypiClean |
/Barak-0.3.2.tar.gz/Barak-0.3.2/barak/sed.py | from __future__ import division
from io import readtabfits
from constants import c, c_kms, Jy
from utilities import get_data_path
import numpy as np
from numpy.random import randn
import matplotlib.pyplot as pl
import os, math
import warnings
# Locations of the bundled data files (passband curves, atmospheric
# extinction tables and SED templates) under the package data directory.
DATAPATH = get_data_path()
PATH_PASSBAND = DATAPATH + '/passbands/'
PATH_EXTINCT = DATAPATH + '/atmos_extinction/'
PATH_TEMPLATE = DATAPATH + '/templates/'
def _listfiles(topdir):
    """Map each subdirectory of `topdir` to its list of data files.

    Entries named 'README', nested directories, efficiency files
    ('effic*') and '.py'/'.pdf' files are excluded.  `topdir` must end
    with a path separator, since paths are built by concatenation.
    """
    subdirs = [entry for entry in os.listdir(topdir)
               if os.path.isdir(topdir + entry)]
    files = dict((subdir, []) for subdir in subdirs)
    for subdir in sorted(subdirs):
        for entry in sorted(os.listdir(topdir + subdir)):
            path = topdir + subdir + '/' + entry
            skip = (entry == 'README' or os.path.isdir(path) or
                    entry.startswith('effic') or
                    entry.endswith('.py') or entry.endswith('.pdf'))
            if not skip:
                files[subdir].append(entry)
    return files
# Catalogues of the available template SEDs and passbands, keyed by
# template type / instrument name (built by scanning the data directories).
TEMPLATES = _listfiles(PATH_TEMPLATE)
PASSBANDS = _listfiles(PATH_PASSBAND)
def get_bands(instr=None, names=None, ccd=None):
    """ Get one or more passbands by giving the instrument and
    filename.

    If `names` is not given, then every passband for that instrument
    is returned.  Passband instruments and filenames are listed in the
    dictionary PASSBANDS.  `names` can be a list, a single string, or a
    comma-separated string of values.  `ccd` selects the detector
    ("blue" or "red") for instruments with more than one.

    Examples
    --------
    >>> sdss = get_bands('SDSS', 'u,g,r,i,z')  # get the SDSS passbands
    >>> U = get_bands('LBC', 'u')              # get the LBC U_spec filter
    """
    if instr is None:
        # No instrument requested: list everything that is available.
        return _listfiles(PATH_PASSBAND)
    if isinstance(names, basestring):
        if ',' in names:
            names = [n.strip() for n in names.split(',')]
        else:
            # Bug fix: this single-name branch previously dropped the
            # `ccd` argument, so e.g. get_bands('FORS', 'R', ccd='blue')
            # silently ignored the requested ccd.
            return Passband(instr + '/' + names, ccd=ccd)
    elif names is None:
        names = PASSBANDS[instr]
    return [Passband(instr + '/' + n, ccd=ccd) for n in names]
def get_SEDs(kind=None, names=None):
    """ Get one or more SEDs based on their type and filename.

    If `names` is not given, then every SED of that type is returned.
    SED types and filenames are listed in the dictionary TEMPLATES.
    If `kind` is not given, the available types are returned instead.

    Examples
    --------
    >>> pickles = get_SEDs('pickles')          # pickles stellar library SEDs
    >>> lbga = get_SEDs('LBG', 'lbg_abs.dat')  # LBG absorption spectrum
    """
    if kind is None:
        # No type requested: list everything that is available.
        return _listfiles(PATH_TEMPLATE)
    if isinstance(names, basestring):
        if ',' in names:
            # a comma-separated string of several names
            names = [n.strip() for n in names.split(',')]
        else:
            # a single name: return a single SED, not a list
            return SED(kind + '/' + names)
    elif names is None:
        names = TEMPLATES[kind]
    return [SED(kind + '/' + n) for n in names]
def get_extinction(filename=None, airmass=1.):
    """ Return the atmospheric extinction curve from the given file.

    Returns (wavelength, extinction) where
    extinction = 10^(-0.4 * extinction_in_mags * airmass),
    i.e. the transmitted fraction, so flux_observed = flux_true * extinction.
    (The previous docstring had the sign of the exponent and the
    flux relation inverted relative to the code.)

    If no filename is given, the list of available extinction files is
    returned instead.
    """
    if filename is None:
        return sorted(os.listdir(PATH_EXTINCT))
    wa, emag = np.loadtxt(PATH_EXTINCT + filename, unpack=1)
    return wa, 10**(-0.4 * emag * airmass)
class Passband(object):
    """This class describes a filter transmission curve. Passband
    objects are created by loading data from text files containing
    wavelength in angstroms in the first column, relative transmission
    efficiency in the second column (whitespace delimited). For
    example, to create a Passband object for the 2MASS J filter:

      passband = Passband('J_2MASS.res')

    where 'J_2MASS.res' is a file in the current working directory
    that describes the filter.

    The available passbands are in PASSBANDS.

    Attributes
    ----------
    wa : array of floats
      Wavelength in Angstroms
    tr : array of floats
      Normalised transmission, including atmospheric extinction and
      detector efficiency. May or may not include extinction from the
      optical path.
    effective_wa : float
      Effective wavelength of the passband.

    Methods
    -------
    plot
    """
    def __init__(self, filename, ccd=None):
        # Resolve the path: bare names are looked up in the bundled
        # passband directory, absolute package paths are used as-is.
        if not filename.startswith(PATH_PASSBAND):
            filepath = PATH_PASSBAND + filename
        else:
            filepath = filename
        if filepath.endswith('.fits'):
            import pyfits
            rec = pyfits.getdata(filepath, 1)
            self.wa, self.tr = rec.wa, rec.tr
        else:
            self.wa, self.tr = np.loadtxt(filepath, usecols=(0,1), unpack=True)
        # ensure wavelengths are sorted lowest -> highest
        isort = self.wa.argsort()
        self.wa = self.wa[isort]
        self.tr = self.tr[isort]
        # get the name of the filter/passband file and the name of the
        # directory in which it lives (the instrument).
        prefix, filtername = os.path.split(filename)
        _, instr = os.path.split(prefix)
        self.name = filename
        # Guess which ccd to use for two-detector instruments when the
        # caller did not specify one.
        if instr == 'LBC' and ccd is None:
            # NOTE(review): `filtername in 'ug'` is a substring test, so it
            # is also True for '' and 'ug'; presumably single-character
            # filter names were intended — confirm.
            if filtername.startswith('LBCB') or filtername in 'ug':
                ccd = 'blue'
            elif filtername.startswith('LBCR') or filtername in 'riz':
                ccd = 'red'
        elif instr == 'FORS' and ccd is None:
            # (the warning text has a typo, 'cdd'; it is a runtime string
            # so it is left untouched here)
            warnings.warn('No cdd ("red" or "blue") given, assuming red.')
            ccd = 'red'
        self.atmos = self.effic = None
        if ccd is not None:
            # apply ccd/optics efficiency, interpolated onto our grid
            name = PATH_PASSBAND + instr + '/effic_%s.txt' % ccd
            wa, effic = np.loadtxt(name, usecols=(0,1), unpack=1)
            self.effic = np.interp(self.wa, wa, effic)
            self.tr *= self.effic
        # site extinction file appropriate for each instrument
        extinctmap = dict(LBC='kpno_atmos.dat', FORS='paranal_atmos.dat',
                          HawkI='paranal_atmos.dat',
                          KPNO_Mosaic='kpno_atmos.dat',
                          CTIO_Mosaic='ctio_atmos.dat')
        if instr in extinctmap:
            # apply atmospheric extinction (magnitudes -> transmission)
            wa, emag = np.loadtxt(PATH_EXTINCT + extinctmap[instr], unpack=1)
            self.atmos = np.interp(self.wa, wa, 10**(-0.4 * emag))
            self.tr *= self.atmos
        # trim away areas where band transmission is negligibly small
        # (<0.01% of peak transmission).
        isort = self.tr.argsort()
        sortedtr = self.tr[isort]
        maxtr = sortedtr[-1]
        imax = isort[-1]
        ind = isort[sortedtr < 1e-4 * maxtr]
        if len(ind) > 0:
            i = 0
            c0 = ind < imax
            if c0.any():
                i = ind[c0].max()
            j = len(self.wa) - 1
            c0 = ind > imax
            if c0.any():
                j = ind[c0].min()
            # NOTE(review): min(abs(i-2), 0) always evaluates to 0 (abs()
            # is never negative), so the low-wavelength side is never
            # trimmed; max(i-2, 0) was probably intended — confirm.
            i = min(abs(i-2), 0)
            j += 1
            self.wa = self.wa[i:j]
            self.tr = self.tr[i:j]
            if self.atmos is not None:
                self.atmos = self.atmos[i:j]
            if self.effic is not None:
                self.effic = self.effic[i:j]
        # normalised transmission (integrates to 1 over wavelength)
        self.ntr = self.tr / np.trapz(self.tr, self.wa)
        # Calculate the effective wavelength for the passband. This is
        # the same as equation (3) of Carter et al. 2009.
        # NOTE(review): np.trapz is called without x sample points here,
        # which assumes unit spacing; the spacing only cancels in the
        # a/b ratio for a uniform wavelength grid — confirm the data
        # files are uniformly gridded.
        a = np.trapz(self.tr * self.wa)
        b = np.trapz(self.tr / self.wa)
        self.effective_wa = math.sqrt(a / b)
        # find the AB and Vega fluxes in this band, needed as zero
        # points when calculating magnitudes.
        self.flux = {}
        self.flux['Vega'] = VEGA.calc_flux(self)
        self.flux['AB'] = AB.calc_flux(self)
    def __repr__(self):
        return 'Passband "%s"' % self.name
    def plot(self, effic=False, atmos=False, ymax=None, **kwargs):
        """ Plots the passband. We plot the non-normalised
        transmission. This may or may not include ccd efficiency,
        losses from the atmosphere and telescope optics.

        If `ymax` is given the curve is rescaled so its peak is at
        `ymax`; `effic` and `atmos` overplot the applied ccd
        efficiency and atmospheric extinction curves.
        """
        tr = self.tr
        if ymax is not None:
            tr = self.tr / self.tr.max() * ymax
        pl.plot(self.wa, tr, **kwargs)
        if self.effic is not None and effic:
            pl.plot(self.wa, self.effic,
                    label='applied ccd efficiency', **kwargs)
        if self.atmos is not None and atmos:
            pl.plot(self.wa, self.atmos,
                    label='applied atmospheric extinction', **kwargs)
        pl.xlabel("Wavelength ($\AA$)")
        pl.ylabel("Transmission")
        if atmos or effic:
            pl.legend()
        if pl.isinteractive():
            pl.show()
class SED(object):
    """A Spectral Energy Distribution (SED).

    Instantiate with either a filename or a list of wavelengths and fluxes.
    Wavelengths must be in Angstroms, fluxes in erg/s/cm^2/Ang.

    To convert from f_nu to f_lambda in erg/s/cm^2/Ang, substitute
    using::

      nu = c / lambda
      f_lambda = c / lambda^2 * f_nu

    Available SED template filenames are in TEMPLATES.
    """
    def __init__(self, filename=None, wa=[], fl=[], z=0., label=None):
        # filename overrides wave and flux keywords
        if filename is not None:
            if not filename.startswith(PATH_TEMPLATE):
                filepath = PATH_TEMPLATE + filename
            else:
                # Bug fix: filepath was previously left unassigned in this
                # branch, raising UnboundLocalError for paths that already
                # start with PATH_TEMPLATE.
                filepath = filename
            if filepath.endswith('.fits'):
                rec = readtabfits(filepath)
                wa, fl = rec.wa, rec.fl
            else:
                wa, fl = np.loadtxt(filepath, usecols=(0,1), unpack=1)
            if label is None:
                label = filename
        # We keep a copy of the wavelength, flux at z = 0
        self.z0wa = np.array(wa)
        self.z0fl = np.array(fl)
        self.wa = np.array(wa)
        self.fl = np.array(fl)
        self.z = z
        self.label = label
        # Store the intrinsic (i.e. unextincted) flux in case we
        # change extinction
        self.EBmV = 0.
        self.z0fl_no_extinct = np.array(fl)
        if abs(z) > 1e-6:
            self.redshift_to(z)
    def __repr__(self):
        return 'SED "%s"' % self.label
    def copy(self):
        """Copies the SED, returning a new SED object.

        Note the copy is rebuilt from the rest-frame arrays; any
        extinction applied to this SED is not carried over.
        """
        newSED = SED(wa=self.z0wa, fl=self.z0fl, z=self.z, label=self.label)
        return newSED
    def integrate(self, wmin=None, wmax=None):
        """ Calculates flux (erg/s/cm^2) in SED within the given
        wavelength range (defaults to the full wavelength coverage)."""
        if wmin is None:
            wmin = self.wa[0]
        if wmax is None:
            wmax = self.wa[-1]
        i = self.wa.searchsorted(wmin)
        # Bug fix: use side='right' for the upper bound so a sample
        # exactly at wmax (in particular the last sample, the default)
        # is included; previously the final pixel was always dropped.
        j = self.wa.searchsorted(wmax, side='right')
        fl = np.trapz(self.fl[i:j], self.wa[i:j])
        return fl
    def plot(self, log=False, ymax=None, **kwargs):
        """Plot the SED, optionally on log axes or rescaled to peak at
        `ymax`."""
        fl = self.fl
        if ymax is not None:
            fl = self.fl / self.fl.max() * ymax
        label = '%s z=%.1f E(B-V)=%.2f' % (self.label, self.z, self.EBmV)
        if log:
            pl.loglog(self.wa, fl, label=label, **kwargs)
        else:
            pl.plot(self.wa, fl, label=label, **kwargs)
        pl.xlabel('Wavelength ($\AA$)')
        pl.ylabel('Flux (ergs s$^{-1}$cm$^{-2}$ $\AA^{-1}$)')
        #pl.legend()
        if pl.isinteractive():
            pl.show()
    def redshift_to(self, z, cosmo=None):
        """Redshifts the SED to redshift z.

        We have to conserve energy, so the area under the redshifted
        SED is rescaled to equal the area under the unredshifted SED;
        otherwise magnitude calculations would be inconsistent when
        comparing SEDs at different redshifts.
        """
        self.wa = np.array(self.z0wa)
        self.fl = np.array(self.z0fl)
        # Bug fix: np.trapz takes (y, x); the arguments were previously
        # passed as (x, y), integrating the wavelengths over the fluxes.
        z0fluxtot = np.trapz(self.z0fl, self.z0wa)
        self.wa *= z + 1
        zfluxtot = np.trapz(self.fl, self.wa)
        self.fl *= z0fluxtot / zfluxtot
        self.z = z
    def normalise_to_mag(self, ABmag, band):
        """Normalises the SED to match the flux equivalent to the
        given AB magnitude in the given passband.
        """
        magflux = mag2flux(ABmag, band)
        sedflux = self.calc_flux(band)
        norm = magflux / sedflux
        self.fl *= norm
        self.z0fl *= norm
    def calc_flux(self, band):
        """Calculate the mean flux for a passband, weighted by the
        response and wavelength in the given passband.

        Returns the mean flux (erg/s/cm^2/Ang) inside the band.
        """
        if self.wa[0] > band.wa[0] or self.wa[-1] < band.wa[-1]:
            msg = "SED does not cover the whole bandpass, extrapolating"
            warnings.warn(msg)
            dw = np.median(np.diff(self.wa))
            sedwa = np.arange(band.wa[0], band.wa[-1]+dw, dw)
            sedfl = np.interp(sedwa, self.wa, self.fl)
        else:
            sedwa = self.wa
            sedfl = self.fl
        i,j = sedwa.searchsorted([band.wa[0], band.wa[-1]])
        fl = sedfl[i:j]
        wa = sedwa[i:j]
        dw_band = np.median(np.diff(band.wa))
        dw_sed = np.median(np.diff(wa))
        if dw_sed > dw_band and dw_band > 20:
            warnings.warn(
                'WARNING: SED wavelength sampling interval ~%.2f Ang, '
                'but bandpass sampling interval ~%.2f Ang' %
                (dw_sed, dw_band))
            # interpolate the SED to the passband wavelengths
            fl = np.interp(band.wa, wa, fl)
            band_tr = band.tr
            wa = band.wa
        else:
            # interpolate the band transmission to the SED
            # wavelength values.
            band_tr = np.interp(wa, band.wa, band.tr)
        # weight by response and wavelength, appropriate when we're
        # counting the number of photons within the band.
        flux = np.trapz(band_tr * fl * wa, wa) / np.trapz(band_tr * wa, wa)
        return flux
    def calc_mag(self, band, system="Vega"):
        """Calculates magnitude in the given passband.

        Note that the distance modulus is not added.

        `system` is either 'Vega' or 'AB'.
        """
        f1 = self.calc_flux(band)
        if f1 > 0:
            mag = -2.5 * math.log10(f1/band.flux[system])
            # Add 0.026 because Vega has V=0.026 (e.g. Bohlin & Gilliland 2004)
            if system == "Vega":
                mag += 0.026
        else:
            mag = np.inf
        return mag
    def calc_colour(self, band1, band2, system="Vega"):
        """Calculates the colour band1 - band2.

        `system` is either 'Vega' or 'AB'.
        """
        mag1 = self.calc_mag(band1, system=system)
        mag2 = self.calc_mag(band2, system=system)
        return mag1 - mag2
def mag2flux(ABmag, band):
    """ Convert an AB magnitude into f_lambda (erg/s/cm^2/Angstrom)
    at the effective wavelength of `band`.
    """
    # AB magnitudes are defined on f_nu (Oke, J.B. 1974, ApJS, 27, 21);
    # fnu is in erg/s/cm^2/Hz.
    fnu = 10**(-(ABmag + 48.6)/2.5)
    # express as flux per unit wavelength instead
    return fnu_to_flambda(band.effective_wa, fnu)
def flux2mag(flambda, band):
    """ Convert f_lambda (erg/s/cm^2/Angstrom) at the effective
    wavelength of `band` into an AB magnitude.
    """
    # express as flux per unit frequency (erg/s/cm^2/Hz) first
    fnu = flambda_to_fnu(band.effective_wa, flambda)
    return -2.5*math.log10(fnu) - 48.6
def mag2Jy(ABmag):
    """Convert an AB magnitude into a flux density f_nu in Jy."""
    return 10**(-(ABmag + 48.6)/2.5) / Jy
def Jy2Mag(fluxJy):
    """Convert a flux density f_nu in Jy into an AB magnitude."""
    return -2.5 * (np.log10(fluxJy * Jy)) - 48.6
def fnu_to_flambda(wa, f_nu):
    """ Convert flux per unit frequency to a flux per unit wavelength.

    Parameters
    ----------
    wa : array_like
        Wavelength in Angstroms.
    f_nu : array_like
        Flux at each wavelength in erg/s/cm^2/Hz.

    Returns
    -------
    f_lambda : ndarray
        Flux at each wavelength in erg/s/cm^2/Ang.
    """
    # f_lambda = (c / lambda^2) * f_nu; the 1e-8 factors convert the
    # wavelength from Angstroms to cm and the result back to per-Angstrom.
    wa_cm = wa * 1e-8
    return c / wa_cm**2 * f_nu * 1e-8
def flambda_to_fnu(wa, f_lambda):
    """ Convert flux per unit wavelength to a flux per unit frequency.

    Parameters
    ----------
    wa : array_like
        Wavelength in Angstroms.
    f_lambda : array_like
        Flux at each wavelength in erg/s/cm^2/Ang.

    Returns
    -------
    f_nu : ndarray
        Flux at each wavelength in erg/s/cm^2/Hz.
    """
    # inverse of fnu_to_flambda: f_nu = (lambda^2 / c) * f_lambda
    wa_cm = wa * 1e-8
    return wa_cm**2 * f_lambda * 1e8 / c
def qso_template(wa, z):
    """ Return a composite QSO spectrum at redshift z.

    This uses the SDSS composite at wa > 1680 and a smoothed version
    of the HST/COS EUV+FUV AGN composite spectrum shown in Figure 5
    from Shull, Stevans, and Danforth 2012 for wa < 1680.

    The spectrum is in arbitrary units of F_lambda. wa must be in
    angstroms and sorted lowest to highest.
    """
    wa = np.array(wa, copy=False)
    wrest = wa / (1+z)
    # index of the first pixel redward of the 1680 Ang (rest) splice point
    i = wrest.searchsorted(1680)
    if i == len(wrest):
        # everything is below 1680 Ang rest frame: UV composite only
        return qso_template_uv(wa, z)
    elif i == 0:
        # everything is above 1680 Ang rest frame: SDSS composite only
        return qso_template_sdss(wa, z)
    else:
        fl = np.ones(len(wa), float)
        # rescale each composite by its value at the splice pixel so the
        # two halves join continuously
        f = qso_template_uv(wa, z)
        fl[:i] = f[:i] / f[i]
        f = qso_template_sdss(wa, z)
        fl[i:] = f[i:] / f[i]
        return fl
def qso_template_sdss(wa, z):
    """ Return a composite visible QSO spectrum at redshift z.

    The SDSS composite spectrum as a function of F_lambda is returned
    at each wavelength of wa. wa must be in angstroms.

    Only good between 700 and 8000 (rest frame).
    """
    # NOTE: the FITS template is re-read from disk on every call; cache
    # it at module level if this becomes a hot path.
    T = readtabfits(DATAPATH + '/templates/qso/dr1QSOspec.fits')
    return np.interp(wa, T.wa*(1+z), T.fl)
def qso_template_uv(wa, z):
    """ Return a composite UV QSO spectrum at redshift z.

    Wavelengths must be in Angstroms.

    This is a smoothed version of the HST/COS EUV+FUV AGN composite
    spectrum shown in Figure 5 from Shull, Stevans, and Danforth 2012.

    Only good between 550 and 1730 Angstroms (rest frame).
    """
    # Bug fix: the path separator after DATAPATH was missing (compare
    # qso_template_sdss and PATH_TEMPLATE, which both use '/templates/'),
    # so the template file could never be found.
    T = readtabfits(DATAPATH + '/templates/qso/Shull_composite.fits')
    return np.interp(wa, T.wa*(1 + z), T.fl)
def make_constant_dv_wa_scale(wmin, wmax, dv):
    """ Make a wavelength scale with pixels of constant velocity width
    `dv` (km/s), running from `wmin` up to at most `wmax`.
    """
    # A constant velocity step corresponds to a constant step in
    # log10(wavelength).
    dlogw = np.log10(1 + dv/c_kms)
    # number of pixels that fit between wmin and wmax
    npts = int(np.log10(wmax / wmin) / dlogw)
    return wmin * 10**(np.arange(npts)*dlogw)
# Data: reference SEDs used as magnitude zero points.
VEGA = SED('reference/Vega_bohlin2006')
SUN = SED('reference/sun_stis')
# AB SED has constant flux density (f_nu) 3631 Jy, see
# http://www.sdss.org/dr5/algorithms/fluxcal.html
fnu = 3631 * Jy  # erg/s/cm^2/Hz
# Bug fix: np.logspace requires an integer sample count; passing the
# float 1e5 raises a TypeError on modern numpy.
wa = np.logspace(1, 10, 100000)  # Ang
AB = SED(wa=wa, fl=fnu_to_flambda(wa, fnu))
# don't clutter the namespace
del wa, fnu
/Flask-Pay-WX-1.0.5.tar.gz/Flask-Pay-WX-1.0.5/flask_pay_wx/v2/__init__.py | from typing import Dict
from flask_pay_wx.v2.Tools import Tools
class PayOrder(object):
    """Builder for a WeChat Pay v2 "unified order" request.

    Collects the order fields and produces the request payload as a
    nested dict (``to_dict``), including the MD5 / HMAC-SHA256
    signature required by the v2 API.
    """

    def __init__(self, private_key: str, app_id: str = None, mch_id: str = None, nonce_str: str = None, product_body: str = None,
                 out_trade_no: str = None, total_fee: str = None, spbill_create_ip: str = None, notify_url: str = None,
                 trade_type: str = 'JSAPI', open_id: str = None, sign_type: str = 'MD5'):
        self.private_key = private_key
        self.app_id = app_id
        self.mch_id = mch_id
        self.nonce_str = nonce_str
        self.product_body = product_body
        self.out_trade_no = out_trade_no
        self.total_fee = total_fee
        self.spbill_create_ip = spbill_create_ip
        self.notify_url = notify_url
        self.trade_type = trade_type
        self.open_id = open_id
        self.sign_type = sign_type

    def to_dict(self) -> Dict:
        """Return the request body as a dict ready for XML serialisation."""
        fields = self._sign_params()
        fields['sign'] = self._get_sign()
        return {'xml': fields}

    def _sign_params(self) -> Dict:
        """Return the request parameters that participate in the signature.

        ``openid`` is required by the v2 API only for JSAPI trades.
        """
        params = {
            'appid': self.app_id,
            'mch_id': self.mch_id,
            'nonce_str': self.nonce_str,
            'body': self.product_body,
            'out_trade_no': self.out_trade_no,
            'total_fee': self.total_fee,
            'spbill_create_ip': self.spbill_create_ip,
            'notify_url': self.notify_url,
            'trade_type': self.trade_type,
        }
        if self.trade_type == 'JSAPI':
            params['openid'] = self.open_id
        return params

    def _get_sign(self) -> str:
        # The v2 signature concatenates all parameters as key=value pairs
        # sorted by key in ASCII order.  Bug fix: the previous code built
        # this string from multi-line triple-quoted literals, which
        # embedded newlines/indentation and a stray extra '&', and placed
        # trade_type before total_fee (not ASCII order) -- all of which
        # produce signatures the API rejects.
        params = self._sign_params()
        sort_str = '&'.join('{0}={1}'.format(k, params[k]) for k in sorted(params))
        return self._encryption_str(sort_str)

    def _encryption_str(self, origin_str: str) -> str:
        """Append the merchant key and hash with MD5 or HMAC-SHA256."""
        string_sign_temp = "{0}&key={1}".format(origin_str, self.private_key)
        if self.sign_type == 'MD5':
            sign = Tools.md5(string_sign_temp)
        else:
            sign = Tools.hmac_sha256(self.private_key, string_sign_temp)
        return sign
class QueryOrder(object):
    """Builder for a WeChat Pay v2 order-query request.

    Queries by merchant order number (``out_trade_no``) when one is
    given, otherwise by the WeChat transaction id.
    """

    def __init__(self, private_key: str, app_id: str = None, mch_id: str = None, nonce_str: str = None, out_trade_no: str = None,
                 transaction_id: str = None, sign_type: str = 'MD5'):
        self.private_key = private_key
        self.app_id = app_id
        self.mch_id = mch_id
        self.nonce_str = nonce_str
        self.out_trade_no = out_trade_no
        self.transaction_id = transaction_id
        self.sign_type = sign_type

    def to_dict(self) -> Dict:
        """Return the query request body as a nested dict."""
        body = {
            'appid': self.app_id,
            'mch_id': self.mch_id,
            'nonce_str': self.nonce_str,
        }
        if self.out_trade_no is not None:
            body['out_trade_no'] = self.out_trade_no
        if self.transaction_id is not None:
            body['transaction_id'] = self.transaction_id
        body['sign_type'] = self.sign_type
        body['sign'] = self._get_sign()
        return {'xml': body}

    def _get_sign(self) -> str:
        # Concatenate the signed parameters in ASCII key order, as the
        # v2 signing scheme requires; out_trade_no takes precedence when
        # both identifiers are set.
        if self.out_trade_no is not None:
            pairs = [('appid', self.app_id), ('mch_id', self.mch_id),
                     ('nonce_str', self.nonce_str),
                     ('out_trade_no', self.out_trade_no),
                     ('sign_type', self.sign_type)]
        else:
            pairs = [('appid', self.app_id), ('mch_id', self.mch_id),
                     ('nonce_str', self.nonce_str),
                     ('sign_type', self.sign_type),
                     ('transaction_id', self.transaction_id)]
        sort_str = '&'.join('{0}={1}'.format(k, v) for k, v in pairs)
        return self._encryption_str(sort_str)

    def _encryption_str(self, origin_str: str) -> str:
        """Append the merchant key and hash with MD5 or HMAC-SHA256."""
        string_sign_temp = "{0}&key={1}".format(origin_str, self.private_key)
        if self.sign_type == 'MD5':
            return Tools.md5(string_sign_temp)
        return Tools.hmac_sha256(self.private_key, string_sign_temp)
# NAS-FCOS detector with a NAS-searched head, ResNet-50 (caffe) backbone,
# 4 GPUs x 4 images, 1x COCO schedule (mmdetection config file).
_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
    type='NASFCOS',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        # BN frozen (requires_grad=False), matching the caffe-style
        # pretrained weights
        norm_cfg=dict(type='BN', requires_grad=False, eps=0),
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')),
    # NAS-searched FPN neck with deformable convolutions
    neck=dict(
        type='NASFCOS_FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs=True,
        num_outs=5,
        norm_cfg=dict(type='BN'),
        conv_cfg=dict(type='DCNv2', deform_groups=2)),
    bbox_head=dict(
        type='NASFCOSHead',
        num_classes=80,
        in_channels=256,
        feat_channels=256,
        strides=[8, 16, 32, 64, 128],
        norm_cfg=dict(type='GN', num_groups=32),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='IoULoss', loss_weight=1.0),
        loss_centerness=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
    train_cfg=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.4,
            min_pos_iou=0,
            ignore_iof_thr=-1),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))
# caffe-style image normalisation (BGR mean, unit std)
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
# 4 images per GPU (x 4 GPUs = batch size 16)
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=2,
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
# optimizer: doubled lr and no weight decay for biases, per the paper
optimizer = dict(
    lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.))
/BuildStream-2.0.1-cp39-cp39-manylinux_2_28_x86_64.whl/buildstream/downloadablefilesource.py | import os
import urllib.request
import urllib.error
import contextlib
import shutil
import netrc
from .source import Source, SourceError
from . import utils
class _NetrcFTPOpener(urllib.request.FTPHandler):
    """FTP handler that fills in credentials from a netrc config when
    the URL itself does not carry a username."""

    def __init__(self, netrc_config):
        self.netrc = netrc_config

    def _unsplit(self, host, port, user, passwd):
        # Reassemble a netloc string: [user[:passwd]@]host[:port]
        netloc = host
        if port:
            netloc = "{}:{}".format(netloc, port)
        if user:
            cred = user
            if passwd:
                cred = "{}:{}".format(cred, passwd)
            netloc = "{}@{}".format(cred, netloc)
        return netloc

    def ftp_open(self, req):
        parts = urllib.parse.urlparse(req.full_url)
        user = parts.username
        passwd = parts.password
        # Only consult netrc when the URL supplies no username at all.
        if parts.username is None and self.netrc:
            creds = self.netrc.authenticators(parts.hostname)
            if creds:
                user, _, passwd = creds
        req.host = self._unsplit(parts.hostname, parts.port, user, passwd)
        return super().ftp_open(req)
class _NetrcPasswordManager:
    """Password-manager lookalike backed by a netrc config.

    Implements the two methods urllib's auth handlers call:
    ``add_password`` is a no-op (credentials come from netrc only) and
    ``find_user_password`` looks the request's host up in the netrc file.
    """

    def __init__(self, netrc_config):
        self.netrc = netrc_config

    def add_password(self, realm, uri, user, passwd):
        # Nothing to record; lookups go through the netrc config.
        pass

    def find_user_password(self, realm, authuri):
        if not self.netrc:
            return None, None
        hostname = urllib.parse.urlsplit(authuri).hostname
        creds = self.netrc.authenticators(hostname)
        if creds:
            login, _, password = creds
            return login, password
        return None, None
def _download_file(opener_creator, url, etag, directory):
    # Download `url` into `directory`, using the HTTP ETag to skip the
    # transfer when the remote content is unchanged.
    #
    # Returns a (local_file, etag, error) triple:
    #   * (path, etag, None) on a successful download,
    #   * (None, None, None) when the content matches `etag` (unchanged),
    #   * (None, None, message) on failure.
    opener = opener_creator.get_url_opener()
    default_name = os.path.basename(url)
    request = urllib.request.Request(url)
    request.add_header("Accept", "*/*")
    request.add_header("User-Agent", "BuildStream/2")
    if etag is not None:
        request.add_header("If-None-Match", etag)
    try:
        with contextlib.closing(opener.open(request)) as response:
            info = response.info()
            # some servers don't honor the 'If-None-Match' header
            if etag and info["ETag"] == etag:
                return None, None, None
            etag = info["ETag"]
            filename = info.get_filename(default_name)
            # basename() guards against a server-supplied filename that
            # contains path components
            filename = os.path.basename(filename)
            local_file = os.path.join(directory, filename)
            with open(local_file, "wb") as dest:
                shutil.copyfileobj(response, dest)
    except urllib.error.HTTPError as e:
        if e.code == 304:
            # 304 Not Modified.
            # Because we use etag only for matching ref, currently specified ref is what
            # we would have downloaded.
            return None, None, None
        return None, None, str(e)
    except (urllib.error.URLError, urllib.error.ContentTooShortError, OSError, ValueError) as e:
        # Note that urllib.request.Request in the try block may throw a
        # ValueError for unknown url types, so we handle it here.
        return None, None, str(e)
    return local_file, etag, None
class DownloadableFileSource(Source):
    """Abstract base class for sources fetched as a single file from a URL.

    The downloaded file is cached in a per-URL mirror directory under its
    sha256 checksum; that checksum also serves as the source 'ref'.
    """

    # pylint: disable=attribute-defined-outside-init
    COMMON_CONFIG_KEYS = Source.COMMON_CONFIG_KEYS + ["url", "ref"]

    # Per-instance cache of the default mirror file path (see _get_mirror_file)
    __default_mirror_file = None

    def configure(self, node):
        """Read 'url' (mandatory) and 'ref' (optional) from the source config."""
        self.original_url = node.get_str("url")
        self.ref = node.get_str("ref", None)
        # Aliased/mirrored form of the URL actually used for downloads
        self.url = self.translate_url(self.original_url)
        self._mirror_dir = os.path.join(self.get_mirror_directory(), utils.url_directory_name(self.original_url))

    def preflight(self):
        # Nothing to verify up front for a plain file download.
        return

    def get_unique_key(self):
        """Cache key: the configured URL together with the tracked ref."""
        return [self.original_url, self.ref]

    def is_cached(self) -> bool:
        """Return True if the file for the current ref is already mirrored."""
        return os.path.isfile(self._get_mirror_file())

    def load_ref(self, node):
        self.ref = node.get_str("ref", None)

    def get_ref(self):
        return self.ref

    def set_ref(self, ref, node):
        node["ref"] = self.ref = ref

    def track(self): # pylint: disable=arguments-differ
        # there is no 'track' field in the source to determine what/whether
        # or not to update refs, because tracking a ref is always a conscious
        # decision by the user.
        new_ref = self._ensure_mirror("Tracking {}".format(self.url))
        if self.ref and self.ref != new_ref:
            # Warn rather than fail: the user explicitly asked to track.
            detail = (
                "When tracking, new ref differs from current ref:\n"
                + "  Tracked URL: {}\n".format(self.url)
                + "  Current ref: {}\n".format(self.ref)
                + "  New ref: {}\n".format(new_ref)
            )
            self.warn("Potential man-in-the-middle attack!", detail=detail)
        return new_ref

    def fetch(self): # pylint: disable=arguments-differ
        # Just a defensive check, it is impossible for the
        # file to be already cached because Source.fetch() will
        # not be called if the source is already cached.
        #
        if os.path.isfile(self._get_mirror_file()):
            return # pragma: nocover
        # Download the file, raise hell if the sha256sums don't match,
        # and mirror the file otherwise.
        sha256 = self._ensure_mirror(
            "Fetching {}".format(self.url),
        )
        if sha256 != self.ref:
            raise SourceError(
                "File downloaded from {} has sha256sum '{}', not '{}'!".format(self.url, sha256, self.ref)
            )

    def _get_etag(self, ref):
        """Return the HTTP ETag previously stored for *ref*, or None."""
        etagfilename = os.path.join(self._mirror_dir, "{}.etag".format(ref))
        if os.path.exists(etagfilename):
            with open(etagfilename, "r", encoding="utf-8") as etagfile:
                return etagfile.read()
        return None

    def _store_etag(self, ref, etag):
        """Persist *etag* next to the mirrored file for *ref* (atomic write)."""
        etagfilename = os.path.join(self._mirror_dir, "{}.etag".format(ref))
        with utils.save_file_atomic(etagfilename) as etagfile:
            etagfile.write(etag)

    def _ensure_mirror(self, activity_name: str):
        # Downloads from the url and caches it according to its sha256sum.
        with self.tempdir() as td:
            # We do not use etag in case what we have in cache is
            # not matching ref in order to be able to recover from
            # corrupted download.
            if self.ref and not self.is_cached():
                # Do not re-download the file if the ETag matches.
                etag = self._get_etag(self.ref)
            else:
                etag = None
            url_opener_creator = _UrlOpenerCreator(self._parse_netrc())
            # NOTE(review): blocking_activity appears to run _download_file
            # off the main loop — confirm against the Source base class.
            local_file, new_etag, error = self.blocking_activity(
                _download_file, (url_opener_creator, self.url, etag, td), activity_name
            )
            if error:
                raise SourceError("{}: Error mirroring {}: {}".format(self, self.url, error), temporary=True)
            if local_file is None:
                # Server reported the content unchanged (ETag matched).
                return self.ref
            # Make sure url-specific mirror dir exists.
            if not os.path.isdir(self._mirror_dir):
                os.makedirs(self._mirror_dir)
            # Store by sha256sum
            sha256 = utils.sha256sum(local_file)
            # Even if the file already exists, move the new file over.
            # In case the old file was corrupted somehow.
            os.rename(local_file, self._get_mirror_file(sha256))
            if new_etag:
                self._store_etag(sha256, new_etag)
            return sha256

    def _parse_netrc(self):
        """Parse the user's ~/.netrc, returning a netrc object or None."""
        netrc_config = None
        try:
            netrc_config = netrc.netrc()
        except OSError:
            # If the .netrc file was not found, FileNotFoundError will be
            # raised, but OSError will be raised directly by the netrc package
            # in the case that $HOME is not set.
            #
            # This will catch both cases.
            pass
        except netrc.NetrcParseError as e:
            self.warn("{}: While reading .netrc: {}".format(self, e))
        return netrc_config

    def _get_mirror_file(self, sha=None):
        """Return the mirror path for *sha*, defaulting to the current ref."""
        if sha is not None:
            return os.path.join(self._mirror_dir, sha)
        # Lazily computed and cached, since self.ref does not change after load.
        if self.__default_mirror_file is None:
            self.__default_mirror_file = os.path.join(self._mirror_dir, self.ref)
        return self.__default_mirror_file
class _UrlOpenerCreator:
def __init__(self, netrc_config):
self.netrc_config = netrc_config
def get_url_opener(self):
if self.netrc_config:
netrc_pw_mgr = _NetrcPasswordManager(self.netrc_config)
http_auth = urllib.request.HTTPBasicAuthHandler(netrc_pw_mgr)
ftp_handler = _NetrcFTPOpener(self.netrc_config)
return urllib.request.build_opener(http_auth, ftp_handler)
return urllib.request.build_opener() | PypiClean |
/BGT_Client-1.0.2-py3-none-any.whl/dgt_sdk/protobuf/network_pb2.py |
# Machine-generated protobuf bindings (protoc output) for
# dgt_sdk/protobuf/network.proto — do not edit by hand; regenerate instead.
import sys
# On Python 2 the serialized descriptor literals are already bytes; on
# Python 3 they must be re-encoded to latin-1 bytes for the descriptor pool.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
# Default symbol database; all generated descriptors below register into it.
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='dgt_sdk/protobuf/network.proto',
package='',
syntax='proto3',
serialized_options=_b('\n\025sawtooth.sdk.protobufP\001Z\013network_pb2'),
serialized_pb=_b('\n\x1e\x64gt_sdk/protobuf/network.proto\"\x13\n\x11\x44isconnectMessage\"\xd9\x01\n\x13PeerRegisterRequest\x12\x10\n\x08\x65ndpoint\x18\x01 \x01(\t\x12\x18\n\x10protocol_version\x18\x02 \x01(\r\x12/\n\x04mode\x18\x03 \x01(\x0e\x32!.PeerRegisterRequest.RegisterMode\x12\x0b\n\x03pid\x18\x04 \x01(\r\x12\x11\n\tcomponent\x18\x05 \x01(\t\x12\x10\n\x08\x65xtpoint\x18\x06 \x01(\t\x12\x0b\n\x03hid\x18\x07 \x01(\t\"&\n\x0cRegisterMode\x12\x0c\n\x08REGISTER\x10\x00\x12\x08\n\x04SYNC\x10\x01\"\x17\n\x15PeerUnregisterRequest\"r\n\x0fGetPeersRequest\x12\x0f\n\x07peer_id\x18\x01 \x01(\x0c\x12\x10\n\x08\x65ndpoint\x18\x02 \x01(\t\x12\x0f\n\x07\x63luster\x18\x03 \x01(\t\x12\x0b\n\x03KYC\x18\x04 \x01(\t\x12\x0f\n\x07network\x18\x05 \x01(\t\x12\r\n\x05\x62\x61tch\x18\x06 \x01(\t\"\x85\x02\n\x10GetPeersResponse\x12\x30\n\x06status\x18\x01 \x01(\x0e\x32 .GetPeersResponse.GetPeersStatus\x12\x0f\n\x07\x63luster\x18\x02 \x01(\t\x12\x16\n\x0epeer_endpoints\x18\x03 \x03(\t\"\x95\x01\n\x0eGetPeersStatus\x12\x06\n\x02OK\x10\x00\x12\x0b\n\x07PENDING\x10\x01\x12\x0c\n\x08REDIRECT\x10\x02\x12\n\n\x06\x44\x45NIED\x10\x03\x12\n\n\x06JOINED\x10\x04\x12\x0c\n\x08\x44YNPEERS\x10\x05\x12\x0b\n\x07NOSPACE\x10\x06\x12\x0b\n\x07WAITING\x10\x07\x12\x0c\n\x08PEERSTAT\x10\x08\x12\x12\n\x0eNOT_VALID_CERT\x10\t\"\r\n\x0bPingRequest\"\x0e\n\x0cPingResponse\"\xc1\x01\n\rGossipMessage\x12\x0f\n\x07\x63ontent\x18\x01 \x01(\x0c\x12\x30\n\x0c\x63ontent_type\x18\x02 \x01(\x0e\x32\x1a.GossipMessage.ContentType\x12\x14\n\x0ctime_to_live\x18\x03 \x01(\r\"W\n\x0b\x43ontentType\x12\x16\n\x12\x43ONTENT_TYPE_UNSET\x10\x00\x12\t\n\x05\x42LOCK\x10\x01\x12\t\n\x05\x42\x41TCH\x10\x02\x12\x0b\n\x07\x42\x41TCHES\x10\x03\x12\r\n\tENDPOINTS\x10\x04\"\x92\x01\n\x16NetworkAcknowledgement\x12.\n\x06status\x18\x01 \x01(\x0e\x32\x1e.NetworkAcknowledgement.Status\x12\x0b\n\x03pid\x18\x02 \x01(\r\x12\x0c\n\x04sync\x18\x03 
\x01(\x08\"-\n\x06Status\x12\x10\n\x0cSTATUS_UNSET\x10\x00\x12\x06\n\x02OK\x10\x01\x12\t\n\x05\x45RROR\x10\x02\"K\n\x12GossipBlockRequest\x12\x10\n\x08\x62lock_id\x18\x01 \x01(\t\x12\r\n\x05nonce\x18\x02 \x01(\t\x12\x14\n\x0ctime_to_live\x18\x03 \x01(\r\"4\n\x13GossipBlockResponse\x12\x0f\n\x07\x63ontent\x18\x01 \x01(\x0c\x12\x0c\n\x04nest\x18\x02 \x01(\x08\"&\n\x13GossipBatchResponse\x12\x0f\n\x07\x63ontent\x18\x01 \x01(\x0c\"N\n\x1bGossipBatchByBatchIdRequest\x12\n\n\x02id\x18\x01 \x01(\t\x12\r\n\x05nonce\x18\x02 \x01(\t\x12\x14\n\x0ctime_to_live\x18\x03 \x01(\r\"U\n!GossipBatchByTransactionIdRequest\x12\x0b\n\x03ids\x18\x01 \x03(\t\x12\r\n\x05nonce\x18\x02 \x01(\t\x12\x14\n\x0ctime_to_live\x18\x03 \x01(\r\"R\n\x16GossipConsensusMessage\x12\x0f\n\x07message\x18\x01 \x01(\x0c\x12\x11\n\tsender_id\x18\x02 \x01(\x0c\x12\x14\n\x0ctime_to_live\x18\x03 \x01(\r\"1\n\x0c\x45ndpointItem\x12\x0f\n\x07peer_id\x18\x01 \x01(\x0c\x12\x10\n\x08\x65ndpoint\x18\x02 \x01(\t\"0\n\x0c\x45ndpointList\x12 \n\tendpoints\x18\x01 \x03(\x0b\x32\r.EndpointItemB&\n\x15sawtooth.sdk.protobufP\x01Z\x0bnetwork_pb2b\x06proto3')
)
_PEERREGISTERREQUEST_REGISTERMODE = _descriptor.EnumDescriptor(
name='RegisterMode',
full_name='PeerRegisterRequest.RegisterMode',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='REGISTER', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SYNC', index=1, number=1,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=235,
serialized_end=273,
)
_sym_db.RegisterEnumDescriptor(_PEERREGISTERREQUEST_REGISTERMODE)
_GETPEERSRESPONSE_GETPEERSSTATUS = _descriptor.EnumDescriptor(
name='GetPeersStatus',
full_name='GetPeersResponse.GetPeersStatus',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='OK', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PENDING', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='REDIRECT', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DENIED', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='JOINED', index=4, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DYNPEERS', index=5, number=5,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOSPACE', index=6, number=6,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WAITING', index=7, number=7,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PEERSTAT', index=8, number=8,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOT_VALID_CERT', index=9, number=9,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=529,
serialized_end=678,
)
_sym_db.RegisterEnumDescriptor(_GETPEERSRESPONSE_GETPEERSSTATUS)
_GOSSIPMESSAGE_CONTENTTYPE = _descriptor.EnumDescriptor(
name='ContentType',
full_name='GossipMessage.ContentType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='CONTENT_TYPE_UNSET', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BLOCK', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BATCH', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BATCHES', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ENDPOINTS', index=4, number=4,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=818,
serialized_end=905,
)
_sym_db.RegisterEnumDescriptor(_GOSSIPMESSAGE_CONTENTTYPE)
_NETWORKACKNOWLEDGEMENT_STATUS = _descriptor.EnumDescriptor(
name='Status',
full_name='NetworkAcknowledgement.Status',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='STATUS_UNSET', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OK', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ERROR', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=1009,
serialized_end=1054,
)
_sym_db.RegisterEnumDescriptor(_NETWORKACKNOWLEDGEMENT_STATUS)
_DISCONNECTMESSAGE = _descriptor.Descriptor(
name='DisconnectMessage',
full_name='DisconnectMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=34,
serialized_end=53,
)
_PEERREGISTERREQUEST = _descriptor.Descriptor(
name='PeerRegisterRequest',
full_name='PeerRegisterRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='endpoint', full_name='PeerRegisterRequest.endpoint', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='protocol_version', full_name='PeerRegisterRequest.protocol_version', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mode', full_name='PeerRegisterRequest.mode', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pid', full_name='PeerRegisterRequest.pid', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='component', full_name='PeerRegisterRequest.component', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='extpoint', full_name='PeerRegisterRequest.extpoint', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hid', full_name='PeerRegisterRequest.hid', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_PEERREGISTERREQUEST_REGISTERMODE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=56,
serialized_end=273,
)
_PEERUNREGISTERREQUEST = _descriptor.Descriptor(
name='PeerUnregisterRequest',
full_name='PeerUnregisterRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=275,
serialized_end=298,
)
_GETPEERSREQUEST = _descriptor.Descriptor(
name='GetPeersRequest',
full_name='GetPeersRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='peer_id', full_name='GetPeersRequest.peer_id', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='endpoint', full_name='GetPeersRequest.endpoint', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster', full_name='GetPeersRequest.cluster', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='KYC', full_name='GetPeersRequest.KYC', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='network', full_name='GetPeersRequest.network', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batch', full_name='GetPeersRequest.batch', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=300,
serialized_end=414,
)
_GETPEERSRESPONSE = _descriptor.Descriptor(
name='GetPeersResponse',
full_name='GetPeersResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='GetPeersResponse.status', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster', full_name='GetPeersResponse.cluster', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='peer_endpoints', full_name='GetPeersResponse.peer_endpoints', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_GETPEERSRESPONSE_GETPEERSSTATUS,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=417,
serialized_end=678,
)
_PINGREQUEST = _descriptor.Descriptor(
name='PingRequest',
full_name='PingRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=680,
serialized_end=693,
)
_PINGRESPONSE = _descriptor.Descriptor(
name='PingResponse',
full_name='PingResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=695,
serialized_end=709,
)
_GOSSIPMESSAGE = _descriptor.Descriptor(
name='GossipMessage',
full_name='GossipMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='content', full_name='GossipMessage.content', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='content_type', full_name='GossipMessage.content_type', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='time_to_live', full_name='GossipMessage.time_to_live', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_GOSSIPMESSAGE_CONTENTTYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=712,
serialized_end=905,
)
_NETWORKACKNOWLEDGEMENT = _descriptor.Descriptor(
name='NetworkAcknowledgement',
full_name='NetworkAcknowledgement',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='NetworkAcknowledgement.status', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pid', full_name='NetworkAcknowledgement.pid', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sync', full_name='NetworkAcknowledgement.sync', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_NETWORKACKNOWLEDGEMENT_STATUS,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=908,
serialized_end=1054,
)
_GOSSIPBLOCKREQUEST = _descriptor.Descriptor(
name='GossipBlockRequest',
full_name='GossipBlockRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='block_id', full_name='GossipBlockRequest.block_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nonce', full_name='GossipBlockRequest.nonce', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='time_to_live', full_name='GossipBlockRequest.time_to_live', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1056,
serialized_end=1131,
)
_GOSSIPBLOCKRESPONSE = _descriptor.Descriptor(
name='GossipBlockResponse',
full_name='GossipBlockResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='content', full_name='GossipBlockResponse.content', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nest', full_name='GossipBlockResponse.nest', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1133,
serialized_end=1185,
)
_GOSSIPBATCHRESPONSE = _descriptor.Descriptor(
name='GossipBatchResponse',
full_name='GossipBatchResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='content', full_name='GossipBatchResponse.content', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1187,
serialized_end=1225,
)
_GOSSIPBATCHBYBATCHIDREQUEST = _descriptor.Descriptor(
name='GossipBatchByBatchIdRequest',
full_name='GossipBatchByBatchIdRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='GossipBatchByBatchIdRequest.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nonce', full_name='GossipBatchByBatchIdRequest.nonce', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='time_to_live', full_name='GossipBatchByBatchIdRequest.time_to_live', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1227,
serialized_end=1305,
)
_GOSSIPBATCHBYTRANSACTIONIDREQUEST = _descriptor.Descriptor(
name='GossipBatchByTransactionIdRequest',
full_name='GossipBatchByTransactionIdRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ids', full_name='GossipBatchByTransactionIdRequest.ids', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nonce', full_name='GossipBatchByTransactionIdRequest.nonce', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='time_to_live', full_name='GossipBatchByTransactionIdRequest.time_to_live', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1307,
serialized_end=1392,
)
_GOSSIPCONSENSUSMESSAGE = _descriptor.Descriptor(
name='GossipConsensusMessage',
full_name='GossipConsensusMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='message', full_name='GossipConsensusMessage.message', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sender_id', full_name='GossipConsensusMessage.sender_id', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='time_to_live', full_name='GossipConsensusMessage.time_to_live', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1394,
serialized_end=1476,
)
_ENDPOINTITEM = _descriptor.Descriptor(
name='EndpointItem',
full_name='EndpointItem',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='peer_id', full_name='EndpointItem.peer_id', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='endpoint', full_name='EndpointItem.endpoint', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1478,
serialized_end=1527,
)
_ENDPOINTLIST = _descriptor.Descriptor(
name='EndpointList',
full_name='EndpointList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='endpoints', full_name='EndpointList.endpoints', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1529,
serialized_end=1577,
)
# Resolve cross-references that could not be expressed while the individual
# descriptors were being constructed: attach enum types to the fields that
# use them, point nested enums back at their containing message, and link
# the repeated 'endpoints' field to its message type.
_PEERREGISTERREQUEST.fields_by_name['mode'].enum_type = _PEERREGISTERREQUEST_REGISTERMODE
_PEERREGISTERREQUEST_REGISTERMODE.containing_type = _PEERREGISTERREQUEST
_GETPEERSRESPONSE.fields_by_name['status'].enum_type = _GETPEERSRESPONSE_GETPEERSSTATUS
_GETPEERSRESPONSE_GETPEERSSTATUS.containing_type = _GETPEERSRESPONSE
_GOSSIPMESSAGE.fields_by_name['content_type'].enum_type = _GOSSIPMESSAGE_CONTENTTYPE
_GOSSIPMESSAGE_CONTENTTYPE.containing_type = _GOSSIPMESSAGE
_NETWORKACKNOWLEDGEMENT.fields_by_name['status'].enum_type = _NETWORKACKNOWLEDGEMENT_STATUS
_NETWORKACKNOWLEDGEMENT_STATUS.containing_type = _NETWORKACKNOWLEDGEMENT
_ENDPOINTLIST.fields_by_name['endpoints'].message_type = _ENDPOINTITEM
# Register every top-level message type with the file descriptor, then the
# file itself with the default symbol database.
DESCRIPTOR.message_types_by_name['DisconnectMessage'] = _DISCONNECTMESSAGE
DESCRIPTOR.message_types_by_name['PeerRegisterRequest'] = _PEERREGISTERREQUEST
DESCRIPTOR.message_types_by_name['PeerUnregisterRequest'] = _PEERUNREGISTERREQUEST
DESCRIPTOR.message_types_by_name['GetPeersRequest'] = _GETPEERSREQUEST
DESCRIPTOR.message_types_by_name['GetPeersResponse'] = _GETPEERSRESPONSE
DESCRIPTOR.message_types_by_name['PingRequest'] = _PINGREQUEST
DESCRIPTOR.message_types_by_name['PingResponse'] = _PINGRESPONSE
DESCRIPTOR.message_types_by_name['GossipMessage'] = _GOSSIPMESSAGE
DESCRIPTOR.message_types_by_name['NetworkAcknowledgement'] = _NETWORKACKNOWLEDGEMENT
DESCRIPTOR.message_types_by_name['GossipBlockRequest'] = _GOSSIPBLOCKREQUEST
DESCRIPTOR.message_types_by_name['GossipBlockResponse'] = _GOSSIPBLOCKRESPONSE
DESCRIPTOR.message_types_by_name['GossipBatchResponse'] = _GOSSIPBATCHRESPONSE
DESCRIPTOR.message_types_by_name['GossipBatchByBatchIdRequest'] = _GOSSIPBATCHBYBATCHIDREQUEST
DESCRIPTOR.message_types_by_name['GossipBatchByTransactionIdRequest'] = _GOSSIPBATCHBYTRANSACTIONIDREQUEST
DESCRIPTOR.message_types_by_name['GossipConsensusMessage'] = _GOSSIPCONSENSUSMESSAGE
DESCRIPTOR.message_types_by_name['EndpointItem'] = _ENDPOINTITEM
DESCRIPTOR.message_types_by_name['EndpointList'] = _ENDPOINTLIST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DisconnectMessage = _reflection.GeneratedProtocolMessageType('DisconnectMessage', (_message.Message,), dict(
DESCRIPTOR = _DISCONNECTMESSAGE,
__module__ = 'dgt_sdk.protobuf.network_pb2'
# @@protoc_insertion_point(class_scope:DisconnectMessage)
))
_sym_db.RegisterMessage(DisconnectMessage)
PeerRegisterRequest = _reflection.GeneratedProtocolMessageType('PeerRegisterRequest', (_message.Message,), dict(
DESCRIPTOR = _PEERREGISTERREQUEST,
__module__ = 'dgt_sdk.protobuf.network_pb2'
# @@protoc_insertion_point(class_scope:PeerRegisterRequest)
))
_sym_db.RegisterMessage(PeerRegisterRequest)
PeerUnregisterRequest = _reflection.GeneratedProtocolMessageType('PeerUnregisterRequest', (_message.Message,), dict(
DESCRIPTOR = _PEERUNREGISTERREQUEST,
__module__ = 'dgt_sdk.protobuf.network_pb2'
# @@protoc_insertion_point(class_scope:PeerUnregisterRequest)
))
_sym_db.RegisterMessage(PeerUnregisterRequest)
GetPeersRequest = _reflection.GeneratedProtocolMessageType('GetPeersRequest', (_message.Message,), dict(
DESCRIPTOR = _GETPEERSREQUEST,
__module__ = 'dgt_sdk.protobuf.network_pb2'
# @@protoc_insertion_point(class_scope:GetPeersRequest)
))
_sym_db.RegisterMessage(GetPeersRequest)
GetPeersResponse = _reflection.GeneratedProtocolMessageType('GetPeersResponse', (_message.Message,), dict(
DESCRIPTOR = _GETPEERSRESPONSE,
__module__ = 'dgt_sdk.protobuf.network_pb2'
# @@protoc_insertion_point(class_scope:GetPeersResponse)
))
_sym_db.RegisterMessage(GetPeersResponse)
PingRequest = _reflection.GeneratedProtocolMessageType('PingRequest', (_message.Message,), dict(
DESCRIPTOR = _PINGREQUEST,
__module__ = 'dgt_sdk.protobuf.network_pb2'
# @@protoc_insertion_point(class_scope:PingRequest)
))
_sym_db.RegisterMessage(PingRequest)
PingResponse = _reflection.GeneratedProtocolMessageType('PingResponse', (_message.Message,), dict(
DESCRIPTOR = _PINGRESPONSE,
__module__ = 'dgt_sdk.protobuf.network_pb2'
# @@protoc_insertion_point(class_scope:PingResponse)
))
_sym_db.RegisterMessage(PingResponse)
GossipMessage = _reflection.GeneratedProtocolMessageType('GossipMessage', (_message.Message,), dict(
DESCRIPTOR = _GOSSIPMESSAGE,
__module__ = 'dgt_sdk.protobuf.network_pb2'
# @@protoc_insertion_point(class_scope:GossipMessage)
))
_sym_db.RegisterMessage(GossipMessage)
NetworkAcknowledgement = _reflection.GeneratedProtocolMessageType('NetworkAcknowledgement', (_message.Message,), dict(
DESCRIPTOR = _NETWORKACKNOWLEDGEMENT,
__module__ = 'dgt_sdk.protobuf.network_pb2'
# @@protoc_insertion_point(class_scope:NetworkAcknowledgement)
))
_sym_db.RegisterMessage(NetworkAcknowledgement)
GossipBlockRequest = _reflection.GeneratedProtocolMessageType('GossipBlockRequest', (_message.Message,), dict(
DESCRIPTOR = _GOSSIPBLOCKREQUEST,
__module__ = 'dgt_sdk.protobuf.network_pb2'
# @@protoc_insertion_point(class_scope:GossipBlockRequest)
))
_sym_db.RegisterMessage(GossipBlockRequest)
GossipBlockResponse = _reflection.GeneratedProtocolMessageType('GossipBlockResponse', (_message.Message,), dict(
DESCRIPTOR = _GOSSIPBLOCKRESPONSE,
__module__ = 'dgt_sdk.protobuf.network_pb2'
# @@protoc_insertion_point(class_scope:GossipBlockResponse)
))
_sym_db.RegisterMessage(GossipBlockResponse)
GossipBatchResponse = _reflection.GeneratedProtocolMessageType('GossipBatchResponse', (_message.Message,), dict(
DESCRIPTOR = _GOSSIPBATCHRESPONSE,
__module__ = 'dgt_sdk.protobuf.network_pb2'
# @@protoc_insertion_point(class_scope:GossipBatchResponse)
))
_sym_db.RegisterMessage(GossipBatchResponse)
GossipBatchByBatchIdRequest = _reflection.GeneratedProtocolMessageType('GossipBatchByBatchIdRequest', (_message.Message,), dict(
DESCRIPTOR = _GOSSIPBATCHBYBATCHIDREQUEST,
__module__ = 'dgt_sdk.protobuf.network_pb2'
# @@protoc_insertion_point(class_scope:GossipBatchByBatchIdRequest)
))
_sym_db.RegisterMessage(GossipBatchByBatchIdRequest)
GossipBatchByTransactionIdRequest = _reflection.GeneratedProtocolMessageType('GossipBatchByTransactionIdRequest', (_message.Message,), dict(
DESCRIPTOR = _GOSSIPBATCHBYTRANSACTIONIDREQUEST,
__module__ = 'dgt_sdk.protobuf.network_pb2'
# @@protoc_insertion_point(class_scope:GossipBatchByTransactionIdRequest)
))
_sym_db.RegisterMessage(GossipBatchByTransactionIdRequest)
GossipConsensusMessage = _reflection.GeneratedProtocolMessageType('GossipConsensusMessage', (_message.Message,), dict(
DESCRIPTOR = _GOSSIPCONSENSUSMESSAGE,
__module__ = 'dgt_sdk.protobuf.network_pb2'
# @@protoc_insertion_point(class_scope:GossipConsensusMessage)
))
_sym_db.RegisterMessage(GossipConsensusMessage)
EndpointItem = _reflection.GeneratedProtocolMessageType('EndpointItem', (_message.Message,), dict(
DESCRIPTOR = _ENDPOINTITEM,
__module__ = 'dgt_sdk.protobuf.network_pb2'
# @@protoc_insertion_point(class_scope:EndpointItem)
))
_sym_db.RegisterMessage(EndpointItem)
EndpointList = _reflection.GeneratedProtocolMessageType('EndpointList', (_message.Message,), dict(
DESCRIPTOR = _ENDPOINTLIST,
__module__ = 'dgt_sdk.protobuf.network_pb2'
# @@protoc_insertion_point(class_scope:EndpointList)
))
_sym_db.RegisterMessage(EndpointList)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope) | PypiClean |
/Flask-Statics-Helper-1.0.0.tar.gz/Flask-Statics-Helper-1.0.0/flask_statics/static/angular/i18n/angular-locale_seh.js | 'use strict';
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
function getDecimals(n) {
n = n + '';
var i = n.indexOf('.');
return (i == -1) ? 0 : n.length - i - 1;
}
function getVF(n, opt_precision) {
var v = opt_precision;
if (undefined === v) {
v = Math.min(getDecimals(n), 3);
}
var base = Math.pow(10, v);
var f = ((n * base) | 0) % base;
return {v: v, f: f};
}
$provide.value("$locale", {
"DATETIME_FORMATS": {
"AMPMS": [
"AM",
"PM"
],
"DAY": [
"Dimingu",
"Chiposi",
"Chipiri",
"Chitatu",
"Chinai",
"Chishanu",
"Sabudu"
],
"MONTH": [
"Janeiro",
"Fevreiro",
"Marco",
"Abril",
"Maio",
"Junho",
"Julho",
"Augusto",
"Setembro",
"Otubro",
"Novembro",
"Decembro"
],
"SHORTDAY": [
"Dim",
"Pos",
"Pir",
"Tat",
"Nai",
"Sha",
"Sab"
],
"SHORTMONTH": [
"Jan",
"Fev",
"Mar",
"Abr",
"Mai",
"Jun",
"Jul",
"Aug",
"Set",
"Otu",
"Nov",
"Dec"
],
"fullDate": "EEEE, d 'de' MMMM 'de' y",
"longDate": "d 'de' MMMM 'de' y",
"medium": "d 'de' MMM 'de' y HH:mm:ss",
"mediumDate": "d 'de' MMM 'de' y",
"mediumTime": "HH:mm:ss",
"short": "d/M/y HH:mm",
"shortDate": "d/M/y",
"shortTime": "HH:mm"
},
"NUMBER_FORMATS": {
"CURRENCY_SYM": "MTn",
"DECIMAL_SEP": ",",
"GROUP_SEP": ".",
"PATTERNS": [
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 3,
"minFrac": 0,
"minInt": 1,
"negPre": "-",
"negSuf": "",
"posPre": "",
"posSuf": ""
},
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 2,
"minFrac": 2,
"minInt": 1,
"negPre": "-",
"negSuf": "\u00a4",
"posPre": "",
"posSuf": "\u00a4"
}
]
},
"id": "seh",
"pluralCat": function(n, opt_precision) { var i = n | 0; var vf = getVF(n, opt_precision); if (i == 1 && vf.v == 0) { return PLURAL_CATEGORY.ONE; } return PLURAL_CATEGORY.OTHER;}
});
}]); | PypiClean |
/Django-4.2.4.tar.gz/Django-4.2.4/django/contrib/gis/gdal/prototypes/generation.py | from ctypes import POINTER, c_bool, c_char_p, c_double, c_int, c_int64, c_void_p
from functools import partial
from django.contrib.gis.gdal.prototypes.errcheck import (
check_arg_errcode,
check_const_string,
check_errcode,
check_geom,
check_geom_offset,
check_pointer,
check_srs,
check_str_arg,
check_string,
)
class gdal_char_p(c_char_p):
    """Marker subclass of ``c_char_p`` for GDAL-allocated strings.

    Used as a ``restype`` so the errcheck routines can tell strings whose
    buffer must be freed by GDAL apart from plain C strings (ctypes does
    not auto-convert subclasses, so the raw pointer reaches the checker).
    """
    pass
def bool_output(func, argtypes, errcheck=None):
    """Configure *func* as a ctypes routine returning a boolean.

    Sets the argument types and a ``c_bool`` return type on the given
    ctypes function pointer, optionally attaching an error-checking
    callback, and returns the same function object.
    """
    func.restype = c_bool
    func.argtypes = argtypes
    if errcheck:
        func.errcheck = errcheck
    return func
def double_output(func, argtypes, errcheck=False, strarg=False, cpl=False):
    """Configure *func* as a ctypes routine returning a C double.

    When *errcheck* is set, attach the error-code checker (honouring the
    *cpl* flag); when *strarg* is also set, the string-argument checker
    takes precedence, matching the original assignment order.
    """
    func.restype = c_double
    func.argtypes = argtypes
    if errcheck:
        func.errcheck = partial(check_arg_errcode, cpl=cpl)
    if strarg:
        func.errcheck = check_str_arg
    return func
def geom_output(func, argtypes, offset=None):
    """Configure *func* to produce an OGR Geometry.

    Without *offset* the C routine returns a geometry pointer directly;
    with *offset* it returns an error code and the geometry comes back
    by reference through the argument at the given offset.
    """
    func.argtypes = argtypes
    if offset:
        # Error code returned; geometry retrieved from an argument.
        func.restype = c_int

        def _by_ref_check(result, f, cargs):
            return check_geom_offset(result, f, cargs, offset)

        func.errcheck = _by_ref_check
    else:
        # Geometry pointer returned directly.
        func.restype = c_void_p
        func.errcheck = check_geom
    return func
def int_output(func, argtypes, errcheck=None):
    """Configure *func* as a ctypes routine returning a C int,
    optionally attaching an error-checking callback."""
    func.restype = c_int
    func.argtypes = argtypes
    if errcheck:
        func.errcheck = errcheck
    return func
def int64_output(func, argtypes):
    """Configure *func* as a ctypes routine returning a 64-bit integer."""
    func.restype = c_int64
    func.argtypes = argtypes
    return func
def srs_output(func, argtypes):
    """Configure *func* to return a pointer to an OGR Spatial Reference
    System, validated through the SRS error checker."""
    func.restype = c_void_p
    func.argtypes = argtypes
    func.errcheck = check_srs
    return func
def const_string_output(func, argtypes, offset=None, decoding=None, cpl=False):
    """Configure *func* to return a constant GDAL string.

    With *offset* the C routine reports an error code and the string is
    retrieved from one of its arguments; otherwise the string is the
    return value itself. A non-empty result is decoded with *decoding*
    when one is given.
    """
    func.argtypes = argtypes
    func.restype = c_int if offset else c_char_p

    def _const_errcheck(result, f, cargs):
        text = check_const_string(result, f, cargs, offset=offset, cpl=cpl)
        # Decode only when a value came back and a codec was requested.
        return text.decode(decoding) if text and decoding else text

    func.errcheck = _const_errcheck
    return func
def string_output(func, argtypes, offset=-1, str_result=False, decoding=None):
    """Configure *func* to return a string taken from a GDAL pointer.

    When *str_result* is true the C routine returns the string itself as
    a ``gdal_char_p`` (a ``c_char_p`` subclass, so the errcheck hook can
    free the GDAL-allocated buffer via VSIFree); otherwise the routine
    returns a status code and the string comes back through the argument
    at *offset*. A non-empty result is decoded with *decoding*.
    """
    func.argtypes = argtypes
    if str_result:
        # Subclass restype lets the error checker free the C buffer.
        func.restype = gdal_char_p
    else:
        # The integer result is an error code.
        func.restype = c_int

    def _string_errcheck(result, f, cargs):
        value = check_string(result, f, cargs, offset=offset, str_result=str_result)
        if value and decoding:
            value = value.decode(decoding)
        return value

    func.errcheck = _string_errcheck
    return func
def void_output(func, argtypes, errcheck=True, cpl=False):
    """Configure *func* for routines that either return nothing or return
    only a status code.

    With *errcheck* (the default) the int result is validated as an error
    code; pass ``errcheck=False`` for genuine ``void`` routines.
    """
    if argtypes:
        func.argtypes = argtypes
    if not errcheck:
        # True void routine: nothing to validate.
        func.restype = None
    else:
        func.restype = c_int
        func.errcheck = partial(check_errcode, cpl=cpl)
    return func
def voidptr_output(func, argtypes, errcheck=True):
    """Configure *func* as a ctypes routine returning ``c_void_p``,
    with optional NULL-pointer checking."""
    func.restype = c_void_p
    func.argtypes = argtypes
    if errcheck:
        func.errcheck = check_pointer
    return func
def chararray_output(func, argtypes, errcheck=True):
    """Configure *func* to return an array of C strings
    (``char **``), with optional NULL-pointer checking."""
    func.restype = POINTER(c_char_p)
    func.argtypes = argtypes
    if errcheck:
        func.errcheck = check_pointer
    return func
/ChemGAPP-0.0.9-py3-none-any.whl/ChemGAPP_Package/ChemGAPP_Big/MW_Conditions_to_Remove.py |
# In[ ]:
import argparse
import pandas as pd
def get_options():
    """Assemble the command-line parser for this tool and return the
    parsed options (reads ``sys.argv``)."""
    parser = argparse.ArgumentParser(
        description="Outputs a list of conditions which were removed at a certain chosen threshold for the Mann Whitney Condition Level test. Also outputs a new dataset to go back into the process of normalisation and scoring, but with detrimental plates removed.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument("-i", "--InputFile",
                        help="output from Mann_Whitney_Condition_Level.py")
    parser.add_argument("-o", "--OutputFile",
                        help="A CSV file with the name of the plates that were removed and their file names.")
    parser.add_argument("-od", "--Original_Dataset",
                        help="The original .csv dataset used in the first stage or the output of MW_Plates_to_Remove.py or Z_Plates_to_Remove.py or Variance_Conditions_to_Remove.py to remove more plates")
    parser.add_argument("-or", "--Output_removed",
                        help="A .csv dataset with detrimental plates removed.")
    parser.add_argument("-t", "--Threshold", type=float,
                        help="A chosen threshold, usually based off of the bar chart produced by Bar_plot_Condition.py.")
    return parser.parse_args()
def main():
    """Flag condition/batch pairs whose Mann-Whitney p-value exceeds the
    chosen threshold, write them to a report CSV, and write a copy of the
    original dataset with those condition/batch groups removed.
    """
    options = get_options()
    input_V = options.InputFile
    outpt = options.OutputFile
    input_original = options.Original_Dataset
    outpt_removed = options.Output_removed
    threshold = options.Threshold
    # Output of Mann_Whitney_Condition_Level.py: one row per condition/batch
    # with (positionally) condition, batch, U-statistic, p-value columns.
    input_DF_2 = pd.read_csv(input_V)
    # Original (pre-normalisation) dataset with a four-level column header.
    m = pd.read_csv(input_original, index_col=[0, 1], header=[0, 1, 2, 3])
    # Reorder the column levels so (condition, batch) lead, allowing whole
    # condition/batch groups to be dropped with a single `drop` call.
    m.columns = m.columns.swaplevel(0, 1)
    m.columns = m.columns.swaplevel(1, 3)
    report_columns = ['Condition', 'Batch',
                      'Mean Variance P-Value', 'Mean Variance U-Stat']
    flagged = []       # (condition, batch) pairs above the threshold
    report_rows = []   # rows for the report CSV
    for _, row in input_DF_2.iterrows():
        # .iloc gives explicit positional access (positional Series[int]
        # indexing is deprecated in modern pandas).
        condition, batch = row.iloc[0], row.iloc[1]
        u_stat, p_value = row.iloc[2], row.iloc[3]
        if p_value > threshold:
            flagged.append((str(condition), batch))
            report_rows.append(dict(zip(report_columns,
                                        (condition, batch, p_value, u_stat))))
    # Build the report DataFrame in one go: the previous row-by-row
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0.
    Plts_rm = pd.DataFrame(report_rows, columns=report_columns)
    Plts_rm.to_csv(outpt)
    # Only drop pairs actually present in the supplied dataset -- it may
    # already have had plates removed by an earlier filtering step.
    present = {col[0:2] for col in m.columns}
    to_drop = [pair for pair in flagged if pair in present]
    n = m.drop(columns=to_drop, axis=1)
    # Restore the original column-level order before writing out.
    n.columns = n.columns.swaplevel(3, 1)
    n.columns = n.columns.swaplevel(1, 0)
    n.to_csv(outpt_removed)


if __name__ == "__main__":
    main()
/Oasis_Optimization-1.0.2-py3-none-any.whl/Oasis/gradient.py | import copy
import numpy
class Gradient(object):
    """
    Optimizer gradient-calculation object.

    Computes objective and constraint gradients for an optimization
    problem via forward finite differences ('FD'), the complex-step
    method ('CS'), or a user-supplied sensitivity function, optionally
    distributing the per-variable work across MPI ranks ('pgc' mode).
    """
    def __init__(self, opt_problem, sens_type, sens_mode='', sens_step={}, *args, **kwargs):
        """
        Optimizer Gradient Calculation Class Initialization.
        Arguments:
        - opt_problem -> INST: Optimization instance; its objectives,
          constraints and variables define the gradient dimensions.
        - sens_type -> STR/FUNC: Sensitivity type ('FD', 'CS', or a
          callable of the form ``sens(x, f, g) -> (df, dg, fail)``)
        Keyword arguments:
        - sens_mode -> STR: Parallel flag [''-serial,'pgc'-parallel], *Default* = ''
        - sens_step -> FLOAT: Step size; the default ``{}`` acts as a
          "not given" sentinel selecting 1e-6 (FD) or 1e-20 (CS)
        """
        self.opt_problem = opt_problem
        # String sensitivity types are normalized to lower case; a callable
        # is stored as-is and invoked directly in getGrad().
        if isinstance(sens_type, str):
            self.sens_type = sens_type.lower()
        else:
            self.sens_type = sens_type
        # NOTE(review): sens_step={} is a mutable default used purely as a
        # sentinel; it is only compared (never mutated), so it is harmless.
        if (sens_step == {}):
            if (self.sens_type == 'fd'):
                self.sens_step = 1.0e-6
            elif (self.sens_type == 'cs'):
                self.sens_step = 1.0e-20
            else:
                self.sens_step = sens_step
        else:
            self.sens_step = sens_step
        self.sens_mode = sens_mode.lower()
        # MPI Setup
        if (self.sens_mode.lower() == 'pgc'):
            try:
                import mpi4py
                from mpi4py import MPI
            except ImportError:
                # NOTE(review): only a message is printed on failure; the
                # MPI.COMM_WORLD access below would then raise NameError --
                # consider re-raising the ImportError instead.
                print('Error: mpi4py library failed to import')
            comm = MPI.COMM_WORLD
            self.nproc = comm.Get_size()
            self.myrank = comm.Get_rank()
            # mpi4py 0.x exposed capitalized methods; 1.x+ uses lower-case.
            if (mpi4py.__version__[0] == '0'):
                self.Barrier = comm.Barrier
                self.Send = comm.SSend
                self.Recv = comm.Recv
                self.Bcast = comm.Bcast
                self.Gather = comm.Gather
            elif (mpi4py.__version__[0] >= '1'):
                self.Barrier = comm.barrier
                self.Send = comm.ssend
                self.Recv = comm.recv
                self.Bcast = comm.bcast
                self.Gather = comm.gather
            # Round-robin split: each rank perturbs every nproc-th variable.
            self.mydvs = range(self.myrank,
                               len(opt_problem._variables.keys()),
                               self.nproc)
        else:
            self.myrank = 0
            self.mydvs = range(len(opt_problem._variables.keys()))
    def getGrad(self, x, group_ids, f, g, *args, **kwargs):
        """
        Compute objective and constraint gradients at the current point.
        Arguments:
        - x -> ARRAY: Design variables
        - group_ids -> DICT: Group identifications (maps group name to
          [start, end) slice indices into x when groups are used)
        - f -> ARRAY: Objective values at x
        - g -> ARRAY: Constraint values at x
        Returns ``(df, dg)`` with shapes (n_obj, n_var) and (n_con, n_var).
        """
        opt_problem = self.opt_problem
        sens_type = self.sens_type
        sens_mode = self.sens_mode
        sens_step = self.sens_step
        mydvs = self.mydvs
        myrank = self.myrank
        # Flag so the objective function knows it is being called for
        # gradient evaluation; cleared again before returning.
        opt_problem.is_gradient = True
        # Per-rank partial gradients (columns = this rank's variables only).
        dfi = numpy.zeros([len(opt_problem._objectives.keys()), len(mydvs)], 'd')
        dgi = numpy.zeros([len(opt_problem._constraints.keys()), len(mydvs)], 'd')
        if (sens_type == 'fd'):
            # Finite Differences (forward): df/dx_i ~ (f(x + h e_i) - f(x)) / h
            dh = sens_step
            xs = x
            k = 0
            for i in mydvs:
                # NOTE(review): assumes x supports copy.copy and item
                # assignment (numpy array or list) -- confirm with callers.
                xh = copy.copy(xs)
                xh[i] += dh
                # Variables Groups Handling
                if opt_problem.use_groups:
                    xhg = {}
                    for group in group_ids.keys():
                        if (group_ids[group][1] - group_ids[group][0] == 1):
                            xhg[group] = xh[group_ids[group][0]]
                        else:
                            xhg[group] = xh[group_ids[group][0]:group_ids[group][1]]
                    xh = xhg
                [fph, gph, fail] = opt_problem.obj_fun(xh, *args, **kwargs)
                if isinstance(fph, float):
                    fph = [fph]
                for j in range(len(opt_problem._objectives.keys())):
                    dfi[j, k] = (fph[j] - f[j]) / dh
                for j in range(len(opt_problem._constraints.keys())):
                    dgi[j, k] = (gph[j] - g[j]) / dh
                k += 1
        elif (sens_type == 'cs'):
            # Complex Step: df/dx_i ~ Im(f(x + i*h e_i)) / h, which avoids
            # the subtractive cancellation of finite differences.
            cdh = sens_step
            cxs = copy.copy(x)
            k = 0
            for i in mydvs:
                # Promote to a complex vector, perturbing the imaginary part.
                cxh = cxs + numpy.zeros(len(cxs), complex)
                cxh[i] = complex(cxh[i], cdh)
                # Variables Groups Handling
                if opt_problem.use_groups:
                    cxhg = {}
                    for group in group_ids.keys():
                        if (group_ids[group][1] - group_ids[group][0] == 1):
                            cxhg[group] = cxh[group_ids[group][0]]
                        else:
                            cxhg[group] = cxh[group_ids[group][0]:group_ids[group][1]]
                    cxh = cxhg
                [cfph, cgph, fail] = opt_problem.obj_fun(cxh, *args, **kwargs)
                if isinstance(cfph, complex):
                    cfph = [cfph]
                for j in range(len(opt_problem._objectives.keys())):
                    dfi[j, k] = cfph[j].imag / cdh
                for j in range(len(opt_problem._constraints.keys())):
                    dgi[j, k] = cgph[j].imag / cdh
                k += 1
            dfi = dfi.astype(float)
            dgi = dgi.astype(float)
        else:
            # Variables Groups Handling
            if opt_problem.use_groups:
                xg = {}
                for group in group_ids.keys():
                    if (group_ids[group][1] - group_ids[group][0] == 1):
                        xg[group] = x[group_ids[group][0]]
                    else:
                        xg[group] = x[group_ids[group][0]:group_ids[group][1]]
                xn = xg
            else:
                xn = x
            # User Provided Sensitivities: sens_type is the callable itself.
            [df_user, dg_user, fail] = sens_type(xn, f, g, *args, **kwargs)
            if isinstance(df_user, list):
                if len(opt_problem._objectives.keys()) == 1:
                    df_user = [df_user]
                df_user = numpy.array(df_user)
            if isinstance(dg_user, list):
                dg_user = numpy.array(dg_user)
            # Copy the user-supplied full gradients into the local arrays.
            for i in range(len(opt_problem._variables.keys())):
                for j in range(len(opt_problem._objectives.keys())):
                    dfi[j, i] = df_user[j, i]
                for j in range(len(opt_problem._constraints.keys())):
                    dgi[j, i] = dg_user[j, i]
        # MPI Gradient Assembly: full-size output arrays over all variables.
        df = numpy.zeros([
            len(opt_problem._objectives.keys()), len(
                opt_problem._variables.keys())
        ], 'd')
        dg = numpy.zeros([
            len(opt_problem._constraints.keys()), len(
                opt_problem._variables.keys())
        ], 'd')
        if (sens_mode == 'pgc'):
            if (sens_type == 'fd') or (sens_type == 'cs'):
                # Gather every rank's partial columns on rank 0, scatter them
                # back into their round-robin positions, then broadcast.
                send_obj = [myrank, dfi, dgi]
                p_results = self.Gather(send_obj, root=0)
                if myrank == 0:
                    for proc in range(self.nproc):
                        k = 0
                        for i in range(p_results[proc][0],
                                       len(opt_problem._variables.keys()),
                                       self.nproc):
                            df[:, i] = p_results[proc][1][:, k]
                            dg[:, i] = p_results[proc][2][:, k]
                            k += 1
                [df, dg] = self.Bcast([df, dg], root=0)
        else:
            # Serial case: the "partial" arrays already cover all variables.
            df = dfi
            dg = dgi
        opt_problem.is_gradient = False
        return df, dg
    def getHess(self, *args, **kwargs):
        """
        Placeholder for Hessian calculation; not implemented
        (always returns None).
        """
        return
# Optimizer Gradient Calculation Test
if __name__ == '__main__':

    # Smoke test: Gradient requires an optimization-problem instance and a
    # sensitivity type as mandatory positional arguments, so the previous
    # bare `Gradient()` call raised TypeError before any test output.
    print('Testing Optimizer Gradient Calculation...')
    grd = Gradient(None, 'FD')
    print('Sensitivity type: %s, step size: %s' % (grd.sens_type, grd.sens_step))
/Flask_AdminLTE3-1.0.9-py3-none-any.whl/flask_adminlte3/static/plugins/datatables-buttons/js/buttons.html5.js | (function( factory ){
if ( typeof define === 'function' && define.amd ) {
// AMD
define( ['jquery', 'datatables.net', 'datatables.net-buttons'], function ( $ ) {
return factory( $, window, document );
} );
}
else if ( typeof exports === 'object' ) {
// CommonJS
module.exports = function (root, $, jszip, pdfmake) {
if ( ! root ) {
root = window;
}
if ( ! $ || ! $.fn.dataTable ) {
$ = require('datatables.net')(root, $).$;
}
if ( ! $.fn.dataTable.Buttons ) {
require('datatables.net-buttons')(root, $);
}
return factory( $, root, root.document, jszip, pdfmake );
};
}
else {
// Browser
factory( jQuery, window, document );
}
}(function( $, window, document, jszip, pdfmake, undefined ) {
'use strict';
var DataTable = $.fn.dataTable;
// Allow the constructor to pass in JSZip and PDFMake from external requires.
// Otherwise, use globally defined variables, if they are available.
// Resolve the JSZip library: a module injected via DataTable.Buttons.jszip()
// takes precedence over the page-level global.
function _jsZip () {
	return jszip || window.JSZip;
}
// Resolve the pdfmake library: a module injected via
// DataTable.Buttons.pdfMake() takes precedence over the page-level global.
function _pdfMake () {
	return pdfmake || window.pdfMake;
}
// Combined getter / setter: with no argument, return the resolved pdfmake
// library; with an argument, register it for later use (e.g. when loaded
// through a module system with no global).
DataTable.Buttons.pdfMake = function (_) {
	if ( ! _ ) {
		return _pdfMake();
	}
	pdfmake = _;
}
// Combined getter / setter for the JSZip library, mirroring
// DataTable.Buttons.pdfMake above.
DataTable.Buttons.jszip = function (_) {
	if ( ! _ ) {
		return _jsZip();
	}
	jszip = _;
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* FileSaver.js dependency
*/
/*jslint bitwise: true, indent: 4, laxbreak: true, laxcomma: true, smarttabs: true, plusplus: true */
// Bundled FileSaver.js polyfill: returns a `saveAs(blob, name, no_auto_bom)`
// function appropriate for the current browser (anchor `download`, IE's
// msSaveOrOpenBlob, or object-URL navigation as a last resort).
var _saveAs = (function(view) {
	"use strict";
	// IE <10 is explicitly unsupported
	if (typeof view === "undefined" || typeof navigator !== "undefined" && /MSIE [1-9]\./.test(navigator.userAgent)) {
		return;
	}
	var
		  doc = view.document
		// only get URL when necessary in case Blob.js hasn't overridden it yet
		, get_URL = function() {
			return view.URL || view.webkitURL || view;
		}
		, save_link = doc.createElementNS("http://www.w3.org/1999/xhtml", "a")
		, can_use_save_link = "download" in save_link
		, click = function(node) {
			var event = new MouseEvent("click");
			node.dispatchEvent(event);
		}
		, is_safari = /constructor/i.test(view.HTMLElement) || view.safari
		, is_chrome_ios =/CriOS\/[\d]+/.test(navigator.userAgent)
		, throw_outside = function(ex) {
			(view.setImmediate || view.setTimeout)(function() {
				throw ex;
			}, 0);
		}
		, force_saveable_type = "application/octet-stream"
		// the Blob API is fundamentally broken as there is no "downloadfinished" event to subscribe to
		, arbitrary_revoke_timeout = 1000 * 40 // in ms
		, revoke = function(file) {
			var revoker = function() {
				if (typeof file === "string") { // file is an object URL
					get_URL().revokeObjectURL(file);
				} else { // file is a File
					file.remove();
				}
			};
			setTimeout(revoker, arbitrary_revoke_timeout);
		}
		// Fire the given on<event> handlers on the FileSaver instance
		, dispatch = function(filesaver, event_types, event) {
			event_types = [].concat(event_types);
			var i = event_types.length;
			while (i--) {
				var listener = filesaver["on" + event_types[i]];
				if (typeof listener === "function") {
					try {
						listener.call(filesaver, event || filesaver);
					} catch (ex) {
						throw_outside(ex);
					}
				}
			}
		}
		, auto_bom = function(blob) {
			// prepend BOM for UTF-8 XML and text/* types (including HTML)
			// note: your browser will automatically convert UTF-16 U+FEFF to EF BB BF
			if (/^\s*(?:text\/\S*|application\/xml|\S*\/\S*\+xml)\s*;.*charset\s*=\s*utf-8/i.test(blob.type)) {
				return new Blob([String.fromCharCode(0xFEFF), blob], {type: blob.type});
			}
			return blob;
		}
		, FileSaver = function(blob, name, no_auto_bom) {
			if (!no_auto_bom) {
				blob = auto_bom(blob);
			}
			// First try a.download, then web filesystem, then object URLs
			var
				  filesaver = this
				, type = blob.type
				, force = type === force_saveable_type
				, object_url
				, dispatch_all = function() {
					dispatch(filesaver, "writestart progress write writeend".split(" "));
				}
				// on any filesys errors revert to saving with object URLs
				, fs_error = function() {
					if ((is_chrome_ios || (force && is_safari)) && view.FileReader) {
						// Safari doesn't allow downloading of blob urls
						var reader = new FileReader();
						reader.onloadend = function() {
							var url = is_chrome_ios ? reader.result : reader.result.replace(/^data:[^;]*;/, 'data:attachment/file;');
							var popup = view.open(url, '_blank');
							if(!popup) view.location.href = url;
							url=undefined; // release reference before dispatching
							filesaver.readyState = filesaver.DONE;
							dispatch_all();
						};
						reader.readAsDataURL(blob);
						filesaver.readyState = filesaver.INIT;
						return;
					}
					// don't create more object URLs than needed
					if (!object_url) {
						object_url = get_URL().createObjectURL(blob);
					}
					if (force) {
						view.location.href = object_url;
					} else {
						var opened = view.open(object_url, "_blank");
						if (!opened) {
							// Apple does not allow window.open, see https://developer.apple.com/library/safari/documentation/Tools/Conceptual/SafariExtensionGuide/WorkingwithWindowsandTabs/WorkingwithWindowsandTabs.html
							view.location.href = object_url;
						}
					}
					filesaver.readyState = filesaver.DONE;
					dispatch_all();
					revoke(object_url);
				}
				;
			filesaver.readyState = filesaver.INIT;
			// Preferred path: anchor element with the `download` attribute
			if (can_use_save_link) {
				object_url = get_URL().createObjectURL(blob);
				setTimeout(function() {
					save_link.href = object_url;
					save_link.download = name;
					click(save_link);
					dispatch_all();
					revoke(object_url);
					filesaver.readyState = filesaver.DONE;
				});
				return;
			}
			fs_error();
		}
		, FS_proto = FileSaver.prototype
		, saveAs = function(blob, name, no_auto_bom) {
			return new FileSaver(blob, name || blob.name || "download", no_auto_bom);
		}
		;
	// IE 10+ (native saveAs)
	if (typeof navigator !== "undefined" && navigator.msSaveOrOpenBlob) {
		return function(blob, name, no_auto_bom) {
			name = name || blob.name || "download";
			if (!no_auto_bom) {
				blob = auto_bom(blob);
			}
			return navigator.msSaveOrOpenBlob(blob, name);
		};
	}
	FS_proto.abort = function(){};
	FS_proto.readyState = FS_proto.INIT = 0;
	FS_proto.WRITING = 1;
	FS_proto.DONE = 2;
	FS_proto.error =
	FS_proto.onwritestart =
	FS_proto.onprogress =
	FS_proto.onwrite =
	FS_proto.onabort =
	FS_proto.onerror =
	FS_proto.onwriteend =
		null;
	return saveAs;
}(
	   typeof self !== "undefined" && self
	|| typeof window !== "undefined" && window
	|| this.content
));
// Expose file saver on the DataTables API. Can't attach to `DataTables.Buttons`
// since this file can be loaded before Button's core!
DataTable.fileSave = _saveAs;
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Local (private) functions
*/
/**
* Get the sheet name for Excel exports.
*
* @param {object} config Button configuration
*/
var _sheetname = function ( config )
{
	// Default when no name is configured
	if ( ! config.sheetName ) {
		return 'Sheet1';
	}

	// Strip the characters Excel forbids in worksheet names
	return config.sheetName.replace(/[\[\]\*\/\\\?\:]/g, '');
};
/**
* Get the newline character(s)
*
* @param {object} config Button configuration
* @return {string} Newline character
*/
var _newLine = function ( config )
{
	// An explicitly configured newline always wins
	if ( config.newline ) {
		return config.newline;
	}

	// Otherwise use the platform default: CRLF on Windows, LF elsewhere
	return navigator.userAgent.match(/Windows/) ?
		'\r\n' :
		'\n';
};
/**
* Combine the data from the `buttons.exportData` method into a string that
* will be used in the export file.
*
* @param {DataTable.Api} dt DataTables API instance
* @param {object} config Button configuration
* @return {object} The data to export
*/
var _exportData = function ( dt, config )
{
	var eol = _newLine( config );
	var data = dt.buttons.exportData( config.exportOptions );
	var boundary = config.fieldBoundary;
	var separator = config.fieldSeparator;
	var reBoundary = new RegExp( boundary, 'g' );
	var escapeChar = config.escapeChar !== undefined ?
		config.escapeChar :
		'\\';

	// Join one row's cells with the separator, wrapping each cell in the
	// field boundary and escaping boundary characters found in the data
	var joinRow = function ( cells ) {
		var out = '';

		for ( var i=0, ien=cells.length ; i<ien ; i++ ) {
			if ( i > 0 ) {
				out += separator;
			}

			out += boundary ?
				boundary + ('' + cells[i]).replace( reBoundary, escapeChar+boundary ) + boundary :
				cells[i];
		}

		return out;
	};

	var bodyRows = [];

	for ( var i=0, ien=data.body.length ; i<ien ; i++ ) {
		bodyRows.push( joinRow( data.body[i] ) );
	}

	var header = config.header ? joinRow( data.header )+eol : '';
	var footer = config.footer && data.footer ? eol+joinRow( data.footer ) : '';

	return {
		str: header + bodyRows.join( eol ) + footer,
		rows: bodyRows.length
	};
};
/**
* Older versions of Safari (prior to tech preview 18) don't support the
* download option required.
*
* @return {Boolean} `true` if old Safari
*/
var _isDuffSafari = function ()
{
	var ua = navigator.userAgent;

	// Safari's UA also appears in Chrome / Opera, so exclude those
	var safari = ua.indexOf('Safari') !== -1 &&
		ua.indexOf('Chrome') === -1 &&
		ua.indexOf('Opera') === -1;

	if ( ! safari ) {
		return false;
	}

	// WebKit 603.1 (Safari Technical Preview 18) added `download` support
	var version = ua.match( /AppleWebKit\/(\d+\.\d+)/ );

	return !! (version && version.length > 1 && version[1]*1 < 603.1);
};
/**
* Convert from numeric position to letter for column names in Excel
* @param {int} n Column number
* @return {string} Column letter(s) name
*/
function createCellPos( n ){
	var base = 26; // letters A-Z
	var out = "";

	// Bijective base-26: peel off the least-significant letter each pass
	while ( n >= 0 ) {
		out = String.fromCharCode( (n % base) + 65 ) + out; // 65 === 'A'
		n = Math.floor( n / base ) - 1;
	}

	return out;
}
// Create the XML serialiser used to write the XLSX parts up front. Wrapped
// in try/catch so environments without XMLSerializer simply leave these
// variables undefined (Excel export unavailable) rather than throwing at
// load time. `_ieExcel` is lazily detected later in _addToZip.
try {
	var _serialiser = new XMLSerializer();
	var _ieExcel;
}
catch (t) {}
/**
* Recursively add XML files from an object's structure to a ZIP file. This
* allows the XSLX file to be easily defined with an object's structure matching
* the files structure.
*
* @param {JSZip} zip ZIP package
* @param {object} obj Object to add (recursive)
*/
function _addToZip( zip, obj ) {
	// Lazy one-time detection of IE's serialiser quirks, by serialising a
	// known XML string and checking whether namespace attrs survive.
	if ( _ieExcel === undefined ) {
		// Detect if we are dealing with IE's _awful_ serialiser by seeing if it
		// drop attributes
		_ieExcel = _serialiser
			.serializeToString(
				( new window.DOMParser() ).parseFromString( excelStrings['xl/worksheets/sheet1.xml'], 'text/xml' )
			)
			.indexOf( 'xmlns:r' ) === -1;
	}
	$.each( obj, function ( name, val ) {
		if ( $.isPlainObject( val ) ) {
			// Plain objects are sub-directories; recurse into them
			var newDir = zip.folder( name );
			_addToZip( newDir, val );
		}
		else {
			if ( _ieExcel ) {
				// IE's XML serialiser will drop some name space attributes from
				// from the root node, so we need to save them. Do this by
				// replacing the namespace nodes with a regular attribute that
				// we convert back when serialised. Edge does not have this
				// issue
				var worksheet = val.childNodes[0];
				var i, ien;
				var attrs = [];
				for ( i=worksheet.attributes.length-1 ; i>=0 ; i-- ) {
					var attrName = worksheet.attributes[i].nodeName;
					var attrValue = worksheet.attributes[i].nodeValue;
					if ( attrName.indexOf( ':' ) !== -1 ) {
						attrs.push( { name: attrName, value: attrValue } );
						worksheet.removeAttribute( attrName );
					}
				}
				for ( i=0, ien=attrs.length ; i<ien ; i++ ) {
					var attr = val.createAttribute( attrs[i].name.replace( ':', '_dt_b_namespace_token_' ) );
					attr.value = attrs[i].value;
					worksheet.setAttributeNode( attr );
				}
			}
			var str = _serialiser.serializeToString(val);
			// Fix IE's XML
			if ( _ieExcel ) {
				// IE doesn't include the XML declaration
				if ( str.indexOf( '<?xml' ) === -1 ) {
					str = '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'+str;
				}
				// Return namespace attributes to being as such
				str = str.replace( /_dt_b_namespace_token_/g, ':' );
				// Remove testing name space that IE puts into the space preserve attr
				str = str.replace( /xmlns:NS[\d]+="" NS[\d]+:/g, '' );
			}
			// Safari, IE and Edge will put empty name space attributes onto
			// various elements making them useless. This strips them out
			str = str.replace( /<([^<>]*?) xmlns=""([^<>]*?)>/g, '<$1 $2>' );
			zip.file( name, str );
		}
	} );
}
/**
* Create an XML node and add any children, attributes, etc without needing to
* be verbose in the DOM.
*
* @param {object} doc XML document
* @param {string} nodeName Node name
* @param {object} opts Options - can be `attr` (attributes), `children`
* (child nodes) and `text` (text content)
* @return {node} Created node
*/
function _createNode( doc, nodeName, opts ) {
var tempNode = doc.createElement( nodeName );
if ( opts ) {
if ( opts.attr ) {
$(tempNode).attr( opts.attr );
}
if ( opts.children ) {
$.each( opts.children, function ( key, value ) {
tempNode.appendChild( value );
} );
}
if ( opts.text !== null && opts.text !== undefined ) {
tempNode.appendChild( doc.createTextNode( opts.text ) );
}
}
return tempNode;
}
/**
* Get the width for an Excel column based on the contents of that column
* @param {object} data Data for export
* @param {int} col Column index
* @return {int} Column width
*/
/**
 * Determine the display width for an Excel column from the widest content
 * in the header, footer and body of that column.
 *
 * @param {object} data Data for export (`header`, `body`, optional `footer`)
 * @param {int} col Column index
 * @return {int} Column width (clamped between 6 and 54)
 */
function _excelColWidth( data, col ) {
    var widest = data.header[col].length;
    var row, rows, lines, cellValue, cellStr, cellLen;

    if ( data.footer && data.footer[col].length > widest ) {
        widest = data.footer[col].length;
    }

    for ( row=0, rows=data.body.length ; row<rows ; row++ ) {
        cellValue = data.body[row][col];
        cellStr = cellValue !== null && cellValue !== undefined
            ? cellValue.toString()
            : '';

        if ( cellStr.indexOf('\n') === -1 ) {
            cellLen = cellStr.length;
        }
        else {
            // Multi-line content - the width is governed by the longest line
            lines = cellStr.split('\n');
            lines.sort( function (a, b) {
                return b.length - a.length;
            } );
            cellLen = lines[0].length;
        }

        if ( cellLen > widest ) {
            widest = cellLen;
        }

        // Cap the width so huge cells don't create massive columns
        if ( widest > 40 ) {
            return 54; // 40 * 1.35
        }
    }

    widest *= 1.35;

    // And a minimum width
    return widest > 6 ? widest : 6;
}
// Excel - Pre-defined strings to build a basic XLSX file
// These templates are parsed with $.parseXML before use, so every part must
// be well-formed XML - literal quote marks inside attribute values have to
// be written as &quot; entities.
var excelStrings = {
"_rels/.rels":
'<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'+
'<Relationships xmlns="http://schemas.openxmlformats.org/package/2006/relationships">'+
'<Relationship Id="rId1" Type="http://schemas.openxmlformats.org/officeDocument/2006/relationships/officeDocument" Target="xl/workbook.xml"/>'+
'</Relationships>',
"xl/_rels/workbook.xml.rels":
'<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'+
'<Relationships xmlns="http://schemas.openxmlformats.org/package/2006/relationships">'+
'<Relationship Id="rId1" Type="http://schemas.openxmlformats.org/officeDocument/2006/relationships/worksheet" Target="worksheets/sheet1.xml"/>'+
'<Relationship Id="rId2" Type="http://schemas.openxmlformats.org/officeDocument/2006/relationships/styles" Target="styles.xml"/>'+
'</Relationships>',
"[Content_Types].xml":
'<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'+
'<Types xmlns="http://schemas.openxmlformats.org/package/2006/content-types">'+
'<Default Extension="xml" ContentType="application/xml" />'+
'<Default Extension="rels" ContentType="application/vnd.openxmlformats-package.relationships+xml" />'+
'<Default Extension="jpeg" ContentType="image/jpeg" />'+
'<Override PartName="/xl/workbook.xml" ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml" />'+
'<Override PartName="/xl/worksheets/sheet1.xml" ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.worksheet+xml" />'+
'<Override PartName="/xl/styles.xml" ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.styles+xml" />'+
'</Types>',
"xl/workbook.xml":
'<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'+
'<workbook xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">'+
'<fileVersion appName="xl" lastEdited="5" lowestEdited="5" rupBuild="24816"/>'+
'<workbookPr showInkAnnotation="0" autoCompressPictures="0"/>'+
'<bookViews>'+
'<workbookView xWindow="0" yWindow="0" windowWidth="25600" windowHeight="19020" tabRatio="500"/>'+
'</bookViews>'+
'<sheets>'+
'<sheet name="Sheet1" sheetId="1" r:id="rId1"/>'+
'</sheets>'+
'<definedNames/>'+
'</workbook>',
"xl/worksheets/sheet1.xml":
'<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'+
'<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006" mc:Ignorable="x14ac" xmlns:x14ac="http://schemas.microsoft.com/office/spreadsheetml/2009/9/ac">'+
'<sheetData/>'+
'<mergeCells count="0"/>'+
'</worksheet>',
// numFmt ids 164-169 below are the custom formats referenced (via cellXfs
// indices) from `_excelSpecials`
"xl/styles.xml":
'<?xml version="1.0" encoding="UTF-8"?>'+
'<styleSheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006" mc:Ignorable="x14ac" xmlns:x14ac="http://schemas.microsoft.com/office/spreadsheetml/2009/9/ac">'+
'<numFmts count="6">'+
'<numFmt numFmtId="164" formatCode="#,##0.00_-\ [$$-45C]"/>'+
// The literal £ must be wrapped in &quot; entities - a raw " here would
// terminate the XML attribute early and make $.parseXML fail for this part
'<numFmt numFmtId="165" formatCode="&quot;£&quot;#,##0.00"/>'+
'<numFmt numFmtId="166" formatCode="[$€-2]\ #,##0.00"/>'+
'<numFmt numFmtId="167" formatCode="0.0%"/>'+
'<numFmt numFmtId="168" formatCode="#,##0;(#,##0)"/>'+
'<numFmt numFmtId="169" formatCode="#,##0.00;(#,##0.00)"/>'+
'</numFmts>'+
'<fonts count="5" x14ac:knownFonts="1">'+
'<font>'+
'<sz val="11" />'+
'<name val="Calibri" />'+
'</font>'+
'<font>'+
'<sz val="11" />'+
'<name val="Calibri" />'+
'<color rgb="FFFFFFFF" />'+
'</font>'+
'<font>'+
'<sz val="11" />'+
'<name val="Calibri" />'+
'<b />'+
'</font>'+
'<font>'+
'<sz val="11" />'+
'<name val="Calibri" />'+
'<i />'+
'</font>'+
'<font>'+
'<sz val="11" />'+
'<name val="Calibri" />'+
'<u />'+
'</font>'+
'</fonts>'+
'<fills count="6">'+
'<fill>'+
'<patternFill patternType="none" />'+
'</fill>'+
'<fill>'+ // Excel appears to use this as a dotted background regardless of values but
'<patternFill patternType="none" />'+ // to be valid to the schema, use a patternFill
'</fill>'+
'<fill>'+
'<patternFill patternType="solid">'+
'<fgColor rgb="FFD9D9D9" />'+
'<bgColor indexed="64" />'+
'</patternFill>'+
'</fill>'+
'<fill>'+
'<patternFill patternType="solid">'+
'<fgColor rgb="FFD99795" />'+
'<bgColor indexed="64" />'+
'</patternFill>'+
'</fill>'+
'<fill>'+
'<patternFill patternType="solid">'+
'<fgColor rgb="ffc6efce" />'+
'<bgColor indexed="64" />'+
'</patternFill>'+
'</fill>'+
'<fill>'+
'<patternFill patternType="solid">'+
'<fgColor rgb="ffc6cfef" />'+
'<bgColor indexed="64" />'+
'</patternFill>'+
'</fill>'+
'</fills>'+
'<borders count="2">'+
'<border>'+
'<left />'+
'<right />'+
'<top />'+
'<bottom />'+
'<diagonal />'+
'</border>'+
'<border diagonalUp="false" diagonalDown="false">'+
'<left style="thin">'+
'<color auto="1" />'+
'</left>'+
'<right style="thin">'+
'<color auto="1" />'+
'</right>'+
'<top style="thin">'+
'<color auto="1" />'+
'</top>'+
'<bottom style="thin">'+
'<color auto="1" />'+
'</bottom>'+
'<diagonal />'+
'</border>'+
'</borders>'+
'<cellStyleXfs count="1">'+
'<xf numFmtId="0" fontId="0" fillId="0" borderId="0" />'+
'</cellStyleXfs>'+
'<cellXfs count="68">'+
'<xf numFmtId="0" fontId="0" fillId="0" borderId="0" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="1" fillId="0" borderId="0" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="2" fillId="0" borderId="0" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="3" fillId="0" borderId="0" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="4" fillId="0" borderId="0" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="0" fillId="2" borderId="0" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="1" fillId="2" borderId="0" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="2" fillId="2" borderId="0" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="3" fillId="2" borderId="0" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="4" fillId="2" borderId="0" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="0" fillId="3" borderId="0" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="1" fillId="3" borderId="0" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="2" fillId="3" borderId="0" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="3" fillId="3" borderId="0" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="4" fillId="3" borderId="0" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="0" fillId="4" borderId="0" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="1" fillId="4" borderId="0" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="2" fillId="4" borderId="0" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="3" fillId="4" borderId="0" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="4" fillId="4" borderId="0" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="0" fillId="5" borderId="0" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="1" fillId="5" borderId="0" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="2" fillId="5" borderId="0" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="3" fillId="5" borderId="0" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="4" fillId="5" borderId="0" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="0" fillId="0" borderId="1" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="1" fillId="0" borderId="1" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="2" fillId="0" borderId="1" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="3" fillId="0" borderId="1" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="4" fillId="0" borderId="1" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="0" fillId="2" borderId="1" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="1" fillId="2" borderId="1" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="2" fillId="2" borderId="1" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="3" fillId="2" borderId="1" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="4" fillId="2" borderId="1" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="0" fillId="3" borderId="1" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="1" fillId="3" borderId="1" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="2" fillId="3" borderId="1" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="3" fillId="3" borderId="1" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="4" fillId="3" borderId="1" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="0" fillId="4" borderId="1" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="1" fillId="4" borderId="1" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="2" fillId="4" borderId="1" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="3" fillId="4" borderId="1" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="4" fillId="4" borderId="1" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="0" fillId="5" borderId="1" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="1" fillId="5" borderId="1" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="2" fillId="5" borderId="1" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="3" fillId="5" borderId="1" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="4" fillId="5" borderId="1" applyFont="1" applyFill="1" applyBorder="1"/>'+
'<xf numFmtId="0" fontId="0" fillId="0" borderId="0" applyFont="1" applyFill="1" applyBorder="1" xfId="0" applyAlignment="1">'+
'<alignment horizontal="left"/>'+
'</xf>'+
'<xf numFmtId="0" fontId="0" fillId="0" borderId="0" applyFont="1" applyFill="1" applyBorder="1" xfId="0" applyAlignment="1">'+
'<alignment horizontal="center"/>'+
'</xf>'+
'<xf numFmtId="0" fontId="0" fillId="0" borderId="0" applyFont="1" applyFill="1" applyBorder="1" xfId="0" applyAlignment="1">'+
'<alignment horizontal="right"/>'+
'</xf>'+
'<xf numFmtId="0" fontId="0" fillId="0" borderId="0" applyFont="1" applyFill="1" applyBorder="1" xfId="0" applyAlignment="1">'+
'<alignment horizontal="fill"/>'+
'</xf>'+
'<xf numFmtId="0" fontId="0" fillId="0" borderId="0" applyFont="1" applyFill="1" applyBorder="1" xfId="0" applyAlignment="1">'+
'<alignment textRotation="90"/>'+
'</xf>'+
'<xf numFmtId="0" fontId="0" fillId="0" borderId="0" applyFont="1" applyFill="1" applyBorder="1" xfId="0" applyAlignment="1">'+
'<alignment wrapText="1"/>'+
'</xf>'+
'<xf numFmtId="9" fontId="0" fillId="0" borderId="0" applyFont="1" applyFill="1" applyBorder="1" xfId="0" applyNumberFormat="1"/>'+
'<xf numFmtId="164" fontId="0" fillId="0" borderId="0" applyFont="1" applyFill="1" applyBorder="1" xfId="0" applyNumberFormat="1"/>'+
'<xf numFmtId="165" fontId="0" fillId="0" borderId="0" applyFont="1" applyFill="1" applyBorder="1" xfId="0" applyNumberFormat="1"/>'+
'<xf numFmtId="166" fontId="0" fillId="0" borderId="0" applyFont="1" applyFill="1" applyBorder="1" xfId="0" applyNumberFormat="1"/>'+
'<xf numFmtId="167" fontId="0" fillId="0" borderId="0" applyFont="1" applyFill="1" applyBorder="1" xfId="0" applyNumberFormat="1"/>'+
'<xf numFmtId="168" fontId="0" fillId="0" borderId="0" applyFont="1" applyFill="1" applyBorder="1" xfId="0" applyNumberFormat="1"/>'+
'<xf numFmtId="169" fontId="0" fillId="0" borderId="0" applyFont="1" applyFill="1" applyBorder="1" xfId="0" applyNumberFormat="1"/>'+
'<xf numFmtId="3" fontId="0" fillId="0" borderId="0" applyFont="1" applyFill="1" applyBorder="1" xfId="0" applyNumberFormat="1"/>'+
'<xf numFmtId="4" fontId="0" fillId="0" borderId="0" applyFont="1" applyFill="1" applyBorder="1" xfId="0" applyNumberFormat="1"/>'+
'<xf numFmtId="1" fontId="0" fillId="0" borderId="0" applyFont="1" applyFill="1" applyBorder="1" xfId="0" applyNumberFormat="1"/>'+
'<xf numFmtId="2" fontId="0" fillId="0" borderId="0" applyFont="1" applyFill="1" applyBorder="1" xfId="0" applyNumberFormat="1"/>'+
'<xf numFmtId="14" fontId="0" fillId="0" borderId="0" applyFont="1" applyFill="1" applyBorder="1" xfId="0" applyNumberFormat="1"/>'+
'</cellXfs>'+
'<cellStyles count="1">'+
'<cellStyle name="Normal" xfId="0" builtinId="0" />'+
'</cellStyles>'+
'<dxfs count="0" />'+
'<tableStyles count="0" defaultTableStyle="TableStyleMedium9" defaultPivotStyle="PivotStyleMedium4" />'+
'</styleSheet>'
};
// Note we could use 3 `for` loops for the styles, but when gzipped there is
// virtually no difference in size, since the above can be easily compressed
// Pattern matching for special number formats. Perhaps this should be exposed
// via an API in future?
// Ref: section 3.8.30 - built in formatters in open spreadsheet
// https://www.ecma-international.org/news/TC45_current_work/Office%20Open%20XML%20Part%204%20-%20Markup%20Language%20Reference.pdf
// The first matching pattern wins, so the more specific patterns must come
// before the more general ones. `style` is an index into the <cellXfs> list
// in `excelStrings['xl/styles.xml']`; the optional `fmt` converts the
// displayed string into the raw number stored in the cell.
var _excelSpecials = [
{ match: /^\-?\d+\.\d%$/, style: 60, fmt: function (d) { return d/100; } }, // Percent with d.p.
{ match: /^\-?\d+\.?\d*%$/, style: 56, fmt: function (d) { return d/100; } }, // Percent
// NOTE(review): in the three currency patterns below the `.` before `?\d*` is
// unescaped, so it matches any single character - presumably `\.` (a decimal
// point) was intended. Confirm before tightening.
{ match: /^\-?\$[\d,]+.?\d*$/, style: 57 }, // Dollars
{ match: /^\-?£[\d,]+.?\d*$/, style: 58 }, // Pounds
{ match: /^\-?€[\d,]+.?\d*$/, style: 59 }, // Euros
{ match: /^\-?\d+$/, style: 65 }, // Numbers without thousand separators
{ match: /^\-?\d+\.\d{2}$/, style: 66 }, // Numbers 2 d.p. without thousands separators
{ match: /^\([\d,]+\)$/, style: 61, fmt: function (d) { return -1 * d.replace(/[\(\)]/g, ''); } }, // Negative numbers indicated by brackets
{ match: /^\([\d,]+\.\d{2}\)$/, style: 62, fmt: function (d) { return -1 * d.replace(/[\(\)]/g, ''); } }, // Negative numbers indicated by brackets - 2d.p.
{ match: /^\-?[\d,]+$/, style: 63 }, // Numbers with thousand separators
{ match: /^\-?[\d,]+\.\d{2}$/, style: 64 }, // Numbers with thousand separators - 2 d.p.
{ match: /^[\d]{4}\-[\d]{2}\-[\d]{2}$/, style: 67, fmt: function (d) {return Math.round(25569 + (Date.parse(d) / (86400 * 1000)));}} //Date yyyy-mm-dd
];
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Buttons
*/
//
// Copy to clipboard
//
DataTable.ext.buttons.copyHtml5 = {
className: 'buttons-copy buttons-html5',
text: function ( dt ) {
return dt.i18n( 'buttons.copy', 'Copy' );
},
// Copy the exported table data to the system clipboard, preferring the
// `document.execCommand('copy')` path and falling back to a manual
// "press ctrl+C" prompt where that is unsupported or fails
action: function ( e, dt, button, config ) {
this.processing( true );
var that = this;
var exportData = _exportData( dt, config );
var info = dt.buttons.exportInfo( config );
var newline = _newLine(config);
var output = exportData.str;
// Off-screen 1x1 container so the selectable textarea is never visible
var hiddenDiv = $('<div/>')
.css( {
height: 1,
width: 1,
overflow: 'hidden',
position: 'fixed',
top: 0,
left: 0
} );
// Wrap the data with the title / messageTop above and messageBottom below
if ( info.title ) {
output = info.title + newline + newline + output;
}
if ( info.messageTop ) {
output = info.messageTop + newline + newline + output;
}
if ( info.messageBottom ) {
output = output + newline + newline + info.messageBottom;
}
if ( config.customize ) {
output = config.customize( output, config, dt );
}
var textarea = $('<textarea readonly/>')
.val( output )
.appendTo( hiddenDiv );
// For browsers that support the copy execCommand, try to use it
if ( document.queryCommandSupported('copy') ) {
hiddenDiv.appendTo( dt.table().container() );
textarea[0].focus();
textarea[0].select();
try {
var successful = document.execCommand( 'copy' );
hiddenDiv.remove();
if (successful) {
// Show a confirmation message that auto-hides after 2 seconds
dt.buttons.info(
dt.i18n( 'buttons.copyTitle', 'Copy to clipboard' ),
dt.i18n( 'buttons.copySuccess', {
1: 'Copied one row to clipboard',
_: 'Copied %d rows to clipboard'
}, exportData.rows ),
2000
);
this.processing( false );
return;
}
}
catch (t) {}
}
// Otherwise we show the text box and instruct the user to use it
var message = $('<span>'+dt.i18n( 'buttons.copyKeys',
'Press <i>ctrl</i> or <i>\u2318</i> + <i>C</i> to copy the table data<br>to your system clipboard.<br><br>'+
'To cancel, click this message or press escape.' )+'</span>'
)
.append( hiddenDiv );
dt.buttons.info( dt.i18n( 'buttons.copyTitle', 'Copy to clipboard' ), message, 0 );
// Select the text so when the user activates their system clipboard
// it will copy that text
textarea[0].focus();
textarea[0].select();
// Event to hide the message when the user is done
var container = $(message).closest('.dt-button-info');
var close = function () {
// Tear down both namespaced listeners before dismissing the message
container.off( 'click.buttons-copy' );
$(document).off( '.buttons-copy' );
dt.buttons.info( false );
};
container.on( 'click.buttons-copy', close );
$(document)
.on( 'keydown.buttons-copy', function (e) {
if ( e.keyCode === 27 ) { // esc
close();
that.processing( false );
}
} )
.on( 'copy.buttons-copy cut.buttons-copy', function () {
close();
that.processing( false );
} );
},
exportOptions: {},
fieldSeparator: '\t',
fieldBoundary: '',
header: true,
footer: false,
title: '*',
messageTop: '*',
messageBottom: '*'
};
//
// CSV export
//
DataTable.ext.buttons.csvHtml5 = {
    bom: false,

    className: 'buttons-csv buttons-html5',

    // Requires the File API and Blob support
    available: function () {
        return window.FileReader !== undefined && window.Blob;
    },

    text: function ( dt ) {
        return dt.i18n( 'buttons.csv', 'CSV' );
    },

    // Generate the CSV text and hand it to the browser as a file download
    action: function ( e, dt, button, config ) {
        this.processing( true );

        var csv = _exportData( dt, config ).str;
        var info = dt.buttons.exportInfo( config );
        var charset = config.charset; // read before `customize` may run

        if ( config.customize ) {
            csv = config.customize( csv, config, dt );
        }

        // Build the `;charset=...` suffix for the Blob's mime type. A value
        // of `false` disables it entirely, while a falsy value falls back to
        // the document's own character set.
        var charsetSuffix = '';

        if ( charset !== false ) {
            var resolved = charset || document.characterSet || document.charset;

            if ( resolved ) {
                charsetSuffix = ';charset=' + resolved;
            }
        }

        // Prepend a BOM so spreadsheet applications can detect the encoding
        if ( config.bom ) {
            csv = String.fromCharCode( 0xFEFF ) + csv;
        }

        _saveAs(
            new Blob( [ csv ], { type: 'text/csv' + charsetSuffix } ),
            info.filename,
            true
        );

        this.processing( false );
    },

    filename: '*',
    extension: '.csv',
    exportOptions: {},
    fieldSeparator: ',',
    fieldBoundary: '"',
    escapeChar: '"',
    charset: null,
    header: true,
    footer: false
};
//
// Excel (xlsx) export
//
DataTable.ext.buttons.excelHtml5 = {
className: 'buttons-excel buttons-html5',
// Requires FileReader, JSZip, a working XMLSerializer and not old Safari
available: function () {
return window.FileReader !== undefined && _jsZip() !== undefined && ! _isDuffSafari() && _serialiser;
},
text: function ( dt ) {
return dt.i18n( 'buttons.excel', 'Excel' );
},
// Build an XLSX workbook in memory (a zip of XML parts) and save it
action: function ( e, dt, button, config ) {
this.processing( true );
var that = this;
var rowPos = 0;
var dataStartRow, dataEndRow;
// Parse one of the static part templates in `excelStrings` into an XML doc
var getXml = function ( type ) {
var str = excelStrings[ type ];
//str = str.replace( /xmlns:/g, 'xmlns_' ).replace( /mc:/g, 'mc_' );
return $.parseXML( str );
};
// `rels` is the worksheet document - rows are appended to its sheetData node
var rels = getXml('xl/worksheets/sheet1.xml');
var relsGet = rels.getElementsByTagName( "sheetData" )[0];
// This object mirrors the directory structure of the zip that is created
var xlsx = {
_rels: {
".rels": getXml('_rels/.rels')
},
xl: {
_rels: {
"workbook.xml.rels": getXml('xl/_rels/workbook.xml.rels')
},
"workbook.xml": getXml('xl/workbook.xml'),
"styles.xml": getXml('xl/styles.xml'),
"worksheets": {
"sheet1.xml": rels
}
},
"[Content_Types].xml": getXml('[Content_Types].xml')
};
var data = dt.buttons.exportData( config.exportOptions );
var currentRow, rowNode;
// Append one row of cells to the worksheet, detecting cell types as we go
// (special formats first, then plain numbers, then inline strings)
var addRow = function ( row ) {
currentRow = rowPos+1;
rowNode = _createNode( rels, "row", { attr: {r:currentRow} } );
for ( var i=0, ien=row.length ; i<ien ; i++ ) {
// Concat both the Cell Columns as a letter and the Row of the cell.
var cellId = createCellPos(i) + '' + currentRow;
var cell = null;
// For null, undefined of blank cell, continue so it doesn't create the _createNode
if ( row[i] === null || row[i] === undefined || row[i] === '' ) {
if ( config.createEmptyCells === true ) {
row[i] = '';
}
else {
continue;
}
}
var originalContent = row[i];
// Trim so leading/trailing whitespace doesn't defeat the type detection
row[i] = typeof row[i].trim === 'function'
? row[i].trim()
: row[i];
// Special number formatting options
for ( var j=0, jen=_excelSpecials.length ; j<jen ; j++ ) {
var special = _excelSpecials[j];
// TODO Need to provide the ability for the specials to say
// if they are returning a string, since at the moment it is
// assumed to be a number
if ( row[i].match && ! row[i].match(/^0\d+/) && row[i].match( special.match ) ) {
var val = row[i].replace(/[^\d\.\-]/g, '');
if ( special.fmt ) {
val = special.fmt( val );
}
cell = _createNode( rels, 'c', {
attr: {
r: cellId,
s: special.style
},
children: [
_createNode( rels, 'v', { text: val } )
]
} );
break;
}
}
if ( ! cell ) {
if ( typeof row[i] === 'number' || (
row[i].match &&
row[i].match(/^-?\d+(\.\d+)?([eE]\-?\d+)?$/) && // Includes exponential format
! row[i].match(/^0\d+/) )
) {
// Detect numbers - don't match numbers with leading zeros
// or a negative anywhere but the start
cell = _createNode( rels, 'c', {
attr: {
t: 'n',
r: cellId
},
children: [
_createNode( rels, 'v', { text: row[i] } )
]
} );
}
else {
// String output - replace non standard characters for text output
var text = ! originalContent.replace ?
originalContent :
originalContent.replace(/[\x00-\x09\x0B\x0C\x0E-\x1F\x7F-\x9F]/g, '');
cell = _createNode( rels, 'c', {
attr: {
t: 'inlineStr',
r: cellId
},
children:{
row: _createNode( rels, 'is', {
children: {
row: _createNode( rels, 't', {
text: text,
attr: {
'xml:space': 'preserve'
}
} )
}
} )
}
} );
}
}
rowNode.appendChild( cell );
}
relsGet.appendChild(rowNode);
rowPos++;
};
if ( config.customizeData ) {
config.customizeData( data );
}
// Merge columns A..colspan on `row` and centre its content (cellXfs 51)
var mergeCells = function ( row, colspan ) {
var mergeCells = $('mergeCells', rels);
mergeCells[0].appendChild( _createNode( rels, 'mergeCell', {
attr: {
ref: 'A'+row+':'+createCellPos(colspan)+row
}
} ) );
mergeCells.attr( 'count', parseFloat(mergeCells.attr( 'count' ))+1 );
$('row:eq('+(row-1)+') c', rels).attr( 's', '51' ); // centre
};
// Title and top messages
var exportInfo = dt.buttons.exportInfo( config );
if ( exportInfo.title ) {
addRow( [exportInfo.title], rowPos );
mergeCells( rowPos, data.header.length-1 );
}
if ( exportInfo.messageTop ) {
addRow( [exportInfo.messageTop], rowPos );
mergeCells( rowPos, data.header.length-1 );
}
// Table itself
if ( config.header ) {
addRow( data.header, rowPos );
$('row:last c', rels).attr( 's', '2' ); // bold
}
// Remember the row range the table body occupies - the auto filter below
// needs it to compute its cell reference
dataStartRow = rowPos;
for ( var n=0, ie=data.body.length ; n<ie ; n++ ) {
addRow( data.body[n], rowPos );
}
dataEndRow = rowPos;
if ( config.footer && data.footer ) {
addRow( data.footer, rowPos);
$('row:last c', rels).attr( 's', '2' ); // bold
}
// Below the table
if ( exportInfo.messageBottom ) {
addRow( [exportInfo.messageBottom], rowPos );
mergeCells( rowPos, data.header.length-1 );
}
// Set column widths
var cols = _createNode( rels, 'cols' );
$('worksheet', rels).prepend( cols );
for ( var i=0, ien=data.header.length ; i<ien ; i++ ) {
cols.appendChild( _createNode( rels, 'col', {
attr: {
min: i+1,
max: i+1,
width: _excelColWidth( data, i ),
customWidth: 1
}
} ) );
}
// Workbook modifications
var workbook = xlsx.xl['workbook.xml'];
$( 'sheets sheet', workbook ).attr( 'name', _sheetname( config ) );
// Auto filter for columns
if ( config.autoFilter ) {
$('mergeCells', rels).before( _createNode( rels, 'autoFilter', {
attr: {
ref: 'A'+dataStartRow+':'+createCellPos(data.header.length-1)+dataEndRow
}
} ) );
$('definedNames', workbook).append( _createNode( workbook, 'definedName', {
attr: {
name: '_xlnm._FilterDatabase',
localSheetId: '0',
hidden: 1
},
text: _sheetname(config)+'!$A$'+dataStartRow+':'+createCellPos(data.header.length-1)+dataEndRow
} ) );
}
// Let the developer customise the document if they want to
if ( config.customize ) {
config.customize( xlsx, config, dt );
}
// Excel doesn't like an empty mergeCells tag
if ( $('mergeCells', rels).children().length === 0 ) {
$('mergeCells', rels).remove();
}
var jszip = _jsZip();
var zip = new jszip();
var zipConfig = {
type: 'blob',
mimeType: 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
};
// Serialise every XML document into the zip, then save - async API for
// JSZip 3+, synchronous for JSZip 2.5
_addToZip( zip, xlsx );
if ( zip.generateAsync ) {
// JSZip 3+
zip
.generateAsync( zipConfig )
.then( function ( blob ) {
_saveAs( blob, exportInfo.filename );
that.processing( false );
} );
}
else {
// JSZip 2.5
_saveAs(
zip.generate( zipConfig ),
exportInfo.filename
);
this.processing( false );
}
},
filename: '*',
extension: '.xlsx',
exportOptions: {},
header: true,
footer: false,
title: '*',
messageTop: '*',
messageBottom: '*',
createEmptyCells: false,
autoFilter: false,
sheetName: ''
};
//
// PDF export - using pdfMake - http://pdfmake.org
//
DataTable.ext.buttons.pdfHtml5 = {
className: 'buttons-pdf buttons-html5',
available: function () {
return window.FileReader !== undefined && _pdfMake();
},
text: function ( dt ) {
return dt.i18n( 'buttons.pdf', 'PDF' );
},
// Build a pdfmake document definition from the exported table data and
// either open or download the rendered PDF
action: function ( e, dt, button, config ) {
this.processing( true );
var that = this;
var data = dt.buttons.exportData( config.exportOptions );
var info = dt.buttons.exportInfo( config );
// Each entry in `rows` is an array of pdfmake cell objects for one row
var rows = [];
if ( config.header ) {
rows.push( $.map( data.header, function ( d ) {
return {
text: typeof d === 'string' ? d : d+'',
style: 'tableHeader'
};
} ) );
}
for ( var i=0, ien=data.body.length ; i<ien ; i++ ) {
rows.push( $.map( data.body[i], function ( d ) {
if ( d === null || d === undefined ) {
d = '';
}
return {
text: typeof d === 'string' ? d : d+'',
// Alternating row styling - `tableBodyOdd` carries the grey fill
style: i % 2 ? 'tableBodyEven' : 'tableBodyOdd'
};
} ) );
}
if ( config.footer && data.footer) {
rows.push( $.map( data.footer, function ( d ) {
return {
text: typeof d === 'string' ? d : d+'',
style: 'tableFooter'
};
} ) );
}
// pdfmake document definition - see the pdfmake documentation for the
// meaning of the page / style options used here
var doc = {
pageSize: config.pageSize,
pageOrientation: config.orientation,
content: [
{
table: {
headerRows: 1,
body: rows
},
layout: 'noBorders'
}
],
styles: {
tableHeader: {
bold: true,
fontSize: 11,
color: 'white',
fillColor: '#2d4154',
alignment: 'center'
},
tableBodyEven: {},
tableBodyOdd: {
fillColor: '#f3f3f3'
},
tableFooter: {
bold: true,
fontSize: 11,
color: 'white',
fillColor: '#2d4154'
},
title: {
alignment: 'center',
fontSize: 15
},
message: {}
},
defaultStyle: {
fontSize: 10
}
};
// NOTE: messageTop is unshifted before the title so that, after both
// unshift calls, the document order is title, message, table
if ( info.messageTop ) {
doc.content.unshift( {
text: info.messageTop,
style: 'message',
margin: [ 0, 0, 0, 12 ]
} );
}
if ( info.messageBottom ) {
doc.content.push( {
text: info.messageBottom,
style: 'message',
margin: [ 0, 0, 0, 12 ]
} );
}
if ( info.title ) {
doc.content.unshift( {
text: info.title,
style: 'title',
margin: [ 0, 0, 0, 12 ]
} );
}
// Let the developer customise the document definition before rendering
if ( config.customize ) {
config.customize( doc, config, dt );
}
var pdf = _pdfMake().createPdf( doc );
// `download: 'open'` shows the PDF in a new window where possible; the
// Safari special-case falls back to a plain download
if ( config.download === 'open' && ! _isDuffSafari() ) {
pdf.open();
}
else {
pdf.download( info.filename );
}
this.processing( false );
},
title: '*',
filename: '*',
extension: '.pdf',
exportOptions: {},
orientation: 'portrait',
pageSize: 'A4',
header: true,
footer: false,
messageTop: '*',
messageBottom: '*',
customize: null,
download: 'download'
};
return DataTable.Buttons;
})); | PypiClean |
/Bytestag-0.2b1.tar.gz/Bytestag-0.2b1/src/py3/bytestag/client.py | # This file is part of Bytestag.
# Copyright © 2012 Christopher Foo <chris.foo@gmail.com>.
# Licensed under GNU GPLv3. See COPYING.txt for details.
from bytestag import basedir
from bytestag.dht.downloading import Downloader
from bytestag.dht.network import DHTNetwork
from bytestag.dht.publishing import Publisher, Replicator
from bytestag.events import EventReactor, FnTaskSlot
from bytestag.keys import KeyBytes
from bytestag.network import Network
from bytestag.storage import DatabaseKVPTable, SharedFilesKVPTable
from bytestag.tables import AggregatedKVPTable
import logging
import os.path
import threading
__docformat__ = 'restructuredtext en'
_logger = logging.getLogger(__name__)
class Client(threading.Thread):
'''Client interface.
:warning: this class is under development.
'''
def __init__(self, cache_dir, address=('0.0.0.0', 0), node_id=None,
known_node_address=None, initial_scan=False, config_dir=None):
threading.Thread.__init__(self)
self.daemon = True
self.name = '{}.{}'.format(__name__, Client.__name__)
self._event_reactor = EventReactor()
self._node_id = node_id or KeyBytes()
self._network = Network(self._event_reactor, address=address)
self._cache_table = DatabaseKVPTable(
os.path.join(cache_dir, 'cache.db'))
self._shared_files_table = SharedFilesKVPTable(
os.path.join(cache_dir, 'shared_files.db'))
self._aggregated_kvp_table = AggregatedKVPTable(self._cache_table,
[self._cache_table, self._shared_files_table])
self._known_node_address = known_node_address
self._upload_slot = FnTaskSlot()
self._download_slot = FnTaskSlot()
self._initial_scan = initial_scan
self._config_dir = config_dir or basedir.config_dir
self._init()
@property
def cache_table(self):
'''The :class:`DatabaseKVPTable`'''
return self._cache_table
@property
def shared_files_table(self):
'''The :class:`SharedFilesKVPTable`'''
return self._shared_files_table
@property
def upload_slot(self):
'''The :class:`.FnTaskSlot` which holds :class:`.StoreValueTask`.'''
return self._upload_slot
@property
def download_slot(self):
'''Download slot.
:see: :func:`.DHTNetwork.download_slot`
'''
return self._download_slot
@property
def dht_network(self):
return self._dht_network
@property
def network(self):
return self._network
def _init(self):
self._dht_network = DHTNetwork(self._event_reactor,
self._aggregated_kvp_table, self._node_id, self._network,
self._download_slot)
self._publisher = Publisher(self._event_reactor, self._dht_network,
self._aggregated_kvp_table, self._upload_slot)
self._replicator = Replicator(self._event_reactor, self._dht_network,
self._aggregated_kvp_table, self._upload_slot)
self._downloader = Downloader(self._event_reactor, self._config_dir,
self._dht_network, self._download_slot)
def run(self):
if self._known_node_address:
self._dht_network.join_network(self._known_node_address)
# TODO: put warning if join fails, but don't check on
# the same thread as the event_reactor
if self._initial_scan:
self._shared_files_table.hash_directories()
self._event_reactor.start()
def stop(self):
    # Ask the reactor to shut down by posting its STOP event.
    self._event_reactor.put(EventReactor.STOP_ID)
/Dot_Plot-0.1.3.1.tar.gz/Dot_Plot-0.1.3.1/README.rst | =========
Dot Plot
=========
-------------------
The python script
-------------------
:Author: Nicola Cappellini
:Version: $Revision: beta $
:Copyright: This document has been placed in the public domain.
Tutorial
=========
Chord and / or scale entry
---------------------------
Via click
~~~~~~~~~~
Left-click on the diagram
Places a dot
Left-click on an X or O
Toggles between X and O.
Right-click on a dot
Remove it
ctrl + [scroll wheel up / down]
Adds fretting-hand fingering information
ctrl + shift + [scroll wheel up / down]
Adjusts position indication marker
ctrl + [scroll wheel up / down]
Cycles through fingering options.
Keyboard corrections
~~~~~~~~~~~~~~~~~~~~~
return
Add the diagram in the sandbox to the render queue
alt + r
reset the diagram in the sandbox
shift + backspace
Remove the previously entered diagram from the render queue
alt + backspace
Load the previously entered diagram into the editor for correction.
ctrl + shift + backspace
Fiasco: Reset both the sandbox and render queue.
shift + [right / left]
add / remove new strings to the diagram in the sandbox
shift + [up / down]
add / remove new frets to the diagram in the sandbox
escape
Exit the script
In addition:
alt + t
generate a "Takemitsu-style chord / scale" (mainly used for testing)
| PypiClean |
/AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/aerodynamics/aero_2D/airfoil_polar_functions.py | from aerosandbox.geometry import Airfoil
from aerosandbox.performance import OperatingPoint
import aerosandbox.numpy as np
import aerosandbox.library.aerodynamics as aerolib
def airfoil_coefficients_post_stall(
        airfoil: Airfoil,
        alpha: float,
):
    """
    Estimates post-stall aerodynamics of an airfoil.

    Uses methods given in:

    Truong, V. K. "An analytical model for airfoil aerodynamic characteristics over the entire 360deg angle of attack
    range". J. Renewable Sustainable Energy. 2020. doi: 10.1063/1.5126055

    Args:
        airfoil: The airfoil under consideration. (Currently unused: the fit
            constants below are hard-coded for a NACA0012; see the commented
            formulas for how they would be derived from geometry.)
        alpha: Angle of attack [degrees]. May be a scalar or an array.

    Returns:
        A tuple (CL, CD, CM) of lift, drag, and moment coefficients.
        (CM is currently a placeholder of zeros.)
    """
    sin_a = np.sind(alpha)
    cos_a = np.cosd(alpha)

    ##### Normal force calculation
    # Cd90_fp = aerolib.Cd_flat_plate_normal() # TODO implement
    # Cd90_0 = Cd90_fp - 0.83 * airfoil.LE_radius() - 1.46 / 2 * airfoil.max_thickness() + 1.46 * airfoil.max_camber()
    # Cd270_0 = Cd90_fp - 0.83 * airfoil.LE_radius() - 1.46 / 2 * airfoil.max_thickness() - 1.46 * airfoil.max_camber()

    ### Fit constants for NACA0012 (Truong 2020)
    cd90_0 = 2.08
    pn2, pn3 = 8.36e-2, 4.06e-1
    pt1, pt2, pt3 = 9.00e-2, -1.78e-1, -2.98e-1

    # Normal-force coefficient, from the 90-degree drag model
    cd90 = cd90_0 + pn2 * cos_a + pn3 * cos_a ** 2
    coeff_normal = cd90 * sin_a

    # Tangential-force coefficient
    coeff_tangential = (pt1 + pt2 * cos_a + pt3 * cos_a ** 3) * sin_a ** 2

    # Rotate body-axis forces into wind axes
    CL = coeff_normal * cos_a + coeff_tangential * sin_a
    CD = coeff_normal * sin_a - coeff_tangential * cos_a
    CM = np.zeros_like(CL)  # TODO

    return CL, CD, CM
if __name__ == '__main__':
    # Quick visual check: sweep the angle of attack through a full revolution
    # and plot the resulting lift and drag coefficient curves.
    af = Airfoil("naca0012")
    alpha = np.linspace(0, 360, 721)  # 0 to 360 deg in 0.5-deg steps
    CL, CD, CM = airfoil_coefficients_post_stall(
        af, alpha
    )
    from aerosandbox.tools.pretty_plots import plt, show_plot, set_ticks
    fig, ax = plt.subplots(1, 2, figsize=(8, 5))
    # Left panel: lift coefficient vs. angle of attack
    plt.sca(ax[0])
    plt.plot(alpha, CL)
    plt.xlabel("AoA")
    plt.ylabel("CL")
    set_ticks(45, 15, 0.5, 0.1)
    # Right panel: drag coefficient vs. angle of attack
    plt.sca(ax[1])
    plt.plot(alpha, CD)
    plt.xlabel("AoA")
    plt.ylabel("CD")
    set_ticks(45, 15, 0.5, 0.1)
    show_plot()
/GailBot_Testing_Suite-0.1a8-py3-none-any.whl/gailbot/core/engines/whisperEngine/whisperTimestamped/transcribe_naive.py |
import sys
import os
import numpy as np
import whisper
import torch
import torch.nn.functional as F
from .alignment import perform_word_alignment
from .utils import (
norm_language,
should_use_space,
print_timestamped,
round_confidence,
audio_minimum_padding
)
from .vars import (
AUDIO_SAMPLES_PER_TOKEN,
AUDIO_TIME_PER_TOKEN,
SAMPLE_RATE,
N_FRAMES
)
import string
import logging
# Module logger (the root logger — no name is passed to getLogger).
logger = logging.getLogger()
# All punctuation characters except "-" and "'", which can legitimately
# appear inside words; used when trimming trailing punctuation from a
# word's tokens before computing its confidence.
_punctuation = "".join(c for c in string.punctuation if c not in ["-", "'"])
def _transcribe_timestamped_naive(
    model,
    audio,
    remove_punctuation_from_words,
    compute_word_confidence,
    include_punctuation_in_confidence,
    refine_whisper_precision_nframes,
    plot_word_alignment,
    min_word_duration,
    **whisper_options,
):
    """Transcribe audio and recover per-word timestamps (the "naive" approach).

    Runs a plain whisper transcription first, then re-decodes each predicted
    segment through the model with forward hooks installed on the decoder's
    cross-attention blocks, and aligns words against the captured attention
    weights via ``perform_word_alignment``.

    Returns:
        A tuple ``(transcription, words)`` where ``transcription`` is the raw
        whisper result dict and ``words`` is a list of per-word dicts with
        ``start``/``end`` times, the owning ``idx_segment`` and (optionally)
        a ``confidence`` score.
    """
    verbose = whisper_options["verbose"]
    whisper_options["verbose"] = None if whisper_options["verbose"] is True else whisper_options["verbose"] # We will print intermediate results ourselves
    language = whisper_options["language"]
    refine_whisper_precision_sec = refine_whisper_precision_nframes * AUDIO_TIME_PER_TOKEN
    # Accept a file path, a numpy array, or an already-built tensor.
    if isinstance(audio, str):
        audio = whisper.load_audio(audio)
    if isinstance(audio, np.ndarray):
        audio = torch.Tensor(audio)
    else:
        assert isinstance(audio, torch.Tensor), f"Got unexpected audio of type {type(audio)}"
    audio = audio.to(model.device)
    audio_duration = audio.shape[-1] / SAMPLE_RATE
    if verbose and language is None and not whisper_options["verbose"]:
        # Reproduce whisper verbose (1/2)
        print("Detecting language using up to the first 30 seconds. Use `--language` to specify the language")
    # First pass: plain whisper transcription (segments + tokens).
    transcription = model.transcribe(audio, **whisper_options)
    if verbose and language is None and not whisper_options["verbose"]:
        # Reproduce whisper verbose (2/2)
        print(f"Detected language: {whisper.tokenizer.LANGUAGES[transcription['language']].title()}")
        sys.stdout.flush()
    language = norm_language(transcription["language"])
    tokenizer = whisper.tokenizer.get_tokenizer(model.is_multilingual, task=whisper_options["task"], language=language)
    use_space = should_use_space(language)
    # One slot per decoder block; filled in by the forward hooks below.
    attention_weights = [[] for _ in range(len(model.decoder.blocks))]
    try:
        all_hooks = []
        # Hook the model: capture each block's cross-attention output.
        # NOTE(review): assumes outs[-1] of cross_attn's forward is the
        # attention-weight tensor — confirm against the whisper version used.
        for i, block in enumerate(model.decoder.blocks):
            all_hooks.append(
                block.cross_attn.register_forward_hook(
                    lambda layer, ins, outs, index=i: attention_weights.__setitem__(index, outs[-1])
                )
            )
        words = []
        previous_end = 0
        whisper_segments = transcription["segments"]
        # Second pass: re-decode each segment to extract word alignments.
        for i_segment, segment in enumerate(whisper_segments):
            start = segment["start"]
            end = segment["end"]
            if end < start:
                # Whisper is wrong on the prediction of segment end
                end = min(audio_duration, start + 30.0)
            start_margin_min = start - refine_whisper_precision_sec
            start_margin_max = start + refine_whisper_precision_sec
            if start >= audio_duration - min_word_duration or (previous_end >= start_margin_min and previous_end <= start_margin_max):
                # Make start as accurate as possible (as the decoding will start with timestamp <|0|>)
                start = previous_end
            else:
                # Fallback
                start = start_margin_min
            if start > audio_duration - min_word_duration:
                # Skip last segment if too short
                # NOTE(review): logger.warn is deprecated in favor of logger.warning.
                logger.warn(f"Skipping segment outside of audio duration {audio_duration} (original: {segment['start']}-{segment['end']}, new: {start}-XXX)")
                continue
            end_margin_min = end - refine_whisper_precision_sec
            end_margin_max = end + refine_whisper_precision_sec
            if i_segment < len(whisper_segments) - 1:
                # Try to enforce:
                # end + min_word_duration <= next start + refine_whisper_precision_sec
                end_margin_max2 = whisper_segments[i_segment + 1]["start"] + refine_whisper_precision_sec - min_word_duration
                if end_margin_max2 >= end_margin_min:
                    end_margin_max = min(end_margin_max2, end_margin_max)
            end = min(audio_duration, end_margin_max)
            if end < start + min_word_duration:
                logger.warn(f"Got super short segment (original from whisper: {segment['start']}-{segment['end']}, new: {start, end})")
                end = min(audio_duration, start + min_word_duration)
                if end <= start:
                    logger.warn(f"Skipping this short segment occuring too close to the end of the audio")
                    continue
            # Slice out the segment's audio and build its mel spectrogram.
            start_sample = min(round(start * SAMPLE_RATE), audio.shape[-1])
            end_sample = min(round(end * SAMPLE_RATE), audio.shape[-1])
            sub_audio = audio_minimum_padding(audio[start_sample:end_sample])
            mfcc = whisper.log_mel_spectrogram(sub_audio).to(model.device)
            mfcc = whisper.pad_or_trim(mfcc, N_FRAMES)
            mfcc = mfcc.unsqueeze(0)
            tokens = segment["tokens"]
            # assert len(tokens), "Got empty transcription!"
            # Strip leading/trailing timestamp tokens, if any (guarded so an
            # empty token list does not crash here).
            if tokens:
                if tokens[0] == tokenizer.timestamp_begin:
                    tokens = tokens[1:]
                while tokens[-1] >= tokenizer.timestamp_begin:
                    tokens = tokens[:-1]
                assert len(tokens), "Got transcription with only timestamps!"
            # Re-decode with a forced prefix: SOT sequence + <|0|> + the
            # segment's text tokens.
            tokens = [
                *tokenizer.sot_sequence,
                tokenizer.timestamp_begin,
            ] + tokens
            i_start = len(tokenizer.sot_sequence)
            with torch.no_grad():
                logprobs = model(mfcc, torch.Tensor(tokens).int().to(model.device).unsqueeze(0))
                logprobs = F.log_softmax(logprobs, dim=-1)
            # Append the segment-end timestamp token for alignment purposes.
            tokens = tokens[i_start:] + [tokenizer.timestamp_begin + round((end_sample - start_sample) // AUDIO_SAMPLES_PER_TOKEN)]
            # Drop the attention columns belonging to the SOT prefix.
            # NOTE(review): this rebinds the list the hooks write into; the
            # hooks' closure follows the rebinding on the next forward pass.
            attention_weights = [w[:, :, i_start-1:, :] for w in attention_weights]
            ws = perform_word_alignment(
                tokens,
                attention_weights,
                tokenizer,
                use_space=use_space,
                remove_punctuation_from_words=remove_punctuation_from_words,
                refine_whisper_precision_nframes=refine_whisper_precision_nframes,
                mfcc=mfcc,
                plot=plot_word_alignment,
            )
            segment_logprobs = []
            for w in ws:
                # Shift word times from segment-relative to absolute.
                w["start"] = round(w["start"] + start, 2)
                w["end"] = round(w["end"] + start, 2)
                w.update({"idx_segment": i_segment})
                if compute_word_confidence:
                    tokens = w["tokens"]
                    i_end = i_start + len(tokens)
                    if include_punctuation_in_confidence:
                        tokens_str = [tokenizer.decode([t]) for t in tokens]
                        while len(tokens_str) > 1 and tokens_str[-1][-1] in _punctuation: # Note: look at the last character of token, to take into account "...", "!!", etc.
                            tokens_str = tokens_str[:-1]
                            tokens = tokens[:-1]
                    # Mean exp(logprob) over the word's tokens = confidence.
                    word_logprobs = [logprobs[:, step, tok] for (step, tok) in zip(range(i_start, i_start + len(tokens)), tokens)]
                    i_start = i_end
                    word_logprobs = torch.cat(word_logprobs)
                    w.update({"confidence": round_confidence(word_logprobs.mean().exp().item())})
                    segment_logprobs.append(word_logprobs)
                words.append(w)
                if verbose:
                    print_timestamped(w)
            if len(segment_logprobs):
                segment.update({"confidence": round_confidence(torch.cat(segment_logprobs).mean().exp().item())})
            if len(ws):
                previous_end = ws[-1]["end"]
    finally:
        # Remove hooks
        for hook in all_hooks:
            hook.remove()
    return (transcription, words)
/MSOIsHH2os-1.0.tar.gz/MSOIsHH2os-1.0/flood_tool/tool.py |
import os
import numpy as np
import pandas as pd
from .geo import *
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
from sklearn import set_config
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.impute import SimpleImputer
from sklearn.metrics import precision_score, recall_score, accuracy_score, f1_score
from sklearn.compose import ColumnTransformer
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import SGDRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from keras.utils import to_categorical
from imblearn.over_sampling import SMOTE
from imblearn.over_sampling import RandomOverSampler
from collections import Counter
__all__ = ['Tool']
class Tool(object):
    """Class to interact with a postcode database file."""

    def __init__(self, postcode_file='', sample_labels='',
                 household_file=''):
        """
        Parameters
        ----------
        postcode_file : str, optional
            Filename of a .csv file containing geographic location
            data for postcodes.
        sample_labels : str, optional
            Filename of a .csv file containing labelled postcode samples.
        household_file : str, optional
            Filename of a .csv file containing information on households
            by postcode.
        """
        # Bug fix: previously only the default-path branches assigned the
        # variables read by the pd.read_csv calls below (e.g.
        # ``full_postcode_file``), so passing an explicit ``postcode_file``
        # or ``sample_labels`` raised NameError.  Fall back to the packaged
        # resources only when no file name is supplied.
        if postcode_file == '':
            postcode_file = os.sep.join((os.path.dirname(__file__),
                                         'resources',
                                         'postcodes_unlabelled.csv'))
        if household_file == '':
            household_file = os.sep.join((os.path.dirname(__file__),
                                          'resources',
                                          'households_per_sector.csv'))
        if sample_labels == '':
            sample_labels = os.sep.join((os.path.dirname(__file__),
                                         'resources',
                                         'postcodes_sampled.csv'))
        self.label_data = pd.read_csv(sample_labels)
        self.postcodedb = pd.read_csv(postcode_file)
        self.householddb = pd.read_csv(household_file)

    def train(self, labelled_samples=''):
        """Train the model using a labelled set of samples.

        Parameters
        ----------
        labelled_samples : str, optional
            Filename of a .csv file containing a labelled set of samples.
        """
        if labelled_samples == '':
            labelled_samples = os.sep.join((os.path.dirname(__file__),
                                            'resources',
                                            'postcodes_sampled.csv'))
        self.label_data = pd.read_csv(labelled_samples)

    def get_useful_features(self, postcodes, features=['sector', 'easting', 'northing', 'localAuthority', 'altitude', 'soilType']):
        """Return the requested feature columns for a sequence of postcodes,
        indexed by postcode.  Unknown postcodes yield NaN rows."""
        # NOTE(review): mutable default argument; safe here because it is
        # never mutated, but callers should not rely on its identity.
        frame = self.postcodedb.copy()
        frame = frame.set_index('postcode')
        return frame.loc[postcodes, features]

    def get_easting_northing(self, postcodes):
        """Get a frame of OS eastings and northings from a collection
        of input postcodes.

        Parameters
        ----------
        postcodes: sequence of strs
            Sequence of postcodes.

        Returns
        -------
        pandas.DataFrame
            DataFrame containing only OSGB36 easting and northing indexed
            by the input postcodes. Invalid postcodes (i.e. not in the
            input unlabelled postcodes file) return as NaN.
        """
        frame = self.postcodedb.copy()
        frame = frame.set_index('postcode')
        return frame.loc[postcodes, ['easting', 'northing']]

    def get_lat_long(self, postcodes):
        """Get a frame containing GPS latitude and longitude information for a
        collection of postcodes.

        Parameters
        ----------
        postcodes: sequence of strs
            Sequence of postcodes.

        Returns
        -------
        pandas.DataFrame
            DataFrame containing only WGS84 latitude and longitude pairs for
            the input postcodes. Invalid postcodes (i.e. not in the
            input unlabelled postcodes file) return as NaN.
        """
        es = self.get_easting_northing(postcodes)
        lat, lon = get_gps_lat_long_from_easting_northing(es["easting"].to_list(), es["northing"].to_list())
        # NOTE(review): assumes the shapes returned by
        # get_gps_lat_long_from_easting_northing make this concatenation
        # produce one row per postcode — verify for multi-postcode input.
        return pd.DataFrame(np.concatenate([[lat], [lon]], axis=1), columns=["latitude", "longitude"], index=es.index)

    @staticmethod
    def get_flood_class_from_postcodes_methods():
        """
        Get a dictionary of available flood probability classification methods
        for postcodes.

        Returns
        -------
        dict
            Dictionary mapping classification method names (which have
            no innate meaning) on to an identifier to be passed to the
            get_flood_class_from_postcodes method.
        """
        flood_class_method = {'all_zero_risk': 0, 'RandomForestClassifier': 1}
        return flood_class_method

    def get_flood_class_from_postcodes(self, postcodes, method=0):
        """
        Generate series predicting flood probability classification
        for a collection of postcodes.

        Parameters
        ----------
        postcodes : sequence of strs
            Sequence of postcodes.
        method : int (optional)
            optionally specify (via a value in
            get_flood_class_from_postcodes_methods) the classification
            method to be used.

        Returns
        -------
        pandas.Series
            Series of flood risk classification labels indexed by postcodes.
        """
        assert method in [0, 1], 'No such a method'
        assert isinstance(postcodes, list) or isinstance(postcodes, np.ndarray), 'postcodes must be a list or numpy array'
        if method == 0:
            # Baseline: every postcode gets risk label 1 (the lowest class).
            return pd.Series(data=np.ones(len(postcodes), int),
                             index=np.asarray(postcodes),
                             name='riskLabel')
        else:
            # Delegate to the location-based classifier via the postcodes'
            # OSGB36 coordinates.
            OSBG36_cord = self.get_easting_northing(postcodes)
            eastings = OSBG36_cord['easting'].to_list()
            northings = OSBG36_cord['northing'].to_list()
            OS_RLPredic = self.get_flood_class_from_OSGB36_locations(eastings, northings, method=method)
            PC_RLPrediction_Series = pd.Series(data=OS_RLPredic.values,
                                               index=[postcode for postcode in postcodes],
                                               name='riskLabel')
            return PC_RLPrediction_Series

    @staticmethod
    def get_flood_class_from_locations_methods():
        """
        Get a dictionary of available flood probability classification methods
        for locations.

        Returns
        -------
        dict
            Dictionary mapping classification method names (which have
            no innate meaning) on to an identifier to be passed to the
            get_flood_class_from_OSGB36_locations and
            get_flood_class_from_WGS84_locations methods.
        """
        flood_class_method = {'Do Nothing': 0, 'RandomForestClassifier': 1, 'KNeighborsRegressor': 2}
        return flood_class_method

    def get_flood_class_from_OSGB36_locations(self, eastings, northings, method=0):
        """
        Generate series predicting flood probability classification
        for a collection of OSGB36_locations.

        Parameters
        ----------
        eastings : sequence of floats
            Sequence of OSGB36 eastings.
        northings : sequence of floats
            Sequence of OSGB36 northings.
        method : int (optional)
            optionally specify (via a value in
            self.get_flood_class_from_locations_methods) the classification
            method to be used.

        Returns
        -------
        pandas.Series
            Series of flood risk classification labels indexed by locations.
        """
        assert method in [0, 1, 2], 'No such a method'
        assert isinstance(eastings, list) or isinstance(eastings, np.ndarray), 'eastings must be a list or numpy array'
        assert isinstance(northings, list) or isinstance(northings, np.ndarray), 'northings must be a list or numpy array'
        if method == 0:
            return pd.Series(data=np.ones(len(eastings), int),
                             index=[(est, nth) for est, nth in
                                    zip(eastings, northings)],
                             name='riskLabel')
        elif method == 1:
            # Oversample the minority risk classes, then fit a random forest
            # on the raw coordinates.
            data = self.label_data.copy()
            y = data['riskLabel']
            X = data[['easting', 'northing']]
            ros = RandomOverSampler(random_state=0)
            x_over, y_over = ros.fit_resample(X, y)
            rf_model = RandomForestClassifier().fit(x_over, y_over)
            ## predict
            cor_list = []
            for est, nor in zip(eastings, northings):
                cor_list.append([est, nor])
            RLPrediction = rf_model.predict(pd.DataFrame(cor_list))
            RLPrediction_Series = pd.Series(data=RLPrediction,
                                            index=[(est, nth) for est, nth in
                                                   zip(eastings, northings)],
                                            name='riskLabel')
            return RLPrediction_Series
        elif method == 2:
            ## train
            # Regress on a log-transformed event probability, then bin the
            # predictions back into the 10 discrete risk labels.
            data = self.label_data.copy()
            data = data.drop(columns=['postcode', 'sector', 'soilType', 'medianPrice', 'localAuthority', 'altitude'])
            data['riskLabel'] = data['riskLabel'].replace([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.015, 0.02, 0.03, 0.04, 0.05])
            data['riskLabel'] = np.log(10 / data['riskLabel'])
            y = data['riskLabel'].copy()
            X = data.drop(columns='riskLabel')
            Kn_regressor = KNeighborsRegressor().fit(X, y)
            ## predict
            cor_list = []
            for est, nor in zip(eastings, northings):
                cor_list.append([est, nor])
            RLPrediction = Kn_regressor.predict(pd.DataFrame(cor_list))
            bins = [0, 0.0001, 0.0005, 0.001, 0.005, 0.01, 0.015, 0.02, 0.03, 0.04, 0.05]
            labels = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
            RLPrediction_Series = pd.Series(data=pd.cut(x=RLPrediction, bins=bins, labels=labels),
                                            index=[(est, nth) for est, nth in
                                                   zip(eastings, northings)],
                                            name='riskLabel')
            return RLPrediction_Series

    def get_flood_class_from_WGS84_locations(self, latitudes, longitudes, rad=False, method=0):
        """
        Generate series predicting flood probability classification
        for a collection of WGS84 datum locations.

        Parameters
        ----------
        latitudes : sequence of floats
            Sequence of WGS84 latitudes.
        longitudes : sequence of floats
            Sequence of WGS84 longitudes.
        rad : bool (optional)
            If True, latitudes and longitudes are given in radians.
        method : int (optional)
            optionally specify (via a value in
            self.get_flood_class_from_locations_methods) the classification
            method to be used.

        Returns
        -------
        pandas.Series
            Series of flood risk classification labels indexed by locations.
        """
        assert method in [0, 1], 'No such a method'
        assert isinstance(latitudes, list) or isinstance(latitudes, np.ndarray), 'latitudes must be a list or numpy array'
        assert isinstance(longitudes, list) or isinstance(longitudes, np.ndarray), 'longitudes must be a list or numpy array'
        if method == 0:
            # NOTE(review): the dummy path indexes by (longitude, latitude)
            # while the trained path below indexes by (latitude, longitude);
            # kept as-is for backward compatibility — confirm intent.
            return pd.Series(data=np.ones(len(longitudes), int),
                             index=[(lng, lat) for lng, lat in
                                    zip(longitudes, latitudes)],
                             name='riskLabel')
        else:
            # convert from WGS84 to OSGB36
            eastings, northings = get_easting_northing_from_gps_lat_long(latitudes, longitudes, rads=rad)
            OS_RLPredic = self.get_flood_class_from_OSGB36_locations(eastings, northings, method)
            WG_RLPrediction_Series = pd.Series(data=OS_RLPredic.values,
                                               index=[(lat, lot) for lat, lot in
                                                      zip(latitudes, longitudes)],
                                               name='riskLabel')
            return WG_RLPrediction_Series

    @staticmethod
    def get_house_price_methods():
        """
        Get a dictionary of available flood house price regression methods.

        Returns
        -------
        dict
            Dictionary mapping regression method names (which have
            no innate meaning) on to an identifier to be passed to the
            get_median_house_price_estimate method.
        """
        return {'all_england_median': 0, 'KNeighborsRegressor': 1}

    def get_median_house_price_estimate(self, postcodes, method=0):
        """
        Generate series predicting median house price for a collection
        of postcodes.

        Parameters
        ----------
        postcodes : sequence of strs
            Sequence of postcodes.
        method : int (optional)
            optionally specify (via a value in
            self.get_house_price_methods) the regression
            method to be used.

        Returns
        -------
        pandas.Series
            Series of median house price estimates indexed by postcodes.
        """
        if method == 0:
            # Baseline: the all-England median house price for every postcode.
            return pd.Series(data=np.full(len(postcodes), 245000.0),
                             index=np.asarray(postcodes),
                             name='medianPrice')
        elif method == 1:
            self.model = KNeighborsRegressor(algorithm='brute', n_neighbors=9, weights='distance')
            X = self.label_data[['easting', 'northing']]
            y = self.label_data.medianPrice
            self.model.fit(X, y)
            # Bug fix: this previously called the bare name
            # ``get_useful_features`` (NameError) with its 6-column default
            # feature set, while the model above is trained on exactly two
            # features.  Select the matching columns via ``self``.
            X_pre = self.get_useful_features(postcodes,
                                             features=['easting', 'northing'])
            return pd.Series(index=np.asarray(postcodes), data=np.asarray(self.model.predict(X_pre)), name='medianPrice')
        else:
            raise NotImplementedError

    @staticmethod
    def get_local_authority_methods():
        """
        Get a dictionary of available local authority classification methods.

        Returns
        -------
        dict
            Dictionary mapping classification method names (which have
            no innate meaning) on to an identifier to be passed to the
            get_local_authority_estimate method.
        """
        class_method = {'Do Nothing': 0, 'RandomForestClassifier': 1}
        return class_method

    def get_local_authority_estimate(self, eastings, northings, method=0):
        """
        Generate series predicting local authorities for a sequence
        of OSGB36 locations.

        Parameters
        ----------
        eastings : sequence of floats
            Sequence of OSGB36 eastings.
        northings : sequence of floats
            Sequence of OSGB36 northings.
        method : int (optional)
            optionally specify (via a value in
            self.get_local_authority_methods) the classification
            method to be used.

        Returns
        -------
        pandas.Series
            Series of localAuthority values indexed by OSGB36 locations.
        """
        assert method in [0, 1], 'No such a method'
        assert isinstance(eastings, list) or isinstance(eastings, np.ndarray), 'eastings must be a list or numpy array'
        assert isinstance(northings, list) or isinstance(northings, np.ndarray), 'northings must be a list or numpy array'
        if method == 0:
            return pd.Series(data=np.full(len(eastings), 'Unknown'),
                             index=[(est, nth) for est, nth in
                                    zip(eastings, northings)],
                             name='localAuthority')
        elif method == 1:
            ## train model
            # Labels are string authority names: encode them, rebalance with
            # SMOTE, fit a random forest, then decode the predictions.
            df2 = self.label_data.copy()
            y = df2['localAuthority']
            X = df2[['easting', 'northing']]
            lencoder = LabelEncoder().fit(y)
            y = pd.Series(lencoder.transform(y))
            smote = SMOTE(random_state=1)
            X_smote, y_smote = smote.fit_resample(X, y)
            rf_model = RandomForestClassifier().fit(X_smote, y_smote)
            ## predict
            cor_list = []
            for est, nor in zip(eastings, northings):
                cor_list.append([est, nor])
            LAPrediction = rf_model.predict(pd.DataFrame(cor_list))
            LAPrediction = lencoder.inverse_transform(LAPrediction)
            LAPrediction_Series = pd.Series(data=LAPrediction,
                                            index=[(est, nth) for est, nth in
                                                   zip(eastings, northings)],
                                            name='localAuthority')
            return LAPrediction_Series

    def get_total_value(self, postal_data):
        """
        Return a series of estimates of the total property values
        of a sequence of postcode units or postcode sectors.

        Parameters
        ----------
        postal_data : sequence of strs
            Sequence of postcode units or postcode sectors

        Returns
        -------
        pandas.Series
            Series of total property value estimates indexed by locations.
        """
        raise NotImplementedError

    def get_annual_flood_risk(self, postcodes, risk_labels=None):
        """
        Return a series of estimates of the total annual flood risk of a
        collection of postcodes.

        Risk is defined here as a damage coefficient multiplied by the
        value under threat multiplied by the probability of an event.

        Parameters
        ----------
        postcodes : sequence of strs
            Sequence of postcodes.
        risk_labels: pandas.Series (optional)
            Series containing flood risk classifiers, as
            predicted by get_flood_class_from_postcodes.

        Returns
        -------
        pandas.Series
            Series of total annual flood risk estimates indexed by locations.
        """
        # Bug fix: ``risk_labels or ...`` raises ValueError for a pandas
        # Series (ambiguous truth value) and ``self.get_flood_class`` does
        # not exist — the postcode classifier is
        # ``get_flood_class_from_postcodes``.
        if risk_labels is None:
            risk_labels = self.get_flood_class_from_postcodes(postcodes)
        cost = self.get_total_value(risk_labels.index)
        # Combining probability, damage coefficient and value is not yet
        # implemented (get_total_value above also raises NotImplementedError).
        raise NotImplementedError
/Gbtestapi-0.1a10-py3-none-any.whl/gailbot/core/engines/whisperEngine/core.py |
import os
from threading import Lock
import json
from typing import List, Dict, Any
from dataclasses import asdict
import torch
import whisper_timestamped as whisper
from whisper_timestamped.transcribe import force_cudnn_initialization
from .parsers import (
parse_into_full_text,
parse_into_word_dicts,
add_speaker_info_to_text,
)
from .parsers import parse_into_word_dicts
from src.gailbot.configs import whisper_config_loader, workspace_config_loader
from src.gailbot.core.utils.general import is_file, make_dir
from src.gailbot.configs import whisper_config_loader
from src.gailbot.core.utils.logger import makelogger
# Module logger for the whisper engine.
logger = makelogger("whisper")
# Engine-wide whisper settings, loaded once at import time.
WHISPER_CONFIG = whisper_config_loader()
# Serializes whisper model loading across threads (used in
# WhisperCore.transcribe) so concurrent loads don't race on the
# shared download cache.
LOAD_MODEL_LOCK = Lock()
class WhisperCore:
    """
    We are using this class as an adapter for the engine so that we can use
    multiple different instances of the underlying whisper package as required.
    """

    # NOTE: Intentionally limiting the supported formats since we have
    # not tested other formats.
    # Bug fix: this was the *string* "wav", so get_supported_formats()
    # returned ['w', 'a', 'v'].  A tuple makes list() return ["wav"].
    _SUPPORTED_FORMATS = ("wav",)

    def __init__(self):
        """Set up the engine workspace, cache directories and compute device."""
        # initialize the workspace
        self.workspace_dir = workspace_config_loader().engine_ws.whisper
        logger.info(f"Whisper workspace path: {self.workspace_dir}")
        self.cache_dir = os.path.join(self.workspace_dir, "cache")
        self.models_dir = os.path.join(self.cache_dir, "models")
        # NOTE(review): unused — transcribe() synchronizes on the
        # module-level LOAD_MODEL_LOCK instead; kept for compatibility.
        self.loadModelLock = Lock()
        make_dir(self.workspace_dir, overwrite=False)
        make_dir(self.cache_dir, overwrite=False)
        make_dir(self.models_dir, overwrite=False)
        # Load a GPU if it is available
        self.device = "cuda:0" if torch.cuda.is_available() else "cpu"
        if self.device.lower().startswith("cuda"):
            force_cudnn_initialization(self.device)
        logger.info(f"Whisper core initialized with device: {self.device}")

    def __repr__(self) -> str:
        configs = json.dumps(
            asdict(WHISPER_CONFIG.transcribe_configs), indent=2, ensure_ascii=False
        )
        return (
            f"Whisper model: {WHISPER_CONFIG.model_name}"
            f"Transcribe configs:\n{configs}"
        )

    def transcribe(
        self, audio_path: str, language: str = None, detect_speaker: bool = False
    ) -> List[Dict]:
        """Transcribe the audio file at ``audio_path`` and return a list of
        per-word result dicts (see ``parse_into_word_dicts``).

        ``language=None`` lets whisper auto-detect the language.
        ``detect_speaker`` is currently ignored: the pyannote speaker
        diarization path was disabled and removed (see repository history).
        """
        assert is_file(audio_path), f"ERROR: Invalid file path: {audio_path}"
        # Load the model; serialized process-wide so concurrent transcriptions
        # don't race on the shared model download cache.
        logger.info(f"start to load whisper model")
        global LOAD_MODEL_LOCK
        with LOAD_MODEL_LOCK:
            whisper_model = whisper.load_model(
                name=WHISPER_CONFIG.model_name,
                device=self.device,
                download_root=self.models_dir,
            )
        logger.info(f"Whisper core using whisper model: {WHISPER_CONFIG.model_name}")
        logger.info("get the diarazation pipleine")
        logger.info(
            f"received setting language: {language}, detect speaker {detect_speaker} audio_path {audio_path}"
        )
        if language is None:
            logger.info("No language specified - auto detecting language")
        # Load the audio and models, transcribe, and return the parsed result
        logger.info("prepare to load audio")
        audio = whisper.load_audio(audio_path)
        logger.info("audio loaded")
        asr_result = whisper.transcribe(
            whisper_model,
            audio,
            language=language,
            **asdict(WHISPER_CONFIG.transcribe_configs),
        )
        if WHISPER_CONFIG.transcribe_configs.verbose:
            logger.debug(parse_into_full_text(asr_result))
        res = parse_into_word_dicts(asr_result)
        logger.info("get the result from parse")
        return res

    def get_supported_formats(self) -> List[str]:
        """Return the audio file extensions this engine accepts."""
        return list(self._SUPPORTED_FORMATS)

    def get_available_models(self) -> List[str]:
        """Return the whisper model names available for download."""
        return whisper.available_models()
/BGWpy-3.2.2.tar.gz/BGWpy-3.2.2/Documentation/Tutorial/Tutorial_Abinit.ipynb | # Running BerkeleyGW with BGWpy #
In this notebook, we assume that you are somewhat familiar with the BerkeleyGW software: what problem it solves, and what is the general workflow to run it. We also assume that you have a basic knowledge of Python and its terminology.
Before you begin, make sure that you have the following packages installed:
* Jupyter Notebook
* Abinit
* BerkeleyGW
* BGWpy
To run BGWpy, you'll also need the `bin` directories of BerkeleyGW and Abinit installations located in your `PATH` environment variable.
## Checking your configuration ##
The following cell is used to generate information that we'll need, should we have to debug this notebook. You don't need to run it, but it may be useful to look at for educational purposes.
```
import sys
import os
import BGWpy.config as defaults
print("Python kernel:\n {} ".format(sys.executable))
print("Python version:\n {} ".format(sys.version))
print("Current working directory:\n {} ".format(os.getcwd()))
print("Configuration file:\n {} ".format(defaults.config_file))
print("Use HDF5?:\n {} ".format(defaults.flavors['use_hdf5']))
print("Use complex version of BerkeleyGW?:\n {}".format(defaults.flavors['flavor_complex']))
print("DFT Flavor:\n {} ".format(defaults.flavors['dft_flavor']))
print("Default MPI settings:\n {} ".format(defaults.default_mpi))
print("Default runscript settings:\n {} ".format(defaults.default_runscript))
print("Paths in $PATH:")
for i in os.environ['PATH'].split(":"):
print(" {}".format(i))
```
Pay attention to the `use_hdf5` flag. It should reflect whether you compiled BerkeleyGW with HDF5 support or not. If the information above is not consistent with what you have, then you should edit your `~/.BGWpyrc` file accordingly. This is important because the file names that BGW expects from a calculation depends on it. If you don't have HDF5, then you should remove all the '.h5' extensions from file names. It is highly recommended, however, that you build BGW with HDF5 support, as it could become mandatory in the future.
If you don't have a `~/.BGWpyrc` yet, you can copy it from the `BGWpy/config` directory, or simply run the script `BGWpy_make_config_file.py`.
# Load Libraries #
First, we load two external packages which BGWpy uses: `numpy` and `pymatgen`.
```
import pymatgen
import numpy as np
```
Next, we load the `Structure` class from the BGWpy package. But really this is the Structure object from the `pymatgen` package.
```
from BGWpy import Structure
```
Next, we load the classes which create and run Abinit calculations.
```
from BGWpy import AbinitScfTask, AbinitBgwFlow
```
Finally, we load the classes with create and run BerkeleyGW calculations.
```
from BGWpy import EpsilonTask, SigmaTask, KernelTask, AbsorptionTask
```
Make sure that both the BerkeleyGW and Abinit binary folders are listed in your `PATH` environment variable.
# Create the Structure #
For this tutorial, we'll calculate the many-body properties of the GaAs primitive cell. All files that you will need have been provided for you in the `Data` subdirectory.
SHOW PICTURE HERE. (Even better if can play using `pymatgen`...)
Geometries are specified in BGWpy using pymatgen's `Structure` class, which may be imported directly from BGWpy or through pymatgen.
There are a number of ways that we can import geometries into BGWpy using the `Structure` class. For example, we can load them from a pre-existing CIF file:
```
structure = Structure.from_file('../Data/Structures/GaAs.cif')
print(structure)
```
We can also load them from a previous pymatgen Structure which has been exported to a file in the JSON format:
```
Structure.from_file('../Data/Structures/GaAs.json')
print(structure)
```
We can even use pymatgen to directly create the structure in a Python script:
```
acell_angstrom = 5.6535
rprim = np.array([[.0,.5,.5],[.5,.0,.5],[.5,.5,.0]]) * acell_angstrom
structure = pymatgen.Structure(
lattice = pymatgen.core.lattice.Lattice(rprim),
species= ['Ga', 'As'],
coords = [3*[.0], 3*[.25]],
)
print(structure)
```
For more information about pymatgen, please consult its official documentation.
# Generating the Ground State Density #
To begin, we will run a ground state DFT calculation to self-consistency to generate the ground state charge density for the calculation. This ground state charge density will be fed into all wavefunction calculations in the next step. We use Abinit in this notebook, however BerkeleyGW and BGWpy supports a number of other DFT packages.
First, we will create an object of the `AbinitScfTask` class to prepare the needed variables:
```
task = AbinitScfTask(
dirname = 'Runs/11-Density',
structure = Structure.from_file('../Data/Structures/GaAs.json'),
prefix = 'GaAs', # File names prefix. You don't really need to specify this with abinit.
pseudo_dir = '../Data/Pseudos',
pseudos = ['31-Ga.pspnc', '33-As.pspnc'],
ngkpt = [2,2,2], # k-points grid
kshift = [.5,.5,.5], # k-points shift
ecut = 5.0, # Wavefunctions cutoff energy
# These are the default parameters for the MPI runner.
# You can specify them here, but it's better to store this info in
# the configuration file ~/.BGWpyrc
nproc=1,
nproc_per_node=1,
mpirun='mpirun',
nproc_flag='-n',
nproc_per_node_flag='--npernode',
)
```
As you can see, BGWpy has a number of parameters that you will need to set. However, many of these parameters are consistent from calculation to calculation, so we'll store them in dictionaries that we can reuse for future steps.
First, a dictionary to store all variables that will be used across all Abinit calculations:
```
structure_and_pseudos = dict(
structure = Structure.from_file('../Data/Structures/GaAs.json'),
pseudo_dir = '../Data/Pseudos',
pseudos = ['31-Ga.pspnc', '33-As.pspnc'],
)
```
Next, a dictionary to store the variables which are used only for this particular SCF task:
```
scf_settings = dict(
ngkpt = [2,2,2], # k-points grid
kshift = [.5,.5,.5], # k-points shift
ecut = 5.0, # Wavefunctions cutoff energy
)
```
And finally, a dictionary to store the settings related to running calculations with MPI.
```
mpi_settings = dict( # Then again, you should store those settings in ~/.BGWpyrc
nproc=1,
nproc_per_node=1,
mpirun='mpirun',
nproc_flag='-n',
nproc_per_node_flag='--npernode',
)
```
Note that all these dictionaries correspond to arguments for the `AbinitScfTask`, stored as key/value pairs. This allows us to use dictionary unpacking to considerably tidy up our code:
```
scf_task = AbinitScfTask(
dirname='Runs/11-Density',
**scf_settings,
**structure_and_pseudos,
**mpi_settings,
)
```
Now that we've created the `AbinitScfTask` task, we can use the `write` method to write the needed input files to disk:
```
scf_task.write()
```
If you receive an error message stating that an executable could not be found, you likely do not have the needed BerkeleyGW and Abinit binary folders in your `PATH` environment variable.
Let's take a look at the folder that was created by this task using Jupyter's built-in `!ls` magic command:
```
!ls 'Runs/11-Density'
```
In our new folder, there are several new files:
* `GaAs.files`, the list of files used by Abinit.
* `GaAs.in`, the Abinit input variables.
* `run.sh`, the execution script.
and folders used by abinit for the input data files, outputs, and temporary files:
* `input_data`
* `out_data`
* `tmp_data`
Now that we've created the needed input files, let's run the `run.sh` script using the `run` method. Note that this step will take a few seconds, as it will run Abinit in the background.
```
scf_task.run()
```
Finally, we can check the status of the calculation using the `report` method. You should see a message telling you that it's been completed.
```
scf_task.report()
```
It is possible to access the data files produced by this task with
```
charge_density_fname = scf_task.get_odat('DEN')
vxc_fname = scf_task.get_odat('VXC')
print("Charge density file name: {}".format(charge_density_fname))
print("Exchange-correlation potential file name: {}".format(vxc_fname))
```
This won't be necessary, however, when we get to use the `AbinitBgwFlow`.
# Generating the Wavefunctions #
Now that we've generated the ground state density, we'll use it to generate the wavefunctions that we'll feed into BerkeleyGW. This may be done with the `AbinitBgwFlow` class. As mentioned in the introduction, we'll need up to 6 different types of wavefunction files.
## WFN ##
`WFN` is the "standard" k-shifted wavefunction file which is read by the `Epsilon` calculation, and thus is needed for all BerkeleyGW calculations.
It (and all other wavefunction files) are generated using the `AbinitBgwFlow` class. The only difference between these wavefunction types are the parameter values used:
```
task = AbinitBgwFlow(
dirname = 'Runs/12-Wfn',
structure = Structure.from_file('../Data/Structures/GaAs.json'),
prefix = 'GaAs',
pseudo_dir = '../Data/Pseudos',
pseudos = ['31-Ga.pspnc', '33-As.pspnc'],
ngkpt = [2,2,2], # k-points grid
kshift = [.5,.5,.5], # k-points shift
ecut = 5.0, # Wavefunctions cutoff energy
nband = 9, # Number of bands
input_variables = {'autoparal' : 1}, # Any extra input variables we want to specify
charge_density_fname = '11-Density/out_data/odat_DEN',
vxc_fname = '11-Density/out_data/odat_VXC',
# These are the default parameters for the MPI runner.
# Please adapt them to your needs.
nproc = 1,
nproc_per_node = 1,
mpirun = 'mpirun',
nproc_flag = '-n',
nproc_per_node_flag = '--npernode',
)
```
As before, we will break up these arguments into sets of dictionaries: the settings common to all wavefunction calculations
```
wfn_common_settings = dict(
ecut = 5.0, # Wavefunctions cutoff energy
input_variables = {'autoparal' : 1}, # Any extra input variables we want to specify
charge_density_fname = charge_density_fname,
vxc_fname = vxc_fname,
)
```
and the arguments specific to the current wavefunction calculation
```
wfn_settings = dict(
ngkpt = [2,2,2], # k-points grid
kshift = [.5,.5,.5], # k-points shift
nband = 9, # Number of bands
**wfn_common_settings)
```
Reusing dictionaries of settings previously defined,
We can now create the instance of the `AbinitBgwFlow` class:
```
wfn_flow = AbinitBgwFlow(
dirname='Runs/12-Wfn',
**wfn_settings,
**structure_and_pseudos,
**mpi_settings)
```
As before, we'll write the input files to disc then run the calculation:
```
wfn_flow.write()
wfn_flow.run()
wfn_flow.report()
```
The output specifies that we've actually run two calculations here: a `WFN` calculation where we calculate wavefunctions using Abinit, and `Abi2BGW` where we convert the resulting Abinit-specific output files into a format readable by BerkeleyGW. Unlike in the density case where we ran a single task, here we're running two tasks (`WFN` and `Abi2BGW`) in a workflow (hence the name `AbinitBgwFlow`).
## WFNq ##
Next, we'll create `WFNq`, which is the "standard" k-shifted and q-shifted wavefunction file which is read by the `Epsilon` calculation, and thus is needed for all BerkeleyGW calculations.
The only dictionary we need to create is the settings specific to the `WFNq` wavefunction:
```
wfnq_settings = dict(
ngkpt = [2,2,2], # k-points grid
kshift = [.5,.5,.5], # k-points shift
qshift = [.001,.0,.0],# k-points q-shift
**wfn_common_settings)
```
And then we can prepare the calculation:
```
wfnq_flow = AbinitBgwFlow(
dirname='Runs/13-Wfnq',
**wfnq_settings,
**structure_and_pseudos,
**mpi_settings)
```
Create it, and run it:
```
wfnq_flow.write()
wfnq_flow.run()
wfnq_flow.report()
```
## Wfn_co ##
Next, we'll create `WFN_co`, which is the wavefunction on a coarser (and unshifted) grid than `WFN`. This is used by `Sigma`, `Kernel`, and `Absorption`, and thus will be needed by most BerkeleyGW calculations. We will also use this calculation to generate the ground state density and exchange-correlation energy density that will be used by `Sigma`.
Once again, we set up the dictionary with our needed variables:
```
wfn_co_settings = dict(
ngkpt = [2,2,2], # k-points grid
kshift = [.0,.0,.0], # k-points shift
nband = 9, # Number of bands
rhog_flag = True, # Also convert the charge density for BGW.
vxcg_flag = True, # Also convert vxc for BGW.
**wfn_common_settings)
```
Note that there's a new flag `rhog_flag` which tells `AbinitBgwFlow` to generate additional density-related files,
while the vxcg_flag tells the `Abi2BGW` task to read and convert the `VXC` file.
Now we can prepare the calculation:
```
wfn_co_flow = AbinitBgwFlow(
dirname = 'Runs/14-Wfn_co',
**wfn_co_settings,
**structure_and_pseudos,
**mpi_settings)
```
And create and run it:
```
wfn_co_flow.write()
wfn_co_flow.run()
wfn_co_flow.report()
```
## WFN_fi ##
Next, we'll create `WFN_fi`, the k-shifted `WFN` on a finer grid than `WFN`. This is used during interpolation in the `Absorption` executable and thus is only needed if you need to solve the BSE equations. (Symmetry is also turned off for this calculation.)
```
wfn_fi_settings = dict(
nband = 9, # Number of bands
ngkpt = [2,2,2], # k-points grid
kshift = [.5,.5,.5], # k-points shift
symkpt = False, # Do not reduce the k-point grid with symmetries.
**wfn_common_settings)
wfn_fi_flow = AbinitBgwFlow(
dirname = 'Runs/15-Wfn_fi',
**wfn_fi_settings,
**structure_and_pseudos,
**mpi_settings)
wfn_fi_flow.write()
wfn_fi_flow.run()
wfn_fi_flow.report()
```
## WFNq_fi ##
FINALLY, we'll create `WFNq_fi`, the k-shifted and q-shifted `WFN` on a finer grid than `WFN`. Like `WFN_fi`, this is used during interpolation in the `Absorption` executable and thus is only needed if you need to solve the BSE equations. (And symmetry is turned off, as before.)
Let's go through the steps again:
```
wfnq_fi_settings = dict(
nband = 9, # Number of bands
ngkpt = [2,2,2], # k-points grid
kshift = [.5,.5,.5], # k-points shift
qshift = [.001,.0,.0],# k-points q-shift
symkpt = False, # Do not reduce the k-point grid with symmetries.
**wfn_common_settings)
wfnq_fi_flow = AbinitBgwFlow(
dirname = 'Runs/16-Wfnq_fi',
**wfnq_fi_settings,
**structure_and_pseudos,
**mpi_settings)
wfnq_fi_flow.write()
wfnq_fi_flow.run()
wfnq_fi_flow.report()
```
# Running GW #
Now the moment you've been waiting for, when we actually run a GW calculation!
## Epsilon ##
Our first step is to run an `Epsilon` calculation, where we'll generate the dielectric matrix (to be precise, the inverse of the dielectric matrix.)
Because BerkeleyGW uses a file-based communication system, we'll need to specify the location of the wavefunction files that we previously calculated:
```
epsilon_input_files = dict(
wfn_fname='Runs/12-Wfn/wfn.cplx',
wfnq_fname='Runs/13-Wfnq/wfn.cplx',
)
```
Actually, we can set the file name above using a property of the flow
```
epsilon_input_files = dict(
wfn_fname=wfn_flow.wfn_fname,
wfnq_fname=wfnq_flow.wfn_fname,
)
```
As well as the settings for an `Epsilon` calculation:
```
epsilon_settings = dict(
ngkpt = wfn_settings['ngkpt'], # 'ngkpt': [2, 2, 2],
qshift = wfnq_settings['qshift'], # 'qshift': [.001, .0, .0],
ecuteps = 10.0,
)
```
And then we can prepare the Epsilon calculation using an `EpsilonTask` object (reusing our `mpi_settings` dictionary from before):
```
epsilon_task = EpsilonTask(
dirname='Runs/21-Epsilon',
structure=structure,
**epsilon_input_files,
**epsilon_settings,
**mpi_settings)
```
Let's run the calculation:
```
epsilon_task.write()
epsilon_task.run()
epsilon_task.report()
```
## Sigma ##
Now that we've calculated the (inverse) dielectric matrix and needed wavefunctions, we have everything we need to calculate the GW self-energy. This is done with the `Sigma` executable, which takes as inputs the results from our `WFN_co` and `Epsilon` calculations:
```
sigma_input_files = dict(
wfn_co_fname='Runs/14-Wfn_co/wfn.cplx',
rho_fname='Runs/14-Wfn_co/rho.cplx',
vxc_fname='Runs/14-Wfn_co/vxc.cplx',
eps0mat_fname='Runs/21-Epsilon/eps0mat.h5',
epsmat_fname='Runs/21-Epsilon/epsmat.h5',
)
```
Then again, making use of the object properties, we can get the above file names with
```
sigma_input_files = dict(
wfn_co_fname=wfn_co_flow.wfn_fname,
rho_fname=wfn_co_flow.rho_fname,
vxc_fname=wfn_co_flow.vxc_fname,
eps0mat_fname=epsilon_task.eps0mat_fname,
epsmat_fname=epsilon_task.epsmat_fname,
)
```
Specify the settings:
```
sigma_settings = dict(
ngkpt = wfn_co_settings['ngkpt'], # ngkpt': [2,2,2],
ibnd_min = 1, # Minimum band for GW corrections
ibnd_max = 8, # Maximum band for GW corrections
extra_lines = ['dont_use_vxcdat'],
#'extra_lines' : ['dont_use_vxcdat', 'dont_use_hdf5'],
)
```
Prepare the calculation:
```
sigma_task = SigmaTask(
dirname='Runs/22-Sigma',
structure=structure,
**sigma_input_files,
**sigma_settings,
**mpi_settings)
```
And finally run it.
```
# Execution
sigma_task.write()
sigma_task.run()
sigma_task.report()
```
If you see an `Unfinished` status, something went wrong, and you should inspect the content of the run directory, in particular the main output file `Runs/22-Sigma/sigma.out`.
Make sure you are using the latest version of BerkeleyGW.
If you see a `Completed` status, then congratulations! You have successfully ran a BerkeleyGW calculation from start to finish.
# Running BSE #
For those of you who want to go further, BerkeleyGW can calculate excitonic properties at the GW+BSE level of theory. This is done with the `KernelTask` and `AbsorptionTask` classes.
## Kernel ##
`Kernel` takes in as inputs the results of `WFN_co` and `Epsilon`:
```
kernel_input_files = dict(
wfn_co_fname=wfn_co_flow.wfn_fname,
eps0mat_fname=epsilon_task.eps0mat_fname,
epsmat_fname=epsilon_task.epsmat_fname,
)
```
We can specify its settings:
```
kernel_settings = dict(
ngkpt = wfn_co_settings['ngkpt'],
ecuteps = epsilon_settings['ecuteps'],
nbnd_val = 4,
nbnd_cond = 4,
# These extra lines will be added verbatim to the input file.
extra_lines = ['use_symmetries_coarse_grid', 'screening_semiconductor'],
)
```
Prepare the calculation:
```
kernel_task = KernelTask(
dirname='Runs/23-Kernel',
structure=structure,
**kernel_input_files,
**kernel_settings,
**mpi_settings)
```
And finally run it:
```
kernel_task.write()
kernel_task.run()
kernel_task.report()
```
## Absorption ##
Finally, we solve the BSE equation via the `Absorption` executable. It has as inputs the results of `WFN_co`, `WFNq_fi`, and `WFN_fi`, as well as all previous BerkeleyGW executables `Epsilon`, `Sigma`, and `Kernel`:
```
absorption_input_files = dict(
wfn_co_fname = 'Runs/14-Wfn_co/wfn.cplx',
wfn_fi_fname = 'Runs/15-Wfn_fi/wfn.cplx',
wfnq_fi_fname = 'Runs/16-Wfnq_fi/wfn.cplx',
eps0mat_fname = 'Runs/21-Epsilon/eps0mat.h5',
epsmat_fname = 'Runs/21-Epsilon/epsmat.h5',
eqp_fname = 'Runs/22-Sigma/eqp1.dat',
bsemat_fname = 'Runs/23-Kernel/bsemat.h5'
# If you don't use hdf5, the BSE matrix is written in two separate files.
#bsexmat_fname = 'Runs/23-Kernel/bsexmat',
#bsedmat_fname = 'Runs/23-Kernel/bsedmat',
)
```
Or, using the appropriate variables,
```
absorption_input_files = dict(
wfn_co_fname = wfn_co_flow.wfn_fname,
wfn_fi_fname = wfn_fi_flow.wfn_fname,
wfnq_fi_fname = wfnq_fi_flow.wfn_fname,
eps0mat_fname = epsilon_task.eps0mat_fname,
epsmat_fname = epsilon_task.epsmat_fname,
eqp_fname = sigma_task.eqp1_fname,
bsemat_fname = kernel_task.bsemat_fname,
# If you don't use hdf5, the BSE matrix is written in two separate files.
#bsexmat_fname = kernel_task.bsexmat_fname,
#bsedmat_fname = kernel_task.bsedmat_fname,
)
```
Next, we set the calculation settings. There are...a lot of those.
```
absorption_settings = dict(
ngkpt = [2, 2, 2], # k-points grid
nbnd_val = 4, # Number of valence bands
nbnd_cond = 4, # Number of conduction bands
nbnd_val_co = 4, # Number of valence bands on the coarse grid
nbnd_cond_co = 4, # Number of conduction bands on the coarse grid
nbnd_val_fi = 4, # Number of valence bands on the fine grid
nbnd_cond_fi = 4, # Number of conduction bands on the fine grid
# These extra lines will be added verbatim to the input file.
extra_lines = [
'use_symmetries_coarse_grid',
'no_symmetries_fine_grid',
'no_symmetries_shifted_grid',
'screening_semiconductor',
'use_velocity',
'gaussian_broadening',
'eqp_co_corrections',
],
# These extra variables will be added to the input file as '{variable} {value}'.
extra_variables = {
'energy_resolution': 0.15,
},
)
```
But preparing the calculation is as simple as always:
```
absorption_task = AbsorptionTask(
dirname='Runs/24-Absorption',
structure=structure,
**absorption_input_files,
**absorption_settings,
**mpi_settings)
```
And, at last, we can run it.
```
absorption_task.write()
absorption_task.run()
absorption_task.report()
```
Congratulations yet again! You've run a full GW+BSE calculation!
# Using workflows #
Can we do all of these steps at once? Yes we can!
```
from BGWpy import GWFlow, BSEFlow
flow = GWFlow(
dirname='Runs/32-GW',
dft_flavor='abinit',
structure = Structure.from_file('../Data/Structures/GaAs.json'),
prefix = 'GaAs',
pseudo_dir = '../Data/Pseudos',
pseudos = ['31-Ga.pspnc', '33-As.pspnc'],
ecut = 10.0,
nbnd = 9,
ngkpt = [2,2,2],
kshift = [.5,.5,.5],
qshift = [.001,.0,.0],
ibnd_min = 1,
ibnd_max = 8,
ecuteps = 7.5,
# Extra lines and extra variables
epsilon_extra_lines = [],
epsilon_extra_variables = {},
sigma_extra_lines = ['screening_semiconductor'],
sigma_extra_variables = {},
**mpi_settings)
```
Let's execute the whole thing.
```
flow.write()
flow.run()
flow.report()
```
Likewise, for the BSE
```
flow = BSEFlow(
dirname='Runs/33-BSE',
dft_flavor='abinit',
structure = Structure.from_file('../Data/Structures/GaAs.json'),
prefix = 'GaAs',
pseudo_dir = '../Data/Pseudos',
pseudos = ['31-Ga.pspnc', '33-As.pspnc'],
ecut = 5.0,
nbnd = 12,
nbnd_fine = 9,
ngkpt = [2,2,2],
kshift = [.5,.5,.5],
qshift = [.001,.0,.0],
# Fine grids
ngkpt_fine = [4,4,4],
kshift_fine = [.0,.0,.0],
ibnd_min = 1,
ibnd_max = 8,
ecuteps = 10.0,
sigma_extra_lines = ['screening_semiconductor'],
# Kernel variables
nbnd_val = 4,
nbnd_cond = 4,
kernel_extra_lines = [
'use_symmetries_coarse_grid',
'screening_semiconductor',
],
# Absorption variables
nbnd_val_co=4,
nbnd_cond_co=4,
nbnd_val_fi=4,
nbnd_cond_fi=4,
absorption_extra_lines = [
'use_symmetries_coarse_grid',
'no_symmetries_fine_grid',
'no_symmetries_shifted_grid',
'screening_semiconductor',
'use_velocity',
'gaussian_broadening',
'eqp_co_corrections',
],
absorption_extra_variables = {
'energy_resolution' : 0.15,
},
**mpi_settings)
flow.write()
flow.run()
flow.report()
```
## Custom workflows ##
For a realistic GW or BSE calculation, in general, you don't run every step all at once like we did. You actually perform a **convergence study**, in which you gradually increase the parameters until the calculation is converged. For example, in a GW calculation, we have the following convergence studies to perform:
* Convergence of the k-points grids for epsilon
* Convergence of the q-points grid for sigma
* Convergence on the number of bands for epsilon
* Convergence on the number of bands for sigma
* Convergence on the size of the dielectric matrix
For these, you will need to construct your own workflow. Here is an example.
```
from os.path import join as pjoin
from BGWpy import Workflow
workflow = Workflow(dirname='Runs/50-Workflow')
epsilon_input_files = dict(
wfn_fname=wfn_flow.wfn_fname,
wfnq_fname=wfnq_flow.wfn_fname,
)
sigma_input_files = dict(
wfn_co_fname=wfn_co_flow.wfn_fname,
rho_fname=wfn_co_flow.rho_fname,
vxc_fname=wfn_co_flow.vxc_fname,
)
ecuteps_l = [5.0, 7.5, 10.0]
for i, ecuteps in enumerate(ecuteps_l):
epsilon_settings['ecuteps'] = ecuteps
epsilon_task = EpsilonTask(
dirname=pjoin(workflow.dirname, 'Epsilon{}'.format(i)),
structure=structure,
**epsilon_input_files,
**epsilon_settings,
**mpi_settings)
sigma_task = SigmaTask(
dirname=pjoin(workflow.dirname, 'Sigma{}'.format(i)),
structure=structure,
eps0mat_fname=epsilon_task.eps0mat_fname,
epsmat_fname=epsilon_task.epsmat_fname,
**sigma_input_files,
**sigma_settings,
**mpi_settings)
workflow.add_tasks([epsilon_task, sigma_task])
workflow.write()
workflow.run()
workflow.report()
```
Note that you could also run and report each task sequentially with
```
for task in workflow.tasks:
task.run()
task.report()
```
And of course, you should now check the result of the calculations in the different output files, and plot the convergence of the quasiparticle energies as a function of ecuteps.
# Closing word #
`BGWpy` allows you to use pre-defined workflows and custom ones. However, it is your own responsibility to check every input parameter, and verify the convergence of the results. Happy computing!
| PypiClean |
/ChIP_R-1.2.0-py3-none-any.whl/chipr/ival.py | import random
class IntervalTree:
    """
    Binary search tree for storing long integer intervals, and for performing queries on them.
    See https://en.wikipedia.org/wiki/Interval_tree, specifically the Augmented kind.
    The present implementation balances the tree by using randomisation.
    """
    root = None   # pointer to the root node of the binary search tree
    stack = None  # traversal state used by the iterator protocol

    def __iter__(self):
        """Initialise an in-order (sorted by interval) traversal over the stored nodes."""
        self.current = self.root
        self.stack = Stack()
        return self

    def __next__(self):
        """Return the next IntervalNode in interval order; raise StopIteration when done."""
        # Iterative in-order traversal: descend left pushing ancestors, then
        # visit the node on top of the stack and continue with its right subtree.
        while self.current != None:
            self.stack.push(self.current)
            self.current = self.current.left
        if self.stack.isEmpty():
            raise StopIteration
        self.current = self.stack.pop()
        ret = self.current
        self.current = self.current.right
        return ret

    def __len__(self):
        """Return the number of stored intervals (0 for an empty tree)."""
        # Guard against an empty tree; previously this raised AttributeError.
        return 0 if self.root == None else self.root.N

    def __contains__(self, ival):
        """Return True if an interval equal to ival is stored in the tree."""
        return self.get(ival) != None

    def get(self, ival):
        """Return the node storing an interval equal to ival, or None if absent."""
        return self._get(self.root, ival)

    def _get(self, node, ival):
        # Plain BST search relying on Interval's total ordering (by min, then max).
        if node == None: return None
        if ival < node.ival:
            return self._get(node.left, ival)
        elif ival > node.ival:
            return self._get(node.right, ival)
        else:
            return node

    def isect(self, ival, node = None):
        """ Look for intersecting interval in subtree rooted at specified node (root by default).
        Returns node of intersecting interval, or None if there is none. """
        if self.root == None: return None
        if node == None: return self.isect(ival, self.root)
        while node != None:
            if isect(ival, node.ival): return node
            elif node.left == None: node = node.right
            # The left subtree cannot intersect if everything in it ends before
            # the query starts (left.max is the augmented subtree statistic).
            elif node.left.max < ival.min: node = node.right
            else: node = node.left
        return None

    def isectall(self, ival):
        """ Look for all intersecting intervals in the tree.
        Returns the nodes of all intersecting intervals (possibly empty list). """
        return _isectall(ival, self.root)

    def closest(self, query):
        """ Retrieve the interval Y stored in the tree that is closest to the given interval X.
        If the given interval overlaps with one or more stored intervals, one is returned:
        the interval Y with the greatest Jaccard index to X. If multiple intervals are equally close,
        only one is returned.
        :param query: the interval for which the closest is sought
        :return: the IntervalNode closest to the given query interval (None for an empty tree)
        """
        ovlap = self.isectall(query)
        if len(ovlap) == 0: # overlapping intervals are not in the tree
            return _closest(query, self.root)
        else:
            # Among all overlapping intervals pick the one with the best Jaccard index.
            best_iv = None
            best_ji = 0
            for node in ovlap:
                ji = jaccard(node.ival, query)
                if best_iv == None or ji > best_ji:
                    best_iv = node
                    best_ji = ji
            return best_iv

    def put(self, ival, value = None):
        """Insert ival with an optional associated value; if an equal interval is
        already stored, the value is merged into its value set instead."""
        nodex = self.get(ival)
        if nodex:
            # Fixed: skip None so that put(ival) on an existing interval does not
            # pollute the value set (mirrors IntervalNode.__init__'s behaviour).
            if value != None:
                nodex.values.add(value)
        else:
            self.root = self._randomizedInsert(self.root, ival, value)

    def _randomizedInsert(self, node, ival, value):
        # Randomised BST insertion: with probability ~1/(subtree size) the new
        # interval is inserted at the root of this subtree, which keeps the
        # tree balanced in expectation.
        if node == None: return IntervalNode(ival, value)
        if random.uniform(0, 1) * node.N < 1.0: return self._rootInsert(node, ival, value)
        if ival < node.ival:
            node.left = self._randomizedInsert(node.left, ival, value)
        else:
            node.right = self._randomizedInsert(node.right, ival, value)
        _fix(node)
        return node

    def _rootInsert(self, node, ival, value):
        # Insert at a leaf, then rotate the new node up to the root of this subtree.
        if node == None: return IntervalNode(ival, value)
        if ival < node.ival:
            node.left = self._rootInsert(node.left, ival, value)
            node = _rotR(node)
        else:
            node.right = self._rootInsert(node.right, ival, value)
            node = _rotL(node)
        return node
def _isectall(ival, node):
    """Collect the nodes of every stored interval intersecting ival in the
    subtree rooted at node; returns a (possibly empty) list of IntervalNodes."""
    if node is None:
        return []
    hits = [node] if isect(ival, node.ival) else []
    left = node.left
    # The left subtree is worth visiting only if some interval in it can reach
    # the query (left.max is the largest end point stored below).
    if left is not None and left.max >= ival.min:
        hits.extend(_isectall(ival, left))
    # Search right if something was found here/left, or the left side was pruned.
    if hits or left is None or left.max < ival.min:
        hits.extend(_isectall(ival, node.right))
    return hits
def _closest(query, cand):
    """ Recursively find the interval with the minimum distance to that given.
    This internal function does not guarantee that distances are sensible when overlapping
    intervals exist; essentially it assumes that overlaps have been eliminated prior.
    :param query: the query interval
    :param cand: node from which the search starts
    :return: the IntervalNode closest to the query (None for an empty subtree) """
    fav = None      # best node found so far
    favdist = -1    # distance from fav to the query
    while cand != None:
        if query == cand.ival: return cand  # exact match: cannot do better
        distx = query.dist(cand.ival)
        if fav == None or distx <= favdist:
            fav = cand
            favdist = distx
        # Decide which child subtree may hold a closer interval.
        if cand.left == None: cand = cand.right
        elif cand.right == None: cand = cand.left
        elif cand.ival.min > query.max: cand = cand.left # the smallest, indexed value (on left) is AFTER the query min
        else: # no way to choose without looking in the intervals below
            favleft = None
            # Use each subtree's [min, max] span as an optimistic lower bound on
            # distance; only recurse when that bound beats the current best.
            distleft = query.dist(Interval(cand.left.min, cand.left.max))
            if distleft < favdist:
                favleft = _closest(query, cand.left)
                distleft = query.dist(favleft.ival) if favleft != None else MAX_VALUE
            distright = query.dist(Interval(cand.right.min, cand.right.max))
            if distright < favdist:
                favright = _closest(query, cand.right)
                distright = query.dist(favright.ival) if favright != None else MAX_VALUE
            # NOTE: favright is only referenced when distright < favdist, in which
            # case it was assigned above; otherwise the conditional yields fav.
            if distleft < distright:
                return favleft if distleft < favdist else fav
            else:
                return favright if distright < favdist else fav
    return fav
class IntervalNode:
    """
    A node of the interval search tree: one interval, the set of values
    attached to it, and cached statistics about the subtree below it.
    """
    ival = None    # the interval stored at this node
    values = None  # values associated with the interval
    left = None    # left child (intervals ordered before this one)
    right = None   # right child (intervals ordered after this one)
    N = 1          # size of the subtree rooted here (this node included)
    min = 0        # smallest end point in the subtree rooted at this node
    max = 0        # largest end point in the subtree rooted at this node

    def __init__(self, interval, value = None):
        self.ival = interval
        self.min = interval.min
        self.max = interval.max
        # Start with an empty value set unless an initial value was supplied.
        self.values = set() if value is None else {value}

    def add(self, value):
        # Falsy values (None, 0, '', ...) are deliberately ignored.
        if value:
            self.values.add(value)

    def __str__(self):
        # 'o'/'x' flag the presence/absence of left and right children.
        return ('o' if self.left else 'x') + str(self.ival) + ('o' if self.right else 'x')

    def __unicode__(self):
        return ('o' if self.left else 'x') + self.ival.__unicode__() + ('o' if self.right else 'x')
def size(node):
    """Return the number of nodes in the subtree rooted at node (0 for None)."""
    return 0 if node is None else node.N
def _fix(node):
    """Recompute the cached subtree statistics (N, min, max) of node from its children."""
    if node is None:
        return
    node.N = 1 + size(node.left) + size(node.right)
    node.min = _min3(node.ival.min, _min(node.left), _min(node.right))
    node.max = _max3(node.ival.max, _max(node.left), _max(node.right))
MAX_VALUE = 9E30
MIN_VALUE = -9E30
def _min(node):
return MAX_VALUE if node == None else node.min
def _max(node):
return MIN_VALUE if node == None else node.max
def _min3(a, b, c):
return min(a, min(b, c))
def _max3(a, b, c):
return max(a, max(b, c))
def _rotR(node):
    """Rotate the subtree rooted at node to the right; returns the new subtree root."""
    pivot = node.left
    node.left = pivot.right
    pivot.right = node
    # Re-establish cached statistics bottom-up: demoted node first, then the pivot.
    _fix(node)
    _fix(pivot)
    return pivot
def _rotL(node):
    """Rotate the subtree rooted at node to the left; returns the new subtree root."""
    pivot = node.right
    node.right = pivot.left
    pivot.left = node
    # Re-establish cached statistics bottom-up: demoted node first, then the pivot.
    _fix(node)
    _fix(pivot)
    return pivot
class Stack:
    """ A LIFO stack backing the iterator over IntervalNodes in the IntervalTree. """

    def __init__(self):
        self.items = []

    def isEmpty(self):
        """True when nothing is stacked."""
        return not self.items

    def push(self, item):
        """Place item on top of the stack."""
        self.items.append(item)

    def pop(self):
        """Remove and return the top item."""
        return self.items.pop()

    def peek(self):
        """Return the top item without removing it."""
        return self.items[-1]

    def size(self):
        """Number of stacked items."""
        return len(self.items)
class Interval:
    """
    A closed one-dimensional interval [min, max] with numeric end points.
    Intervals are ordered by min first and max second; equality requires
    both end points to coincide.
    """

    def __init__(self, min, max):
        """Create the interval [min, max]; raises RuntimeError when min > max."""
        if (min <= max):
            self.min = min
            self.max = max
        else:
            raise RuntimeError

    def isect(self, that):
        """Return True if this interval overlaps that (a shared end point counts)."""
        if (that.max < self.min): return False
        if (self.max < that.min): return False
        return True

    def isectStrict(self, that):
        """Return True if the overlap is wider than a single shared end point."""
        if (that.max <= self.min): return False
        if (self.max <= that.min): return False
        return True

    def contains(self, x):
        """Return True if the point x lies within the interval (end points included)."""
        # Fixed: the original compared x against the *builtin* functions min and
        # max instead of the instance attributes, which raises TypeError in Python 3.
        return (self.min <= x) and (x <= self.max)

    def __eq__(self, other):
        if not isinstance(other, Interval): return False
        return self.min == other.min and self.max == other.max

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        # Order by min first, breaking ties on max; always False for non-Intervals.
        if not isinstance(other, Interval): return False
        return self.min < other.min or (self.min == other.min and self.max < other.max)

    def __gt__(self, other):
        if not isinstance(other, Interval): return False
        return self.min > other.min or (self.min == other.min and self.max > other.max)

    def __str__(self):
        return '[' + str(self.min) + ', ' + str(self.max) + ']'

    def __unicode__(self):
        return '[' + str(self.min) + ', ' + str(self.max) + ']'

    def __sizeof__(self):
        # NOTE(review): overrides object.__sizeof__ to mean interval *width*, not
        # memory footprint, which will confuse sys.getsizeof; kept for compatibility.
        return self.max - self.min

    def dist(self, that, signed = False, centre2centre = False):
        """ Calculate and return the closest distance (from one end of this interval
        to the nearest end of that); 0 if the intervals overlap.
        If centre2centre is True, use the centre-to-centre distance instead.
        If signed is True, the distance is negative if this interval is after that.
        """
        if not centre2centre:
            if not signed:
                if (self.min > that.max): return self.min - that.max # that interval is BEFORE this
                if (self.max < that.min): return that.min - self.max # that interval is AFTER this
            else: # distance is signed
                if (self.min > that.max): return that.max - self.min # that interval is BEFORE this
                if (self.max < that.min): return that.min - self.max # that interval is AFTER this
            return 0
        else:
            thiscentre = (self.max - self.min) / 2 + self.min
            thatcentre = (that.max - that.min) / 2 + that.min
            return thatcentre - thiscentre if signed else abs(thatcentre - thiscentre)
def dist(first, second, signed = False, centre2centre = False):
    """ Calculate and return the closest distance (from one end of the interval to the other).
    If centre2centre is True, use the centre-to-centre distance instead.
    If signed is True, the distance is negative if the first is after the second.
    Raises RuntimeError if either argument is not an Interval.
    """
    if isinstance(first, Intervnal := Interval) and isinstance(second, Interval):
        return first.dist(second, signed, centre2centre)
    # Fixed: the original *returned* the RuntimeError class instead of raising it.
    raise RuntimeError('dist() requires two Interval instances')
def union(first, second):
    """Return the interval covering both inputs; raises RuntimeError if they are disjoint."""
    if not first.isect(second):
        raise RuntimeError
    lo = min(first.min, second.min)
    hi = max(first.max, second.max)
    return Interval(lo, hi)
def isect(first, second):
    """Return the overlap of the two intervals, or None if they are disjoint."""
    if not first.isect(second):
        return None
    lo = max(first.min, second.min)
    hi = min(first.max, second.max)
    return Interval(lo, hi)
def jaccard(first, second):
    """Return the Jaccard index (overlap length / union length) of two
    intervals.

    Returns 0 when the intervals are disjoint or when the union has
    zero length (two identical point intervals).
    """
    if not isect(first, second):
        return 0
    # Overlap runs from the larger min to the smaller max; the union
    # spans the smaller min to the larger max.
    overlap = min(first.max, second.max) - max(first.min, second.min)
    extent = max(first.max, second.max) - min(first.min, second.min)
    return overlap / extent if extent > 0 else 0
if __name__ == '__main__':
    # Ad-hoc smoke test / demo for Interval and IntervalTree.
    a = Interval(13, 20)
    b = Interval(25, 30)
    c = Interval(27, 33)
    d = Interval(40, 50)
    e = Interval(21, 22)
    f = Interval(36, 38)
    g = Interval(16, 19)
    h = Interval(28, 31)
    i = Interval(55, 66)
    j = Interval(-3, 0)
    k = Interval(24, 24)
    l = Interval(52, 55)
    print('dist(b,a,signed=False,centre2centre=False)=', dist(b, a, signed=False, centre2centre=False))
    print('dist(b,a,signed=True,centre2centre=False)=', dist(b, a, signed=True, centre2centre=False))
    print('dist(b,a,signed=False,centre2centre=True)=', dist(b, a, signed=False, centre2centre=True))
    print('dist(b,a,signed=True,centre2centre=True)=', dist(b, a, signed=True, centre2centre=True))
    t = IntervalTree()
    t.put(a, 'A')
    t.put(b, 'B')
    t.put(c, 'C')
    t.put(d, 'D')
    t.put(e, 'E')
    t.put(b, 123)
    t.put(b, 'blah')
    t.get(d).add('x999')
    t.put(i)
    t.put(j)
    t.put(g)
    t.put(k)
    print(c in t)
    print(e in t)
    print(t.get(a).values)
    print(t.get(d).values)
    print(t.get(b).values)
    print(t.isect(f))
    print(t.isect(g))
    tryme = f
    # Renamed from ``all`` to avoid shadowing the builtin.
    matches = t.isectall(tryme)
    print("Intersect with " + str(tryme) + ": ")
    for n in matches:
        print('\t' + str(n))
    print("Closest to " + str(tryme) + ": ")
    print(t.closest(tryme))
    print('Iterate through tree: ')
    for n in t:
        print('\t' + str(n))
# --- gqcms/ConstrainedMethod.py ---
import numpy as np
import pandas as pd
from scipy import linalg
from typing import List
from gqcms import Hubbard
from gqcms import FCI
from gqcms import HartreeFock
from gqcms import Determinant
from gqcms import createHamiltonianSCI
from gqcms import NumberOperator
from gqcms import DensityOperator
from gqcms import basisTransform
def ConstrainedFCI(molecule: Hubbard, operator: np.ndarray, m: float) -> pd.DataFrame:
    """
    Run FCI on a Hubbard Hamiltonian constrained by a feature operator.

    :param molecule: Hubbard class object
    :param operator: matrix representation of the feature operator
    :param m: Lagrange multiplier
    :returns: pandas DataFrame with the columns
        ['E', 'C', '1PDM', 'mu', 'expectation_value', 'D']
    """
    # Build H - m * operator and diagonalise it with FCI.
    constrained_hamiltonian = molecule.ConstrainHamiltonian(m, operator)
    result = FCI(constrained_hamiltonian)

    # <operator> evaluated with the ground-state one-particle density matrix.
    expectation = np.einsum("ij,ij", result["1PDM"][0], operator)
    density = DensityOperator(result['C'][0], molecule.basis, molecule.sites)

    # Record the multiplier and expectation value, and report the energy
    # with the constraint contribution added back (E + m * <operator>).
    result["mu"] = m
    result["expectation_value"] = expectation
    result["E"] = result["E"][0] + m * expectation
    result["D"] = [density]

    return result
class ConstrainedHartreeFock(HartreeFock):
    """
    Hartree-Fock solver for the Hubbard model with a constraint
    on the Hamiltonian matrices using the feature operator
    """

    def __init__(
        self,
        system: Hubbard,
        operator: np.ndarray,
        m: float,
        bdiis: bool = False,
        bdamping: bool = True,
        bseries: bool = False,
        max_size: int = None,
        diis_convergence: float = 1e-2,
        E_convergence: float = 1e-4,
        D_convergence: float = 1e-8,
        maxiter: int = 200,
    ):
        """
        Initialize a constrained Hubbard Hartree-Fock solver

        :param system: the Hubbard system used in the SCF algorithm
        :param operator: feature operator used to constrain the Hamiltonians
        :param m: Lagrange multiplier
        :param bdiis: use DIIS or not (default is False)
        :param bdamping: use density damping or not (default is True)
        :param bseries: return a pandas series instead of HubbardEnvironment (default is False)
        :param max_size: maximum size of the DIIS queue, if None queue is infinite (default is None)
        :param diis_convergence: min threshold of DIIS needed to stop the algorithm successfully
        :param E_convergence: min threshold of energy difference to stop the algorithm successfully
        :param D_convergence: min threshold of change in the density matrices
        :param maxiter: maximum number of iterations allowed if threshold is not reached
        """
        super().__init__(
            system, bdiis, bdamping, bseries, max_size, diis_convergence, E_convergence, D_convergence, maxiter
        )

        # Expose the Lagrange multiplier and feature operator to every
        # SCF step via the shared environment object.
        self._env.m = m
        self._env.mod = operator

        # Constrain the Hamiltonians right after they are built and
        # evaluate the operator expectation value at the end of a cycle.
        self._steps.insert(1, self.constrainHamiltonians)
        self._steps.append(self.expectationValue)

    def constrainHamiltonians(self):
        """
        Constrain the Hamiltonians using the mod matrix (H -> H - m * mod)
        """
        self._env.H_a -= self._env.m * self._env.mod
        self._env.H_b -= self._env.m * self._env.mod

    def expectationValue(self):
        """
        Compute the expectation value of the feature operator with the
        total (alpha + beta) density matrix.
        """
        self._env.expectationValue = np.einsum(
            "ij,ij", self._env.D_a + self._env.D_b, self._env.mod
        )

    def solve(self):
        """
        Overwrite default solve method to return a pandas Series
        """
        super().solve()

        return pd.Series(
            {
                "iterations": self._env.iteration,
                "E": self._env.E,
                "C_a": self._env.C_a,
                "C_b": self._env.C_b,
                "eps_a": self._env.eps_a,
                "eps_b": self._env.eps_b,
                "D_a": self._env.D_a,
                "D_b": self._env.D_b,
                "mu": self._env.m,
                "expectation_value": self._env.expectationValue,
            }
        )
def ConstrainedSCI(
    molecule: Hubbard,
    operator: np.ndarray,
    m: float,
    excitations: list = None,
    basis: list = None,
    result_HF = None,
    bdiis: bool = False,
    bdamping: bool = True,
    bseries: bool = False,
    max_size: int = None,
    diis_convergence: float = 1e-2,
    E_convergence: float = 1e-6,
    D_convergence: float = 1e-8,
    maxiter: int = 200,
) -> pd.Series:
    """
    Performs a constrained selected CI calculation with the given excitation
    degrees of the Hartree-Fock determinant.

    :param molecule: Hubbard molecule
    :param operator: feature operator used in the constraint
    :param m: Lagrange multiplier
    :param excitations: list of excitation degrees that are taken into account
    :param basis: list of Determinants
    :param result_HF: result of a Hartree-Fock computation; if None, one is
        performed here
    :param bdiis: use DIIS or not (default is False)
    :param bdamping: use density damping or not (default is True)
    :param bseries: return a pandas series object or not (default is False)
    :param max_size: max queue size of DIIS (default is None)
    :param diis_convergence: min threshold of DIIS needed to stop the
        algorithm successfully (default is 1e-2)
    :param E_convergence: min threshold of energy difference to stop the
        algorithm successfully (default is 1e-6)
    :param D_convergence: min threshold on the density matrices (default is 1e-8)
    :param maxiter: maximum number of iterations allowed if the thresholds
        are not reached (default is 200)
    :return: pandas Series with keys
        ['E', 'C', 'D_site', 'D_mo', 'mu', 'expectation_value']
    """
    # Perform a Hartree-Fock computation if no reference was supplied.
    if result_HF is None:
        HF_solver = HartreeFock(molecule, bdiis, bdamping, bseries, max_size, diis_convergence, E_convergence, D_convergence, maxiter)
        result_HF = HF_solver.solve()

    # Create the SCI Hamiltonian from an explicit determinant basis, or
    # from a list of excitation degrees of the HF determinant.
    if basis is not None:
        H, det_list = createHamiltonianSCI(molecule, result_HF, basis=basis, return_extra=True)
    elif excitations is not None:
        # Convert excitations to a list if a single int is given.
        if isinstance(excitations, int):
            excitations = [excitations]
        H, det_list = createHamiltonianSCI(molecule, result_HF, excitations=excitations, return_extra=True)
    else:
        raise ValueError("A list of excitations or determinants should be given.")

    # Transform the feature operator to the MO basis of each spin channel.
    operator_mo_a = np.einsum("uj,vi,uv", result_HF.C_a, result_HF.C_a, operator)
    operator_mo_b = np.einsum("uj,vi,uv", result_HF.C_b, result_HF.C_b, operator)

    # Spin-block the operator (alpha on even, beta on odd spin orbitals).
    operator_mo = np.zeros((2*molecule.sites, 2*molecule.sites))
    operator_mo[::2, ::2] = operator_mo_a
    operator_mo[1::2, 1::2] = operator_mo_b

    # Transform the operator to the CI (determinant) basis.
    nr_op_onv_mo = basisTransform(operator_mo, det_list)

    # Diagonalise the constrained Hamiltonian H - m * operator.
    energies, C_sci = linalg.eigh(H - m*nr_op_onv_mo)

    # Density matrix of the ground state in the ONV basis.
    D_onv = np.outer(C_sci[:, 0], C_sci[:, 0])
    # Density matrix in the HF-MO basis.
    D_mo = DensityOperator(C_sci[:, 0], det_list, molecule.sites)
    # Density matrix in the site basis.
    # NOTE(review): only C_a is used for the back-transformation --
    # presumably a restricted reference where C_a == C_b; confirm.
    D_site = result_HF.C_a @ D_mo @ result_HF.C_a.T

    # Expectation value of the feature operator in the site basis.
    expectation_value = np.einsum("ij,ij", D_site, operator)

    # Report the unconstrained energy E + m * <operator>.
    return pd.Series(
        {
            "E": energies[0] + m*expectation_value,
            "C": C_sci[:, 0],
            "D_site": D_site,
            "D_mo": D_mo,
            "mu": m,
            "expectation_value": expectation_value,
        }
    )
# --- authomatic/six.py ---
"""Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2015 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
# Vendored six metadata (upstream author and release).
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.9.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3

# Version-dependent aliases for the fundamental string/integer/class
# types, plus the platform's maximum container size.
if PY3:
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    binary_type = bytes

    MAXSIZE = sys.maxsize
else:
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str

    if sys.platform.startswith("java"):
        # Jython always uses 32 bits.
        MAXSIZE = int((1 << 31) - 1)
    else:
        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
        # Probe by asking len() for a value just past 2**31-1: a 32-bit
        # Py_ssize_t overflows, a 64-bit one does not.
        class X(object):

            def __len__(self):
                return 1 << 31
        try:
            len(X())
        except OverflowError:
            # 32-bit
            MAXSIZE = int((1 << 31) - 1)
        else:
            # 64-bit
            MAXSIZE = int((1 << 63) - 1)
        del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result) # Invokes __set__.
try:
# This is a bit ugly, but it avoids running this again by
# removing this descriptor.
delattr(obj.__class__, self.name)
except AttributeError:
pass
return result
class MovedModule(_LazyDescr):
    """Lazy reference to a module that was renamed between Python 2 and 3."""

    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        # Under Python 3 the new name is used (defaulting to *name*);
        # under Python 2 the old name is used.
        if PY3:
            self.mod = new if new is not None else name
        else:
            self.mod = old

    def _resolve(self):
        return _import_module(self.mod)

    def __getattr__(self, attr):
        resolved = self._resolve()
        value = getattr(resolved, attr)
        # Cache the attribute on self so __getattr__ is not hit again.
        setattr(self, attr, value)
        return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
    """Lazy reference to an attribute that moved between Python 2 and 3."""

    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            if new_mod is None:
                new_mod = name
            self.mod = new_mod
            # Fall back: explicit new name -> old name -> public name.
            if new_attr is None:
                new_attr = name if old_attr is None else old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            self.attr = name if old_attr is None else old_attr

    def _resolve(self):
        module = _import_module(self.mod)
        return getattr(module, self.attr)
class _SixMetaPathImporter(object):
"""
A meta path importer to import six.moves and its submodules.
This class implements a PEP302 finder and loader. It should be compatible
with Python 2.5 and all existing versions of Python3
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
Return true, if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required, if is_package is implemented"""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):

    """Lazy loading of moved objects"""
    __path__ = []  # mark as package


# Table of renamed modules/attributes: each entry maps the six.moves
# name to its Python 2 location and its Python 3 location.
_moved_attributes = [
    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
    MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
    MovedAttribute("intern", "__builtin__", "sys"),
    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
    MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
    MovedAttribute("reduce", "__builtin__", "functools"),
    MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
    MovedAttribute("StringIO", "StringIO", "io"),
    MovedAttribute("UserDict", "UserDict", "collections"),
    MovedAttribute("UserList", "UserList", "collections"),
    MovedAttribute("UserString", "UserString", "collections"),
    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
    MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
    MovedModule("builtins", "__builtin__"),
    MovedModule("configparser", "ConfigParser"),
    MovedModule("copyreg", "copy_reg"),
    MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
    MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
    MovedModule("http_cookies", "Cookie", "http.cookies"),
    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
    MovedModule("html_parser", "HTMLParser", "html.parser"),
    MovedModule("http_client", "httplib", "http.client"),
    MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
    MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
    MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
    MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
    MovedModule("cPickle", "cPickle", "pickle"),
    MovedModule("queue", "Queue"),
    MovedModule("reprlib", "repr"),
    MovedModule("socketserver", "SocketServer"),
    MovedModule("_thread", "thread", "_thread"),
    MovedModule("tkinter", "Tkinter"),
    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
    MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
    MovedModule("tkinter_colorchooser", "tkColorChooser",
                "tkinter.colorchooser"),
    MovedModule("tkinter_commondialog", "tkCommonDialog",
                "tkinter.commondialog"),
    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
                "tkinter.simpledialog"),
    MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
    MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
    MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
    MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
    MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
    MovedModule("winreg", "_winreg"),
]
# Attach each moved item to _MovedItems and register moved modules with
# the meta-path importer so "import six.moves.<name>" works.
for attr in _moved_attributes:
    setattr(_MovedItems, attr.name, attr)
    if isinstance(attr, MovedModule):
        _importer._add_module(attr, "moves." + attr.name)
del attr

_MovedItems._moved_attributes = _moved_attributes

# The six.moves module instance itself.
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_parse"""


# Attributes of urllib.parse and their Python 2 homes.
_urllib_parse_moved_attributes = [
    MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
    MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
    MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
    MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
    MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
    MovedAttribute("urljoin", "urlparse", "urllib.parse"),
    MovedAttribute("urlparse", "urlparse", "urllib.parse"),
    MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
    MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
    MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
    MovedAttribute("quote", "urllib", "urllib.parse"),
    MovedAttribute("quote_plus", "urllib", "urllib.parse"),
    MovedAttribute("unquote", "urllib", "urllib.parse"),
    MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
    MovedAttribute("urlencode", "urllib", "urllib.parse"),
    MovedAttribute("splitquery", "urllib", "urllib.parse"),
    MovedAttribute("splittag", "urllib", "urllib.parse"),
    MovedAttribute("splituser", "urllib", "urllib.parse"),
    MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
    MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
    MovedAttribute("uses_params", "urlparse", "urllib.parse"),
    MovedAttribute("uses_query", "urlparse", "urllib.parse"),
    MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
    setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr

Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes

# Register under both the flat and the nested module names.
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
                      "moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_error"""


# Attributes of urllib.error and their Python 2 homes.
_urllib_error_moved_attributes = [
    MovedAttribute("URLError", "urllib2", "urllib.error"),
    MovedAttribute("HTTPError", "urllib2", "urllib.error"),
    MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
    setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr

Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes

_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
                      "moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_request"""


# Attributes of urllib.request and their Python 2 homes.
_urllib_request_moved_attributes = [
    MovedAttribute("urlopen", "urllib2", "urllib.request"),
    MovedAttribute("install_opener", "urllib2", "urllib.request"),
    MovedAttribute("build_opener", "urllib2", "urllib.request"),
    MovedAttribute("pathname2url", "urllib", "urllib.request"),
    MovedAttribute("url2pathname", "urllib", "urllib.request"),
    MovedAttribute("getproxies", "urllib", "urllib.request"),
    MovedAttribute("Request", "urllib2", "urllib.request"),
    MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
    MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
    MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
    MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
    MovedAttribute("FileHandler", "urllib2", "urllib.request"),
    MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
    MovedAttribute("urlretrieve", "urllib", "urllib.request"),
    MovedAttribute("urlcleanup", "urllib", "urllib.request"),
    MovedAttribute("URLopener", "urllib", "urllib.request"),
    MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
    MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
    setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr

Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes

_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
                      "moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_response"""


# Attributes of urllib.response and their Python 2 homes.
_urllib_response_moved_attributes = [
    MovedAttribute("addbase", "urllib", "urllib.response"),
    MovedAttribute("addclosehook", "urllib", "urllib.response"),
    MovedAttribute("addinfo", "urllib", "urllib.response"),
    MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
    setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr

Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes

_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
                      "moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_robotparser"""


# Attributes of urllib.robotparser and their Python 2 homes.
_urllib_robotparser_moved_attributes = [
    MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
    setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr

Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes

_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
                      "moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):

    """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
    __path__ = []  # mark as package
    # Submodules are the already-registered lazy urllib_* modules.
    parse = _importer._get_module("moves.urllib_parse")
    error = _importer._get_module("moves.urllib_error")
    request = _importer._get_module("moves.urllib_request")
    response = _importer._get_module("moves.urllib_response")
    robotparser = _importer._get_module("moves.urllib_robotparser")

    def __dir__(self):
        return ['parse', 'error', 'request', 'response', 'robotparser']

_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
                      "moves.urllib")
def add_move(move):
    """Register *move* as an item of six.moves."""
    setattr(_MovedItems, move.name, move)
def remove_move(name):
    """Remove item from six.moves."""
    # First try the class attribute, then any value stashed directly in
    # the moves module's __dict__.
    try:
        delattr(_MovedItems, name)
        return
    except AttributeError:
        pass
    try:
        del moves.__dict__[name]
    except KeyError:
        raise AttributeError("no such move, %r" % (name,))
# Names of the function/method introspection attributes, which were
# renamed in Python 3.
if PY3:
    _meth_func = "__func__"
    _meth_self = "__self__"

    _func_closure = "__closure__"
    _func_code = "__code__"
    _func_defaults = "__defaults__"
    _func_globals = "__globals__"
else:
    _meth_func = "im_func"
    _meth_self = "im_self"

    _func_closure = "func_closure"
    _func_code = "func_code"
    _func_defaults = "func_defaults"
    _func_globals = "func_globals"


try:
    advance_iterator = next
except NameError:
    # Python < 2.6 has no next() builtin; fall back to the .next() method.
    def advance_iterator(it):
        return it.next()
next = advance_iterator


try:
    callable = callable
except NameError:
    # Python 3.0/3.1 removed callable(); emulate it via __call__ lookup.
    def callable(obj):
        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)


if PY3:
    def get_unbound_function(unbound):
        # Python 3 has no unbound methods; functions are returned as-is.
        return unbound

    create_bound_method = types.MethodType

    Iterator = object
else:
    def get_unbound_function(unbound):
        return unbound.im_func

    def create_bound_method(func, obj):
        return types.MethodType(func, obj, obj.__class__)

    class Iterator(object):
        # Bridge the Python 2 .next() protocol to __next__.
        def next(self):
            return type(self).__next__(self)

    callable = callable
_add_doc(get_unbound_function,
         """Get the function out of a possibly unbound function""")


# attrgetter-based accessors for the version-dependent attribute names.
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
# Dictionary iteration helpers: return iterators on both Python 2
# (iter*/view* methods) and Python 3 (keys/values/items views).
if PY3:
    def iterkeys(d, **kw):
        return iter(d.keys(**kw))

    def itervalues(d, **kw):
        return iter(d.values(**kw))

    def iteritems(d, **kw):
        return iter(d.items(**kw))

    def iterlists(d, **kw):
        return iter(d.lists(**kw))

    viewkeys = operator.methodcaller("keys")

    viewvalues = operator.methodcaller("values")

    viewitems = operator.methodcaller("items")
else:
    def iterkeys(d, **kw):
        return iter(d.iterkeys(**kw))

    def itervalues(d, **kw):
        return iter(d.itervalues(**kw))

    def iteritems(d, **kw):
        return iter(d.iteritems(**kw))

    def iterlists(d, **kw):
        return iter(d.iterlists(**kw))

    viewkeys = operator.methodcaller("viewkeys")

    viewvalues = operator.methodcaller("viewvalues")

    viewitems = operator.methodcaller("viewitems")

_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
         "Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
         "Return an iterator over the (key, [values]) pairs of a dictionary.")
# Text/bytes helpers whose behaviour differs between Python 2 and 3.
if PY3:
    def b(s):
        return s.encode("latin-1")

    def u(s):
        return s
    unichr = chr
    if sys.version_info[1] <= 1:
        def int2byte(i):
            return bytes((i,))
    else:
        # This is about 2x faster than the implementation above on 3.2+
        int2byte = operator.methodcaller("to_bytes", 1, "big")
    byte2int = operator.itemgetter(0)
    indexbytes = operator.getitem
    iterbytes = iter
    import io
    StringIO = io.StringIO
    BytesIO = io.BytesIO
    # unittest method names also changed between versions.
    _assertCountEqual = "assertCountEqual"
    _assertRaisesRegex = "assertRaisesRegex"
    _assertRegex = "assertRegex"
else:
    def b(s):
        return s
    # Workaround for standalone backslash

    def u(s):
        return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
    unichr = unichr
    int2byte = chr

    def byte2int(bs):
        return ord(bs[0])

    def indexbytes(buf, i):
        return ord(buf[i])
    iterbytes = functools.partial(itertools.imap, ord)
    import StringIO
    StringIO = BytesIO = StringIO.StringIO
    _assertCountEqual = "assertItemsEqual"
    _assertRaisesRegex = "assertRaisesRegexp"
    _assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
    """Dispatch to the version-appropriate TestCase.assertCountEqual."""
    return getattr(self, _assertCountEqual)(*args, **kwargs)


def assertRaisesRegex(self, *args, **kwargs):
    """Dispatch to the version-appropriate TestCase.assertRaisesRegex."""
    return getattr(self, _assertRaisesRegex)(*args, **kwargs)


def assertRegex(self, *args, **kwargs):
    """Dispatch to the version-appropriate TestCase.assertRegex."""
    return getattr(self, _assertRegex)(*args, **kwargs)
# exec / exception-reraising compatibility.  The Python 2 variants use
# syntax that is illegal on Python 3, so they are defined inside string
# literals and exec'd only on the matching interpreter.
if PY3:
    exec_ = getattr(moves.builtins, "exec")

    def reraise(tp, value, tb=None):
        if value is None:
            value = tp()
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value

else:
    def exec_(_code_, _globs_=None, _locs_=None):
        """Execute code in a namespace."""
        if _globs_ is None:
            frame = sys._getframe(1)
            _globs_ = frame.f_globals
            if _locs_ is None:
                _locs_ = frame.f_locals
            del frame
        elif _locs_ is None:
            _locs_ = _globs_
        exec("""exec _code_ in _globs_, _locs_""")

    exec_("""def reraise(tp, value, tb=None):
    raise tp, value, tb
""")


if sys.version_info[:2] == (3, 2):
    exec_("""def raise_from(value, from_value):
    if from_value is None:
        raise value
    raise value from from_value
""")
elif sys.version_info[:2] > (3, 2):
    exec_("""def raise_from(value, from_value):
    raise value from from_value
""")
else:
    def raise_from(value, from_value):
        raise value
# print() compatibility: use the builtin when available, otherwise a
# pure-Python reimplementation for Python 2.4/2.5.
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
    def print_(*args, **kwargs):
        """The new-style print function for Python 2.4 and 2.5."""
        fp = kwargs.pop("file", sys.stdout)
        if fp is None:
            return

        def write(data):
            if not isinstance(data, basestring):
                data = str(data)
            # If the file has an encoding, encode unicode with it.
            if (isinstance(fp, file) and
                    isinstance(data, unicode) and
                    fp.encoding is not None):
                errors = getattr(fp, "errors", None)
                if errors is None:
                    errors = "strict"
                data = data.encode(fp.encoding, errors)
            fp.write(data)
        # Promote sep/end/args to unicode output if any of them is unicode.
        want_unicode = False
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if isinstance(sep, unicode):
                want_unicode = True
            elif not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        end = kwargs.pop("end", None)
        if end is not None:
            if isinstance(end, unicode):
                want_unicode = True
            elif not isinstance(end, str):
                raise TypeError("end must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        if not want_unicode:
            for arg in args:
                if isinstance(arg, unicode):
                    want_unicode = True
                    break
        if want_unicode:
            newline = unicode("\n")
            space = unicode(" ")
        else:
            newline = "\n"
            space = " "
        if sep is None:
            sep = space
        if end is None:
            end = newline
        for i, arg in enumerate(args):
            if i:
                write(sep)
            write(arg)
        write(end)
if sys.version_info[:2] < (3, 3):
    _print = print_

    def print_(*args, **kwargs):
        # Add Python 3.3's flush keyword on top of the wrapped print.
        fp = kwargs.get("file", sys.stdout)
        flush = kwargs.pop("flush", False)
        _print(*args, **kwargs)
        if flush and fp is not None:
            fp.flush()
_add_doc(reraise, """Reraise an exception.""")

# functools.wraps only sets __wrapped__ from Python 3.4 on; backfill it
# for older versions.
if sys.version_info[0:2] < (3, 4):
    def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
              updated=functools.WRAPPER_UPDATES):
        def wrapper(f):
            f = functools.wraps(wrapped, assigned, updated)(f)
            f.__wrapped__ = wrapped
            return f
        return wrapper
else:
    wraps = functools.wraps
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass."""
    # A throwaway metaclass that participates in exactly one class
    # creation: when the caller's class statement is executed, Python
    # invokes ``__new__`` below, which discards the temporary base and
    # builds the real class with the real metaclass and bases.
    class _TemporaryMeta(meta):
        def __new__(mcs, name, this_bases, namespace):
            return meta(name, bases, namespace)
    # ``type.__new__`` is used directly so that ``_TemporaryMeta.__new__``
    # is NOT triggered while constructing the dummy base itself.
    return type.__new__(_TemporaryMeta, 'temporary_class', (), {})
def add_metaclass(metaclass):
    """Class decorator for creating a class with a metaclass."""
    def wrapper(cls):
        # Rebuild the class namespace from the decorated class, stripping
        # out the artifacts that class creation itself inserts.
        namespace = dict(cls.__dict__)
        declared_slots = namespace.get('__slots__')
        if declared_slots is not None:
            if isinstance(declared_slots, str):
                declared_slots = [declared_slots]
            # Remove the member descriptors; the new class re-creates them
            # from the (retained) ``__slots__`` declaration.
            for slot_name in declared_slots:
                namespace.pop(slot_name)
        namespace.pop('__dict__', None)
        namespace.pop('__weakref__', None)
        return metaclass(cls.__name__, cls.__bases__, namespace)
    return wrapper
def python_2_unicode_compatible(klass):
    """
    A decorator that defines __unicode__ and __str__ methods under Python 2.
    Under Python 3 it does nothing.
    To support Python 2 and 3 with a single code base, define a __str__ method
    returning text and apply this decorator to the class.
    """
    # Guard clause: on Python 3 the class is returned untouched.
    if not PY2:
        return klass
    if '__str__' not in klass.__dict__:
        raise ValueError("@python_2_unicode_compatible cannot be applied "
                         "to %s because it doesn't define __str__()." %
                         klass.__name__)
    # Move the text-returning __str__ to __unicode__ and install a byte
    # encoding __str__, as Python 2 expects.
    klass.__unicode__ = klass.__str__
    klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
    return klass
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = []  # required for PEP 302 and PEP 451
__package__ = __name__  # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
    __spec__.submodule_search_locations = []  # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
    for i, importer in enumerate(sys.meta_path):
        # Here's some real nastiness: Another "instance" of the six module might
        # be floating around. Therefore, we can't use isinstance() to check for
        # the six meta path importer, since the other six instance will have
        # inserted an importer with different class.
        if (
            type(importer).__name__ == "_SixMetaPathImporter"
            and importer.name == __name__
        ):
            del sys.meta_path[i]
            break
    del i, importer
# Finally, add the importer to the meta path import hook.
# Fix: removed the trailing "| PypiClean" extraction artifact that made
# this statement a syntax error.
sys.meta_path.append(_importer)
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
    """ Gaussian distribution class for calculating and
    visualizing a Gaussian distribution.

    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats extracted from the data file
    """

    def __init__(self, mu=0, sigma=1):
        Distribution.__init__(self, mu, sigma)

    def calculate_mean(self):
        """Function to calculate the mean of the data set.

        Args:
            None

        Returns:
            float: mean of the data set
        """
        # Note: raises ZeroDivisionError if read_data_file() has not
        # populated self.data yet.
        avg = 1.0 * sum(self.data) / len(self.data)
        self.mean = avg
        return self.mean

    def calculate_stdev(self, sample=True):
        """Function to calculate the standard deviation of the data set.

        Args:
            sample (bool): whether the data represents a sample or population

        Returns:
            float: standard deviation of the data set
        """
        # Bessel's correction (n - 1) for sample standard deviation.
        if sample:
            n = len(self.data) - 1
        else:
            n = len(self.data)
        mean = self.calculate_mean()
        sigma = 0
        for d in self.data:
            sigma += (d - mean) ** 2
        sigma = math.sqrt(sigma / n)
        self.stdev = sigma
        return self.stdev

    def plot_histogram(self):
        """Function to output a histogram of the instance variable data using
        matplotlib pyplot library.

        Args:
            None

        Returns:
            None
        """
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')

    def pdf(self, x):
        """Probability density function calculator for the gaussian distribution.

        Args:
            x (float): point for calculating the probability density function

        Returns:
            float: probability density function output
        """
        return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)

    def plot_histogram_pdf(self, n_spaces = 50):
        """Function to plot the normalized histogram of the data and a plot of the
        probability density function along the same range

        Args:
            n_spaces (int): number of data points

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        mu = self.mean
        sigma = self.stdev
        min_range = min(self.data)
        max_range = max(self.data)
        # calculates the interval between x values
        interval = 1.0 * (max_range - min_range) / n_spaces
        x = []
        y = []
        # calculate the x values to visualize
        for i in range(n_spaces):
            tmp = min_range + interval*i
            x.append(tmp)
            y.append(self.pdf(tmp))
        # make the plots
        fig, axes = plt.subplots(2, sharex=True)
        fig.subplots_adjust(hspace=.5)
        axes[0].hist(self.data, density=True)
        axes[0].set_title('Normed Histogram of Data')
        axes[0].set_ylabel('Density')
        axes[1].plot(x, y)
        axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        # Fix: label the second subplot; the original set axes[0]'s ylabel
        # twice and left axes[1] unlabeled.
        axes[1].set_ylabel('Density')
        plt.show()
        return x, y

    def __add__(self, other):
        """Function to add together two Gaussian distributions

        Args:
            other (Gaussian): Gaussian instance

        Returns:
            Gaussian: Gaussian distribution
        """
        result = Gaussian()
        result.mean = self.mean + other.mean
        # Sum of independent Gaussians: variances add.
        result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
        return result

    def __repr__(self):
        """Function to output the characteristics of the Gaussian instance

        Args:
            None

        Returns:
            string: characteristics of the Gaussian
        """
        return "mean {}, standard deviation {}".format(self.mean, self.stdev)
# pylint: disable = C0301
from __future__ import (division, print_function, absolute_import,
unicode_literals)
import sys
import re
import collections
import fractions
from . import escape
from . import grammar
from . import tooling
# On Python 2, rebind ``str`` to the text type so the rest of the module
# can use ``str`` uniformly for unicode strings.
if sys.version_info.major == 2:
    str = unicode
class BespONEncoder(object):
'''
Encode BespON.
'''
def __init__(self, *args, **kwargs):
if args:
raise TypeError('Explicit keyword arguments are required')
only_ascii_source = kwargs.pop('only_ascii_source', False)
only_ascii_unquoted = kwargs.pop('only_ascii_unquoted', True)
aliases = kwargs.pop('aliases', True)
circular_references = kwargs.pop('circular_references', False)
integers = kwargs.pop('integers', True)
hex_floats = kwargs.pop('hex_floats', False)
extended_types = kwargs.pop('extended_types', False)
python_types = kwargs.pop('python_types', False)
baseclass = kwargs.pop('baseclass', False)
trailing_commas = kwargs.pop('trailing_commas', False)
compact_inline = kwargs.pop('compact_inline', False)
if not all(x in (True, False) for x in (only_ascii_source, only_ascii_unquoted,
aliases, circular_references,
integers, hex_floats, extended_types, python_types,
baseclass, trailing_commas, compact_inline)):
raise TypeError
self.only_ascii_source = only_ascii_source
self.only_ascii_unquoted = only_ascii_unquoted
self.aliases = aliases
self.circular_references = circular_references
self.integers = integers
self.hex_floats = hex_floats
self.extended_types = extended_types
self.python_types = python_types
self.baseclass = baseclass
self.trailing_commas = trailing_commas
self.compact_inline = compact_inline
max_nesting_depth = kwargs.pop('max_nesting_depth', grammar.PARAMS['max_nesting_depth'])
max_section_depth = kwargs.pop('max_section_depth', 0)
inline_depth = kwargs.pop('inline_depth', max_nesting_depth+1)
if not all(isinstance(x, int) for x in (max_nesting_depth, max_section_depth, inline_depth)):
raise TypeError
if not all(x >= 0 for x in (max_nesting_depth, max_section_depth, inline_depth)):
raise ValueError
self.max_nesting_depth = max_nesting_depth
self.max_section_depth = max_section_depth
self.inline_depth = inline_depth
nesting_indent = kwargs.pop('nesting_indent', grammar.LIT_GRAMMAR['nesting_indent'])
start_list_item = kwargs.pop('start_list_item', grammar.LIT_GRAMMAR['start_list_item'])
flush_start_list_item = kwargs.pop('flush_start_list_item', grammar.LIT_GRAMMAR['flush_start_list_item'])
if not all(isinstance(x, str) and x for x in (nesting_indent, start_list_item, flush_start_list_item)):
raise TypeError
if nesting_indent.lstrip(grammar.LIT_GRAMMAR['indent']):
raise ValueError
self.nesting_indent = nesting_indent
if (start_list_item.count(grammar.LIT_GRAMMAR['open_indentation_list']) != 1 or
start_list_item[0] not in grammar.LIT_GRAMMAR['indent'] or
start_list_item.strip(grammar.LIT_GRAMMAR['indent_or_open_indentation_list'])):
raise ValueError
if (flush_start_list_item[0] != grammar.LIT_GRAMMAR['open_indentation_list'] or
start_list_item.strip(grammar.LIT_GRAMMAR['indent_or_open_indentation_list'])):
raise ValueError
self.start_list_item = start_list_item
self.flush_start_list_item = flush_start_list_item
before_open, after_open = start_list_item.split(grammar.LIT_GRAMMAR['open_indentation_list'])
self._start_list_item_indent = before_open
self._start_list_item_open = start_list_item[:-len(after_open)]
self._list_item_leading = after_open
if before_open[-1:] == '\t' and after_open[:1] == '\t':
self._list_item_indent = start_list_item.replace(grammar.LIT_GRAMMAR['open_indentation_list'], '')
else:
self._list_item_indent = start_list_item.replace(grammar.LIT_GRAMMAR['open_indentation_list'], '\x20')
self._flush_start_list_item_indent = ''
self._flush_start_list_item_open = flush_start_list_item[0]
self._flush_list_item_leading = flush_start_list_item[1:]
if flush_start_list_item[1:2] == '\t':
self._flush_list_item_indent = flush_start_list_item[1:]
else:
self._flush_list_item_indent = '\x20' + flush_start_list_item[1:]
if kwargs:
raise TypeError('Unexpected keyword argument(s) {0}'.format(', '.join('"{0}"'.format(k) for k in kwargs)))
self._escape = escape.Escape(only_ascii_source=only_ascii_source)
self._escape_unicode = self._escape.escape_unicode
self._escape_bytes = self._escape.escape_bytes
self._invalid_literal_unicode_re = self._escape.invalid_literal_unicode_re
self._invalid_literal_bytes_re = self._escape.invalid_literal_bytes_re
if only_ascii_unquoted:
self._unquoted_str_re = re.compile(grammar.RE_GRAMMAR['valid_terminated_unquoted_string_ascii'])
else:
self._unquoted_str_re = re.compile(grammar.RE_GRAMMAR['valid_terminated_unquoted_string_unicode'])
self._unquoted_bytes_re = re.compile(grammar.RE_GRAMMAR['valid_terminated_unquoted_string_ascii'].encode('ascii'))
self._line_terminator_unicode_re = re.compile(grammar.RE_GRAMMAR['line_terminator_unicode'])
self._line_terminator_bytes_re = re.compile(grammar.RE_GRAMMAR['line_terminator_ascii'].encode('ascii'))
self.bidi_rtl_re = re.compile(grammar.RE_GRAMMAR['bidi_rtl'])
encode_funcs = {type(None): self._encode_none,
type(True): self._encode_bool,
type(1): self._encode_int if integers else self._encode_int_as_float,
type(1.0): self._encode_float,
type('a'): self._encode_str,
type(b'a'): self._encode_bytes,
type([]): self._encode_list,
type({}): self._encode_dict}
extended_types_encode_funcs = {type(1j): self._encode_complex,
type(fractions.Fraction()): self._encode_rational,
type(collections.OrderedDict()): self._encode_odict,
type(set()): self._encode_set}
python_types_encode_funcs = {type(tuple()): self._encode_tuple}
if self.extended_types:
encode_funcs.update(extended_types_encode_funcs)
if self.python_types:
encode_funcs.update(python_types_encode_funcs)
if not baseclass:
def encode_func_factory(t):
if t in extended_types_encode_funcs:
raise TypeError('Unsupported type {0} (extended_types=False)'.format(t))
if t in python_types_encode_funcs:
raise TypeError('Unsupported type {0} (python_types=False)'.format(t))
raise TypeError('Unsupported type {0}'.format(t))
else:
def encode_func_factory(t, issubclass=issubclass):
for k, v in encode_funcs.items():
if issubclass(t, k):
return v
if t in extended_types_encode_funcs:
raise TypeError('Unsupported type {0} (extended_types=False)'.format(t))
if t in python_types_encode_funcs:
raise TypeError('Unsupported type {0} (python_types=False)'.format(t))
raise TypeError('Unsupported type {0}'.format(t))
self._encode_funcs = tooling.keydefaultdict(encode_func_factory)
self._encode_funcs.update(encode_funcs)
def _reset(self):
'''
Reset everything in preparation for the next run.
'''
self._buffer = []
self._nesting_depth = 0
self._scalar_bidi_rtl = False
self._obj_path = collections.OrderedDict()
self._alias_counter = 0
self._alias_values = {}
self._alias_def_template = {}
self._alias_def_buffer_index = {}
def _free(self):
'''
Free up memory used in last run.
'''
self._buffer = None
self._obj_path = None
self._alias_values = None
self._alias_def_template = None
self._alias_def_buffer_index = None
def _encode_none(self, obj,
flush_margin=False, inline=False, at_line_start=True, indent='', leading='', after_start_list_item=False, key=False, key_path=False, value=False,
none_type=grammar.LIT_GRAMMAR['none_type']):
self._buffer.append(leading)
self._buffer.append(none_type)
def _encode_bool(self, obj,
flush_margin=False, inline=False, at_line_start=True, indent='', leading='', after_start_list_item=False, key=False, key_path=False, value=False,
bool_true=grammar.LIT_GRAMMAR['bool_true'],
bool_false=grammar.LIT_GRAMMAR['bool_false']):
self._buffer.append(leading)
self._buffer.append(bool_true if obj else bool_false)
def _encode_int(self, obj,
flush_margin=False, inline=False, at_line_start=True, indent='', leading='', after_start_list_item=False, key=False, key_path=False, value=False,
num_base=10,
hex_template='{0}{{0:0x}}'.format(grammar.LIT_GRAMMAR['hex_prefix']),
oct_template='{0}{{0:0o}}'.format(grammar.LIT_GRAMMAR['oct_prefix']),
bin_template='{0}{{0:0b}}'.format(grammar.LIT_GRAMMAR['bin_prefix']),
str=str):
if key_path:
raise TypeError('Ints are not valid in key paths')
self._buffer.append(leading)
if num_base == 10:
self._buffer.append(str(obj))
return
if num_base == 16:
self._buffer.append(hex_template.format(obj))
return
if num_base == 8:
self._buffer.append(oct_template.format(obj))
return
if num_base == 2:
self._buffer.append(bin_template.format(obj))
return
raise ValueError('Unknown base {0}'.format(num_base))
def _encode_int_as_float(self, obj, float=float, **kwargs):
# Extremely large ints won't be silently converted to inf, because
# `float()` raises an OverflowError.
self._encode_float(float(obj), **kwargs)
    def _encode_float(self, obj,
                      flush_margin=False, inline=False, at_line_start=True, indent='', leading='', after_start_list_item=False, key=False, key_path=False, value=False,
                      num_base=None,
                      hex_exponent_letter=grammar.LIT_GRAMMAR['hex_exponent_letter'][0],
                      str=str):
        '''
        Append a float to the output buffer, in decimal (repr) form or,
        for base 16, in hex form derived from `float.hex()`.  When the
        encoder was created with `hex_floats=True`, base 16 is forced and
        an explicit `num_base` other than 16 is a ValueError.
        '''
        if key:
            raise TypeError('Floats are not valid dict keys')
        self._buffer.append(leading)
        if self.hex_floats:
            if num_base is not None:
                if num_base != 16:
                    raise ValueError
            else:
                num_base = 16
        if num_base is None or num_base == 10:
            self._buffer.append(str(obj))
            return
        if num_base == 16:
            # float.hex() yields e.g. '0x1.8p+1'; trim trailing zeros from
            # the mantissa but keep at least one digit after the point,
            # then rejoin with the grammar's exponent letter.
            num, exp = obj.hex().split('p')
            num = num.rstrip('0')
            if num[-1] == '.':
                num += '0'
            self._buffer.append(num + hex_exponent_letter + exp)
            return
        raise ValueError('Unknown base {0}'.format(num_base))
def _encode_complex(self, obj,
flush_margin=False, inline=False, at_line_start=True, indent='', leading='', after_start_list_item=False, key=False, key_path=False, value=False,
num_base=None,
hex_exponent_letter=grammar.LIT_GRAMMAR['hex_exponent_letter'][0],
dec_float_zero=grammar.LIT_GRAMMAR['dec_float_zero'],
hex_float_zero=grammar.LIT_GRAMMAR['hex_float_zero'],
imaginary_unit=grammar.LIT_GRAMMAR['imaginary_unit'],
str=str):
if key:
raise TypeError('Complex floats are not valid dict keys')
self._buffer.append(leading)
if self.hex_floats:
if num_base is not None:
if num_base != 16:
raise ValueError
else:
num_base = 16
real = obj.real
imag = obj.imag
if num_base is None or num_base == 10:
if real == 0.0:
self._buffer.append(str(imag) + imaginary_unit)
return
if imag == 0.0:
self._buffer.append(str(real) + '+' + dec_float_zero + imaginary_unit)
return
if imag < 0.0:
self._buffer.append(str(real) + str(imag) + imaginary_unit)
return
self._buffer.append(str(real) + '+' + str(imag) + imaginary_unit)
return
if num_base == 16:
if real == 0.0:
num_imag, exp_imag = imag.hex().split('p')
num_imag = num_imag.rstrip('0')
if num_imag[-1] == '.':
num_imag += '0'
self._buffer.append(num_imag + hex_exponent_letter + exp_imag + imaginary_unit)
return
if imag == 0.0:
num_real, exp_real = real.hex().split('p')
num_real = num_real.rstrip('0')
if num_real[-1] == '.':
num_real += '0'
self._buffer.append(num_real + hex_exponent_letter + exp_real + '+' + hex_float_zero + imaginary_unit)
return
num_real, exp_real = real.hex().split('p')
num_real = num_real.rstrip('0')
if num_real[-1] == '.':
num_real += '0'
num_imag, exp_imag = imag.hex().split('p')
num_imag = num_imag.rstrip('0')
if num_imag[-1] == '.':
num_imag += '0'
if imag < 0.0:
self._buffer.append(num_real + hex_exponent_letter + exp_real + num_imag + hex_exponent_letter + exp_imag + imaginary_unit)
return
self._buffer.append(num_real + hex_exponent_letter + exp_real + '+' + num_imag + hex_exponent_letter + exp_imag + imaginary_unit)
return
raise ValueError('Unknown base {0}'.format(num_base))
def _encode_rational(self, obj,
flush_margin=False, inline=False, at_line_start=True, indent='', leading='', after_start_list_item=False, key=False, key_path=False, value=False,
str=str):
if key:
raise TypeError('Rational numbers are not valid dict keys')
self._buffer.append(leading)
self._buffer.append(str(obj))
    def _encode_str(self, obj,
                    flush_margin=False, inline=False, at_line_start=True, indent='', leading='', after_start_list_item=False, key=False, key_path=False, value=False,
                    delim=None, block=None,
                    string_delim_seq_set=grammar.LIT_GRAMMAR['string_delim_seq_set'],
                    len=len):
        '''
        Append a string to the output buffer, choosing among unquoted,
        single/double/backquoted inline, and multi-line block forms based
        on the string's contents and the requested delimiter.
        '''
        # There is a lot of logic here to cover round-tripping. In that
        # scenario, delimiter style should be preserved whenever reasonable.
        if delim is None:
            # Unquoted form, if the string matches the unquoted pattern.
            if self._unquoted_str_re.match(obj) is not None:
                self._scalar_bidi_rtl = self.bidi_rtl_re.search(obj) is not None
                self._buffer.append(leading)
                self._buffer.append(obj)
                return
            delim_char = '"'
        elif delim in string_delim_seq_set:
            delim_char = delim[0]
        else:
            raise ValueError
        if key_path:
            # Reaching here means the string failed the unquoted test or a
            # delimiter was requested; neither is legal in a key path.
            if delim is None:
                raise ValueError('String does not match the required pattern for a key path element')
            raise ValueError('Key path elements cannot be quoted')
        if inline and self.compact_inline:
            # Compact inline output always uses escaped double quotes.
            self._buffer.append(leading)
            self._buffer.append('"{0}"'.format(self._escape_unicode(obj, '"', inline=True, bidi_rtl=True)))
            return
        if self._line_terminator_unicode_re.search(obj) is None:
            # Single-line string: inline quoted forms.
            self._scalar_bidi_rtl = self.bidi_rtl_re.search(obj) is not None
            self._buffer.append(leading)
            if delim_char == "'":
                if "'" not in obj[1:-1]:
                    self._buffer.append("'{0}'".format(self._escape_unicode(obj, "'", inline=True)))
                    return
                self._buffer.append("'''{0}'''".format(self._escape_unicode(obj, "'", inline=True, multidelim=True)))
                return
            if delim_char == '"' or obj == '' or self._invalid_literal_unicode_re.search(obj) is not None:
                if '"' not in obj[1:-1]:
                    self._buffer.append('"{0}"'.format(self._escape_unicode(obj, '"', inline=True)))
                    return
                self._buffer.append('"""{0}"""'.format(self._escape_unicode(obj, '"', inline=True, multidelim=True)))
                return
            # Literal (backquoted) forms; widen the delimiter, padding with
            # a space when the content begins/ends with a backquote.
            if '`' not in obj:
                self._buffer.append('`{0}`'.format(obj))
                return
            if '``' not in obj:
                if obj[0] == '`':
                    open_delim = '``\x20'
                else:
                    open_delim = '``'
                if obj[-1] == '`':
                    close_delim = '\x20``'
                else:
                    close_delim = '``'
                self._buffer.append(open_delim + obj + close_delim)
                return
            if '```' not in obj:
                if obj[0] == '`':
                    open_delim = '```\x20'
                else:
                    open_delim = '```'
                if obj[-1] == '`':
                    close_delim = '\x20```'
                else:
                    close_delim = '```'
                self._buffer.append(open_delim + obj + close_delim)
                return
            # Fall back to escaped double quotes.
            if '"' not in obj[1:-1]:
                self._buffer.append('"{0}"'.format(self._escape_unicode(obj, '"', inline=True)))
                return
            self._buffer.append('"""{0}"""'.format(self._escape_unicode(obj, '"', inline=True, multidelim=True)))
            return
        # Multi-line string: block form `|<delim> ... |<delim>/`.
        if at_line_start:
            self._buffer.append(leading)
        else:
            indent += self.nesting_indent
            self._buffer.append('\n' + indent)
        template = '|{0}\n{1}{2}|{0}/'
        if obj[-1] != '\n' or self._invalid_literal_unicode_re.search(obj) is not None:
            # Content needs escaping (or lacks a final newline, which is
            # represented by an escaped line continuation).
            if delim_char == '`':
                delim_char = '"'
            obj_encoded = self._escape_unicode(obj, delim_char, multidelim=True)
            obj_encoded_lines = obj_encoded.splitlines(True)
            if obj_encoded_lines[-1][-1:] != '\n':
                obj_encoded_lines[-1] += '\\\n'
            obj_encoded_indented = ''.join([indent + line for line in obj_encoded_lines])
            self._buffer.append(template.format(delim_char*3, obj_encoded_indented, indent))
            return
        if delim_char*3 not in obj:
            obj_lines = obj.splitlines(True)
            obj_indented = ''.join([indent + line for line in obj_lines])
            self._buffer.append(template.format(delim_char*3, obj_indented, indent))
            return
        if delim_char*6 not in obj:
            obj_lines = obj.splitlines(True)
            obj_indented = ''.join([indent + line for line in obj_lines])
            self._buffer.append(template.format(delim_char*6, obj_indented, indent))
            return
        obj_encoded = self._escape_unicode(obj, '"', multidelim=True)
        obj_encoded_lines = obj_encoded.splitlines(True)
        obj_encoded_indented = ''.join([indent + line for line in obj_encoded_lines])
        self._buffer.append(template.format('"""', obj_encoded_indented, indent))
        return
def _encode_bytes(self, obj,
flush_margin=False, inline=False, at_line_start=True, indent='', leading='', after_start_list_item=False, key=False, key_path=False, value=False,
delim=None, block=None,
string_delim_seq_set=grammar.LIT_GRAMMAR['string_delim_seq_set']):
if key_path:
raise TypeError('Bytes type cannot be used in key paths')
tag = '(bytes)> '
if delim is None:
if self._unquoted_bytes_re.match(obj) is not None:
self._buffer.append(leading)
self._buffer.append(tag + obj.decode('ascii'))
return
delim_char = '"'
delim_char_bytes = b'"'
elif delim in string_delim_seq_set:
delim_char = delim[0]
delim_char_bytes = delim_char.encode('ascii')
else:
raise ValueError
if inline and self.compact_inline:
self._buffer.append(leading)
self._buffer.append('"{0}"'.format(self._escape_bytes(obj, '"', inline=True).decode('ascii')))
return
if self._line_terminator_bytes_re.search(obj) is None:
self._buffer.append(leading)
if delim_char == "'":
if b"'" not in obj[1:-1]:
self._buffer.append(tag + "'{0}'".format(self._escape_bytes(obj, "'", inline=True).decode('ascii')))
return
self._buffer.append(tag + "'''{0}'''".format(self._escape_bytes(obj, "'", inline=True, multidelim=True).decode('ascii')))
return
if delim_char == '"' or obj == b'' or self._invalid_literal_bytes_re.search(obj) is not None:
if b'"' not in obj[1:-1]:
self._buffer.append(tag + '"{0}"'.format(self._escape_bytes(obj, '"', inline=True).decode('ascii')))
return
self._buffer.append(tag + '"""{0}"""'.format(self._escape_bytes(obj, '"', inline=True, multidelim=True).decode('ascii')))
return
if b'`' not in obj:
self._buffer.append(tag + '`{0}`'.format(obj.decode('ascii')))
return
if b'``' not in obj:
if obj[:1] == b'`':
open_delim = '``\x20'
else:
open_delim = '``'
if obj[-1:] == b'`':
close_delim = '\x20``'
else:
close_delim = '``'
self._buffer.append(tag + open_delim + obj.decode('ascii') + close_delim)
return
if '```' not in obj:
if obj[:1] == b'`':
open_delim = '```\x20'
else:
open_delim = '```'
if obj[-1:] == b'`':
close_delim = '\x20```'
else:
close_delim = '```'
self._buffer.append(tag + open_delim + obj.decode('ascii') + close_delim)
return
if b'"' not in obj[1:-1]:
self._buffer.append(tag +'"{0}"'.format(self._escape_bytes(obj, '"', inline=True).decode('ascii')))
return
self._buffer.append(tag + '"""{0}"""'.format(self._escape_bytes(obj, '"', inline=True, multidelim=True).decode('ascii')))
return
if at_line_start:
self._buffer.append(leading)
else:
indent += self.nesting_indent
self._buffer.append('\n' + indent)
tag = '(bytes)>\n' + indent
template = '|{0}\n{1}{2}|{0}/'
if obj[-1] != b'\n' or self._invalid_literal_bytes_re.search(obj) is not None:
if delim_char == '`':
delim_char = '"'
obj_encoded = self._escape_bytes(obj, delim_char, multidelim=True).decode('ascii')
obj_encoded_lines = obj_encoded.splitlines(True)
if obj_encoded_lines[-1][-1:] != '\n':
obj_encoded_lines[-1] += '\\\n'
obj_encoded_indented = ''.join([indent + line for line in obj_encoded_lines])
self._buffer.append(tag + template.format(delim_char*3, obj_encoded_indented, indent))
return
if delim_char_bytes*3 not in obj:
obj_lines = obj.decode('ascii').splitlines(True)
obj_indented = ''.join([indent + line for line in obj_lines])
self._buffer.append(tag + template.format(delim_char*3, obj_indented, indent))
return
if delim_char_bytes*6 not in obj:
obj_lines = obj.decode('ascii').splitlines(True)
obj_indented = ''.join([indent + line for line in obj_lines])
self._buffer.append(tag + template.format(delim_char*6, obj_indented, indent))
return
obj_encoded = self._escape_bytes(obj, '"', multidelim=True).decode('ascii')
obj_encoded_lines = obj_encoded.splitlines(True)
obj_encoded_indented = ''.join([indent + line for line in obj_encoded_lines])
self._buffer.append(tag + template.format('"""', obj_encoded_indented, indent))
    def _encode_doc_comment(self, obj,
                            flush_margin=False, inline=False, at_line_start=True, indent='', leading='', after_start_list_item=False, key=False, key_path=False, value=False,
                            delim=None, block=None,
                            doc_comment_delim_seq_set=grammar.LIT_GRAMMAR['doc_comment_delim_seq_set'],):
        '''
        Append a doc comment (`### ... ###`) to the output buffer,
        widening the `#` delimiter as needed so it does not occur in the
        comment text, and falling back to block form for multi-line or
        right-to-left content.
        '''
        if key_path:
            raise TypeError('Key paths do not take doc comments')
        # Any requested delimiter is normalized to the base '###' before
        # widening below.
        if delim is None:
            delim = '###'
        elif delim in doc_comment_delim_seq_set:
            delim = '###'
        else:
            raise ValueError
        if self._invalid_literal_unicode_re.search(obj) is not None:
            raise ValueError('Invalid literal code point')
        # Widen the delimiter until it no longer appears in the text, then
        # make sure the result is still a grammar-valid delimiter length.
        while delim in obj:
            delim += '###'
        if delim not in doc_comment_delim_seq_set:
            raise ValueError('Cannot create comment since all valid escape sequences of "#" appear literally within the comment text')
        if not at_line_start:
            indent += self.nesting_indent
            self._buffer.append('\n' + indent)
        if self._line_terminator_unicode_re.search(obj) or self.bidi_rtl_re.search(obj):
            # Block form; add a final newline before the closing delimiter
            # if the text does not already end with one.
            if obj[-1] != '\n':
                self._buffer.append('|{0}\n{1}{2}\n{1}|{0}/'.format(delim, indent, indent.join(obj.splitlines(True))))
                return
            self._buffer.append('|{0}\n{1}{2}{1}|{0}/'.format(delim, indent, indent.join(obj.splitlines(True))))
            return
        self._buffer.append('{0}{1}{0}'.format(delim, obj))
def _encode_line_comment(self, obj,
flush_margin=False, inline=False, at_line_start=True, indent='', leading='', after_start_list_item=False, key=False, key_path=False, value=False,
delim=None, block=None):
if self._invalid_literal_unicode_re.search(obj) is not None:
raise ValueError('Invalid literal code point')
if self._line_terminator_unicode_re.search(obj):
raise ValueError('Line comments cannot contain literal newlines')
self._buffer.append(leading)
self._buffer.append('#' + obj)
    def _encode_alias(self, obj,
                      flush_margin=False, inline=False, at_line_start=True, indent='', leading='', after_start_list_item=False, key=False, key_path=False, value=False,
                      alias_prefix=grammar.LIT_GRAMMAR['alias_prefix'],
                      alias_basename='obj', id=id, str=str):
        '''
        Append an alias reference for a previously encoded object.  On the
        first alias use, a label name is generated and patched back into
        the buffer slot reserved at the object's definition site.
        '''
        id_obj = id(obj)
        if not self.aliases:
            raise ValueError('Objects appeared multiple times but aliasing is not enabled (aliases=False)')
        # An object still on the active path means the reference is circular.
        if id_obj in self._obj_path and not self.circular_references:
            raise ValueError('Circular references were encountered but are not enabled (circular_references=False)')
        alias_value = self._alias_values[id_obj]
        if alias_value is None:
            # First reference: mint a label and retroactively write the
            # label definition into the placeholder buffer entry that was
            # reserved when the object itself was encoded.
            self._alias_counter += 1
            alias_value = alias_basename + str(self._alias_counter)
            if alias_basename != 'obj':
                # Non-default basenames may contain right-to-left text.
                self._scalar_bidi_rtl = self.bidi_rtl_re.search(alias_value) is not None
            self._alias_values[id_obj] = alias_value
            self._buffer[self._alias_def_buffer_index[id_obj]] = self._alias_def_template[id_obj].format(alias_value)
        self._buffer.append(leading)
        self._buffer.append(alias_prefix + alias_value)
    def _encode_list(self, obj,
                     flush_margin=False, inline=False, at_line_start=True, indent='', leading='', after_start_list_item=False, key=False, key_path=False, value=False,
                     explicit_type=None,
                     start_inline_list=grammar.LIT_GRAMMAR['start_inline_list'],
                     end_inline_list=grammar.LIT_GRAMMAR['end_inline_list'],
                     indent_chars=grammar.LIT_GRAMMAR['indent'],
                     id=id, len=len, type=type):
        '''
        Append a list to the output buffer, in inline form (`[...]`) when
        empty or at/inside `inline_depth`, otherwise in indentation-style
        form with one `*` item per line.  A placeholder buffer slot is
        reserved so `_encode_alias` can later patch in a label definition.
        '''
        if key:
            raise TypeError('List-like objects are not supported as dict keys')
        id_obj = id(obj)
        # A repeat appearance is emitted as an alias reference.
        # NOTE(review): `leading` is not forwarded here — confirm alias
        # references never need the leading text.
        if id_obj in self._alias_values:
            self._encode_alias(obj)
            return
        self._obj_path[id_obj] = None
        self._alias_values[id_obj] = None
        if not inline:
            inline = self._nesting_depth >= self.inline_depth
        self._nesting_depth += 1
        if self._nesting_depth > self.max_nesting_depth:
            raise TypeError('Max nesting depth for collections was exceeded; max depth = {0}'.format(self.max_nesting_depth))
        if not obj:
            # Empty list: always inline.  The empty buffer entry reserves
            # the slot where a `label=` definition may be patched in later.
            self._buffer.append(leading)
            if explicit_type is None:
                self._alias_def_buffer_index[id_obj] = len(self._buffer)
                self._buffer.append('')
                self._alias_def_template[id_obj] = '(label={0})>\x20'
            else:
                self._buffer.append('({0}'.format(explicit_type))
                self._alias_def_buffer_index[id_obj] = len(self._buffer)
                self._buffer.append('')
                self._buffer.append(')>\x20')
                self._alias_def_template[id_obj] = ', label={0}'
            self._buffer.append(start_inline_list + end_inline_list)
            self._obj_path.popitem()
            self._nesting_depth -= 1
            return
        if inline:
            self._buffer.append(leading)
            if explicit_type is None:
                self._alias_def_buffer_index[id_obj] = len(self._buffer)
                self._buffer.append('')
                self._alias_def_template[id_obj] = '(label={0})>\x20'
            else:
                self._buffer.append('({0}'.format(explicit_type))
                self._alias_def_buffer_index[id_obj] = len(self._buffer)
                self._buffer.append('')
                self._buffer.append(')>\x20')
                self._alias_def_template[id_obj] = ', label={0}'
            internal_indent = indent + self.nesting_indent
            if self.compact_inline:
                # Single-line `[a, b, c]`; the last separator is rewritten
                # in place depending on the trailing-comma setting.
                self._buffer.append(start_inline_list)
                for item in obj:
                    self._encode_funcs[type(item)](item, inline=inline, at_line_start=False, indent=internal_indent)
                    self._buffer.append(',\x20')
                if self.trailing_commas:
                    self._buffer[-1] = ','
                else:
                    self._buffer[-1] = ''
                self._buffer.append(end_inline_list)
            else:
                # Multi-line inline form, one item per indented line.
                self._buffer.append(start_inline_list + '\n')
                for item in obj:
                    self._buffer.append(internal_indent)
                    self._encode_funcs[type(item)](item, inline=inline, indent=internal_indent)
                    self._buffer.append(',\n')
                if not self.trailing_commas:
                    self._buffer[-1] = '\n'
                self._buffer.append(indent + end_inline_list)
        else:
            # Indentation-style list.
            if after_start_list_item or not at_line_start:
                self._buffer.append('\n')
            # Flush-left items use the `*`-first layout; nested items use
            # the configured indented layout.
            if flush_margin or after_start_list_item:
                start_list_item_indent = self._flush_start_list_item_indent
                start_list_item_open = self._flush_start_list_item_open
                internal_leading = self._flush_list_item_leading
                internal_indent = indent + self._flush_list_item_indent
            else:
                start_list_item_indent = self._start_list_item_indent
                start_list_item_open = self._start_list_item_open
                internal_leading = self._list_item_leading
                internal_indent = indent + self._list_item_indent
            if explicit_type is None:
                self._alias_def_buffer_index[id_obj] = len(self._buffer)
                self._buffer.append('')
                self._alias_def_template[id_obj] = indent + start_list_item_indent + '(label={0})>\n'
            else:
                self._buffer.append(indent + start_list_item_indent + '({0}'.format(explicit_type))
                self._alias_def_buffer_index[id_obj] = len(self._buffer)
                self._buffer.append('')
                self._buffer.append(')>\n')
                self._alias_def_template[id_obj] = ', label={0}'
            for item in obj:
                self._buffer.append(indent + start_list_item_open)
                self._encode_funcs[type(item)](item, inline=inline, after_start_list_item=True, indent=internal_indent, leading=internal_leading)
                self._buffer.append('\n')
            # Drop the final newline; the caller controls line termination.
            self._buffer.pop()
        self._obj_path.popitem()
        self._nesting_depth -= 1
    def _encode_dict(self, obj,
                     flush_margin=False, inline=False, at_line_start=True, indent='', leading='', after_start_list_item=False, key=False, key_path=False, value=False,
                     explicit_type=None,
                     start_inline_dict=grammar.LIT_GRAMMAR['start_inline_dict'],
                     end_inline_dict=grammar.LIT_GRAMMAR['end_inline_dict'],
                     assign_key_val=grammar.LIT_GRAMMAR['assign_key_val']):
        '''
        Encode a dict-like object into the output buffer.

        Three layouts are produced:  empty dict, inline dict (compact or one
        entry per line), and non-inline (indentation-based) dict.  In every
        case an empty placeholder string is appended to the buffer and its
        index recorded in `_alias_def_buffer_index`, so that a
        "(label=...)" tag can be patched in afterwards if the object turns
        out to be aliased elsewhere.  `explicit_type` is supplied by wrappers
        such as `_encode_odict` to emit an explicit "(type)>" tag.
        '''
        if key:
            raise TypeError('Dict-like objects are not supported as dict keys')
        id_obj = id(obj)
        # Object already encoded once: emit an alias reference instead of
        # re-encoding (shared or circular references).
        if id_obj in self._alias_values:
            self._encode_alias(obj)
            return
        self._obj_path[id_obj] = None
        self._alias_values[id_obj] = None
        # Switch to inline style once nesting passes the configured depth.
        if not inline:
            inline = self._nesting_depth >= self.inline_depth
        self._nesting_depth += 1
        if self._nesting_depth > self.max_nesting_depth:
            raise TypeError('Max nesting depth for collections was exceeded; max depth = {0}'.format(self.max_nesting_depth))
        if not obj:
            # Empty dict: always rendered as the inline "{}" form.
            self._buffer.append(leading)
            if explicit_type is None:
                self._alias_def_buffer_index[id_obj] = len(self._buffer)
                self._buffer.append('')
                self._alias_def_template[id_obj] = '(label={0})>\x20'
            else:
                self._buffer.append('({0}'.format(explicit_type))
                self._alias_def_buffer_index[id_obj] = len(self._buffer)
                self._buffer.append('')
                self._buffer.append(')>\x20')
                self._alias_def_template[id_obj] = ', label={0}'
            self._buffer.append(start_inline_dict + end_inline_dict)
            self._obj_path.popitem()
            self._nesting_depth -= 1
            return
        if inline:
            self._buffer.append(leading)
            if explicit_type is None:
                self._alias_def_buffer_index[id_obj] = len(self._buffer)
                self._buffer.append('')
                self._alias_def_template[id_obj] = '(label={0})>\x20'
            else:
                self._buffer.append('({0}'.format(explicit_type))
                self._alias_def_buffer_index[id_obj] = len(self._buffer)
                self._buffer.append('')
                self._buffer.append(')>\x20')
                self._alias_def_template[id_obj] = ', label={0}'
            internal_indent = indent + self.nesting_indent
            if self.compact_inline:
                # Compact inline: "{k = v, k = v}" on a single line.
                self._buffer.append(start_inline_dict)
                for k, v in obj.items():
                    self._encode_funcs[type(k)](k, inline=inline, at_line_start=False, indent=internal_indent, key=True)
                    self._buffer.append(' =')
                    self._encode_funcs[type(v)](v, inline=inline, at_line_start=False, indent=internal_indent, leading='\x20', value=True)
                    self._buffer.append(',\x20')
                # Rewrite the final separator depending on trailing-comma style.
                if self.trailing_commas:
                    self._buffer[-1] = ','
                else:
                    self._buffer[-1] = ''
                self._buffer.append(end_inline_dict)
            else:
                # Non-compact inline: one "key = value," entry per line.
                self._buffer.append(start_inline_dict + '\n')
                for k, v in obj.items():
                    self._buffer.append(internal_indent)
                    self._encode_funcs[type(k)](k, inline=inline, indent=internal_indent, key=True)
                    if self._scalar_bidi_rtl:
                        # After a right-to-left scalar key, put the value on
                        # its own line to keep display order unambiguous.
                        self._scalar_bidi_rtl = False
                        self._buffer.append('\x20=\n' + internal_indent)
                        self._encode_funcs[type(v)](v, inline=inline, indent=internal_indent, value=True)
                    else:
                        self._buffer.append('\x20=')
                        self._encode_funcs[type(v)](v, inline=inline, at_line_start=False, indent=internal_indent, leading='\x20', value=True)
                    self._buffer.append(',\n')
                if not self.trailing_commas:
                    self._buffer[-1] = '\n'
                self._buffer.append(indent + end_inline_dict)
        else:
            # Non-inline (indentation-based) layout.
            if at_line_start:
                self._buffer.append(leading)
            else:
                # Starting mid-line: push the dict onto its own, deeper line.
                indent += self.nesting_indent
                self._buffer.append('\n' + indent)
            if explicit_type is None:
                self._alias_def_buffer_index[id_obj] = len(self._buffer)
                self._buffer.append('')
                self._alias_def_template[id_obj] = indent + '(dict, label={0})>\n'
            else:
                self._buffer.append(indent + '({0}'.format(explicit_type))
                self._alias_def_buffer_index[id_obj] = len(self._buffer)
                self._buffer.append('')
                self._buffer.append(')>\n')
                self._alias_def_template[id_obj] = ', label={0}'
            internal_indent = indent + self.nesting_indent
            first = True
            for k, v in obj.items():
                # The first entry's indent was already emitted above.
                if first:
                    first = False
                else:
                    self._buffer.append(indent)
                self._encode_funcs[type(k)](k, inline=inline, indent=indent, key=True)
                if self._scalar_bidi_rtl:
                    self._scalar_bidi_rtl = False
                    self._buffer.append('\x20=\n' + internal_indent)
                    self._encode_funcs[type(v)](v, inline=inline, indent=internal_indent, value=True)
                else:
                    self._buffer.append('\x20=')
                    self._encode_funcs[type(v)](v, inline=inline, at_line_start=False, indent=indent, leading='\x20', value=True)
                self._buffer.append('\n')
            # Drop the final newline; the caller decides line termination.
            self._buffer.pop()
        self._obj_path.popitem()
        self._nesting_depth -= 1
def _encode_odict(self, obj, **kwargs):
self._encode_dict(obj, explicit_type='odict', **kwargs)
def _encode_set(self, obj, **kwargs):
self._encode_list(obj, explicit_type='set', **kwargs)
def _encode_tuple(self, obj, **kwargs):
self._encode_list(obj, explicit_type='tuple', **kwargs)
def encode(self, obj):
'''
Encode an object as a string.
'''
self._reset()
self._encode_funcs[type(obj)](obj, flush_margin=True)
if self._buffer[-1][-1] != '\n':
self._buffer.append('\n')
encoded = ''.join(self._buffer)
self._free()
return encoded
def partial_encode(self, obj, dtype=None,
flush_margin=False,
inline=False, at_line_start=True, indent='',
after_start_list_item=False,
key=False, key_path=False,
delim=None, block=False, num_base=None,
initial_nesting_depth=0):
'''
Encode an object within a larger object in a manner suitable for its
context. This is used in RoundtripAst.
'''
self._reset()
self._nesting_depth = initial_nesting_depth
if dtype is None:
if (delim and num_base) or (key_path and not key):
raise TypeError('Invalid argument combination')
if delim or block:
self._encode_funcs[type(obj)](obj, flush_margin=flush_margin, inline=inline, at_line_start=at_line_start, after_start_list_item=after_start_list_item, key=key, key_path=key_path, delim=delim, block=block)
elif num_base:
self._encode_funcs[type(obj)](obj, flush_margin=flush_margin, inline=inline, at_line_start=at_line_start, after_start_list_item=after_start_list_item, key=key, key_path=key_path, num_base=num_base)
else:
self._encode_funcs[type(obj)](obj, flush_margin=flush_margin, inline=inline, at_line_start=at_line_start, after_start_list_item=after_start_list_item, key=key, key_path=key_path)
elif dtype == 'doc_comment':
self._encode_doc_comment(obj, flush_margin=flush_margin, inline=inline, at_line_start=at_line_start, after_start_list_item=after_start_list_item, key=key, key_path=key_path, delim=delim, block=block)
elif dtype == 'line_comment':
self._encode_line_comment(obj, flush_margin=flush_margin, inline=inline, at_line_start=at_line_start, after_start_list_item=after_start_list_item, key=key, key_path=key_path, delim=delim, block=block)
else:
raise ValueError
encoded = ''.join(self._buffer).replace('\n', '\n'+indent)
self._free()
return encoded | PypiClean |
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/av/FLAudio.js.uncompressed.js | define("dojox/av/FLAudio", ['dojo', 'dojox/embed/Flash', 'dojox/timing/doLater'],function(dojo, dijit){
dojo.experimental("dojox.av.FLVideo");
dojo.declare("dojox.av.FLAudio", null, {
// summary:
// Play MP3 files through the Flash SWF built in the
// DEFT project.
// description:
// This class is brand new, so there is a lot of
// functionality not yet available. The initial
// purpose is for playing "event" sounds like button
// clicks, and for loading and controlling multiple
// sounds at once. As of yet, streaming is not supported
// and polling the sounds for events during playback
// may still be missing information. Markup is not
// supported, as it may not be needed.
//
// TODO:
// Streaming, playback events, crossdomain, CDN support,
// (alternate SWF location), global volume, ID3 tag,
// factor out doLater, onLoadStatus needs work,
// play(position) / seek()
//
// example:
// | new dojox.av.FLAudio({
// | initialVolume:.7,
// | initialPan:0,
// | autoPlay:false
// | });
//
// id: String?
// The id of this widget and the id of the SWF movie.
id:"",
//
// initialVolume: Number
// From 0-1
// Sets volume for all files unless changed with doPlay
// or setVolume
initialVolume: 0.7,
//
// initialPan: Number
// From -1 to 1 (-1 is left, 1 is right, 0 is middle)
// Sets pan for all files unless changed with play
// or setPan
initialPan: 0,
//
// autoPlay: Boolean
// If true, all files will play upon load. If false,
// they load and wait for doPlay() command.
//
// isDebug: Boolean?
// Setting to true tells the SWF to output log messages to Firebug.
isDebug: false,
//
// statusInterval: Number
// How often in milliseconds that the status of the
// player is checked - both load and play
statusInterval:200,
//
// _swfPath: Uri
// The path to the video player SWF resource
_swfPath: dojo.moduleUrl("dojox.av", "resources/audio.swf"),
//
//
// allowScriptAccess: String
// Whether the SWF can access the container JS
allowScriptAccess:"always",
//
// allowNetworking: String
// Whether SWF is restricted to a domain
allowNetworking: "all",
//
constructor: function(/*Object*/options){
// Provide this function for the SWF to ensure that the it is playing
// in HTML.
dojo.global.swfIsInHTML = function(){ return true; }
dojo.mixin(this, options || {});
if(!this.id){ this.id = "flaudio_"+new Date().getTime(); }
this.domNode = dojo.doc.createElement("div");
dojo.style(this.domNode, {
position:"relative",
width:"1px",
height:"1px",
top:"1px",
left:"1px"
});
dojo.body().appendChild(this.domNode);
this.init();
},
init: function(){
// summary:
// Initialize the media.
//
//
this._subs = [];
this.initialVolume = this._normalizeVolume(this.initialVolume);
var args = {
path:this._swfPath.uri,
width:"1px",
height:"1px",
minimumVersion:9, // this may need to be 10, not sure
expressInstall:true,
params:{
wmode:"transparent",
allowScriptAccess:this.allowScriptAccess,
allowNetworking:this.allowNetworking
},
// only pass in simple variables - no deep objects
vars:{
id:this.id,
autoPlay:this.autoPlay,
initialVolume:this.initialVolume,
initialPan:this.initialPan,
statusInterval:this.statusInterval,
isDebug:this.isDebug
}
};
this._sub("mediaError", "onError");
this._sub("filesProgress", "onLoadStatus");
this._sub("filesAllLoaded", "onAllLoaded");
this._sub("mediaPosition", "onPlayStatus");
this._sub("mediaEnd", "onComplete");
this._sub("mediaMeta", "onID3");
this._flashObject = new dojox.embed.Flash(args, this.domNode);
this._flashObject.onError = function(err){
console.warn("Flash Error:", err);
};
this._flashObject.onLoad = dojo.hitch(this, function(mov){
this.flashMedia = mov;
this.isPlaying = this.autoPlay;
this.isStopped = !this.autoPlay;
this.onLoad(this.flashMedia);
});
},
// ============== //
// Loading Files //
// ============== //
load: function(/*Object*/options){
// summary:
// Adds a media object to the playlist
// ***This can be called repeatedly to add multiple items.
// options: Object
// url: String
// (required) path to MP3 media
// url must be absolute or relative to SWF,
// not dojo or the html. An effort will be made
// to fix incorrect paths.
// id: String
// (optional) an identifier to later determine
// which media to control.
// returns:
// The normalized url, which can be used to identify the
// audio.
//
if(dojox.timing.doLater(this.flashMedia, this)){ return false; }
if(!options.url){
throw new Error("An url is required for loading media");
return false;
}else{
options.url = this._normalizeUrl(options.url);
}
this.flashMedia.load(options);
return options.url; // String
},
// ============================= //
// Methods to control the sound //
// ============================= //
doPlay: function(/*Object*/options){
// summary:
// Tell media to play, based on
// the options passed.
// options: Object
// volume: Number
// Sets the volume
// pan: Number
// Sets left/right pan
// index:Number OR id:String OR url:String
// Choose one of the above to indentify
// the media you wish to control. id is
// set by you. index is the order in which
// media was added (zero based)
// NOTE: lack of an identifier will default
// to first (or only) item.
// NOTE: Can't name this method "play()" as it causes
// an IE error.
this.flashMedia.doPlay(options);
},
pause: function(/*Object*/options){
// summary:
// Tell media to pause, based on identifier in
// the options passed.
// options: Object
// index:Number OR id:String OR url:String
// See doPlay()
//
this.flashMedia.pause(options);
},
stop: function(/*Object*/options){
// summary:
// Tell media to stop, based on identifier in
// the options passed.
// options:
// index:Number OR id:String OR url:String
// See doPlay()
//
this.flashMedia.doStop(options);
},
setVolume: function(/*Object*/options){
// summary:
// Set media volume, based on identifier in
// the options passed.
// options:
// volume: Number
// 0 to 1
// index:Number OR id:String OR url:String
// See doPlay()
//
this.flashMedia.setVolume(options);
},
setPan: function(/*Object*/options){
// summary:
// Set media pan, based on identifier in
// the options passed.
// options:
// pan:Number
// -1 to 1
// index:Number OR id:String OR url:String
// See doPlay()
//
this.flashMedia.setPan(options);
},
getVolume: function(/*Object*/options){
// summary:
// Get media volume, based on identifier in
// the options passed.
// options:
// index:Number OR id:String OR url:String
// See doPlay()
//
return this.flashMedia.getVolume(options);
},
getPan: function(/*Object*/options){
// summary:
// Set media pan, based on identifier in
// the options passed.
// options:
// index:Number OR id:String OR url:String
// See doPlay()
//
return this.flashMedia.getPan(options);
},
getPosition: function(/*Object*/options){
// summary:
// Get the current time.
// options:
// index:Number OR id:String OR url:String
// See doPlay()
//
return this.flashMedia.getPosition(options);
},
// ============= //
// Sound Events //
// ============= //
onError: function(msg){
// summary:
// stub fired when an error occurs
console.warn("SWF ERROR:", msg)
},
onLoadStatus: function(/*Array*/events){
// summary:
},
onAllLoaded: function(){
// summary:
// stub fired
},
onPlayStatus: function(/*Array*/events){
// summary:
},
onComplete: function(/*Array*/events){
// summary:
// Fired at the end of a media file.
},
onLoad: function(){
// summary:
// stub fired when SWF is ready
},
onID3: function(evt){
// summary:
// Fired when the ID3 data is received.
},
destroy: function(){
// summary:
// destroys flash
if(!this.flashMedia){
this._cons.push(dojo.connect(this, "onLoad", this, "destroy"));
return;
}
dojo.forEach(this._subs, function(s){
dojo.unsubscribe(s);
});
dojo.forEach(this._cons, function(c){
dojo.disconnect(c);
});
this._flashObject.destroy();
//dojo._destroyElement(this.flashDiv);
},
_sub: function(topic, method){
// summary:
// helper for subscribing to topics
dojo.subscribe(this.id+"/"+topic, this, method);
},
_normalizeVolume: function(vol){
// summary:
// Ensures volume is less than one
//
if(vol>1){
while(vol>1){
vol*=.1
}
}
return vol;
},
_normalizeUrl: function(_url){
// summary:
// Checks that path is relative to HTML file or
// convertes it to an absolute path.
//
if(_url && _url.toLowerCase().indexOf("http")<0){
//
// Appears to be a relative path. Attempt to convert it to absolute,
// so it will better target the SWF.
var loc = window.location.href.split("/");
loc.pop();
loc = loc.join("/")+"/";
_url = loc+_url;
}
return _url;
}
});
return dojox.av.FLAudio;
}); | PypiClean |
/FitBenchmarking-1.0.0.tar.gz/FitBenchmarking-1.0.0/fitbenchmarking/parsing/nist_data_functions.py | import numpy as np
from fitbenchmarking.utils.exceptions import ParsingError
def nist_func_definition(function, param_names):
    """
    Processing a function plus different set of starting values as specified in
    the NIST problem definition file into a callable

    :param function: function string as defined in a NIST problem definition
                     file
    :type function: str
    :param param_names: names of the parameters in the function
    :type param_names: list

    :return: callable function
    :rtype: callable
    """
    scipy_expr = format_function_scipy(function)
    if not is_safe(scipy_expr):
        raise ParsingError('Error while sanitizing input')

    # scipy_expr has been sanitized by is_safe and param_names is sanitized
    # in get_nist_param_names_and_values, so the exec below is safe.  Only
    # numpy is exposed to the generated function.
    # pylint: disable=exec-used
    namespace = {}
    restricted_globals = {'__builtins__': {}, 'np': np}
    source = "def fitting_function(x, {0}): return {1}".format(
        ','.join(param_names), scipy_expr)
    exec(source, restricted_globals, namespace)
    return namespace['fitting_function']
def nist_jacobian_definition(jacobian, param_names):
    """
    Processing a Jacobian plus different set of starting values as specified in
    the NIST problem definition file into a callable

    :param jacobian: Jacobian string as defined in the data files for the
                     corresponding NIST problem definition file
    :type jacobian: str
    :param param_names: names of the parameters in the function
    :type param_names: list

    :return: callable function
    :rtype: callable
    """
    columns = []
    for entry in jacobian:
        expr = format_function_scipy(entry)
        if not is_safe(expr):
            raise ParsingError('Error while sanitizing Jacobian input')
        # A constant (integer) partial derivative is broadcast to a vector
        # matching the length of x.
        if is_int(expr):
            expr += "*(np.ones(x.shape[0]))"
        columns.append(expr)

    body = f'np.array([{",".join(columns)}]).T'
    # Rewrite named parameters as indexed accesses into a single vector.
    vector_name = "params"
    for index, name in enumerate(param_names):
        body = body.replace(name, f"{vector_name}[{index}]")

    # body has been sanitized above and param_names is sanitized in
    # get_nist_param_names_and_values, so the exec below is safe.
    # pylint: disable=exec-used
    namespace = {}
    restricted_globals = {'__builtins__': {}, 'np': np}
    exec(f"def jacobian_function(x, {vector_name}): return {body}",
         restricted_globals, namespace)
    return namespace['jacobian_function']
def nist_hessian_definition(hessian, param_names):
    """
    Processing a Hessian into a callable

    :param hessian: Hessian string as defined in the data files for the
                    corresponding NIST problem definition file
    :type hessian: str
    :param param_names: names of the parameters in the function
    :type param_names: list

    :return: callable function
    :rtype: callable
    """
    vector_name = "params"
    entries = []
    for line in hessian:
        expr = format_function_scipy(line)
        if not is_safe(expr):
            raise ParsingError('Error while sanitizing Hessian input')
        # A constant (integer) second derivative is broadcast to a vector
        # matching the length of x.
        if is_int(expr):
            expr += "*(np.ones(x.shape[0]))"
        # Rewrite named parameters as indexed accesses into a single vector.
        for index, name in enumerate(param_names):
            expr = expr.replace(name, f"{vector_name}[{index}]")
        entries.append(expr)

    dim = len(param_names)
    # Arrange the flat list of expressions as a dim x dim Hessian matrix,
    # emitting one bracketed row per matrix column (matches original layout).
    entries = np.reshape(entries, (dim, dim))
    matrix_rows = ''
    for i in range(dim):
        matrix_rows += '[' + ",".join(entries[:, i]) + '],'
    body = f"np.array([{matrix_rows}])"

    # body has been sanitized above and param_names is sanitized in
    # get_nist_param_names_and_values, so the exec below is safe.
    # pylint: disable=exec-used
    namespace = {}
    restricted_globals = {'__builtins__': {}, 'np': np}
    exec(f"def hessian_function(x, {vector_name}): return {body}",
         restricted_globals, namespace)
    return namespace['hessian_function']
def is_int(value):
    """
    Checks to see if a value is an integer or not

    :param value: String representation of an equation
    :type value: str

    :return: Whether or not value is an int
    :rtype: bool
    """
    # EAFP: attempt the conversion and report whether it succeeded.
    try:
        int(value)
    except ValueError:
        return False
    return True
def format_function_scipy(function):
    """
    Formats the function string such that it is scipy-ready.

    :param function: The function to be formatted
    :type function: str

    :return: The formatted function
    :rtype: str
    """
    # Plain textual substitutions, applied in this exact order so that the
    # result matches the historical behaviour (e.g. "pi" before "log").
    replacements = (
        ("exp", "np.exp"),
        ("^", "**"),
        ("cos", "np.cos"),
        ("sin", "np.sin"),
        ("tan", "np.tan"),
        ("pi", "np.pi"),
        ("log", "np.log"),
        ("Log", "np.log"),
    )
    for old, new in replacements:
        function = function.replace(old, new)
    return function
# Due to the nature of this function it is necessary to be able to return at
# multiple places
# pylint: disable=too-many-return-statements, too-many-branches
def is_safe(func_str):
    """
    Verifies that a string is safe to be passed to exec in the context of an
    equation.

    The check is a recursive descent over the expression: brackets are
    partitioned off, then the string is split on arithmetic operators, and
    the leaves must be ``np.pi``, numeric literals, or simple alphanumeric
    variable names.  Anything else (attribute access, calls to arbitrary
    names, statements) is rejected.

    :param func_str: The function to be checked
    :type func_str: string

    :return: Whether the string is of the expected format for an equation
    :rtype: bool
    """
    # Remove whitespace
    func_str = func_str.replace(' ', '')

    # Empty string is safe
    if func_str == '':
        return True

    # These are all safe and can be stripped out
    # (only the known numpy math functions are whitelisted)
    if 'np' in func_str:
        np_funcs = ['np.exp', 'np.cos', 'np.sin', 'np.tan', 'np.log']
        for s in np_funcs:
            func_str = func_str.replace(s, '')

    # Store valid symbols for later
    symbols = ['**', '/', '*', '+', '-']

    # Partition on outer brackets
    if '(' in func_str:
        if ')' not in func_str:
            # Number of brackets don't match
            return False
        # Split string "left(centre)right"
        left, remainder = func_str.split('(', 1)
        centre, right = remainder.split(')', 1)
        # Handle nested brackets: extend `centre` until its parentheses
        # balance, consuming closing brackets from `right`.
        while centre.count('(') != centre.count(')'):
            tmp, right = right.split(')', 1)
            centre = centre + ')' + tmp
        # If left is non-empty it should end with a symbol
        # (otherwise "name(" would be an arbitrary function call)
        if left != '':
            left_ends_with_symbol = False
            for sym in symbols:
                if left.endswith(sym):
                    left = left.strip(sym)
                    left_ends_with_symbol = True
                    break
            if left_ends_with_symbol is False:
                return False
        # If right is non-empty it should start with a symbol
        if right != '':
            right_starts_with_symbol = False
            for sym in symbols:
                if right.startswith(sym):
                    right = right.strip(sym)
                    right_starts_with_symbol = True
                    break
            if right_starts_with_symbol is False:
                return False
        # Centre should not be empty
        if centre == '':
            return False
        # Return True if all sub parts are safe
        return is_safe(left) and is_safe(centre) and is_safe(right)

    # Split on a symbol and recurse
    for sym in symbols:
        if sym in func_str:
            left, right = func_str.split(sym, 1)
            # Symbol should not be at start or end of string (unless it's a -)
            if (left == '' and sym != '-') or right == '':
                return False
            # Return True if both sub parts are safe
            return is_safe(left) and is_safe(right)

    # np.pi is acceptable
    if func_str == 'np.pi':
        return True

    # Floating points are acceptable
    try:
        float(func_str)
        return True
    except ValueError:
        pass

    # Ints are acceptable
    try:
        int(func_str)
        return True
    except ValueError:
        pass

    # Only remaining acceptable strings are variables
    # (must start with a letter and contain only letters/digits)
    if func_str[0].isalpha() and func_str.isalnum():
        return True

    # Unparsed output remains
    return False
/Electrum-VTC-2.9.3.3.tar.gz/Electrum-VTC-2.9.3.3/plugins/digitalbitbox/digitalbitbox.py |
try:
import electrum_vtc as electrum
from electrum_vtc.bitcoin import TYPE_ADDRESS, var_int, msg_magic, Hash, verify_message, pubkey_from_signature, point_to_ser, public_key_to_p2pkh, EncodeAES, DecodeAES, MyVerifyingKey
from electrum_vtc.i18n import _
from electrum_vtc.keystore import Hardware_KeyStore
from ..hw_wallet import HW_PluginBase
from electrum_vtc.util import print_error
import time
import hid
import json
import math
import struct
import hashlib
from ecdsa.ecdsa import generator_secp256k1
from ecdsa.util import sigencode_der
from ecdsa.curves import SECP256k1
DIGIBOX = True
except ImportError as e:
DIGIBOX = False
# ----------------------------------------------------------------------------------
# USB HID interface
#
class DigitalBitbox_Client():
    # Python 2 USB HID client for the Digital Bitbox hardware wallet.
    # Wraps a hidapi device handle and implements the Bitbox's JSON-over-HID
    # protocol, including AES-encrypted messages once a password is set.

    def __init__(self, hidDevice):
        self.dbb_hid = hidDevice
        self.opened = True
        self.password = None
        self.isInitialized = False
        self.setupRunning = False
        self.usbReportSize = 64 # firmware > v2.0.0


    def close(self):
        # Close the HID handle; safe to call more than once.
        if self.opened:
            try:
                self.dbb_hid.close()
            except:
                pass
        self.opened = False


    def timeout(self, cutoff):
        # No-op: the Bitbox has no session timeout to service.
        pass


    def label(self):
        return " "


    def is_pairable(self):
        return True


    def is_initialized(self):
        # The device counts as initialized once a password has been set on it.
        return self.dbb_has_password()


    def is_paired(self):
        return self.password is not None


    def get_xpub(self, bip32_path):
        # Request the extended public key at bip32_path; returns None if the
        # device check/pairing dialog was not completed.
        if self.check_device_dialog():
            msg = '{"xpub":"' + bip32_path + '"}'
            reply = self.hid_send_encrypt(msg)
            return reply['xpub']
        return None


    def dbb_has_password(self):
        # The device replies to a plaintext ping with 'password' once one
        # has been set (after which all commands must be encrypted).
        reply = self.hid_send_plain('{"ping":""}')
        if 'ping' not in reply:
            raise Exception('Device communication error. Please unplug and replug your Digital Bitbox.')
        if reply['ping'] == 'password':
            return True
        return False


    def stretch_key(self, key):
        # Derive the device communication key from the user password via
        # PBKDF2-HMAC-SHA512 with a fixed salt, as required by the firmware.
        import pbkdf2, hmac
        return pbkdf2.PBKDF2(key, 'Digital Bitbox', iterations = 20480, macmodule = hmac, digestmodule = hashlib.sha512).read(64).encode('hex')


    def backup_password_dialog(self):
        # Prompt (and re-prompt on invalid length) for the password that was
        # used when the backup was created; returns None if cancelled.
        msg = _("Enter the password used when the backup was created:")
        while True:
            password = self.handler.get_passphrase(msg, False)
            if password is None:
                return None
            if len(password) < 4:
                msg = _("Password must have at least 4 characters.\r\n\r\nEnter password:")
            elif len(password) > 64:
                msg = _("Password must have less than 64 characters.\r\n\r\nEnter password:")
            else:
                return str(password)


    def password_dialog(self, msg):
        # Prompt for the device password and store it on success; returns
        # False if the user cancelled.
        while True:
            password = self.handler.get_passphrase(msg, False)
            if password is None:
                return False
            if len(password) < 4:
                msg = _("Password must have at least 4 characters.\r\n\r\nEnter password:")
            elif len(password) > 64:
                msg = _("Password must have less than 64 characters.\r\n\r\nEnter password:")
            else:
                self.password = str(password)
                return True


    def check_device_dialog(self):
        # Walk the user through device setup/pairing as needed and return
        # whether the device ends up initialized and usable.
        # Set password if fresh device
        if self.password is None and not self.dbb_has_password():
            if not self.setupRunning:
                return False # A fresh device cannot connect to an existing wallet
            msg = _("An uninitialized Digital Bitbox is detected. " \
                    "Enter a new password below.\r\n\r\n REMEMBER THE PASSWORD!\r\n\r\n" \
                    "You cannot access your coins or a backup without the password.\r\n" \
                    "A backup is saved automatically when generating a new wallet.")
            if self.password_dialog(msg):
                reply = self.hid_send_plain('{"password":"' + self.password + '"}')
            else:
                return False

        # Get password from user if not yet set
        msg = _("Enter your Digital Bitbox password:")
        while self.password is None:
            if not self.password_dialog(msg):
                return False
            # A successful encrypted 'blink' confirms the password is correct;
            # hid_send_encrypt clears self.password on error, so loop again.
            reply = self.hid_send_encrypt('{"led":"blink"}')
            if 'error' in reply:
                self.password = None
                if reply['error']['code'] == 109:
                    msg = _("Incorrect password entered.\r\n\r\n" \
                            + reply['error']['message'] + "\r\n\r\n" \
                            "Enter your Digital Bitbox password:")
                else:
                    # Should never occur
                    msg = _("Unexpected error occurred.\r\n\r\n" \
                            + reply['error']['message'] + "\r\n\r\n" \
                            "Enter your Digital Bitbox password:")

        # Initialize device if not yet initialized
        if not self.setupRunning:
            self.isInitialized = True # Wallet exists. Electrum code later checks if the device matches the wallet
        elif not self.isInitialized:
            reply = self.hid_send_encrypt('{"device":"info"}')
            if reply['device']['id'] <> "":
                self.recover_or_erase_dialog() # Already seeded
            else:
                self.seed_device_dialog() # Seed if not initialized

        return self.isInitialized


    def recover_or_erase_dialog(self):
        # The device already has a seed: let the user keep it, overwrite it
        # from an SD-card backup, or erase the device entirely.
        msg = _("The Digital Bitbox is already seeded. Choose an option:\n")
        choices = [
            (_("Create a wallet using the current seed")),
            (_("Load a wallet from the micro SD card (the current seed is overwritten)")),
            (_("Erase the Digital Bitbox"))
        ]
        try:
            reply = self.handler.win.query_choice(msg, choices)
        except Exception:
            return # Back button pushed
        if reply == 2:
            self.dbb_erase()
        elif reply == 1:
            if not self.dbb_load_backup():
                return
        else:
            pass # Use existing seed
        self.isInitialized = True


    def seed_device_dialog(self):
        # Fresh device: generate a new random wallet or restore one from
        # the micro SD card.
        msg = _("Choose how to initialize your Digital Bitbox:\n")
        choices = [
            (_("Generate a new random wallet")),
            (_("Load a wallet from the micro SD card"))
        ]
        try:
            reply = self.handler.win.query_choice(msg, choices)
        except Exception:
            return # Back button pushed
        if reply == 0:
            self.dbb_generate_wallet()
        else:
            if not self.dbb_load_backup(show_msg=False):
                return
        self.isInitialized = True


    def dbb_generate_wallet(self):
        # Ask the device to create a new seed; a PDF backup named after the
        # current timestamp is written to the SD card by the firmware.
        key = self.stretch_key(self.password)
        filename = "Electrum-" + time.strftime("%Y-%m-%d-%H-%M-%S") + ".pdf"
        msg = '{"seed":{"source": "create", "key": "%s", "filename": "%s", "entropy": "%s"}}' % (key, filename, 'Digital Bitbox Electrum Plugin')
        reply = self.hid_send_encrypt(msg)
        if 'error' in reply:
            raise Exception(reply['error']['message'])


    def dbb_erase(self):
        # Factory-reset the device; requires a long touch on the device LED.
        # On success the stored password is invalid, so raise to abort.
        self.handler.show_message(_("Are you sure you want to erase the Digital Bitbox?\r\n\r\n" \
                                    "To continue, touch the Digital Bitbox's light for 3 seconds.\r\n\r\n" \
                                    "To cancel, briefly touch the light or wait for the timeout."))
        hid_reply = self.hid_send_encrypt('{"reset":"__ERASE__"}')
        self.handler.clear_dialog()
        if 'error' in hid_reply:
            raise Exception(hid_reply['error']['message'])
        else:
            self.password = None
            raise Exception('Device erased')


    def dbb_load_backup(self, show_msg=True):
        # Restore a seed from one of the SD-card backups listed by the
        # device; returns False if the user backed out of the file choice.
        backups = self.hid_send_encrypt('{"backup":"list"}')
        if 'error' in backups:
            raise Exception(backups['error']['message'])
        try:
            f = self.handler.win.query_choice(_("Choose a backup file:"), backups['backup'])
        except Exception:
            return False # Back button pushed
        key = self.backup_password_dialog()
        if key is None:
            raise Exception('Canceled by user')
        key = self.stretch_key(key)
        if show_msg:
            self.handler.show_message(_("Loading backup...\r\n\r\n" \
                                        "To continue, touch the Digital Bitbox's light for 3 seconds.\r\n\r\n" \
                                        "To cancel, briefly touch the light or wait for the timeout."))
        msg = '{"seed":{"source": "backup", "key": "%s", "filename": "%s"}}' % (key, backups['backup'][f])
        hid_reply = self.hid_send_encrypt(msg)
        self.handler.clear_dialog()
        if 'error' in hid_reply:
            raise Exception(hid_reply['error']['message'])
        return True


    def hid_send_frame(self, data):
        # Send `data` as a sequence of 64-byte HID reports using the
        # U2F-style framing: one INIT frame (channel id, command, length)
        # followed by numbered CONT frames; unused bytes padded with 0xEE.
        HWW_CID = 0xFF000000
        HWW_CMD = 0x80 + 0x40 + 0x01
        data = bytearray(data)
        data_len = len(data)
        seq = 0;
        idx = 0;
        write = []
        while idx < data_len:
            if idx == 0:
                # INIT frame
                write = data[idx : idx + min(data_len, self.usbReportSize - 7)]
                self.dbb_hid.write('\0' + struct.pack(">IBH", HWW_CID, HWW_CMD, data_len & 0xFFFF) + write + '\xEE' * (self.usbReportSize - 7 - len(write)))
            else:
                # CONT frame
                write = data[idx : idx + min(data_len, self.usbReportSize - 5)]
                self.dbb_hid.write('\0' + struct.pack(">IB", HWW_CID, seq) + write + '\xEE' * (self.usbReportSize - 5 - len(write)))
                seq += 1
            idx += len(write)


    def hid_read_frame(self):
        # Read one framed reply: the INIT report carries channel id, command
        # and total length; CONT reports carry the remaining payload.
        # INIT response
        read = self.dbb_hid.read(self.usbReportSize)
        cid = ((read[0] * 256 + read[1]) * 256 + read[2]) * 256 + read[3]
        cmd = read[4]
        data_len = read[5] * 256 + read[6]
        data = read[7:]
        idx = len(read) - 7;
        while idx < data_len:
            # CONT response
            read = self.dbb_hid.read(self.usbReportSize)
            data += read[5:]
            idx += len(read) - 5
        return data


    def hid_send_plain(self, msg):
        # Send an unencrypted JSON message and parse the JSON reply.
        # Old firmware (v1.x / v2.0.x) uses a flat 4096-byte report
        # protocol; newer firmware uses the framed protocol above.
        reply = ""
        try:
            serial_number = self.dbb_hid.get_serial_number_string()
            if "v2.0." in serial_number or "v1." in serial_number:
                hidBufSize = 4096
                self.dbb_hid.write('\0' + bytearray(msg) + '\0' * (hidBufSize - len(msg)))
                r = []
                while len(r) < hidBufSize:
                    r = r + self.dbb_hid.read(hidBufSize)
            else:
                self.hid_send_frame(msg)
                r = self.hid_read_frame()
            r = str(bytearray(r)).rstrip(' \t\r\n\0')
            r = r.replace("\0", '')
            reply = json.loads(r)
        except Exception as e:
            print_error('Exception caught ' + str(e))
        return reply


    def hid_send_encrypt(self, msg):
        # AES-encrypt `msg` with a key derived from the double-SHA256 of the
        # password, send it, and decrypt the reply.  On a reported error the
        # stored password is cleared so the pairing dialog re-prompts.
        reply = ""
        try:
            secret = Hash(self.password)
            msg = EncodeAES(secret, msg)
            reply = self.hid_send_plain(msg)
            if 'ciphertext' in reply:
                reply = DecodeAES(secret, ''.join(reply["ciphertext"]))
                reply = json.loads(reply)
            if 'error' in reply:
                self.password = None
        except Exception as e:
            print_error('Exception caught ' + str(e))
        return reply
# ----------------------------------------------------------------------------------
#
#
class DigitalBitbox_KeyStore(Hardware_KeyStore):
hw_type = 'digitalbitbox'
device = 'DigitalBitbox'
    def __init__(self, d):
        # Delegate common keystore setup to the hardware base class.
        Hardware_KeyStore.__init__(self, d)
        # The Digital Bitbox can always sign, so never force watch-only mode.
        self.force_watching_only = False
        self.maxInputs = 14 # maximum inputs per single sign command
    def get_derivation(self):
        # Return the keystore's BIP32 derivation prefix as a plain string.
        return str(self.derivation)
def give_error(self, message, clear_client = False):
if clear_client:
self.client = None
raise Exception(message)
def decrypt_message(self, pubkey, message, password):
raise RuntimeError(_('Encryption and decryption are currently not supported for %s') % self.device)
def sign_message(self, sequence, message, password):
sig = None
try:
inputPath = self.get_derivation() + "/%d/%d" % sequence
inputHash = Hash(msg_magic(message)).encode('hex')
hasharray = []
hasharray.append({'hash': inputHash, 'keypath': inputPath})
hasharray = json.dumps(hasharray)
msg = '{"sign":{"meta":"sign message", "data":%s}}' % (hasharray)
dbb_client = self.plugin.get_client(self)
if not dbb_client.is_paired():
raise Exception("Could not sign message.")
reply = dbb_client.hid_send_encrypt(msg)
self.handler.show_message(_("Signing message ...\r\n\r\n" \
"To continue, touch the Digital Bitbox's blinking light for 3 seconds.\r\n\r\n" \
"To cancel, briefly touch the blinking light or wait for the timeout."))
reply = dbb_client.hid_send_encrypt(msg) # Send twice, first returns an echo for smart verification (not implemented)
self.handler.clear_dialog()
if 'error' in reply:
raise Exception(reply['error']['message'])
if 'sign' not in reply:
raise Exception("Could not sign message.")
if 'recid' in reply['sign'][0]:
# firmware > v2.1.1
sig = chr(27 + int(reply['sign'][0]['recid'], 16) + 4) + reply['sign'][0]['sig'].decode('hex')
h = Hash(msg_magic(message))
pk, compressed = pubkey_from_signature(sig, h)
pk = point_to_ser(pk.pubkey.point, compressed)
addr = public_key_to_p2pkh(pk)
if verify_message(addr, sig, message) is False:
raise Exception("Could not sign message")
elif 'pubkey' in reply['sign'][0]:
# firmware <= v2.1.1
for i in range(4):
sig = chr(27 + i + 4) + reply['sign'][0]['sig'].decode('hex')
try:
addr = public_key_to_p2pkh(reply['sign'][0]['pubkey'].decode('hex'))
if verify_message(addr, sig, message):
break
except Exception:
continue
else:
raise Exception("Could not sign message")
except BaseException as e:
self.give_error(e)
return sig
def sign_transaction(self, tx, password):
if tx.is_complete():
return
try:
p2shTransaction = False
derivations = self.get_tx_derivations(tx)
inputhasharray = []
hasharray = []
pubkeyarray = []
# Build hasharray from inputs
for i, txin in enumerate(tx.inputs()):
if txin['type'] == 'coinbase':
self.give_error("Coinbase not supported") # should never happen
if txin['type'] in ['p2sh']:
p2shTransaction = True
for x_pubkey in txin['x_pubkeys']:
if x_pubkey in derivations:
index = derivations.get(x_pubkey)
inputPath = "%s/%d/%d" % (self.get_derivation(), index[0], index[1])
inputHash = Hash(tx.serialize_preimage(i).decode('hex'))
hasharray_i = {'hash': inputHash.encode('hex'), 'keypath': inputPath}
hasharray.append(hasharray_i)
inputhasharray.append(inputHash)
break
else:
self.give_error("No matching x_key for sign_transaction") # should never happen
# Sanity check
if p2shTransaction:
for txinput in tx.inputs():
if txinput['type'] != 'p2sh':
self.give_error("P2SH / regular input mixed in same transaction not supported") # should never happen
# Build pubkeyarray from outputs (unused because echo for smart verification not implemented)
if not p2shTransaction:
for _type, address, amount in tx.outputs():
assert _type == TYPE_ADDRESS
info = tx.output_info.get(address)
if info is not None:
index, xpubs, m = info
changePath = self.get_derivation() + "/%d/%d" % index
changePubkey = self.derive_pubkey(index[0], index[1])
pubkeyarray_i = {'pubkey': changePubkey, 'keypath': changePath}
pubkeyarray.append(pubkeyarray_i)
# Build sign command
dbb_signatures = []
steps = math.ceil(1.0 * len(hasharray) / self.maxInputs)
for step in range(int(steps)):
hashes = hasharray[step * self.maxInputs : (step + 1) * self.maxInputs]
msg = '{"sign": {"meta":"%s", "data":%s, "checkpub":%s} }' % \
(Hash(tx.serialize()).encode('hex'), json.dumps(hashes), json.dumps(pubkeyarray))
dbb_client = self.plugin.get_client(self)
if not dbb_client.is_paired():
raise Exception("Could not sign transaction.")
reply = dbb_client.hid_send_encrypt(msg)
if 'error' in reply:
raise Exception(reply['error']['message'])
if 'echo' not in reply:
raise Exception("Could not sign transaction.")
if steps > 1:
self.handler.show_message(_("Signing large transaction. Please be patient ...\r\n\r\n" \
"To continue, touch the Digital Bitbox's blinking light for 3 seconds. " \
"(Touch " + str(step + 1) + " of " + str(int(steps)) + ")\r\n\r\n" \
"To cancel, briefly touch the blinking light or wait for the timeout.\r\n\r\n"))
else:
self.handler.show_message(_("Signing transaction ...\r\n\r\n" \
"To continue, touch the Digital Bitbox's blinking light for 3 seconds.\r\n\r\n" \
"To cancel, briefly touch the blinking light or wait for the timeout."))
reply = dbb_client.hid_send_encrypt(msg) # Send twice, first returns an echo for smart verification (not implemented)
self.handler.clear_dialog()
if 'error' in reply:
raise Exception(reply['error']['message'])
if 'sign' not in reply:
raise Exception("Could not sign transaction.")
dbb_signatures.extend(reply['sign'])
# Fill signatures
if len(dbb_signatures) <> len(tx.inputs()):
raise Exception("Incorrect number of transactions signed.") # Should never occur
for i, txin in enumerate(tx.inputs()):
num = txin['num_sig']
for pubkey in txin['pubkeys']:
signatures = filter(None, txin['signatures'])
if len(signatures) == num:
break # txin is complete
ii = txin['pubkeys'].index(pubkey)
signed = dbb_signatures[i]
if 'recid' in signed:
# firmware > v2.1.1
recid = int(signed['recid'], 16)
s = signed['sig'].decode('hex')
h = inputhasharray[i]
pk = MyVerifyingKey.from_signature(s, recid, h, curve = SECP256k1)
pk = point_to_ser(pk.pubkey.point, True).encode('hex')
elif 'pubkey' in signed:
# firmware <= v2.1.1
pk = signed['pubkey']
if pk != pubkey:
continue
sig_r = int(signed['sig'][:64], 16)
sig_s = int(signed['sig'][64:], 16)
sig = sigencode_der(sig_r, sig_s, generator_secp256k1.order())
txin['signatures'][ii] = sig.encode('hex') + '01'
tx._inputs[i] = txin
except BaseException as e:
self.give_error(e, True)
else:
print_error("Transaction is_complete", tx.is_complete())
tx.raw = tx.serialize()
class DigitalBitboxPlugin(HW_PluginBase):
    """Electrum plugin glue for the Digital Bitbox hardware wallet.

    Enumerates/opens the device over HID and hands opened devices to
    DigitalBitbox_Client for the wire protocol.
    """
    libraries_available = DIGIBOX
    keystore_class = DigitalBitbox_KeyStore
    client = None
    DEVICE_IDS = [
        (0x03eb, 0x2402) # Digital Bitbox
    ]
    def __init__(self, parent, config, name):
        HW_PluginBase.__init__(self, parent, config, name)
        if self.libraries_available:
            self.device_manager().register_devices(self.DEVICE_IDS)
    def get_dbb_device(self, device):
        """Open and return the raw HID device at device.path."""
        dev = hid.device()
        dev.open_path(device.path)
        return dev
    def create_client(self, device, handler):
        """Wrap a matching HID interface in a DigitalBitbox_Client; else None."""
        if device.interface_number == 0 or device.usage_page == 0xffff:
            self.handler = handler
            client = self.get_dbb_device(device)
            # Fixed: deprecated '<>' operator (removed in Python 3, PEP 8
            # mandates '!='); identity test is the idiomatic None check.
            if client is not None:
                client = DigitalBitbox_Client(client)
            return client
        else:
            return None
    def setup_device(self, device_info, wizard):
        """Run initial device setup during the wallet-creation wizard."""
        devmgr = self.device_manager()
        device_id = device_info.device.id_
        client = devmgr.client_by_id(device_id)
        client.handler = self.create_handler(wizard)
        client.setupRunning = True
        client.get_xpub("m/44'/0'")
    def get_xpub(self, device_id, derivation, wizard):
        """Return the xpub at `derivation` from the device with `device_id`."""
        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)
        client.handler = self.create_handler(wizard)
        client.check_device_dialog()
        xpub = client.get_xpub(derivation)
        return xpub
    def get_client(self, keystore, force_pair=True):
        """Return a (possibly freshly paired) client for `keystore`."""
        devmgr = self.device_manager()
        handler = keystore.handler
        with devmgr.hid_lock:
            client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
        if client is not None:
            client.check_device_dialog()
        return client
/Krakatau-noff-v0.20181212.tar.gz/Krakatau-noff-v0.20181212/Krakatau/java/cfg.py | from collections import defaultdict as ddict
from .. import graph_util
from ..ssa import objtypes
from . import ast
def flattenDict(replace):
    """Path-compress a substitution map in place.

    After the call every key maps directly to the end of its replacement
    chain, i.e. to a value that is not itself a key of the map.
    """
    for key in list(replace):
        target = replace[key]
        while target in replace:
            target = replace[target]
        replace[key] = target
# The basic block in our temporary CFG
# instead of code, it merely contains a list of defs and uses
# This is an extended basic block, i.e. it only terminates in a normal jump(s).
# exceptions can be thrown from various points within the block
class DUBlock(object):
    """Extended basic block for the temporary def/use CFG.

    Instead of code it stores a list of line records:
    ('use', var), ('def', (dst, src_or_None)), or ('canthrow', None).
    The block only terminates in normal jumps; exceptions may be thrown
    from points within it.
    """
    def __init__(self, key):
        self.key = key
        self.caught_excepts = ()
        self.lines = []
        self.e_successors = []
        self.n_successors = []
        # Vars used or defined within the block; excludes caught exceptions.
        # None until recalcVars() has run.
        self.vars = None

    def canThrow(self):
        """Return True if any line of the block may raise."""
        return ('canthrow', None) in self.lines

    def recalcVars(self):
        """Recompute self.vars from the current line records."""
        found = set()
        for kind, payload in self.lines:
            if kind == 'use':
                found.add(payload)
            elif kind == 'def':
                dst, src = payload
                found.add(dst)
                if src is not None:
                    found.add(src)
        self.vars = found

    def replace(self, replace):
        """Apply the substitution map to all line records and to self.vars."""
        if self.vars.isdisjoint(replace):
            return
        rewritten = []
        for kind, payload in self.lines:
            if kind == 'use':
                payload = replace.get(payload, payload)
            elif kind == 'def':
                dst, src = payload
                payload = replace.get(dst, dst), replace.get(src, src)
            rewritten.append((kind, payload))
        self.lines = rewritten
        for old, new in replace.items():
            if old in self.vars:
                self.vars.discard(old)
                self.vars.add(new)

    def simplify(self):
        """Prune no-op self-assignments and immediately repeated non-def lines."""
        pruned = []
        prev = None
        for line in self.lines:
            if line[0] == 'def':
                if line[1][0] == line[1][1]:
                    continue  # drop x = x
            elif line == prev:
                continue  # drop exact duplicate of the previous kept line
            pruned.append(line)
            prev = line
        self.lines = pruned
        self.recalcVars()
def varOrNone(expr):
    """Return expr if it is a local-variable AST node, otherwise None."""
    if isinstance(expr, ast.Local):
        return expr
    return None
def canThrow(expr):
    """Return True if evaluating `expr` may raise a Java exception."""
    always_throwing = (ast.ArrayAccess, ast.ArrayCreation, ast.Cast,
                       ast.ClassInstanceCreation, ast.FieldAccess,
                       ast.MethodInvocation)
    if isinstance(expr, always_throwing):
        return True
    if not isinstance(expr, ast.BinaryInfix) or expr.opstr not in ('/', '%'):
        return False
    # Integer / and % can throw on division by zero; float/double cannot.
    return expr.dtype not in (objtypes.FloatTT, objtypes.DoubleTT)
def visitExpr(expr, lines):
    """Append use/def/canthrow records for `expr` (depth-first) onto `lines`."""
    if expr is None:
        return
    if isinstance(expr, ast.Local):
        lines.append(('use', expr))
    if not isinstance(expr, ast.Assignment):
        for sub in expr.params:
            visitExpr(sub, lines)
    else:
        lhs = varOrNone(expr.params[0])
        rhs = varOrNone(expr.params[1])
        # Visit the LHS only when it isn't a plain local (avoids a spurious
        # use record); the RHS must be visited before the def is recorded.
        if lhs is None:
            visitExpr(expr.params[0], lines)
        visitExpr(expr.params[1], lines)
        if lhs is not None:
            lines.append(('def', (lhs, rhs)))
    if canThrow(expr):
        lines.append(('canthrow', None))
class DUGraph(object):
    """Def/use control-flow graph over DUBlocks, built by walking AST scopes."""
    def __init__(self):
        self.blocks = []
        self.entry = None
    def makeBlock(self, key, break_dict, caught_except, myexcept_parents):
        """Create a DUBlock for `key` and wire pending edges into it.

        break_dict maps block keys to blocks that jump there; the entry for
        `key` is consumed.  If caught_except is not None the block is a catch
        handler head and myexcept_parents are the throwing blocks it guards.
        """
        block = DUBlock(key)
        self.blocks.append(block)
        for parent in break_dict[block.key]:
            parent.n_successors.append(block)
        del break_dict[block.key]
        assert (myexcept_parents is None) == (caught_except is None)
        if caught_except is not None: # this is the head of a catch block:
            block.caught_excepts = (caught_except,)
            for parent in myexcept_parents:
                parent.e_successors.append(block)
        return block
    def finishBlock(self, block, catch_stack):
        """Finalize a block: register with enclosing handlers, compute its vars."""
        # register exception handlers for completed old block and calculate var set
        assert(block.vars is None) # make sure it wasn't finished twice
        if block.canThrow():
            for clist in catch_stack:
                clist.append(block)
        block.recalcVars()
    def visitScope(self, scope, break_dict, catch_stack, caught_except=None, myexcept_parents=None, head_block=None):
        """Walk one AST scope, creating blocks and edges for its statements.

        Returns the scope's head block so loop callers can fix up back edges.
        Each open try level owns one list in catch_stack that collects every
        throwing block inside it.
        """
        # catch_stack is copy on modify
        if head_block is None:
            head_block = block = self.makeBlock(scope.continueKey, break_dict, caught_except, myexcept_parents)
        else:
            block = head_block
        for stmt in scope.statements:
            if isinstance(stmt, (ast.ExpressionStatement, ast.ThrowStatement, ast.ReturnStatement)):
                visitExpr(stmt.expr, block.lines)
                if isinstance(stmt, ast.ThrowStatement):
                    block.lines.append(('canthrow', None))
                continue
            # compound statements
            assert stmt.continueKey is not None
            if isinstance(stmt, (ast.IfStatement, ast.SwitchStatement)):
                visitExpr(stmt.expr, block.lines)
                # ft: whether control can fall through past the sub-scopes.
                if isinstance(stmt, ast.SwitchStatement):
                    ft = not stmt.hasDefault()
                else:
                    ft = len(stmt.getScopes()) == 1
                for sub in stmt.getScopes():
                    break_dict[sub.continueKey].append(block)
                    self.visitScope(sub, break_dict, catch_stack)
                if ft:
                    break_dict[stmt.breakKey].append(block)
            elif isinstance(stmt, ast.WhileStatement):
                if stmt.expr != ast.Literal.TRUE: # while(cond)
                    assert stmt.breakKey is not None
                    self.finishBlock(block, catch_stack)
                    # Condition gets its own block so the back edge targets it.
                    block = self.makeBlock(stmt.continueKey, break_dict, None, None)
                    visitExpr(stmt.expr, block.lines)
                    break_dict[stmt.breakKey].append(block)
                break_dict[stmt.continueKey].append(block)
                body_block = self.visitScope(stmt.getScopes()[0], break_dict, catch_stack)
                continue_target = body_block if stmt.expr == ast.Literal.TRUE else block
                for parent in break_dict[stmt.continueKey]:
                    parent.n_successors.append(continue_target)
                del break_dict[stmt.continueKey]
            elif isinstance(stmt, ast.TryStatement):
                # One fresh handler list per catch clause, stacked over the
                # existing handlers (copy-on-modify).
                new_stack = catch_stack + [[] for _ in stmt.pairs]
                break_dict[stmt.tryb.continueKey].append(block)
                self.visitScope(stmt.tryb, break_dict, new_stack)
                for cdecl, catchb in stmt.pairs:
                    parents = new_stack.pop()
                    self.visitScope(catchb, break_dict, catch_stack, cdecl.local, parents)
                assert new_stack == catch_stack
            else:
                assert isinstance(stmt, ast.StatementBlock)
                break_dict[stmt.continueKey].append(block)
                self.visitScope(stmt, break_dict, catch_stack, head_block=block)
            if not isinstance(stmt, ast.StatementBlock): # if we passed it to subscope, it will be finished in the subcall
                self.finishBlock(block, catch_stack)
            if stmt.breakKey is not None: # start new block after return from compound statement
                block = self.makeBlock(stmt.breakKey, break_dict, None, None)
            else:
                block = None # should never be accessed anyway if we're exiting abruptly
        if scope.jumpKey is not None:
            break_dict[scope.jumpKey].append(block)
        if block is not None:
            self.finishBlock(block, catch_stack)
        return head_block # head needs to be returned in case of loops so we can fix up backedges
    def makeCFG(self, root):
        """Build the CFG from the root scope and drop unreachable blocks."""
        break_dict = ddict(list)
        self.visitScope(root, break_dict, [])
        self.entry = self.blocks[0] # entry point should always be first block generated
        reached = graph_util.topologicalSort([self.entry], lambda block:(block.n_successors + block.e_successors))
        # if len(reached) != len(self.blocks):
        #     print 'warning, {} blocks unreachable!'.format(len(self.blocks) - len(reached))
        self.blocks = reached
    def replace(self, replace):
        """Apply (and first path-compress) a variable substitution to all blocks."""
        flattenDict(replace)
        for block in self.blocks:
            block.replace(replace)
    def simplify(self):
        """Prune redundant def/use lines in every block."""
        for block in self.blocks:
            block.simplify()
def makeGraph(root):
    """Build and return the def/use CFG for the method body scope `root`."""
    graph = DUGraph()
    graph.makeCFG(root)
    return graph
/Electrum-CHI-3.3.8.tar.gz/Electrum-CHI-3.3.8/packages/pip/_vendor/html5lib/_tokenizer.py | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import unichr as chr
from collections import deque
from .constants import spaceCharacters
from .constants import entities
from .constants import asciiLetters, asciiUpper2Lower
from .constants import digits, hexDigits, EOF
from .constants import tokenTypes, tagTokenTypes
from .constants import replacementCharacters
from ._inputstream import HTMLInputStream
from ._trie import Trie
entitiesTrie = Trie(entities)
class HTMLTokenizer(object):
""" This class takes care of tokenizing HTML.
* self.currentToken
Holds the token that is currently being processed.
* self.state
Holds a reference to the method to be invoked... XXX
* self.stream
Points to HTMLInputStream object.
"""
    def __init__(self, stream, parser=None, **kwargs):
        """Wrap `stream` in an HTMLInputStream and reset tokenizer state.

        `parser` is retained for states needing parser context; extra
        keyword arguments are forwarded to HTMLInputStream.
        """
        self.stream = HTMLInputStream(stream, **kwargs)
        self.parser = parser

        # Setup the initial tokenizer state
        self.escapeFlag = False
        self.lastFourChars = []
        # self.state is the bound method for the current state; __iter__
        # calls it repeatedly until it returns False (EOF).
        self.state = self.dataState
        self.escape = False

        # The current token being created
        self.currentToken = None
        super(HTMLTokenizer, self).__init__()
    def __iter__(self):
        """ This is where the magic happens.

        We do our usually processing through the states and when we have a token
        to return we yield the token which pauses processing until the next token
        is requested.

        Stream-level errors are yielded as ParseError tokens before the
        queued tokens of each step.
        """
        self.tokenQueue = deque([])
        # Start processing. When EOF is reached self.state will return False
        # instead of True and the loop will terminate.
        while self.state():
            while self.stream.errors:
                yield {"type": tokenTypes["ParseError"], "data": self.stream.errors.pop(0)}
            while self.tokenQueue:
                yield self.tokenQueue.popleft()
    def consumeNumberEntity(self, isHex):
        """This function returns either U+FFFD or the character based on the
        decimal or hexadecimal representation. It also discards ";" if present.
        If not present self.tokenQueue.append({"type": tokenTypes["ParseError"]}) is invoked.
        """
        allowed = digits
        radix = 10
        if isHex:
            allowed = hexDigits
            radix = 16

        charStack = []

        # Consume all the characters that are in range while making sure we
        # don't hit an EOF.
        c = self.stream.char()
        while c in allowed and c is not EOF:
            charStack.append(c)
            c = self.stream.char()

        # Convert the set of characters consumed to an int.
        charAsInt = int("".join(charStack), radix)

        # Certain characters get replaced with others
        if charAsInt in replacementCharacters:
            char = replacementCharacters[charAsInt]
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "illegal-codepoint-for-numeric-entity",
                                    "datavars": {"charAsInt": charAsInt}})
        elif ((0xD800 <= charAsInt <= 0xDFFF) or
              (charAsInt > 0x10FFFF)):
            # Lone surrogates and out-of-range code points map to U+FFFD.
            char = "\uFFFD"
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "illegal-codepoint-for-numeric-entity",
                                    "datavars": {"charAsInt": charAsInt}})
        else:
            # Should speed up this check somehow (e.g. move the set to a constant)
            # Control characters and noncharacters are parse errors but are
            # still emitted as-is.
            if ((0x0001 <= charAsInt <= 0x0008) or
                (0x000E <= charAsInt <= 0x001F) or
                (0x007F <= charAsInt <= 0x009F) or
                (0xFDD0 <= charAsInt <= 0xFDEF) or
                    charAsInt in frozenset([0x000B, 0xFFFE, 0xFFFF, 0x1FFFE,
                                            0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
                                            0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE,
                                            0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE,
                                            0x7FFFF, 0x8FFFE, 0x8FFFF, 0x9FFFE,
                                            0x9FFFF, 0xAFFFE, 0xAFFFF, 0xBFFFE,
                                            0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
                                            0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE,
                                            0xFFFFF, 0x10FFFE, 0x10FFFF])):
                self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                        "data":
                                        "illegal-codepoint-for-numeric-entity",
                                        "datavars": {"charAsInt": charAsInt}})
            try:
                # Try/except needed as UCS-2 Python builds' unichar only works
                # within the BMP.
                char = chr(charAsInt)
            except ValueError:
                # Narrow build: encode as a surrogate pair manually.
                v = charAsInt - 0x10000
                char = chr(0xD800 | (v >> 10)) + chr(0xDC00 | (v & 0x3FF))

        # Discard the ; if present. Otherwise, put it back on the queue and
        # invoke parseError on parser.
        if c != ";":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "numeric-entity-without-semicolon"})
            self.stream.unget(c)

        return char
    def consumeEntity(self, allowedChar=None, fromAttribute=False):
        """Consume one character reference after "&".

        Numeric references are delegated to consumeNumberEntity; named ones
        are matched greedily against the entities trie.  The result is
        appended to the current attribute value (fromAttribute) or emitted
        as a Characters/SpaceCharacters token.
        """
        # Initialise to the default output for when no entity is matched
        output = "&"

        charStack = [self.stream.char()]
        if (charStack[0] in spaceCharacters or charStack[0] in (EOF, "<", "&") or
                (allowedChar is not None and allowedChar == charStack[0])):
            # Not an entity at all: emit the bare "&" and reprocess the char.
            self.stream.unget(charStack[0])

        elif charStack[0] == "#":
            # Read the next character to see if it's hex or decimal
            hex = False
            charStack.append(self.stream.char())
            if charStack[-1] in ("x", "X"):
                hex = True
                charStack.append(self.stream.char())

            # charStack[-1] should be the first digit
            if (hex and charStack[-1] in hexDigits) \
                    or (not hex and charStack[-1] in digits):
                # At least one digit found, so consume the whole number
                self.stream.unget(charStack[-1])
                output = self.consumeNumberEntity(hex)
            else:
                # No digits found
                self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                        "data": "expected-numeric-entity"})
                self.stream.unget(charStack.pop())
                output = "&" + "".join(charStack)

        else:
            # At this point in the process might have named entity. Entities
            # are stored in the global variable "entities".
            #
            # Consume characters and compare to these to a substring of the
            # entity names in the list until the substring no longer matches.
            while (charStack[-1] is not EOF):
                if not entitiesTrie.has_keys_with_prefix("".join(charStack)):
                    break
                charStack.append(self.stream.char())

            # At this point we have a string that starts with some characters
            # that may match an entity
            # Try to find the longest entity the string will match to take care
            # of &noti for instance.
            try:
                entityName = entitiesTrie.longest_prefix("".join(charStack[:-1]))
                entityLength = len(entityName)
            except KeyError:
                entityName = None

            if entityName is not None:
                if entityName[-1] != ";":
                    self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                            "named-entity-without-semicolon"})
                # In attributes a semicolon-less entity followed by an
                # alphanumeric or "=" is taken literally (legacy behavior).
                if (entityName[-1] != ";" and fromAttribute and
                    (charStack[entityLength] in asciiLetters or
                     charStack[entityLength] in digits or
                     charStack[entityLength] == "=")):
                    self.stream.unget(charStack.pop())
                    output = "&" + "".join(charStack)
                else:
                    output = entities[entityName]
                    self.stream.unget(charStack.pop())
                    output += "".join(charStack[entityLength:])
            else:
                self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                        "expected-named-entity"})
                self.stream.unget(charStack.pop())
                output = "&" + "".join(charStack)

        if fromAttribute:
            self.currentToken["data"][-1][1] += output
        else:
            if output in spaceCharacters:
                tokenType = "SpaceCharacters"
            else:
                tokenType = "Characters"
            self.tokenQueue.append({"type": tokenTypes[tokenType], "data": output})
    def processEntityInAttribute(self, allowedChar):
        """This method replaces the need for "entityInAttributeValueState".

        `allowedChar` is the quote/delimiter character that terminates the
        attribute value and therefore must not start an entity.
        """
        self.consumeEntity(allowedChar=allowedChar, fromAttribute=True)
    def emitCurrentToken(self):
        """This method is a generic handler for emitting the tags. It also sets
        the state to "data" because that's what's needed after a token has been
        emitted.
        """
        token = self.currentToken
        # Add token to the queue to be yielded
        if (token["type"] in tagTokenTypes):
            # Tag names are ASCII case-insensitive: normalize to lowercase.
            token["name"] = token["name"].translate(asciiUpper2Lower)
            if token["type"] == tokenTypes["EndTag"]:
                # End tags may carry neither attributes nor a self-closing flag.
                if token["data"]:
                    self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                            "data": "attributes-in-end-tag"})
                if token["selfClosing"]:
                    self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                            "data": "self-closing-flag-on-end-tag"})
        self.tokenQueue.append(token)
        self.state = self.dataState
# Below are the various tokenizer states worked out.
    def dataState(self):
        """Data state: default tokenization of ordinary text content.

        Returns False (stopping __iter__) when EOF is reached.
        """
        data = self.stream.char()
        if data == "&":
            self.state = self.entityDataState
        elif data == "<":
            self.state = self.tagOpenState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "\u0000"})
        elif data is EOF:
            # Tokenization ends.
            return False
        elif data in spaceCharacters:
            # Directly after emitting a token you switch back to the "data
            # state". At that point spaceCharacters are important so they are
            # emitted separately.
            self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
                                    data + self.stream.charsUntil(spaceCharacters, True)})
            # No need to update lastFourChars here, since the first space will
            # have already been appended to lastFourChars and will have broken
            # any <!-- or --> sequences
        else:
            chars = self.stream.charsUntil(("&", "<", "\u0000"))
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
                                    data + chars})
        return True
    def entityDataState(self):
        """Entity data state: "&" seen in data; consume it, then back to data."""
        self.consumeEntity()
        self.state = self.dataState
        return True
    def rcdataState(self):
        """RCDATA state (e.g. inside <title>/<textarea>): text with entities,
        no nested tags; only a matching end tag can close it.
        """
        data = self.stream.char()
        if data == "&":
            self.state = self.characterReferenceInRcdata
        elif data == "<":
            self.state = self.rcdataLessThanSignState
        elif data == EOF:
            # Tokenization ends.
            return False
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "\uFFFD"})
        elif data in spaceCharacters:
            # Directly after emitting a token you switch back to the "data
            # state". At that point spaceCharacters are important so they are
            # emitted separately.
            self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
                                    data + self.stream.charsUntil(spaceCharacters, True)})
            # No need to update lastFourChars here, since the first space will
            # have already been appended to lastFourChars and will have broken
            # any <!-- or --> sequences
        else:
            chars = self.stream.charsUntil(("&", "<", "\u0000"))
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
                                    data + chars})
        return True
    def characterReferenceInRcdata(self):
        """"&" seen in RCDATA: consume an entity, then return to RCDATA."""
        self.consumeEntity()
        self.state = self.rcdataState
        return True
    def rawtextState(self):
        """RAWTEXT state (e.g. inside <style>): no entities, no nested tags."""
        data = self.stream.char()
        if data == "<":
            self.state = self.rawtextLessThanSignState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "\uFFFD"})
        elif data == EOF:
            # Tokenization ends.
            return False
        else:
            chars = self.stream.charsUntil(("<", "\u0000"))
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
                                    data + chars})
        return True
    def scriptDataState(self):
        """Script data state (inside <script>): raw text with escape handling."""
        data = self.stream.char()
        if data == "<":
            self.state = self.scriptDataLessThanSignState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "\uFFFD"})
        elif data == EOF:
            # Tokenization ends.
            return False
        else:
            chars = self.stream.charsUntil(("<", "\u0000"))
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
                                    data + chars})
        return True
    def plaintextState(self):
        """PLAINTEXT state: everything to EOF is character data; no markup."""
        data = self.stream.char()
        if data == EOF:
            # Tokenization ends.
            return False
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "\uFFFD"})
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
                                    data + self.stream.charsUntil("\u0000")})
        return True
    def tagOpenState(self):
        """Tag open state: "<" seen in data; decide what kind of markup follows."""
        data = self.stream.char()
        if data == "!":
            self.state = self.markupDeclarationOpenState
        elif data == "/":
            self.state = self.closeTagOpenState
        elif data in asciiLetters:
            # Start of a start tag: begin accumulating the tag name.
            self.currentToken = {"type": tokenTypes["StartTag"],
                                 "name": data, "data": [],
                                 "selfClosing": False,
                                 "selfClosingAcknowledged": False}
            self.state = self.tagNameState
        elif data == ">":
            # XXX In theory it could be something besides a tag name. But
            # do we really care?
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-tag-name-but-got-right-bracket"})
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<>"})
            self.state = self.dataState
        elif data == "?":
            # XXX In theory it could be something besides a tag name. But
            # do we really care?
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-tag-name-but-got-question-mark"})
            self.stream.unget(data)
            self.state = self.bogusCommentState
        else:
            # XXX
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-tag-name"})
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
            self.stream.unget(data)
            self.state = self.dataState
        return True
    def closeTagOpenState(self):
        """End tag open state: "</" seen; expect a tag name next."""
        data = self.stream.char()
        if data in asciiLetters:
            self.currentToken = {"type": tokenTypes["EndTag"], "name": data,
                                 "data": [], "selfClosing": False}
            self.state = self.tagNameState
        elif data == ">":
            # "</>" is dropped entirely with a parse error.
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-closing-tag-but-got-right-bracket"})
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-closing-tag-but-got-eof"})
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
            self.state = self.dataState
        else:
            # XXX data can be _'_...
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-closing-tag-but-got-char",
                                    "datavars": {"data": data}})
            self.stream.unget(data)
            self.state = self.bogusCommentState
        return True
    def tagNameState(self):
        """Tag name state: accumulate the tag name into the current token."""
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.beforeAttributeNameState
        elif data == ">":
            self.emitCurrentToken()
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-tag-name"})
            self.state = self.dataState
        elif data == "/":
            self.state = self.selfClosingStartTagState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["name"] += "\uFFFD"
        else:
            self.currentToken["name"] += data
            # (Don't use charsUntil here, because tag names are
            # very short and it's faster to not do anything fancy)
        return True
def rcdataLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.rcdataEndTagOpenState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rcdataEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.rcdataEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.rcdataState
return True
    def rcdataEndTagNameState(self):
        """RCDATA end tag name state: accumulate the candidate end tag name.

        Only an "appropriate" end tag — one whose buffered name matches the
        current token's name case-insensitively — is tokenized; otherwise the
        buffered "</name" text is re-emitted as characters.
        """
        appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
        data = self.stream.char()
        if data in spaceCharacters and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.state = self.beforeAttributeNameState
        elif data == "/" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.state = self.selfClosingStartTagState
        elif data == ">" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.emitCurrentToken()
            self.state = self.dataState
        elif data in asciiLetters:
            self.temporaryBuffer += data
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "</" + self.temporaryBuffer})
            self.stream.unget(data)
            self.state = self.rcdataState
        return True
def rawtextLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.rawtextEndTagOpenState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.rawtextState
return True
def rawtextEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.rawtextEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.rawtextState
return True
    def rawtextEndTagNameState(self):
        """RAWTEXT end tag name state: accumulate the candidate end tag name.

        Only an "appropriate" end tag (buffered name matches the current
        token's name case-insensitively) is tokenized; otherwise the buffered
        "</name" text is re-emitted as characters.
        """
        appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
        data = self.stream.char()
        if data in spaceCharacters and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.state = self.beforeAttributeNameState
        elif data == "/" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.state = self.selfClosingStartTagState
        elif data == ">" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.emitCurrentToken()
            self.state = self.dataState
        elif data in asciiLetters:
            self.temporaryBuffer += data
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "</" + self.temporaryBuffer})
            self.stream.unget(data)
            self.state = self.rawtextState
        return True
def scriptDataLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.scriptDataEndTagOpenState
elif data == "!":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<!"})
self.state = self.scriptDataEscapeStartState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEndTagOpenState(self):
    """Handle the first character of a candidate script end-tag name."""
    char = self.stream.char()
    if char not in asciiLetters:
        # "</" was literal text; emit it and reprocess char as script data.
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
        self.stream.unget(char)
        self.state = self.scriptDataState
        return True
    self.temporaryBuffer += char
    self.state = self.scriptDataEndTagNameState
    return True
def scriptDataEndTagNameState(self):
    """Accumulate a script-data end-tag name; emit an EndTag only if appropriate."""
    # "Appropriate": buffered name matches the open tag case-insensitively.
    appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
    data = self.stream.char()
    if data in spaceCharacters and appropriate:
        self.currentToken = {"type": tokenTypes["EndTag"],
                             "name": self.temporaryBuffer,
                             "data": [], "selfClosing": False}
        self.state = self.beforeAttributeNameState
    elif data == "/" and appropriate:
        self.currentToken = {"type": tokenTypes["EndTag"],
                             "name": self.temporaryBuffer,
                             "data": [], "selfClosing": False}
        self.state = self.selfClosingStartTagState
    elif data == ">" and appropriate:
        self.currentToken = {"type": tokenTypes["EndTag"],
                             "name": self.temporaryBuffer,
                             "data": [], "selfClosing": False}
        self.emitCurrentToken()
        self.state = self.dataState
    elif data in asciiLetters:
        self.temporaryBuffer += data
    else:
        # Not an end tag: flush "</" + buffered name as characters and
        # reprocess the current character as script data.
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "</" + self.temporaryBuffer})
        self.stream.unget(data)
        self.state = self.scriptDataState
    return True
def scriptDataEscapeStartState(self):
    """After "<!" in script data: look for the first '-' of "<!--"."""
    char = self.stream.char()
    if char != "-":
        # Escape sequence not present; reprocess char as plain script data.
        self.stream.unget(char)
        self.state = self.scriptDataState
        return True
    self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
    self.state = self.scriptDataEscapeStartDashState
    return True
def scriptDataEscapeStartDashState(self):
    """After "<!-" in script data: look for the second '-' of "<!--"."""
    char = self.stream.char()
    if char != "-":
        # Escape sequence not completed; reprocess char as plain script data.
        self.stream.unget(char)
        self.state = self.scriptDataState
        return True
    self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
    self.state = self.scriptDataEscapedDashDashState
    return True
def scriptDataEscapedState(self):
    """Tokenize script data inside a "<!--" ... "-->" escaped section."""
    data = self.stream.char()
    if data == "-":
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
        self.state = self.scriptDataEscapedDashState
    elif data == "<":
        self.state = self.scriptDataEscapedLessThanSignState
    elif data == "\u0000":
        # NUL is a parse error and is replaced by U+FFFD per the spec.
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "\uFFFD"})
    elif data == EOF:
        self.state = self.dataState
    else:
        # Bulk-consume ordinary characters up to the next special one.
        chars = self.stream.charsUntil(("<", "-", "\u0000"))
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
                                data + chars})
    return True
def scriptDataEscapedDashState(self):
    """Escaped script data after a single '-'."""
    data = self.stream.char()
    if data == "-":
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
        self.state = self.scriptDataEscapedDashDashState
    elif data == "<":
        self.state = self.scriptDataEscapedLessThanSignState
    elif data == "\u0000":
        # NUL is a parse error and is replaced by U+FFFD per the spec.
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "\uFFFD"})
        self.state = self.scriptDataEscapedState
    elif data == EOF:
        self.state = self.dataState
    else:
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
        self.state = self.scriptDataEscapedState
    return True
def scriptDataEscapedDashDashState(self):
    """Escaped script data after "--"; '>' here closes the escape ("-->")."""
    data = self.stream.char()
    if data == "-":
        # Additional dashes stay in this state.
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
    elif data == "<":
        self.state = self.scriptDataEscapedLessThanSignState
    elif data == ">":
        # "-->" ends the escaped section; back to plain script data.
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
        self.state = self.scriptDataState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "\uFFFD"})
        self.state = self.scriptDataEscapedState
    elif data == EOF:
        self.state = self.dataState
    else:
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
        self.state = self.scriptDataEscapedState
    return True
def scriptDataEscapedLessThanSignState(self):
    """Handle '<' inside escaped script data."""
    data = self.stream.char()
    if data == "/":
        # Possible end tag: buffer the name that follows.
        self.temporaryBuffer = ""
        self.state = self.scriptDataEscapedEndTagOpenState
    elif data in asciiLetters:
        # "<x" may start a nested "<script" (double-escape); buffer the letters.
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<" + data})
        self.temporaryBuffer = data
        self.state = self.scriptDataDoubleEscapeStartState
    else:
        # Plain '<': emit it and reprocess the character.
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
        self.stream.unget(data)
        self.state = self.scriptDataEscapedState
    return True
def scriptDataEscapedEndTagOpenState(self):
    """First character of a candidate end-tag name in escaped script data."""
    char = self.stream.char()
    if char not in asciiLetters:
        # "</" was literal; emit it and reprocess char in the escaped state.
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
        self.stream.unget(char)
        self.state = self.scriptDataEscapedState
        return True
    self.temporaryBuffer = char
    self.state = self.scriptDataEscapedEndTagNameState
    return True
def scriptDataEscapedEndTagNameState(self):
    """Accumulate an end-tag name in escaped script data; emit if appropriate."""
    # "Appropriate": buffered name matches the open tag case-insensitively.
    appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
    data = self.stream.char()
    if data in spaceCharacters and appropriate:
        self.currentToken = {"type": tokenTypes["EndTag"],
                             "name": self.temporaryBuffer,
                             "data": [], "selfClosing": False}
        self.state = self.beforeAttributeNameState
    elif data == "/" and appropriate:
        self.currentToken = {"type": tokenTypes["EndTag"],
                             "name": self.temporaryBuffer,
                             "data": [], "selfClosing": False}
        self.state = self.selfClosingStartTagState
    elif data == ">" and appropriate:
        self.currentToken = {"type": tokenTypes["EndTag"],
                             "name": self.temporaryBuffer,
                             "data": [], "selfClosing": False}
        self.emitCurrentToken()
        self.state = self.dataState
    elif data in asciiLetters:
        self.temporaryBuffer += data
    else:
        # Not an end tag: flush "</" + buffered name and reprocess the
        # current character in the escaped state.
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "</" + self.temporaryBuffer})
        self.stream.unget(data)
        self.state = self.scriptDataEscapedState
    return True
def scriptDataDoubleEscapeStartState(self):
    """Decide whether a nested "<script" starts a double-escaped section."""
    data = self.stream.char()
    if data in (spaceCharacters | frozenset(("/", ">"))):
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
        # Only a buffered "script" (any case) enters double-escaped mode.
        if self.temporaryBuffer.lower() == "script":
            self.state = self.scriptDataDoubleEscapedState
        else:
            self.state = self.scriptDataEscapedState
    elif data in asciiLetters:
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
        self.temporaryBuffer += data
    else:
        self.stream.unget(data)
        self.state = self.scriptDataEscapedState
    return True
def scriptDataDoubleEscapedState(self):
    """Tokenize double-escaped script data (nested <script> inside <!-- -->)."""
    data = self.stream.char()
    if data == "-":
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
        self.state = self.scriptDataDoubleEscapedDashState
    elif data == "<":
        # '<' is emitted literally here, unlike in the single-escaped state.
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
        self.state = self.scriptDataDoubleEscapedLessThanSignState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "\uFFFD"})
    elif data == EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-script-in-script"})
        self.state = self.dataState
    else:
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
    return True
def scriptDataDoubleEscapedDashState(self):
    """Double-escaped script data after a single '-'."""
    data = self.stream.char()
    if data == "-":
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
        self.state = self.scriptDataDoubleEscapedDashDashState
    elif data == "<":
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
        self.state = self.scriptDataDoubleEscapedLessThanSignState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "\uFFFD"})
        self.state = self.scriptDataDoubleEscapedState
    elif data == EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-script-in-script"})
        self.state = self.dataState
    else:
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
        self.state = self.scriptDataDoubleEscapedState
    return True
def scriptDataDoubleEscapedDashDashState(self):
    """Double-escaped script data after "--"; '>' here closes the escape."""
    data = self.stream.char()
    if data == "-":
        # Additional dashes stay in this state.
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
    elif data == "<":
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
        self.state = self.scriptDataDoubleEscapedLessThanSignState
    elif data == ">":
        # "-->" ends the outer escaped section entirely.
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
        self.state = self.scriptDataState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "\uFFFD"})
        self.state = self.scriptDataDoubleEscapedState
    elif data == EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-script-in-script"})
        self.state = self.dataState
    else:
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
        self.state = self.scriptDataDoubleEscapedState
    return True
def scriptDataDoubleEscapedLessThanSignState(self):
    """Handle the character after '<' in double-escaped script data."""
    char = self.stream.char()
    if char != "/":
        # The '<' did not start "</"; reprocess char in double-escaped data.
        self.stream.unget(char)
        self.state = self.scriptDataDoubleEscapedState
        return True
    # '/' is emitted literally; buffer a name for the escape-end check.
    self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "/"})
    self.temporaryBuffer = ""
    self.state = self.scriptDataDoubleEscapeEndState
    return True
def scriptDataDoubleEscapeEndState(self):
    """Decide whether "</script" ends the double-escaped section."""
    data = self.stream.char()
    if data in (spaceCharacters | frozenset(("/", ">"))):
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
        # Mirror image of scriptDataDoubleEscapeStartState: a buffered
        # "script" drops back to the single-escaped state.
        if self.temporaryBuffer.lower() == "script":
            self.state = self.scriptDataEscapedState
        else:
            self.state = self.scriptDataDoubleEscapedState
    elif data in asciiLetters:
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
        self.temporaryBuffer += data
    else:
        self.stream.unget(data)
        self.state = self.scriptDataDoubleEscapedState
    return True
def beforeAttributeNameState(self):
    """Skip whitespace before an attribute name and start a new attribute.

    New attributes are appended to currentToken["data"] as [name, value]
    lists; value filling happens in the attribute-value states.
    """
    data = self.stream.char()
    if data in spaceCharacters:
        # Bulk-skip any further whitespace in one stream call.
        self.stream.charsUntil(spaceCharacters, True)
    elif data in asciiLetters:
        self.currentToken["data"].append([data, ""])
        self.state = self.attributeNameState
    elif data == ">":
        self.emitCurrentToken()
    elif data == "/":
        self.state = self.selfClosingStartTagState
    elif data in ("'", '"', "=", "<"):
        # Parse error, but the character still begins an attribute name.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "invalid-character-in-attribute-name"})
        self.currentToken["data"].append([data, ""])
        self.state = self.attributeNameState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["data"].append(["\uFFFD", ""])
        self.state = self.attributeNameState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "expected-attribute-name-but-got-eof"})
        self.state = self.dataState
    else:
        self.currentToken["data"].append([data, ""])
        self.state = self.attributeNameState
    return True
def attributeNameState(self):
    """Accumulate the current attribute's name.

    On leaving this state the name is lowercased and checked against the
    earlier attributes for duplicates (reported as a parse error; the
    duplicate itself is dropped later, when the token is emitted).
    """
    data = self.stream.char()
    leavingThisState = True
    emitToken = False
    if data == "=":
        self.state = self.beforeAttributeValueState
    elif data in asciiLetters:
        # Bulk-consume a run of letters in one stream call.
        self.currentToken["data"][-1][0] += data +\
            self.stream.charsUntil(asciiLetters, True)
        leavingThisState = False
    elif data == ">":
        # XXX If we emit here the attributes are converted to a dict
        # without being checked and when the code below runs we error
        # because data is a dict not a list
        emitToken = True
    elif data in spaceCharacters:
        self.state = self.afterAttributeNameState
    elif data == "/":
        self.state = self.selfClosingStartTagState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["data"][-1][0] += "\uFFFD"
        leavingThisState = False
    elif data in ("'", '"', "<"):
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data":
                                "invalid-character-in-attribute-name"})
        self.currentToken["data"][-1][0] += data
        leavingThisState = False
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "eof-in-attribute-name"})
        self.state = self.dataState
    else:
        self.currentToken["data"][-1][0] += data
        leavingThisState = False
    if leavingThisState:
        # Attributes are not dropped at this stage. That happens when the
        # start tag token is emitted so values can still be safely appended
        # to attributes, but we do want to report the parse error in time.
        self.currentToken["data"][-1][0] = (
            self.currentToken["data"][-1][0].translate(asciiUpper2Lower))
        for name, _ in self.currentToken["data"][:-1]:
            if self.currentToken["data"][-1][0] == name:
                self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                        "duplicate-attribute"})
                break
        # XXX Fix for above XXX
        if emitToken:
            self.emitCurrentToken()
    return True
def afterAttributeNameState(self):
    """Handle what follows a completed attribute name (value, new attr, or end)."""
    data = self.stream.char()
    if data in spaceCharacters:
        # Bulk-skip any further whitespace in one stream call.
        self.stream.charsUntil(spaceCharacters, True)
    elif data == "=":
        self.state = self.beforeAttributeValueState
    elif data == ">":
        self.emitCurrentToken()
    elif data in asciiLetters:
        # A letter starts a new (valueless so far) attribute.
        self.currentToken["data"].append([data, ""])
        self.state = self.attributeNameState
    elif data == "/":
        self.state = self.selfClosingStartTagState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["data"].append(["\uFFFD", ""])
        self.state = self.attributeNameState
    elif data in ("'", '"', "<"):
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "invalid-character-after-attribute-name"})
        self.currentToken["data"].append([data, ""])
        self.state = self.attributeNameState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "expected-end-of-tag-but-got-eof"})
        self.state = self.dataState
    else:
        self.currentToken["data"].append([data, ""])
        self.state = self.attributeNameState
    return True
def beforeAttributeValueState(self):
    """Determine how the attribute value is quoted (double, single, or not)."""
    data = self.stream.char()
    if data in spaceCharacters:
        # Bulk-skip any further whitespace in one stream call.
        self.stream.charsUntil(spaceCharacters, True)
    elif data == "\"":
        self.state = self.attributeValueDoubleQuotedState
    elif data == "&":
        # '&' begins an unquoted value; reprocess it there so the
        # entity handling in that state sees it.
        self.state = self.attributeValueUnQuotedState
        self.stream.unget(data)
    elif data == "'":
        self.state = self.attributeValueSingleQuotedState
    elif data == ">":
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "expected-attribute-value-but-got-right-bracket"})
        self.emitCurrentToken()
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["data"][-1][1] += "\uFFFD"
        self.state = self.attributeValueUnQuotedState
    elif data in ("=", "<", "`"):
        # Parse error, but the character still starts the unquoted value.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "equals-in-unquoted-attribute-value"})
        self.currentToken["data"][-1][1] += data
        self.state = self.attributeValueUnQuotedState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "expected-attribute-value-but-got-eof"})
        self.state = self.dataState
    else:
        self.currentToken["data"][-1][1] += data
        self.state = self.attributeValueUnQuotedState
    return True
def attributeValueDoubleQuotedState(self):
    """Accumulate a double-quoted attribute value up to the closing '\"'."""
    data = self.stream.char()
    if data == "\"":
        self.state = self.afterAttributeValueState
    elif data == "&":
        # '"' is the "additional allowed character" for entity parsing here.
        self.processEntityInAttribute('"')
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["data"][-1][1] += "\uFFFD"
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-attribute-value-double-quote"})
        self.state = self.dataState
    else:
        # Bulk-consume ordinary characters up to the next special one.
        self.currentToken["data"][-1][1] += data +\
            self.stream.charsUntil(("\"", "&", "\u0000"))
    return True
def attributeValueSingleQuotedState(self):
    """Accumulate a single-quoted attribute value up to the closing "'"."""
    data = self.stream.char()
    if data == "'":
        self.state = self.afterAttributeValueState
    elif data == "&":
        # "'" is the "additional allowed character" for entity parsing here.
        self.processEntityInAttribute("'")
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["data"][-1][1] += "\uFFFD"
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-attribute-value-single-quote"})
        self.state = self.dataState
    else:
        # Bulk-consume ordinary characters up to the next special one.
        self.currentToken["data"][-1][1] += data +\
            self.stream.charsUntil(("'", "&", "\u0000"))
    return True
def attributeValueUnQuotedState(self):
    """Accumulate an unquoted attribute value up to whitespace or '>'."""
    data = self.stream.char()
    if data in spaceCharacters:
        self.state = self.beforeAttributeNameState
    elif data == "&":
        # '>' is the "additional allowed character" for entity parsing here.
        self.processEntityInAttribute(">")
    elif data == ">":
        self.emitCurrentToken()
    elif data in ('"', "'", "=", "<", "`"):
        # Parse error, but the character is still part of the value.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "unexpected-character-in-unquoted-attribute-value"})
        self.currentToken["data"][-1][1] += data
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["data"][-1][1] += "\uFFFD"
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-attribute-value-no-quotes"})
        self.state = self.dataState
    else:
        # Bulk-consume ordinary characters up to the next special one.
        self.currentToken["data"][-1][1] += data + self.stream.charsUntil(
            frozenset(("&", ">", '"', "'", "=", "<", "`", "\u0000")) | spaceCharacters)
    return True
def afterAttributeValueState(self):
    """Handle the character after a quoted attribute value."""
    data = self.stream.char()
    if data in spaceCharacters:
        self.state = self.beforeAttributeNameState
    elif data == ">":
        self.emitCurrentToken()
    elif data == "/":
        self.state = self.selfClosingStartTagState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "unexpected-EOF-after-attribute-value"})
        self.stream.unget(data)
        self.state = self.dataState
    else:
        # Missing whitespace between attributes: report and reprocess the
        # character as the start of a new attribute name.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "unexpected-character-after-attribute-value"})
        self.stream.unget(data)
        self.state = self.beforeAttributeNameState
    return True
def selfClosingStartTagState(self):
    """Handle the character after '/' inside a tag (expecting '>')."""
    data = self.stream.char()
    if data == ">":
        self.currentToken["selfClosing"] = True
        self.emitCurrentToken()
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data":
                                "unexpected-EOF-after-solidus-in-tag"})
        self.stream.unget(data)
        self.state = self.dataState
    else:
        # Stray '/': report and reprocess the character as an attribute name.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "unexpected-character-after-solidus-in-tag"})
        self.stream.unget(data)
        self.state = self.beforeAttributeNameState
    return True
def bogusCommentState(self):
    """Consume malformed markup up to '>' (or EOF) and emit it as a comment."""
    # Make a new comment token and give it as value all the characters
    # until the first > or EOF (charsUntil checks for EOF automatically)
    # and emit it.
    data = self.stream.charsUntil(">")
    # NULs inside the bogus comment become U+FFFD per the spec.
    data = data.replace("\u0000", "\uFFFD")
    self.tokenQueue.append(
        {"type": tokenTypes["Comment"], "data": data})

    # Eat the character directly after the bogus comment which is either a
    # ">" or an EOF.
    self.stream.char()
    self.state = self.dataState
    return True
def markupDeclarationOpenState(self):
    """Dispatch "<!" markup: comment ("--"), DOCTYPE, or CDATA section.

    Characters are collected on charStack so they can all be ungetted
    (in reverse) if no construct matches, falling back to a bogus comment.
    """
    charStack = [self.stream.char()]
    if charStack[-1] == "-":
        charStack.append(self.stream.char())
        if charStack[-1] == "-":
            # "<!--" starts a comment.
            self.currentToken = {"type": tokenTypes["Comment"], "data": ""}
            self.state = self.commentStartState
            return True
    elif charStack[-1] in ('d', 'D'):
        # Case-insensitive match of the remaining letters of "DOCTYPE".
        matched = True
        for expected in (('o', 'O'), ('c', 'C'), ('t', 'T'),
                         ('y', 'Y'), ('p', 'P'), ('e', 'E')):
            charStack.append(self.stream.char())
            if charStack[-1] not in expected:
                matched = False
                break
        if matched:
            self.currentToken = {"type": tokenTypes["Doctype"],
                                 "name": "",
                                 "publicId": None, "systemId": None,
                                 "correct": True}
            self.state = self.doctypeState
            return True
    elif (charStack[-1] == "[" and
          self.parser is not None and
          self.parser.tree.openElements and
          self.parser.tree.openElements[-1].namespace != self.parser.tree.defaultNamespace):
        # CDATA sections are only recognized in foreign (non-HTML) content;
        # here the match of "CDATA[" is case-sensitive.
        matched = True
        for expected in ["C", "D", "A", "T", "A", "["]:
            charStack.append(self.stream.char())
            if charStack[-1] != expected:
                matched = False
                break
        if matched:
            self.state = self.cdataSectionState
            return True

    # No construct matched: report, push everything back, and treat the
    # rest as a bogus comment.
    self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                            "expected-dashes-or-doctype"})

    while charStack:
        self.stream.unget(charStack.pop())
    self.state = self.bogusCommentState
    return True
def commentStartState(self):
    """Handle the first character after "<!--"."""
    data = self.stream.char()
    if data == "-":
        self.state = self.commentStartDashState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["data"] += "\uFFFD"
    elif data == ">":
        # "<!-->" is an (incorrect) empty comment.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "incorrect-comment"})
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-comment"})
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        self.currentToken["data"] += data
        self.state = self.commentState
    return True
def commentStartDashState(self):
    """Handle the character after "<!---" (comment start then one dash)."""
    data = self.stream.char()
    if data == "-":
        self.state = self.commentEndState
    elif data == "\u0000":
        # The pending '-' is flushed into the data along with U+FFFD.
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["data"] += "-\uFFFD"
    elif data == ">":
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "incorrect-comment"})
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-comment"})
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        # The pending '-' turns out to be comment content.
        self.currentToken["data"] += "-" + data
        self.state = self.commentState
    return True
def commentState(self):
    """Accumulate comment content until a '-' that may begin "-->"."""
    data = self.stream.char()
    if data == "-":
        self.state = self.commentEndDashState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["data"] += "\uFFFD"
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "eof-in-comment"})
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        # Bulk-consume ordinary characters up to the next special one.
        self.currentToken["data"] += data + \
            self.stream.charsUntil(("-", "\u0000"))
    return True
def commentEndDashState(self):
    """One '-' seen inside a comment; check for the second of "-->"."""
    data = self.stream.char()
    if data == "-":
        self.state = self.commentEndState
    elif data == "\u0000":
        # The pending '-' is flushed into the data along with U+FFFD.
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["data"] += "-\uFFFD"
        self.state = self.commentState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-comment-end-dash"})
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        # The pending '-' turns out to be comment content.
        self.currentToken["data"] += "-" + data
        self.state = self.commentState
    return True
def commentEndState(self):
    """Two dashes seen inside a comment; '>' closes it ("-->")."""
    data = self.stream.char()
    if data == ">":
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif data == "\u0000":
        # Both pending dashes are flushed into the data with U+FFFD.
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["data"] += "--\uFFFD"
        self.state = self.commentState
    elif data == "!":
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "unexpected-bang-after-double-dash-in-comment"})
        self.state = self.commentEndBangState
    elif data == "-":
        # Extra dash: report it; only the newest pair can end the comment.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "unexpected-dash-after-double-dash-in-comment"})
        self.currentToken["data"] += data
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-comment-double-dash"})
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        # XXX
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "unexpected-char-in-comment"})
        self.currentToken["data"] += "--" + data
        self.state = self.commentState
    return True
def commentEndBangState(self):
    """"--!" seen inside a comment; '>' still closes it ("--!>")."""
    data = self.stream.char()
    if data == ">":
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif data == "-":
        # "--!-": flush "--!" as data, dash may start a new comment end.
        self.currentToken["data"] += "--!"
        self.state = self.commentEndDashState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["data"] += "--!\uFFFD"
        self.state = self.commentState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-comment-end-bang-state"})
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        # The pending "--!" turns out to be comment content.
        self.currentToken["data"] += "--!" + data
        self.state = self.commentState
    return True
def doctypeState(self):
    """Handle the character immediately after "<!DOCTYPE"."""
    data = self.stream.char()
    if data in spaceCharacters:
        self.state = self.beforeDoctypeNameState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "expected-doctype-name-but-got-eof"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        # Missing space after DOCTYPE: report and reprocess the character
        # as the start of the doctype name.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "need-space-after-doctype"})
        self.stream.unget(data)
        self.state = self.beforeDoctypeNameState
    return True
def beforeDoctypeNameState(self):
    """Skip whitespace and begin the doctype name."""
    data = self.stream.char()
    if data in spaceCharacters:
        pass
    elif data == ">":
        # "<!DOCTYPE>" has no name: emit an incorrect doctype.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "expected-doctype-name-but-got-right-bracket"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["name"] = "\uFFFD"
        self.state = self.doctypeNameState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "expected-doctype-name-but-got-eof"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        self.currentToken["name"] = data
        self.state = self.doctypeNameState
    return True
def doctypeNameState(self):
    """Accumulate the doctype name; it is lowercased when complete."""
    data = self.stream.char()
    if data in spaceCharacters:
        self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
        self.state = self.afterDoctypeNameState
    elif data == ">":
        self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["name"] += "\uFFFD"
        self.state = self.doctypeNameState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-doctype-name"})
        self.currentToken["correct"] = False
        self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        self.currentToken["name"] += data
    return True
def afterDoctypeNameState(self):
    """After the doctype name: look for PUBLIC/SYSTEM keywords or '>'.

    The PUBLIC/SYSTEM match is case-insensitive and consumes characters
    as it goes; on a failed match only the last character needs to be
    pushed back (see the comment below).
    """
    data = self.stream.char()
    if data in spaceCharacters:
        pass
    elif data == ">":
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif data is EOF:
        self.currentToken["correct"] = False
        self.stream.unget(data)
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-doctype"})
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        if data in ("p", "P"):
            # Try to match the rest of "PUBLIC" case-insensitively.
            matched = True
            for expected in (("u", "U"), ("b", "B"), ("l", "L"),
                             ("i", "I"), ("c", "C")):
                data = self.stream.char()
                if data not in expected:
                    matched = False
                    break
            if matched:
                self.state = self.afterDoctypePublicKeywordState
                return True
        elif data in ("s", "S"):
            # Try to match the rest of "SYSTEM" case-insensitively.
            matched = True
            for expected in (("y", "Y"), ("s", "S"), ("t", "T"),
                             ("e", "E"), ("m", "M")):
                data = self.stream.char()
                if data not in expected:
                    matched = False
                    break
            if matched:
                self.state = self.afterDoctypeSystemKeywordState
                return True

        # All the characters read before the current 'data' will be
        # [a-zA-Z], so they're garbage in the bogus doctype and can be
        # discarded; only the latest character might be '>' or EOF
        # and needs to be ungetted
        self.stream.unget(data)
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "expected-space-or-right-bracket-in-doctype", "datavars":
                                {"data": data}})
        self.currentToken["correct"] = False
        self.state = self.bogusDoctypeState

    return True
def afterDoctypePublicKeywordState(self):
    """Handle the character after the PUBLIC keyword."""
    data = self.stream.char()
    if data in spaceCharacters:
        self.state = self.beforeDoctypePublicIdentifierState
    elif data in ("'", '"'):
        # Missing space before the quoted identifier: report, then
        # reprocess the quote in the before-identifier state.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "unexpected-char-in-doctype"})
        self.stream.unget(data)
        self.state = self.beforeDoctypePublicIdentifierState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        self.stream.unget(data)
        self.state = self.beforeDoctypePublicIdentifierState
    return True
def beforeDoctypePublicIdentifierState(self):
    """Skip whitespace and open the (quoted) doctype public identifier."""
    data = self.stream.char()
    if data in spaceCharacters:
        pass
    elif data == "\"":
        self.currentToken["publicId"] = ""
        self.state = self.doctypePublicIdentifierDoubleQuotedState
    elif data == "'":
        self.currentToken["publicId"] = ""
        self.state = self.doctypePublicIdentifierSingleQuotedState
    elif data == ">":
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "unexpected-end-of-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        # Unquoted identifier is invalid: the rest is a bogus doctype.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "unexpected-char-in-doctype"})
        self.currentToken["correct"] = False
        self.state = self.bogusDoctypeState
    return True
def doctypePublicIdentifierDoubleQuotedState(self):
    """Accumulate a double-quoted doctype public identifier."""
    data = self.stream.char()
    if data == "\"":
        self.state = self.afterDoctypePublicIdentifierState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["publicId"] += "\uFFFD"
    elif data == ">":
        # '>' inside the identifier ends the doctype prematurely.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "unexpected-end-of-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        self.currentToken["publicId"] += data
    return True
def doctypePublicIdentifierSingleQuotedState(self):
    """Accumulate a single-quoted doctype public identifier."""
    data = self.stream.char()
    if data == "'":
        self.state = self.afterDoctypePublicIdentifierState
    elif data == "\u0000":
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "invalid-codepoint"})
        self.currentToken["publicId"] += "\uFFFD"
    elif data == ">":
        # '>' inside the identifier ends the doctype prematurely.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "unexpected-end-of-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        self.currentToken["publicId"] += data
    return True
def afterDoctypePublicIdentifierState(self):
    """Handle what follows the public identifier (optional system identifier)."""
    data = self.stream.char()
    if data in spaceCharacters:
        self.state = self.betweenDoctypePublicAndSystemIdentifiersState
    elif data == ">":
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif data == '"':
        # Missing space before the system identifier: report but accept it.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "unexpected-char-in-doctype"})
        self.currentToken["systemId"] = ""
        self.state = self.doctypeSystemIdentifierDoubleQuotedState
    elif data == "'":
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "unexpected-char-in-doctype"})
        self.currentToken["systemId"] = ""
        self.state = self.doctypeSystemIdentifierSingleQuotedState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "eof-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "unexpected-char-in-doctype"})
        self.currentToken["correct"] = False
        self.state = self.bogusDoctypeState
    return True
def betweenDoctypePublicAndSystemIdentifiersState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == '"':
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def afterDoctypeSystemKeywordState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypeSystemIdentifierState
elif data in ("'", '"'):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypeSystemIdentifierState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.stream.unget(data)
self.state = self.beforeDoctypeSystemIdentifierState
return True
def beforeDoctypeSystemIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == "\"":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def doctypeSystemIdentifierDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterDoctypeSystemIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["systemId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["systemId"] += data
return True
def doctypeSystemIdentifierSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterDoctypeSystemIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["systemId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["systemId"] += data
return True
def afterDoctypeSystemIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.state = self.bogusDoctypeState
return True
def bogusDoctypeState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
# XXX EMIT
self.stream.unget(data)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
pass
return True
def cdataSectionState(self):
data = []
while True:
data.append(self.stream.charsUntil("]"))
data.append(self.stream.charsUntil(">"))
char = self.stream.char()
if char == EOF:
break
else:
assert char == ">"
if data[-1][-2:] == "]]":
data[-1] = data[-1][:-2]
break
else:
data.append(char)
data = "".join(data) # pylint:disable=redefined-variable-type
# Deal with null here rather than in the parser
nullCount = data.count("\u0000")
if nullCount > 0:
for _ in range(nullCount):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
data = data.replace("\u0000", "\uFFFD")
if data:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": data})
self.state = self.dataState
return True | PypiClean |
import sys as _sys
import os as _os
import traceback as _traceback

# Shortcut for printing the most recent traceback when debugging interactively.
_p = _traceback.print_last

# spinmob is a hard dependency; fail loudly and early if it is missing.
try: import spinmob
except: raise Exception('You definitely need to install spinmob to do anything in mcphysics.')

# Add the appropriate paths for different operating systems
# Location of the linux libm2k dll
if not _sys.platform in ['win32', 'darwin']: _sys.path.append(_os.path.join(__path__[0], 'libm2k', 'linux'))

# Get the version by executing the first line of setup.py, which is expected
# to assign __version__; fall back to 'unknown' if that fails for any reason.
try: exec(spinmob.fun.read_lines(_os.path.join(__path__[0],'setup.py'))[0])
except: __version__ = 'unknown'
def _safe_import(lib):
    """Try to import the named module, returning it, or None if unavailable.

    Used for the semi-optional hardware / media libraries below so that a
    missing driver does not prevent mcphysics itself from importing.
    """
    import importlib
    try:
        # importlib avoids the exec/eval round-trip of the original
        # implementation and returns the module object directly.
        return importlib.import_module(lib)
    except Exception:
        # Broken optional libraries can raise more than ImportError while
        # initializing, so any failure simply yields None.
        return None
# Semi-optional hardware / media libraries: each name below is bound to the
# imported module, or to None when that library is not installed.
_imageio = _safe_import('imageio')
_libm2k = _safe_import('libm2k')
_visa = _safe_import('visa')
_serial = _safe_import('serial')
_minimalmodbus = _safe_import('minimalmodbus')
_sounddevice = _safe_import('sounddevice')
# Global switch for the debug print helper below.
_debug_enabled = False

def _debug(*a):
    """Print the arguments, comma-separated, when debugging is enabled."""
    if _debug_enabled:
        print(', '.join(str(x) for x in a))
def check_installation():
    """
    Prints out the status of the optional libraries.

    Each module is imported in turn; successes are listed under INSTALLED
    and failures under MISSING. Returns None.
    """
    import importlib

    # Import names of the optional libraries. visa, serial and OpenGL are
    # distributed on PyPI as pyvisa, pyserial and pyopengl, so they are
    # renamed before being reported.
    modules = [
        'imageio',
        'lmfit',
        'libm2k',
        'matplotlib',
        'minimalmodbus',
        'numpy',
        'pyqtgraph',
        'OpenGL',
        'scipy',
        'serial',
        'sounddevice',
        'visa',]

    def _display_name(m):
        # Report the PyPI package name where it differs from the import name.
        return 'py'+m.lower() if m in ['visa', 'serial', 'OpenGL'] else m

    # Try importing them
    installed = []
    missing   = []
    for m in modules:
        try:
            # importlib replaces the original exec('import '+m); any failure
            # (not just ImportError) counts as "missing".
            importlib.import_module(m)
            installed.append(_display_name(m))
        except Exception:
            missing.append(_display_name(m))

    if len(installed): print('\nINSTALLED\n  '+'\n  '.join(installed))
    if len(missing):   print('\nMISSING\n  '  +'\n  '.join(missing))
    print()
# Public submodules of the mcphysics package.
import mcphysics.instruments as instruments
import mcphysics.experiments as experiments
from . import data
from . import functions
from . import playground
Browser-friendly inheritance fully compatible with standard node.js
[inherits](http://nodejs.org/api/util.html#util_util_inherits_constructor_superconstructor).
This package exports standard `inherits` from node.js `util` module in
node environment, but also provides alternative browser-friendly
implementation through [browser
field](https://gist.github.com/shtylman/4339901). Alternative
implementation is a literal copy of standard one located in standalone
module to avoid requiring of `util`. It also has a shim for old
browsers with no `Object.create` support.
While keeping you sure you are using standard `inherits`
implementation in node.js environment, it allows bundlers such as
[browserify](https://github.com/substack/node-browserify) to not
include the full `util` package in your client code if all you need is
just the `inherits` function. It is worthwhile, because the browser shim
for the `util` package is large and `inherits` is often the only function
you need from it.
It's recommended to use this package instead of
`require('util').inherits` for any code that might be used not only in
node.js but also in the browser.
## usage
```js
var inherits = require('inherits');
// then use exactly as the standard one
```
## note on version ~1.0
Version ~1.0 had completely different motivation and is not compatible
neither with 2.0 nor with standard node.js `inherits`.
If you are using version ~1.0 and planning to switch to ~2.0, be
careful:
* new version uses `super_` instead of `super` for referencing
superclass
* new version overwrites current prototype while old one preserves any
existing fields on it
| PypiClean |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.