id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7 values |
|---|---|---|
/GeoNode-3.2.0-py3-none-any.whl/geonode/static/geonode/js/ol-2.13/lib/OpenLayers/Strategy/Refresh.js | * @requires OpenLayers/Strategy.js
*/
/**
* Class: OpenLayers.Strategy.Refresh
* A strategy that refreshes the layer. By default the strategy waits for a
* call to <refresh> before refreshing. By configuring the strategy with
* the <interval> option, refreshing can take place automatically.
*
* Inherits from:
* - <OpenLayers.Strategy>
*/
OpenLayers.Strategy.Refresh = OpenLayers.Class(OpenLayers.Strategy, {
/**
* Property: force
* {Boolean} Force a refresh on the layer. Default is false.
*/
force: false,
/**
* Property: interval
* {Number} Auto-refresh. Default is 0. If > 0, layer will be refreshed
* every N milliseconds.
*/
interval: 0,
/**
* Property: timer
* {Number} The id of the timer.
*/
timer: null,
/**
* Constructor: OpenLayers.Strategy.Refresh
* Create a new Refresh strategy.
*
* Parameters:
* options - {Object} Optional object whose properties will be set on the
* instance.
*/
/**
* APIMethod: activate
* Activate the strategy. Register any listeners, do appropriate setup.
*
* Returns:
* {Boolean} True if the strategy was successfully activated.
*/
activate: function() {
var activated = OpenLayers.Strategy.prototype.activate.call(this);
if(activated) {
if(this.layer.visibility === true) {
this.start();
}
this.layer.events.on({
"visibilitychanged": this.reset,
scope: this
});
}
return activated;
},
/**
* APIMethod: deactivate
* Deactivate the strategy. Unregister any listeners, do appropriate
* tear-down.
*
* Returns:
* {Boolean} True if the strategy was successfully deactivated.
*/
deactivate: function() {
var deactivated = OpenLayers.Strategy.prototype.deactivate.call(this);
if(deactivated) {
this.stop();
this.layer.events.un({
"visibilitychanged": this.reset,
scope: this
});
}
return deactivated;
},
/**
* Method: reset
* Start or cancel the refresh interval depending on the visibility of
* the layer.
*/
reset: function() {
if(this.layer.visibility === true) {
this.start();
} else {
this.stop();
}
},
/**
* Method: start
* Start the refresh interval.
*/
start: function() {
if(this.interval && typeof this.interval === "number" &&
this.interval > 0) {
this.timer = window.setInterval(
OpenLayers.Function.bind(this.refresh, this),
this.interval);
}
},
/**
* APIMethod: refresh
* Tell the strategy to refresh which will refresh the layer.
*/
refresh: function() {
if (this.layer && this.layer.refresh &&
typeof this.layer.refresh == "function") {
this.layer.refresh({force: this.force});
}
},
/**
* Method: stop
* Cancels the refresh interval.
*/
stop: function() {
if(this.timer !== null) {
window.clearInterval(this.timer);
this.timer = null;
}
},
CLASS_NAME: "OpenLayers.Strategy.Refresh"
}); | PypiClean |
/Flask-Copilot-0.2.0.tar.gz/Flask-Copilot-0.2.0/README.rst | ============================
Flask-Copilot |build status|
============================
.. |build status| image:: https://travis-ci.org/jonafato/Flask-Copilot.svg?branch=master
:target: https://travis-ci.org/jonafato/Flask-Copilot
Handling your app's navigation, so you don't have to.
Resources
=========
- `Documentation <https://flask-copilot.rtfd.org/>`_
- `Issue Tracker <https://github.com/jonafato/Flask-Copilot/issues>`_
- `Code <https://github.com/jonafato/Flask-Copilot>`_
- `Changelog <CHANGES.rst>`_
License
=======
Flask-Copilot is available under the BSD 3-Clause license. See the ``LICENSE``
file for more details.
Contributing
============
Issues and pull requests are always welcome.
| PypiClean |
/CleverHarold-0.1.tar.gz/CleverHarold-0.1/harold/form/responder/jsonexc.py |
# Copyright 2006 The Incorporated Wizzo Widget Works
# Distributed under the terms of the GNU General Public License v2
# Author: Troy Melhase <troy@gci.net>
##
# JSON exception decorator
#
# The json decorator calls its decorated function normally,
# and if there is an exception during the call, it instead returns the
# exception values as a json-encoded mapping.
#
# This is especially useful when combined with the validators, because
# the validators store all of their exceptions before raising their own.
# The effect of this is that all parameters are validated and all
# invalid values are specified at once.
#
# Note that the json decorator *does not* encode normal
# responses, only exceptions. To encode the normal responses, the
# Harold code publisher should be used. The code publisher and this
# decorator should be used together.
#
# When a decorated function raises a ValidationError, as it will when one of its
# validators fails, it will return a mapping in the form of::
#
# {'errors' : [sequence of errors],
# 'values' : [sequence, of valid values] }
#
# When any other exception type is raised, the 'values' sequence will be empty.
#
#
##
from inspect import getargspec
from harold.lib import keys, con_type, make_annotated, ValidationError
json_anno_template = """
def %(name)s%(signature)s:
%(docstring)r
return %(name)s.func_json_anno%(values)s
"""
def json(**attrs):
""" returns decorator for making json exception handlers
@param **attrs arbitrary keyword-value pairs to assign to the decorated function
@return decorator function that wraps its original with JSON exception handling
"""
content_type = 'content_type'
exc_status = '500 Internal Server Error'
if content_type not in attrs:
attrs[content_type] = con_type.json
def make_json_env(func, params, kwds):
""" make_json_env(...) -> hack to locate the wsgi environ and munge it
"""
args, varargs, varkw, defaults = getargspec(func)
if keys.env in args:
environ = params[args.index(keys.env)]
else:
environ = kwds.get(keys.env, {})
environ[keys.content_type] = con_type.json
environ[keys.response_status] = exc_status
def json_deco(original):
""" json_deco(original) -> replace original with a json-enabled copy
"""
def json_anno(*varparams, **keyparams):
""" json_anno(...) -> annotation which makes exceptions json-friendly
original return values and execption values should be
json-encoded elsewhere. (e.g., by the harold code
publisher).
"""
try:
return original(*varparams, **keyparams)
except (ValidationError, ), exc:
make_json_env(original, varparams, keyparams)
return dict(errors=exc.args[0], values=exc.args[1])
except (Exception, ), exc:
make_json_env(original, varparams, keyparams)
return dict(errors=[str(exc), ], values=[])
replacement = make_annotated(original, json_anno_template)
for k, v in attrs.items():
setattr(replacement, k, v)
replacement.func_json_anno = json_anno
return replacement
return json_deco | PypiClean |
/Blogofile-0.8.3.tar.gz/Blogofile-0.8.3/blogofile/default_config.py |
######################################################################
# Basic Settings
# (almost all sites will want to configure these settings)
######################################################################
## site.url -- Your site's full URL
# Your "site" is the same thing as your _site directory.
# If you're hosting a blogofile powered site as a subdirectory of a larger
# non-blogofile site, then you would set the site_url to the full URL
# including that subdirectory: "http://www.yoursite.com/path/to/blogofile-dir"
site.url = "http://www.example.com"
## site.author -- Your name, the author of the website.
# This is optional. If set to anything other than None, the
# simple_blog template creates a meta tag for the site author.
site.author = None
######################################################################
# Advanced Settings
######################################################################
# Use hard links when copying files. This saves disk space and shortens
# the time to build sites that copy lots of static files.
# This is turned off by default though, because hard links are not
# necessarily what every user wants.
site.use_hard_links = False
#Warn when we're overwriting a file?
site.overwrite_warning = True
# These are the default ignore patterns for excluding files and dirs
# from the _site directory
# These can be strings or compiled patterns.
# Strings are assumed to be case insensitive.
site.file_ignore_patterns = [
# All files that start with an underscore
".*/_.*",
# Emacs autosave files
".*/#.*",
# Emacs/Vim backup files
".*~$",
# Vim swap files
".*/\..*\.swp$",
# VCS directories
".*/\.(git|hg|svn|bzr)$",
# Git and Mercurial ignored files definitions
".*/.(git|hg)ignore$",
# CVS dir
".*/CVS$",
]
from blogofile.template import MakoTemplate, JinjaTemplate, \
MarkdownTemplate, RestructuredTextTemplate, TextileTemplate
#The site base template filename:
site.base_template = "site.mako"
#Template engines mapped to file extensions:
templates.engines = HC(
mako = MakoTemplate,
jinja = JinjaTemplate,
jinja2 = JinjaTemplate,
markdown = MarkdownTemplate,
rst = RestructuredTextTemplate,
textile = TextileTemplate
)
#Template content blocks:
templates.content_blocks = HC(
mako = HC(
pattern = re.compile("\${\W*next.body\(\)\W*}"),
replacement = "${next.body()}"
),
jinja2 = HC(
pattern = re.compile("{%\W*block content\W*%}.*?{%\W*endblock\W*%}", re.MULTILINE|re.DOTALL),
replacement = "{% block content %} {% endblock %}"
),
filter = HC(
pattern = re.compile("_^"), #Regex that matches nothing
replacement = "~~!`FILTER_CONTENT_HERE`!~~",
default_chains = HC(
markdown = "syntax_highlight, markdown",
rst = "syntax_highlight, rst"
)
)
)
### Pre/Post build hooks:
def pre_build():
#Do whatever you want before the _site is built.
pass
def post_build():
#Do whatever you want after the _site is built successfully.
pass
def build_exception():
#Do whatever you want if there is an unrecoverable error in building the site.
pass
def build_finally():
#Do whatever you want after the _site is built successfully OR after a fatal error
pass | PypiClean |
/Henson_AMQP-0.9.0-py3-none-any.whl/Henson_AMQP-0.9.0.dist-info/DESCRIPTION.rst | ##########################
Henson-AMQP |build status|
##########################
.. |build status| image:: https://travis-ci.org/iheartradio/Henson-AMQP.svg?branch=master
:target: https://travis-ci.org/iheartradio/Henson-AMQP
A library for interacting with AMQP with a Henson application.
* `Documentation <https://henson-amqp.readthedocs.io>`_
* `Installation <https://henson-amqp.readthedocs.io/en/latest/#installation>`_
* `Changelog <https://henson-amqp.readthedocs.io/en/latest/changes.html>`_
* `Source <https://github.com/iheartradio/Henson-AMQP>`_
| PypiClean |
/Diofant-0.14.0a2.tar.gz/Diofant-0.14.0a2/diofant/polys/rings.py |
from __future__ import annotations
import functools
import math
import operator
from ..config import query
from ..core import Expr, Integer, Symbol, cacheit
from ..core import symbols as _symbols
from ..core.compatibility import is_sequence
from ..core.sympify import CantSympify, sympify
from ..domains.compositedomain import CompositeDomain
from ..domains.domainelement import DomainElement
from ..domains.ring import CommutativeRing
from ..ntheory import multinomial_coefficients
from ..ntheory.modular import symmetric_residue
from .euclidtools import _GCD
from .factortools import _Factor
from .monomials import Monomial
from .orderings import lex
from .polyerrors import (CoercionFailed, ExactQuotientFailed, GeneratorsError,
GeneratorsNeeded, PolynomialDivisionFailed)
from .polyoptions import Domain as DomainOpt
from .polyoptions import Order as OrderOpt
from .specialpolys import _test_polys
from .sqfreetools import _SQF
def ring(symbols, domain, order=lex):
"""Construct a polynomial ring returning ``(ring, x_1, ..., x_n)``.
Parameters
==========
symbols : str, Symbol/Expr or sequence of str, Symbol/Expr (non-empty)
domain : :class:`~diofant.domains.domain.Domain` or coercible
order : :class:`~diofant.polys.polyoptions.Order` or coercible, optional, defaults to ``lex``
Examples
========
>>> R, x, y, z = ring('x y z', ZZ)
>>> R
ZZ[x,y,z]
>>> x + y + z
x + y + z
"""
_ring = PolynomialRing(domain, symbols, order)
return (_ring,) + _ring.gens
def _parse_symbols(symbols):
if not symbols:
raise GeneratorsNeeded("generators weren't specified")
if isinstance(symbols, str):
return _symbols(symbols, seq=True)
elif isinstance(symbols, Expr):
return symbols,
elif is_sequence(symbols):
if all(isinstance(s, str) for s in symbols):
return _symbols(symbols)
elif all(isinstance(s, Expr) for s in symbols):
return symbols
raise GeneratorsError('expected a string, Symbol or expression '
'or a non-empty sequence of strings, '
'Symbols or expressions')
class PolynomialRing(_GCD, CommutativeRing, CompositeDomain, _SQF, _Factor, _test_polys):
"""A class for representing multivariate polynomial rings."""
is_PolynomialRing = True
has_assoc_Ring = True
def __new__(cls, domain, symbols, order=lex):
from .univar import UnivarPolyElement, UnivarPolynomialRing
symbols = tuple(_parse_symbols(symbols))
ngens = len(symbols)
domain = DomainOpt.preprocess(domain)
order = OrderOpt.preprocess(order)
new_cls = PolynomialRing if ngens > 1 else UnivarPolynomialRing
key = new_cls.__name__, symbols, ngens, domain, order
obj = _ring_cache.get(key)
if obj is None:
if isinstance(domain, CompositeDomain) and set(symbols) & set(domain.symbols):
raise GeneratorsError("polynomial ring and it's ground domain share generators")
obj = object.__new__(new_cls)
obj._hash = hash(key)
if new_cls == UnivarPolynomialRing:
dtype = UnivarPolyElement
else:
dtype = PolyElement
obj.dtype = type(dtype.__name__, (dtype,), {'ring': obj})
obj.symbols = symbols
obj.ngens = ngens
obj.domain = domain
obj.order = order
obj.zero_monom = Monomial((0,)*ngens)
gens = []
one = domain.one
expv = [0]*ngens
for i in range(ngens):
expv[i] = 1
poly = obj.zero
poly[expv] = one
gens.append(poly)
expv[i] = 0
obj.gens = tuple(gens)
obj.rep = str(domain) + '[' + ','.join(map(str, symbols)) + ']'
for symbol, generator in zip(obj.symbols, obj.gens):
if isinstance(symbol, Symbol):
name = symbol.name
if not hasattr(obj, name):
setattr(obj, name, generator)
_ring_cache[key] = obj
return obj
def __getnewargs_ex__(self):
return (self.domain, self.symbols), {'order': self.order}
def leading_expv(self, f, order=None):
order = self.order if order is None else OrderOpt.preprocess(order)
return Monomial(max(f, key=order))
@property
def characteristic(self):
return self.domain.characteristic
def __hash__(self):
return self._hash
def __eq__(self, other):
return self is other
def clone(self, symbols=None, domain=None, order=None):
return self.__class__(domain or self.domain, symbols or self.symbols, order or self.order)
@property
def zero(self):
return self.dtype()
@property
def one(self):
return self.ground_new(self.domain.one)
def domain_new(self, element, orig_domain=None):
return self.domain.convert(element, orig_domain)
def ground_new(self, coeff):
return self.term_new(self.zero_monom, coeff)
def term_new(self, monom, coeff):
poly = self.zero
if coeff := self.domain.convert(coeff):
poly[monom] = coeff
return poly
def __call__(self, element):
if isinstance(element, PolyElement):
if self == element.ring:
return element
elif isinstance(self.domain, PolynomialRing) and self.domain.ring == element.ring:
return self.ground_new(element)
else:
raise NotImplementedError
elif isinstance(element, str):
raise NotImplementedError
elif isinstance(element, dict):
return self.from_dict(element)
elif isinstance(element, list):
return self.from_terms(element)
elif isinstance(element, Expr):
return self.convert(element)
else:
return self.ground_new(element)
def from_dict(self, element):
domain_new = self.domain.convert
poly = self.zero
for monom, coeff in element.items():
if isinstance(monom, int):
monom = monom,
if coeff := domain_new(coeff):
poly[monom] = coeff
return poly
def from_terms(self, element):
return self.from_dict(dict(element))
def from_expr(self, expr):
expr = sympify(expr)
mapping = dict(zip(self.symbols, self.gens))
def _rebuild(expr):
if (generator := mapping.get(expr)) is not None:
return generator
elif expr.is_Add:
return functools.reduce(operator.add, map(_rebuild, expr.args))
elif expr.is_Mul:
return functools.reduce(operator.mul, map(_rebuild, expr.args))
elif expr.is_Pow:
c, a = expr.exp.as_coeff_Mul(rational=True)
if c.is_Integer and c > 1:
return _rebuild(expr.base**a)**int(c)
return self.ground_new(self.domain.convert(expr))
try:
return _rebuild(expr)
except CoercionFailed as exc:
raise ValueError('expected an expression convertible to a '
f'polynomial in {self}, got {expr}') from exc
def index(self, gen):
"""Compute index of ``gen`` in ``self.gens``."""
try:
if isinstance(gen, int) and -self.ngens <= gen < self.ngens:
return gen % self.ngens
elif isinstance(gen, self.dtype):
return self.gens.index(gen)
elif isinstance(gen, str):
return self.symbols.index(Symbol(gen))
elif isinstance(gen, Expr):
return self.symbols.index(gen)
except ValueError:
pass
raise ValueError('expected a polynomial generator, an integer, '
f'a string, an expression or None, got {gen}')
def drop(self, *gens):
"""Remove specified generators from this ring."""
indices = set(map(self.index, gens))
symbols = [s for i, s in enumerate(self.symbols) if i not in indices]
if not symbols:
return self.domain
else:
return self.clone(symbols=symbols)
def to_ground(self):
domain = self.domain
if isinstance(domain, CompositeDomain) or domain.is_AlgebraicField:
return self.clone(domain=domain.domain)
else:
raise ValueError(f'{domain} is not a composite or algebraic domain')
@property
def is_univariate(self):
return len(self.gens) == 1
@property
def is_multivariate(self):
return len(self.gens) > 1
def eject(self, *gens):
r"""
Remove specified generators from the ring and inject them into
its domain.
"""
indices = set(map(self.index, gens))
symbols = [s for i, s in enumerate(self.symbols) if i not in indices]
gens = [gen for i, gen in enumerate(self.gens) if i not in indices]
if not symbols:
return self
else:
return self.clone(symbols=symbols, domain=self.drop(*gens))
def to_expr(self, element):
symbols = self.symbols
domain = self.domain
return functools.reduce(operator.add,
(domain.to_expr(v)*k.as_expr(*symbols)
for k, v in element.items()),
Integer(0))
def _from_PythonFiniteField(self, a, K0):
if self.domain == K0:
return self(a)
_from_GMPYFiniteField = _from_PythonFiniteField
_from_AlgebraicField = _from_PythonFiniteField
_from_ExpressionDomain = _from_PythonFiniteField
def _from_PythonIntegerRing(self, a, K0):
return self(self.domain.convert(a, K0))
_from_GMPYIntegerRing = _from_PythonIntegerRing
_from_PythonRationalField = _from_PythonIntegerRing
_from_GMPYRationalField = _from_PythonIntegerRing
_from_RealField = _from_PythonIntegerRing
_from_ComplexField = _from_PythonIntegerRing
def _from_PolynomialRing(self, a, K0):
try:
return a.set_ring(self)
except (CoercionFailed, GeneratorsError):
return
def _from_FractionField(self, a, K0):
if self.domain == K0:
return self.ground_new(a)
(q,), r = a.numerator.div([a.denominator])
if not r:
return self.convert(q, K0.field.ring)
@property
def field(self):
"""Returns a field associated with ``self``."""
return self.domain.frac_field(*self.symbols, order=self.order)
def is_normal(self, a):
return self.domain.is_normal(a.LC)
def gcdex(self, a, b):
"""Extended GCD of ``a`` and ``b``."""
return a.gcdex(b)
def half_gcdex(self, a, b):
"""Half extended GCD of ``a`` and ``b``."""
return a.half_gcdex(b)
_ring_cache: dict[tuple, PolynomialRing] = {}
class PolyElement(DomainElement, CantSympify, dict):
"""Element of multivariate distributed polynomial ring.
Polynomial element is mutable, until the hash is computed, e.g.
when the polynomial was added to the :class:`set`.
If one is interested in preserving a polynomial, and one plans
to use inplace operations, one can copy the polynomial first.
Examples
========
>>> _, x, y = ring('x y', ZZ)
>>> p = (x + y)**2
>>> p1 = p.copy()
>>> p2 = p
>>> p[(0, 0)] = 3
>>> p1
x**2 + 2*x*y + y**2
>>> p2
x**2 + 2*x*y + y**2 + 3
>>> _ = hash(p)
>>> p[(1, 1)] = 2
Traceback (most recent call last):
...
RuntimeError: ... Polynomial element ... can't be modified ...
See Also
========
PolynomialRing
"""
@property
def parent(self):
return self.ring
_hash = None
def __hash__(self):
_hash = self._hash
if _hash is None:
self._hash = _hash = hash((self.ring, frozenset(self.items())))
return _hash
def __reduce__(self):
return self.parent.__call__, (dict(self),)
def copy(self):
"""Return a shallow copy of self."""
return self.__class__(self)
def set_ring(self, new_ring):
ring = self.ring
symbols = ring.symbols
new_symbols = new_ring.symbols
if ring == new_ring:
return self
elif ring == new_ring.domain:
return new_ring.ground_new(self)
elif set(new_symbols).issuperset(symbols):
coeffs = self.values()
new_monoms = [[] for _ in range(len(self))]
for gen in new_symbols:
try:
j = symbols.index(gen)
for M, new_M in zip(self, new_monoms):
new_M.append(M[j])
except ValueError:
for new_M in new_monoms:
new_M.append(0)
terms = zip(map(Monomial, new_monoms), coeffs)
return new_ring.from_terms(terms)
else:
raise CoercionFailed(f"Can't set element ring to {new_ring}")
def set_domain(self, new_domain):
if self.ring.domain == new_domain:
return self
else:
new_ring = self.ring.clone(domain=new_domain)
return self.set_ring(new_ring)
def clear_denoms(self, convert=False):
domain = self.ring.domain
if not domain.is_Field:
return domain.one, self
if domain.has_assoc_Ring:
ground_ring = domain.ring
else:
ground_ring = domain
common = ground_ring.one
lcm = ground_ring.lcm
for coeff in self.values():
common = lcm(common, coeff.denominator)
f = self*domain.convert(common)
if convert:
f = f.set_domain(ground_ring)
return common, f
def _strip_zero(self):
"""Eliminate monomials with zero coefficient."""
for k, v in list(self.items()):
if not v:
del self[k]
def __setitem__(self, key, item):
if self._hash is not None:
raise RuntimeError(f"Polynomial element {self} can't be"
' modified anymore.')
if not isinstance(key, Monomial):
key = Monomial(key)
super().__setitem__(key, item)
def __eq__(self, other):
"""Equality test for polynomials.
Examples
========
>>> _, x, y = ring('x y', ZZ)
>>> p1 = (x + y)**2 + (x - y)**2
>>> p1 == 4*x*y
False
>>> p1 == 2*(x**2 + y**2)
True
"""
if not other:
return not self
elif isinstance(other, self.ring.dtype):
return dict.__eq__(self, other)
elif isinstance(other, self.ring.field.dtype):
return other.__eq__(self)
elif len(self) > 1:
return False
else:
return self.get(self.ring.zero_monom) == other
def __ne__(self, other):
return not self.__eq__(other)
def drop(self, gen):
ring = self.ring
i = ring.index(gen)
if ring.is_univariate:
if self.is_ground:
return self[1]
else:
raise ValueError(f"can't drop {gen}")
else:
symbols = list(ring.symbols)
del symbols[i]
ring = ring.clone(symbols=symbols)
poly = ring.zero
for k, v in self.items():
if k[i] == 0:
K = list(k)
del K[i]
poly[K] = v
else:
raise ValueError(f"can't drop {gen}")
return poly
def eject(self, *gens):
ring = self.ring
if not gens:
return self
if ring.is_univariate:
raise ValueError("can't drop only generator to ground")
indexes = [ring.index(gen) for gen in gens]
ring = ring.eject(*indexes)
poly = ring.zero
gens = ring.domain.gens[0:len(indexes)]
for monom, coeff in self.items():
mon = tuple(monom[i] for i in range(self.ring.ngens) if i not in indexes)
gc = functools.reduce(operator.mul, [x**n for x, n in zip(gens, (monom[i] for i in indexes))])
if mon in poly:
poly[mon] += gc*coeff
else:
poly[mon] = gc*coeff
return poly
def inject(self, front=False):
ring = self.ring
domain = ring.domain
if not (isinstance(domain, CompositeDomain) or domain.is_AlgebraicField):
return self
new_ring = ring.to_ground()
new_ring = new_ring.inject(*domain.symbols, front=front)
poly = new_ring.zero
for monom, coeff in self.items():
coeff = coeff.to_dict()
for cmonom, ccoeff in coeff.items():
if front:
cmonom += monom
else:
cmonom = monom + cmonom
poly[cmonom] = ccoeff
return poly
def to_dict(self):
return dict(self)
def _str(self, printer, precedence, exp_pattern, mul_symbol):
if not self:
return printer._print(self.ring.domain.zero)
prec_add = precedence['Add']
prec_atom = precedence['Atom']
ring = self.ring
symbols = ring.symbols
ngens = ring.ngens
zm = ring.zero_monom
order = ring.order
sexpvs = []
for expv, coeff in sorted(self.items(),
key=lambda m: order(m[0]),
reverse=True):
normal = ring.domain.is_normal(coeff)
sign = ' + ' if normal else ' - '
sexpvs.append(sign)
if expv == zm:
scoeff = printer._print(coeff)
if scoeff.startswith('-'):
scoeff = scoeff[1:]
else:
if not normal:
coeff = -coeff
if coeff != 1:
scoeff = printer.parenthesize(coeff, prec_add)
else:
scoeff = ''
sexpv = []
for i in range(ngens):
exp = expv[i]
if not exp:
continue
symbol = printer.parenthesize(symbols[i], prec_atom-1)
if exp != 1:
sexpv.append(exp_pattern % (symbol, exp))
else:
sexpv.append(f'{symbol}')
if scoeff:
sexpv = [scoeff] + sexpv
sexpvs.append(mul_symbol.join(sexpv))
head = sexpvs.pop(0)
if head == ' - ':
sexpvs.insert(0, '-')
return ''.join(sexpvs)
@property
def is_generator(self):
return self.is_monomial and self.total_degree() == 1
@property
def is_ground(self):
return not self or (len(self) == 1 and self.ring.zero_monom in self)
@property
def is_monomial(self):
return not self or (len(self) == 1 and self.LC == 1)
@property
def is_term(self):
return len(self) <= 1
@property
def is_linear(self):
return all(sum(monom) <= 1 for monom in self)
@property
def is_quadratic(self):
return all(sum(monom) <= 2 for monom in self)
@property
def is_irreducible(self):
ring = self.ring
domain = ring.domain
if ring.is_univariate:
if domain.is_FiniteField:
method = query('GF_IRRED_METHOD')
_irred_methods = {'ben-or': ring._gf_irreducible_p_ben_or,
'rabin': ring._gf_irreducible_p_rabin}
return _irred_methods[method](self)
elif domain.is_IntegerRing:
res = ring._zz_irreducible_p(self)
if res is not None:
return res
_, factors = self.factor_list()
if not factors:
return True
elif len(factors) > 1:
return False
else:
return factors[0][1] == 1
@property
def is_homogeneous(self):
if not self:
return True
lm = self.LM
tdeg = sum(lm)
return all(sum(monom) == tdeg for monom in self if monom != lm)
def __neg__(self):
return self.__class__({monom: -self[monom] for monom in self})
def __pos__(self):
return self
def __abs__(self):
return self.__class__({monom: abs(self[monom]) for monom in self})
def __add__(self, other):
"""Add two polynomials."""
ring = self.ring
try:
other = ring.convert(other)
except CoercionFailed:
return NotImplemented
result = self.copy()
for t in other.items():
result = result._iadd_term(t)
return result
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
"""Subtract polynomial other from self."""
ring = self.ring
try:
other = ring.convert(other)
except CoercionFailed:
return NotImplemented
result = self.copy()
for k, v in other.items():
result = result._iadd_term((k, -v))
return result
def __rsub__(self, other):
"""Substract self from other, with other convertible to the coefficient domain."""
return (-self).__add__(other)
def __mul__(self, other):
"""Multiply two polynomials."""
ring = self.ring
domain = ring.domain
zero = ring.zero
if not other:
return zero
elif isinstance(other, domain.dtype):
result = ring.dtype({monom: self[monom]*other for monom in self})
if not domain.is_Field and not domain.is_IntegerRing:
result._strip_zero()
return result
try:
other = ring.convert(other)
except CoercionFailed:
return NotImplemented
if len(other) == 1:
[(m, c)] = other.items()
return self.__class__({monom*m: self[monom]*c for monom in self})
result = zero
for t in self.items():
result = result._iadd_poly_term(other, t)
return result
def __rmul__(self, other):
"""Multiply other to self with other in the coefficient domain of self."""
return self.__mul__(other)
def __pow__(self, n, mod=None):
"""Raise polynomial to power `n`."""
ring = self.ring
if not isinstance(n, int) or n < 0:
raise ValueError('exponent must be a nonnegative integer')
if not n:
return ring.one
elif len(self) > 5 or mod:
return self._pow_generic(n, mod)
elif len(self) == 1:
[(monom, coeff)] = self.items()
p = ring.zero
p[monom**n] = coeff**n
return p
elif n == 1:
return self.copy()
elif n == 2:
return self._square()
elif n == 3:
return self*self._square()
else:
return self._pow_multinomial(n)
def _pow_generic(self, n, mod=None):
p = self.ring.one
c = self
while n:
if n & 1:
p *= c
if mod:
p %= mod
n -= 1
c = c._square()
if mod:
c %= mod
n //= 2
return p
def _pow_multinomial(self, n):
multinomials = multinomial_coefficients(len(self), n).items()
ring = self.ring
zero_monom = ring.zero_monom
terms = self.items()
poly = ring.zero
for multinomial, multinomial_coeff in multinomials:
product_monom = zero_monom
product_coeff = multinomial_coeff
for exp, (monom, coeff) in zip(multinomial, terms):
if exp:
product_monom *= monom**exp
product_coeff *= coeff**exp
poly = poly._iadd_term((product_monom, product_coeff))
return poly
def _square(self):
"""Square of a polynomial."""
ring = self.ring
p = ring.zero
get = p.get
keys = list(self)
zero = ring.domain.zero
for i, k1 in enumerate(keys):
pk = self[k1]
for j in range(i):
k2 = keys[j]
exp = k1*k2
p[exp] = get(exp, zero) + pk*self[k2]
p += p
get = p.get
for k, v in self.items():
k2 = k**2
p[k2] = get(k2, zero) + v**2
p._strip_zero()
return p
def __divmod__(self, other):
ring = self.ring
domain = ring.domain
if not other:
raise ZeroDivisionError('polynomial division')
if isinstance(other, ring.dtype):
(q,), r = self.div([other])
return q, r
elif isinstance(other, PolyElement):
if isinstance(domain, PolynomialRing) and domain.ring == other.ring:
pass
else:
return NotImplemented
try:
other = ring.domain_new(other)
except CoercionFailed:
return NotImplemented
else:
return self.quo_ground(other), self.trunc_ground(other)
def __mod__(self, other):
return divmod(self, other)[1]
def __floordiv__(self, other):
return divmod(self, other)[0]
def __truediv__(self, other):
ring = self.ring
if not other:
raise ZeroDivisionError('polynomial division')
if isinstance(other, ring.domain.dtype):
return self.quo_ground(other)
try:
other = ring.domain_new(other)
except CoercionFailed:
return NotImplemented
else:
return self.quo_ground(other)
def div(self, fv):
"""Division algorithm for multivariate polynomials.
Parameters
==========
fv : sequence of PolyElement's
List of divsors.
Returns
=======
(qv, r) : tuple
Where qv is the sequence of quotients and r is the remainder.
Notes
=====
For multivariate polynomials the remainder is not uniquely
determined, unless divisors form a Gröbner basis.
Examples
========
>>> _, x, y = ring('x y', ZZ)
>>> f = x**2*y
>>> f1, f2 = x**2 - y, x*y - 1
>>> f.div([f1, f2])
([y, 0], y**2)
>>> f.div([f2, f1])
([x, 0], x)
>>> g1, g2 = x - y**2, y**3 - 1
>>> f.div([g1, g2])[1] == f.div([g2, g1])[1]
True
References
==========
* :cite:`Cox2015ideals`, p. 64.
"""
ring = self.ring
order = ring.order
if any(not f for f in fv):
raise ZeroDivisionError('polynomial division')
if any(f.ring != ring for f in fv):
raise ValueError('self and f must have the same ring')
if not self:
return [ring.zero], ring.zero
s = len(fv)
qv = [ring.zero for i in range(s)]
p = self.copy()
r = ring.zero
expvs = [fx.leading_expv() for fx in fv]
while p:
i = 0
divoccurred = 0
while i < s and divoccurred == 0:
lt = p.leading_term()
term = lt.quo_term((expvs[i], fv[i][expvs[i]]))
if term:
expv1, c = term.LT
qv[i] = qv[i]._iadd_term((expv1, c))
p = p._iadd_poly_term(fv[i], (expv1, -c))
divoccurred = 1
if p and order(p.LM) >= order(lt.LM):
raise PolynomialDivisionFailed(self, fv[i], self.ring)
else:
i += 1
if not divoccurred:
expv = p.leading_expv()
r = r._iadd_term((expv, p[expv]))
del p[expv]
r._hash = None
return qv, r
def exquo(self, other):
    """Exact quotient of self by other; raises if the division is inexact."""
    q, r = divmod(self, other)
    if not r:
        return q
    else:
        raise ExactQuotientFailed(self, other)

def _iadd_term(self, term):
    """Add to self the term inplace.

    If self is a generator -- then just return the sum of the two
    (generators are shared objects and must never be mutated).
    """
    ring = self.ring
    domain = ring.domain
    p1 = self
    if p1.is_generator:
        p1 = p1.copy()  # never mutate a shared generator instance
    monom, coeff = term
    coeff += p1.get(monom, domain.zero)
    if coeff:
        p1[monom] = coeff
    elif monom in p1:
        # Coefficient cancelled to zero: drop the monomial entirely.
        del p1[monom]
    return p1

def _iadd_poly_term(self, p2, term):
    """Add inplace to self the product of p2 and term.

    If self is a generator -- then just return the sum of the two.
    """
    p1 = self
    if p1.is_generator:
        p1 = p1.copy()
    monom, coeff = term
    for m, c in p2.items():
        # Accumulate each term of p2 scaled by the given term.
        m *= monom
        c *= coeff
        p1 = p1._iadd_term((m, c))
    return p1
def degree(self, x=0):
    """
    The leading degree in ``x`` or the main variable.

    Note that the degree of 0 is negative floating-point infinity.

    """
    i = self.ring.index(x)
    # max over exponents of x in all monomials; empty poly -> -inf
    return max((monom[i] for monom in self), default=-math.inf)

def tail_degree(self, x=0):
    """
    The tail degree in ``x`` or the main variable.

    Note that the degree of 0 is negative floating-point infinity.

    """
    i = self.ring.index(x)
    return min((monom[i] for monom in self), default=-math.inf)

def total_degree(self):
    """Returns the total degree (largest sum of exponents over all monomials)."""
    return max((sum(m) for m in self), default=-math.inf)
def leading_expv(self, order=None):
    """Leading monomial tuple according to the monomial ordering.

    Examples
    ========

    >>> _, x, y, z = ring('x y z', ZZ)

    >>> p = x**4 + x**3*y + x**2*z**2 + z**7
    >>> p.leading_expv()
    (4, 0, 0)

    """
    if self:
        return self.ring.leading_expv(self, order=order)
    # Implicitly returns None for the zero polynomial.

def _get_coeff(self, expv):
    # Coefficient for the exponent vector, defaulting to the domain's zero.
    return self.get(expv, self.ring.domain.zero)

def __getitem__(self, element):
    """
    Returns the coefficient that stands next to the given monomial.

    Parameters
    ==========

    element : PolyElement (with ``is_monomial = True``) or 1

    Examples
    ========

    >>> _, x, y, z = ring('x y z', ZZ)

    >>> f = 3*x**2*y - x*y*z + 7*z**3 + 23
    >>> f[x**2*y]
    3
    >>> f[x*y]
    0
    >>> f[1]
    23

    """
    ring = self.ring
    if isinstance(element, tuple):
        # Raw exponent-vector lookup.
        return self._get_coeff(element)
    elif element == 1:
        # The constant term.
        return self._get_coeff(ring.zero_monom)
    elif isinstance(element, ring.dtype) and element.is_monomial:
        monom, = element
        return self._get_coeff(monom)
    raise ValueError(f'expected a monomial, got {element}')

@property
def LC(self):
    """Leading coefficient."""
    return self._get_coeff(self.leading_expv())

@property
def LM(self):
    """Leading monomial; the zero monomial for the zero polynomial."""
    if (expv := self.leading_expv()) is None:
        return self.ring.zero_monom
    else:
        return expv

@property
def LT(self):
    """Leading term as a (monomial, coefficient) pair."""
    if (expv := self.leading_expv()) is None:
        ring = self.ring
        return ring.zero_monom, ring.domain.zero
    else:
        return expv, self._get_coeff(expv)
def leading_term(self, order=None):
    """Leading term as a polynomial element.

    Examples
    ========

    >>> _, x, y = ring('x y', ZZ)

    >>> (3*x*y + y**2).leading_term()
    3*x*y

    """
    p = self.ring.zero
    expv = self.leading_expv(order=order)
    # Compare against None explicitly: the leading monomial of a nonzero
    # polynomial may be the all-zero exponent tuple, which is an empty
    # (hence falsy) tuple for a ground ring with no generators, yet must
    # still contribute a term.  leading_expv() returns None only for the
    # zero polynomial.
    if expv is not None:
        p[expv] = self[expv]
    return p
def content(self):
    """Returns GCD of polynomial's coefficients."""
    ring = self.ring
    domain = ring.domain
    gcd = domain.gcd
    cont = functools.reduce(gcd, self.values(), domain.zero)
    # Make the content negative when the polynomial is not normalized,
    # so that primitive() yields a normalized primitive part.
    if not ring.is_normal(self):
        cont = -cont
    return cont

def primitive(self):
    """Returns content and a primitive polynomial."""
    cont = self.content()
    prim = self.copy()
    if prim:
        prim = prim.quo_ground(cont)
    return cont, prim

def monic(self):
    """Divides all coefficients by the leading coefficient."""
    if not self:
        return self
    else:
        return self.exquo_ground(self.LC)

def mul_monom(self, m):
    # Multiply every monomial by m; coefficients are unchanged.
    return self.__class__({monom*m: self[monom] for monom in self})
def quo_ground(self, x):
    """Quotient by a ground-domain element (floor division over a ring)."""
    domain = self.ring.domain
    if not x:
        raise ZeroDivisionError('polynomial division')
    if not self or x == 1:
        return self
    if domain.is_Field:
        quo = domain.quo
        p = self.__class__({monom: quo(self[monom], x) for monom in self})
    else:
        # Over a ring, use floor division; terms may vanish below.
        p = self.__class__({monom: self[monom]//x for monom in self})
    p._strip_zero()  # remove coefficients that became zero
    return p

def exquo_ground(self, x):
    """Exact quotient by a ground-domain element; raises if inexact."""
    domain = self.ring.domain
    if not x:
        raise ZeroDivisionError('polynomial division')
    if not self or x == 1:
        return self
    p = self.__class__({monom: domain.exquo(self[monom], x) for monom in self})
    p._strip_zero()
    return p
def quo_term(self, term):
    """Quotient of self by a single (monomial, coefficient) term.

    Terms of self whose monomial is not divisible by the term's monomial
    are discarded; over a non-field domain, terms whose coefficient is
    not exactly divisible by the term's coefficient are discarded too.
    """
    monom, coeff = term
    if not coeff:
        raise ZeroDivisionError('polynomial division')
    if not self:
        return self.ring.zero
    ring = self.ring
    domain = ring.domain
    # Hoist loop-invariant tests out of the loop.
    divide_monom = monom != ring.zero_monom
    is_field = domain.is_Field
    p = ring.zero
    for tm, tc in self.items():
        if divide_monom:
            tm /= monom
            if any(e < 0 for e in tm):
                # monom does not divide tm: drop this term.
                continue
        if is_field or not tc % coeff:
            p[tm] = domain.quo(tc, coeff)
    return p
def trunc_ground(self, p):
    """Reduce all coefficients modulo ``p``.

    Over the integers the symmetric representation is used.
    """
    if self.ring.domain.is_IntegerRing:
        terms = {}
        for monom, coeff in self.items():
            coeff %= p
            terms[monom] = symmetric_residue(coeff, p)
    else:
        terms = {monom: self[monom] % p for monom in self}
    poly = self.__class__(terms)
    poly._strip_zero()  # drop coefficients that reduced to zero
    return poly

def _norm(self, norm_func):
    # Apply norm_func to the absolute values of all coefficients.
    if not self:
        return self.ring.domain.zero
    else:
        return norm_func([abs(coeff) for coeff in self.values()])

def max_norm(self):
    """Maximum absolute value of the coefficients."""
    return self._norm(max)

def l1_norm(self):
    """Sum of absolute values of the coefficients."""
    return self._norm(sum)

def gcd(self, other):
    return self.ring.gcd(self, other)

def lcm(self, other):
    return self.ring.lcm(self, other)

def cofactors(self, other):
    return self.ring.cofactors(self, other)

def terms_gcd(self):
    """Return (G, f) where G is the monomial GCD of all terms of self."""
    ring = self.ring
    if not self:
        return (0,)*ring.ngens, self
    G = functools.reduce(Monomial.gcd, self)
    if all(g == 0 for g in G):
        return G, self  # nothing to factor out
    f = ring.zero
    for monom, coeff in self.items():
        # Divide every monomial through by the common monomial factor.
        f[monom/G] = coeff
    return G, f
def cancel(self, g, include=True):
    """
    Cancel common factors in a rational function ``f/g``.

    Examples
    ========

    >>> _, x, y = ring('x y', ZZ)

    >>> (2*x**2 - 2).cancel(x**2 - 2*x + 1)
    (2*x + 2, x - 1)

    """
    f = self
    ring = f.ring
    domain = ring.domain
    if not domain.is_Field or not domain.has_assoc_Ring:
        _, p, q = f.cofactors(g)
        cp, cq = domain.one, domain.one
    else:
        # Work over the associated ring: clear denominators first,
        # cancel there, then cancel the extracted ground factors too.
        new_ring = ring.clone(domain=domain.ring)
        cq, f = f.clear_denoms(convert=True)
        cp, g = g.clear_denoms(convert=True)
        _, p, q = f.cofactors(g)
        _, cp, cq = new_ring.domain.cofactors(cp, cq)
        p = p.set_ring(ring)
        q = q.set_ring(ring)
    # Normalize signs so that the denominator (and, preferably, the
    # numerator) has a positive leading coefficient.
    p_neg = not ring.is_normal(p)
    q_neg = not ring.is_normal(q)
    if p_neg and q_neg:
        p, q = -p, -q
    elif p_neg:
        cp, p = -cp, -p
    elif q_neg:
        cp, q = -cp, -q
    if not include:
        return cp, cq, p, q
    # Fold the ground cofactors back into the polynomials.
    p *= domain(cp)
    q *= domain(cq)
    return p, q
def diff(self, x=0, m=1):
    """Computes partial derivative in ``x``.

    Examples
    ========

    >>> _, x, y = ring('x y', ZZ)

    >>> p = x + x**2*y**3
    >>> p.diff(x)
    2*x*y**3 + 1

    """
    ring = self.ring
    i = ring.index(x)
    x, = ring.gens[i]
    x = x**m  # monomial by which exponents are lowered
    # For m == 0 differentiation is the identity (realized via compose).
    g = ring.zero if m else self.compose(ring.gens[i], ring.zero)
    for expv, coeff in self.items():
        if expv[i]:
            e = expv/x
            # Falling-factorial factor: n * (n-1) * ... * (n-m+1).
            for j in range(expv[i], expv[i] - m, -1):
                coeff *= j
            g[e] = coeff
    g._strip_zero()
    return g

def integrate(self, x=0, m=1):
    """Computes indefinite integral in ``x`` (constant of integration 0)."""
    ring = self.ring
    i = ring.index(x)
    x, = ring.gens[i]
    x = x**m  # monomial by which exponents are raised
    g = ring.zero
    for expv, coeff in self.items():
        e = expv*x
        # Divide by the rising factors (n+m) * ... * (n+1).
        for j in range(expv[i] + m, expv[i], -1):
            coeff /= j
        g[e] = coeff
    g._strip_zero()
    return g
def __call__(self, *values):
    """Evaluate the polynomial at the given generator values (in order)."""
    if 0 < len(values) <= self.ring.ngens:
        return self.eval(list(zip(self.ring.gens, values)))
    else:
        raise ValueError(f'expected at least 1 and at most {self.ring.ngens} values, got {len(values)}')

def eval(self, x=0, a=0):
    """Evaluate at x = a; x may also be a list of (generator, value) pairs."""
    if isinstance(x, list) and not a:
        # Substitute the first pair, then recurse on the smaller ring.
        (X, a), x = x[0], x[1:]
        f = self.eval(X, a)
        if x:
            # Remaining generators must be re-expressed in the reduced ring.
            return f.eval([(Y.drop(X), a) for (Y, a) in x])
        else:
            return f
    return self.compose(x, a).drop(x)
def compose(self, x, a=None):
    """Computes the functional composition."""
    ring = self.ring
    poly = ring.zero
    if a is not None:
        replacements = [(x, a)]
    else:
        if isinstance(x, list):
            replacements = list(x)
        elif isinstance(x, dict):
            replacements = list(x.items())
        else:
            raise ValueError('expected a generator, value pair a '
                             'sequence of such pairs')
    # Normalize to (generator index, ring element) pairs, sorted by index.
    replacements = [(ring.index(x), ring(g)) for x, g in replacements]
    replacements.sort(key=lambda k: k[0])
    if ring.is_univariate:
        # Horner-like scheme: iterate terms by ascending monomial and
        # reuse the running power acc = g**d between terms.
        [(i, g)] = replacements
        acc, d = ring.one, 0
        for monom, coeff in sorted(self.items(), key=lambda x: x[0]):
            n = monom[i]
            acc *= g**(n - d)
            d = n
            poly += acc*coeff
        return poly
    # Multivariate case: substitute generator-by-generator per term.
    for monom, coeff in self.items():
        monom = list(monom)
        subpoly = ring.one
        for i, g in replacements:
            # Consume the exponent of the substituted generator.
            n, monom[i] = monom[i], 0
            subpoly *= g**n
        monom = Monomial(monom)
        subpoly *= ring.from_terms([(monom, coeff)])
        poly += subpoly
    return poly
def discriminant(self):
    """Computes discriminant of a polynomial."""
    ring = self.ring
    if (d := self.degree()) <= 0:
        # Constants (and zero) have zero discriminant, in the coefficient ring.
        return ring.zero.drop(0)
    else:
        s = (-1)**((d*(d - 1)) // 2)  # sign from the resultant formula
        c = self.eject(*ring.gens[1:]).LC  # leading coefficient w.r.t. main variable
        return self.resultant(self.diff()) // (c*s)

def slice(self, m, n, x=0):
    """Keep only terms whose degree in ``x`` lies in [m, n); zero out x there."""
    ring = self.ring
    poly = ring.zero
    j = ring.index(x)
    for monom, coeff in self.items():
        if not n > monom[j] >= m:
            if ring.is_univariate:
                # Out-of-range univariate terms are dropped entirely.
                continue
            # Multivariate: project the out-of-range exponent of x to zero.
            monom = monom[:j] + (0,) + monom[j + 1:]
        if monom in poly:
            poly[monom] += coeff
        else:
            poly[monom] = coeff
    return poly
def prem(self, other):
    """Polynomial pseudo-remainder.

    Examples
    ========

    >>> _, x, y = ring('x y', ZZ)

    >>> (x**2 + x*y).prem(2*x + 2)
    -4*y + 4

    References
    ==========

    * :cite:`Knuth1985seminumerical`, p. 407.

    """
    ring = self.ring
    if not isinstance(other, ring.dtype):
        other = ring.convert(other)
    f, g = self, other
    if ring.is_multivariate:
        # Reduce to the univariate case over the remaining generators.
        f, g = map(operator.methodcaller('eject', *ring.gens[1:]), (f, g))
        r = f.prem(g)
        return r.inject()
    df = f.degree()
    dg = g.degree()
    if dg < 0:
        # Degree of the zero polynomial: division by zero.
        raise ZeroDivisionError('polynomial division')
    r, dr = f, df
    if dr < dg:
        return r
    x = ring.gens[0]
    n = df - dg + 1  # maximal number of reduction steps
    lc_g = g.LC
    while True:
        lc_r = r.LC
        n -= 1
        # One pseudo-division step: scale r by LC(g), subtract the
        # matching multiple of g.
        r *= lc_g
        r -= g*x**(dr - dg)*lc_r
        dr = r.degree()
        if dr < dg:
            break
    # Account for the reduction steps that were not needed.
    r *= lc_g**n
    return r
@cacheit
def resultant(self, other, includePRS=False):
    """
    Computes resultant of two polynomials in `K[X]`.

    Examples
    ========

    >>> _, x, y = ring('x y', ZZ)

    >>> f = 3*x**2*y - y**3 - 4
    >>> g = x**2 + x*y**3 - 9
    >>> f.resultant(g)
    -3*y**10 - 12*y**7 + y**6 - 54*y**4 + 8*y**3 + 729*y**2 - 216*y + 16

    """
    ring = self.ring
    domain = ring.domain
    # Collins' modular algorithm applies over ZZ/QQ only and does not
    # produce the intermediate PRS, so it is skipped when includePRS is set.
    if (not includePRS and query('USE_COLLINS_RESULTANT') and
            (domain.is_IntegerRing or domain.is_RationalField)):
        return ring._collins_resultant(self, other)
    res = ring._primitive_prs(self, other)
    return res if includePRS else res[0]

def subresultants(self, other):
    """
    Computes subresultant PRS of two polynomials in `K[X]`.

    Examples
    ========

    >>> _, x, y = ring('x y', ZZ)

    >>> f = 3*x**2*y - y**3 - 4
    >>> g = x**2 + x*y**3 - 9
    >>> a = 3*x*y**4 + y**3 - 27*y + 4
    >>> b = -3*y**10 - 12*y**7 + y**6 - 54*y**4 + 8*y**3 + 729*y**2 - 216*y + 16
    >>> f.subresultants(g) == [f, g, a, b]
    True

    """
    return self.resultant(other, includePRS=True)[1]

def gcdex(self, other):
    """
    Extended Euclidean algorithm in `F[x]`.

    Returns ``(s, t, h)`` such that ``h = gcd(self, other)`` and
    ``s*self + t*other = h``.

    Examples
    ========

    >>> _, x = ring('x', QQ)

    >>> f = x**4 - 2*x**3 - 6*x**2 + 12*x + 15
    >>> g = x**3 + x**2 - 4*x - 4
    >>> f.gcdex(g)
    (-1/5*x + 3/5, 1/5*x**2 - 6/5*x + 2, x + 1)

    """
    s, h = self.half_gcdex(other)
    # Recover t from the Bezout identity: t = (h - s*f) / g.
    t = h - self*s
    t //= other
    return s, t, h
def sqf_list(self):
    """Square-free decomposition; delegates to the ring."""
    return self.ring.sqf_list(self)

def sqf_part(self):
    """Square-free part; delegates to the ring."""
    return self.ring.sqf_part(self)

@property
def is_squarefree(self):
    return self.ring.is_squarefree(self)

def sqf_norm(self):
    """Square-free norm; delegates to the ring."""
    return self.ring.sqf_norm(self)
def factor_list(self):
return self.ring.factor_list(self) | PypiClean |
/CHASM_NuSpacesim-1.0.0.tar.gz/CHASM_NuSpacesim-1.0.0/demo/.ipynb_checkpoints/Custom_Distribution_Example-checkpoint.ipynb | CHASM allows users to implement custom parameterizations for the charged particle energy and angular distributions. This notebook demonstrates how custom charged particle distributions can be used to create a Cherenkov light angular distribution.
```
from CHASM.charged_particle_ABC import *
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
```
Let's say instead of the Nerling parameterization for the charged particle energy distribution we want to instead use a simple Gaussian, with mean and scaling parameters dependent on shower stage. We'll use the scipy stats normal distribution so we don't have to write the actual function. To create an energy distribution object we will define a class which inherits from EnergyDistribution(), and simply implement the n_t_lE() method. An EnergyDistribution() comes with methods to normalize the distribution, as well as reset the stage (default is t = 0, which is X_max) at which we desire a distribution. The stage is stored as the 't' attribute, and the normalization constant is stored as the 'C0' attribute, which must be included as a prefactor in the n_t_lE() method. The lower and upper limits to the particle energies respectively are stored as 'll' and 'ul'.
```
from scipy.stats import norm
class GaussianEnergyDistribution(EnergyDistribution):
'''
This is an implementation of an energy distribution class
where the distribution is a Gaussian.
'''
#some parameters that depend on the stage:
@property
def mu(self):
return self.t / 20. + 3.
@property
def sig(self):
return 1.e-3 * (self.t + 20.)**2 + 1.
def n_t_lE(self, lE):
'''This method, which is the PDF, must be implemented for the class to be instantiated.
        It must include the prefactor "self.C0" which is the normalization constant'''
return self.C0 * norm.pdf(lE, loc=self.mu, scale=self.sig)
ed = GaussianEnergyDistribution(-20.)
lEs = np.linspace(ed.ll,ed.ul,100)
plt.figure()
plt.plot(lEs, ed.n_t_lE(lEs), label=ed.t)
ed.set_stage(0.)
plt.plot(lEs, ed.n_t_lE(lEs), label=ed.t)
ed.set_stage(20.)
plt.plot(lEs, ed.n_t_lE(lEs), label=ed.t)
plt.legend(title='Stage')
plt.title('Custom Charged Particle Energy Distribution')
plt.xlabel('log(E (MeV))')
plt.ylabel('dn / dlE')
```
It's also possible to implement a custom charged particle angular distribution. For simplicity let's again use a gaussian
```
class GaussianAngularDistribution(AngularDistribution):
#some parameter that depends on the energy
@property
def sig(self):
return 1. / self.E
def n_t_lE_Omega(self, theta):
return self.C0 * norm.pdf(theta, scale=self.sig)
qd = GaussianAngularDistribution(np.log(1.))
ll = np.radians(0.1)
ul = np.radians(45.)
lqrad = np.linspace(np.log(ll),np.log(ul),450)
qrad = np.exp(lqrad)
fig = plt.figure()
plt.plot(qrad,qd.n_t_lE_Omega(qrad),label='1 MeV')
qd.set_lE(np.log(5.))
plt.plot(qrad,qd.n_t_lE_Omega(qrad),label='5 MeV')
qd.set_lE(np.log(30.))
plt.plot(qrad,qd.n_t_lE_Omega(qrad),label='30 MeV')
qd.set_lE(np.log(170.))
plt.plot(qrad,qd.n_t_lE_Omega(qrad),label='170 MeV')
qd.set_lE(np.log(1.e3))
plt.plot(qrad,qd.n_t_lE_Omega(qrad),label='1 GeV')
plt.loglog()
plt.legend()
plt.xlabel('Theta [rad]')
plt.ylabel('n(t;lE,Omega)')
```
| PypiClean |
/GoogleAppEngineMapReduce-1.9.22.0.tar.gz/GoogleAppEngineMapReduce-1.9.22.0/mapreduce/status.py | __author__ = ("aizatsky@google.com (Mike Aizatsky)",
"bslatkin@google.com (Brett Slatkin)")
import os
import pkgutil
import time
import zipfile
from google.appengine.api import validation
from google.appengine.api import yaml_builder
from google.appengine.api import yaml_errors
from google.appengine.api import yaml_listener
from google.appengine.api import yaml_object
from google.appengine.ext import db
from google.appengine.ext import webapp
from mapreduce import base_handler
from mapreduce import errors
from mapreduce import model
# TODO(user): a list of features we'd like to have in status page:
# - show sparklet of entities/sec on index page
# - shard bar chart should color finished shards differently
# mapreduce.yaml file names
MR_YAML_NAMES = ["mapreduce.yaml", "mapreduce.yml"]
class BadStatusParameterError(Exception):
  """A parameter passed to a status handler was invalid."""


class UserParam(validation.Validated):
  """A user-supplied parameter to a mapreduce job."""

  ATTRIBUTES = {
      # Parameter name: letters, digits, underscores and dots only.
      "name": r"[a-zA-Z0-9_\.]+",
      "default": validation.Optional(r".*"),
      "value": validation.Optional(r".*"),
  }


class MapperInfo(validation.Validated):
  """Configuration parameters for the mapper part of the job."""

  ATTRIBUTES = {
      # Dotted path of the mapper handler callable/class.
      "handler": r".+",
      # Dotted path of the InputReader subclass.
      "input_reader": r".+",
      "output_writer": validation.Optional(r".+"),
      "params": validation.Optional(validation.Repeated(UserParam)),
      "params_validator": validation.Optional(r".+"),
  }


class MapreduceInfo(validation.Validated):
  """Mapreduce description in mapreduce.yaml."""

  ATTRIBUTES = {
      # Human-readable job name, shown in the UI.
      "name": r".+",
      "mapper": MapperInfo,
      "params": validation.Optional(validation.Repeated(UserParam)),
      "params_validator": validation.Optional(r".+"),
  }
class MapReduceYaml(validation.Validated):
  """Root class for mapreduce.yaml.

  File format:

  mapreduce:
  - name: <mapreduce_name>
    mapper:
    - input_reader: google.appengine.ext.mapreduce.DatastoreInputReader
    - handler: path_to_my.MapperFunction
    - params:
      - name: foo
        default: bar
      - name: blah
        default: stuff
    - params_validator: path_to_my.ValidatorFunction

  Where
    mapreduce_name: The name of the mapreduce. Used for UI purposes.
    mapper_handler_spec: Full <module_name>.<function_name/class_name> of
      mapper handler. See MapreduceSpec class documentation for full handler
      specification.
    input_reader: Full <module_name>.<function_name/class_name> of the
      InputReader sub-class to use for the mapper job.
    params: A list of optional parameter names and optional default values
      that may be supplied or overridden by the user running the job.
    params_validator is full <module_name>.<function_name/class_name> of
      a callable to validate the mapper_params after they are input by the
      user running the job.
  """

  ATTRIBUTES = {
      "mapreduce": validation.Optional(validation.Repeated(MapreduceInfo))
  }

  @staticmethod
  def to_dict(mapreduce_yaml):
    """Converts a MapReduceYaml file into a JSON-encodable dictionary.

    For use in user-visible UI and internal methods for interfacing with
    user code (like param validation).

    Args:
      mapreduce_yaml: The Python representation of the mapreduce.yaml document.

    Returns:
      A list of configuration dictionaries, one per configured mapreduce.
    """
    all_configs = []
    for config in mapreduce_yaml.mapreduce:
      out = {
          "name": config.name,
          "mapper_input_reader": config.mapper.input_reader,
          "mapper_handler": config.mapper.handler,
      }
      if config.mapper.params_validator:
        out["mapper_params_validator"] = config.mapper.params_validator
      if config.mapper.params:
        # Flatten the list of UserParam objects into name -> default pairs.
        param_defaults = {}
        for param in config.mapper.params:
          param_defaults[param.name] = param.default or param.value
        out["mapper_params"] = param_defaults
      if config.params:
        param_defaults = {}
        for param in config.params:
          param_defaults[param.name] = param.default or param.value
        out["params"] = param_defaults
      if config.mapper.output_writer:
        out["mapper_output_writer"] = config.mapper.output_writer
      all_configs.append(out)

    return all_configs
# N.B. Sadly, we currently don't have and ability to determine
# application root dir at run time. We need to walk up the directory structure
# to find it.
def find_mapreduce_yaml(status_file=__file__):
  """Traverse directory trees to find mapreduce.yaml file.

  Begins with the location of status.py and then moves on to check the working
  directory.

  Args:
    status_file: location of status.py, overridable for testing purposes.

  Returns:
    the path of mapreduce.yaml file or None if not found.
  """
  checked = set()
  # First walk up from this module's directory...
  yaml = _find_mapreduce_yaml(os.path.dirname(status_file), checked)
  if not yaml:
    # ...then from the current working directory (skipping dirs already seen).
    yaml = _find_mapreduce_yaml(os.getcwd(), checked)
  return yaml
def _find_mapreduce_yaml(start, checked):
  """Walk up the directory tree from start looking for mapreduce.yaml.

  Traversal stops when a directory already present in checked is
  encountered; this both guarantees termination at the filesystem root
  (where os.path.dirname is a fixed point) and avoids re-checking
  directories across calls.

  Args:
    start: the path to start in and work upward from.
    checked: the set of already examined directories; updated in place.

  Returns:
    the path of mapreduce.yaml file or None if not found.
  """
  # Renamed from 'dir' to avoid shadowing the builtin.
  current = start
  while current not in checked:
    checked.add(current)
    for mr_yaml_name in MR_YAML_NAMES:
      yaml_path = os.path.join(current, mr_yaml_name)
      if os.path.exists(yaml_path):
        return yaml_path
    current = os.path.dirname(current)
  return None
def parse_mapreduce_yaml(contents):
  """Parses mapreduce.yaml file contents.

  Args:
    contents: mapreduce.yaml file contents.

  Returns:
    MapReduceYaml object with all the data from original file.

  Raises:
    errors.BadYamlError: when contents is not a valid mapreduce.yaml file.
  """
  try:
    builder = yaml_object.ObjectBuilder(MapReduceYaml)
    handler = yaml_builder.BuilderHandler(builder)
    listener = yaml_listener.EventListener(handler)
    listener.Parse(contents)

    mr_info = handler.GetResults()
  except (ValueError, yaml_errors.EventError), e:
    # Wrap any parser failure in the package's own exception type.
    raise errors.BadYamlError(e)

  if len(mr_info) < 1:
    raise errors.BadYamlError("No configs found in mapreduce.yaml")
  if len(mr_info) > 1:
    # Only a single YAML document is supported per file.
    raise errors.MultipleDocumentsInMrYaml("Found %d YAML documents" %
                                           len(mr_info))

  jobs = mr_info[0]
  job_names = set(j.name for j in jobs.mapreduce)
  if len(jobs.mapreduce) != len(job_names):
    raise errors.BadYamlError(
        "Overlapping mapreduce names; names must be unique")

  return jobs
def get_mapreduce_yaml(parse=parse_mapreduce_yaml):
  """Locates mapreduce.yaml, loads and parses its info.

  Args:
    parse: parsing function to apply to the file contents; overridable
      for testing.

  Returns:
    MapReduceYaml object.

  Raises:
    errors.BadYamlError: when contents is not a valid mapreduce.yaml file or the
    file is missing.
  """
  mr_yaml_path = find_mapreduce_yaml()
  if not mr_yaml_path:
    raise errors.MissingYamlError()
  mr_yaml_file = open(mr_yaml_path)
  try:
    return parse(mr_yaml_file.read())
  finally:
    # Always close the file, even when parsing raises.
    mr_yaml_file.close()
class ResourceHandler(webapp.RequestHandler):
  """Handler for static resources."""

  # Maps URL-visible resource names to (bundled file name, content type).
  _RESOURCE_MAP = {
    "status": ("overview.html", "text/html"),
    "detail": ("detail.html", "text/html"),
    "base.css": ("base.css", "text/css"),
    "jquery.js": ("jquery-1.6.1.min.js", "text/javascript"),
    "jquery-json.js": ("jquery.json-2.2.min.js", "text/javascript"),
    "jquery-url.js": ("jquery.url.js", "text/javascript"),
    "status.js": ("status.js", "text/javascript"),
  }

  def get(self, relative):
    """Serve one whitelisted static file; 404 for anything else."""
    if relative not in self._RESOURCE_MAP:
      self.response.set_status(404)
      self.response.out.write("Resource not found.")
      return

    real_path, content_type = self._RESOURCE_MAP[relative]
    path = os.path.join(os.path.dirname(__file__), "static", real_path)

    # It's possible we're inside a zipfile (zipimport).  If so, path
    # will include 'something.zip'.
    if ('.zip' + os.sep) in path:
      (zip_file, zip_path) = os.path.relpath(path).split('.zip' + os.sep, 1)
      content = zipfile.ZipFile(zip_file + '.zip').read(zip_path)
    else:
      try:
        # Prefer pkgutil so the file is found regardless of install layout.
        data = pkgutil.get_data(__name__, "static/" + real_path)
      except AttributeError:  # Python < 2.6.
        data = None
      content = data or open(path, 'rb').read()

    self.response.headers["Cache-Control"] = "public; max-age=300"
    self.response.headers["Content-Type"] = content_type
    self.response.out.write(content)
class ListConfigsHandler(base_handler.GetJsonHandler):
  """Lists mapreduce configs as JSON for users to start jobs."""

  def handle(self):
    # Expose the parsed mapreduce.yaml as JSON-ready dictionaries.
    self.json_response["configs"] = MapReduceYaml.to_dict(get_mapreduce_yaml())
class ListJobsHandler(base_handler.GetJsonHandler):
  """Lists running and completed mapreduce jobs for an overview as JSON."""

  def handle(self):
    cursor = self.request.get("cursor")
    count = int(self.request.get("count", "50"))

    query = model.MapreduceState.all()
    if cursor:
      # Key-based pagination: resume from the cursor key.
      query.filter("__key__ >=", db.Key(cursor))
    query.order("__key__")

    # Fetch one extra row to detect whether there is a next page.
    jobs_list = query.fetch(count + 1)
    if len(jobs_list) == (count + 1):
      self.json_response["cursor"] = str(jobs_list[-1].key())
      jobs_list = jobs_list[:-1]

    all_jobs = []
    for job in jobs_list:
      out = {
          # Data shared between overview and detail pages.
          "name": job.mapreduce_spec.name,
          "mapreduce_id": job.mapreduce_spec.mapreduce_id,
          "active": job.active,
          "start_timestamp_ms":
              int(time.mktime(job.start_time.utctimetuple()) * 1000),
          "updated_timestamp_ms":
              int(time.mktime(job.last_poll_time.utctimetuple()) * 1000),

          # Specific to overview page.
          "chart_url": job.sparkline_url,
          "chart_width": job.chart_width,
          "active_shards": job.active_shards,
          "shards": job.mapreduce_spec.mapper.shard_count,
      }
      if job.result_status:
        out["result_status"] = job.result_status
      all_jobs.append(out)

    self.json_response["jobs"] = all_jobs
class GetJobDetailHandler(base_handler.GetJsonHandler):
  """Retrieves the details of a mapreduce job as JSON."""

  def handle(self):
    mapreduce_id = self.request.get("mapreduce_id")
    if not mapreduce_id:
      raise BadStatusParameterError("'mapreduce_id' was invalid")
    job = model.MapreduceState.get_by_key_name(mapreduce_id)
    if job is None:
      raise KeyError("Could not find job with ID %r" % mapreduce_id)

    # Flatten spec and aggregate counters into the top-level response.
    self.json_response.update(job.mapreduce_spec.to_json())
    self.json_response.update(job.counters_map.to_json())
    self.json_response.update({
        # Shared with overview page.
        "active": job.active,
        "start_timestamp_ms":
            int(time.mktime(job.start_time.utctimetuple()) * 1000),
        "updated_timestamp_ms":
            int(time.mktime(job.last_poll_time.utctimetuple()) * 1000),

        # Specific to detail page.
        "chart_url": job.chart_url,
        "chart_width": job.chart_width,
    })
    self.json_response["result_status"] = job.result_status

    # Per-shard status rows, ordered by shard number.
    all_shards = []
    for shard in model.ShardState.find_all_by_mapreduce_state(job):
      out = {
          "active": shard.active,
          "result_status": shard.result_status,
          "shard_number": shard.shard_number,
          "shard_id": shard.shard_id,
          "updated_timestamp_ms":
              int(time.mktime(shard.update_time.utctimetuple()) * 1000),
          "shard_description": shard.shard_description,
          "last_work_item": shard.last_work_item,
      }
      out.update(shard.counters_map.to_json())
      all_shards.append(out)
    all_shards.sort(key=lambda x: x["shard_number"])
    self.json_response["shards"] = all_shards
/DeepPhysX.Sofa-22.12.1.tar.gz/DeepPhysX.Sofa-22.12.1/examples/demos/Armadillo/UNet/training.py | import os.path
import sys
import torch
# DeepPhysX related imports
from DeepPhysX.Core.Pipelines.BaseTraining import BaseTraining
from DeepPhysX.Core.Database.BaseDatabaseConfig import BaseDatabaseConfig
from DeepPhysX.Torch.UNet.UNetConfig import UNetConfig
from DeepPhysX.Sofa.Environment.SofaEnvironmentConfig import SofaEnvironmentConfig
# Working session imports
from download import ArmadilloDownloader
ArmadilloDownloader().get_session('run')
from Environment.ArmadilloTraining import ArmadilloTraining
from Environment.parameters import grid_resolution
# Training parameters
nb_epochs = 200
nb_batch = 1000
batch_size = 16
lr = 1e-5
def launch_trainer(dataset_dir, nb_env):
    """Configure and run a UNet training session on the Armadillo scene.

    Args:
        dataset_dir: path to an existing dataset directory, or None to
            generate samples on the fly with SOFA environments.
        nb_env: number of parallel environment threads for data generation.
    """

    # Environment config
    environment_config = SofaEnvironmentConfig(environment_class=ArmadilloTraining,
                                               visualizer='vedo',
                                               number_of_thread=nb_env)

    # UNet config
    network_config = UNetConfig(lr=lr,
                                loss=torch.nn.MSELoss,
                                optimizer=torch.optim.Adam,
                                input_size=grid_resolution,
                                nb_dims=3,
                                nb_input_channels=3,
                                nb_first_layer_channels=128,
                                nb_output_channels=3,
                                nb_steps=3,
                                two_sublayers=True,
                                border_mode='same',
                                skip_merge=False, )

    # Dataset config
    database_config = BaseDatabaseConfig(existing_dir=dataset_dir,
                                         max_file_size=1,
                                         shuffle=True,
                                         normalize=True)

    # Trainer; environments are only attached when no dataset is provided.
    trainer = BaseTraining(network_config=network_config,
                           database_config=database_config,
                           environment_config=environment_config if dataset_dir is None else None,
                           session_dir='sessions',
                           session_name='armadillo_training_user',
                           epoch_nb=nb_epochs,
                           batch_nb=nb_batch,
                           batch_size=batch_size)

    # Launch the training session
    trainer.execute()
if __name__ == '__main__':

    # Define dataset locations.
    dpx_session = 'sessions/armadillo_dpx'
    user_session = 'sessions/armadillo_data_user'
    # Take user dataset by default
    dataset = user_session if os.path.exists(user_session) else dpx_session

    # Get nb_thread option; a command-line argument switches to on-the-fly
    # data generation with that many environment threads.
    nb_thread = 1
    if len(sys.argv) > 1:
        dataset = None
        try:
            nb_thread = int(sys.argv[1])
        except ValueError:
            # Fixed message: the option is the number of environment threads,
            # not a sample count.
            print("Script option must be an integer <nb_thread> giving the number of "
                  "Environment threads used to produce samples. "
                  "Without option, samples are loaded from an existing Dataset.")
            quit(0)

    # Check missing data
    session_name = 'train' if dataset is None else 'train_data'
    ArmadilloDownloader().get_session(session_name)

    # Launch pipeline
    launch_trainer(dataset, nb_thread)
/ArchivViewer-3.tar.gz/ArchivViewer-3/archivviewer/archivviewer.py |
import sys, codecs, os, fdb, json, tempfile, shutil, subprocess, io, winreg, configparser
from datetime import datetime, timedelta
from collections import OrderedDict
from contextlib import contextmanager
from pathlib import Path
from lhafile import LhaFile
from PyPDF2 import PdfFileMerger
import img2pdf
from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QFileDialog
from PyQt5.QtCore import QAbstractTableModel, Qt, QThread, pyqtSignal, pyqtSlot, QObject, QMutex, QTranslator, QLocale, QLibraryInfo
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from archivviewer.forms import ArchivviewerUi
exportThread = None
def parseBlobs(blobs):
    """Parse mediDOK category blobs into an OrderedDict {categoryId: name}.

    Each blob starts with a 6-byte header -- a 2-byte total length,
    2 unknown bytes, and a 2-byte entry count -- followed by a stream of
    length-prefixed entries.  Neither header value is needed for parsing:
    the entry stream is walked via the per-entry length prefixes until the
    blob is exhausted.  Entries from all blobs are merged and the result
    is sorted by category id.
    """
    entries = {}
    for blob in blobs:
        offset = 6  # skip the fixed-size header described above
        while offset < len(blob):
            entryLength = int.from_bytes(blob[offset:offset + 2], 'little')
            offset += 2
            result = parseBlobEntry(blob[offset:offset + entryLength])
            entries[result['categoryId']] = result['name']
            offset += entryLength

    return OrderedDict(sorted(entries.items()))

def parseBlobEntry(blob):
    """Parse one category entry.

    Layout: 6 unknown bytes, a 2-byte name length (little endian, counting
    a trailing NUL), the cp1252-encoded name, arbitrary payload; the
    category id is stored in the last 2 bytes of the entry.

    Returns a dict with keys 'name' and 'categoryId'.
    """
    offset = 6
    name_length = int.from_bytes(blob[offset:offset + 2], 'little')
    offset += 2
    # name_length includes a trailing NUL byte that is not part of the name.
    name = blob[offset:offset + name_length - 1].decode('cp1252')
    catid = int.from_bytes(blob[-2:], 'little')

    return {'name': name, 'categoryId': catid}
def displayErrorMessage(msg):
    """Show msg in a modal Qt error dialog titled 'Fehler'."""
    QMessageBox.critical(None, "Fehler", str(msg))
class ArchivViewer(QMainWindow, ArchivviewerUi):
    """Main application window combining QMainWindow with the generated UI form."""

    def __init__(self, parent = None):
        super(ArchivViewer, self).__init__(parent)
        self.setupUi(self)  # build the widgets defined by the Designer form
        self.setWindowTitle("Archiv Viewer")
class FileChangeHandler(FileSystemEventHandler):
    """Watchdog handler that reloads the model when the GDT exchange file changes."""

    def __init__(self, gdtfile, model):
        super().__init__()
        self.gdtfile = gdtfile  # path of the GDT file to watch
        self.model = model      # ArchivTableModel to update on change

    def on_modified(self, event):
        # Only react to the watched file, not other events in the directory.
        if self.gdtfile == event.src_path:
            infos = readGDT(self.gdtfile)
            try:
                patid = int(infos["id"])
                self.model.setActivePatient(infos)
            except TypeError:
                # NOTE(review): silently ignores records whose "id" is not
                # convertible to int -- presumably partially written GDT
                # files; confirm this is intended.
                pass
class PdfExporter(QObject):
    """Worker object that exports selected archive files to PDF in a thread."""

    progress = pyqtSignal(int)        # emitted with the number of files processed
    completed = pyqtSignal(int, str)  # emitted with file count and destination
    error = pyqtSignal(str)           # emitted with an error description
    kill = pyqtSignal()               # emitted when the worker is done

    def __init__(self, model, filelist, destination, parent=None):
        super(PdfExporter, self).__init__(parent)
        self.model = model            # ArchivTableModel doing the actual export
        self.files = filelist         # row indices / file entries to export
        self.destination = destination  # output PDF path

    def work(self):
        # Delegate to the model; signal completion regardless of outcome.
        self.model.exportAsPdfThread(self, self.files, self.destination)
        self.kill.emit()
class ArchivTableModel(QAbstractTableModel):
_startDate = datetime(1890, 1, 1)
def __init__(self, con, tmpdir, librepath, mainwindow, application):
    """Table model over a patient's archive records.

    Args:
        con: open fdb connection to the archive database.
        tmpdir: directory for temporary per-document PDF files.
        librepath: path to the LibreOffice binary (RTF -> PDF conversion).
        mainwindow: ArchivViewer instance providing the table view widget.
        application: QApplication, used for cursor feedback.
    """
    super(ArchivTableModel, self).__init__()
    self._files = []        # current patient's archive entries (dicts)
    self._categories = []   # categoryId -> name mapping, filled by reloadCategories
    self._con = con
    self._tmpdir = tmpdir
    self._librepath = librepath
    self._table = mainwindow.documentView
    self._av = mainwindow
    self._application = application
    self._infos = {}        # last GDT patient record
    self.exportThread = None
    # Recursive so nested lock() calls from the same thread don't deadlock.
    self.mutex = QMutex(mode=QMutex.Recursive)

def __del__(self):
    # Let a running export finish before the model is destroyed.
    if self.exportThread:
        self.exportThread.wait()
@contextmanager
def lock(self, msg=None):
    """Context manager acquiring the model's recursive mutex.

    The msg argument is kept for API compatibility with existing callers;
    the original only fed it into no-op debug branches.
    """
    self.mutex.lock()
    try:
        yield
    finally:
        # Always release, even when the guarded block raises -- the
        # original implementation leaked the lock on exceptions.
        self.mutex.unlock()
def setActivePatient(self, infos):
    """Switch the model to the patient described by the GDT record infos."""
    with self.lock("setActivePatient"):
        self._infos = infos
        self._av.patientName.setText('{name}, {surname} [{id}]'.format(**infos))
        self.reloadData(int(infos["id"]))

def data(self, index, role):
    """Qt display data: date, time, category name and description columns."""
    if role == Qt.DisplayRole:
        with self.lock("setActivePatient"):
            file = self._files[index.row()]
            col = index.column()
            if col == 0:
                return file["datum"].strftime('%d.%m.%Y')
            elif col == 1:
                return file["datum"].strftime('%H:%M')
            elif col == 2:
                try:
                    return self._categories[file["category"]]
                except KeyError:
                    # Unknown category id: show the raw id instead.
                    return file["category"]
            elif col == 3:
                return file["beschreibung"]

def rowCount(self, index):
    with self.lock("rowCount"):
        return len(self._files)
def columnCount(self, index):
    """The table always exposes four columns (date, time, category, description)."""
    return 4

def headerData(self, section, orientation, role):
    """Column captions for the horizontal header; None for everything else."""
    if role != Qt.DisplayRole or orientation != Qt.Horizontal:
        return None
    captions = ("Datum", "Zeit", "Kategorie", "Beschreibung")
    return captions[section] if 0 <= section < len(captions) else None
def reloadData(self, patnr):
self._application.setOverrideCursor(Qt.WaitCursor)
self.beginResetModel()
self.reloadCategories()
self._files = []
selectStm = "SELECT a.FSUROGAT, a.FTEXT, a.FEINTRAGSART, a.FZEIT, a.FDATUM FROM ARCHIV a WHERE a.FPATNR = ? ORDER BY a.FDATUM DESC, a.FZEIT DESC"
cur = self._con.cursor()
cur.execute(selectStm, (patnr,))
for (surogat, beschreibung, eintragsart, zeit, datum) in cur:
self._files.append({
'id': surogat,
'datum': self._startDate + timedelta(days = datum, seconds = zeit),
'beschreibung': beschreibung,
'category': eintragsart
})
del cur
self.endResetModel()
self._table.resizeColumnsToContents()
self._table.horizontalHeader().setStretchLastSection(True)
self._application.restoreOverrideCursor()
def reloadCategories(self):
    """Refresh the category-id -> display-name mapping from MOSYSTEM.

    Only the first result row is used; with no rows the previous
    mapping is kept unchanged.
    """
    cur = self._con.cursor()
    cur.execute("SELECT s.FKATEGORIELISTE, s.FABLAGELISTE, s.FBRIEFKATEGORIELISTE FROM MOSYSTEM s")
    row = cur.fetchone()
    if row is not None:
        self._categories = parseBlobs(row)
    del cur
def generateFile(self, rowIndex, errorSlot = None):
    """Build (and cache) the merged PDF for the archive record at *rowIndex*.

    The archive blob is an LHA container whose members may be PDFs,
    RTF files (converted to PDF via LibreOffice), an eArztbrief e-mail
    ("message.eml") or images.  All convertible members are merged into
    one PDF under the model's temp directory; the file is reused on
    subsequent calls.

    errorSlot: optional Qt signal; when given, errors are emitted there
        instead of opening a modal dialog (used by the export thread).
    Returns the path of the generated PDF.
    """
    with self.lock("generateFile"):
        file = self._files[rowIndex]
        # Cache key: one PDF per archive surrogate id.
        filename = self._tmpdir + os.sep + '{}.pdf'.format(file["id"])
        if not os.path.isfile(filename):
            selectStm = "SELECT a.FDATEI FROM ARCHIV a WHERE a.FSUROGAT = ?"
            cur = self._con.cursor()
            cur.execute(selectStm, (file["id"],))
            (datei,) = cur.fetchone()
            merger = PdfFileMerger()
            ios = None
            try:
                # BLOB readers expose read(); plain bytes values do not,
                # hence the fallback below.
                contents = datei.read()
                ios = io.BytesIO(contents)
            except:
                ios = io.BytesIO(datei)
            lf = LhaFile(ios)
            for name in lf.namelist():
                content = lf.read(name)
                if content[0:5] == b'%PDF-':
                    merger.append(io.BytesIO(content))
                elif content[0:5] == b'{\\rtf':
                    # RTF: convert via LibreOffice in a scratch directory.
                    with tempdir() as tmpdir:
                        tmpfile = tmpdir + os.sep + "temp.rtf"
                        pdffile = tmpdir + os.sep + "temp.pdf"
                        with open(tmpfile, "wb") as f:
                            f.write(content)
                        # NOTE(review): command line built by string
                        # concatenation and run through the shell; paths come
                        # from local config, but subprocess with an argument
                        # list would be safer.
                        command = '"'+" ".join(['"'+self._librepath+'"', "--convert-to pdf", "--outdir", '"'+tmpdir+'"', '"'+tmpfile+'"'])+'"'
                        if os.system(command) == 0:
                            try:
                                with open(pdffile, "rb") as f:
                                    merger.append(io.BytesIO(f.read()))
                            except:
                                if errorSlot:
                                    errorSlot.emit("Fehler beim Öffnen der konvertierten PDF-Datei '%s'" % (pdffile))
                                else:
                                    displayErrorMessage("Fehler beim Öffnen der konvertierten PDF-Datei '%s'" % (pdffile))
                        else:
                            if errorSlot:
                                errorSlot.emit("Fehler beim Ausführen des Kommandos: '%s'" % (command))
                            else:
                                displayErrorMessage("Fehler beim Ausführen des Kommandos: '%s'" % (command))
                elif name == "message.eml":
                    # eArztbrief
                    # Electronic doctor's letter: append all PDF attachments.
                    eml = email.message_from_bytes(content)
                    for part in eml.get_payload():
                        fnam = part.get_filename()
                        partcont = part.get_payload(decode=True)
                        if partcont[0:5] == b'%PDF-':
                            print("eArztbrief: hänge Anhang '%s' an den Export an" % (fnam))
                            merger.append(io.BytesIO(partcont))
                        else:
                            print("eArztbrief: nicht unterstütztes Anhangsformat in Anhang '%s'" % (fnam))
                else:
                    # Anything else: try to wrap it as an image-only PDF.
                    try:
                        merger.append(io.BytesIO(img2pdf.convert(content)))
                    except Exception as e:
                        print("Dateiinhalt '%s' ist kein unterstützter Dateityp -> wird nicht an PDF angehängt (%s)" % (name, e))
            merger.write(filename)
            merger.close()
            try:
                datei.close()
            except:
                pass
            ios.close()
        return filename
def displayFile(self, rowIndex):
    """Generate (or reuse) the merged PDF for a row and open it externally."""
    self._application.setOverrideCursor(Qt.WaitCursor)
    filename = self.generateFile(rowIndex)
    self._application.restoreOverrideCursor()
    # Windows-specific: 'start' is a cmd.exe builtin, hence shell=True;
    # opens the PDF in the system's default viewer.
    subprocess.run(['start', filename], shell=True)
def exportAsPdfThread(self, thread, filelist, destination):
    """Worker-thread body: merge the selected documents into one PDF.

    thread: the exporter QObject providing progress/error/completed
        signals (this method runs inside its QThread).
    filelist: iterable of row indices into the model's file list.
    destination: output path for the merged PDF.
    Returns the number of processed documents.

    NOTE(review): setOverrideCursor/restoreOverrideCursor are called from
    this worker thread although Qt GUI calls belong on the GUI thread --
    confirm this is safe in this application.
    """
    self._application.setOverrideCursor(Qt.WaitCursor)
    with self.lock("exportAsPdfThread"):
        # Snapshot so the GUI thread may reload the model meanwhile.
        files = list(self._files)
    try:
        merger = PdfFileMerger()
        counter = 0
        filelist = sorted(filelist)
        for file in filelist:
            counter += 1
            thread.progress.emit(counter)
            filename = self.generateFile(file, errorSlot = thread.error)
            # Bookmark text: "<description> <dd.mm.yyyy hh:mm>"
            bmtext = " ".join([files[file]["beschreibung"], files[file]["datum"].strftime('%d.%m.%Y %H:%M')])
            merger.append(filename, bookmark=bmtext)
        merger.write(destination)
        merger.close()
    except IOError as e:
        thread.error.emit("Fehler beim Schreiben der PDF-Datei: {}".format(e))
    # Progress 0 signals "done" to the GUI.
    thread.progress.emit(0)
    self._application.restoreOverrideCursor()
    thread.completed.emit(counter, destination)
    return counter
def updateProgress(self, value):
    """Slot: reflect export progress ("n von max") in the progress bar."""
    bar = self._av.exportProgress
    bar.setFormat('%d von %d' % (value, bar.maximum()))
    bar.setValue(value)
def exportCompleted(self, counter, destination):
    """Slot: re-enable the UI after the export thread finished and notify."""
    av = self._av
    av.exportProgress.setFormat('')
    av.exportProgress.setEnabled(False)
    av.exportPdf.setEnabled(True)
    av.documentView.setEnabled(True)
    QMessageBox.information(None, "Export abgeschlossen", "%d Dokumente wurden nach '%s' exportiert" % (counter, destination))
def handleError(self, msg):
    """Slot for worker-thread errors: show *msg* in a modal error dialog."""
    displayErrorMessage(msg)
def exportAsPdf(self, filelist):
    """Ask for a destination and export *filelist* rows as one PDF.

    filelist: list of row indices; when empty, the user may choose to
    export every document of the active patient.  The actual merging
    runs on a background QThread (see exportAsPdfThread).
    """
    if len(filelist) == 0:
        buttonReply = QMessageBox.question(self._av, 'PDF-Export', "Kein Dokument ausgewählt. Export aus allen Dokumenten des Patienten erzeugen?", QMessageBox.Yes | QMessageBox.No)
        if(buttonReply == QMessageBox.Yes):
            with self.lock("exportAsPdf (filelist)"):
                filelist = range(len(self._files))
        else:
            return
    # Last-used export directory is remembered in
    # %AppData%\ArchivViewer\config.json; fall back to the home dir.
    dirconfpath = os.sep.join([os.environ["AppData"], "ArchivViewer", "config.json"])
    outfiledir = str(Path.home())
    try:
        with open(dirconfpath, "r") as f:
            conf = json.load(f)
            outfiledir = conf["outfiledir"]
    except:
        # Best effort: missing/broken config just keeps the default.
        pass
    # Proposed name: Patientenakte_<id>_<name>_<surname>_<birthdate>-<timestamp>.pdf
    outfilename = os.sep.join([outfiledir, 'Patientenakte_%d_%s_%s_%s-%s.pdf' % (int(self._infos["id"]),
        self._infos["name"], self._infos["surname"], self._infos["birthdate"], datetime.now().strftime('%Y%m%d%H%M%S'))])
    destination, _ = QFileDialog.getSaveFileName(self._av, "Auswahl als PDF exportieren", outfilename, "PDF-Datei (*.pdf)")
    if len(destination) > 0:
        try:
            # Persist the chosen directory for the next export.
            os.makedirs(os.path.dirname(dirconfpath), exist_ok = True)
            with open(dirconfpath, "w") as f:
                json.dump({ 'outfiledir': os.path.dirname(destination) }, f, indent = 1)
        except:
            # Best effort: failing to persist the directory is not fatal.
            pass
        # Disable the UI while the worker thread runs; re-enabled in
        # exportCompleted().
        self._av.exportPdf.setEnabled(False)
        self._av.documentView.setEnabled(False)
        self._av.exportProgress.setEnabled(True)
        self._av.exportProgress.setRange(0, len(filelist))
        self._av.exportProgress.setFormat('0 von %d' % (len(filelist)))
        self.pdfExporter = PdfExporter(self, filelist, destination)
        self.exportThread = QThread()
        self.pdfExporter.moveToThread(self.exportThread)
        self.pdfExporter.kill.connect(self.exportThread.quit)
        self.pdfExporter.progress.connect(self.updateProgress)
        self.pdfExporter.completed.connect(self.exportCompleted)
        self.pdfExporter.error.connect(self.handleError)
        self.exportThread.started.connect(self.pdfExporter.work)
        self.exportThread.start()
def readGDT(gdtfile):
    """Parse a GDT patient-export file into a dict.

    Each GDT line is "<3-digit length><4-digit field id><content><CR><LF>";
    the declared length covers the whole line including the line ending.
    Returns a dict with keys "id", "name", "surname" and "birthdate"
    (None for fields absent from the file).
    """
    grabinfo = {
        3000: "id",
        3101: "name",
        3102: "surname",
        3103: "birthdate"
    }
    infos = {
        "id": None,
        "name": None,
        "surname": None,
        # Fixed: "birthdate" was missing here although grabinfo maps it and
        # exportAsPdf() reads infos["birthdate"] -> KeyError for GDT files
        # without field 3103.
        "birthdate": None
    }
    with codecs.open(gdtfile, encoding="iso-8859-15", mode="r") as f:
        for line in f:
            if len(line) < 7:
                # Fixed: blank/truncated lines used to crash int(line[:3]).
                continue
            linelen = int(line[:3])
            feldkennung = int(line[3:7])
            # Strip the trailing CR/LF accounted for in the length field.
            inhalt = line[7:linelen - 2]
            if feldkennung in grabinfo:
                infos[grabinfo[feldkennung]] = inhalt
    return infos
@contextmanager
def tempdir(prefix='tmp'):
    """Yield a freshly created temporary directory, deleting it on exit."""
    location = tempfile.mkdtemp(prefix=prefix)
    try:
        yield location
    finally:
        shutil.rmtree(location)
def tableDoubleClicked(table, model):
    """Open the document under the cursor when a table row is double-clicked."""
    current_row = table.currentIndex().row()
    if current_row >= 0:
        model.displayFile(current_row)
def exportSelectionAsPdf(table, model):
    """Collect the selected row indices and hand them to the model's PDF export."""
    # Fixed: removed the dead "global exportThread" declaration -- the name
    # is never assigned at module level, so the statement was a misleading
    # no-op (the thread handle actually lives on the model).
    indexes = table.selectionModel().selectedRows()
    files = [idx.row() for idx in indexes]
    model.exportAsPdf(files)
def main():
    """Application entry point.

    Builds the Qt UI, locates Medical Office's Firebird database via the
    Windows registry and rstserv.ini, connects to it, and watches the
    configured GDT patient-export file so the viewer follows the patient
    currently open in Medical Office.
    """
    app = QApplication(sys.argv)
    # Install Qt's translation catalogues so stock dialogs follow the locale.
    qt_translator = QTranslator()
    qt_translator.load("qt_" + QLocale.system().name(),
        QLibraryInfo.location(QLibraryInfo.TranslationsPath))
    app.installTranslator(qt_translator)
    qtbase_translator = QTranslator()
    # Fixed: the qtbase catalogue was loaded into qt_translator by mistake,
    # so qtbase_translator was always installed empty.
    qtbase_translator.load("qtbase_" + QLocale.system().name(),
        QLibraryInfo.location(QLibraryInfo.TranslationsPath))
    app.installTranslator(qtbase_translator)
    # Locate the Firebird client library via the Medical Office registry key.
    try:
        mokey = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\WOW6432Node\\INDAMED')
        defaultClientLib = os.sep.join([winreg.QueryValueEx(mokey, 'DataPath')[0], '..', 'Firebird', 'bin', 'fbclient.dll'])
    except OSError as e:
        displayErrorMessage("Failed to open Medical Office registry key: {}".format(e))
        sys.exit()
    conffile = os.getcwd() + os.sep + "Patientenakte.cnf"
    conffile2 = os.path.dirname(os.path.realpath(__file__)) + os.sep + "Patientenakte.cnf"
    # Server name and database path come from Windows' rstserv.ini.
    try:
        rstservini = configparser.ConfigParser()
        rstservini.read(os.sep.join([os.environ["SYSTEMROOT"], 'rstserv.ini']))
        defaultHost = rstservini["SYSTEM"]["Computername"]
        defaultDb = os.sep.join([rstservini["MED95"]["DataPath"], "MEDOFF.GDB"])
    except Exception as e:
        displayErrorMessage("Failed to open rstserv.ini: {}".format(e))
        sys.exit()
    defaultDbUser = "sysdba"
    defaultDbPassword = "masterkey"
    # Raw string: same value as before, but no longer relies on "\P", "\L"
    # and "\s" happening to be invalid (hence literal) escape sequences.
    defaultLibrePath = r"C:\Program Files\LibreOffice\program\soffice.exe"
    # Optional overrides from Patientenakte.cnf (cwd first, then script dir).
    for cfg in [conffile, conffile2]:
        try:
            print("Attempting config %s" % (cfg))
            with open(cfg, 'r') as f:
                conf = json.load(f)
                if "dbuser" in conf:
                    defaultDbUser = conf["dbuser"]
                if "dbpassword" in conf:
                    defaultDbPassword = conf["dbpassword"]
                if "libreoffice" in conf:
                    defaultLibrePath = conf["libreoffice"]
                break
        except Exception as e:
            print("Failed to load config: %s." % (e))
    print("Client lib is %s" % (defaultClientLib))
    print("DB Path is %s on %s" % (defaultDb, defaultHost))
    try:
        print("Connecting db")
        con = fdb.connect(host=defaultHost, database=defaultDb,
            user=defaultDbUser, password=defaultDbPassword, fb_library_name=defaultClientLib)
        print("Connection established.")
    except Exception as e:
        displayErrorMessage('Fehler beim Verbinden mit der Datenbank: {}'.format(e))
        sys.exit()
    # The GDT patient-export file path is configured per client machine.
    try:
        cur = con.cursor()
        stm = "SELECT FVARVALUE FROM MED95INI WHERE FCLIENTNAME=? AND FVARNAME='PatexportDatei'"
        cur.execute(stm, (os.environ["COMPUTERNAME"],))
        res = cur.fetchone()
        if res is None:
            raise Exception("Keine Konfiguration für den Namen '{}' hinterlegt!".format(os.environ["COMPUTERNAME"]))
        gdtfile = res[0][:-1].decode('windows-1252')
        del cur
        if not os.path.isdir(os.path.dirname(gdtfile)):
            raise Exception("Ungültiger Pfad: '{}'. Bitte korrekten Pfad für Patientenexportdatei im Datenpflegesystem konfigurieren.".format(gdtfile))
    except Exception as e:
        displayErrorMessage("Fehler beim Feststellen des Exportpfades: {}".format(e))
        sys.exit()
    with tempdir() as myTemp:
        av = ArchivViewer()
        tm = ArchivTableModel(con, myTemp, defaultLibrePath, av, app)
        av.documentView.doubleClicked.connect(lambda: tableDoubleClicked(av.documentView, tm))
        av.documentView.setModel(tm)
        av.exportPdf.clicked.connect(lambda: exportSelectionAsPdf(av.documentView, tm))
        event_handler = FileChangeHandler(gdtfile, tm)
        av.action_quit.triggered.connect(lambda: app.quit())
        av.action_about.triggered.connect(lambda: QMessageBox.about(av, "Über Archiv Viewer",
            """<p><b>Archiv Viewer</b> ist eine zur Verwendung mit Medical Office der Fa. Indamed entwickelte
Software, die synchron zur Medical Office-Anwendung die gespeicherten Dokumente eines Patienten im Archiv
anzeigen kann. Zusätzlich können ausgewählte Dokumente auch als PDF-Datei exportiert werden.</p><p>(c) 2020 Julian Hartig - Lizensiert unter den Bedingungen der GPLv3</p>"""))
        # Watch the export directory so patient changes in Medical Office
        # are picked up immediately.
        observer = Observer()
        observer.schedule(event_handler, path=os.path.dirname(gdtfile), recursive=False)
        observer.start()
        try:
            infos = readGDT(gdtfile)
            tm.setActivePatient(infos)
        except Exception as e:
            print("While loading GDT file: %s" % (e))
        av.show()
        ret = app.exec_()
        observer.stop()
        # Fixed: this block previously referenced the undefined module-level
        # name "exportThread" (NameError after window close) and called the
        # non-existent QThread.stop()/join(); shut down the model's export
        # thread with quit()/wait() instead.
        if tm.exportThread:
            tm.exportThread.quit()
            tm.exportThread.wait()
        observer.join()
        sys.exit(ret)
/ButterflyNet-1.1.0-py3-none-any.whl/bfnet/util.py | import struct
def infer_int_pack(arg) -> str:
    """
    Attempt to infer the correct struct format for an int.

    Returns the smallest *signed* struct code that can hold ``arg``:
    "h" (16 bit), "i" (32 bit) or "q" (64 bit).

    :param arg: The integer argument to infer.
    :return: A character for the struct string.
    :raises OverflowError: if ``arg`` does not fit a signed 64 bit value.
    """
    # Short (signed 16 bit)
    if (-32768) <= arg <= 32767:
        return "h"
    # Int (signed 32 bit)
    elif (-2147483648) <= arg <= 2147483647:
        return "i"
    # Long long (signed 64 bit).  Fixed: the previous bounds spanned the
    # *unsigned* 64-bit range (-2**64 .. 2**64-1), so values outside the
    # signed range were accepted here only to make struct.pack("q", ...)
    # fail later.
    elif (-9223372036854775808) <= arg <= 9223372036854775807:
        return "q"
    else:
        raise OverflowError("Number {} too big to fit into a struct normally".format(arg))
def _process_args(*args):
    """Flatten str/bytes arguments into per-character items for struct.pack."""
    flattened = []
    for item in args:
        if isinstance(item, str):
            # One single-character string per character.
            flattened.extend(item)
        elif isinstance(item, bytes):
            # One single-byte bytes object per byte.
            flattened.extend(b.to_bytes(1, "big") for b in item)
        else:
            flattened.append(item)
    return tuple(flattened)
def auto_infer_struct_pack(*args, pack: bool=False) -> str:
"""
This will automatically attempt to infer the struct pack/unpack format string
from the types of your arguments.
All integer values will be set as unsigned by default.
:param pack: Should we automatically pack your data up?
:param args: The items to infer from.
:return: Either the string format string, or the packed bytes data.
"""
# Set the fmt string
fmt_string = "!"
for arg in args:
if type(arg) == int:
# Complicated stuff here.
fmt_string += infer_int_pack(arg)
elif type(arg) == float:
# Use a double.
fmt_string += "d"
elif type(arg) == str:
# Use a char[] s
fmt_string += "{}s".format(len(arg))
elif type(arg) == bytes:
# Use a normal 'c'
fmt_string += "{}c".format(len(arg))
elif type(arg) == bool:
fmt_string += "?"
else:
raise ValueError("Type could not be determined - {}".format(type(arg)))
if not pack:
return fmt_string
# Pack data.
s = struct.pack(fmt_string, *_process_args(*args))
return s | PypiClean |
/Nuitka_fixed-1.1.2-cp310-cp310-win_amd64.whl/nuitka/build/inline_copy/glob2/glob2/fnmatch.py | import os
import re
try:
from functools import lru_cache
except ImportError:
from .compat import lru_cache
__all__ = ["filter", "fnmatch", "fnmatchcase", "translate"]
def _norm_paths(path, norm_paths, sep):
    """Normalise *path* according to the tri-state *norm_paths* flag.

    None: only equalise forward slashes to *sep* (or os.sep);
    truthy: apply os.path.normcase(); falsy: return unchanged.
    """
    if norm_paths is None:
        # `re` caches the compiled pattern internally.
        return re.sub(r'\/', sep or os.sep, path)
    if norm_paths:
        return os.path.normcase(path)
    return path
def fnmatch(name, pat, norm_paths=True, case_sensitive=True, sep=None):
    """Test whether FILENAME matches PATTERN.

    Patterns are Unix shell style:

    *       matches everything
    ?       matches any single character
    [seq]   matches any character in seq
    [!seq]  matches any char not in seq

    An initial period in FILENAME is not special.  Both FILENAME and
    PATTERN are first case-normalized if the operating system requires
    it; use fnmatchcase(FILENAME, PATTERN) to skip that.

    :param norm_paths:
        A tri-state boolean:
        when true, invokes `os.path.normcase()` on both paths;
        when `None`, just equalizes slashes/backslashes to `os.sep`;
        when false, does not touch paths at all.

        Note that a side-effect of `normcase()` on *Windows* is that
        it converts to lower-case all matches of `?glob()` functions.
    :param case_sensitive:
        defines the case sensitivity of the regex doing the matches
    :param sep:
        when only slashes are replaced, the separator character to
        substitute; if falsy, `os.sep` is used.

    Notice that by default, `normcase()` causes insensitive matching
    on *Windows*, regardless of the `case_sensitive` param.  Set
    ``norm_paths=None, case_sensitive=False`` to preserve
    verbatim matches.
    """
    name = _norm_paths(name, norm_paths, sep)
    pat = _norm_paths(pat, norm_paths, sep)
    return fnmatchcase(name, pat, case_sensitive=case_sensitive)
@lru_cache(maxsize=256, typed=True)
def _compile_pattern(pat, case_sensitive):
    """Translate *pat* and return the compiled regex's ``match`` callable."""
    if isinstance(pat, bytes):
        # translate() operates on text, so round-trip through ISO-8859-1
        # (a lossless 1:1 byte/char mapping).
        res = translate(pat.decode('ISO-8859-1')).encode('ISO-8859-1')
    else:
        res = translate(pat)
    flags = 0 if case_sensitive else re.IGNORECASE
    return re.compile(res, flags).match
def filter(names, pat, norm_paths=True, case_sensitive=True, sep=None):
    """Return (name, groups) pairs for every entry of *names* matching *pat*.

    Mirrors stdlib ``fnmatch.filter`` (hence the builtin-shadowing name),
    but additionally reports the wildcard capture groups, normalised the
    same way as the inputs.
    """
    pat = _norm_paths(pat, norm_paths, sep)
    match = _compile_pattern(pat, case_sensitive)
    result = []
    for name in names:
        m = match(_norm_paths(name, norm_paths, sep))
        if m:
            groups = tuple(_norm_paths(g, norm_paths, sep) for g in m.groups())
            result.append((name, groups))
    return result
def fnmatchcase(name, pat, case_sensitive=True):
    """Test whether FILENAME matches PATTERN, including case.

    This is a version of fnmatch() which doesn't case-normalize
    its arguments.
    """
    return _compile_pattern(pat, case_sensitive)(name) is not None
def translate(pat):
    """Translate a shell PATTERN to a regular expression.

    '*' and '?' become capturing groups '(.*)' and '(.)'; '[...]'
    character classes are passed through (with '!' mapped to '^');
    everything else is escaped.  There is no way to quote
    meta-characters.
    """
    parts = []
    pos, length = 0, len(pat)
    while pos < length:
        ch = pat[pos]
        pos = pos + 1
        if ch == '*':
            parts.append('(.*)')
        elif ch == '?':
            parts.append('(.)')
        elif ch == '[':
            # Scan for the closing bracket, honouring a leading '!' and a
            # literal ']' placed first inside the class.
            scan = pos
            if scan < length and pat[scan] == '!':
                scan = scan + 1
            if scan < length and pat[scan] == ']':
                scan = scan + 1
            while scan < length and pat[scan] != ']':
                scan = scan + 1
            if scan >= length:
                # Unterminated class: treat '[' literally.
                parts.append('\\[')
            else:
                inner = pat[pos:scan].replace('\\', '\\\\')
                pos = scan + 1
                if inner[0] == '!':
                    inner = '^' + inner[1:]
                elif inner[0] == '^':
                    inner = '\\' + inner
                parts.append('([%s])' % inner)
        else:
            parts.append(re.escape(ch))
    return '(?ms)' + ''.join(parts) + '\\Z'
/K-Format-0.3.0.tar.gz/K-Format-0.3.0/README.md | # K-Format
[](https://travis-ci.org/Rainist/K-Format) [](https://badge.fury.io/py/K-Format) [](https://badge.fury.io/py/K-Format) [](https://rainist.com/recruit)
K-Format is a Python library designed for dealing with KCB K-Format in a convenient way.
## Getting Started
```python
from kformat import *
@kclass
class Item:
sth: N(2)
@kclass
class Base:
birthday: AN(8)
name: AN(10)
grade: N(3)
items: List[Item]
base = Base(date(1980, 8, 12), '홍길동', 1, [Item(3), Item(4)])
assert base.bytes == b'19800812\xc8\xab\xb1\xe6\xb5\xbf 0010304'
```
## Installation
```bash
pip install K-Format
```
| PypiClean |
/Boutique-0.1.3.tar.gz/Boutique-0.1.3/boutiqueclient/bservice/cmds/auth.py | import sys
import getpass
from path import path
from boutiqueclient.bservice.api.error import ApiUnauthorizedError
from .base import BaseCommand
class Login(BaseCommand):
    """Log in with your boutique credentials."""
    # NOTE: this module is Python 2 code (print statements); do not run
    # under Python 3 without porting.
    def run(self):
        # Prompt for the email once, then allow up to three password tries.
        success = False
        retries, retry = 3, 0
        sys.stdout.write('Enter your boutiqueforge.com credentials.\n')
        sys.stdout.write('Email: ')
        email = sys.stdin.readline().strip()
        token = None
        while retry < retries and not success:
            password = getpass.getpass('Password: ')
            resp = self._service.post('/api/account/login',
                data=dict(email=email, password=password))
            if resp.is_success():
                success = True
                resp_json = resp.json()
                token = resp_json.get('account', {}).get('api_token')
                print 'Authentication successful!'
                break
            elif resp.status_code == 412:
                # 412 is a server-side refusal (not a wrong password):
                # report and give up instead of retrying.
                print resp.get('error')
                return
            else:
                retry += 1
        if not success:
            print 'Could not login'
            return
        # save the netrc email and token in the .netrc file
        self._update_credentials(email, token)
        # send the pubfile to the server so git ssh authentication will work
        self._upload_pubfile()
    def _update_credentials(self, email, token):
        # Persist the credentials for subsequent API calls.
        self._service.credentials.set(email, token)
    def _upload_pubfile(self):
        # Best effort: upload ~/.ssh/id_rsa.pub so git-over-ssh works.
        file_path = path('~/.ssh/id_rsa.pub').expanduser()
        if not file_path.isfile():
            print 'Could not find public ssh key, skipping uploading of key...'
            return
        with open(file_path, 'r') as f:
            publickey = f.read()
        data = {'publickey': publickey}
        resp = self._service.post('/api/account/publickey', data)
        if not resp.is_success():
            print 'Could not upload public key: %s' % resp.error
class Whoami(BaseCommand):
    """Print the user you're currently logged in with."""
    def run(self):
        # Read the cached credentials; absence means the user never
        # logged in (or logged out), which is an authorization error.
        email = self._service.credentials.email
        token = self._service.credentials.token
        if email is None or token is None:
            raise ApiUnauthorizedError()
        print email
/MDAnalysis-2.1.0.tar.gz/MDAnalysis-2.1.0/doc/sphinx/source/documentation_pages/trajectory_transformations.rst | .. Contains the formatted docstrings for the transformations located in 'mdanalysis/MDAnalysis/transformations'
.. _transformations:
*********************************************************
Trajectory transformations ("on-the-fly" transformations)
*********************************************************
.. module:: MDAnalysis.transformations
In MDAnalysis, a *transformation* is a function/function-like class
that modifies the data for the current :class:`Timestep` and returns the
:class:`Timestep`. For instance, coordinate transformations, such as
PBC corrections and molecule fitting are often required for some
analyses and visualization. Transformation functions
(``transformation_1`` and ``transformation_2`` in the following
example) can be called by the user for any given :class:`Timestep` of
the trajectory,
.. code-block:: python
u = MDAnalysis.Universe(topology, trajectory)
for ts in u.trajectory:
ts = transformation_2(transformation_1(ts))
where they change the coordinates of the timestep ``ts`` in
place. There is nothing special about these transformations except
that they have to be written in such a way that they change the
:class:`Timestep` in place.
As described under :ref:`workflows`, multiple transformations can be
grouped together and associated with a trajectory so that the
trajectory is **transformed on-the-fly**, i.e., the data read from the
trajectory file will be changed before it is made available in, say,
the :attr:`AtomGroup.positions` attribute.
The submodule :mod:`MDAnalysis.transformations` contains a
collection of transformations (see :ref:`transformations-module`) that
can be immediately used but one can always write custom
transformations (see :ref:`custom-transformations`).
.. _workflows:
Workflows
---------
Instead of manually applying transformations, it is much more
convenient to associate a whole *workflow* of transformations with a
trajectory and have the transformations be called automatically.
A workflow is a sequence (tuple or list) of transformation functions
that will be applied in this order. For example,
.. code-block:: python
workflow = [transformation_1, transformation_2]
would effectively result in
.. code-block:: python
ts = transformation_2(transformation_1(ts))
for every time step in the trajectory.
One can add a workflow using the
:meth:`Universe.trajectory.add_transformations
<MDAnalysis.coordinates.base.ReaderBase.add_transformations>` method
of a trajectory (where the list ``workflow`` is taken from the example
above),
.. code-block:: python
u.trajectory.add_transformations(*workflow)
or upon :class:`Universe <MDAnalysis.core.universe.Universe>`
creation using the keyword argument `transformations`:
.. code-block:: python
u = MDAnalysis.Universe(topology, trajectory, transformations=workflow)
Note that in these two cases, the workflow cannot be changed after having
being added.
.. _custom-transformations:
Creating transformations
------------------------
A simple *transformation* can also be a function that takes a
:class:`~MDAnalysis.coordinates.base.Timestep` as input, modifies it, and
returns it. If it takes no other arguments but a :class:`Timestep`
can be defined as the following example:
.. code-block:: python
def up_by_2(ts):
"""
Translate all coordinates by 2 angstroms up along the Z dimension.
"""
ts.positions = ts.positions + np.array([0, 0, 2], dtype=np.float32)
return ts
If the transformation requires other arguments besides the :class:`Timestep`,
the following two methods can be used to create such transformation:
.. _custom-transformations-class:
Creating complex transformation classes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It is implemented by inheriting from
:class:`MDAnalysis.transformations.base.TransformationBase`,
which defines :func:`__call__` for the transformation class
and can be applied directly to a :class:`Timestep`. :func:`_transform` has to
be defined and include the operations on the :class:`MDAnalysis.coordinates.base.Timestep`.
So, a transformation class can be roughly defined as follows:
.. code-block:: python
from MDAnalysis.transformations import TransformationBase
class up_by_x_class(TransformationBase):
def __init__(self, distance):
self.distance = distance
def _transform(self, ts):
ts.positions = ts.positions + np.array([0, 0, self.distance], dtype=np.float32)
return ts
It is the default construction method in :mod:`MDAnalysis.transformations`
from release 2.0.0 onwards because it can be reliably serialized.
See :class:`MDAnalysis.transformations.translate` for a simple example.
.. _custom-transformations-closure:
Creating complex transformation closure functions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Transformation can also be a wrapped function takes the :class:`Timestep` object as argument.
So in this case, a transformation function (closure) can be roughly defined as follows:
.. code-block:: python
def up_by_x_func(distance):
"""
Creates a transformation that will translate all coordinates by a given amount along the Z dimension.
"""
def wrapped(ts):
ts.positions = ts.positions + np.array([0, 0, distance], dtype=np.float32)
return ts
return wrapped
An alternative to using a wrapped function is using partials from :mod:`functools`. The
above function can be written as:
.. code-block:: python
import functools
def up_by_x(ts, distance):
ts.positions = ts.positions + np.array([0, 0, distance], dtype=np.float32)
return ts
up_by_2 = functools.partial(up_by_x, distance=2)
Although functions (closures) work as transformations, they are not used in
in MDAnalysis from release 2.0.0 onwards because they cannot be reliably
serialized and thus a :class:`Universe` with such transformations cannot be
used with common parallelization schemes (e.g., ones based on
:mod:`multiprocessing`).
For detailed descriptions about how to write a closure-style transformation,
please refer to MDAnalysis 1.x documentation.
.. _transformations-module:
Transformations in MDAnalysis
-----------------------------
The module :mod:`MDAnalysis.transformations` contains transformations that can
be immediately used in your own :ref:`workflows<workflows>`. In order to use
any of these transformations, the module must first be imported:
.. code-block:: python
import MDAnalysis.transformations
A workflow can then be added to a trajectory as described above. Notably,
the parameter `max_threads` can be defined when creating a transformation
instance to limit the maximum threads.
(See :class:`MDAnalysis.transformations.base.TransformationBase` for more details)
Whether a specific transformation can be used along with parallel analysis
can be assessed by checking its
:attr:`~MDAnalysis.transformations.base.TransformationBase.parallelizable`
attribute.
See :ref:`implemented-transformations` for more on the existing
transformations in :mod:`MDAnalysis.transformations`.
How to transformations
----------------------
Translating the coordinates of a single frame (although one would normally add
the transformation to a :ref:`workflow<workflows>`, as shown in the subsequent
examples):
.. code-block:: python
u = MDAnalysis.Universe(topology, trajectory)
new_ts = MDAnalysis.transformations.translate([1,1,1])(u.trajectory.ts)
Create a workflow and add it to the trajectory:
.. code-block:: python
u = MDAnalysis.Universe(topology, trajectory)
workflow = [MDAnalysis.transformations.translate([1,1,1]),
MDAnalysis.transformations.translate([1,2,3])]
u.trajectory.add_transformations(*workflow)
Giving a workflow as a keyword argument when defining the universe:
.. code-block:: python
workflow = [MDAnalysis.transformations.translate([1,1,1]),
MDAnalysis.transformations.translate([1,2,3])]
u = MDAnalysis.Universe(topology, trajectory, transformations=workflow)
.. _building-block-transformation:
Building blocks for Transformation Classes
------------------------------------------
Transformations normally utilize the power of NumPy to get better performance
on array operations. However, when it comes to parallelism, NumPy will sometimes
oversubscribe the threads, either by hyper threading (when it uses the OpenBLAS backend),
or by working with other parallel engines (e.g. Dask).
In MDAnalysis, we use `threadpoolctl <https://github.com/joblib/threadpoolctl>`_
inside :class:`~MDAnalysis.transformations.base.TransformationBase` to control the maximum threads for transformations.
It is also possible to apply a global thread limit by setting external environment
variables, e.g. :code:`OMP_NUM_THREADS=1 MKL_NUM_THREADS=1 OPENBLAS_NUM_THREADS=1
BLIS_NUM_THREADS=1 python script.py`. Read more about parallelism and resource management
in `scikit-learn documentations <https://scikit-learn.org/dev/computing/parallelism.html>`_.
Users are advised to benchmark code because interaction between different
libraries can lead to sub-optimal performance with defaults.
.. toctree::
./transformations/base
.. _implemented-transformations:
Currently implemented transformations
-------------------------------------
.. toctree::
./transformations/translate
./transformations/rotate
./transformations/positionaveraging
./transformations/fit
./transformations/wrap
./transformations/boxdimensions
| PypiClean |
/Firefly%20III%20API%20Python%20Client-1.5.6.post2.tar.gz/Firefly III API Python Client-1.5.6.post2/firefly_iii_client/model/tag_single.py | import re # noqa: F401
import sys # noqa: F401
from firefly_iii_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from firefly_iii_client.exceptions import ApiAttributeError
def lazy_import():
    """Import the TagRead model at call time to avoid a circular import."""
    from firefly_iii_client.model.tag_read import TagRead
    # Publish into module globals so openapi_types/additional_properties_type
    # can reference TagRead after the first call.
    globals()['TagRead'] = TagRead
class TagSingle(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'data': (TagRead,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'data': 'data', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, data, *args, **kwargs): # noqa: E501
"""TagSingle - a model defined in OpenAPI
Args:
data (TagRead):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.data = data
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, data, *args, **kwargs): # noqa: E501
"""TagSingle - a model defined in OpenAPI
Args:
data (TagRead):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.data = data
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.") | PypiClean |
/C-Telethon-1.28.5.tar.gz/C-Telethon-1.28.5/telethon/tl/functions/langpack.py | from ...tl.tlobject import TLObject
from ...tl.tlobject import TLRequest
from typing import Optional, List, Union, TYPE_CHECKING
import os
import struct
from datetime import datetime
class GetDifferenceRequest(TLRequest):
    CONSTRUCTOR_ID = 0xcd984aa5
    SUBCLASS_OF_ID = 0x52662d55

    def __init__(self, lang_pack: str, lang_code: str, from_version: int):
        """
        :returns LangPackDifference: Instance of LangPackDifference.
        """
        self.lang_pack = lang_pack
        self.lang_code = lang_code
        self.from_version = from_version

    def to_dict(self):
        # Plain-dict view of the request; '_' identifies the request type.
        return dict(
            _='GetDifferenceRequest',
            lang_pack=self.lang_pack,
            lang_code=self.lang_code,
            from_version=self.from_version,
        )

    def _bytes(self):
        # Constructor ID 0xcd984aa5 (little-endian) followed by the fields.
        pieces = (
            b'\xa5J\x98\xcd',
            self.serialize_bytes(self.lang_pack),
            self.serialize_bytes(self.lang_code),
            struct.pack('<i', self.from_version),
        )
        return b''.join(pieces)

    @classmethod
    def from_reader(cls, reader):
        pack = reader.tgread_string()
        code = reader.tgread_string()
        version = reader.read_int()
        return cls(lang_pack=pack, lang_code=code, from_version=version)
class GetLangPackRequest(TLRequest):
    CONSTRUCTOR_ID = 0xf2f2330a
    SUBCLASS_OF_ID = 0x52662d55

    def __init__(self, lang_pack: str, lang_code: str):
        """
        :returns LangPackDifference: Instance of LangPackDifference.
        """
        self.lang_pack = lang_pack
        self.lang_code = lang_code

    def to_dict(self):
        # Plain-dict view of the request; '_' identifies the request type.
        return dict(
            _='GetLangPackRequest',
            lang_pack=self.lang_pack,
            lang_code=self.lang_code,
        )

    def _bytes(self):
        # Constructor ID 0xf2f2330a (little-endian) followed by the fields.
        pieces = (
            b'\n3\xf2\xf2',
            self.serialize_bytes(self.lang_pack),
            self.serialize_bytes(self.lang_code),
        )
        return b''.join(pieces)

    @classmethod
    def from_reader(cls, reader):
        pack = reader.tgread_string()
        code = reader.tgread_string()
        return cls(lang_pack=pack, lang_code=code)
class GetLanguageRequest(TLRequest):
    CONSTRUCTOR_ID = 0x6a596502
    SUBCLASS_OF_ID = 0xabac89b7

    def __init__(self, lang_pack: str, lang_code: str):
        """
        :returns LangPackLanguage: Instance of LangPackLanguage.
        """
        self.lang_pack = lang_pack
        self.lang_code = lang_code

    def to_dict(self):
        # Plain-dict view of the request; '_' identifies the request type.
        return dict(
            _='GetLanguageRequest',
            lang_pack=self.lang_pack,
            lang_code=self.lang_code,
        )

    def _bytes(self):
        # Constructor ID 0x6a596502 (little-endian) followed by the fields.
        pieces = (
            b'\x02eYj',
            self.serialize_bytes(self.lang_pack),
            self.serialize_bytes(self.lang_code),
        )
        return b''.join(pieces)

    @classmethod
    def from_reader(cls, reader):
        pack = reader.tgread_string()
        code = reader.tgread_string()
        return cls(lang_pack=pack, lang_code=code)
class GetLanguagesRequest(TLRequest):
    CONSTRUCTOR_ID = 0x42c6978f
    SUBCLASS_OF_ID = 0x280912c9

    def __init__(self, lang_pack: str):
        """
        :returns Vector<LangPackLanguage>: This type has no constructors.
        """
        self.lang_pack = lang_pack

    def to_dict(self):
        # Plain-dict view of the request; '_' identifies the request type.
        return dict(_='GetLanguagesRequest', lang_pack=self.lang_pack)

    def _bytes(self):
        # Constructor ID 0x42c6978f (little-endian) followed by the field.
        pieces = (
            b'\x8f\x97\xc6B',
            self.serialize_bytes(self.lang_pack),
        )
        return b''.join(pieces)

    @classmethod
    def from_reader(cls, reader):
        pack = reader.tgread_string()
        return cls(lang_pack=pack)
class GetStringsRequest(TLRequest):
    CONSTRUCTOR_ID = 0xefea3803
    SUBCLASS_OF_ID = 0xc7b7353d

    def __init__(self, lang_pack: str, lang_code: str, keys: List[str]):
        """
        :returns Vector<LangPackString>: This type has no constructors.
        """
        self.lang_pack = lang_pack
        self.lang_code = lang_code
        self.keys = keys

    def to_dict(self):
        # Plain-dict view of the request; the keys list is shallow-copied.
        return dict(
            _='GetStringsRequest',
            lang_pack=self.lang_pack,
            lang_code=self.lang_code,
            keys=[] if self.keys is None else self.keys[:],
        )

    def _bytes(self):
        # Constructor ID, fields, then the bare-vector constructor
        # (0x1cb5c415), the element count and each serialized key.
        serialized_keys = b''.join(self.serialize_bytes(key) for key in self.keys)
        return b''.join((
            b'\x038\xea\xef',
            self.serialize_bytes(self.lang_pack),
            self.serialize_bytes(self.lang_code),
            b'\x15\xc4\xb5\x1c',
            struct.pack('<i', len(self.keys)),
            serialized_keys,
        ))

    @classmethod
    def from_reader(cls, reader):
        pack = reader.tgread_string()
        code = reader.tgread_string()
        reader.read_int()  # skip the vector constructor ID
        keys = [reader.tgread_string() for _ in range(reader.read_int())]
        return cls(lang_pack=pack, lang_code=code, keys=keys)
/Frontiersman-2.0.2-py3-none-any.whl/frontiersman/gameboard/Board.py | class Vertex:
def __init__(self, location=(0, 0, 0)):
self.location = location
self.neighbors = [None, None, None]
class Tile:
    """A single land hex: its resource, dice number, robber flag, and the
    roads/buildings around it (lists start due north and run clockwise)."""

    def __init__(self, resource='Wheat', location=(0, 0, 0)):
        self.__resource = resource
        self.__robber = False
        self.__location = location
        self.__roads = []
        self.__buildings = []
        self.__number = 0

    def get_number(self):
        return self.__number

    def set_number(self, integer):
        self.__number = integer

    def get_resource(self):
        return self.__resource

    def set_robber(self, value):
        self.__robber = value

    def get_robber(self):
        return self.__robber

    def get_buildings(self):
        return self.__buildings

    def get_location(self):
        return self.__location

    def set_location(self, location):
        self.__location = location

    # Property wrappers kept for attribute-style access by callers.
    robber = property(get_robber, set_robber)
    buildings = property(get_buildings)  # read-only
    resource = property(get_resource)    # read-only
    location = property(get_location, set_location)
    number = property(get_number, set_number)
class Port:
    """An ocean-edge tile that may offer a trade ratio.  Until set_type is
    called it behaves as plain ocean: 4:1 rate and is_port False."""

    def __init__(self, location=(0, 0, 0)):
        self.location = location
        self.specialty = False
        self.trade = 'None'
        self.is_port = False
        self.rate = 4

    def set_property(self, location, resource='None'):
        self.location = location
        self.trade = resource

    def get_type(self):
        return self.trade

    def set_type(self, trade):
        # 'None' marks a generic 3:1 port; a named resource trades 2:1.
        self.rate = 3 if trade == 'None' else 2
        self.trade = trade
        self.is_port = True

    def get_location(self):
        return self.location

    def set_location(self, location):
        self.location = location

    resource = property(get_type, set_type)
class Board:
    """Hex game board: 19 resource tiles ringed by 18 ocean tiles, 9 of
    which are trading ports.  Hexes use cube coordinates (x, y, z)."""

    def __init__(self):
        self.num_resource_tiles = 19
        self.num_ocean_tiles = 18
        self.num_ports = 9
        self.ocean_location_list = []
        self.fill_ocean_location_list()
        # Maps a dice roll (2-12) to the tiles that pay out on that roll.
        self.lookup_table = {roll: [] for roll in range(2, 13)}
        self.location_list = []
        self.fill_location_list()
        self.__ocean_tiles = []
        self.ports = []
        self.robber = None
        self.__resource_tiles = []
        # Unit steps to the six neighbouring hexes, clockwise from top-left.
        self.direction_array = [
            (0, 1, -1),   # top left
            (1, 0, -1),   # top right
            (1, -1, 0),   # right
            (0, -1, 1),   # bottom right
            (-1, 0, 1),   # bottom left
            (-1, 1, 0),   # left
        ]
        # 7x7 grid for coordinate-based lookups (cube coords offset by +3).
        self.__array = [[0] * 7 for _ in range(7)]
        self.translate_to_3d()

    def form_ocean(self):
        pass

    def get_array(self):
        return self.__array

    def translate_to_3d(self):
        # Project each tile's and port's cube coordinates onto the 7x7 grid.
        for tile in self.__resource_tiles:
            self.__array[int(tile.location[0]) + 3][int(tile.location[1]) + 3] = tile
        for port in self.ports:
            self.__array[int(port.location[0]) + 3][int(port.location[1]) + 3] = port

    def get_tile(self, tup):
        return self.__array[int(tup[0] + 3)][int(tup[1] + 3)]

    def get_resource_list(self):
        return self.__resource_tiles

    def get_ocean_list(self):
        return self.__ocean_tiles

    def get_port_list(self):
        return self.ports

    def generate_land(self, number_list, resource_list):
        # One tile per board location, consuming the resource list in order.
        for count, spot in enumerate(self.location_list):
            self.__resource_tiles.append(Tile(resource_list[count], spot))
        # Hand out dice numbers; the desert gets 7 and the robber instead.
        pos = 0
        for tile in self.__resource_tiles:
            if tile.resource == 'Desert':
                tile.number = 7
                tile.set_robber(True)
                self.robber = tile
            else:
                tile.number = number_list[pos]
                self.lookup_table[number_list[pos]].append(tile)
                pos += 1

    def xy_give_port(self, location):
        # 1-based port indices laid out on the coastal (x, y) grid; 0 = no port.
        valid_ports = [
            [0, 0, 1, 1, 0, 2, 2, 0, 0, 0, 0],
            [0, 9, 0, 0, 0, 0, 0, 0, 3, 3, 0],
            [0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 4],
            [0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 4],
            [0, 8, 0, 0, 0, 0, 0, 0, 5, 5, 0],
            [0, 0, 7, 7, 0, 6, 6, 0, 0, 0, 0]]
        port_index = valid_ports[location[1]][location[0]]
        return None if port_index == 0 else self.ports[port_index - 1]

    def generate_ocean(self):
        # Ring the board with ocean tiles (Port objects, inactive by default).
        for spot in self.ocean_location_list:
            self.__ocean_tiles.append(Port(spot))

    def print_ports(self):
        for tile in self.__ocean_tiles:
            if tile.is_port:
                print(tile.location, tile.rate, tile.resource)

    def generate_ports(self, port_list):
        # Every second coastal hex becomes an active port.
        self.generate_ocean()
        next_port = 0
        for position, tile in enumerate(self.__ocean_tiles):
            if position % 2 == 0:
                tile.set_type(port_list[next_port])
                self.ports.append(tile)
                next_port += 1

    def set_robber(self, location):
        self.robber.set_robber(False)
        self.robber = self.get_tile(location)
        self.robber.set_robber(True)

    def get_robber(self):
        return self.robber

    def fill_ocean_location_list(self):
        # Clockwise ring of the 18 coastal hexes, starting due north.
        self.ocean_location_list.extend([
            [0, 3, -3], [1, 2, -3], [2, 1, -3], [3, 0, -3], [3, -1, -2],
            [3, -2, -1], [3, -3, 0], [2, -3, 1], [1, -3, 2], [0, -3, 3],
            [-1, -2, 3], [-2, -1, 3], [-3, 0, 3], [-3, 1, 2], [-3, 2, 1],
            [-3, 3, 0], [-2, 3, -1], [-1, 3, -2],
        ])

    def fill_location_list(self):
        # The 19 land hexes, row by row from the top of the board.
        self.location_list.extend([
            [0, 2, -2], [1, 1, -2], [2, 0, -2],
            [-1, 2, -1], [0, 1, -1], [1, 0, -1], [2, -1, -1],
            [-2, 2, 0], [-1, 1, 0], [0, 0, 0], [1, -1, 0], [2, -2, 0],
            [-2, 1, 1], [-1, 0, 1], [0, -1, 1], [1, -2, 1],
            [-2, 0, 2], [-1, -1, 2], [0, -2, 2],
        ])

    land_list = property(get_resource_list)
    ocean_list = property(get_ocean_list)
    port_list = property(get_port_list)
/JacksonQuery-0.0.1-py3-none-any.whl/portfolio_optimization/optimization_model.py | import bt
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from tqdm import tqdm
from pypfopt import efficient_frontier, objective_functions
from . import optimization_config as oc
from . import optimization_inputs as oi
from . import optimization_data as od
from . import benchmark_model as bm
# Constants
# Module-level inputs assembled once at import time from the
# optimization_inputs (oi) / optimization_data (od) / optimization_config (oc)
# helpers; every optimizer function below reads these.
EXPECTED_RETURNS = oi.compute_expected_returns(fees=oc.INCLUDE_FEES)
COVARIANCE_MATRIX = oi.COVARIANCE_MATRIX
RISK_FREE_RATE = oi.RISK_FREE_RATE
# Per-fund expense ratios and the flat insurance charge combine into each
# candidate portfolio's reported 'Total_Expense%'.
FUND_EXPENSE_PERCENT = od.get_fund_expense_percent()
VARIABLE_INSURANCE_CHARGES = od.get_variable_insurance_charges()
# Morningstar-category constraint data fed to every EfficientFrontier run.
# NOTE(review): category_constraints() is called three times — presumably
# cheap/idempotent; confirm, or call once and unpack.
CATEGORY_MAPPER = oi.category_constraints()['mapper']
CATEGORY_LOWER = oi.category_constraints()['lower']
CATEGORY_UPPER = oi.category_constraints()['upper']
CURRENT_PORTFOLIO = oc.CURRENT_PORTFOLIO
def clean_weights(weightings, cutoff=0.0001, rounding=4):
    """
    Helper function to clean the raw weights output by the optimizer.

    Zeroes out weights below `cutoff`, optionally rounds, renormalizes so the
    remaining weights sum to 1, and drops the zero entries.

    :param weightings: (pd.Series or single-column pd.DataFrame) Raw weights output by the optimizer.
    :param cutoff: (float) Minimum weight to include in the portfolio. Defaults to 0.0001.
    :param rounding: (int) Number of decimal places to round weights. Defaults to 4.
    :return: Cleaned weights of the same type as the input.
    """
    if weightings is None:
        raise AttributeError("Weights not yet computed")
    cleaned = weightings.copy()
    cleaned[np.abs(cleaned) < cutoff] = 0
    if rounding is not None:
        if not isinstance(rounding, int) or rounding < 1:
            raise ValueError("Rounding must be a positive integer")
        cleaned = np.round(cleaned, rounding)
    # Renormalize the surviving weights, then drop the zeroed entries.
    cleaned = cleaned.div(cleaned.sum())
    return cleaned[cleaned != 0]
def calculate_minimum_risk():
    """
    Calculates the minimum risk portfolio.
    :return: (float) Minimum risk, (pd.Series) Minimum risk weights, (pd.DataFrame) Minimum risk performance metrics.
    """
    results = pd.DataFrame()
    # Constrained mean-variance optimizer built from the module-level inputs.
    ef = efficient_frontier.EfficientFrontier(EXPECTED_RETURNS, COVARIANCE_MATRIX, oc.BOUNDS)
    # L2 regularisation spreads weight across more holdings.
    ef.add_objective(objective_functions.L2_reg, gamma=oc.GAMMA)
    ef.add_sector_constraints(CATEGORY_MAPPER, CATEGORY_LOWER, CATEGORY_UPPER)
    ef.min_volatility()
    min_risk_weights = ef.clean_weights(oc.MIN_WEIGHT)
    min_risk_weights = pd.DataFrame.from_dict(min_risk_weights, orient='index', columns=['Min_Risk_Portfolio'])
    # Second cleaning pass renormalizes after sub-threshold weights are dropped.
    min_risk_weights = clean_weights(min_risk_weights, cutoff=oc.MIN_WEIGHT)
    min_risk_weights = min_risk_weights.squeeze().fillna(0)
    min_risk_weights = min_risk_weights[min_risk_weights != 0].sort_values(ascending=False).round(4)
    min_risk_weights.index.name = 'TICKER'
    # Calculate Portfolio Total Expense%
    subaccount_fees = FUND_EXPENSE_PERCENT.loc[min_risk_weights.index] + VARIABLE_INSURANCE_CHARGES
    total_portfolio_expense = (subaccount_fees * min_risk_weights).sum()
    performance = pd.DataFrame(
        ef.portfolio_performance(risk_free_rate=RISK_FREE_RATE),
        columns=['Min_Risk_Portfolio'],
        index=['Expected_Return', 'Volatility', 'Sharpe_Ratio']
    )
    performance.loc['Total_Expense%'] = total_portfolio_expense
    # NOTE(review): unlike calculate_maximum_risk, this concat is not .round(4)ed
    # — confirm whether the asymmetry is intentional.
    results = pd.concat([results, performance], axis=1)
    results.columns = ['PORTFOLIO']
    results = results.rename(index={0: 'Expected_Return', 1: 'Volatility', 2: 'Sharpe_Ratio', 3: 'Total_Expense%'})
    min_risk = results.loc['Volatility'].squeeze()
    return min_risk, min_risk_weights, results
def calculate_maximum_risk():
    """
    Calculates the maximum risk portfolio.
    :return: (float) Maximum risk, (pd.Series) Maximum risk weights, (pd.DataFrame) Maximum risk performance metrics.
    """
    results = pd.DataFrame()
    # Same constrained optimizer setup as calculate_minimum_risk.
    ef = efficient_frontier.EfficientFrontier(EXPECTED_RETURNS, COVARIANCE_MATRIX, oc.BOUNDS)
    ef.add_objective(objective_functions.L2_reg, gamma=oc.GAMMA)
    ef.add_sector_constraints(CATEGORY_MAPPER, CATEGORY_LOWER, CATEGORY_UPPER)
    # Target volatility of 1.0 (100%) — effectively the risky end of the frontier.
    ef.efficient_risk(1.0)
    max_risk_weights = ef.clean_weights(oc.MIN_WEIGHT)
    max_risk_weights = pd.DataFrame.from_dict(max_risk_weights, orient='index', columns=['Max_Risk_Portfolio'])
    # Second cleaning pass renormalizes after sub-threshold weights are dropped.
    max_risk_weights = clean_weights(max_risk_weights, cutoff=oc.MIN_WEIGHT)
    max_risk_weights = max_risk_weights.squeeze().fillna(0)
    max_risk_weights = max_risk_weights[max_risk_weights != 0].sort_values(ascending=False).round(4)
    max_risk_weights.index.name = 'TICKER'
    # Calculate Portfolio Total Expense%
    subaccount_fees = FUND_EXPENSE_PERCENT.loc[max_risk_weights.index] + VARIABLE_INSURANCE_CHARGES
    total_portfolio_expense = (subaccount_fees * max_risk_weights).sum()
    performance = pd.DataFrame(
        ef.portfolio_performance(risk_free_rate=RISK_FREE_RATE),
        columns=['Max_Risk_Portfolio'],
        index=['Expected_Return', 'Volatility', 'Sharpe_Ratio']
    )
    performance.loc['Total_Expense%'] = total_portfolio_expense
    results = pd.concat([results, performance], axis=1).round(4)
    results.columns = ['PORTFOLIO']
    results = results.rename(index={0: 'Expected_Return', 1: 'Volatility', 2: 'Sharpe_Ratio', 3: 'Total_Expense%'})
    max_risk = results.loc['Volatility'].squeeze()
    return max_risk, max_risk_weights, results
def plot_efficient_frontier(weightings, results, figsize=(10, 6)):
    """
    Plots the efficient frontier.

    Scatters each individual holding (volatility vs. expected return), draws
    the frontier line, and stars the min-risk, max-Sharpe, Benchmark, Current
    and JNL S&P 500 portfolios.

    :param weightings: (pd.Series) Portfolio weights.
    :param results: (pd.DataFrame) Portfolio performance metrics.
    :param figsize: (tuple) Figure size.
    :return: None
    """
    # NOTE(review): results must already contain the 'Benchmark', 'Current' and
    # 'JNL_Mellon_S&P_500_Index' columns — .loc raises KeyError otherwise.
    # set the figure size
    plt.figure(figsize=figsize)
    # plot individual holdings as a scatterplot
    for ticker in weightings.index:
        # sqrt of the diagonal covariance entry = the holding's volatility
        plt.scatter(COVARIANCE_MATRIX.loc[ticker, ticker] ** 0.5, EXPECTED_RETURNS[ticker], marker='o', color='k')
    # get the minimum risk portfolio and maximum sharpe portfolio
    min_risk_portfolio = results.iloc[:, 0]
    max_sharpe_portfolio = results.loc[:, results.loc['Sharpe_Ratio'].idxmax()]
    benchmark_portfolio = results.loc[:, 'Benchmark']
    jnl_sp500_portfolio = results.loc[:, 'JNL_Mellon_S&P_500_Index']
    current_portfolio = results.loc[:, 'Current']
    # plot the efficient frontier
    # iloc[:-3] drops the three reference columns appended after the sweep
    plt.plot(
        results.loc['Volatility'].iloc[:-3],
        results.loc['Expected_Return'].iloc[:-3],
        'k-', label="Efficient Frontier", zorder=1
    )
    # plot the minimum risk portfolio
    plt.scatter(
        min_risk_portfolio.loc['Volatility'],
        min_risk_portfolio.loc['Expected_Return'],
        marker='*', color='red', s=250, label='Minimum Risk', zorder=2
    )
    # plot the maximum sharpe portfolio
    plt.scatter(
        max_sharpe_portfolio.loc['Volatility'],
        max_sharpe_portfolio.loc['Expected_Return'],
        marker='*', color='green', s=250, label='Maximum Sharpe', zorder=2
    )
    # plot the benchmark portfolio
    plt.scatter(
        benchmark_portfolio.loc['Volatility'],
        benchmark_portfolio.loc['Expected_Return'],
        marker='*', color='blue', s=250, label='Benchmark', zorder=2
    )
    # plot the current portfolio
    plt.scatter(
        current_portfolio.loc['Volatility'],
        current_portfolio.loc['Expected_Return'],
        marker='*', color='magenta', s=250, label='Current', zorder=2
    )
    # plot the JNL Mellon S&P 500 Index portfolio
    plt.scatter(
        jnl_sp500_portfolio.loc['Volatility'],
        jnl_sp500_portfolio.loc['Expected_Return'],
        marker='*', color='purple', s=250, label='JNL_Mellon_S&P_500_Index', zorder=2
    )
    plt.title('Efficient Frontier with Individual Holdings')
    plt.xlabel('Volatility')
    plt.ylabel('Expected Returns')
    plt.legend(loc='best')
    plt.show()
def calculate_efficient_frontier(display_plot=False):
    """
    Calculates the efficient frontier.

    Sweeps 20 target volatilities between the minimum- and maximum-risk
    portfolios, then appends the Benchmark, Current and
    JNL_Mellon_S&P_500_Index reference portfolios to both outputs.

    :param display_plot: (bool) Display the efficient frontier plot.
    :return: (pd.DataFrame, pd.DataFrame) Portfolio weightings and efficient
        frontier performance metrics (Expected_Return, Volatility,
        Sharpe_Ratio, Total_Expense% per portfolio).
    """
    min_risk = calculate_minimum_risk()[0]
    max_risk = calculate_maximum_risk()[0]
    weightings = pd.DataFrame()
    results = pd.DataFrame()
    counter = 1
    # Sweep 20 target volatilities strictly inside the feasible risk range.
    for risk in tqdm(np.linspace(min_risk + .001, max_risk - .001, 20).round(4)):
        ef = efficient_frontier.EfficientFrontier(EXPECTED_RETURNS, COVARIANCE_MATRIX, oc.BOUNDS)
        ef.add_objective(objective_functions.L2_reg, gamma=oc.GAMMA)
        ef.add_sector_constraints(CATEGORY_MAPPER, CATEGORY_LOWER, CATEGORY_UPPER)
        ef.efficient_risk(risk)
        weights = ef.clean_weights(oc.MIN_WEIGHT)
        weights = pd.DataFrame.from_dict(weights, orient='index', columns=[counter]).round(4)
        weights = clean_weights(weights, cutoff=oc.MIN_WEIGHT)
        weights.index.name = 'TICKER'
        weights = weights.fillna(0)
        # Properly align subaccount_fees and weights before multiplication
        subaccount_fees = FUND_EXPENSE_PERCENT.reindex(weights.index) + VARIABLE_INSURANCE_CHARGES
        total_portfolio_expense = (subaccount_fees * weights[counter]).sum()
        performance = pd.DataFrame(
            ef.portfolio_performance(risk_free_rate=RISK_FREE_RATE),
            columns=[counter],
            index=['Expected_Return', 'Volatility', 'Sharpe_Ratio']
        )
        performance.loc['Total_Expense%'] = total_portfolio_expense
        weightings = pd.concat([weightings, weights], axis=1).round(4)
        results = pd.concat([results, performance], axis=1).round(4)
        counter += 1
    weightings['Benchmark'] = bm.get_benchmark_portfolio()
    weightings = weightings.fillna(0)
    # Calculate the expected return, volatility, and Sharpe ratio for the benchmark portfolio
    benchmark_weights = weightings['Benchmark']
    benchmark_returns = EXPECTED_RETURNS.loc[benchmark_weights.index]
    benchmark_exp_return = np.dot(benchmark_weights, benchmark_returns)
    benchmark_cov_matrix = COVARIANCE_MATRIX.loc[benchmark_weights.index, benchmark_weights.index]
    benchmark_volatility = np.sqrt(np.dot(benchmark_weights.T, np.dot(benchmark_cov_matrix, benchmark_weights)))
    benchmark_sharpe_ratio = (benchmark_exp_return - RISK_FREE_RATE) / benchmark_volatility
    subaccount_fees = FUND_EXPENSE_PERCENT.loc[benchmark_weights.index] + VARIABLE_INSURANCE_CHARGES
    benchmark_expense_percent = (subaccount_fees * benchmark_weights).sum()
    # Add the benchmark performance to the results
    results['Benchmark'] = [benchmark_exp_return, benchmark_volatility, benchmark_sharpe_ratio,
                            benchmark_expense_percent]
    # Calculate the expected return, volatility, and Sharpe ratio for the current portfolio
    weightings['Current'] = CURRENT_PORTFOLIO
    weightings = weightings.fillna(0)
    current_returns = EXPECTED_RETURNS.loc[CURRENT_PORTFOLIO.index]
    current_exp_return = np.dot(CURRENT_PORTFOLIO, current_returns)
    current_cov_matrix = COVARIANCE_MATRIX.loc[CURRENT_PORTFOLIO.index, CURRENT_PORTFOLIO.index]
    current_volatility = np.sqrt(np.dot(CURRENT_PORTFOLIO.T, np.dot(current_cov_matrix, CURRENT_PORTFOLIO)))
    current_sharpe_ratio = (current_exp_return - RISK_FREE_RATE) / current_volatility
    subaccount_fees = FUND_EXPENSE_PERCENT.loc[CURRENT_PORTFOLIO.index] + VARIABLE_INSURANCE_CHARGES
    current_expense_percent = (subaccount_fees * CURRENT_PORTFOLIO).sum()
    # Add the current portfolio performance to the results
    results['Current'] = [current_exp_return, current_volatility, current_sharpe_ratio,
                          current_expense_percent]
    # Calculate the expected return, volatility, and Sharpe ratio for 'JNL_Mellon_S&P_500_Index'
    weightings.loc['JNL_Mellon_S&P_500_Index', 'JNL_Mellon_S&P_500_Index'] = 1
    weightings = weightings.fillna(0)
    jnl_sp500_weights = weightings['JNL_Mellon_S&P_500_Index']
    jnl_sp500_returns = EXPECTED_RETURNS.loc[jnl_sp500_weights.index]
    jnl_sp500_exp_return = np.dot(jnl_sp500_weights, jnl_sp500_returns)
    jnl_sp500_cov_matrix = COVARIANCE_MATRIX.loc[jnl_sp500_weights.index, jnl_sp500_weights.index]
    jnl_sp500_volatility = np.sqrt(np.dot(jnl_sp500_weights.T, np.dot(jnl_sp500_cov_matrix, jnl_sp500_weights)))
    jnl_sp500_sharpe_ratio = (jnl_sp500_exp_return - RISK_FREE_RATE) / jnl_sp500_volatility
    subaccount_fees = FUND_EXPENSE_PERCENT.loc[jnl_sp500_weights.index] + VARIABLE_INSURANCE_CHARGES
    jnl_sp500_expense_percent = (subaccount_fees * jnl_sp500_weights).sum()
    # Add the 'JNL_Mellon_S&P_500_Index' performance to the results
    results['JNL_Mellon_S&P_500_Index'] = [
        jnl_sp500_exp_return, jnl_sp500_volatility, jnl_sp500_sharpe_ratio, jnl_sp500_expense_percent
    ]
    # BUG FIX: the plot used to be drawn before the Benchmark/Current/JNL
    # columns were added, so plot_efficient_frontier crashed with a KeyError
    # on results.loc[:, 'Benchmark'].  Plot only after all columns exist.
    if display_plot:
        print('Efficient Frontier:')
        plot_efficient_frontier(weightings, results)
    return weightings, results
def compute_effective_exposure(weightings, categories):
    """
    Compute the effective exposure of a portfolio to each category.

    Each portfolio's exposure is the weight-weighted sum of its holdings'
    classification scores from oc.CLASSIFICATION_SCHEMA.

    :param weightings: (pd.DataFrame) The weightings of the portfolio.
    :param categories: (list) The categories to compute the effective exposure for.
    :return: (pd.DataFrame) The effective exposure of the portfolio to each category.
    """
    # Work with portfolios as rows so each column is a holding.
    by_portfolio = weightings.copy().T
    exposure = pd.DataFrame(index=by_portfolio.index, columns=categories)
    for name in by_portfolio.index:
        for category in categories:
            exposure.loc[name, category] = np.sum(
                by_portfolio.loc[name] * oc.CLASSIFICATION_SCHEMA.loc[by_portfolio.columns, category]
            )
    # Back to categories-as-rows, portfolios-as-columns.
    return exposure.T.round(4)
def select_portfolio(portfolio_number, weightings):
    """
    Select the portfolio with the given number from the weightings dataframe.

    :param portfolio_number: (int) The number of the portfolio to select.
    :param weightings: (pd.DataFrame) The weightings of the portfolios.
    :return: (pd.Series) The nonzero weightings of the selected portfolio, largest first.
    """
    chosen = weightings[portfolio_number]
    nonzero = chosen[chosen != 0]
    return nonzero.sort_values(ascending=False)
def compute_category_exposure(weightings):
    """
    Compute the exposure of the portfolio to each Morningstar category.

    :param weightings: (pd.DataFrame) The weightings of the portfolio.
    :return: (pd.DataFrame) The exposure of the portfolio to each category.
    """
    grouped = weightings.groupby(oc.CLASSIFICATION_SCHEMA['Morningstar Category']).sum()
    # Mask zeros, drop categories empty across every portfolio, refill the rest.
    masked = grouped[grouped != 0]
    return masked.dropna(axis=1, how='all').fillna(0).round(4)
def plot_holdings_values(portfolio, figsize=(12, 6)):
    """
    Plot the values of the holdings in the portfolio over time.

    :param portfolio: (pd.Series) The weightings of the portfolio.
    :param figsize: (tuple) The size of the figure.
    :return: None
    """
    prices = od.get_subaccount_prices()[portfolio.index]
    prices.index = pd.to_datetime(prices.index)
    plt.figure(figsize=figsize)
    for name in prices.columns:
        plt.plot(prices.index, prices[name], label=name)
    # Tick once per year on the time axis.
    axis = plt.gca()
    axis.xaxis.set_major_locator(mdates.YearLocator())
    axis.xaxis.set_major_formatter(mdates.DateFormatter('%Y'))
    plt.xlabel('Time')
    plt.ylabel('Value')
    plt.title('Subaccounts over time')
    plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
    plt.grid(True)
    plt.show()
def display_selected_portfolio(portfolio_number, weightings, results):
    """
    Display the selected portfolio.

    Prints, in order: header, YTD return, weights, performance metrics, then
    asset-class, category, sector, market-cap and style exposures.

    :param portfolio_number: (int) The number of the portfolio to select.
    :param weightings: (pd.DataFrame) The weightings of the portfolios.
    :param results: (pd.DataFrame) The results of the portfolios.
    :return: None
    """
    portfolio = select_portfolio(portfolio_number, weightings)
    # Effective exposures are computed over ALL portfolios, then the selected
    # column is printed below.
    asset_class_exposure = compute_effective_exposure(
        weightings,
        ['US Stocks', 'Non US Stocks', 'Bonds', 'Cash', 'Other']
    ).round(4)
    category_exposure = compute_category_exposure(weightings)
    sector_exposure = compute_effective_exposure(weightings, [
        'Basic Materials', 'Consumer Cyclical', 'Financial Services', 'Real Estate',
        'Communication Services', 'Energy', 'Industrials', 'Technology', 'Consumer Defensive',
        'Healthcare', 'Utilities'
    ])
    marketcap_exposure = compute_effective_exposure(weightings, ['Giant', 'Large', 'Medium', 'Small', 'Micro'])
    style_exposure = compute_effective_exposure(weightings, ['Cyclical', 'Sensitive', 'Defensive'])
    print('Portfolio {}:'.format(portfolio_number))
    print('\n')
    print(compute_effective_exposure(weightings, ['YTD Return%'])[portfolio_number])
    print('\n')
    print(portfolio)
    print('\n')
    print(results[portfolio_number])
    print('\n')
    print(asset_class_exposure[portfolio_number].sort_values(ascending=False))
    print('\n')
    print(category_exposure[portfolio_number][category_exposure[portfolio_number] != 0].sort_values(ascending=False))
    print('\n')
    print(sector_exposure[portfolio_number].sort_values(ascending=False))
    print('\n')
    print(marketcap_exposure[portfolio_number].sort_values(ascending=False))
    print('\n')
    print(style_exposure[portfolio_number].sort_values(ascending=False))
    print('\n')
    print()
def optimal_portfolio(results):
    """
    Select the optimal portfolio from the results dataframe.

    The optimum is the column with the highest Sharpe ratio.

    :param results: (pd.DataFrame) The results of the portfolios.
    :return: (pd.Series) The performance metrics of the optimal portfolio.
    """
    print('Optimal Portfolio: ')
    best_column = results.loc['Sharpe_Ratio'].idxmax()
    return results.loc[:, best_column]
def run_backtests(weightings):
    """
    Run backtests for each portfolio.

    Builds one buy-and-hold bt.Strategy per weightings column and runs them
    all against the subaccount price history.

    :param weightings: (pd.DataFrame) The weightings of the portfolios.
    :return: (bt.backtest.Backtest) The backtest results.
    """
    prices = od.get_subaccount_prices()
    backtests = []
    for column in weightings.columns:
        # Allocate once at the start, then hold.
        strategy = bt.Strategy(
            f'Portfolio_{column}',
            [bt.algos.RunOnce(),
             bt.algos.SelectAll(),
             bt.algos.WeighSpecified(**weightings[column]),
             bt.algos.Rebalance()])
        backtests.append(bt.Backtest(strategy, prices))
    return bt.run(*backtests)
def backtest_timeseries(backtest_results):
    """
    Get the timeseries of the backtest results.

    :param backtest_results: (bt.backtest.Backtest) The backtest results.
    :return: (pd.Series) The deduplicated timeseries of the backtest results.
    """
    # NOTE(review): relies on bt's private _get_series API — confirm on upgrade.
    series = backtest_results._get_series('y')
    return series.drop_duplicates()
/MODOI-1.0.tar.gz/MODOI-1.0/SimulationClient/CustomBFGS.py | import numpy as np
from scipy.optimize import minpack2
import LinearAlgebra as la
from Geometric import Length, GradLength
from MetricValues import get_metric
def find_geodesic_midpoint(start_point, end_point, number_of_inner_points, basis_rotation_matrix,
                           tangent_direction, codimension, metric_server_addresses, mass_matrix, authkey,
                           gtol=1e-5):
    """ Compute the midpoint of the local geodesic curve joining start_point to end_point
    using a modified BFGS method.

    The modification arises from taking the implementation of BFGS and re-writing it to
    minimise the number of times the metric function is called.

    Args:
        start_point (numpy.array) :
            The first end point of the curve.
        end_point (numpy.array) :
            The last end point of the curve.
        number_of_inner_points (int) :
            The number of nodes along the curve, less the end points.
        basis_rotation_matrix (numpy.array) :
            The matrix computed as a result of the orthogonal_tangent_basis function.
        tangent_direction (numpy.array) :
            The tangent direction as computed by the SimulationClient.
        codimension (int) :
            The dimension of the problem minus 1. Computed from the atomistic simulation
            environment.
        metric_server_addresses :
            A list of tuples of the form (str, int) containing the hostnames and port
            numbers of the SimulationPotential instances.
        mass_matrix (numpy.array) :
            A diagonal NumPy array containing the masses of the molecular system as
            computed in the SimulationClient object.
        authkey (str) :
            The password used in order to communicate with the SimulationPotential
            instances.
        gtol (optional float) :
            The tolerance threshold for the BFGS method.

    Returns:
        numpy.array: The midpoint along the local geodesic curve, or None if a
        SimulationPotential instance could not be contacted.
    """
    # Determine the number of variables the BFGS method will be applied to
    number_of_variables = number_of_inner_points * codimension
    # Initial guess for the minimum: zero shifts, i.e. the straight line segment
    # joining start_point to end_point.
    x0 = np.zeros(number_of_variables)
    # Allocate memory for the kth iterate
    xk = x0
    # Convert the description of the curve as shifts in the orthonormal hyperspace along the initial line to points in
    # the full space. See LinearAlgebra.shifts_to_curve for more details.
    curve = la.shifts_to_curve(start_point, end_point, xk, number_of_inner_points,
                               basis_rotation_matrix, tangent_direction, codimension)
    # Get the initial metric values along the starting curve.
    metric = get_metric(curve, number_of_inner_points, metric_server_addresses, authkey)
    # If the SimulationPotential couldn't be contacted then return None to close the SimulationClient
    if metric is None:
        return None
    # Obtain the initial gradient of the length functional along the curve
    gfk = GradLength(curve, metric, number_of_inner_points, mass_matrix, basis_rotation_matrix)
    # Identity matrix; also serves as the initial approximate (inverse) Hessian.
    I = np.eye(number_of_variables, dtype=int)
    Hk = I
    # Compute the norm of the gradient in the L^{\infty} norm
    gnorm = np.amax(np.abs(gfk))
    # The main body of the BFGS calculation:
    # Repeat the method until the norm of the gradient of the length is sufficiently small.
    while gnorm > gtol:
        alpha1 = 1.0
        pk = -np.dot(Hk, gfk)
        phi0 = Length(curve, metric, number_of_inner_points, mass_matrix)
        phi1 = phi0
        derphi0 = np.dot(gfk, pk)
        derphi1 = derphi0
        isave = np.zeros((2,), np.intc)
        dsave = np.zeros((13,), float)
        task = b'START'
        # Perform the linesearch (at most 30 metric evaluations).
        # 'range' replaces the Python-2-only 'xrange'; the loop variable is unused.
        for _ in range(30):
            stp, phi1, derphi1, task = minpack2.dcsrch(alpha1, phi1, derphi1, 1e-4, 0.9, 1e-14, task, 1e-8, 50,
                                                       isave, dsave)
            if task[:2] == b'FG':
                alpha1 = stp
                # Convert the description of the curve as shifts in the orthonormal hyperspace along the initial line
                # to points in the full space. See LinearAlgebra.shifts_to_curve for more details.
                curve = la.shifts_to_curve(start_point, end_point, xk + stp*pk, number_of_inner_points,
                                           basis_rotation_matrix, tangent_direction, codimension)
                # Get the metric values along the current trial.
                metric = get_metric(curve, number_of_inner_points, metric_server_addresses, authkey)
                # If the SimulationPotential couldn't be reached then return None to close SimulationClient
                if metric is None:
                    return None
                phi1 = Length(curve, metric, number_of_inner_points, mass_matrix)
                gfkp1 = GradLength(curve, metric, number_of_inner_points, mass_matrix, basis_rotation_matrix)
                derphi1 = np.dot(gfkp1, pk)
            else:
                break
        else:
            break
        if task[:5] == b'ERROR' or task[:4] == b'WARN':
            break
        alpha_k = stp
        xkp1 = xk + alpha_k * pk
        sk = xkp1 - xk
        xk = xkp1
        # NOTE(review): assumes the line search performed at least one 'FG'
        # evaluation so gfkp1 is bound here -- confirm dcsrch cannot converge
        # without requesting a function/gradient evaluation first.
        yk = gfkp1 - gfk
        gfk = gfkp1
        gnorm = np.amax(np.abs(gfk))
        if gnorm <= gtol:
            break
        rhok = 1.0 / (np.dot(yk, sk))
        if np.isinf(rhok):
            rhok = 1000.0  # this is patch for numpy
        # Standard BFGS inverse-Hessian update.
        Hk = np.dot(I - sk[:, np.newaxis] * yk[np.newaxis, :] *
                    rhok, np.dot(Hk, I - yk[:, np.newaxis] * sk[np.newaxis, :] * rhok)) + (rhok * sk[:, np.newaxis]
                                                                                           * sk[np.newaxis, :])
    # Return the midpoint. Floor division keeps the index an int on Python 3
    # (the original '/' produced a float index there, a TypeError); on Python 2
    # the result is unchanged.
    return curve[(number_of_inner_points + 1) // 2]
/Gnotty-0.2.7.tar.gz/Gnotty-0.2.7/gnotty/static/js/jquery.tmpl.min.js | (function(a){var r=a.fn.domManip,d="_tmplitem",q=/^[^<]*(<[\w\W]+>)[^>]*$|\{\{\! /,b={},f={},e,p={key:0,data:{}},h=0,c=0,l=[];function g(e,d,g,i){var c={data:i||(d?d.data:{}),_wrap:d?d._wrap:null,tmpl:null,parent:d||null,nodes:[],calls:u,nest:w,wrap:x,html:v,update:t};e&&a.extend(c,e,{nodes:[],parent:d});if(g){c.tmpl=g;c._ctnt=c._ctnt||c.tmpl(a,c);c.key=++h;(l.length?f:b)[h]=c}return c}a.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(f,d){a.fn[f]=function(n){var g=[],i=a(n),k,h,m,l,j=this.length===1&&this[0].parentNode;e=b||{};if(j&&j.nodeType===11&&j.childNodes.length===1&&i.length===1){i[d](this[0]);g=this}else{for(h=0,m=i.length;h<m;h++){c=h;k=(h>0?this.clone(true):this).get();a.fn[d].apply(a(i[h]),k);g=g.concat(k)}c=0;g=this.pushStack(g,f,i.selector)}l=e;e=null;a.tmpl.complete(l);return g}});a.fn.extend({tmpl:function(d,c,b){return a.tmpl(this[0],d,c,b)},tmplItem:function(){return a.tmplItem(this[0])},template:function(b){return a.template(b,this[0])},domManip:function(d,l,j){if(d[0]&&d[0].nodeType){var f=a.makeArray(arguments),g=d.length,i=0,h;while(i<g&&!(h=a.data(d[i++],"tmplItem")));if(g>1)f[0]=[a.makeArray(d)];if(h&&c)f[2]=function(b){a.tmpl.afterManip(this,b,j)};r.apply(this,f)}else r.apply(this,arguments);c=0;!e&&a.tmpl.complete(b);return this}});a.extend({tmpl:function(d,h,e,c){var j,k=!c;if(k){c=p;d=a.template[d]||a.template(null,d);f={}}else if(!d){d=c.tmpl;b[c.key]=c;c.nodes=[];c.wrapped&&n(c,c.wrapped);return a(i(c,null,c.tmpl(a,c)))}if(!d)return[];if(typeof h==="function")h=h.call(c||{});e&&e.wrapped&&n(e,e.wrapped);j=a.isArray(h)?a.map(h,function(a){return a?g(e,c,d,a):null}):[g(e,c,d,h)];return k?a(i(c,null,j)):j},tmplItem:function(b){var c;if(b instanceof a)b=b[0];while(b&&b.nodeType===1&&!(c=a.data(b,"tmplItem"))&&(b=b.parentNode));return c||p},template:function(c,b){if(b){if(typeof 
b==="string")b=o(b);else if(b instanceof a)b=b[0]||{};if(b.nodeType)b=a.data(b,"tmpl")||a.data(b,"tmpl",o(b.innerHTML));return typeof c==="string"?(a.template[c]=b):b}return c?typeof c!=="string"?a.template(null,c):a.template[c]||a.template(null,q.test(c)?c:a(c)):null},encode:function(a){return(""+a).split("<").join("<").split(">").join(">").split('"').join(""").split("'").join("'")}});a.extend(a.tmpl,{tag:{tmpl:{_default:{$2:"null"},open:"if($notnull_1){_=_.concat($item.nest($1,$2));}"},wrap:{_default:{$2:"null"},open:"$item.calls(_,$1,$2);_=[];",close:"call=$item.calls();_=call._.concat($item.wrap(call,_));"},each:{_default:{$2:"$index, $value"},open:"if($notnull_1){$.each($1a,function($2){with(this){",close:"}});}"},"if":{open:"if(($notnull_1) && $1a){",close:"}"},"else":{_default:{$1:"true"},open:"}else if(($notnull_1) && $1a){"},html:{open:"if($notnull_1){_.push($1a);}"},"=":{_default:{$1:"$data"},open:"if($notnull_1){_.push($.encode($1a));}"},"!":{open:""}},complete:function(){b={}},afterManip:function(f,b,d){var e=b.nodeType===11?a.makeArray(b.childNodes):b.nodeType===1?[b]:[];d.call(f,b);m(e);c++}});function i(e,g,f){var b,c=f?a.map(f,function(a){return typeof a==="string"?e.key?a.replace(/(<\w+)(?=[\s>])(?![^>]*_tmplitem)([^>]*)/g,"$1 "+d+'="'+e.key+'" $2'):a:i(a,e,a._ctnt)}):e;if(g)return c;c=c.join("");c.replace(/^\s*([^<\s][^<]*)?(<[\w\W]+>)([^>]*[^>\s])?\s*$/,function(f,c,e,d){b=a(e).get();m(b);if(c)b=j(c).concat(b);if(d)b=b.concat(j(d))});return b?b:j(c)}function j(c){var b=document.createElement("div");b.innerHTML=c;return a.makeArray(b.childNodes)}function o(b){return new Function("jQuery","$item","var $=jQuery,call,_=[],$data=$item.data;with($data){_.push('"+a.trim(b).replace(/([\\'])/g,"\\$1").replace(/[\r\t\n]/g," ").replace(/\$\{([^\}]*)\}/g,"{{= $1}}").replace(/\{\{(\/?)(\w+|.)(?:\(((?:[^\}]|\}(?!\}))*?)?\))?(?:\s+(.*?)?)?(\(((?:[^\}]|\}(?!\}))*?)\))?\s*\}\}/g,function(m,l,j,d,b,c,e){var i=a.tmpl.tag[j],h,f,g;if(!i)throw"Template command not 
found: "+j;h=i._default||[];if(c&&!/\w$/.test(b)){b+=c;c=""}if(b){b=k(b);e=e?","+k(e)+")":c?")":"";f=c?b.indexOf(".")>-1?b+c:"("+b+").call($item"+e:b;g=c?f:"(typeof("+b+")==='function'?("+b+").call($item):("+b+"))"}else g=f=h.$1||"null";d=k(d);return"');"+i[l?"close":"open"].split("$notnull_1").join(b?"typeof("+b+")!=='undefined' && ("+b+")!=null":"true").split("$1a").join(g).split("$1").join(f).split("$2").join(d?d.replace(/\s*([^\(]+)\s*(\((.*?)\))?/g,function(d,c,b,a){a=a?","+a+")":b?")":"";return a?"("+c+").call($item"+a:d}):h.$2||"")+"_.push('"})+"');}return _;")}function n(c,b){c._wrap=i(c,true,a.isArray(b)?b:[q.test(b)?b:a(b).html()]).join("")}function k(a){return a?a.replace(/\\'/g,"'").replace(/\\\\/g,"\\"):null}function s(b){var a=document.createElement("div");a.appendChild(b.cloneNode(true));return a.innerHTML}function m(o){var n="_"+c,k,j,l={},e,p,i;for(e=0,p=o.length;e<p;e++){if((k=o[e]).nodeType!==1)continue;j=k.getElementsByTagName("*");for(i=j.length-1;i>=0;i--)m(j[i]);m(k)}function m(j){var p,i=j,k,e,m;if(m=j.getAttribute(d)){while(i.parentNode&&(i=i.parentNode).nodeType===1&&!(p=i.getAttribute(d)));if(p!==m){i=i.parentNode?i.nodeType===11?0:i.getAttribute(d)||0:0;if(!(e=b[m])){e=f[m];e=g(e,b[i]||f[i],null,true);e.key=++h;b[h]=e}c&&o(m)}j.removeAttribute(d)}else if(c&&(e=a.data(j,"tmplItem"))){o(e.key);b[e.key]=e;i=a.data(j.parentNode,"tmplItem");i=i?i.key:0}if(e){k=e;while(k&&k.key!=i){k.nodes.push(j);k=k.parent}delete e._ctnt;delete e._wrap;a.data(j,"tmplItem",e)}function o(a){a=a+n;e=l[a]=l[a]||g(e,b[e.parent.key+n]||e.parent,null,true)}}}function u(a,d,c,b){if(!a)return l.pop();l.push({_:a,tmpl:d,item:this,data:c,options:b})}function w(d,c,b){return a.tmpl(a.template(d),c,b,this)}function x(b,d){var c=b.options||{};c.wrapped=d;return a.tmpl(a.template(b.tmpl),b.data,c,b.item)}function v(d,c){var b=this._wrap;return a.map(a(a.isArray(b)?b.join(""):b).filter(d||"*"),function(a){return c?a.innerText||a.textContent:a.outerHTML||s(a)})}function 
t(){var b=this.nodes;a.tmpl(null,null,null,this).insertBefore(b[0]);a(b).remove()}})(jQuery) | PypiClean |
/MokaPlayer-0.8.5.7.tar.gz/MokaPlayer-0.8.5.7/mokaplayer/core/fetchers/tabs/provider_ultimateguitar.py | import logging
import os
import re
import json
import requests
from lxml import html
class ProviderUltimateGuitar():
    """Fetch guitar tabs (ASCII and Guitar Pro) from ultimate-guitar.com."""
    SEARCH_URL = 'https://www.ultimate-guitar.com/search.php'
    DOWNLOAD_URL = 'https://tabs.ultimate-guitar.com/tab/download'
    @staticmethod
    def __get_json_data_in_page(page):
        """Extract the window.UGAPP.store.page JSON payload embedded in *page*."""
        json_data = page.xpath("//script[contains(text(),'window.UGAPP.store.page')]")[0].text
        json_data = json_data.replace("window.UGAPP.store.i18n = {};", "")
        # Keep only the outermost JSON object of the script body.
        json_data = json_data[json_data.find('{'):json_data.rfind('}') + 1]
        json_data = json.loads(json_data)
        return json_data
    @staticmethod
    def search(title, artist):
        """ Return a list of tabs (a dict with all infos) that can be fetched or downloaded"""
        response = requests.get(ProviderUltimateGuitar.SEARCH_URL,
                                params={
                                    'band_name': artist,
                                    'song_name': title,
                                    'type[]': ['200', '500'],  # tab and guitar pro
                                })
        if response.ok:
            try:
                tabs = []
                page = html.fromstring(response.content)
                json_data = ProviderUltimateGuitar.__get_json_data_in_page(page)
                for tab_json in json_data['data']['results']:
                    if 'type_name' in tab_json and (tab_json['type_name'] == 'Tab' or tab_json['type_name'] == 'Guitar Pro'):
                        tabs.append({
                            'name': tab_json['song_name'],
                            'url': tab_json['tab_url'],
                            'rating': tab_json['rating'],
                            'votes': tab_json['votes'],
                            'type': tab_json['type_name'],
                        })
                # Sort by type, then by descending rating.
                return sorted(tabs, key=lambda tab: (tab['type'],
                                                     -tab['rating']))
            # Narrowed from a bare 'except': keep KeyboardInterrupt/SystemExit fatal.
            except Exception:
                logging.exception('Could not search guitar tabs for: ' + title)
        return []
    @staticmethod
    def fetch_ascii_tab(url):
        """Retrieve the ascii tab from a url"""
        try:
            response = requests.get(url)
            if response.ok:
                page = html.fromstring(response.content)
                json_data = ProviderUltimateGuitar.__get_json_data_in_page(page)
                return json_data['data']['tab_view']['wiki_tab']['content']
        # Narrowed from a bare 'except': keep KeyboardInterrupt/SystemExit fatal.
        except Exception:
            logging.exception('Could not fetch ascii tabs for: ' + url)
        return ''
    @staticmethod
    def download_guitar_pro_tab(url, directory):
        """Retrieve and download the guitar pro tab (file) from a url.

        Returns the path of the written file, or None on failure."""
        try:
            response = requests.get(url, cookies={
                'back_to_classic_ug': '1'
            })
            if response.ok:
                page = html.fromstring(response.content)
                json_data = ProviderUltimateGuitar.__get_json_data_in_page(page)
                response = requests.get(ProviderUltimateGuitar.DOWNLOAD_URL,
                                        params={
                                            'id': json_data['data']['tab']['id'],
                                        }, headers={
                                            "Referer": url,
                                        })
                # Raw string so \s stays a regex escape (not a deprecated
                # string escape under Python 3.6+).
                filename = re.findall(r'filename\s*?=\s?"(.+)"',
                                      response.headers['Content-Disposition'])[0]
                os.makedirs(directory, exist_ok=True)
                path = os.path.join(directory, filename)
                with open(path, 'wb') as f:
                    f.write(response.content)
                return path
        # Narrowed from a bare 'except': keep KeyboardInterrupt/SystemExit fatal.
        except Exception:
            logging.exception('Could not download guitar pro tabs for: ' + url)
        return None
/LFake-18.9.0.tar.gz/LFake-18.9.0/lfake/providers/ssn/no_NO/__init__.py | import datetime
import operator
from typing import List, Optional, Sequence
from ....typing import SexLiteral
from .. import Provider as SsnProvider
def checksum(digits: Sequence[int], scale: List[int]) -> int:
    """Compute a "Module 11" control digit for a Norwegian identity code.

    Each digit is multiplied by the matching weight in *scale* and the
    products are summed; the checksum is 11 minus the sum modulo 11,
    mapped to 0 when that difference equals 11.
    https://no.wikipedia.org/wiki/F%C3%B8dselsnummer
    """
    # zip truncates at the shorter sequence, matching map(mul, a, b).
    weighted_sum = sum(d * w for d, w in zip(digits, scale))
    remainder = 11 - (weighted_sum % 11)
    return 0 if remainder == 11 else remainder
class Provider(SsnProvider):
    # "Module 11" weights for the first and second control digits.
    scale1 = (3, 7, 6, 1, 8, 9, 4, 5, 2)
    scale2 = (5, 4, 3, 2, 7, 6, 5, 4, 3, 2)
    def ssn(self, dob: Optional[str] = None, gender: Optional[SexLiteral] = None) -> str:
        """
        Returns 11 character Norwegian personal identity code (Fødselsnummer).

        A Norwegian personal identity code consists of 11 digits, without any
        whitespace or other delimiters. The form is DDMMYYIIICC, where III is
        a serial number separating persons born on the same date with different
        intervals depending on the year they are born. CC is two checksums.
        https://en.wikipedia.org/wiki/National_identification_number#Norway

        :param dob: date of birth as a "YYYYMMDD" string
        :type dob: str
        :param gender: gender of the person - "F" for female, M for male.
        :type gender: str
        :return: Fødselsnummer in str format (11 digs)
        :rtype: str
        :raises ValueError: if gender is not "F"/"M", or the birth year falls
            outside the supported 1854-2039 range.
        """
        if dob:
            birthday = datetime.datetime.strptime(dob, "%Y%m%d")
        else:
            # Random age between 18 and 90 years.
            age = datetime.timedelta(days=self.generator.random.randrange(18 * 365, 90 * 365))
            birthday = datetime.datetime.now() - age
        if not gender:
            gender = self.generator.random.choice(("F", "M"))
        elif gender not in ("F", "M"):
            raise ValueError("Gender must be one of F or M.")
        # Retry until the generated number has valid checksums (values of 10
        # are not representable and must be rejected).
        while True:
            # The serial-number range encodes the birth century.
            if 1900 <= birthday.year <= 1999:
                suffix = str(self.generator.random.randrange(0, 49))
            elif 1854 <= birthday.year <= 1899:
                suffix = str(self.generator.random.randrange(50, 74))
            elif 2000 <= birthday.year <= 2039:
                suffix = str(self.generator.random.randrange(50, 99))
            elif 1940 <= birthday.year <= 1999:
                # NOTE: unreachable -- the 1900-1999 branch above already covers
                # these years. Kept for parity with the historical 900-999
                # serial series used for 1940-1999 births.
                suffix = str(self.generator.random.randrange(90, 99))
            else:
                # Previously an unsupported year left 'suffix' unbound and
                # raised an opaque UnboundLocalError; fail explicitly instead.
                raise ValueError("Birth year must be within 1854-2039 to generate a valid SSN.")
            # The last digit of the individual number encodes gender:
            # even for female, odd for male.
            if gender == "F":
                gender_num = self.generator.random.choice((0, 2, 4, 6, 8))
            elif gender == "M":
                gender_num = self.generator.random.choice((1, 3, 5, 7, 9))
            pnr = birthday.strftime("%d%m%y") + suffix.zfill(2) + str(gender_num)
            pnr_nums = [int(ch) for ch in pnr]
            # Arguments now match checksum(digits, scale); the previous swapped
            # order produced the same value (commutative products over equal
            # lengths) but contradicted the declared signature.
            k1 = checksum(pnr_nums, Provider.scale1)
            k2 = checksum(pnr_nums + [k1], Provider.scale2)
            # Checksums with a value of 10 is rejected.
            # https://no.wikipedia.org/wiki/F%C3%B8dselsnummer
            if k1 == 10 or k2 == 10:
                continue
            pnr += f"{k1}{k2}"
            return pnr
/FFEAT-1.0.0-py3-none-any.whl/ffeat/utils/decay/Linear.py | from typing import Union
_IFU = Union[int, float]
class Linear:
    """
    Linear decay rate: the parameter shrinks linearly from `start` towards `min`.
    """
    def __init__(self,
                 start: Union[int, float],
                 min: Union[int, float] = None,
                 step: Union[int, float] = None,
                 result_type=float):
        """
        Linear decay rate: the parameter shrinks linearly from `start` towards `min`.

        :param start: Start value.
        :param min: Optional floor. When the algorithm has no iteration limit,
            the parameter never drops below this value.
        :param step: Optional per-iteration decrement. When the algorithm has a
            known iteration limit and step is None, the step is derived so the
            parameter reaches `min` in the last iteration.
            Either `min` or `step` must be set.
        :param result_type: Type applied to the result, `float` by default.
        """
        if min is None and step is None:
            raise ValueError("Either min value or step size must be set")
        self._start = float(start)
        self._min = None if min is None else float(min)
        self._step = None if step is None else float(step)
        self._result_type = result_type
    def __call__(self, *args, iteration: int, max_iteration: int = None, **kwargs):
        """
        Return the linearly decayed parameter value for `iteration`.

        :param iteration: Current iteration.
        :param max_iteration: Total number of iterations, if known.
        :return: Decayed value converted to `result_type`.
        """
        step_size = self._step
        if step_size is None and max_iteration is not None and self._min is not None:
            # Derive the step so the value hits `min` exactly at max_iteration.
            step_size = (self._start - self._min) / max_iteration
        if step_size is None:
            raise ValueError("Step in linear decay is not known, either set maximum number of iterations or provide step parameter")
        value = self._start - step_size * iteration
        if self._min is not None and value < self._min:
            value = self._min
        return self._result_type(value)
/ChunkyPipes-0.2.4.tar.gz/ChunkyPipes-0.2.4/README.rst | ChunkyPipes
===========
.. image:: https://img.shields.io/pypi/v/chunkypipes.svg
:target: https://pypi.python.org/pypi/chunkypipes/
:alt: Latest Version
.. image:: https://img.shields.io/pypi/dm/chunkypipes.svg
:target: https://pypi.python.org/pypi/chunkypipes/
:alt: Downloads
.. image:: https://img.shields.io/pypi/status/chunkypipes.svg
:target: https://pypi.python.org/pypi/chunkypipes/
:alt: Status
ChunkyPipes is a framework for easily designing and distributing NGS pipelines written in Python.
Running a pipeline with ChunkyPipes can be as simple as::
$ chunky install easy-pipeline.py
$ chunky configure easy-pipeline
$ chunky run easy-pipeline [arguments]
Read the full documentation `here <http://chunky-pipes.readthedocs.io/en/latest/>`_. | PypiClean |
/AutoTorch-0.0.2b20200818.tar.gz/AutoTorch-0.0.2b20200818/autotorch/scheduler/resource/manager.py | import logging
import multiprocessing as mp
from .resource import *
from ...utils import Queue
__all__ = ['ResourceManager']
logger = logging.getLogger(__name__)
class ResourceManager(object):
    """Resource Manager to keep track of the cpu and gpu usage.

    CPU and GPU ids live in shared queues; a request blocks until the
    requested number of ids can be taken, and a release returns them.
    """
    LOCK = mp.Lock()
    CPU_QUEUE = Queue()
    GPU_QUEUE = Queue()
    MAX_CPU_COUNT = get_cpu_count()
    MAX_GPU_COUNT = get_gpu_count()
    # Pre-fill the pools with every available device id.
    for cid in range(MAX_CPU_COUNT):
        CPU_QUEUE.put(cid)
    for gid in range(MAX_GPU_COUNT):
        GPU_QUEUE.put(gid)
    @classmethod
    def _request(cls, resource):
        """Block until the requested cpus/gpus are free and assign them.

        We recommend using a scheduler instead of creating your own
        resource manager.
        """
        # Adjacent string literals are concatenated before .format(), so every
        # placeholder is filled (the original applied .format() to only the
        # second literal and passed the GPU/CPU maxima in swapped order).
        assert cls.check_possible(resource), \
            'Requested num_cpus={} and num_gpus={} should be less than or equal to ' \
            'system availability CPUs={}, GPUs={}'.format(
                resource.num_cpus, resource.num_gpus, cls.MAX_CPU_COUNT, cls.MAX_GPU_COUNT)
        with cls.LOCK:
            cpu_ids = [cls.CPU_QUEUE.get() for _ in range(resource.num_cpus)]
            gpu_ids = [cls.GPU_QUEUE.get() for _ in range(resource.num_gpus)]
            resource._ready(cpu_ids, gpu_ids)
            logger.debug("Request succeed {}".format(resource))
            return
    @classmethod
    def _release(cls, resource):
        """Return the resource's cpu/gpu ids to the shared pools."""
        logger.debug("Releasing {}".format(resource))
        cpu_ids = resource.cpu_ids
        gpu_ids = resource.gpu_ids
        resource._release()
        for cid in cpu_ids:
            cls.CPU_QUEUE.put(cid)
        for gid in gpu_ids:
            cls.GPU_QUEUE.put(gid)
        logger.debug("Releasing succeed {}".format(resource))
    @classmethod
    def check_availability(cls, resource):
        """Unsafe (non-atomic) check whether the request could be served now."""
        # Fixed: this classmethod referenced the undefined name 'self', which
        # raised NameError whenever it was called.
        if resource.num_cpus > cls.CPU_QUEUE.qsize() or resource.num_gpus > cls.GPU_QUEUE.qsize():
            return False
        return True
    @classmethod
    def check_possible(cls, resource):
        """Check the request does not exceed what this machine has at all."""
        assert isinstance(resource, Resources), 'Only support autotorch.resource.Resources'
        if resource.num_cpus > cls.MAX_CPU_COUNT or resource.num_gpus > cls.MAX_GPU_COUNT:
            return False
        return True
    def __repr__(self):
        reprstr = self.__class__.__name__ + '(' + \
            '{} CPUs, '.format(self.MAX_CPU_COUNT) + \
            '{} GPUs)'.format(self.MAX_GPU_COUNT)
        return reprstr
/CMinx-1.1.9.tar.gz/CMinx-1.1.9/docs/source/developer/overview.rst | .. Copyright 2021 CMakePP
..
.. Licensed under the Apache License, Version 2.0 (the "License");
.. you may not use this file except in compliance with the License.
.. You may obtain a copy of the License at
..
.. http://www.apache.org/licenses/LICENSE-2.0
..
.. Unless required by applicable law or agreed to in writing, software
.. distributed under the License is distributed on an "AS IS" BASIS,
.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
.. See the License for the specific language governing permissions and
.. limitations under the License.
..
##############################
Overview of How CMinx Works
##############################
.. sidebar:: Source File Parsing
.. _parsing_flowchart:
.. figure:: uml_diagrams/parsing.png
How CMinx parses a CMake source file.
.. _aggregation_flowchart:
.. figure:: uml_diagrams/aggregation.png
How CMinx aggregates documentation from the parse tree.
-------
Parsing
-------
In CMinx parsing of a source file is the role of the Antlr4 parsing runtime, generated from
the modified CMake.g4 grammar file.
#. As per the standard usage, the file contents are read into an
Antlr4 FileStream, which is then passed to the generated CMake lexer.
#. The lexer generates a token stream, which is then fed into the CMakeParser.
#. The parser generates a tree of parse elements, called contexts,
which are then walked over by the ParseTreeWalker.
This process is diagrammatically summarized in :numref:`parsing_flowchart`.
-----------
Aggregation
-----------
After the parser generates the parse tree, CMinx walks the tree and aggregates the various documentation.
#. The walker calls the aggregator methods upon entering or exiting
parse rules, such as entering a :code:`documented_command` parse rule.
#. The parse rule enterDocumented_command cleans the doccomment and
extracts the documented command. For example, if a function definition
is documented, enterDocumented_command will extract the :code:`function` command.
#. The aggregator then locates the subprocessor that corresponds to the extracted command,
for example if the extracted command is :code:`function` then the subprocessor would be
:code:`process_function()`. This subhandler is then executed with the parse context and
cleaned docstring.
#. The documentation aggregator subhandler generates NamedTuples representing the type
of documentation generated, such as FunctionDocumentation, complete
with all relevant information, and adds them to a *documented* list.
#. From there, Documenter loops over the documentation list,
generating equivalent RST via RSTWriter for each type of documentation.
This process is diagrammatically summarized in :numref:`aggregation_flowchart`.
| PypiClean |
/Mopidy-Touchscreen-1.0.0.tar.gz/Mopidy-Touchscreen-1.0.0/mopidy_touchscreen/screens/playlist_screen.py | from base_screen import BaseScreen
from ..graphic_utils import ListView
class PlaylistScreen(BaseScreen):
    """Touchscreen screen listing saved playlists and their tracks.

    Tapping a playlist shows its tracks; tapping a track replaces the
    tracklist with the playlist and starts playback from that track.
    """
    def __init__(self, size, base_size, manager, fonts):
        BaseScreen.__init__(self, size, base_size, manager, fonts)
        list_size = (self.size[0], self.size[1] - self.base_size)
        self.list_view = ListView((0, 0), list_size, self.base_size,
                                  self.fonts['base'])
        self.playlists_strings = []
        self.playlists = []
        self.selected_playlist = None
        self.playlist_tracks = []
        self.playlist_tracks_strings = []
        self.playlists_loaded()
    def should_update(self):
        """Redraw only when the list view changed."""
        return self.list_view.should_update()
    def update(self, screen, update_type, rects):
        """Render the list view, fully or incrementally."""
        self.list_view.render(screen, update_type == BaseScreen.update_all, rects)
    def playlists_loaded(self):
        """Reload the playlist overview from the Mopidy core."""
        self.selected_playlist = None
        current = list(self.manager.core.playlists.playlists.get())
        self.playlists = current
        self.playlists_strings = [playlist.name for playlist in current]
        self.list_view.set_list(self.playlists_strings)
    def playlist_selected(self, playlist):
        """Show the tracks of *playlist*, with a leading "../" back entry."""
        self.selected_playlist = playlist
        self.playlist_tracks = playlist.tracks
        self.playlist_tracks_strings = \
            ["../"] + [track.name for track in self.playlist_tracks]
        self.list_view.set_list(self.playlist_tracks_strings)
    def touch_event(self, touch_event):
        """Dispatch a touch to the list view and act on the tapped row."""
        clicked = self.list_view.touch_event(touch_event)
        if clicked is None:
            return
        if self.selected_playlist is None:
            # Overview level: open the tapped playlist.
            self.playlist_selected(self.playlists[clicked])
        elif clicked == 0:
            # "../" entry: back to the playlist overview.
            self.selected_playlist = None
            self.list_view.set_list(self.playlists_strings)
        else:
            # Track entry: load the playlist and play from the tapped track
            # (offset by one for the "../" row).
            tracklist = self.manager.core.tracklist
            tracklist.clear()
            tracklist.add(self.playlist_tracks)
            tl_track = tracklist.tl_tracks.get()[clicked - 1]
            self.manager.core.playback.play(tl_track=tl_track)
/Eureqa-1.76.0.tar.gz/Eureqa-1.76.0/eureqa/analysis/components/box_plot.py |
from eureqa.analysis.components.base import _TwoVariablePlotComponent
from eureqa.utils.jsonrest import _JsonREST
class BoxPlot(_TwoVariablePlotComponent):
    """Represents a box plot card on the server.
    Example::
        bp = BoxPlot(d, axis_labels={'x': 'the x var', 'y' : 'the y var'}, label_format={'y':'.3s'}, x_var='W', y_var='A', needs_guides=True)
        analysis.create_card(bp)
    :param DataSource datasource: The data source containing the data to be plotted.
    :param XYMap axis_labels: Axis labels for this card's plot (XYMap). Set member fields "x" and "y" to set the X and Y axis labels.
    :param XYMap label_format: Label format for this card. Set member fields "x" and "y" to set the X and Y axis printf-style format-strings; for example, ".3s".
    :param str x_var: The X-axis variable for the component's plot. (must be binary)
    :param str y_var: The Y-axis variable for the component's plot.
    :param bool needs_guides: Whether the card needs guides.
    :var str ~eureqa.analysis_cards.two_variable_plot.TwoVariablePlot.title: The title of the card
    :var str ~eureqa.analysis_cards.two_variable_plot.TwoVariablePlot.x_var: The X-axis variable for the card's plot (must be binary)
    :var str ~eureqa.analysis_cards.two_variable_plot.TwoVariablePlot.y_var: The Y-axis variable for the card's plot
    :var bool ~eureqa.analysis_cards.two_variable_plot.TwoVariablePlot.needs_guides: Whether the card needs guides
    :var XYMap ~eureqa.analysis_cards.two_variable_plot.TwoVariablePlot.axis_labels: Axis labels for this card's plot. Set member fields "x" and "y" to set the X and Y axis labels.
    :var XYMap ~eureqa.analysis_cards.two_variable_plot.TwoVariablePlot.label_format: Label format for this card. Set member fields "x" and "y" to set the X and Y axis printf-style format-strings; for example, ".3s".
    """
    # Server-side component type identifier for this card.
    _component_type_str = 'BOX_PLOT'
    def __init__(self, datasource=None, axis_labels=None,
                 label_format=None, needs_guides=None, x_var=None, y_var=None,
                 _analysis=None, _component_id=None, _component_type=None):
        # Only set the attributes the caller actually provided.  Assigning
        # 'datasource' goes through the property setter below (which also
        # stores self._eureqa); the remaining assignments write the private
        # backing fields directly.
        if datasource is not None:
            self.datasource = datasource
        if axis_labels is not None:
            self._axisLabels = axis_labels
        if label_format is not None:
            self._labelFormat = label_format
        if needs_guides is not None:
            self._needsGuides = needs_guides
        if x_var is not None:
            self._x_var = x_var
        if y_var is not None:
            self._y_var = y_var
        super(BoxPlot, self).__init__(_analysis=_analysis, _component_id=_component_id, _component_type=_component_type)
    @property
    def datasource(self):
        """The :class:`~eureqa.data_source.DataSource` providing data for this component
        :return: :class:`~eureqa.data_source.DataSource` providing data for this card
        """
        # Returns None until a datasource has been assigned (which sets
        # self._eureqa via the setter below).
        if hasattr(self, "_eureqa"):
            return self._eureqa.get_data_source_by_id(self._datasource_id)
    @datasource.setter
    def datasource(self, val):
        # NOTE(review): the getter calls get_data_source_by_id but this setter
        # calls get_data_source_from_id -- one of the two names is likely
        # wrong; confirm against the Eureqa client API.
        # NOTE(review): the string branch reads self._eureqa, which only exists
        # after a DataSource object was assigned previously -- assigning a
        # plain id string first raises AttributeError; confirm intended usage.
        if isinstance(val, basestring):  # Python 2 only: 'basestring' is undefined on Python 3
            val = self._eureqa.get_data_source_from_id(val)
        self._datasource_id = val._data_source_id
        self._eureqa = val._eureqa
        self._update()
    @property
    def axis_labels(self):
        """The axis labels for this card
        defaults to:
        ::
            { 'x': x_var, 'y': y_var }
        :return: Axis labels for this card
        :rtype: self.XYMap
        """
        return self.XYMap(self, getattr(self, "_axisLabels", {"x": "", "y": ""}))
    @axis_labels.setter
    def axis_labels(self, val):
        # Accept either an XYMap-like object (with .x/.y) or a plain dict.
        if hasattr(val, "x") and hasattr(val, "y"):
            val = {"x": val.x, "y": val.y}
        self._axisLabels = val
        self._update()
    @property
    def label_format(self):
        """Label format for this card. Set keys "x" and "y" to set the X and
        Y axis printf-style format-strings; for example, ".3s".
        defaults to:
        ::
            {'x': 'g', 'y': '.2s'}
        :rtype: XYMap
        """
        return self.XYMap(self, getattr(self, "_labelFormat", {"x": "", "y": ""}))
    @label_format.setter
    def label_format(self, val):
        # Accept either an XYMap-like object (with .x/.y) or a plain dict.
        if hasattr(val, "x") and hasattr(val, "y"):
            val = {"x": val.x, "y": val.y}
        self._labelFormat = val
        self._update()
    @property
    def needs_guides(self):
        """Does this card need guides?
        :return: Whether this card needs guides
        :rtype: bool
        """
        return getattr(self, "_needsGuides", None)
    @needs_guides.setter
    def needs_guides(self, val):
        self._needsGuides = val
        self._update()
    @property
    def x_var(self):
        """The X variable for this card.
        :return: X variable for this card. (must be binary)
        :rtype: str
        """
        return getattr(self, "_x_var", None)
    @x_var.setter
    def x_var(self, val):
        self._x_var = val
        self._update()
    @property
    def y_var(self):
        """The Y variable for this card.
        :return: Y variable for this card
        :rtype: str
        """
        return getattr(self, "_y_var", None)
    @y_var.setter
    def y_var(self, val):
        self._y_var = val
        self._update()
    def _fields(self):
        # Fields serialized to/from the server's JSON representation.
        return super(BoxPlot, self)._fields() + [ 'datasource_id', 'axisLabels', 'labelFormat', 'needsGuides', 'x_var', 'y_var' ]
/FAT-Forensics-0.1.2.tar.gz/FAT-Forensics-0.1.2/README.rst | .. -*- mode: rst -*-
============= ================================================================
Software |Licence|_ |GitHubRelease|_ |PyPi|_ |Python35|_
Docs |Homepage|_
CI |GitHubTests|_ |GitHubDocs|_ |Codecov|_
Try it |Binder|_
Contact |MailingList|_ |Gitter|_
Cite |BibTeX|_ |JOSS|_ |ZENODO|_
============= ================================================================
.. |Licence| image:: https://img.shields.io/github/license/fat-forensics/fat-forensics.svg
.. _Licence: https://github.com/fat-forensics/fat-forensics/blob/master/LICENCE
.. |GitHubRelease| image:: https://img.shields.io/github/release/fat-forensics/fat-forensics.svg
.. _GitHubRelease: https://github.com/fat-forensics/fat-forensics/releases
.. |PyPi| image:: https://img.shields.io/pypi/v/fat-forensics.svg
.. _PyPi: https://pypi.org/project/fat-forensics/
.. |Python35| image:: https://img.shields.io/badge/python-3.5-blue.svg
.. _Python35: https://badge.fury.io/py/fat-forensics
.. .. |ReadTheDocs| image:: https://readthedocs.org/projects/fat-forensics/badge/?version=latest&style=flat
.. .. _ReadTheDocs: https://fat-forensics.readthedocs.io/en/latest/
.. |Homepage| image:: https://img.shields.io/badge/homepage-read-green.svg
.. _Homepage: https://fat-forensics.org
.. What about wiki?
.. |GitHubTests| image:: https://github.com/fat-forensics/fat-forensics/actions/workflows/tests.yml/badge.svg
.. _GitHubTests: https://github.com/fat-forensics/fat-forensics/actions/workflows/tests.yml
.. |GitHubDocs| image:: https://github.com/fat-forensics/fat-forensics/actions/workflows/docs.yml/badge.svg
.. _GitHubDocs: https://github.com/fat-forensics/fat-forensics/actions/workflows/docs.yml
.. .. |CircleCI| image:: https://circleci.com/gh/fat-forensics/fat-forensics/tree/master.svg?style=shield
.. .. _CircleCI: https://circleci.com/gh/fat-forensics/fat-forensics/tree/master
.. |Codecov| image:: https://codecov.io/gh/fat-forensics/fat-forensics/branch/master/graph/badge.svg
.. _Codecov: https://codecov.io/gh/fat-forensics/fat-forensics
.. https://codeclimate.com/
.. https://requires.io/
.. |Binder| image:: https://mybinder.org/badge_logo.svg
.. _Binder: https://mybinder.org/v2/gh/fat-forensics/fat-forensics-doc/master?filepath=notebooks
.. |MailingList| image:: https://img.shields.io/badge/mailing%20list-Google%20Groups-green.svg
.. _MailingList: https://groups.google.com/forum/#!forum/fat-forensics
.. |Gitter| image:: https://img.shields.io/gitter/room/fat-forensics/FAT-Forensics.svg
.. _Gitter: https://gitter.im/fat-forensics
.. |BibTeX| image:: https://img.shields.io/badge/cite-BibTeX-blue.svg
.. _BibTeX: https://fat-forensics.org/getting_started/cite.html
.. |JOSS| image:: https://joss.theoj.org/papers/10.21105/joss.01904/status.svg
.. _JOSS: https://doi.org/10.21105/joss.01904
.. |ZENODO| image:: https://zenodo.org/badge/DOI/10.5281/zenodo.3833199.svg
.. _ZENODO: https://doi.org/10.5281/zenodo.3833199
============================================================================
FAT Forensics: Algorithmic Fairness, Accountability and Transparency Toolbox
============================================================================
FAT Forensics (``fatf``) is a Python toolbox for evaluating fairness,
accountability and transparency of predictive systems. It is built on top of
SciPy_ and NumPy_, and is distributed under the 3-Clause BSD license (new BSD).
FAT Forensics implements the state of the art *fairness*, *accountability* and
*transparency* (FAT) algorithms for the three main components of any data
modelling pipeline: *data* (raw data and features), predictive *models* and
model *predictions*. We envisage two main use cases for the package, each
supported by distinct features implemented to support it: an interactive
*research mode* aimed at researchers who may want to use it for an exploratory
analysis and a *deployment mode* aimed at practitioners who may want to use it
for monitoring FAT aspects of a predictive system.
Please visit the project's web site `https://fat-forensics.org`_ for more
details.
Installation
============
Dependencies
------------
FAT Forensics requires **Python 3.5** or higher and the following dependencies:
+------------+------------+
| Package | Version |
+============+============+
| NumPy_ | >=1.10.0 |
+------------+------------+
| SciPy_ | >=0.13.3 |
+------------+------------+
In addition, some of the modules require *optional* dependencies:
+--------------------------------------------------------------+------------------+------------+
| ``fatf`` module | Package | Version |
+==============================================================+==================+============+
| ``fatf.transparency.predictions.surrogate_explainers`` | | |
+--------------------------------------------------------------+ | |
| ``fatf.transparency.predictions.surrogate_image_explainers`` | | |
+--------------------------------------------------------------+ | |
| ``fatf.transparency.sklearn`` | `scikit-learn`_ | >=0.19.2 |
+--------------------------------------------------------------+ | |
| ``fatf.utils.data.feature_selection.sklearn`` | | |
+--------------------------------------------------------------+------------------+------------+
| ``fatf.transparency.predictions.surrogate_image_explainers`` | | |
+--------------------------------------------------------------+ | |
| ``fatf.utils.data.occlusion`` | `scikit-image`_ | >=0.17.0 |
+--------------------------------------------------------------+ | |
| ``fatf.utils.data.segmentation`` | | |
+--------------------------------------------------------------+------------------+------------+
| ``fatf.transparency.predictions.surrogate_image_explainers`` | | |
+--------------------------------------------------------------+ | |
| ``fatf.utils.data.occlusion`` | `Pillow`_ | >=8.4.0 |
+--------------------------------------------------------------+ | |
| ``fatf.utils.data.segmentation`` | | |
+--------------------------------------------------------------+------------------+------------+
| ``fatf.vis`` | matplotlib_ | >=3.0.0 |
+--------------------------------------------------------------+------------------+------------+
User Installation
-----------------
The easiest way to install FAT Forensics is via ``pip``::
pip install fat-forensics
which will only install the required dependencies. If you want to install the
package together with all the auxiliary dependencies please consider using the
``[all]`` option::
pip install fat-forensics[all]
The documentation provides more detailed `installation instructions <inst_>`_.
Changelog
=========
See the changelog_ for a development history and project milestones.
Development
===========
We welcome new contributors of all experience levels. The
`Development Guide <dev_guide_>`_ has detailed information about contributing
code, documentation, tests and more. Some basic development instructions are
included below.
Important Links
---------------
* Project's web site and documentation: `https://fat-forensics.org`_.
* Official source code repository:
`https://github.com/fat-forensics/fat-forensics`_.
* FAT Forensics releases: `https://pypi.org/project/fat-forensics`_.
* Issue tracker: `https://github.com/fat-forensics/fat-forensics/issues`_.
Source Code
-----------
You can check out the latest FAT Forensics source code via git with the
command::
git clone https://github.com/fat-forensics/fat-forensics.git
Contributing
------------
To learn more about contributing to FAT Forensics, please see our
`Contributing Guide <contrib_guide_>`_.
Testing
-------
You can launch the test suite from the root directory of this repository with::
make test-with-code-coverage
To run the tests you will need to have version 3.9.1 of ``pytest`` installed.
This package, together with other development dependencies, can be also
installed with::
pip install -r requirements-dev.txt
or with::
pip install fat-forensics[dev]
See the *Testing* section of the `Development Guide <dev_testing_>`_ page for
more information.
Please note that the ``make test-with-code-coverage`` command will test the
version of the package in the local ``fatf`` directory and not the one
installed since the pytest command is preceded by ``PYTHONPATH=./``. If
you want to test the installed version, consider using the command from the
``Makefile`` without the ``PYTHONPATH`` variable.
To control the randomness during the tests the ``Makefile`` sets the random
seed to ``42`` by preceding each test command with ``FATF_SEED=42``, which
sets the environment variable responsible for that. More information about
the setup of the *Testing Environment* is available on the
`development <dev_testing_env_>`_ web page in the documentation.
Submitting a Pull Request
-------------------------
Before opening a Pull Request, please have a look at the
`Contributing <contrib_guide_>`_ page to make sure that your code complies with
our guidelines.
Help and Support
================
For help please have a look at our
`documentation web page <https://fat-forensics.org>`_, especially the
`Getting Started <getting_started_>`_ page.
Communication
-------------
You can reach out to us at:
* our gitter_ channel for code-related development discussion; and
* our `mailing list`_ for discussion about the project's future and the
direction of the development.
More information about the communication can be found in our documentation
on the `main page <https://fat-forensics.org/index.html#communication>`_ and
on the
`develop page <https://fat-forensics.org/development.html#communication>`_.
Citation
--------
If you use FAT Forensics in a scientific publication, we would appreciate
citations! Information on how to cite us is available on the
`citation <https://fat-forensics.org/getting_started/cite.html>`_ web page in
our documentation.
Acknowledgements
================
This project is the result of a collaborative research agreement between Thales
and the University of Bristol with the initial funding provided by Thales.
.. _SciPy: https://scipy.org/
.. _NumPy: https://www.numpy.org/
.. _scikit-learn: https://scikit-learn.org/stable/
.. _matplotlib: https://matplotlib.org/
.. _scikit-image: https://scikit-image.org/
.. _Pillow: https://pillow.readthedocs.io/
.. _`https://fat-forensics.org`: https://fat-forensics.org
.. _inst: https://fat-forensics.org/getting_started/install_deps_os.html#installation-instructions
.. _changelog: https://fat-forensics.org/changelog.html
.. _dev_guide: https://fat-forensics.org/development.html
.. _`https://github.com/fat-forensics/fat-forensics`: https://github.com/fat-forensics/fat-forensics
.. _`https://pypi.org/project/fat-forensics`: https://pypi.org/project/fat-forensics
.. _`https://github.com/fat-forensics/fat-forensics/issues`: https://github.com/fat-forensics/fat-forensics/issues
.. _contrib_guide: https://fat-forensics.org/development.html#contributing-code
.. _dev_testing: https://fat-forensics.org/development.html#testing
.. _dev_testing_env: https://fat-forensics.org/development.html#testing-environment
.. _getting_started: https://fat-forensics.org/getting_started/index.html
.. _gitter: https://gitter.im/fat-forensics
.. _`mailing list`: https://groups.google.com/forum/#!forum/fat-forensics
| PypiClean |
/MIAvisual-0.0.6-py3-none-any.whl/matplotlib/bezier.py | from functools import lru_cache
import math
import warnings
import numpy as np
from matplotlib import _api
# same algorithm as 3.8's math.comb
@np.vectorize
@lru_cache(maxsize=128)
def _comb(n, k):
if k > n:
return 0
k = min(k, n - k)
i = np.arange(1, k + 1)
return np.prod((n + 1 - i)/i).astype(int)
class NonIntersectingPathException(ValueError):
    """Raised when two paths that were expected to intersect do not."""
    pass
# some functions
def get_intersection(cx1, cy1, cos_t1, sin_t1,
                     cx2, cy2, cos_t2, sin_t2):
    """
    Return the intersection between the line through (*cx1*, *cy1*) at angle
    *t1* and the line through (*cx2*, *cy2*) at angle *t2*.

    Raises a `ValueError` when the two lines are (anti-)parallel and hence
    have no unique intersection.
    """
    # Line i in implicit form:
    #   sin_ti * x - cos_ti * y = sin_ti * cxi - cos_ti * cyi
    rhs1 = sin_t1 * cx1 - cos_t1 * cy1
    rhs2 = sin_t2 * cx2 - cos_t2 * cy2
    # Determinant of the 2x2 system [[sin_t1, -cos_t1], [sin_t2, -cos_t2]].
    det = sin_t1 * (-cos_t2) - (-cos_t1) * sin_t2
    if abs(det) < 1e-12:
        raise ValueError("Given lines do not intersect. Please verify that "
                         "the angles are not equal or differ by 180 degrees.")
    # Entries of the inverse matrix (Cramer's rule).
    inv11 = -cos_t2 / det
    inv12 = cos_t1 / det
    inv21 = -sin_t2 / det
    inv22 = sin_t1 / det
    x = inv11 * rhs1 + inv12 * rhs2
    y = inv21 * rhs1 + inv22 * rhs2
    return x, y
def get_normal_points(cx, cy, cos_t, sin_t, length):
    """
    For a line passing through (*cx*, *cy*) and having an angle *t*, return
    locations of the two points located along its perpendicular line at the
    distance of *length*, one on each side.
    """
    if length == 0.:
        return cx, cy, cx, cy
    # Offsets along the two unit normals (sin_t, -cos_t) and (-sin_t, cos_t).
    dx = length * sin_t
    dy = length * cos_t
    return cx + dx, cy - dy, cx - dx, cy + dy
# BEZIER routines
# subdividing bezier curve
# http://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/Bezier/bezier-sub.html
def _de_casteljau1(beta, t):
next_beta = beta[:-1] * (1 - t) + beta[1:] * t
return next_beta
def split_de_casteljau(beta, t):
"""
Split a Bezier segment defined by its control points *beta* into two
separate segments divided at *t* and return their control points.
"""
beta = np.asarray(beta)
beta_list = [beta]
while True:
beta = _de_casteljau1(beta, t)
beta_list.append(beta)
if len(beta) == 1:
break
left_beta = [beta[0] for beta in beta_list]
right_beta = [beta[-1] for beta in reversed(beta_list)]
return left_beta, right_beta
def find_bezier_t_intersecting_with_closedpath(
        bezier_point_at_t, inside_closedpath, t0=0., t1=1., tolerance=0.01):
    """
    Find the intersection of the Bezier curve with a closed path.

    The intersection point *t* is approximated by two parameters *t0*, *t1*
    such that *t0* <= *t* <= *t1*.

    Search starts from *t0* and *t1* and uses a simple bisecting algorithm,
    so exactly one of the two end points must lie inside the path.  The
    search stops when the two bracketing points are closer than *tolerance*.

    Parameters
    ----------
    bezier_point_at_t : callable
        A function returning x, y coordinates of the Bezier at parameter *t*.
        It must have the signature::

            bezier_point_at_t(t: float) -> tuple[float, float]

    inside_closedpath : callable
        A function returning True if a given point (x, y) is inside the
        closed path. It must have the signature::

            inside_closedpath(point: tuple[float, float]) -> bool

    t0, t1 : float
        Start parameters for the search.
    tolerance : float
        Maximal allowed distance between the final points.

    Returns
    -------
    t0, t1 : float
        The Bezier path parameters bracketing the intersection.
    """
    lo_point = bezier_point_at_t(t0)
    hi_point = bezier_point_at_t(t1)
    lo_inside = inside_closedpath(lo_point)
    hi_inside = inside_closedpath(hi_point)
    if lo_inside == hi_inside and lo_point != hi_point:
        raise NonIntersectingPathException(
            "Both points are on the same side of the closed path")
    # Bisect until the bracketing points are within *tolerance* of each other.
    while np.hypot(lo_point[0] - hi_point[0],
                   lo_point[1] - hi_point[1]) >= tolerance:
        t_mid = 0.5 * (t0 + t1)
        mid_point = bezier_point_at_t(t_mid)
        mid_inside = inside_closedpath(mid_point)
        if lo_inside ^ mid_inside:
            # The crossing lies in [t0, t_mid]; move the upper bracket down.
            t1 = t_mid
            hi_point = mid_point
            hi_inside = mid_inside
        else:
            # The crossing lies in [t_mid, t1]; move the lower bracket up.
            t0 = t_mid
            lo_point = mid_point
            lo_inside = mid_inside
    return t0, t1
class BezierSegment:
    """
    A d-dimensional Bezier segment.

    Parameters
    ----------
    control_points : (N, d) array
        Location of the *N* control points.
    """

    def __init__(self, control_points):
        self._cpoints = np.asarray(control_points)
        # N control points of dimension d => polynomial degree N - 1.
        self._N, self._d = self._cpoints.shape
        self._orders = np.arange(self._N)
        # Binomial coefficients C(N-1, i) of the Bernstein basis.
        coeff = [math.factorial(self._N - 1)
                 // (math.factorial(i) * math.factorial(self._N - 1 - i))
                 for i in range(self._N)]
        # Control points pre-scaled by their Bernstein coefficients, so that
        # evaluation in __call__ only needs the (1-t)^k t^j power products.
        self._px = (self._cpoints.T * coeff).T

    def __call__(self, t):
        """
        Evaluate the Bezier curve at point(s) t in [0, 1].

        Parameters
        ----------
        t : (k,) array-like
            Points at which to evaluate the curve.

        Returns
        -------
        (k, d) array
            Value of the curve for each point in *t*.
        """
        t = np.asarray(t)
        # B(t) = sum_i C(N-1, i) (1-t)^(N-1-i) t^i P_i, evaluated for all t
        # at once; the binomial factors are already folded into self._px.
        return (np.power.outer(1 - t, self._orders[::-1])
                * np.power.outer(t, self._orders)) @ self._px

    def point_at_t(self, t):
        """
        Evaluate the curve at a single point, returning a tuple of *d* floats.
        """
        return tuple(self(t))

    @property
    def control_points(self):
        """The control points of the curve."""
        return self._cpoints

    @property
    def dimension(self):
        """The dimension of the curve."""
        return self._d

    @property
    def degree(self):
        """Degree of the polynomial. One less the number of control points."""
        return self._N - 1

    @property
    def polynomial_coefficients(self):
        r"""
        The polynomial coefficients of the Bezier curve.

        .. warning:: Follows opposite convention from `numpy.polyval`.

        Returns
        -------
        (n+1, d) array
            Coefficients after expanding in polynomial basis, where :math:`n`
            is the degree of the bezier curve and :math:`d` its dimension.
            These are the numbers (:math:`C_j`) such that the curve can be
            written :math:`\sum_{j=0}^n C_j t^j`.

        Notes
        -----
        The coefficients are calculated as

        .. math::

            {n \choose j} \sum_{i=0}^j (-1)^{i+j} {j \choose i} P_i

        where :math:`P_i` are the control points of the curve.
        """
        n = self.degree
        # matplotlib uses n <= 4. overflow plausible starting around n = 15.
        if n > 10:
            warnings.warn("Polynomial coefficients formula unstable for high "
                          "order Bezier curves!", RuntimeWarning)
        P = self.control_points
        j = np.arange(n+1)[:, None]
        i = np.arange(n+1)[None, :]  # _comb is non-zero for i <= j
        prefactor = (-1)**(i + j) * _comb(j, i)  # j on axis 0, i on axis 1
        return _comb(n, j) * prefactor @ P  # j on axis 0, self.dimension on 1

    def axis_aligned_extrema(self):
        """
        Return the dimension and location of the curve's interior extrema.

        The extrema are the points along the curve where one of its partial
        derivatives is zero.

        Returns
        -------
        dims : array of int
            Index :math:`i` of the partial derivative which is zero at each
            interior extrema.
        dzeros : array of float
            Of same size as dims. The :math:`t` such that :math:`d/dx_i B(t) =
            0`
        """
        n = self.degree
        # A line (or point) has no interior extrema.
        if n <= 1:
            return np.array([]), np.array([])
        Cj = self.polynomial_coefficients
        # Power-basis coefficients of the derivative dB/dt (degree n - 1).
        dCj = np.arange(1, n+1)[:, None] * Cj[1:]
        dims = []
        roots = []
        for i, pi in enumerate(dCj.T):
            # np.roots expects highest-order coefficient first, hence [::-1].
            r = np.roots(pi[::-1])
            roots.append(r)
            dims.append(np.full_like(r, i))
        roots = np.concatenate(roots)
        dims = np.concatenate(dims)
        # Keep only real roots inside the parameter interval [0, 1].
        in_range = np.isreal(roots) & (roots >= 0) & (roots <= 1)
        return dims[in_range], np.real(roots)[in_range]
def split_bezier_intersecting_with_closedpath(
        bezier, inside_closedpath, tolerance=0.01):
    """
    Split a Bezier curve into two at the intersection with a closed path.

    Parameters
    ----------
    bezier : (N, 2) array-like
        Control points of the Bezier segment. See `.BezierSegment`.
    inside_closedpath : callable
        A function returning True if a given point (x, y) is inside the
        closed path. See also `.find_bezier_t_intersecting_with_closedpath`.
    tolerance : float
        The tolerance for the intersection. See also
        `.find_bezier_t_intersecting_with_closedpath`.

    Returns
    -------
    left, right
        Lists of control points for the two Bezier segments.
    """
    segment = BezierSegment(bezier)
    # Bracket the crossing parameter: t0 <= t_cross <= t1 ...
    t0, t1 = find_bezier_t_intersecting_with_closedpath(
        segment.point_at_t, inside_closedpath, tolerance=tolerance)
    # ... and split the curve at the midpoint of that bracket.
    return split_de_casteljau(bezier, (t0 + t1) / 2.)
# matplotlib specific
def split_path_inout(path, inside, tolerance=0.01, reorder_inout=False):
    """
    Divide a path into two segments at the point where ``inside(x, y)`` becomes
    False.
    """
    from .path import Path
    path_iter = path.iter_segments()
    # Each iterated segment yields a flat (x0, y0, x1, y1, ...) vertex array;
    # the last pair is the segment's end point.
    ctl_points, command = next(path_iter)
    begin_inside = inside(ctl_points[-2:])  # true if begin point is inside
    ctl_points_old = ctl_points
    # iold/i track the vertex indices bracketing the crossing segment.
    iold = 0
    i = 1
    for ctl_points, command in path_iter:
        iold = i
        i += len(ctl_points) // 2
        if inside(ctl_points[-2:]) != begin_inside:
            # inside() flipped on this segment: the crossing Bezier is the
            # previous end point followed by this segment's control points.
            bezier_path = np.concatenate([ctl_points_old[-2:], ctl_points])
            break
        ctl_points_old = ctl_points
    else:
        # inside() never flipped over the whole path.
        raise ValueError("The path does not intersect with the patch")
    bp = bezier_path.reshape((-1, 2))
    left, right = split_bezier_intersecting_with_closedpath(
        bp, inside, tolerance)
    # Choose path codes matching the split segment's degree:
    # 2 points = line, 3 = quadratic, 4 = cubic Bezier.
    if len(left) == 2:
        codes_left = [Path.LINETO]
        codes_right = [Path.MOVETO, Path.LINETO]
    elif len(left) == 3:
        codes_left = [Path.CURVE3, Path.CURVE3]
        codes_right = [Path.MOVETO, Path.CURVE3, Path.CURVE3]
    elif len(left) == 4:
        codes_left = [Path.CURVE4, Path.CURVE4, Path.CURVE4]
        codes_right = [Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4]
    else:
        raise AssertionError("This should never be reached")
    # The first point of *left* duplicates an existing path vertex, so drop it.
    verts_left = left[1:]
    verts_right = right[:]
    # Stitch the split halves back onto the untouched parts of the path.
    if path.codes is None:
        path_in = Path(np.concatenate([path.vertices[:i], verts_left]))
        path_out = Path(np.concatenate([verts_right, path.vertices[i:]]))
    else:
        path_in = Path(np.concatenate([path.vertices[:iold], verts_left]),
                       np.concatenate([path.codes[:iold], codes_left]))
        path_out = Path(np.concatenate([verts_right, path.vertices[i:]]),
                        np.concatenate([codes_right, path.codes[i:]]))
    # Optionally make path_in the piece that satisfies inside().
    if reorder_inout and not begin_inside:
        path_in, path_out = path_out, path_in
    return path_in, path_out
def inside_circle(cx, cy, r):
    """
    Return a function that checks whether a point is strictly inside the
    circle with center (*cx*, *cy*) and radius *r*.

    The returned function has the signature::

        f(xy: tuple[float, float]) -> bool
    """
    radius_sq = r ** 2

    def _is_inside(xy):
        px, py = xy
        # Compare squared distances to avoid a square root.
        return (px - cx) ** 2 + (py - cy) ** 2 < radius_sq

    return _is_inside
# quadratic Bezier lines
def get_cos_sin(x0, y0, x1, y1):
    # Direction cosines of the vector from (x0, y0) to (x1, y1).
    dx, dy = x1 - x0, y1 - y0
    length = (dx * dx + dy * dy) ** .5
    if length == 0:
        # Degenerate (zero-length) segment: no well-defined direction.
        return 0.0, 0.0
    return dx / length, dy / length
def check_if_parallel(dx1, dy1, dx2, dy2, tolerance=1.e-5):
    """
    Check if two lines are parallel.

    Parameters
    ----------
    dx1, dy1, dx2, dy2 : float
        The gradients *dy*/*dx* of the two lines.
    tolerance : float
        The angular tolerance in radians up to which the lines are considered
        parallel.

    Returns
    -------
    is_parallel
        - 1 if two lines are parallel in same direction.
        - -1 if two lines are parallel in opposite direction.
        - False otherwise.
    """
    # Compare directions as angles.  The (dx, dy) argument order to arctan2
    # is applied identically to both vectors, so the difference is still a
    # consistent angle between the two directions.
    angle_diff = abs(np.arctan2(dx1, dy1) - np.arctan2(dx2, dy2))
    if angle_diff < tolerance:
        return 1    # same direction
    if abs(angle_diff - np.pi) < tolerance:
        return -1   # opposite direction
    return False
def get_parallels(bezier2, width):
    """
    Given the quadratic Bezier control points *bezier2*, returns
    control points of quadratic Bezier lines roughly parallel to given
    one separated by *width*.
    """
    # The parallel Bezier lines are constructed as follows:
    #  c1 and c2 are the control points at the beginning and the end of the
    #  Bezier line; cm is the middle control point.
    c1x, c1y = bezier2[0]
    cmx, cmy = bezier2[1]
    c2x, c2y = bezier2[2]
    parallel_test = check_if_parallel(c1x - cmx, c1y - cmy,
                                      cmx - c2x, cmy - c2y)
    if parallel_test == -1:
        # The two legs point in opposite directions: the curve doubles back
        # on itself, so fall back to the straight c1->c2 direction.
        _api.warn_external(
            "Lines do not intersect. A straight line is used instead.")
        cos_t1, sin_t1 = get_cos_sin(c1x, c1y, c2x, c2y)
        cos_t2, sin_t2 = cos_t1, sin_t1
    else:
        # t1 and t2 are the angles of the segments c1->cm and cm->c2; they
        # are also the angles of the tangents of the path at c1 and c2.
        cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy)
        cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c2x, c2y)
    # Find c1_left and c1_right, located along the line through c1
    # perpendicular to the tangent of the Bezier path, at a distance of
    # width.  Same thing for c2_left and c2_right with respect to c2.
    c1x_left, c1y_left, c1x_right, c1y_right = (
        get_normal_points(c1x, c1y, cos_t1, sin_t1, width)
    )
    c2x_left, c2y_left, c2x_right, c2y_right = (
        get_normal_points(c2x, c2y, cos_t2, sin_t2, width)
    )
    # Find cm_left, the intersection of the line through c1_left with angle
    # t1 and the line through c2_left with angle t2.  Same with cm_right.
    try:
        cmx_left, cmy_left = get_intersection(c1x_left, c1y_left, cos_t1,
                                              sin_t1, c2x_left, c2y_left,
                                              cos_t2, sin_t2)
        cmx_right, cmy_right = get_intersection(c1x_right, c1y_right, cos_t1,
                                                sin_t1, c2x_right, c2y_right,
                                                cos_t2, sin_t2)
    except ValueError:
        # Special case straight lines, i.e., angle between two lines is
        # less than the threshold used by get_intersection (we don't use
        # check_if_parallel as the threshold is not the same).
        cmx_left, cmy_left = (
            0.5 * (c1x_left + c2x_left), 0.5 * (c1y_left + c2y_left)
        )
        cmx_right, cmy_right = (
            0.5 * (c1x_right + c2x_right), 0.5 * (c1y_right + c2y_right)
        )
    # The parallel Bezier lines are created with control points
    # [c1_left, cm_left, c2_left] and [c1_right, cm_right, c2_right].
    path_left = [(c1x_left, c1y_left),
                 (cmx_left, cmy_left),
                 (c2x_left, c2y_left)]
    path_right = [(c1x_right, c1y_right),
                  (cmx_right, cmy_right),
                  (c2x_right, c2y_right)]
    return path_left, path_right
def find_control_points(c1x, c1y, mmx, mmy, c2x, c2y):
    """
    Find control points of the Bezier curve passing through (*c1x*, *c1y*),
    (*mmx*, *mmy*), and (*c2x*, *c2y*), at parametric values 0, 0.5, and 1.
    """
    # Invert B(0.5) = (c1 + 2*cm + c2) / 4 for the middle control point cm.
    middle = (.5 * (4 * mmx - (c1x + c2x)), .5 * (4 * mmy - (c1y + c2y)))
    return [(c1x, c1y), middle, (c2x, c2y)]
def make_wedged_bezier2(bezier2, width, w1=1., wm=0.5, w2=0.):
    """
    Being similar to get_parallels, returns control points of two quadratic
    Bezier lines having a width roughly parallel to given one separated by
    *width*.  The width is scaled by *w1*, *wm* and *w2* at the start,
    middle and end of the curve respectively, producing a wedge shape.
    """
    # Control points: c1 (start), cm (middle), c3 (end).
    c1x, c1y = bezier2[0]
    cmx, cmy = bezier2[1]
    c3x, c3y = bezier2[2]
    # t1 and t2 are the angles of the segments c1->cm and cm->c3; they are
    # also the angles of the tangents of the path at c1 and c3.
    cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy)
    cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c3x, c3y)
    # Find c1_left and c1_right, located along the line through c1
    # perpendicular to the tangent of the Bezier path, at a distance of
    # width * w1.  Same thing for c3_left and c3_right with respect to c3
    # (scaled by w2).
    c1x_left, c1y_left, c1x_right, c1y_right = (
        get_normal_points(c1x, c1y, cos_t1, sin_t1, width * w1)
    )
    c3x_left, c3y_left, c3x_right, c3y_right = (
        get_normal_points(c3x, c3y, cos_t2, sin_t2, width * w2)
    )
    # Find c12 and c23, the midpoints of c1-cm and cm-c3, and c123, the
    # midpoint of c12-c23; c123 is the point of the curve at t = 0.5.
    c12x, c12y = (c1x + cmx) * .5, (c1y + cmy) * .5
    c23x, c23y = (cmx + c3x) * .5, (cmy + c3y) * .5
    c123x, c123y = (c12x + c23x) * .5, (c12y + c23y) * .5
    # Tangent direction at c123 (the direction from c12 to c23).
    cos_t123, sin_t123 = get_cos_sin(c12x, c12y, c23x, c23y)
    # Offset the on-curve midpoint by width * wm on both sides.
    c123x_left, c123y_left, c123x_right, c123y_right = (
        get_normal_points(c123x, c123y, cos_t123, sin_t123, width * wm)
    )
    # Fit quadratic Beziers through the three offset points on each side.
    path_left = find_control_points(c1x_left, c1y_left,
                                    c123x_left, c123y_left,
                                    c3x_left, c3y_left)
    path_right = find_control_points(c1x_right, c1y_right,
                                     c123x_right, c123y_right,
                                     c3x_right, c3y_right)
    return path_left, path_right
return path_left, path_right | PypiClean |
/BIT_framework-0.0.2-py3-none-any.whl/BIT_DL/pytorch/modules/classifiers/conv_classifiers.py | import sys
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from BIT_DL.pytorch.hyperparams import HParams
from BIT_DL.pytorch.modules.classifiers.classifier_base import ClassifierBase
from BIT_DL.pytorch.modules.encoders.conv_encoders import Conv1DEncoder
from BIT_DL.pytorch.utils import utils
__all__ = [
"Conv1DClassifier",
]
class Conv1DClassifier(ClassifierBase):
r"""Simple `Conv-1D` classifier.
This is a combination of the :class:`~texar.torch.modules.Conv1DEncoder`
with a classification layer.
Args:
in_channels (int): Number of channels in the input tensor.
in_features (int): Size of the feature dimension in the input tensor.
hparams (dict, optional): Hyperparameters. Missing
hyperparameters will be set to default values. See
:meth:`default_hparams` for the hyperparameter structure and
default values.
See :meth:`forward` for the inputs and outputs. If :attr:`"data_format"` is
set to ``"channels_first"`` (this is the default), inputs must be a tensor
of shape `[batch_size, channels, length]`. If :attr:`"data_format"` is set
to ``"channels_last"``, inputs must be a tensor of shape
`[batch_size, length, channels]`. For example, for sequence classification,
`length` corresponds to time steps, and `channels` corresponds to embedding
dim.
Example:
.. code-block:: python
inputs = torch.randn([64, 20, 256])
clas = Conv1DClassifier(in_channels=20, in_features=256,
hparams={'num_classes': 10})
logits, pred = clas(inputs)
# logits == Tensor of shape [64, 10]
# pred == Tensor of shape [64]
.. document private functions
"""
    def __init__(self, in_channels: int, in_features: Optional[int] = None,
                 hparams: Optional[Union[HParams, Dict[str, Any]]] = None):
        super().__init__(hparams=hparams)
        # Build the feature-extracting encoder from the subset of *hparams*
        # that Conv1DEncoder understands.
        encoder_hparams = utils.dict_fetch(hparams,
                                           Conv1DEncoder.default_hparams())
        self._encoder = Conv1DEncoder(in_channels=in_channels,
                                      in_features=in_features,
                                      hparams=encoder_hparams)
        # Add an additional dense layer if needed
        self._num_classes = self._hparams.num_classes
        if self._num_classes > 0:
            if self._hparams.num_dense_layers <= 0:
                # No dense layer in the encoder: flatten the conv output and
                # infer the input size of the logits layer from a dummy pass.
                if in_features is None:
                    raise ValueError("'in_features' is required for logits "
                                     "layer when 'num_dense_layers' <= 0")
                self._encoder.append_layer({"type": "Flatten"})
                ones = torch.ones(1, in_channels, in_features)
                input_size = self._encoder._infer_dense_layer_input_size(ones)  # pylint: disable=protected-access
                # NOTE(review): writes through ``self.hparams`` here while the
                # rest of this method reads ``self._hparams`` -- confirm both
                # names refer to the same HParams object on ClassifierBase.
                self.hparams.logit_layer_kwargs.in_features = input_size[1]
            # Normalize logit_layer_kwargs to a plain dict before use.
            logit_kwargs = self._hparams.logit_layer_kwargs
            if logit_kwargs is None:
                logit_kwargs = {}
            elif not isinstance(logit_kwargs, HParams):
                raise ValueError(
                    "hparams['logit_layer_kwargs'] must be a dict.")
            else:
                logit_kwargs = logit_kwargs.todict()
            # The output size is always the number of classes.
            logit_kwargs.update({"out_features": self._num_classes})
            self._encoder.append_layer({"type": "Linear",
                                        "kwargs": logit_kwargs})
    @staticmethod
    def default_hparams() -> Dict[str, Any]:
        r"""Returns a dictionary of hyperparameters with default values.

        .. code-block:: python

            {
                # (1) Same hyperparameters as in Conv1DEncoder
                ...
                # (2) Additional hyperparameters
                "num_classes": 2,
                "logit_layer_kwargs": {
                    "in_features": out_features,  # the encoder's "out_features"
                    "bias": True
                },
                "name": "conv1d_classifier"
            }

        Here:

        1. Same hyperparameters as in
           :class:`~texar.torch.modules.Conv1DEncoder`.
           See the :meth:`~texar.torch.modules.Conv1DEncoder.default_hparams`.
           An instance of :class:`~texar.torch.modules.Conv1DEncoder` is created
           for feature extraction.

        2. Additional hyperparameters:

           `"num_classes"`: int
               Number of classes:

               - If `> 0`, an additional :torch_nn:`Linear`
                 layer is appended to the encoder to compute the logits over
                 classes.
               - If `<= 0`, no dense layer is appended. The number of
                 classes is assumed to be equal to ``out_features`` of the
                 final dense layer size of the encoder.

           `"logit_layer_kwargs"`: dict
               Keyword arguments for the logit :torch_nn:`Linear` layer
               constructor, except for argument ``out_features`` which is set
               to ``"num_classes"``. Ignored if no extra logit layer is
               appended.

           `"name"`: str
               Name of the classifier.
        """
        hparams = Conv1DEncoder.default_hparams()
        hparams.update({
            "name": "conv1d_classifier",
            "num_classes": 2,  # set to <=0 to avoid appending output layer
            "logit_layer_kwargs": {
                "in_features": hparams["out_features"],
                "bias": True
            }
        })
        return hparams
def forward(self, # type:ignore
input: torch.Tensor,
sequence_length: Optional[Union[torch.LongTensor,
List[int]]] = None,
dtype: Optional[torch.dtype] = None,
data_format: Optional[str] = None) \
-> Tuple[torch.Tensor, torch.Tensor]:
r"""Feeds the inputs through the network and makes classification.
The arguments are the same as in
:class:`~texar.torch.modules.Conv1DEncoder`.
The predictions of binary classification (``num_classes`` =1) and
multi-way classification (``num_classes`` >1) are different, as
explained below.
Args:
input: The inputs to the network, which is a 3D tensor. See
:class:`~texar.torch.modules.Conv1DEncoder` for more details.
sequence_length (optional): An int tensor of shape `[batch_size]` or
a python array containing the length of each element in
:attr:`inputs`. If given, time steps beyond the length will
first be masked out before feeding to the layers.
dtype (optional): Type of the inputs. If not provided, infers
from inputs automatically.
data_format (optional): Data type of the input tensor. If
``channels_last``, the last dimension will be treated as channel
dimension so the size of the :attr:`input` should be
`[batch_size, X, channel]`. If ``channels_first``, first
dimension will be treated as channel dimension so the size
should be `[batch_size, channel, X]`. Defaults to None.
If None, the value will be picked from hyperparameters.
Returns:
A tuple ``(logits, pred)``, where
- ``logits`` is a :tensor:`Tensor` of shape
``[batch_size, num_classes]`` for ``num_classes`` >1, and
``[batch_size]`` for ``num_classes`` =1 (i.e., binary
classification).
- ``pred`` is the prediction, a :tensor:`LongTensor` of shape
``[batch_size]``. For binary classification, the standard
sigmoid function is used for prediction, and the class labels are
``{0, 1}``.
"""
logits = self._encoder(input, sequence_length=sequence_length,
dtype=dtype, data_format=data_format)
num_classes = self._hparams.num_classes
is_binary = num_classes == 1
is_binary = is_binary or (num_classes <= 0 and logits.shape[1] == 1)
if is_binary:
pred = (logits > 0)
logits = logits.view(-1)
else:
pred = torch.argmax(logits, dim=1)
pred = pred.view(-1).long()
return logits, pred
    @property
    def num_classes(self) -> int:
        r"""The number of classes this classifier distinguishes, as stored
        in ``self._num_classes`` at construction time.
        """
        return self._num_classes
    @property
    def encoder(self) -> nn.Module:
        r"""The underlying encoder network used for feature extraction."""
        return self._encoder
    def has_layer(self, layer_name: str) -> bool:
        r"""Returns `True` if a layer with the given name exists in the
        underlying encoder, `False` otherwise.

        Args:
            layer_name (str): Name of the layer.
        """
        return self._encoder.has_layer(layer_name)
    def layer_by_name(self, layer_name: str) -> Optional[nn.Module]:
        r"""Returns the encoder layer with the given name, or `None` if no
        layer with that name exists.

        Args:
            layer_name (str): Name of the layer.
        """
        return self._encoder.layer_by_name(layer_name)
    @property
    def layers_by_name(self) -> Dict[str, nn.Module]:
        r"""A dictionary mapping layer names to the layers of the
        underlying encoder.
        """
        return self._encoder.layers_by_name
    @property
    def layers(self) -> nn.ModuleList:
        r"""A list of the layers of the underlying encoder."""
        return self._encoder.layers
    @property
    def layer_names(self) -> List[str]:
        r"""A list of uniquified layer names of the underlying encoder."""
        return self._encoder.layer_names
@property
def output_size(self) -> int:
r"""The feature size of :meth:`forward` output :attr:`logits`.
If :attr:`logits` size is only determined by input
(i.e. if ``num_classes`` == 1), the feature size is equal
to ``-1``. Otherwise, if ``num_classes`` > 1, it is equal
to ``num_classes``.
"""
if self._hparams.num_classes > 1:
logit_dim = self._hparams.num_classes
elif self._hparams.num_classes == 1:
logit_dim = -1
else:
raise AttributeError("'Conv1DClassifier' object has"
"no attribute 'output_size'"
"if 'self._hparams.num_classes' < 1.")
return logit_dim | PypiClean |
/LwmaL_mess_client-0.8.7.tar.gz/LwmaL_mess_client-0.8.7/client/main_window.py | from PyQt5.QtWidgets import QMainWindow, qApp, QMessageBox, QApplication, QListView
from PyQt5.QtGui import QStandardItemModel, QStandardItem, QBrush, QColor
from PyQt5.QtCore import pyqtSlot, QEvent, Qt
from Crypto.Cipher import PKCS1_OAEP
from Crypto.PublicKey import RSA
import json
import logging
import base64
from client.main_window_conv import Ui_MainClientWindow
from client.add_contact import AddContactDialog
from client.del_contact import DelContactDialog
from common.errors import ServerError
from common.variables import *
logger = logging.getLogger('client')
class ClientMainWindow(QMainWindow):
    """Main client window.

    Contains the core logic of the client module: contact management,
    per-contact message history, end-to-end message encryption and the Qt
    slots fed by the transport thread.  The window layout is created in
    QTDesigner and loaded from the converted module ``main_window_conv.py``.
    """

    def __init__(self, database, transport, keys):
        super().__init__()
        # Core collaborators.
        self.database = database
        self.transport = transport

        # Message decryptor preloaded with this client's private key.
        self.decrypter = PKCS1_OAEP.new(keys)

        # Load the window layout generated by QTDesigner.
        self.ui = Ui_MainClientWindow()
        self.ui.setupUi(self)

        # "Exit" menu action.
        self.ui.menu_exit.triggered.connect(qApp.exit)

        # "Send message" button.
        self.ui.btn_send.clicked.connect(self.send_message)

        # "Add contact" button and menu action.
        self.ui.btn_add_contact.clicked.connect(self.add_contact_window)
        self.ui.menu_add_contact.triggered.connect(self.add_contact_window)

        # "Remove contact" button and menu action.
        self.ui.btn_remove_contact.clicked.connect(self.delete_contact_window)
        self.ui.menu_del_contact.triggered.connect(self.delete_contact_window)

        # Remaining state.
        self.contacts_model = None
        self.history_model = None
        self.messages = QMessageBox()
        self.current_chat = None
        self.current_chat_key = None
        self.encryptor = None
        self.ui.list_messages.setHorizontalScrollBarPolicy(
            Qt.ScrollBarAlwaysOff)
        self.ui.list_messages.setWordWrap(True)

        # A double click on the contact list selects the chat partner.
        self.ui.list_contacts.doubleClicked.connect(self.select_active_user)

        self.clients_list_update()
        self.set_disabled_input()
        self.show()

    def set_disabled_input(self):
        """Disable the message input widgets until a recipient is chosen."""
        # Recipient hint label.
        self.ui.label_new_message.setText(
            'Для выбора получателя дважды кликните на нем в окне контактов.')
        self.ui.text_message.clear()
        if self.history_model:
            self.history_model.clear()
        # The input field and the send button stay inactive until a
        # recipient is selected.
        self.ui.btn_clear.setDisabled(True)
        self.ui.btn_send.setDisabled(True)
        self.ui.text_message.setDisabled(True)

        self.encryptor = None
        self.current_chat = None
        self.current_chat_key = None

    def history_list_update(self):
        """Fill the history QListView with the conversation with the
        current chat partner (at most the 20 most recent messages).
        """
        # History sorted by date.  (Renamed from `list`, which shadowed
        # the builtin.)
        history = sorted(
            self.database.get_history(
                self.current_chat),
            key=lambda item: item[3])
        # Create the model lazily.
        if not self.history_model:
            self.history_model = QStandardItemModel()
            self.ui.list_messages.setModel(self.history_model)
        # Drop stale entries.
        self.history_model.clear()
        # Show at most the 20 most recent records.
        length = len(history)
        start_index = 0
        if length > 20:
            start_index = length - 20
        # Incoming and outgoing messages are distinguished by alignment
        # and background colour.
        for i in range(start_index, length):
            item = history[i]
            if item[1] == 'in':
                mess = QStandardItem(
                    f'Входящее от {item[3].replace(microsecond=0)}:\n {item[2]}')
                mess.setEditable(False)
                mess.setBackground(QBrush(QColor(255, 213, 213)))
                mess.setTextAlignment(Qt.AlignLeft)
                self.history_model.appendRow(mess)
            else:
                mess = QStandardItem(
                    f'Исходящее от {item[3].replace(microsecond=0)}:\n {item[2]}')
                mess.setEditable(False)
                mess.setTextAlignment(Qt.AlignRight)
                mess.setBackground(QBrush(QColor(204, 255, 204)))
                self.history_model.appendRow(mess)
        self.ui.list_messages.scrollToBottom()

    def select_active_user(self):
        """Slot: a double click on the contact list selects a partner."""
        # The user picked by the double click is the current index of the
        # QListView.
        self.current_chat = self.ui.list_contacts.currentIndex().data()
        self.set_active_user()

    def set_active_user(self):
        """Activate the chat with the selected partner."""
        # Request the partner's public key and build the encryptor.
        try:
            self.current_chat_key = self.transport.key_request(
                self.current_chat)
            logger.debug(f'Загружен открытый ключ для {self.current_chat}')
            if self.current_chat_key:
                self.encryptor = PKCS1_OAEP.new(
                    RSA.import_key(self.current_chat_key))
        except (OSError, json.JSONDecodeError):
            self.current_chat_key = None
            self.encryptor = None
            logger.debug(f'Не удалось получить ключ для {self.current_chat}')

        # Without a key the chat cannot be started.
        if not self.current_chat_key:
            self.messages.warning(
                self, 'Ошибка', 'Для выбранного пользователя нет ключа шифрования.')
            return

        # Update the hint label and enable the input widgets.
        self.ui.label_new_message.setText(
            f'Введите сообщенние для {self.current_chat}:')
        self.ui.btn_clear.setDisabled(False)
        self.ui.btn_send.setDisabled(False)
        self.ui.text_message.setDisabled(False)

        # Load the message history for the selected partner.
        self.history_list_update()

    def clients_list_update(self):
        """Refresh the contact list from the local database."""
        contacts_list = self.database.get_contacts()
        self.contacts_model = QStandardItemModel()
        for i in sorted(contacts_list):
            item = QStandardItem(i)
            item.setEditable(False)
            self.contacts_model.appendRow(item)
        self.ui.list_contacts.setModel(self.contacts_model)

    def add_contact_window(self):
        """Open the "add contact" dialog."""
        global select_dialog
        select_dialog = AddContactDialog(self.transport, self.database)
        select_dialog.btn_ok.clicked.connect(
            lambda: self.add_contact_action(select_dialog))
        select_dialog.show()

    def add_contact_action(self, item):
        """Handler for the "Add" button of the add-contact dialog."""
        new_contact = item.selector.currentText()
        self.add_contact(new_contact)
        item.close()

    def add_contact(self, new_contact):
        """Add a contact to the server-side and client-side databases,
        then refresh the window contents.
        """
        try:
            self.transport.add_contact(new_contact)
        except ServerError as err:
            self.messages.critical(self, 'Ошибка сервера', err.text)
        except OSError as err:
            if err.errno:
                self.messages.critical(
                    self, 'Ошибка', 'Потеряно соединение с сервером!')
                self.close()
            else:
                # A timeout raises OSError without errno.  Previously this
                # dialog was also shown after the errno branch had already
                # closed the window.
                self.messages.critical(self, 'Ошибка', 'Таймаут соединения!')
        else:
            self.database.add_contact(new_contact)
            contact_item = QStandardItem(new_contact)
            contact_item.setEditable(False)
            self.contacts_model.appendRow(contact_item)
            # Log the contact name; previously the variable had been
            # rebound to the QStandardItem, so its repr was logged instead.
            logger.info(f'Успешно добавлен контакт {new_contact}')
            self.messages.information(
                self, 'Успех', 'Контакт успешно добавлен.')

    def delete_contact_window(self):
        """Open the "remove contact" dialog."""
        global remove_dialog
        remove_dialog = DelContactDialog(self.database)
        remove_dialog.btn_ok.clicked.connect(
            lambda: self.delete_contact(remove_dialog))
        remove_dialog.show()

    def delete_contact(self, item):
        """Remove a contact from the server-side and client-side
        databases, then refresh the window contents.
        """
        selected = item.selector.currentText()
        try:
            self.transport.remove_contact(selected)
        except ServerError as err:
            self.messages.critical(self, 'Ошибка сервера', err.text)
        except OSError as err:
            if err.errno:
                self.messages.critical(
                    self, 'Ошибка', 'Потеряно соединение с сервером!')
                self.close()
            else:
                self.messages.critical(self, 'Ошибка', 'Таймаут соединения!')
        else:
            self.database.del_contact(selected)
            self.clients_list_update()
            logger.info(f'Успешно удалён контакт {selected}')
            self.messages.information(self, 'Успех', 'Контакт успешно удалён.')
            item.close()
            # If the active chat partner was removed, disable the input.
            if selected == self.current_chat:
                self.current_chat = None
                self.set_disabled_input()

    def send_message(self):
        """Encrypt and send the typed message to the current partner."""
        # Grab the text and clear the field; do nothing when it is empty.
        message_text = self.ui.text_message.toPlainText()
        self.ui.text_message.clear()
        if not message_text:
            return
        # Encrypt with the recipient's public key and wrap in base64.
        message_text_encrypted = self.encryptor.encrypt(
            message_text.encode('utf8'))
        message_text_encrypted_base64 = base64.b64encode(
            message_text_encrypted)
        try:
            self.transport.send_message(
                self.current_chat,
                message_text_encrypted_base64.decode('ascii'))
        except ServerError as err:
            self.messages.critical(self, 'Ошибка', err.text)
        except OSError as err:
            if err.errno:
                self.messages.critical(
                    self, 'Ошибка', 'Потеряно соединение с сервером!')
                self.close()
            else:
                self.messages.critical(self, 'Ошибка', 'Таймаут соединения!')
        except (ConnectionResetError, ConnectionAbortedError):
            self.messages.critical(
                self, 'Ошибка', 'Потеряно соединение с сервером!')
            self.close()
        else:
            self.database.save_message(self.current_chat, 'out', message_text)
            logger.debug(
                f'Отправлено сообщение для {self.current_chat}: {message_text}')
            self.history_list_update()

    @pyqtSlot(dict)
    def message(self, message):
        """Slot for incoming messages.

        Decrypts the payload, stores it in the history and, when the
        sender is not the current chat partner, asks the user whether to
        switch to that chat (adding the sender to the contact list first
        if necessary).
        """
        # Raw encrypted bytes.
        encrypted_message = base64.b64decode(message[MESSAGE_TEXT])
        # Decrypt; warn and bail out on failure.
        try:
            decrypted_message = self.decrypter.decrypt(encrypted_message)
        except (ValueError, TypeError):
            self.messages.warning(
                self, 'Ошибка', 'Не удалось декодировать сообщение.')
            return
        sender = message[SENDER]
        # Store the message under its *sender*.  Previously it was stored
        # under self.current_chat, misfiling messages that arrived from
        # anybody other than the current partner.
        self.database.save_message(
            sender,
            'in',
            decrypted_message.decode('utf8'))
        if sender == self.current_chat:
            self.history_list_update()
        else:
            # Is the sender already a contact?
            if self.database.check_contact(sender):
                # Offer to switch to a chat with the sender.
                if self.messages.question(
                        self,
                        'Новое сообщение',
                        f'Получено новое сообщение от {sender}, открыть чат с ним?',
                        QMessageBox.Yes,
                        QMessageBox.No) == QMessageBox.Yes:
                    self.current_chat = sender
                    self.set_active_user()
            else:
                # Not a contact yet: offer to add the sender and open a chat.
                if self.messages.question(
                        self,
                        'Новое сообщение',
                        f'Получено новое сообщение от {sender}.\n Данного пользователя нет в вашем контакт-листе.\n Добавить в контакты и открыть чат с ним?',
                        QMessageBox.Yes,
                        QMessageBox.No) == QMessageBox.Yes:
                    self.add_contact(sender)
                    self.current_chat = sender
                    # Save again: the first save may have been lost because
                    # the contact did not exist at that point.
                    self.database.save_message(
                        self.current_chat, 'in', decrypted_message.decode('utf8'))
                    self.set_active_user()

    @pyqtSlot()
    def connection_lost(self):
        """Slot for a lost server connection: warn the user and quit."""
        self.messages.warning(
            self,
            'Сбой соединения',
            'Потеряно соединение с сервером. ')
        self.close()

    @pyqtSlot()
    def sig_205(self):
        """Slot that refreshes local databases on a server 205 command."""
        if self.current_chat and not self.database.check_user(
                self.current_chat):
            self.messages.warning(
                self,
                'Сочувствую',
                'К сожалению собеседник был удалён с сервера.')
            self.set_disabled_input()
            self.current_chat = None
        self.clients_list_update()

    def make_connection(self, trans_obj):
        """Connect the transport object's signals to this window's slots."""
        trans_obj.new_message.connect(self.message)
        trans_obj.connection_lost.connect(self.connection_lost)
        trans_obj.message_205.connect(self.sig_205)
/Argaeus-0.1.10.tar.gz/Argaeus-0.1.10/argaeus/controller/operationcontroller.py | from argaeus.controller.acontroller import AController
class OperationController(AController):
    """
    This controller is responsible for whether the thermostat is operating in
    observation or in control mode, i.e. whether the output of the DAC is
    connected to the heating system or not.

    It registers to this topic:
      * toggle - toggles the output between True and False

    Each change is published to the copreus controlled relay with the
    corresponding commands active and passive.

    Config yaml entries:
        default-is-active: True  # Is the controller active or passive initially
        topic-pub: /test/relais/closed  # Topic that controls the output behavior relais of the thermostat.
        command-active: ON  # publish this value to topic-pub to set the controller to active operation.
        command-passive: OFF  # publish this value to topic-pub to set the controller to passive operation.
        topic-sub-toggle: /test/r1/button/pressed  # incoming event to toggle active/passive operation (optional
                                                   # together with command-toggle)
        command-toggle: PRESSED  # command for topic-sub-toggle / toggle active/passive operation (optional
                                 # together with topic-sub-toggle)
    """

    _default_is_active = None  # default state as defined in the config
    active_operation = None  # current state
    _topic_pub = None  # publish state for relay to this topic
    _command_active = None  # activate DAC output
    _command_passive = None  # deactivate DAC output
    _topic_sub_toggle = None  # subscribe for incoming toggle trigger events
    _command_sub_toggle = None  # command expected to initiate trigger

    def __init__(self, config, mqtt_client, logger):
        """
        Constructor

        :param config: config yaml structure
        :param mqtt_client: mymqttclient instance
        :param logger: logger instance - a child instance will be spawned with name=__name__
        """
        AController.__init__(self, config, mqtt_client, logger, logger_name=__name__)

        self._default_is_active = bool(self._config["default-is-active"])
        self.active_operation = self._default_is_active

        self._topic_pub = self._config["topic-pub"]
        self._command_active = self._config["command-active"]
        self._command_passive = self._config["command-passive"]

        # 'topic-sub-toggle' is optional, but as soon as it is configured
        # 'command-toggle' becomes mandatory.
        self._topic_sub_toggle = self._config.get("topic-sub-toggle")
        if self._topic_sub_toggle is not None:
            try:
                self._command_sub_toggle = self._config["command-toggle"]
            except KeyError:
                message = ("OperationController.__init__ - 'topic-sub-toggle' is set but 'command-toggle' "
                           "is missing.")
                self._logger.error(message)
                raise KeyError(message)

        self._logger.info("OperationController.__init__ - done")

    def _toggle_operation_handler(self, value):
        """
        Check if the incoming message on _topic_sub_toggle is equivalent to
        _command_sub_toggle.  Toggle operation if yes.

        :param value: mqtt message (raw bytes)
        """
        if len(value) > 0 and value.decode("UTF-8") == self._command_sub_toggle:
            self._toggle_operation()
        else:
            self._logger.warning("OperationController._toggle_operation_handler - dont know how to handle "
                                 "message '{}'".format(value))

    def _toggle_operation(self):
        """Change operation state to not previous state and publish it."""
        self.active_operation = not self.active_operation
        self._logger.info("OperationController._toggle_operation - toggle active/passive (now: '{}').".
                          format(self.active_operation))
        self._publish()

    def _publish(self):
        """
        Publish the current operation state (command_active or
        command_passive) to _topic_pub.
        """
        if self.active_operation:
            self._mqtt_client.publish(self._topic_pub, self._command_active)
        else:
            self._mqtt_client.publish(self._topic_pub, self._command_passive)

    def start(self):
        """
        Subscribe topic_sub_toggle (if configured) and publish the
        initial state.
        """
        if self._topic_sub_toggle is not None:
            self._mqtt_client.subscribe(self._topic_sub_toggle, self._toggle_operation_handler)
        self._publish()

    def stop(self):
        """Unsubscribe _topic_sub_toggle (if configured)."""
        if self._topic_sub_toggle is not None:
            self._mqtt_client.unsubscribe(self._topic_sub_toggle, self._toggle_operation_handler)
/DI_engine-0.4.9-py3-none-any.whl/ding/policy/rainbow.py | from typing import List, Dict, Any, Tuple, Union
import torch
import copy
from ding.torch_utils import Adam, to_device
from ding.rl_utils import dist_nstep_td_data, dist_nstep_td_error, get_train_sample, get_nstep_return_data
from ding.model import model_wrap
from ding.utils import POLICY_REGISTRY
from ding.utils.data import default_collate, default_decollate
from .dqn import DQNPolicy
from .common_utils import default_preprocess_learn
@POLICY_REGISTRY.register('rainbow')
class RainbowDQNPolicy(DQNPolicy):
    r"""
    Overview:
        Rainbow DQN contains several improvements upon DQN, including:
            - target network
            - dueling architecture
            - prioritized experience replay
            - n_step return
            - noise net
            - distribution net

        Therefore, the RainbowDQNPolicy class inherits from the DQNPolicy class.

    Config:
        == ==================== ======== ============== ======================================== =======================
        ID Symbol               Type     Default Value  Description                              Other(Shape)
        == ==================== ======== ============== ======================================== =======================
        1  ``type``             str      rainbow        | RL policy register name, refer to      | this arg is optional,
                                                        | registry ``POLICY_REGISTRY``           | a placeholder
        2  ``cuda``             bool     False          | Whether to use cuda for network        | this arg can be diff-
                                                                                                 | erent from modes
        3  ``on_policy``        bool     False          | Whether the RL algorithm is on-policy
                                                        | or off-policy
        4  ``priority``         bool     True           | Whether use priority(PER)              | priority sample,
                                                                                                 | update priority
        5  ``model.v_min``      float    -10            | Value of the smallest atom
                                                        | in the support set.
        6  ``model.v_max``      float    10             | Value of the largest atom
                                                        | in the support set.
        7  ``model.n_atom``     int      51             | Number of atoms in the support set
                                                        | of the value distribution.
        8  | ``other.eps``      float    0.05           | Start value for epsilon decay. It's
           | ``.start``                                 | small because rainbow use noisy net.
        9  | ``other.eps``      float    0.05           | End value for epsilon decay.
           | ``.end``
        10 | ``discount_``      float    0.97,          | Reward's future discount factor, aka.  | may be 1 when sparse
           | ``factor``                  [0.95, 0.999]  | gamma                                  | reward env
        11 ``nstep``            int      3,             | N-step reward discount sum for target
                                         [3, 5]         | q_value estimation
        12 | ``learn.update``   int      3              | How many updates(iterations) to train  | this args can be vary
           | ``per_collect``                            | after collector's one collection. Only | from envs. Bigger val
                                                        | valid in serial training               | means more off-policy
        == ==================== ======== ============== ======================================== =======================
    """

    config = dict(
        # (str) RL policy register name (refer to function "POLICY_REGISTRY").
        type='rainbow',
        # (bool) Whether to use cuda for network.
        cuda=False,
        # (bool) Whether the RL algorithm is on-policy or off-policy.
        on_policy=False,
        # (bool) Whether use priority(priority sample, IS weight, update priority)
        priority=True,
        # (bool) Whether use Importance Sampling Weight to correct biased update. If True, priority must be True.
        priority_IS_weight=True,
        # (int) Number of training samples(randomly collected) in replay buffer when training starts.
        # random_collect_size=2000,
        model=dict(
            # (float) Value of the smallest atom in the support set.
            # Default to -10.0.
            v_min=-10,
            # (float) Value of the largest atom in the support set.
            # Default to 10.0.
            v_max=10,
            # (int) Number of atoms in the support set of the
            # value distribution. Default to 51.
            n_atom=51,
        ),
        # (float) Reward's future discount factor, aka. gamma.
        discount_factor=0.99,
        # (int) N-step reward for target q_value estimation
        nstep=3,
        learn=dict(
            # How many updates(iterations) to train after collector's one collection.
            # Bigger "update_per_collect" means bigger off-policy.
            # collect data -> update policy-> collect data -> ...
            update_per_collect=1,
            batch_size=32,
            learning_rate=0.001,
            # ==============================================================
            # The following configs are algorithm-specific
            # ==============================================================
            # (int) Frequency of target network update.
            target_update_freq=100,
            # (bool) Whether ignore done(usually for max step termination env)
            ignore_done=False,
        ),
        # collect_mode config
        collect=dict(
            # (int) Only one of [n_sample, n_episode] should be set
            # n_sample=32,
            # (int) Cut trajectories into pieces with length "unroll_len".
            unroll_len=1,
        ),
        eval=dict(),
        # other config
        other=dict(
            # Epsilon greedy with decay.
            eps=dict(
                # (str) Decay type. Support ['exp', 'linear'].
                type='exp',
                # (float) Start value for epsilon decay, in [0, 1]. It's equals to `end` because rainbow uses noisy net.
                start=0.05,
                # (float) End value for epsilon decay, in [0, 1].
                end=0.05,
                # (int) Env steps of epsilon decay.
                decay=100000,
            ),
            replay_buffer=dict(
                # (int) Max size of replay buffer.
                replay_buffer_size=100000,
                # (float) Prioritization exponent.
                alpha=0.6,
                # (float) Importance sample soft coefficient.
                # 0 means no correction, while 1 means full correction
                beta=0.4,
                # (int) Anneal step for beta: 0 means no annealing. Defaults to 0
                anneal_step=100000,
            )
        ),
    )

    def default_model(self) -> Tuple[str, List[str]]:
        """Return the default model name and its import path."""
        return 'rainbowdqn', ['ding.model.template.q_learning']

    def _init_learn(self) -> None:
        r"""
        Overview:
            Init the learner model of RainbowDQNPolicy
        Arguments:
            - learning_rate (:obj:`float`): the learning rate for the optimizer
            - gamma (:obj:`float`): the discount factor
            - nstep (:obj:`int`): the num of n step return
            - v_min (:obj:`float`): value distribution minimum value
            - v_max (:obj:`float`): value distribution maximum value
            - n_atom (:obj:`int`): the number of atom sample point
        """
        self._priority = self._cfg.priority
        self._priority_IS_weight = self._cfg.priority_IS_weight
        self._optimizer = Adam(self._model.parameters(), lr=self._cfg.learn.learning_rate)
        self._gamma = self._cfg.discount_factor
        self._nstep = self._cfg.nstep
        self._v_max = self._cfg.model.v_max
        self._v_min = self._cfg.model.v_min
        self._n_atom = self._cfg.model.n_atom

        # Target network, updated by periodic assignment from the learn model.
        self._target_model = copy.deepcopy(self._model)
        self._target_model = model_wrap(
            self._target_model,
            wrapper_name='target',
            update_type='assign',
            update_kwargs={'freq': self._cfg.learn.target_update_freq}
        )
        self._learn_model = model_wrap(self._model, wrapper_name='argmax_sample')
        self._learn_model.reset()
        self._target_model.reset()

    def _forward_learn(self, data: dict) -> Dict[str, Any]:
        """
        Overview:
            Forward and backward function of learn mode, acquire the data and calculate the loss and\
            optimize learner model
        Arguments:
            - data (:obj:`dict`): Dict type data, including at least ['obs', 'next_obs', 'reward', 'action']
        Returns:
            - info_dict (:obj:`Dict[str, Any]`): Including cur_lr and total_loss
                - cur_lr (:obj:`float`): current learning rate
                - total_loss (:obj:`float`): the calculated loss
        """
        data = default_preprocess_learn(
            data,
            use_priority=self._priority,
            use_priority_IS_weight=self._cfg.priority_IS_weight,
            ignore_done=self._cfg.learn.ignore_done,
            use_nstep=True
        )
        if self._cuda:
            data = to_device(data, self._device)
        # ====================
        # Rainbow forward
        # ====================
        self._learn_model.train()
        self._target_model.train()
        # Reset noise of noisenet for both main model and target model.
        self._reset_noise(self._learn_model)
        self._reset_noise(self._target_model)
        q_dist = self._learn_model.forward(data['obs'])['distribution']
        with torch.no_grad():
            target_q_dist = self._target_model.forward(data['next_obs'])['distribution']
            self._reset_noise(self._learn_model)
            target_q_action = self._learn_model.forward(data['next_obs'])['action']
        value_gamma = data.get('value_gamma', None)
        # Named `td_data` to avoid rebinding (and shadowing) the input batch.
        td_data = dist_nstep_td_data(
            q_dist, target_q_dist, data['action'], target_q_action, data['reward'], data['done'], data['weight']
        )
        loss, td_error_per_sample = dist_nstep_td_error(
            td_data, self._gamma, self._v_min, self._v_max, self._n_atom, nstep=self._nstep, value_gamma=value_gamma
        )
        # ====================
        # Rainbow update
        # ====================
        self._optimizer.zero_grad()
        loss.backward()
        self._optimizer.step()
        # =============
        # after update
        # =============
        self._target_model.update(self._learn_model.state_dict())
        return {
            'cur_lr': self._optimizer.defaults['lr'],
            'total_loss': loss.item(),
            'priority': td_error_per_sample.abs().tolist(),
        }

    def _init_collect(self) -> None:
        r"""
        Overview:
            Collect mode init method. Called by ``self.__init__``.
            Init traj and unroll length, collect model.

        .. note::
            The rainbow dqn enables eps_greedy_sample, but might not need to use it, \
            as the noise_net contains noise that can help exploration.
        """
        self._unroll_len = self._cfg.collect.unroll_len
        self._nstep = self._cfg.nstep
        self._gamma = self._cfg.discount_factor
        self._collect_model = model_wrap(self._model, wrapper_name='eps_greedy_sample')
        self._collect_model.reset()

    def _forward_collect(self, data: dict, eps: float) -> dict:
        r"""
        Overview:
            Reset the noise from noise net and collect output according to eps_greedy plugin
        Arguments:
            - data (:obj:`Dict[str, Any]`): Dict type data, stacked env data for predicting policy_output(action), \
                values are torch.Tensor or np.ndarray or dict/list combinations, keys are env_id indicated by integer.
            - eps (:obj:`float`): epsilon value for exploration, which is decayed by collected env step.
        Returns:
            - output (:obj:`Dict[int, Any]`): Dict type data, including at least inferred action according to input obs.
        ReturnsKeys
            - necessary: ``action``
        """
        data_id = list(data.keys())
        data = default_collate(list(data.values()))
        if self._cuda:
            data = to_device(data, self._device)
        self._collect_model.eval()
        self._reset_noise(self._collect_model)
        with torch.no_grad():
            output = self._collect_model.forward(data, eps=eps)
        if self._cuda:
            output = to_device(output, 'cpu')
        output = default_decollate(output)
        return {i: d for i, d in zip(data_id, output)}

    def _get_train_sample(self, traj: list) -> Union[None, List[Any]]:
        r"""
        Overview:
            Get the trajectory and the n step return data, then sample from the n_step return data
        Arguments:
            - traj (:obj:`list`): The trajectory's buffer list
        Returns:
            - samples (:obj:`dict`): The training samples generated
        """
        data = get_nstep_return_data(traj, self._nstep, gamma=self._gamma)
        return get_train_sample(data, self._unroll_len)

    def _reset_noise(self, model: torch.nn.Module):
        r"""
        Overview:
            Reset the noise of model
        Arguments:
            - model (:obj:`torch.nn.Module`): the model to reset, must contain reset_noise method
        """
        for m in model.modules():
            if hasattr(m, 'reset_noise'):
                m.reset_noise()
/Chorus-0.9.0.tar.gz/Chorus-0.9.0/chorus/util/debug.py |
import os
import sys
import cProfile
import pstats
import linecache
import tracemalloc
from itertools import chain
from collections import deque
def profile(func):
    """Decorator: run ``func`` under cProfile and print the 20 entries
    with the largest cumulative time to stdout.
    """
    from functools import wraps

    @wraps(func)  # preserve the wrapped function's name/docstring
    def _f(*args, **kwargs):
        print("\n<<<---")
        pr = cProfile.Profile()
        pr.enable()
        res = func(*args, **kwargs)
        # Stats() collects the data from the (still enabled) profiler.
        p = pstats.Stats(pr)
        p.strip_dirs().sort_stats('cumtime').print_stats(20)
        print("\n--->>>")
        return res
    return _f
def total_size(obj, verbose=False):
    """Return the approximate memory footprint of *obj* in bytes.

    Recurses into tuples, lists, sets, frozensets, deques, dicts and
    ``__dict__``-bearing objects; each object is counted at most once
    (tracked by ``id``).  With ``verbose`` each visited object is printed.
    """
    visited = set()

    def _size(item):
        if id(item) in visited:
            return 0
        visited.add(id(item))
        total = sys.getsizeof(item, default=0)
        if verbose:
            print(total, type(item), repr(item))
        if isinstance(item, (tuple, list, set, frozenset, deque)):
            total += sum(_size(child) for child in item)
        elif isinstance(item, dict):
            total += sum(_size(part) for part in chain.from_iterable(item.items()))
        elif "__dict__" in dir(item):
            total += sum(_size(part) for part in chain.from_iterable(item.__dict__.items()))
        return total

    return _size(obj)
def malloc(func):
    """Decorator: trace allocations with tracemalloc while ``func`` runs
    and print the ten largest allocation sites afterwards.
    """
    def _inner(*args, **kwargs):
        print("\n<<<---")
        tracemalloc.start()
        result = func(*args, **kwargs)
        top_stats = tracemalloc.take_snapshot().statistics('lineno')
        print("[ Top 10 ]")
        for rank, stat in enumerate(top_stats[:10]):
            frame = stat.traceback[0]
            short_name = os.sep.join(frame.filename.split(os.sep)[-2:])
            print("#%s: %s:%s: %.1f KiB"
                  % (rank, short_name, frame.lineno, stat.size / 1024))
            print(linecache.getline(frame.filename, frame.lineno).strip())
        print("--->>>\n")
        return result
    return _inner
def malloc_diff(func):
    """Decorator: print the ten largest tracemalloc allocation differences
    caused by a call to ``func``.
    """
    def _inner(*args, **kwargs):
        print("\n<<<---")
        tracemalloc.start()
        before = tracemalloc.take_snapshot()
        result = func(*args, **kwargs)
        after = tracemalloc.take_snapshot()
        print("[ Top 10 differences]")
        for entry in after.compare_to(before, 'lineno')[:10]:
            print(entry)
        print("--->>>\n")
        return result
    return _inner
def mute(func):
    """Decorator: silence stdout while ``func`` runs.

    stdout is restored even when ``func`` raises (previously an exception
    left ``sys.stdout`` pointing at devnull) and the devnull handle is
    always closed (previously it leaked on every call that raised).
    """
    def _f(*args, **kwargs):
        devnull = open(os.devnull, 'w')
        sys.stdout = devnull
        try:
            return func(*args, **kwargs)
        finally:
            sys.stdout = sys.__stdout__
            devnull.close()
    return _f
/B9gemyaeix-4.14.1.tar.gz/B9gemyaeix-4.14.1/weblate/metrics/wrapper.py |
from calendar import monthrange
from datetime import date, timedelta
from typing import Dict
from django.core.cache import cache
from django.utils.functional import cached_property
from django.utils.translation import pgettext_lazy
from weblate.metrics.models import METRIC_CHANGES, Metric
# Translatable short month names, indexed 0-11 (January first); used by
# MetricsWrapper.monthly_activity for chart labels.
MONTH_NAMES = [
    pgettext_lazy("Short name of month", "Jan"),
    pgettext_lazy("Short name of month", "Feb"),
    pgettext_lazy("Short name of month", "Mar"),
    pgettext_lazy("Short name of month", "Apr"),
    pgettext_lazy("Short name of month", "May"),
    pgettext_lazy("Short name of month", "Jun"),
    pgettext_lazy("Short name of month", "Jul"),
    pgettext_lazy("Short name of month", "Aug"),
    pgettext_lazy("Short name of month", "Sep"),
    pgettext_lazy("Short name of month", "Oct"),
    pgettext_lazy("Short name of month", "Nov"),
    pgettext_lazy("Short name of month", "Dec"),
]
class MetricsWrapper:
    """Read-only accessor for stored metrics of a single object.

    Wraps :class:`Metric` lookups for a (scope, relation, secondary)
    triple and exposes current values, 30/60-day trends, and the
    daily/monthly activity series used to render dashboard charts.
    """

    def __init__(self, obj, scope: int, relation: int, secondary: int = 0):
        self.obj = obj
        self.scope = scope
        self.relation = relation
        self.secondary = secondary

    @cached_property
    def current(self):
        """Latest metric dictionary for the wrapped object."""
        return Metric.objects.get_current(
            self.obj, self.scope, self.relation, self.secondary
        )

    @cached_property
    def past_30(self):
        """Metric dictionary captured 30 days ago."""
        return Metric.objects.get_past(self.scope, self.relation, self.secondary, 30)

    @cached_property
    def past_60(self):
        """Metric dictionary captured 60 days ago."""
        return Metric.objects.get_past(self.scope, self.relation, self.secondary, 60)

    @property
    def all_words(self):
        return self.current["all_words"]

    @property
    def all(self):
        return self.current["all"]

    @property
    def translated_percent(self):
        """Share of translated strings in percent (0 when there are none)."""
        total = self.all
        if not total:
            return 0
        return 100 * self.current["translated"] / total

    @property
    def contributors(self):
        return self.current.get("contributors", 0)

    def calculate_trend_percent(self, key, modkey, base: Dict, origin: Dict):
        """Return the change (percentage points) of the key/modkey ratio.

        Compares the ``base`` snapshot against the older ``origin``
        snapshot.  Keys missing from either snapshot are treated as 0 —
        older snapshots may predate a metric (``current`` itself reads
        "contributors" via ``.get``), so plain indexing would raise
        ``KeyError``.
        """
        total = base.get(key, 0)
        if not total:
            return 0
        divisor = base.get(modkey, 0)
        if not divisor:
            return 0
        total = 100 * total / divisor
        past = origin.get(key, 0)
        if not past:
            return total
        divisor = origin.get(modkey, 0)
        if not divisor:
            return total
        past = 100 * past / divisor
        return total - past

    def calculate_trend(self, key, base: Dict, origin: Dict):
        """Return relative growth of ``key`` from ``origin`` to ``base``, in percent.

        A key missing from the ``origin`` snapshot counts as 0, so a
        newly introduced metric shows as 100% growth instead of raising
        ``KeyError``.
        """
        total = base.get(key, 0)
        if not total:
            return 0
        return 100 * (total - origin.get(key, 0)) / total

    # 30-day trends: current snapshot vs. the one from 30 days ago.
    @property
    def trend_30_all(self):
        return self.calculate_trend("all", self.current, self.past_30)

    @property
    def trend_30_all_words(self):
        return self.calculate_trend("all_words", self.current, self.past_30)

    @property
    def trend_30_contributors(self):
        return self.calculate_trend("contributors", self.current, self.past_30)

    @property
    def trend_30_translated_percent(self):
        return self.calculate_trend_percent(
            "translated", "all", self.current, self.past_30
        )

    # 60-day trends: snapshot from 30 days ago vs. the one from 60 days ago.
    @property
    def trend_60_all(self):
        return self.calculate_trend("all", self.past_30, self.past_60)

    @property
    def trend_60_all_words(self):
        return self.calculate_trend("all_words", self.past_30, self.past_60)

    @property
    def trend_60_contributors(self):
        return self.calculate_trend("contributors", self.past_30, self.past_60)

    @property
    def trend_60_translated_percent(self):
        return self.calculate_trend_percent(
            "translated", "all", self.past_30, self.past_60
        )

    @property
    def projects(self):
        return self.current["projects"]

    @property
    def languages(self):
        return self.current["languages"]

    @property
    def components(self):
        return self.current["components"]

    @property
    def users(self):
        return self.current["users"]

    @property
    def trend_30_projects(self):
        return self.calculate_trend("projects", self.current, self.past_30)

    @property
    def trend_30_languages(self):
        return self.calculate_trend("languages", self.current, self.past_30)

    @property
    def trend_30_components(self):
        return self.calculate_trend("components", self.current, self.past_30)

    @property
    def trend_30_users(self):
        return self.calculate_trend("users", self.current, self.past_30)

    @property
    def trend_60_projects(self):
        return self.calculate_trend("projects", self.past_30, self.past_60)

    @property
    def trend_60_languages(self):
        return self.calculate_trend("languages", self.past_30, self.past_60)

    @property
    def trend_60_components(self):
        return self.calculate_trend("components", self.past_30, self.past_60)

    @property
    def trend_60_users(self):
        return self.calculate_trend("users", self.past_30, self.past_60)

    def get_daily_activity(self, start, days):
        """Return a {date: change count} mapping for days ending at *start*.

        Stored metrics are fetched in one query; days without a stored
        metric are calculated on the fly.
        """
        kwargs = {
            "scope": self.scope,
            "relation": self.relation,
        }
        if self.secondary:
            kwargs["secondary"] = self.secondary
        result = dict(
            Metric.objects.filter(
                date__in=[start - timedelta(days=i) for i in range(days + 1)],
                kind=METRIC_CHANGES,
                **kwargs,
            ).values_list("date", "value")
        )
        for offset in range(days):
            current = start - timedelta(days=offset)
            if current not in result:
                result[current] = Metric.objects.calculate_changes(
                    date=current,
                    obj=self.obj,
                    **kwargs,
                )
        return result

    @cached_property
    def daily_activity(self):
        """List of 52 daily change counts, oldest first, ending today."""
        today = date.today()
        result = [0] * 52
        for pos, value in self.get_daily_activity(today, 52).items():
            # NOTE(review): the query above covers days + 1 dates, so a
            # stored metric for today-52 would land on index -1 here and
            # overwrite the newest slot — confirm whether intended.
            result[51 - (today - pos).days] = value
        return result

    @cached_property
    def cache_key_prefix(self):
        return f"metrics:{self.scope}:{self.relation}:{self.secondary}"

    def get_month_cache_key(self, year, month):
        return f"{self.cache_key_prefix}:month:{year}:{month}"

    def get_month_activity(self, year, month, cached_results):
        """Return total changes in a month, consulting *cached_results* first."""
        cache_key = self.get_month_cache_key(year, month)
        if cache_key in cached_results:
            return cached_results[cache_key]
        numdays = monthrange(year, month)[1]
        daily = self.get_daily_activity(date(year, month, numdays), numdays - 1)
        result = sum(daily.values())
        # Cache without expiry; past months never change.
        cache.set(cache_key, result, None)
        return result

    @cached_property
    def monthly_activity(self):
        """Chart data for the last 12 full months plus the year before."""
        months = []
        prefetch = []
        # Last day of the previous month is the newest complete month.
        last_month_date = date.today().replace(day=1) - timedelta(days=1)
        month = last_month_date.month
        year = last_month_date.year
        for _dummy in range(12):
            months.append((year, month))
            prefetch.append(self.get_month_cache_key(year, month))
            prefetch.append(self.get_month_cache_key(year - 1, month))
            month -= 1
            if month < 1:
                month = 12
                year -= 1
        # Fetch all cached month totals in a single cache round trip.
        cached_results = cache.get_many(prefetch)
        result = []
        for year, month in reversed(months):
            result.append(
                {
                    "month": month,
                    "year": year,
                    "previous_year": year - 1,
                    "month_name": MONTH_NAMES[month - 1],
                    "start_date": date(year, month, 1),
                    "end_date": date(year, month, monthrange(year, month)[1]),
                    "previous_start_date": date(year - 1, month, 1),
                    "previous_end_date": date(
                        year - 1, month, monthrange(year - 1, month)[1]
                    ),
                    "current": self.get_month_activity(year, month, cached_results),
                    "previous": self.get_month_activity(
                        year - 1, month, cached_results
                    ),
                }
            )
        # Scale bar heights to a 140px chart; the 1 avoids division by zero.
        maximum = max(
            max(item["current"] for item in result),
            max(item["previous"] for item in result),
            1,
        )
        for item in result:
            item["current_height"] = 140 * item["current"] // maximum
            item["current_offset"] = 140 - item["current_height"]
            item["previous_height"] = 140 * item["previous"] // maximum
            item["previous_offset"] = 140 - item["previous_height"]
        return result
/DLTA-AI-1.1.tar.gz/DLTA-AI-1.1/DLTA_AI_app/mmdetection/mmdet/models/necks/nas_fpn.py | import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.ops.merge_cells import GlobalPoolingCell, SumCell
from mmcv.runner import BaseModule, ModuleList
from ..builder import NECKS
@NECKS.register_module()
class NASFPN(BaseModule):
    """NAS-FPN.

    Implementation of `NAS-FPN: Learning Scalable Feature Pyramid Architecture
    for Object Detection <https://arxiv.org/abs/1904.07392>`_

    Args:
        in_channels (List[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale)
        num_outs (int): Number of output scales.
        stack_times (int): The number of times the pyramid architecture will
            be stacked.
        start_level (int): Index of the start input backbone level used to
            build the feature pyramid. Default: 0.
        end_level (int): Index of the end input backbone level (exclusive) to
            build the feature pyramid. Default: -1, which means the last level.
        add_extra_convs (bool): Stored on the module but not otherwise used
            here; extra levels are always produced by stride-2 max pooling
            when ``num_outs`` exceeds the number of backbone levels.
            Default: False.
        norm_cfg (dict, optional): Config for the normalization layers used
            by the lateral convs and merge cells. Default: None (no norm).
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 num_outs,
                 stack_times,
                 start_level=0,
                 end_level=-1,
                 add_extra_convs=False,
                 norm_cfg=None,
                 init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')):
        super(NASFPN, self).__init__(init_cfg)
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)  # num of input feature levels
        self.num_outs = num_outs  # num of output feature levels
        self.stack_times = stack_times
        self.norm_cfg = norm_cfg
        if end_level == -1 or end_level == self.num_ins - 1:
            self.backbone_end_level = self.num_ins
            assert num_outs >= self.num_ins - start_level
        else:
            # if end_level is not the last level, no extra level is allowed
            self.backbone_end_level = end_level + 1
            assert end_level < self.num_ins
            assert num_outs == end_level - start_level + 1
        self.start_level = start_level
        self.end_level = end_level
        self.add_extra_convs = add_extra_convs
        # add lateral connections (1x1 convs projecting each backbone level
        # to a common channel count)
        self.lateral_convs = nn.ModuleList()
        for i in range(self.start_level, self.backbone_end_level):
            l_conv = ConvModule(
                in_channels[i],
                out_channels,
                1,
                norm_cfg=norm_cfg,
                act_cfg=None)
            self.lateral_convs.append(l_conv)
        # add extra downsample layers (stride-2 pooling or conv)
        extra_levels = num_outs - self.backbone_end_level + self.start_level
        self.extra_downsamples = nn.ModuleList()
        for i in range(extra_levels):
            extra_conv = ConvModule(
                out_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None)
            self.extra_downsamples.append(
                nn.Sequential(extra_conv, nn.MaxPool2d(2, 2)))
        # add NAS FPN connections: each stacked stage is a fixed graph of
        # merge cells discovered by the NAS search (see the paper)
        self.fpn_stages = ModuleList()
        for _ in range(self.stack_times):
            stage = nn.ModuleDict()
            # gp(p6, p4) -> p4_1
            stage['gp_64_4'] = GlobalPoolingCell(
                in_channels=out_channels,
                out_channels=out_channels,
                out_norm_cfg=norm_cfg)
            # sum(p4_1, p4) -> p4_2
            stage['sum_44_4'] = SumCell(
                in_channels=out_channels,
                out_channels=out_channels,
                out_norm_cfg=norm_cfg)
            # sum(p4_2, p3) -> p3_out
            stage['sum_43_3'] = SumCell(
                in_channels=out_channels,
                out_channels=out_channels,
                out_norm_cfg=norm_cfg)
            # sum(p3_out, p4_2) -> p4_out
            stage['sum_34_4'] = SumCell(
                in_channels=out_channels,
                out_channels=out_channels,
                out_norm_cfg=norm_cfg)
            # sum(p5, gp(p4_out, p3_out)) -> p5_out
            stage['gp_43_5'] = GlobalPoolingCell(with_out_conv=False)
            stage['sum_55_5'] = SumCell(
                in_channels=out_channels,
                out_channels=out_channels,
                out_norm_cfg=norm_cfg)
            # sum(p7, gp(p5_out, p4_2)) -> p7_out
            stage['gp_54_7'] = GlobalPoolingCell(with_out_conv=False)
            stage['sum_77_7'] = SumCell(
                in_channels=out_channels,
                out_channels=out_channels,
                out_norm_cfg=norm_cfg)
            # gp(p7_out, p5_out) -> p6_out
            stage['gp_75_6'] = GlobalPoolingCell(
                in_channels=out_channels,
                out_channels=out_channels,
                out_norm_cfg=norm_cfg)
            self.fpn_stages.append(stage)

    def forward(self, inputs):
        """Forward function.

        Args:
            inputs (sequence of Tensor): Backbone feature maps, one per
                input scale; indexed from ``self.start_level``.

        Returns:
            tuple: Five feature maps ``(p3, p4, p5, p6, p7)``.
        """
        # build P3-P5
        feats = [
            lateral_conv(inputs[i + self.start_level])
            for i, lateral_conv in enumerate(self.lateral_convs)
        ]
        # build P6-P7 on top of P5
        for downsample in self.extra_downsamples:
            feats.append(downsample(feats[-1]))
        p3, p4, p5, p6, p7 = feats
        # run the stacked NAS-discovered merge graph; each stage rebinds
        # p3..p7 in place for the next stage
        for stage in self.fpn_stages:
            # gp(p6, p4) -> p4_1
            p4_1 = stage['gp_64_4'](p6, p4, out_size=p4.shape[-2:])
            # sum(p4_1, p4) -> p4_2
            p4_2 = stage['sum_44_4'](p4_1, p4, out_size=p4.shape[-2:])
            # sum(p4_2, p3) -> p3_out
            p3 = stage['sum_43_3'](p4_2, p3, out_size=p3.shape[-2:])
            # sum(p3_out, p4_2) -> p4_out
            p4 = stage['sum_34_4'](p3, p4_2, out_size=p4.shape[-2:])
            # sum(p5, gp(p4_out, p3_out)) -> p5_out
            p5_tmp = stage['gp_43_5'](p4, p3, out_size=p5.shape[-2:])
            p5 = stage['sum_55_5'](p5, p5_tmp, out_size=p5.shape[-2:])
            # sum(p7, gp(p5_out, p4_2)) -> p7_out
            p7_tmp = stage['gp_54_7'](p5, p4_2, out_size=p7.shape[-2:])
            p7 = stage['sum_77_7'](p7, p7_tmp, out_size=p7.shape[-2:])
            # gp(p7_out, p5_out) -> p6_out
            p6 = stage['gp_75_6'](p7, p5, out_size=p6.shape[-2:])
        return p3, p4, p5, p6, p7
/Mtrax-2.2.07.zip/Mtrax-2.2.07/mtrax/FlyMovieFormat.py | import sys
import struct
import warnings
import os.path
import numpy as nx
from numpy import nan
import time
import math
# struct format strings for the binary .fmf headers
# ('<' = little-endian; I = uint32, Q = uint64, d = float64).
# version 1 formats:
VERSION_FMT = '<I'          # file format version
FORMAT_LEN_FMT = '<I'       # length of the pixel-format string (version 3)
BITS_PER_PIXEL_FMT = '<I'   # bits per pixel (version 3)
FRAMESIZE_FMT = '<II'       # frame shape, written as (shape[0], shape[1])
CHUNKSIZE_FMT = '<Q'        # bytes per frame chunk (timestamp + pixel data)
N_FRAME_FMT = '<Q'          # total frame count (0 = unknown)
TIMESTAMP_FMT = 'd' # XXX struct.pack('<d',nan) dies -- note: no '<' prefix, so native byte order
# additional version 2 formats:
CHUNK_N_FRAME_FMT = '<Q'
CHUNK_TIMESTAMP_FMT = 'd' # XXX struct.pack('<d',nan) dies -- note: no '<' prefix, so native byte order
CHUNK_DATASIZE_FMT = '<Q'
class NoMoreFramesException( Exception ):
    """Raised when a frame read hits EOF or a short (truncated) frame."""
    pass
class InvalidMovieFileException( Exception ):
    """Raised when a movie file's header cannot be parsed."""
    pass
class FlyMovie:
    """Reader for .fmf (fly movie format) and .sbfmf movie files.

    NOTE(review): this class appears to target Python 2 — reads from the
    binary file are compared against str literals (e.g. ``data == ''``,
    ``version == "0.1"``) and the deprecated ``nx.fromstring`` is used
    throughout.  Confirm the intended interpreter before modifying.
    """

    def __init__(self, filename,check_integrity=False):
        """Open *filename* and parse its header.

        check_integrity -- when True, decrement n_frames until the last
        frame can actually be read (guards against truncated files).
        """
        self.filename = filename
        try:
            self.file = open(self.filename,mode="r+b")
        except IOError:
            # fall back to read-only access
            self.file = open(self.filename,mode="r")
            self.writeable = False
        else:
            self.writeable = True
        # get the extension
        tmp,ext = os.path.splitext(self.filename)
        if ext == '.sbfmf':
            # sbfmf files have a completely different layout
            self.init_sbfmf()
            self.issbfmf = True
            return
        else:
            self.issbfmf = False
        r=self.file.read # shorthand
        t=self.file.tell # shorthand
        size=struct.calcsize
        unpack=struct.unpack
        version_buf = r(size(VERSION_FMT))
        if len(version_buf)!=size(VERSION_FMT):
            raise InvalidMovieFileException("could not read data file")
        version, = unpack(VERSION_FMT,version_buf)
        if version not in (1,3):
            raise NotImplementedError('Can only read version 1 and 3 files')
        if version == 1:
            # version 1 is always 8-bit monochrome
            self.format = 'MONO8'
            self.bits_per_pixel = 8
        elif version == 3:
            # version 3 stores an explicit pixel-format string
            format_len = unpack(FORMAT_LEN_FMT,r(size(FORMAT_LEN_FMT)))[0]
            self.format = r(format_len)
            self.bits_per_pixel = unpack(BITS_PER_PIXEL_FMT,r(size(BITS_PER_PIXEL_FMT)))[0]
        try:
            self.framesize = unpack(FRAMESIZE_FMT,r(size(FRAMESIZE_FMT)))
        except struct.error:
            raise InvalidMovieFileException('file could not be read')
        self.bytes_per_chunk, = unpack(CHUNKSIZE_FMT,r(size(CHUNKSIZE_FMT)))
        self.n_frames, = unpack(N_FRAME_FMT,r(size(N_FRAME_FMT)))
        self.timestamp_len = size(TIMESTAMP_FMT)
        # byte offset of the first frame chunk
        self.chunk_start = self.file.tell()
        self.next_frame = None
        if self.n_frames == 0: # unknown movie length, read to find out
            # seek to end of the movie
            self.file.seek(0,2)
            # get the byte position
            eb = self.file.tell()
            # compute number of frames using bytes_per_chunk
            self.n_frames = int((eb-self.chunk_start)/self.bytes_per_chunk)
            # seek back to the start
            self.file.seek(self.chunk_start,0)
        if check_integrity:
            n_frames_ok = False
            while not n_frames_ok:
                try:
                    self.get_frame(-1)
                    n_frames_ok = True
                except NoMoreFramesException:
                    self.n_frames -= 1
            self.file.seek(self.chunk_start,0) # go back to beginning
        self._all_timestamps = None # cache

    def init_sbfmf(self):
        """Parse the header of an .sbfmf (static-background fmf) file."""
        #try:
        # read the version number
        format = '<I'
        nbytesver, = struct.unpack(format,self.file.read(struct.calcsize(format)))
        version = self.file.read(nbytesver)
        # read header parameters
        format = '<4IQ'
        nr,nc,self.n_frames,difference_mode,self.indexloc = \
            struct.unpack(format,self.file.read(struct.calcsize(format)))
        # read the background image
        self.bgcenter = nx.fromstring(self.file.read(struct.calcsize('<d')*nr*nc),'<d')
        # read the std
        self.bgstd = nx.fromstring(self.file.read(struct.calcsize('<d')*nr*nc),'<d')
        # read the index (per-frame byte offsets stored at self.indexloc)
        ff = self.file.tell()
        self.file.seek(self.indexloc,0)
        self.framelocs = nx.fromstring(self.file.read(self.n_frames*8),'<Q')
        #except:
        #    raise InvalidMovieFileException('file could not be read')
        # NOTE(review): on Python 3 ``version`` is bytes, so this str
        # comparison would always be False — confirm intended interpreter.
        if version == "0.1":
            self.format = 'MONO8'
            self.bits_per_pixel = 8
        self.framesize = (nr,nc)
        self.bytes_per_chunk = None
        self.timestamp_len = struct.calcsize('<d')
        # NOTE: file position is at end of index here, not at frame data
        self.chunk_start = self.file.tell()
        self.next_frame = None
        self._all_timestamps = None # cache

    def close(self):
        """Close the underlying file and invalidate reader state."""
        self.file.close()
        self.writeable = False
        self.n_frames = None
        self.next_frame = None

    def get_width(self):
        # framesize is stored as (shape[0], shape[1]); width is the second
        return self.framesize[1]

    def get_height(self):
        return self.framesize[0]

    def get_n_frames(self):
        return self.n_frames

    def get_format(self):
        return self.format

    def get_bits_per_pixel(self):
        return self.bits_per_pixel

    def read_some_bytes(self,nbytes):
        """Read *nbytes* raw bytes from the current file position."""
        return self.file.read(nbytes)

    def _read_next_frame(self):
        """Read the frame at the current file position.

        Returns (frame, timestamp); raises NoMoreFramesException at EOF
        or on a truncated chunk.
        """
        if self.issbfmf:
            # sbfmf frames are sparse: pixel indices + values applied on
            # top of a copy of the static background image
            format = '<Id'
            npixels,timestamp = struct.unpack(format,self.file.read(struct.calcsize(format)))
            idx = nx.fromstring(self.file.read(npixels*4),'<I')
            v = nx.fromstring(self.file.read(npixels*1),'<B')
            frame = self.bgcenter.copy()
            frame[idx] = v
            frame.shape = self.framesize
        else:
            data = self.file.read( self.bytes_per_chunk )
            # NOTE(review): on Python 3 ``data`` is bytes, so comparison
            # with '' would never match — confirm intended interpreter.
            if data == '':
                raise NoMoreFramesException('EOF')
            if len(data)<self.bytes_per_chunk:
                raise NoMoreFramesException('short frame')
            timestamp_buf = data[:self.timestamp_len]
            timestamp, = struct.unpack(TIMESTAMP_FMT,timestamp_buf)
            frame = nx.fromstring(data[self.timestamp_len:],'<B')
            frame.shape = self.framesize
##        if self.format == 'MONO8':
##            frame = nx.fromstring(data[self.timestamp_len:],nx.uint8)
##            frame.shape = self.framesize
##        elif self.format in ('YUV411','YUV422'):
##            frame = nx.fromstring(data[self.timestamp_len:],nx.uint16)
##            frame.shape = self.framesize
##        elif self.format in ('MONO16',):
##            print 'self.framesize',self.framesize
##            frame = nx.fromstring(data[self.timestamp_len:],nx.uint8)
##            frame.shape = self.framesize
##        else:
##            raise NotImplementedError("Reading not implemented for %s format"%(self.format,))
        return frame, timestamp

    def _read_next_timestamp(self):
        """Read only the timestamp of the frame at the current position."""
        if self.issbfmf:
            format = '<Id'
            self.npixelscurr,timestamp = struct.unpack(format,self.file.read(struct.calcsize(format)))
            return timestamp
        read_len = struct.calcsize(TIMESTAMP_FMT)
        timestamp_buf = self.file.read( read_len )
        self.file.seek( self.bytes_per_chunk-read_len, 1) # seek to next frame
        if timestamp_buf == '':
            raise NoMoreFramesException('EOF')
        timestamp, = struct.unpack(TIMESTAMP_FMT,timestamp_buf)
        return timestamp

    def is_another_frame_available(self):
        """Return True if another frame can be read (pre-reads and buffers it)."""
        try:
            if self.next_frame is None:
                self.next_frame = self._read_next_frame()
        except NoMoreFramesException:
            return False
        return True

    def get_next_frame(self):
        """Return (frame, timestamp), consuming any pre-read buffered frame."""
        if self.next_frame is not None:
            frame, timestamp = self.next_frame
            self.next_frame = None
            return frame, timestamp
        else:
            frame, timestamp = self._read_next_frame()
            return frame, timestamp

    def get_frame(self,frame_number):
        """Seek to *frame_number* (negative counts from end) and read it."""
        if frame_number < 0:
            frame_number = self.n_frames + frame_number
        if frame_number < 0:
            raise IndexError("index out of range (n_frames = %d)"%self.n_frames)
        if self.issbfmf:
            seek_to = self.framelocs[frame_number]
        else:
            seek_to = self.chunk_start+self.bytes_per_chunk*frame_number
        self.file.seek(seek_to)
        self.next_frame = None
        return self.get_next_frame()

    def get_all_timestamps(self):
        """Return an array of every frame's timestamp (cached after first call)."""
        if self._all_timestamps is None:
            self._all_timestamps = []
            if self.issbfmf:
                self.seek(0)
                format = '<Id'
                l = struct.calcsize(format)
                for i in range(self.n_frames):
                    self.seek(i)
                    npixels,timestamp = struct.unpack(format,self.file.read(l))
                    self._all_timestamps.append(timestamp)
            else:
                self.seek(0)
                read_len = struct.calcsize(TIMESTAMP_FMT)
                while 1:
                    timestamp_buf = self.file.read( read_len )
                    self.file.seek( self.bytes_per_chunk-read_len, 1) # seek to next frame
                    # NOTE(review): Python 2 str comparison; bytes under Python 3
                    if timestamp_buf == '':
                        break
                    timestamp, = struct.unpack(TIMESTAMP_FMT,timestamp_buf)
                    self._all_timestamps.append( timestamp )
            self.next_frame = None
            self._all_timestamps = nx.asarray(self._all_timestamps)
        return self._all_timestamps

    def seek(self,frame_number):
        """Position the file at *frame_number* (negative counts from end)."""
        if frame_number < 0:
            frame_number = self.n_frames + frame_number
        if self.issbfmf:
            seek_to = self.framelocs[frame_number]
        else:
            seek_to = self.chunk_start+self.bytes_per_chunk*frame_number
        self.file.seek(seek_to)
        self.next_frame = None

    def get_next_timestamp(self):
        """Return the next frame's timestamp, consuming any buffered frame."""
        if self.next_frame is not None:
            frame, timestamp = self.next_frame
            self.next_frame = None
            return timestamp
        else:
            timestamp = self._read_next_timestamp()
            return timestamp

    def get_frame_at_or_before_timestamp(self, timestamp):
        """Return the latest frame whose timestamp is <= *timestamp*."""
        tss = self.get_all_timestamps()
        at_or_before_timestamp_cond = tss <= timestamp
        # NOTE(review): nx.nonzero returns a tuple of index arrays, so
        # len(nz) and nz[-1] operate on the tuple, not the indices —
        # confirm behavior against the numpy version in use.
        nz = nx.nonzero(at_or_before_timestamp_cond)
        if len(nz)==0:
            raise ValueError("no frames at or before timestamp given")
        fno = nz[-1]
        return self.get_frame(fno)
class FlyMovieSaver:
    """Writer for .fmf movie files (versions 1, 2 and 3).

    NOTE(review): like FlyMovie, this appears to be Python 2 code — str
    values (format strings, compressor tag) are written to a binary
    file, and ``/`` division is used for byte counts; confirm the
    intended interpreter before modifying.
    """

    def __init__(self,
                 filename,
                 version=1,
                 seek_ok=True,
                 compressor=None,
                 comp_level=1,
                 format=None,
                 bits_per_pixel=None,
                 ):
        """create a FlyMovieSaver instance

        arguments:

        filename
        version -- 1, 2, or 3
        seek_ok -- is seek OK on this filename?

        For version 2:
        --------------
        compressor -- None or 'lzo' (only used if version == 2)
        comp_level -- compression level (only used if compressed)

        For version 3:
        --------------
        format -- string representing format (e.g. 'MONO8' or 'YUV422')
        bits_per_pixel -- number of bytes per pixel (MONO8 = 8, YUV422 = 16)

        """
        # filename: default to a .fmf extension when none is given
        path, ext = os.path.splitext(filename)
        if ext == '':
            ext = '.fmf'
        self.filename = path+ext
        # seek_ok: seekable files allow back-patching the frame count on close
        if seek_ok:
            mode = "w+b"
        else:
            mode = "wb"
        self.seek_ok = seek_ok
        self.file = open(self.filename,mode=mode)
        # select the frame-writing strategy for the chosen file version
        if version == 1:
            self.add_frame = self._add_frame_v1
            self.add_frames = self._add_frames_v1
        elif version == 2:
            self.add_frame = self._add_frame_v2
            self.add_frames = self._add_frames_v2
        elif version == 3:
            # version 3 shares the v1 chunk layout; only the header differs
            self.add_frame = self._add_frame_v1
            self.add_frames = self._add_frames_v1
        else:
            raise ValueError('only versions 1, 2, and 3 exist')
        self.file.write(struct.pack(VERSION_FMT,version))
        if version == 2:
            self.compressor = compressor
            if self.compressor is None:
                self.compressor = 'non'
            if self.compressor == 'non':
                self.compress_func = lambda x: x
            elif self.compressor == 'lzo':
                import lzo
                self.compress_func = lzo.compress
            else:
                raise ValueError("unknown compressor '%s'"%(self.compressor,))
            assert type(self.compressor) == str and len(self.compressor)<=4
            # NOTE(review): writes a str to a binary file (Python 2 idiom)
            self.file.write(self.compressor)
        if version == 3:
            if type(format) != str:
                raise ValueError("format must be string (e.g. 'MONO8', 'YUV422')")
            if type(bits_per_pixel) != int:
                raise ValueError("bits_per_pixel must be integer")
            format_len = len(format)
            self.file.write(struct.pack(FORMAT_LEN_FMT,format_len))
            # NOTE(review): writes a str to a binary file (Python 2 idiom)
            self.file.write(format)
            self.file.write(struct.pack(BITS_PER_PIXEL_FMT,bits_per_pixel))
            self.format = format
            self.bits_per_pixel = bits_per_pixel
        else:
            self.format = 'MONO8'
            self.bits_per_pixel = 8
        # framesize is set from the first frame written (see _do_v1_header)
        self.framesize = None
        self.n_frames = 0
        self.n_frame_pos = None

    def _add_frame_v1(self,origframe,timestamp=nan):
        """Append one frame (timestamp + raw pixel data) in v1/v3 layout."""
        # shadows the module-level constant of the same name (same value)
        TIMESTAMP_FMT = 'd' # XXX struct.pack('<d',nan) dies
        frame = nx.asarray(origframe)
        if self.framesize is None:
            self._do_v1_header(frame)
        else:
            if self.framesize != frame.shape:
                raise ValueError('frame shape is now %s, but it used to be %s'%(str(frame.shape),str(self.framesize)))
        b1 = struct.pack(TIMESTAMP_FMT,timestamp)
        self.file.write(b1)
        if hasattr(origframe,'dump_to_file'):
            # fast path: let the frame object write itself to the file
            nbytes = origframe.dump_to_file( self.file )
            assert nbytes == self._bytes_per_image
        else:
            if not hasattr(self,'gave_dump_fd_warning'):
                warnings.warn('could save faster if %s implemented dump_to_file()'%(str(type(origframe)),))
                self.gave_dump_fd_warning = True
            b2 = frame.tostring()
            if len(b2) != self._bytes_per_image:
                raise ValueError("expected buffer of length %d, got length %d (shape %s)"%(self._bytes_per_image,len(b2),str(frame.shape)))
            self.file.write(b2)
        self.n_frames += 1

    def _add_frames_v1(self, frames, timestamps=None):
        """Append many frames in v1/v3 layout with a single write call."""
        if 0:
            for frame, timestamp in zip(frames,timestamps):
                self._add_frame_v1(frame,timestamp)
        else:
            if timestamps is None:
                timestamps = [nan]*len(frames)
            # shadows the module-level constant of the same name (same value)
            TIMESTAMP_FMT = 'd' # XXX struct.pack('<d',nan) dies
            if self.framesize is None:
                self._do_v1_header(frames[0])
            else:
                assert self.framesize == frames[0].shape
            # accumulate all chunks into one buffer, then write once
            mega_buffer = ''
            for frame, timestamp in zip(frames,timestamps):
                b1 = struct.pack(TIMESTAMP_FMT,timestamp)
                mega_buffer += b1
                b2 = frame.tostring()
                assert len(b2) == self._bytes_per_image
                mega_buffer += b2
            self.file.write(mega_buffer)
            self.n_frames += len(frames)

    def _do_v1_header(self,frame):
        """Write the size-dependent part of the v1/v3 header (first frame only)."""
        # first frame
        # frame data are always type uint8, so frame shape (width) varies if data format not MONO8
        self.framesize = frame.shape
        buf = struct.pack(FRAMESIZE_FMT,frame.shape[0],frame.shape[1])
        self.file.write(buf)
        #bits_per_image = frame.shape[0] * frame.shape[1] * self.bits_per_pixel
        bits_per_image = frame.shape[0] * frame.shape[1] * 8
        if bits_per_image % 8 != 0:
            raise ValueError('combination of frame size and bits_per_pixel make non-byte aligned image')
        # NOTE(review): true division — yields a float under Python 3
        self._bytes_per_image = bits_per_image / 8
        bytes_per_chunk = self._bytes_per_image + struct.calcsize(TIMESTAMP_FMT)
        buf = struct.pack(CHUNKSIZE_FMT,bytes_per_chunk)
        self.file.write(buf)
        # remember where the frame count lives so close() can back-patch it
        self.n_frame_pos = self.file.tell()
        buf = struct.pack(N_FRAME_FMT,self.n_frames) # will fill in later
        self.file.write(buf)
        ####### end of header ###########################

    def _add_frames_v2(self, frames, timestamps=None):
        """Append a chunk of frames in the (optionally compressed) v2 layout."""
        if self.framesize is None:
            # header stuff dependent on first frame
            frame = frames[0]
            assert len(frame.shape) == 2 # must be MxN array
            self.framesize = frame.shape
            buf = struct.pack(FRAMESIZE_FMT,frame.shape[0],frame.shape[1])
            self.file.write(buf)
            self.n_frame_pos = self.file.tell() # may fill value later
            buf = struct.pack(N_FRAME_FMT,self.n_frames)
            self.file.write(buf)
            ####### end of header ###########################
        # begin chunk
        chunk_n_frames = len(frames)
        if timestamps is None:
            timestamps = [nan]*chunk_n_frames
        else:
            assert len(timestamps) == chunk_n_frames
        buf = struct.pack(CHUNK_N_FRAME_FMT,chunk_n_frames)
        self.file.write(buf)
        for timestamp in timestamps:
            self.file.write(struct.pack(CHUNK_TIMESTAMP_FMT,timestamp))
        # generate mega string
        bufs = [ frame.tostring() for frame in frames ]
        buf = ''.join(bufs)
        del bufs
        compressed_data = self.compress_func(buf)
        chunk_datasize = len( compressed_data)
        self.file.write(struct.pack(CHUNK_DATASIZE_FMT,chunk_datasize))
        self.file.write(compressed_data)
        self.n_frames += chunk_n_frames

    def _add_frame_v2(self,frame,timestamp=nan):
        # a single v2 frame is just a one-frame chunk
        self._add_frames_v2([frame],[timestamp])

    def close(self):
        """Finish the movie: back-patch the frame count (if seekable) and close."""
        if self.n_frames == 0:
            warnings.warn('no frames in FlyMovie')
            # no frames added
            self.file.close()
            del self.file
            return
        if self.seek_ok:
            self.file.seek( self.n_frame_pos )
            buf = struct.pack(N_FRAME_FMT,self.n_frames) # overwrite the header placeholder with the real count
            self.file.write(buf)
        self.file.close()
        del self.file # make sure we can't access self.file again

    def __del__(self):
        # best-effort close if the caller forgot; close() deletes self.file
        if hasattr(self,'file'):
            self.close()
/Flask-AWSCognito-1.3.tar.gz/Flask-AWSCognito-1.3/flask_awscognito/services/cognito_service.py | from base64 import b64encode
from urllib.parse import quote
import requests
from flask_awscognito.utils import get_state
from flask_awscognito.exceptions import FlaskAWSCognitoError
class CognitoService:
    """Thin wrapper around the AWS Cognito hosted UI / OAuth2 endpoints."""

    def __init__(
        self,
        user_pool_id,
        user_pool_client_id,
        user_pool_client_secret,
        redirect_url,
        region,
        domain,
    ):
        self.user_pool_id = user_pool_id
        self.user_pool_client_id = user_pool_client_id
        self.user_pool_client_secret = user_pool_client_secret
        self.redirect_url = redirect_url
        self.region = region
        # Normalise the domain so it always carries an https:// scheme.
        self.domain = domain if domain.startswith("https://") else f"https://{domain}"

    def get_sign_in_url(self):
        """Build the hosted-UI login URL for the authorization-code flow."""
        state = get_state(self.user_pool_id, self.user_pool_client_id)
        pieces = [
            f"{self.domain}/login",
            "?response_type=code",
            f"&client_id={self.user_pool_client_id}",
            f"&redirect_uri={quote(self.redirect_url)}",
            f"&state={state}",
        ]
        return "".join(pieces)

    def exchange_code_for_token(self, code, requests_client=None):
        """Exchange an authorization code for an access token.

        requests_client -- optional替代 is not used; pass a callable with the
        signature of ``requests.post`` to override the HTTP client
        (useful for testing); defaults to ``requests.post``.
        """
        token_url = f"{self.domain}/oauth2/token"
        payload = {
            "code": code,
            "redirect_uri": self.redirect_url,
            "client_id": self.user_pool_client_id,
            "grant_type": "authorization_code",
        }
        headers = {}
        if self.user_pool_client_secret:
            # Confidential clients authenticate with HTTP Basic auth.
            raw = f"{self.user_pool_client_id}:{self.user_pool_client_secret}"
            secret = b64encode(raw.encode("utf-8")).decode("utf-8")
            headers = {"Authorization": f"Basic {secret}"}
        if not requests_client:
            requests_client = requests.post
        try:
            response = requests_client(token_url, data=payload, headers=headers)
            response_json = response.json()
        except requests.exceptions.RequestException as e:
            raise FlaskAWSCognitoError(str(e)) from e
        if "access_token" not in response_json:
            raise FlaskAWSCognitoError(
                f"no access token returned for code {response_json}"
            )
        return response_json["access_token"]
/ATpy-0.9.7.tar.gz/ATpy-0.9.7/atpy/asciitables.py | from __future__ import print_function, division
import os
from .decorators import auto_download_to_file, auto_decompress_to_fileobj
# Thanks to Moritz Guenther for providing the initial code used to create this file
from astropy.io import ascii
def read_cds(self, filename, **kwargs):
'''
Read data from a CDS table (also called Machine Readable Tables) file
Required Arguments:
*filename*: [ string ]
The file to read the table from
Keyword Arguments are passed to astropy.io.ascii
'''
read_ascii(self, filename, Reader=ascii.Cds, **kwargs)
def read_daophot(self, filename, **kwargs):
'''
Read data from a DAOphot table
Required Arguments:
*filename*: [ string ]
The file to read the table from
Keyword Arguments are passed to astropy.io.ascii
'''
read_ascii(self, filename, Reader=ascii.Daophot, **kwargs)
def read_latex(self, filename, **kwargs):
'''
Read data from a Latex table
Required Arguments:
*filename*: [ string ]
The file to read the table from
Keyword Arguments are passed to astropy.io.ascii
'''
read_ascii(self, filename, Reader=ascii.Latex, **kwargs)
def write_latex(self, filename, **kwargs):
'''
Write data to a Latex table
Required Arguments:
*filename*: [ string ]
The file to write the table to
Keyword Arguments are passed to astropy.io.ascii
'''
write_ascii(self, filename, Writer=ascii.Latex, **kwargs)
def read_rdb(self, filename, **kwargs):
'''
Read data from an RDB table
Required Arguments:
*filename*: [ string ]
The file to read the table from
Keyword Arguments are passed to astropy.io.ascii
'''
read_ascii(self, filename, Reader=ascii.Rdb, **kwargs)
def write_rdb(self, filename, **kwargs):
'''
Write data to an RDB table
Required Arguments:
*filename*: [ string ]
The file to write the table to
Keyword Arguments are passed to astropy.io.ascii
'''
write_ascii(self, filename, Writer=ascii.Rdb, **kwargs)
# astropy.io.ascii can handle file objects
@auto_download_to_file
@auto_decompress_to_fileobj
def read_ascii(self, filename, **kwargs):
'''
Read a table from an ASCII file using astropy.io.ascii
Optional Keyword Arguments:
Reader - Reader class (default= BasicReader )
Inputter - Inputter class
delimiter - column delimiter string
comment - regular expression defining a comment line in table
quotechar - one-character string to quote fields containing special characters
header_start - line index for the header line not counting comment lines
data_start - line index for the start of data not counting comment lines
data_end - line index for the end of data (can be negative to count from end)
converters - dict of converters
data_Splitter - Splitter class to split data columns
header_Splitter - Splitter class to split header columns
names - list of names corresponding to each data column
include_names - list of names to include in output (default=None selects all names)
exclude_names - list of names to exlude from output (applied after include_names)
Note that the Outputter argument is not passed to astropy.io.ascii.
See the astropy.io.ascii documentation at http://docs.astropy.org/en/latest/io/ascii/index.html for more details.
'''
self.reset()
if 'Outputter' in kwargs:
kwargs.pop('Outputter')
table = ascii.read(filename, **kwargs)
for name in table.colnames:
self.add_column(name, table[name])
def write_ascii(self, filename, **kwargs):
'''
Read a table from an ASCII file using astropy.io.ascii
Optional Keyword Arguments:
Writer - Writer class (default= Basic)
delimiter - column delimiter string
write_comment - string defining a comment line in table
quotechar - one-character string to quote fields containing special characters
formats - dict of format specifiers or formatting functions
names - list of names corresponding to each data column
include_names - list of names to include in output (default=None selects all names)
exclude_names - list of names to exlude from output (applied after include_names)
See the astropy.io.ascii documentation at http://docs.astropy.org/en/latest/io/ascii/index.html for more details.
'''
if 'overwrite' in kwargs:
overwrite = kwargs.pop('overwrite')
else:
overwrite = False
if type(filename) is str and os.path.exists(filename):
if overwrite:
os.remove(filename)
else:
raise Exception("File exists: %s" % filename)
ascii.write(self.data, filename, **kwargs) | PypiClean |
class Entity:
    """Minecraft PI entity description. Can be sent to Minecraft.spawnEntity"""

    def __init__(self, id, name=None):
        self.id = id
        self.name = name

    def __cmp__(self, rhs):
        # Python 2 legacy; never invoked by Python 3 but kept so any
        # explicit callers keep working.
        return hash(self) - hash(rhs)

    def __eq__(self, rhs):
        # Entities compare equal by id only (name is ignored).  Returning
        # NotImplemented for foreign types lets Python fall back to its
        # default handling instead of raising AttributeError on
        # ``Entity(1) == 1``.
        if not isinstance(rhs, Entity):
            return NotImplemented
        return self.id == rhs.id

    def __hash__(self):
        # Hash by id, consistent with __eq__.
        return self.id

    def __iter__(self):
        """Allows an Entity to be sent whenever id is needed"""
        return iter((self.id,))

    def __repr__(self):
        return "Entity(%d)" % (self.id)
# Pre-defined Entity constants, one per numeric spawn id, for use with
# Minecraft.spawnEntity. NOTE(review): the id values are assumed to match
# the entity type ids of the server build this addon targets — confirm.
EXPERIENCE_ORB = Entity(2, "EXPERIENCE_ORB")
AREA_EFFECT_CLOUD = Entity(3, "AREA_EFFECT_CLOUD")
ELDER_GUARDIAN = Entity(4, "ELDER_GUARDIAN")
WITHER_SKELETON = Entity(5, "WITHER_SKELETON")
STRAY = Entity(6, "STRAY")
EGG = Entity(7, "EGG")
LEASH_HITCH = Entity(8, "LEASH_HITCH")
PAINTING = Entity(9, "PAINTING")
ARROW = Entity(10, "ARROW")
SNOWBALL = Entity(11, "SNOWBALL")
FIREBALL = Entity(12, "FIREBALL")
SMALL_FIREBALL = Entity(13, "SMALL_FIREBALL")
ENDER_PEARL = Entity(14, "ENDER_PEARL")
ENDER_SIGNAL = Entity(15, "ENDER_SIGNAL")
THROWN_EXP_BOTTLE = Entity(17, "THROWN_EXP_BOTTLE")
ITEM_FRAME = Entity(18, "ITEM_FRAME")
WITHER_SKULL = Entity(19, "WITHER_SKULL")
PRIMED_TNT = Entity(20, "PRIMED_TNT")
HUSK = Entity(23, "HUSK")
SPECTRAL_ARROW = Entity(24, "SPECTRAL_ARROW")
SHULKER_BULLET = Entity(25, "SHULKER_BULLET")
DRAGON_FIREBALL = Entity(26, "DRAGON_FIREBALL")
ZOMBIE_VILLAGER = Entity(27, "ZOMBIE_VILLAGER")
SKELETON_HORSE = Entity(28, "SKELETON_HORSE")
ZOMBIE_HORSE = Entity(29, "ZOMBIE_HORSE")
ARMOR_STAND = Entity(30, "ARMOR_STAND")
DONKEY = Entity(31, "DONKEY")
MULE = Entity(32, "MULE")
EVOKER_FANGS = Entity(33, "EVOKER_FANGS")
EVOKER = Entity(34, "EVOKER")
VEX = Entity(35, "VEX")
VINDICATOR = Entity(36, "VINDICATOR")
ILLUSIONER = Entity(37, "ILLUSIONER")
MINECART_COMMAND = Entity(40, "MINECART_COMMAND")
BOAT = Entity(41, "BOAT")
MINECART = Entity(42, "MINECART")
MINECART_CHEST = Entity(43, "MINECART_CHEST")
MINECART_FURNACE = Entity(44, "MINECART_FURNACE")
MINECART_TNT = Entity(45, "MINECART_TNT")
MINECART_HOPPER = Entity(46, "MINECART_HOPPER")
MINECART_MOB_SPAWNER = Entity(47, "MINECART_MOB_SPAWNER")
CREEPER = Entity(50, "CREEPER")
SKELETON = Entity(51, "SKELETON")
SPIDER = Entity(52, "SPIDER")
GIANT = Entity(53, "GIANT")
ZOMBIE = Entity(54, "ZOMBIE")
SLIME = Entity(55, "SLIME")
GHAST = Entity(56, "GHAST")
PIG_ZOMBIE = Entity(57, "PIG_ZOMBIE")
ENDERMAN = Entity(58, "ENDERMAN")
CAVE_SPIDER = Entity(59, "CAVE_SPIDER")
SILVERFISH = Entity(60, "SILVERFISH")
BLAZE = Entity(61, "BLAZE")
MAGMA_CUBE = Entity(62, "MAGMA_CUBE")
ENDER_DRAGON = Entity(63, "ENDER_DRAGON")
WITHER = Entity(64, "WITHER")
BAT = Entity(65, "BAT")
WITCH = Entity(66, "WITCH")
ENDERMITE = Entity(67, "ENDERMITE")
GUARDIAN = Entity(68, "GUARDIAN")
SHULKER = Entity(69, "SHULKER")
PIG = Entity(90, "PIG")
SHEEP = Entity(91, "SHEEP")
COW = Entity(92, "COW")
CHICKEN = Entity(93, "CHICKEN")
SQUID = Entity(94, "SQUID")
WOLF = Entity(95, "WOLF")
MUSHROOM_COW = Entity(96, "MUSHROOM_COW")
SNOWMAN = Entity(97, "SNOWMAN")
OCELOT = Entity(98, "OCELOT")
IRON_GOLEM = Entity(99, "IRON_GOLEM")
HORSE = Entity(100, "HORSE")
RABBIT = Entity(101, "RABBIT")
POLAR_BEAR = Entity(102, "POLAR_BEAR")
LLAMA = Entity(103, "LLAMA")
LLAMA_SPIT = Entity(104, "LLAMA_SPIT")
PARROT = Entity(105, "PARROT")
VILLAGER = Entity(120, "VILLAGER")
ENDER_CRYSTAL = Entity(200, "ENDER_CRYSTAL") | PypiClean |
/Django-Accounts-0.1.tar.gz/Django-Accounts-0.1/accounts/urls.py |
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls import patterns, url, include
from django.contrib.auth.decorators import login_required
from django.views.generic import TemplateView
from django.contrib.auth import views as django_auth_views
from views import ProfileFormView, SignupFormView, ActivationView, combined_signup_login
from models import ACTIVATION_CODE_REGEX
# URL routes for the accounts app: profile management, signup/activation,
# and wrappers around Django's auth views with app-specific templates.
# NOTE(review): django.conf.urls.patterns() was removed in Django 1.10 —
# this module presumably targets an older Django; confirm before upgrading.
urlpatterns = patterns(
    '',
    # Profile pages (require an authenticated user).
    url(r'^profile/edit/$', login_required(ProfileFormView.as_view()), name='accounts_profile_edit'),
    url(r'^profile/$', login_required(TemplateView.as_view(template_name='accounts/profile_summary.html')), name='accounts_profile_home'),
    # Account activation and signup flow.
    url(r'^activate/(?P<activation_code>' + ACTIVATION_CODE_REGEX + r')/$', ActivationView.as_view(), name='accounts_activation'),
    url(r'^signup/success/$', TemplateView.as_view(template_name='accounts/signup_success.html'), name='accounts_signup_success'),
    url(r'^signup/$', SignupFormView.as_view(), name='accounts_signup_form'),
    # Django auth views with this app's templates supplied as extra kwargs.
    url(r'^login/$', django_auth_views.login, {'template_name': 'auth/login.html'}, name='auth_login'),
    url(r'^logout/$', django_auth_views.logout, {'template_name': 'auth/logged_out.html'}, name='auth_logout'),
    url(r'^password/change/$', django_auth_views.password_change, {'template_name': 'auth/password_change_form.html'}, name='auth_password_change'),
    url(r'^password/change/done/$', django_auth_views.password_change_done, {'template_name': 'auth/password_change_done.html'}, name='auth_password_change_done'),
    url(r'^password/reset/$', django_auth_views.password_reset, {'template_name': 'auth/password_reset_form.html', 'email_template_name': 'auth/password_reset_email.html', 'subject_template_name': 'auth/password_reset_subject.txt'}, name='auth_password_reset'),
    url(r'^password/reset/confirm/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$', django_auth_views.password_reset_confirm, {'template_name': 'auth/password_reset_confirm.html'}, name='auth_password_reset_confirm'),
    url(r'^password/reset/complete/$', django_auth_views.password_reset_complete, {'template_name': 'auth/password_reset_complete.html'}, name='auth_password_reset_complete'),
    url(r'^password/reset/done/$', django_auth_views.password_reset_done, {'template_name': 'auth/password_reset_done.html'}, name='auth_password_reset_done'),
    # This is a combined signup and login form that redirects to the profile page if the user is already logged in.
    url(r'^$', combined_signup_login, name='signup_login_form'),
) | PypiClean |
/FlexTransform-1.2.1-py3-none-any.whl/ISAMarkingExtension/isamarkingsacs30.py | import ISAMarkingExtension.bindings.isamarkings as isa_binding
import stix.data_marking
from stix.data_marking import MarkingStructure
class ISAMarkingStructure30(MarkingStructure):
    '''
    This marking extension was created to apply the SD-EDH Cyber Profile to ISA shared documents.
    This is one of two extensions used to apply the SD-EDH Cyber profile: the ISA Markings Extension and the
    ISA Markings Assertions Extension.
    '''
    _binding = isa_binding
    _binding_class = isa_binding.ISAMarkingStructureType30
    _namespace = 'http://www.us-cert.gov/essa/Markings/ISAMarkings.v2'
    _namespace_xsd = 'ISAMarkingsType.v2.xsd'
    _XSI_TYPE = "edh2cyberMarking:ISAMarkingsType"

    def __init__(self, isam_version=None, identifier=None, createdatetime=None,
                 responsibleentity=None, authref=None):
        super(ISAMarkingStructure30, self).__init__()
        # BUGFIX: the constructor previously accepted these arguments but
        # silently discarded them (and never initialized the attributes),
        # so to_obj()/to_dict() raised AttributeError on a fresh instance.
        # Assign them all through the property setters below.
        self.isam_version = isam_version
        self.identifier = identifier
        self.createdatetime = createdatetime
        self.responsibleentity = responsibleentity
        self.authref = authref

    @staticmethod
    def _as_text(value):
        # Coerce *value* to EDH2Text30; None and existing EDH2Text30
        # instances pass through unchanged.
        # BUGFIX: the original setters used three bare 'if' statements, so a
        # None value fell through to the else branch and was re-wrapped as
        # EDH2Text30(value=None) instead of being stored as None.
        if value is None or isinstance(value, EDH2Text30):
            return value
        return EDH2Text30(value=value)

    @property
    def identifier(self):
        return self._identifier

    @identifier.setter
    def identifier(self, identifier):
        self._identifier = self._as_text(identifier)

    @property
    def createdatetime(self):
        return self._createdatetime

    @createdatetime.setter
    def createdatetime(self, createdatetime):
        self._createdatetime = self._as_text(createdatetime)

    @property
    def responsibleentity(self):
        return self._responsibleentity

    @responsibleentity.setter
    def responsibleentity(self, responsibleentity):
        self._responsibleentity = self._as_text(responsibleentity)

    @property
    def authref(self):
        return self._authref

    @authref.setter
    def authref(self, authref):
        self._authref = self._as_text(authref)

    def to_obj(self, return_obj=None, ns_info=None):
        """Serialize this marking to its generateDS binding object."""
        super(ISAMarkingStructure30, self).to_obj(return_obj=return_obj, ns_info=ns_info)
        # Record the schema location for this extension's namespace.
        ns_info.input_schemalocs.update({self._namespace: self._namespace_xsd})
        obj = self._binding_class()
        MarkingStructure.to_obj(self, return_obj=obj, ns_info=ns_info)
        obj.isam_version = self.isam_version
        if self.identifier:
            obj.identifier = self.identifier.to_obj(ns_info=ns_info)
        if self.createdatetime:
            obj.createdatetime = self.createdatetime.to_obj(ns_info=ns_info)
        if self.responsibleentity:
            obj.responsibleentity = self.responsibleentity.to_obj(ns_info=ns_info)
        if self.authref:
            obj.authref = self.authref.to_obj(ns_info=ns_info)
        return obj

    def to_dict(self):
        """Serialize to a plain dict; only fields that are set are emitted."""
        d = MarkingStructure.to_dict(self)
        if self.isam_version:
            d['isam_version'] = self.isam_version
        if self.identifier:
            d['identifier'] = self.identifier.to_dict()
        if self.createdatetime:
            d['createdatetime'] = self.createdatetime.to_dict()
        if self.responsibleentity:
            d['responsibleentity'] = self.responsibleentity.to_dict()
        if self.authref:
            d['authref'] = self.authref.to_dict()
        return d

    @staticmethod
    def from_obj(obj):
        """Deserialize from a generateDS binding object (None-safe)."""
        if not obj:
            return None
        m = ISAMarkingStructure30()
        MarkingStructure.from_obj(obj, m)
        m.isam_version = obj.isam_version
        m.identifier = EDH2Text30.from_obj(obj.identifier)
        m.createdatetime = EDH2Text30.from_obj(obj.createdatetime)
        m.responsibleentity = EDH2Text30.from_obj(obj.responsibleentity)
        m.authref = EDH2Text30.from_obj(obj.authref)
        return m

    @staticmethod
    def from_dict(marking_dict):
        """Deserialize from a dict produced by to_dict() (None-safe)."""
        if not marking_dict:
            return None
        m = ISAMarkingStructure30()
        MarkingStructure.from_dict(marking_dict, m)
        m.isam_version = marking_dict.get('isam_version')
        if 'identifier' in marking_dict:
            m.identifier = EDH2Text30.from_dict(marking_dict.get('identifier'))
        if 'createdatetime' in marking_dict:
            m.createdatetime = EDH2Text30.from_dict(marking_dict.get('createdatetime'))
        if 'responsibleentity' in marking_dict:
            m.responsibleentity = EDH2Text30.from_dict(marking_dict.get('responsibleentity'))
        if 'authref' in marking_dict:
            m.authref = EDH2Text30.from_dict(marking_dict.get('authref'))
        return m
class ISAMarkingAssertionsStructure30(MarkingStructure):
    '''
    This marking extension was created to apply the SD-EDH Cyber Profile to ISA shared documents.
    This is one of two extensions used to apply the SD-EDH Cyber profile: the ISA Markings Extension and
    the ISA Markings Assertions Extension.
    '''
    # BUGFIX: the base class was previously 'MarkingAssertionsStructure', a
    # name that is never imported or defined in this module, so importing
    # the module raised NameError. Every method here delegates to
    # MarkingStructure, which is the intended base.
    _binding = isa_binding
    _binding_class = isa_binding.ISAMarkingAssertionsStructureType
    _namespace = 'http://www.us-cert.gov/essa/Markings/ISAMarkingAssertions.v2'
    _namespace_xsd = 'ISAMarkingsAssertionsType.v2.xsd'
    _XSI_TYPE = "edh2cyberMarkingAssert:ISAMarkingsAssertionType"

    def __init__(self, isam_version=None, default_marking=None, most_restrictive=None, policyref=None,
                 accessprivilege=None, resourcedisposition=None, controlset=None, originalclassification=None,
                 derivativeclassification=None, declassification=None, publicrelease=None, addlreference=None):
        super(ISAMarkingAssertionsStructure30, self).__init__()
        # Attributes
        self.isam_version = isam_version
        self.default_marking = default_marking
        self.most_restrictive = most_restrictive
        # Children
        self.policyref = policyref
        self.accessprivilege = accessprivilege
        self.resourcedisposition = resourcedisposition
        self.controlset = controlset
        self.originalclassification = originalclassification
        self.derivativeclassification = derivativeclassification
        self.declassification = declassification
        self.publicrelease = publicrelease
        self.addlreference = addlreference

    @staticmethod
    def _as_text(value):
        # Coerce *value* to EDH2Text30; None and existing EDH2Text30
        # instances pass through unchanged.
        # BUGFIX: the original setters used bare 'if' chains, so passing
        # None produced EDH2Text30(value=None) instead of None.
        if value is None or isinstance(value, EDH2Text30):
            return value
        return EDH2Text30(value=value)

    @property
    def policyref(self):
        return self._policyref

    @policyref.setter
    def policyref(self, policyref):
        self._policyref = self._as_text(policyref)

    @property
    def accessprivilege(self):
        return self._accessprivilege

    @accessprivilege.setter
    def accessprivilege(self, accessprivilege):
        self._accessprivilege = self._as_text(accessprivilege)

    @property
    def resourcedisposition(self):
        return self._resourcedisposition

    @resourcedisposition.setter
    def resourcedisposition(self, resourcedisposition):
        self._resourcedisposition = self._as_text(resourcedisposition)

    @property
    def controlset(self):
        return self._controlset

    @controlset.setter
    def controlset(self, controlset):
        self._controlset = self._as_text(controlset)

    @property
    def originalclassification(self):
        return self._originalclassification

    @originalclassification.setter
    def originalclassification(self, originalclassification):
        self._originalclassification = self._as_text(originalclassification)

    @property
    def derivativeclassification(self):
        return self._derivativeclassification

    @derivativeclassification.setter
    def derivativeclassification(self, derivativeclassification):
        self._derivativeclassification = self._as_text(derivativeclassification)

    @property
    def declassification(self):
        return self._declassification

    @declassification.setter
    def declassification(self, declassification):
        self._declassification = self._as_text(declassification)

    @property
    def publicrelease(self):
        return self._publicrelease

    @publicrelease.setter
    def publicrelease(self, publicrelease):
        self._publicrelease = self._as_text(publicrelease)

    @property
    def addlreference(self):
        return self._addlreference

    @addlreference.setter
    def addlreference(self, addlreference):
        # addlreference is a structured child, not free text, so it is not
        # coerced through _as_text.
        if addlreference is None:
            self._addlreference = None
        elif isinstance(addlreference, AddlReference):
            self._addlreference = addlreference
        else:
            raise ValueError("addlreference must be of type AddlReference")

    def to_obj(self, return_obj=None, ns_info=None):
        """Serialize this assertion marking to its generateDS binding object."""
        super(ISAMarkingAssertionsStructure30, self).to_obj(return_obj=return_obj, ns_info=ns_info)
        # Record the schema location for this extension's namespace.
        ns_info.input_schemalocs.update({self._namespace: self._namespace_xsd})
        obj = self._binding_class()
        MarkingStructure.to_obj(self, return_obj=obj, ns_info=ns_info)
        obj.isam_version = self.isam_version
        obj.default_marking = self.default_marking
        obj.most_restrictive = self.most_restrictive
        if self.policyref:
            obj.policyref = self.policyref.to_obj(ns_info=ns_info)
        if self.accessprivilege:
            obj.accessprivilege = self.accessprivilege.to_obj(ns_info=ns_info)
        if self.resourcedisposition:
            obj.resourcedisposition = self.resourcedisposition.to_obj(ns_info=ns_info)
        if self.controlset:
            obj.controlset = self.controlset.to_obj(ns_info=ns_info)
        if self.originalclassification:
            obj.originalclassification = self.originalclassification.to_obj(ns_info=ns_info)
        if self.derivativeclassification:
            obj.derivativeclassification = self.derivativeclassification.to_obj(ns_info=ns_info)
        if self.declassification:
            obj.declassification = self.declassification.to_obj(ns_info=ns_info)
        if self.publicrelease:
            obj.publicrelease = self.publicrelease.to_obj(ns_info=ns_info)
        if self.addlreference:
            obj.addlreference = self.addlreference.to_obj(ns_info=ns_info)
        return obj

    def to_dict(self):
        """Serialize to a plain dict; only fields that are set are emitted."""
        d = MarkingStructure.to_dict(self)
        if self.isam_version:
            d['isam_version'] = self.isam_version
        if self.default_marking:
            d['default_marking'] = self.default_marking
        if self.most_restrictive:
            d['most_restrictive'] = self.most_restrictive
        if self.policyref:
            d['policyref'] = self.policyref.to_dict()
        if self.accessprivilege:
            d['accessprivilege'] = self.accessprivilege.to_dict()
        if self.resourcedisposition:
            d['resourcedisposition'] = self.resourcedisposition.to_dict()
        if self.controlset:
            d['controlset'] = self.controlset.to_dict()
        if self.originalclassification:
            d['originalclassification'] = self.originalclassification.to_dict()
        if self.derivativeclassification:
            d['derivativeclassification'] = self.derivativeclassification.to_dict()
        if self.declassification:
            d['declassification'] = self.declassification.to_dict()
        if self.publicrelease:
            d['publicrelease'] = self.publicrelease.to_dict()
        if self.addlreference:
            d['addlreference'] = self.addlreference.to_dict()
        return d

    @staticmethod
    def from_obj(obj):
        """Deserialize from a generateDS binding object (None-safe)."""
        if not obj:
            return None
        m = ISAMarkingAssertionsStructure30()
        MarkingStructure.from_obj(obj, m)
        m.isam_version = obj.isam_version
        m.default_marking = obj.default_marking
        m.most_restrictive = obj.most_restrictive
        m.policyref = EDH2Text30.from_obj(obj.policyref)
        m.accessprivilege = EDH2Text30.from_obj(obj.accessprivilege)
        m.resourcedisposition = EDH2Text30.from_obj(obj.resourcedisposition)
        m.controlset = EDH2Text30.from_obj(obj.controlset)
        m.originalclassification = EDH2Text30.from_obj(obj.originalclassification)
        m.derivativeclassification = EDH2Text30.from_obj(obj.derivativeclassification)
        m.declassification = EDH2Text30.from_obj(obj.declassification)
        m.publicrelease = EDH2Text30.from_obj(obj.publicrelease)
        m.addlreference = AddlReference.from_obj(obj.addlreference)
        return m

    @staticmethod
    def from_dict(marking_dict):
        """Deserialize from a dict produced by to_dict() (None-safe)."""
        if not marking_dict:
            return None
        m = ISAMarkingAssertionsStructure30()
        MarkingStructure.from_dict(marking_dict, m)
        m.isam_version = marking_dict.get('isam_version')
        m.default_marking = marking_dict.get('default_marking')
        m.most_restrictive = marking_dict.get('most_restrictive')
        if 'policyref' in marking_dict:
            m.policyref = EDH2Text30.from_dict(marking_dict.get('policyref'))
        if 'accessprivilege' in marking_dict:
            m.accessprivilege = EDH2Text30.from_dict(marking_dict.get('accessprivilege'))
        if 'resourcedisposition' in marking_dict:
            m.resourcedisposition = EDH2Text30.from_dict(marking_dict.get('resourcedisposition'))
        if 'controlset' in marking_dict:
            m.controlset = EDH2Text30.from_dict(marking_dict.get('controlset'))
        if 'originalclassification' in marking_dict:
            m.originalclassification = EDH2Text30.from_dict(marking_dict.get('originalclassification'))
        if 'derivativeclassification' in marking_dict:
            m.derivativeclassification = EDH2Text30.from_dict(marking_dict.get('derivativeclassification'))
        if 'declassification' in marking_dict:
            m.declassification = EDH2Text30.from_dict(marking_dict.get('declassification'))
        if 'publicrelease' in marking_dict:
            m.publicrelease = EDH2Text30.from_dict(marking_dict.get('publicrelease'))
        if 'addlreference' in marking_dict:
            m.addlreference = AddlReference.from_dict(marking_dict.get('addlreference'))
        return m
class EDH2Text30(stix.Entity):
    """Simple text value wrapper used by the ISA marking structures."""
    _binding = isa_binding
    _binding_class = isa_binding.EDH2TextType
    _namespace = 'urn:edm:edh:cyber:v3'
    _namespace_xsd = 'SD-EDH_Profile_Cyber.v3.xsd'
    _XSI_NS = "edh2"

    def __init__(self, value=None):
        self.value = value

    def to_obj(self, return_obj=None, ns_info=None):
        """Serialize to the generateDS binding object."""
        super(EDH2Text30, self).to_obj(return_obj=return_obj, ns_info=ns_info)
        # Record the schema location for this namespace.
        ns_info.input_schemalocs.update({self._namespace: self._namespace_xsd})
        text_obj = self._binding_class()
        text_obj.valueOf_ = self.value
        return text_obj

    def to_dict(self):
        # Return a plain string if there is no format specified.
        return self.value

    @classmethod
    def from_obj(cls, text_obj):
        """Deserialize from a binding object (None-safe)."""
        if not text_obj:
            return None
        # Instantiate via cls so subclasses deserialize to their own type.
        text = cls()
        text.value = text_obj.valueOf_
        return text

    @classmethod
    def from_dict(cls, text_dict):
        """Deserialize from a dict or a bare value (None-safe)."""
        if text_dict is None:
            return None
        text = cls()
        if not isinstance(text_dict, dict):
            text.value = text_dict
        else:
            text.value = text_dict.get('value')
        return text

    def __str__(self):
        # BUGFIX: under Python 3 __str__ must return str, but the old
        # implementation returned self.__unicode__().encode("utf-8"),
        # i.e. bytes, which raises TypeError when str() is called.
        return str(self.value)

    def __unicode__(self):
        # Kept for Python 2 compatibility with existing callers.
        return str(self.value)
class AddlReference(stix.Entity):
    """Additional reference: a URL plus an optional free-text comment."""
    _namespace = 'http://www.us-cert.gov/essa/Markings/ISAMarkingAssertions.v2'
    _binding = isa_binding
    _binding_class = isa_binding.AddlReferenceType

    def __init__(self, url=None, comment=None):
        self.url = url
        self.comment = comment

    def to_obj(self, return_obj=None, ns_info=None):
        """Serialize to the generateDS binding object."""
        super(AddlReference, self).to_obj(return_obj=return_obj, ns_info=ns_info)
        obj = self._binding_class()
        if self.url:
            obj.url = self.url
        if self.comment:
            obj.comment = self.comment
        return obj

    def to_dict(self):
        """Serialize to a dict; only fields that are set are emitted."""
        d = {}
        if self.url:
            d['url'] = self.url
        if self.comment:
            # BUGFIX: the key was misspelled 'coment', so from_dict()
            # (which reads 'comment') could never round-trip the value.
            d['comment'] = self.comment
        return d

    @staticmethod
    def from_obj(obj):
        """Deserialize from a binding object (None-safe)."""
        if not obj:
            return None
        a = AddlReference()
        a.url = obj.url
        a.comment = obj.comment
        return a

    @staticmethod
    def from_dict(dict_):
        """Deserialize from a dict produced by to_dict() (None-safe)."""
        if dict_ is None:
            return None
        a = AddlReference()
        a.url = dict_.get('url')
        a.comment = dict_.get('comment')
        return a
# Register the ISA marking extension with the STIX data-marking framework.
stix.data_marking.add_extension(ISAMarkingStructure30)
stix.data_marking.add_extension(ISAMarkingAssertionsStructure30) | PypiClean |
/Mroylib-1.3.0.tar.gz/Mroylib-1.3.0/qlib/file/__file.py | import os
import re
import sys
import time
import zipfile
from hashlib import md5
from termcolor import cprint, colored
from base64 import b64encode, b64decode
from io import BytesIO
from qlib.log import LogControl
LogControl.LOG_LEVEL = LogControl.WRN
j = os.path.join
def to_save(line, ty, root_dir):
    """
    Append *line* (plus a trailing newline) to the file named *ty*
    inside *root_dir*.

    The file is created automatically on first use — mode 'a+' creates a
    missing file — so the old pre-creation pass (open in 'w' mode just to
    touch the file, then reopen) was redundant and has been removed.
    """
    with open(os.path.join(root_dir, ty), "a+") as fp:
        print(line, file=fp)
def file_search(info, fs):
    """
    Yield stripped lines from the files *fs* whose text matches the
    regular expression *info*.

    A banner naming each file is printed to stderr as it is scanned.
    """
    # Hoist the pattern compilation out of the per-line loop; the old code
    # called re.compile() once per line. The unused local 'dic' was removed.
    pattern = re.compile('(%s)' % info)
    for f in fs:
        cprint("--> file: %15s" % colored(f, attrs=['bold']), 'yellow', file=sys.stderr)
        with open(f) as fp:
            for line in fp:
                l = line.strip()
                if pattern.search(l):
                    yield l
def call_vim(tmp_file="/tmp/add.log.tmp.log"):
    """
    Open *tmp_file* in vim for interactive editing, then return the
    file's contents after the editor exits.

    NOTE(review): uses fabric's local() to spawn vim, with running-output
    suppressed; the 0.5s sleep presumably gives the editor time to flush
    the file before it is read back — confirm this is still needed.
    """
    from fabric.api import output
    from fabric.api import local
    output.running = False
    local("vim %s" % tmp_file)
    time.sleep(0.5)
    with open(tmp_file) as fp:
        text = fp.read()
    return text
def fzip(src, dst, direction='zip'):
    """
    Pack *dst* (a file or directory) into the zip archive *src*, or
    unpack archive *src* under directory *dst*.

    @direction:
        'zip/unzip' to decide pack file or unpack file.
        default: 'zip'
    :return: True on success, False on any failure (the error is logged).
    """
    try:
        if direction == 'zip':
            with zipfile.ZipFile(src, 'w', zipfile.ZIP_BZIP2) as f:
                if os.path.isdir(dst):
                    # Archive the whole directory tree.
                    for dirpath, dirnames, filenames in os.walk(dst):
                        for filename in filenames:
                            f.write(os.path.join(dirpath, filename))
                elif os.path.isfile(dst):
                    f.write(dst)
                else:
                    raise OSError("file not exists! ", dst)
        elif direction == 'unzip':
            with zipfile.ZipFile(src, 'r') as zfile:
                for filename in zfile.namelist():
                    target = os.path.join(dst, filename)
                    # mkdir -p for the member's parent directory.
                    parent = '/'.join(target.split("/")[:-1])
                    if not os.path.exists(parent):
                        os.makedirs(parent)
                    data = zfile.read(filename)
                    # BUGFIX: the extracted file handle was opened with a
                    # bare open() and never closed; use a context manager.
                    with open(target, 'w+b') as out:
                        out.write(data)
        else:
            print("no such direction")
            return False
        return True
    except Exception as e:
        LogControl.err(e)
        return False
def zip_64(fpath):
    """
    Zip *fpath* into a temporary archive and return the archive's
    contents encoded as base64 bytes.

    The temp archive name is derived from an md5 of the path so repeated
    calls for the same path reuse the same /tmp file. fzip() opens the
    archive in 'w' mode, which truncates any stale file, so the old
    explicit pre-truncation pass was redundant and has been removed.
    """
    tmp_zip = "/tmp/tmp." + md5(fpath.encode("utf8")).hexdigest()[:10] + ".zip"
    fzip(tmp_zip, fpath)
    with open(tmp_zip, 'rb') as fp:
        return b64encode(fp.read())
def unzip_64(data, fpath, override=False):
    """
    Decode a base64-encoded zip archive and extract it under *fpath*.

    :param data: base64-encoded bytes of a zip archive (as produced by zip_64).
    :param fpath: destination directory; created if it does not exist.
    :param override: accepted for API compatibility; currently unused.
    :return: True on success, False on any failure (errors are logged).
    """
    if not os.path.isdir(fpath):
        try:
            os.makedirs(fpath)
        except OSError as e:
            LogControl.err(e)
            return False
    try:
        zdata = b64decode(data)
    except Exception:
        LogControl.err("not b64 format")
        return False
    bfp = BytesIO(zdata)
    try:
        with zipfile.ZipFile(bfp, 'r') as zfp:
            for filename in zfp.namelist():
                target = os.path.join(fpath, filename)
                # mkdir -p for the member's parent directory.
                parent = '/'.join(target.split("/")[:-1])
                if not os.path.exists(parent):
                    os.makedirs(parent)
                # Read the real data from the zip archive and write it out.
                data_bytes = zfp.read(filename)
                with open(target, 'w+b') as wbfp:
                    wbfp.write(data_bytes)
    except Exception as e:
        LogControl.err(e)
        return False
    return True
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/charting/themes/Electric.js.uncompressed.js | define("dojox/charting/themes/Electric", ["../Theme", "dojox/gfx/gradutils", "./common"], function(Theme, gradutils, themes){
	// Shorthand for the theme gradient generator, plus the shared linear
	// fill descriptor that each series theme below derives its gradient from.
	var g = Theme.generateGradient,
		defaultFill = {type: "linear", space: "shape", x1: 0, y1: 0, x2: 0, y2: 75};

	// "Electric" theme: dark (#252525) chart surfaces with six saturated
	// gradient series colors and matching solid marker colors.
	themes.Electric = new Theme({
		chart: {
			fill: "#252525",
			stroke: {color: "#252525"},
			pageStyle: {backgroundColor: "#252525", backgroundImage: "none", color: "#ccc"}
		},
		plotarea: {
			fill: "#252525"
		},
		axis: {
			stroke: { // the axis itself
				color: "#aaa",
				width: 1
			},
			tick: { // used as a foundation for all ticks
				color: "#777",
				position: "center",
				font: "normal normal normal 7pt Helvetica, Arial, sans-serif", // labels on axis
				fontColor: "#777" // color of labels
			}
		},
		series: {
			stroke: {width: 2, color: "#ccc"},
			outline: null,
			font: "normal normal normal 8pt Helvetica, Arial, sans-serif",
			fontColor: "#ccc"
		},
		marker: {
			stroke: {width: 3, color: "#ccc"},
			outline: null,
			font: "normal normal normal 8pt Helvetica, Arial, sans-serif",
			fontColor: "#ccc"
		},
		// One gradient fill per series, cycled in order.
		seriesThemes: [
			{fill: g(defaultFill, "#004cbf", "#06f")},
			{fill: g(defaultFill, "#bf004c", "#f06")},
			{fill: g(defaultFill, "#43bf00", "#6f0")},
			{fill: g(defaultFill, "#7300bf", "#90f")},
			{fill: g(defaultFill, "#bf7300", "#f90")},
			{fill: g(defaultFill, "#00bf73", "#0f9")}
		],
		// Solid marker colors matching the bright end of each series gradient.
		markerThemes: [
			{fill: "#06f", stroke: {color: "#06f"}},
			{fill: "#f06", stroke: {color: "#f06"}},
			{fill: "#6f0", stroke: {color: "#6f0"}},
			{fill: "#90f", stroke: {color: "#90f"}},
			{fill: "#f90", stroke: {color: "#f90"}},
			{fill: "#0f9", stroke: {color: "#0f9"}}
		]
	});

	// Per-series hook: for line/area plots, temporarily adjust the series
	// fill (plot-space gradient, derived stroke color, taller area gradient)
	// before delegating to Theme.prototype.next, then restore the defaults.
	themes.Electric.next = function(elementType, mixin, doPost){
		var isLine = elementType == "line";
		if(isLine || elementType == "area"){
			// custom processing for lines: substitute colors
			var s = this.seriesThemes[this._current % this.seriesThemes.length];
			s.fill.space = "plot";
			if(isLine){
				s.stroke = { width: 2.5, color: s.fill.colors[1].color};
			}
			if(elementType == "area"){
				s.fill.y2 = 90;
			}
			var theme = Theme.prototype.next.apply(this, arguments);
			// cleanup
			delete s.stroke;
			s.fill.y2 = 75;
			s.fill.space = "shape";
			return theme;
		}
		return Theme.prototype.next.apply(this, arguments);
	};

	// Post-processing hook: reverse radial gradients for pie slices and
	// circles so the highlight faces outward.
	themes.Electric.post = function(theme, elementType){
		theme = Theme.prototype.post.apply(this, arguments);
		if((elementType == "slice" || elementType == "circle") && theme.series.fill && theme.series.fill.type == "radial"){
			theme.series.fill = gradutils.reverse(theme.series.fill);
		}
		return theme;
	};

	return themes.Electric;
}); | PypiClean |
/MakkaPakka-1.0.4.tar.gz/MakkaPakka-1.0.4/src/makka_pakka/integrating/integrate.py | from pathlib import Path
from typing import List
from uuid import uuid4
from makka_pakka import settings
from makka_pakka.exceptions.exceptions import ErrorType
from makka_pakka.exceptions.exceptions import MKPKIntegratingError
from makka_pakka.exceptions.exceptions import MKPKInvalidParameter
from makka_pakka.parsing.parsing_structures import MKPKData
from makka_pakka.parsing.parsing_structures import MKPKDataType
from makka_pakka.processing.processing_structures import MKPKCode
def integrate_makka_pakka(code: MKPKCode, output_filepath: str = "") -> str:
    """
    Runs the integrating phase of makka pakka compilation.

    :param code: The MKPKCode object which is the result of the processing phase.
    :param output_filepath: The filepath to write the integrated code to. This will
        be a random /tmp/{uuid} path if not specified.
    :return: The filepath that the code was written to.
    :raises MKPKInvalidParameter: if either argument has the wrong type.
    """
    if not isinstance(code, MKPKCode):
        raise MKPKInvalidParameter("code", "integrate_makka_pakka", code)
    if not isinstance(output_filepath, str):
        raise MKPKInvalidParameter(
            "output_filepath", "integrate_makka_pakka", output_filepath
        )
    if settings.verbosity:
        print("Integrating...")
    # TODO: Integrate the gadgets here.
    # Format the code into a _start function
    code = _format_code_into_asm_function(code)
    # Format the data, then append it to the bottom of the code,
    # separated from the code by one blank line.
    asm_data: List[str] = _translate_mkpkdata_to_asm(code.data)
    code.code = code.code + [""] + asm_data
    # Finally, write the code to a file.
    return _write_code_to_file(code, output_filepath)
def _write_code_to_file(code: MKPKCode, output_filepath: str = "") -> str:
    """
    Write makka pakka code out to a file, one code line per file line.

    :param code: The MKPKCode obj to write to file.
    :param output_filepath: (Optional) The filepath to write the code to. This will
        be a random /tmp/{uuid} path if not specified.
    :return: The filepath that the code was written to.
    :raises MKPKInvalidParameter: on wrong argument types.
    :raises MKPKIntegratingError: when the file cannot be created.
    """
    if not isinstance(code, MKPKCode):
        raise MKPKInvalidParameter("code", "_write_code_to_file", code)
    if not isinstance(output_filepath, str):
        raise MKPKInvalidParameter(
            "output_filepath", "_write_code_to_file", output_filepath
        )

    # Fall back to a random temporary path when no path was supplied.
    if not output_filepath:
        output_filepath = f"/tmp/{uuid4()}.asm"

    # Validate the path by creating the file up front.
    try:
        Path(output_filepath).touch()
    except FileNotFoundError:
        # Pathlib raises a FileNotFound when there's insufficient permissions
        # to create a file at this path.
        raise MKPKIntegratingError(
            "Couldn't create file.",
            f"Creating a file at the path {output_filepath} is invalid.",
            ErrorType.FATAL,
        )

    with open(output_filepath, "w") as output_file:
        output_file.writelines(line + "\n" for line in code.code)

    return output_filepath
def _translate_mkpkdata_to_asm(data: List[MKPKData]) -> List[str]:
    """
    Translate MKPKData entries into assembly data definitions.

    :param data: The MKPKData objects to translate into assembly.
    :return: A list of assembly strings, one per string-typed entry.
    :raises MKPKInvalidParameter: if *data* is not a list of MKPKData.
    """
    if not isinstance(data, list) or not all(
        isinstance(entry, MKPKData) for entry in data
    ):
        raise MKPKInvalidParameter("data", "_translate_mkpkdata_to_asm", data)

    # Only STR entries become data definitions; INT values are substituted
    # directly at their reference sites instead.
    return [
        f'{entry.name} db "{entry.value}", 0'
        for entry in data
        if entry.type == MKPKDataType.STR
    ]
def _format_code_into_asm_function(code: MKPKCode) -> MKPKCode:
    """
    Wrap the assembly code in a _start entry point so that it can be
    compiled as a standalone binary.

    :param code: The MKPKCode to be formatted into an assembly function.
    :return: The same MKPKCode obj, with its code wrapped in place.
    :raises MKPKInvalidParameter: if *code* is not an MKPKCode.
    """
    if not isinstance(code, MKPKCode):
        raise MKPKInvalidParameter("code", "_format_code_into_asm_function", code)

    # Standard ELF entry-point preamble.
    header: List[str] = [
        "Section .text",
        " global _start",
        "_start:",
    ]
    # Indent every existing code line under the _start label.
    code.code = header + [" " + line for line in code.code]
    return code
/BioSAK-1.72.0.tar.gz/BioSAK-1.72.0/My_Python_scripts/tree_related/tree_ploter.py | from ete3 import Tree, TreeStyle, NodeStyle, TextFace
# tree: tree in newick format
# tree type: species tree or gene tree
# name_list: if one node in name_list, it's name will be displayed in red
def plot_gene_tree(tree, tree_type, gene_name, tree_file_name, name_list, tree_image_folder):
    """
    Render *tree* to a PNG in *tree_image_folder*, highlighting selected leaves.

    :param tree: an ete3 Tree object (NOTE(review): the module comment says
        "tree in newick format", but this function calls tree.traverse() and
        tree.render(), so it expects an already-parsed Tree — confirm).
    :param tree_type: tree kind label ("species tree"/"gene tree"), used in
        the image title and the output file name.
    :param gene_name: gene name shown in the image title.
    :param tree_file_name: basename component of the output PNG file.
    :param name_list: leaf names to display in red; all others are black.
    :param tree_image_folder: directory the PNG is written into.
    """
    # set tree parameters
    ts = TreeStyle()
    ts.mode = "r"  # tree model: 'r' for rectangular, 'c' for circular
    #ts.scale = 50
    ts.show_leaf_name = False  # leaf names are drawn manually below instead
    tree_title = '%s (%s)' % (tree_type, gene_name)  # define tree title
    # tree title text setting
    ts.title.add_face(TextFace(tree_title,
                               fsize = 8,
                               fgcolor = 'black',
                               ftype = 'Arial',
                               tight_text = False),
                      column = 0)
    ts.rotation = 0  # from 0 to 360
    ts.show_scale = False
    ts.margin_top = 10  # top tree image margin
    ts.margin_bottom = 10  # bottom tree image margin
    ts.margin_left = 10  # left tree image margin
    ts.margin_right = 10  # right tree image margin
    ts.show_border = False  # set tree image border
    ts.branch_vertical_margin = 3  # 3 pixels between adjancent branches
    # set tree node style
    for each_node in tree.traverse():
        if each_node.is_leaf():  # leaf node parameters
            # A fresh NodeStyle per leaf so styles are not shared.
            ns = NodeStyle()
            ns["shape"] = "circle"  # dot shape: circle, square or sphere
            ns["size"] = 0  # dot size
            ns['hz_line_width'] = 0.5  # branch line width
            ns['vt_line_width'] = 0.5  # branch line width
            ns['hz_line_type'] = 0  # branch line type: 0 for solid, 1 for dashed, 2 for dotted
            ns['vt_line_type'] = 0  # branch line type
            if each_node.name in name_list:
                # Highlighted leaf: red label.
                ns["fgcolor"] = "red"  # the dot setting
                # the node name text setting
                each_node.add_face(TextFace(each_node.name,
                                            fsize = 8,
                                            fgcolor = 'red',
                                            tight_text = False,
                                            bold = False),
                                   column = 0,
                                   position = 'branch-right')
                each_node.set_style(ns)
            else:
                # Ordinary leaf: black label, blue dot color.
                ns["fgcolor"] = "blue"  # the dot setting
                # the node name text setting
                each_node.add_face(TextFace(each_node.name,
                                            fsize = 8,
                                            fgcolor = 'black',
                                            tight_text = False,
                                            bold = False),
                                   column = 0,
                                   position = 'branch-right')
                each_node.set_style(ns)
        else:  # non-leaf node parameters
            nlns = NodeStyle()
            nlns["size"] = 0  # dot size
            each_node.set_style(nlns)
    # set figures size
    tree.render('%s/%s_%s.png' % (tree_image_folder, tree_type, tree_file_name), w = 900, units = "px", tree_style = ts)
def plot_species_tree(tree_newick, tree_type, gene_name, tree_file_name, name_list, tree_image_folder):
    """Render a species tree (given as a newick string) to a PNG image.

    :param tree_newick: tree in newick format (parsed with ete3 format 8)
    :param tree_type: tree description, used in the image title and file name
    :param gene_name: gene name, used in the image title
    :param tree_file_name: base name of the output image file
    :param name_list: leaf names whose label/dot are highlighted in red
    :param tree_image_folder: output folder for the rendered image
    """
    # set tree parameters
    tree = Tree(tree_newick, format = 8)
    ts = TreeStyle()
    ts.mode = "r"                       # tree model: 'r' for rectangular, 'c' for circular
    ts.show_leaf_name = False           # leaf names are drawn manually below
    tree_title = tree_type + ' (' + gene_name + ')'  # define tree title
    # set tree title text parameters
    ts.title.add_face(TextFace(tree_title,
                               fsize = 8,
                               fgcolor = 'black',
                               ftype = 'Arial',
                               tight_text = False),
                      column = 0)      # tree title text setting
    # set layout parameters
    ts.rotation = 0                     # from 0 to 360
    ts.show_scale = False
    ts.margin_top = 10                  # top tree image margin
    ts.margin_bottom = 10               # bottom tree image margin
    ts.margin_left = 10                 # left tree image margin
    ts.margin_right = 10                # right tree image margin
    ts.show_border = False              # set tree image border
    ts.branch_vertical_margin = 3       # 3 pixels between adjacent branches
    # set tree node style
    for each_node in tree.traverse():
        # leaf node parameters
        if each_node.is_leaf():
            ns = NodeStyle()
            ns['shape'] = 'circle'      # dot shape: circle, square or sphere
            ns['size'] = 0              # dot size
            ns['hz_line_width'] = 0.5   # branch line width
            ns['vt_line_width'] = 0.5   # branch line width
            ns['hz_line_type'] = 0      # branch line type: 0 solid, 1 dashed, 2 dotted
            ns['vt_line_type'] = 0      # branch line type
            # Highlighted leaves get a red dot and red label, all others a
            # blue dot and black label (dedup of the previously duplicated
            # branches).
            if each_node.name in name_list:
                dot_color, label_color = 'red', 'red'
            else:
                dot_color, label_color = 'blue', 'black'
            ns['fgcolor'] = dot_color   # the dot setting
            each_node.add_face(TextFace(each_node.name,
                                        fsize = 8,
                                        fgcolor = label_color,
                                        tight_text = False,
                                        bold = False),
                               column = 0,
                               position = 'branch-right')  # the node name text setting
            each_node.set_style(ns)
        # non-leaf node parameters
        else:
            nlns = NodeStyle()
            nlns['size'] = 0            # dot size
            each_node.add_face(TextFace(each_node.name,
                                        fsize = 4,
                                        fgcolor = 'black',
                                        tight_text = False,
                                        bold = False),
                               column = 5,
                               position = 'branch-top')  # non-leaf node name text setting
            each_node.set_style(nlns)
    # render the figure
    tree.render('%s/%s_%s.png' % (tree_image_folder, tree_type, tree_file_name), w = 900, units = 'px', tree_style = ts)
/InformativeFeatureSelection-3.0.0.tar.gz/InformativeFeatureSelection-3.0.0/feature_extractor/common/DiscriminantAnalysis.py | from abc import ABC, abstractmethod
from typing import Tuple, Union, List, Any
import numpy as np
from scipy import linalg
from numba import njit
from sklearn.covariance import empirical_covariance
def _cov(X):
    """Estimate the empirical covariance matrix of ``X``.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Input data.

    Returns
    -------
    s : ndarray of shape (n_features, n_features)
        Estimated covariance matrix.
    """
    s = empirical_covariance(X)
    return s
def _class_cov(X, y, priors):
    """Compute the prior-weighted within-class covariance matrix.

    The per-class covariances are weighted by the class priors.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Input data.
    y : array-like of shape (n_samples,) or (n_samples, n_targets)
        Target values.
    priors : array-like of shape (n_classes,)
        Class priors, in the order of ``np.unique(y)``.

    Returns
    -------
    cov : array-like of shape (n_features, n_features)
        Weighted within-class covariance matrix.
    """
    n_features = X.shape[1]
    pooled = np.zeros(shape=(n_features, n_features))
    for class_index, label in enumerate(np.unique(y)):
        members = X[y == label, :]
        pooled += priors[class_index] * np.atleast_2d(_cov(members))
    return pooled
@njit
def single_feature_statistic(data: Union[np.ndarray, List[np.ndarray]]) -> Tuple[np.ndarray, np.ndarray, Any]:
    """
    Method optimized for calculating the individual criterion for a single feature.
    Works twice as fast as "numba_calculate_matrices".

    :param data: per-class sample arrays (used by callers when n_features == 1)
    :return: scatter_between value, scatter_within value and individual criteria value
    """
    n_classes = len(data)
    separated_into_classes = data
    # Per-class means and sample counts.
    aig = np.array([np.mean(obj) for obj in separated_into_classes])
    n_k = np.array([class_samples.shape[0] for class_samples in separated_into_classes])
    n = np.sum(n_k)
    # Grand mean, weighted by class sizes.
    wa = np.sum(aig * n_k / n)
    # Between-class scatter: size-weighted squared deviation of class means.
    b = np.sum(n_k * (aig - wa) ** 2)
    # Within-class scatter: squared deviations inside each class.
    w = np.sum(np.array([np.sum((separated_into_classes[i] - aig[i]) ** 2) for i in range(0, n_classes)]))
    # Individual criterion: fraction of total scatter that is between-class.
    _lambda = b / (w + b)
    return b, w, _lambda
@njit
def numba_calculate_matrices(data: Union[np.ndarray, List[np.ndarray]]) -> Tuple[np.ndarray, np.ndarray]:
    """
    The method computes scatter-between and scatter-within matrices using the
    formula provided in Fukunaga's book, with the only difference being
    that it uses multiplication by priors instead of a power of a set.
    However, the result produced by this particular function is different
    from the result that may be obtained using scikit-learn's LDA.
    As of now, no error has been found in the method.

    Useful link with possible sb, sw calculation formulas:
    https://stats.stackexchange.com/questions/123490/what-is-the-correct-formula-for-between-class-scatter-matrix-in-lda
    """
    n_features = data[0].shape[1]
    # Total number of samples over all classes.
    n_samples_total = 0.0
    for class_samples in data:
        n_samples_total += class_samples.shape[0]
    Sb = np.zeros((n_features, n_features))
    Sw = np.zeros((n_features, n_features))
    mean_vectors = np.zeros((len(data), n_features,))
    mean = np.zeros((n_features, 1))
    # Per-class feature means.
    for class_idx, class_samples in enumerate(data):
        for feature_idx in range(n_features):
            mean_vectors[class_idx, feature_idx] = np.mean(class_samples[::, feature_idx])
    # Overall mean, taken over the (unweighted) class means.
    for feature_idx in range(n_features):
        mean[feature_idx] = np.mean(mean_vectors[::, feature_idx])
    # St = np.cov(np.vstack(data))
    # return St - Sw, Sw
    for cl in range(len(data)):
        priors = data[cl].shape[0] / n_samples_total
        if data[cl].shape[1] == 1:
            # np.cov does not work with data of shape (N, 1) =)
            Sw += priors * np.cov(data[cl][::, 0].T)
        else:
            # Sw += data[cl].shape[0] * (data[cl] - mean_vectors[cl]).dot((data[cl] - mean_vectors[cl]).T)
            Sw += priors * np.cov(data[cl])
    # NOTE(review): in the loop below ``priors`` is computed but never
    # applied to Sb -- confirm whether the between-class term should be
    # prior-weighted like Sw above.
    for cl, mean_v in enumerate(mean_vectors):
        priors = data[cl].shape[0] / n_samples_total
        Sb += (mean_v - mean).dot(np.transpose(mean_v - mean))
    return Sb, Sw
@njit
def numba_calculate_individual_criteria(Sb, Sw):
    # Per-feature criterion: diagonal ratio Sb_ii / (Sw_ii + Sb_ii).
    return np.diag(Sb) / (np.diag(Sw) + np.diag(Sb))
@njit
def numba_calculate_group_criteria(Sb, Sw):
    # Group criterion: trace(inv(Sw + Sb) @ Sb); falls back to the
    # pseudo-inverse when Sw + Sb is singular.
    # NOTE(review): the bare ``except`` swallows every error, not just the
    # singular-matrix failure -- kept as-is because numba's exception
    # support inside @njit is limited; confirm before narrowing.
    try:
        return np.trace(np.linalg.inv(Sw + Sb).dot(Sb))
    except:
        return np.trace(np.linalg.pinv(Sw + Sb).dot(Sb))
class BaseDiscriminantAnalysis(ABC):
    """Abstract interface for discriminant-analysis implementations."""

    @abstractmethod
    def calculate_individual_criteria(self, Sb: np.ndarray, Sw: np.ndarray) -> np.array:
        """Return the per-feature (individual) criteria from the scatter matrices."""
        pass

    @abstractmethod
    def calculate_group_criteria(self, Sb: np.ndarray, Sw: np.ndarray) -> float:
        """Return the scalar group criterion from the scatter matrices."""
        pass

    @abstractmethod
    def calculate_matrices(self, data: Union[np.ndarray, List[np.ndarray]]) \
            -> Union[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray, Any]]:
        """Return (Sb, Sw) for the given per-class data; implementations may
        also return a third element (the individual criterion) on the
        single-feature fast path."""
        pass
class DiscriminantAnalysis(BaseDiscriminantAnalysis):
    """
    The first version of DA; all numeric work is delegated to the
    numba-compiled module-level helpers.
    """

    def calculate_individual_criteria(self, Sb: np.ndarray, Sw: np.ndarray) -> np.array:
        """Per-feature criteria from the scatter matrices."""
        return numba_calculate_individual_criteria(Sb, Sw)

    def calculate_group_criteria(self, Sb: np.ndarray, Sw: np.ndarray) -> float:
        """Scalar group criterion from the scatter matrices."""
        return numba_calculate_group_criteria(Sb, Sw)

    def calculate_matrices(self, data: Union[np.ndarray, List[np.ndarray]]) \
            -> Union[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray, Any]]:
        """
        Calculates scatter-between and scatter-within matrices.

        :see Linear discriminant analysis
        :note if data with a single feature is provided, the individual
            criterion is returned as well (the fast path also helps with
            extremely large data)
        :param data: numpy array of shape (n_classes, n_samples, n_features)
            or list of numpy arrays (n_classes, ?, n_features)
        :return: tuple of two numpy arrays representing the scatter-between
            and scatter-within matrices
        """
        single_feature = data[0].shape[1] == 1
        if single_feature:
            return single_feature_statistic(data)
        return numba_calculate_matrices(data)
class DiscriminantAnalysisV2(BaseDiscriminantAnalysis):
    """
    The latest implementation of the DA: Sb and Sw matrices are computed
    in the same way as scikit-learn does (via covariance estimates).

    The group criterion is also computed differently: instead of inverting
    matrices -- which usually produces NaNs and forces a fall-back to the
    pseudo-inverse -- it sums the generalized eigenvalues, which is a bit
    faster and does not produce NaNs.
    """

    def calculate_matrices(self, data: Union[np.ndarray, List[np.ndarray]]) -> Tuple[np.ndarray, np.ndarray]:
        # Single-feature fast path (also returns the individual criterion).
        if data[0].shape[1] == 1:
            return single_feature_statistic(data)
        stacked = np.vstack(data)
        labels = []
        class_priors = []
        total = len(stacked)
        for class_index, class_samples in enumerate(data):
            labels.extend([class_index] * len(class_samples))
            class_priors.append(len(class_samples) / total)
        # Within-class scatter = prior-weighted per-class covariance;
        # between-class scatter = total covariance minus within.
        Sw = _class_cov(stacked, labels, class_priors)
        St = _cov(stacked)
        return St - Sw, Sw

    def calculate_group_criteria(self, Sb: np.ndarray, Sw: np.ndarray) -> float:
        # Sum of the generalized eigenvalues of (Sb, Sw).
        eigenvalues = linalg.eigh(Sb, Sw)[0]
        return np.sum(eigenvalues)

    def calculate_individual_criteria(self, Sb: np.ndarray, Sw: np.ndarray) -> np.array:
        return numba_calculate_individual_criteria(Sb, Sw)
/NREL_sup3r-0.1.0-py3-none-any.whl/sup3r/models/wind_conditional_moments.py | """Wind conditional moment estimator with handling of low and
high res topography inputs."""
import logging
import tensorflow as tf
from sup3r.models.abstract import AbstractWindInterface
from sup3r.models.conditional_moments import Sup3rCondMom
logger = logging.getLogger(__name__)
class WindCondMom(AbstractWindInterface, Sup3rCondMom):
    """Wind conditional moment estimator with handling of low and
    high res topography inputs.

    Modifications to standard Sup3rCondMom:
        - Hi res topography is expected as the last feature channel in the true
          data in the true batch observation.
        - If a custom Sup3rAdder or Sup3rConcat layer (from phygnn) is present
          in the network, the hi-res topography will be added or concatenated
          to the data at that point in the network during either training or
          the forward pass.
    """

    def set_model_params(self, **kwargs):
        """Set parameters used for training the model

        Parameters
        ----------
        kwargs : dict
            Keyword arguments including 'training_features', 'output_features',
            'smoothed_features', 's_enhance', 't_enhance', 'smoothing'
        """
        # The wind-interface hook is invoked unbound (it validates/normalizes
        # kwargs), then the Sup3rCondMom implementation is applied to self.
        AbstractWindInterface.set_model_params(**kwargs)
        Sup3rCondMom.set_model_params(self, **kwargs)

    @tf.function
    def calc_loss(self, hi_res_true, hi_res_gen, mask, **kwargs):
        """Calculate the loss function using generated and true high
        resolution data.

        Parameters
        ----------
        hi_res_true : tf.Tensor
            Ground truth high resolution spatiotemporal data.
        hi_res_gen : tf.Tensor
            Superresolved high resolution spatiotemporal data generated by the
            generative model.
        mask : tf.Tensor
            Mask to apply
        kwargs : dict
            Key word arguments passed through to the parent class's
            calc_loss(hi_res_true, hi_res_gen, mask, **kwargs)

        Returns
        -------
        loss : tf.Tensor
            0D tensor representing the loss value for the network being trained
            (either generator or one of the discriminators)
        loss_details : dict
            Namespace of the breakdown of loss components
        """
        # append the true topography to the generated synthetic wind data
        # (hi-res topography is the last feature channel of hi_res_true)
        hi_res_gen = tf.concat((hi_res_gen, hi_res_true[..., -1:]), axis=-1)
        return super().calc_loss(hi_res_true, hi_res_gen, mask, **kwargs)

    def calc_val_loss(self, batch_handler, loss_details):
        """Calculate the validation loss at the current state of model training

        Parameters
        ----------
        batch_handler : sup3r.data_handling.preprocessing.BatchHandler
            BatchHandler object to iterate through
        loss_details : dict
            Namespace of the breakdown of loss components

        Returns
        -------
        loss_details : dict
            Same as input but now includes val_* loss info
        """
        logger.debug('Starting end-of-epoch validation loss calculation...')
        loss_details['n_obs'] = 0
        for val_batch in batch_handler.val_data:
            # Generate with the true hi-res topography channel as input.
            high_res_gen = self._tf_generate(val_batch.low_res,
                                             val_batch.high_res[..., -1:])
            _, v_loss_details = self.calc_loss(
                val_batch.output, high_res_gen, val_batch.mask)
            # Accumulate a running mean of the per-batch loss components.
            loss_details = self.update_loss_details(loss_details,
                                                    v_loss_details,
                                                    len(val_batch),
                                                    prefix='val_')
        return loss_details
/BalazarBrothers-1.1.tar.gz/BalazarBrothers-1.1/init_editobj.py |
import weakref, time
import soya
import balazar_brothers
import balazar_brothers.main_loop
from balazar_brothers.bonus import *
from balazar_brothers.platform import *
from balazar_brothers.trap import *
from balazar_brothers.level import Level
import editobj, editobj.main, editobj.editor as editor, editobj.custom as custom
import Tkinter, dircache, os, os.path
# Names available when evaluating expressions typed into the editor.
editobj.EVAL_ENV.update({
    "balazar_brothers" : balazar_brothers,
    "bonus"            : balazar_brothers.bonus,
    "platform"         : balazar_brothers.platform,
    "trap"             : balazar_brothers.trap,
})

# Hide these Level attributes from the editobj attribute panel
# (None = no editor widget).
custom.register_attr("round"          , None, Level)
custom.register_attr("active"         , None, Level)
# NOTE(review): "active" is registered twice for Level -- the second call
# appears redundant; confirm and remove.
custom.register_attr("active"         , None, Level)
custom.register_attr("terrain"        , None, Level)
custom.register_attr("static_part"    , None, Level)
custom.register_attr("center"         , None, Level)
custom.register_attr("atmosphere"     , None, Level)
custom.register_attr("mobiles"        , None, Level)
custom.register_attr("parent"         , None, Level)
custom.register_attr("scale_x"        , None, Level)
custom.register_attr("scale_y"        , None, Level)
custom.register_attr("scale_z"        , None, Level)
custom.register_attr("model"          , None, Level)
custom.register_attr("model_builder"  , None, Level)
custom.register_attr("solid"          , None, Level)
custom.register_attr("uid"            , None, Level)
custom.register_attr("visible"        , None, Level)
custom.register_attr("x"              , None, Level)
custom.register_attr("y"              , None, Level)
custom.register_attr("z"              , None, Level)

# Hide these attributes for every edited class.
custom.register_attr("atmosphere"          , None)
custom.register_attr("bot"                 , None)
custom.register_attr("characters_on"       , None)
custom.register_attr("controller"          , None)
custom.register_attr("doer"                , None)
custom.register_attr("interpolate_factor"  , None)
custom.register_attr("label"               , None, PortailPlatform)
custom.register_attr("level"               , None)
custom.register_attr("parent"              , None)
custom.register_attr("platform_position"   , None)
custom.register_attr("player_name"         , None)
custom.register_attr("round"               , None)
custom.register_attr("model_builder"       , None)
custom.register_attr("solid"               , None)
custom.register_attr("state1"              , None)
custom.register_attr("state2"              , None)
custom.register_attr("uid"                 , None)
custom.register_attr("sprite"              , None)
custom.register_attr("animation"           , None, Bonus)
custom.register_attr("attached_coordsysts" , None)
custom.register_attr("attached_meshes"     , None)
custom.register_attr("auto_static"         , None)
custom.register_attr("deforms"             , None)
custom.register_attr("static"              , None)

# ODE stuff
custom.register_attr("angular_velocity"                , None)
custom.register_attr("auto_disable"                    , None)
custom.register_attr("auto_disable_angular_threshold"  , None)
custom.register_attr("auto_disable_linear_threshold"   , None)
custom.register_attr("auto_disable_steps"              , None)
custom.register_attr("auto_disable_time"               , None)
custom.register_attr("cfm"                             , None)
custom.register_attr("contact_max_correcting_velocity" , None)
custom.register_attr("contact_surface_layer"           , None)
custom.register_attr("enabled"                         , None)
custom.register_attr("erp"                             , None)
custom.register_attr("finite_rotation_axis"            , None)
custom.register_attr("finite_rotation_mode"            , None)
custom.register_attr("force"                           , None)
custom.register_attr("geom"                            , None)
custom.register_attr("gravity"                         , None)
custom.register_attr("gravity_mode"                    , None)
custom.register_attr("has_space"                       , None)
custom.register_attr("linear_velocity"                 , None)
custom.register_attr("mass"                            , None)
custom.register_attr("num_joints"                      , None)
custom.register_attr("ode"                             , None)
custom.register_attr("odeWorld"                        , None)
custom.register_attr("ode_parent"                      , None)
custom.register_attr("pushable"                        , None)
custom.register_attr("quickstep_num_iterations"        , None)
custom.register_attr("space"                           , None)
custom.register_attr("state_counter"                   , None)
custom.register_attr("torque"                          , None)
custom.register_attr("water"                           , editor.BoolEditor, Fountain)

# Objects that can be created as children of a Level from the editor
# (expressions evaluated in EVAL_ENV above).
custom.register_available_children([
    "platform.PortailPlatform()",
    "platform.Coqueliform()",
    "platform.RotatingCoqueliform()",
    "platform.FallingCoqueliform()",
    "platform.Fountain()",
    "platform.RotatingFountain()",
    "platform.Mushroom()",
    "platform.RotatingMushroom()",
    "platform.Fasme()",
    "platform.RotatingFasme()",
    "platform.UpDownFasme()",
    "platform.Tower()",
    "platform.RotatingTower()",
    "platform.FallingTower()",
    "platform.StartingPlatform()",
    "platform.Door()",
    "bonus.SmallChest()",
    "bonus.BigChest()",
    "bonus.Key()",
    "trap.Fruit()",
    "trap.RotatingFruit()",
    "trap.Balancelle()",
    "trap.RotatingBalancelle()",
    "trap.Balazarette()",
    "trap.BalazarGris()",
    "platform.TreePompon()",
    "platform.Farm()",
], Level)

# Children of a Level are accessed through add_or_add_mobile/del_or_del_mobile.
custom.register_children_attr("children" , "add_or_add_mobile", "del_or_del_mobile", clazz = Level)

# Level methods callable from the editor UI.
custom.register_method("rotate_incline"  , Level, None)
custom.register_method("rotate_lateral"  , Level, None)
custom.register_method("rotate_vertical" , Level, None)
custom.register_method("random_terrain"  , Level)
def change_camera_mode(level):
    # Toggle the level's custom editor between its two camera modes (0 <-> 1).
    custom_edit = LEVEL_CUSTOM_EDITS[level]
    custom_edit.set_camera_mode(1 - custom_edit.camera_mode)
custom.register_method(change_camera_mode  , Level)

# Disable soya editor
custom.register_on_edit(None, soya.World)

# Value editors for a few attributes.
custom.register_attr("hour"            , editor.FloatEditor)
custom.register_attr("weather"         , editor.ListEditor(["sun", "rain", "snow"]))
custom.register_attr("next_level_name" , editor.LambdaListEditor(lambda obj: Level.availables()))
custom.register_attr("animation"       , editor.ListEditor(["balance", "balance2", "balance3"]), Fruit)

# The currently active LevelCustomEdit (at most one renders at a time).
CURRENT = None
def on_edit_level(level, window):
    # Callback invoked by editobj when a Level is opened in an editor window:
    # create the soya-based custom editor once per level, and make it the
    # active one whenever its Tk window gains focus.
    if not LEVEL_CUSTOM_EDITS.has_key(level):
        ed = LevelCustomEdit(level, window)
        def on_activate(event = None):
            # Only one editor may drive the soya rendering loop at a time.
            global CURRENT
            if CURRENT: CURRENT.set_active(0)
            ed.set_active(1)
            CURRENT = ed
        window.bind("<FocusIn>" , on_activate)
custom.register_on_edit(on_edit_level, Level)

# Maps each edited Level to its LevelCustomEdit; weak keys let closed
# levels be garbage collected.
LEVEL_CUSTOM_EDITS = weakref.WeakKeyDictionary()
class LevelCustomEdit(object):
def __init__(self, level, window):
    """Create the soya-based custom editor for *level*, hosted alongside the
    editobj *window*."""
    self.active = 0
    self.level = level
    self.window = window
    # Wrap the level in a scene with a bright ambient atmosphere.
    self.scene = soya.World()
    self.scene.atmosphere = soya.Atmosphere()
    self.scene.atmosphere.ambient = (0.8, 0.8, 0.8, 1.0)
    self.scene.add(level)
    self.main_loop = soya.MainLoop(self.scene)
    self.camera = soya.Camera(self.scene)
    self.camera.back = 250.0
    # Two saved camera matrices: index 0 = top-down view (mode 0),
    # index 1 = front view (mode 1).
    self.camera_matrices = []
    self.camera.set_xyz(0.0, 50.0, 0.0)
    self.camera.look_at(soya.Vector(self.scene, 0.0, -1.0, 0.0))
    self.camera_matrices.append(self.camera.matrix)
    self.camera.set_identity()
    self.camera.set_xyz(0.0, 0.0, 50.0)
    self.camera_matrices.append(self.camera.matrix)
    self.camera_mode = 0
    self.camera.matrix = self.camera_matrices[0]
    # Mode 0 renders orthographic.
    self.camera.ortho = 1
    self.camera.fov = 1000.0
    # Editing state: selected objects, pressed mouse button, last mouse
    # position (3D), last raypick impact, and double-click timing.
    self.selections = set()
    self.button_down = 0
    self.mouse = soya.Point()
    self.impact = None
    self.last_click_time = 0
    LEVEL_CUSTOM_EDITS[level] = self
def set_camera_mode(self, camera_mode):
    """Switch camera mode: 0 is the orthographic view, 1 the perspective
    view. The current matrix is saved and the target mode's restored."""
    # Save the matrix of the mode being left, then restore the new one.
    self.camera_matrices[self.camera_mode] = self.camera.matrix
    self.camera_mode = camera_mode
    self.camera.matrix = self.camera_matrices[camera_mode]
    orthographic = (camera_mode == 0)
    self.camera.ortho = 1 if orthographic else 0
    self.camera.fov = 1000.0 if orthographic else 60.0
def set_active(self, active):
    """Enable or disable this editor's soya rendering/update loop."""
    if active == self.active:
        return
    self.active = active
    if active:
        soya.set_root_widget(self.camera)
        self.update_soya()
    else:
        # Stop the pending Tk timer scheduled by update_soya().
        self.window.after_cancel(self.cancel)
def update_soya(self):
    """Periodic Tk callback: advance the soya main loop and dispatch events."""
    # Re-arm the 50 ms timer (handle kept so set_active can cancel it).
    self.cancel = self.window.after(50, self.update_soya)
    if not self.active:
        return
    self.main_loop.update()
    pending = soya.coalesce_motion_event(soya.process_event())
    for event in pending:
        self.on_event(event)
def on_event(self, event):
    """Dispatch one SDL event: drag to move (button 1), rotate (button 3),
    clone (button 2), pick/select on click, and move the camera on arrow
    and +/- keys (SHIFT slows the motion)."""
    #print event
    if event[0] == soya.sdlconst.MOUSEMOTION:
        if self.button_down == 1: # Move
            # Project the cursor at the depth of the last impact (or 0).
            new_mouse = self.camera.coord2d_to_3d(event[1], event[2], (self.impact and self.impact.z) or 0.0)
            v = self.mouse >> new_mouse
            for obj in self.selections: obj += v
            self.mouse = new_mouse
        elif self.button_down == 3: # Rotate
            new_mouse = self.camera.coord2d_to_3d(event[1], event[2], (self.impact and self.impact.z) or 0.0)
            v = self.mouse >> new_mouse
            for obj in self.selections:
                if not isinstance(obj, soya.Terrain):
                    # Turn each selected object to face the cursor, at the
                    # object's own depth.
                    obj.look_at(self.camera.coord2d_to_3d(event[1], event[2], (obj % self.camera).z))
            self.mouse = new_mouse
    elif event[0] == soya.sdlconst.MOUSEBUTTONDOWN:
        if self.last_click_time + 0.2 > time.time():
            # Double click (within 0.2 s): open the property editor for the
            # first selection, or for the level itself.
            self.last_click_time = time.time()
            if self.selections: self.window.edit(list(self.selections)[0])
            else: self.window.edit(self.level)
            return
        self.last_click_time = time.time()
        self.button_down = event[1]
        if self.button_down == 2: # Repeat add
            # Middle click: clone the current selection (or the level's last
            # child), place the clone under the cursor, then fall through to
            # the move tool.
            if self.selections: to_clone = list(self.selections)[0]
            else: to_clone = self.level.children[-1]
            obj = to_clone.__class__()
            self.level.add_or_add_mobile(obj)
            self.mouse = self.camera.coord2d_to_3d(event[2], event[3], (self.impact and self.impact.z) or 0.0)
            obj.move(self.mouse); obj.y = to_clone.y
            self.button_down = 1 # continue with the move tool
        # Without SHIFT the click replaces the selection.
        if not(soya.get_mod() & soya.sdlconst.MOD_SHIFT): self.selections = set()
        self.mouse = self.camera.coord2d_to_3d(event[2], event[3])
        # Pick straight down in ortho mode, along the view ray otherwise.
        if self.camera.ortho: result = self.scene.raypick(self.mouse, soya.Vector(self.scene, 0.0, -1.0, 0.0))
        else: result = self.scene.raypick(self.camera, self.camera.vector_to(self.mouse))
        #print self.mouse % self.scene
        if result:
            self.impact, normal = result
            selection = self.impact
            # Walk up to the direct child of the level: that is the
            # selectable unit.
            while not selection.parent is self.level: selection = selection.parent
            self.selections.add(selection)
            self.impact.convert_to(self.camera)
            self.mouse = self.camera.coord2d_to_3d(event[2], event[3], self.impact.z)
            #print self.selections
    elif event[0] == soya.sdlconst.MOUSEBUTTONUP:
        self.button_down = 0
    elif event[0] == soya.sdlconst.KEYDOWN:
        # Left, right, up, down, +, -
        # (the keycodes are SDL arrow/keypad codes; with SHIFT the step
        # shrinks from 10.0 to 1.0)
        if   event[1] == 276: self.camera += soya.Vector(self.camera, -10.0 + 9.0 * (soya.get_mod() & soya.sdlconst.MOD_SHIFT), 0.0, 0.0)
        elif event[1] == 275: self.camera += soya.Vector(self.camera,  10.0 - 9.0 * (soya.get_mod() & soya.sdlconst.MOD_SHIFT), 0.0, 0.0)
        elif event[1] == 273: self.camera += soya.Vector(self.camera, 0.0,  10.0 - 9.0 * (soya.get_mod() & soya.sdlconst.MOD_SHIFT), 0.0)
        elif event[1] == 274: self.camera += soya.Vector(self.camera, 0.0, -10.0 + 9.0 * (soya.get_mod() & soya.sdlconst.MOD_SHIFT), 0.0)
        elif event[1] == 270: self.camera += soya.Vector(self.camera, 0.0, 0.0, -10.0 + 9.0 * (soya.get_mod() & soya.sdlconst.MOD_SHIFT))
        elif event[1] == 269: self.camera += soya.Vector(self.camera, 0.0, 0.0,  10.0 - 9.0 * (soya.get_mod() & soya.sdlconst.MOD_SHIFT))
/OWSLib-0.29.2.tar.gz/OWSLib-0.29.2/owslib/swe/observation/waterml2.py | from owslib.util import nspath_eval
from owslib.namespaces import Namespaces
from owslib.util import testXMLAttribute, testXMLValue
from owslib.swe.common import Quantity
from dateutil import parser
from owslib.swe.observation.om import OM_Observation, Result
def get_namespaces():
    """Return the prefix -> URI namespace mapping used by this module."""
    wanted = ["swe20", "xlink", "sos20", "om20", "gml32", "xsi", "wml2"]
    return Namespaces().get_namespaces(wanted)
namespaces = get_namespaces()
def nspv(path):
    # Shorthand: expand namespace prefixes in *path* using the module map.
    return nspath_eval(path, namespaces)
class MeasurementTimeseriesObservation(OM_Observation):
    '''A timeseries observation whose result is a measurement timeseries.
    An implementation of the WaterML2 MeasurementTimeseriesObservation.'''

    def __init__(self, element):
        super(MeasurementTimeseriesObservation, self).__init__(element)
        self._parse_result()

    def _parse_result(self):
        '''Replace the raw result element with a parsed MeasurementTimeseries.'''
        if self.result is None:
            return
        raw = self.result.find(nspv("wml2:MeasurementTimeseries"))
        self.result = MeasurementTimeseries(raw)

    def get_result(self):
        '''Return the (parsed) observation result.'''
        return self.result
class Timeseries(Result):
    ''' Generic time-series class '''
    def __init__(self, element):
        # Delegates all parsing to the O&M Result base class.
        super(Timeseries, self).__init__(element)
class MeasurementTimeseries(Timeseries):
    '''A WaterML2.0 timeseries of measurements, with per-value metadata.

    Iterating the instance yields its TimeValuePair points.
    '''

    def __init__(self, element):
        super(MeasurementTimeseries, self).__init__(element)
        self.defaultTVPMetadata = TVPMeasurementMetadata(element.find(
            nspv("wml2:defaultPointMetadata/wml2:DefaultTVPMeasurementMetadata")))
        # One TimeValuePair per wml2:point element.
        self.points = [TimeValuePair(point)
                       for point in element.findall(nspv("wml2:point"))]

    def __iter__(self):
        return iter(self.points)

    def __len__(self):
        return len(self.points)

    def _parse_metadata(self, element):
        '''Placeholder for timeseries metadata parsing:
        TS: baseTime, spacing, commentBlock, parameter
        MTS: startAnchor, endAnchor, cumulative, accAnchor/Length, maxGap
        '''
        pass
class TimeValuePair(object):
    ''' A time-value pair as specified by WaterML2.0.

    The timestamp must parse (otherwise ValueError is raised); a missing or
    unparseable value is stored as NaN (deliberate best-effort behaviour).
    Currently no support for tvp metadata.
    '''
    def __init__(self, element):
        date_str = testXMLValue(
            element.find(nspv("wml2:MeasurementTVP/wml2:time")))
        try:
            self.datetime = parser.parse(date_str)
        except (TypeError, ValueError, OverflowError):
            # Narrowed from a bare ``except Exception``: these are the
            # failure modes of dateutil's parser for missing/bad input.
            raise ValueError("Error parsing datetime string: %s" % date_str)

        value_str = testXMLValue(element.find(nspv(
            "wml2:MeasurementTVP/wml2:value")))
        try:
            self.value = float(value_str)
        except (TypeError, ValueError):
            # float() raises TypeError for None, ValueError for bad text.
            self.value = float('nan')

    def __str__(self):
        # "<iso-ish datetime>,<value>"
        return str(self.datetime) + "," + str(self.value)
class TVPMetadata(object):
    def __init__(self, element):
        ''' Base time-value pair metadata. Still to do:
            - relatedObservation
        '''
        self.quality = testXMLAttribute(element.find(nspv(
            "wml2:quality")), nspv("xlink:href"))
        self.nilReason = testXMLAttribute(element.find(nspv(
            "wml2:nilReason")), nspv("xlink:href"))
        self.comment = testXMLValue(element.find(nspv(
            "wml2:comment")))
        self.qualifier = testXMLAttribute(element.find(nspv(
            "wml2:qualifier")), nspv("xlink:href"))
        # Fixed: processing and source are xlink:href references like
        # quality/nilReason/qualifier above, but were read with
        # testXMLValue(el, href) -- whose second argument is a boolean
        # 'attrib' flag, not an attribute name -- so the href could never
        # be returned.
        self.processing = testXMLAttribute(element.find(nspv(
            "wml2:processing")), nspv("xlink:href"))
        self.source = testXMLAttribute(element.find(nspv(
            "wml2:source")), nspv("xlink:href"))
class TVPMeasurementMetadata(TVPMetadata):
    ''' Measurement specific metadata. Still to do:
        - aggregationDuration
    '''
    def __init__(self, element):
        super(TVPMeasurementMetadata, self).__init__(element)
        self.uom = testXMLAttribute(element.find(nspv(
            "wml2:uom")), "code")
        self.interpolationType = testXMLAttribute(element.find(nspv(
            "wml2:interpolationType")), nspv("xlink:href"))
        # Fixed: the xlink:href attribute must be namespace-expanded with
        # nspv() (as for interpolationType above); the bare "xlink:href"
        # string never matches the namespaced attribute name.
        self.censoredReason = testXMLAttribute(element.find(nspv(
            "wml2:censoredReason")), nspv("xlink:href"))

        accuracy = testXMLValue(element.find(nspv("wml2:accuracy")))
        if accuracy is not None:
            # NOTE(review): Quantity is built from the parent element here,
            # not from the wml2:accuracy element -- confirm this is intended.
            self.accuracy = Quantity(element)
class MeasurementTimeseriesDomainRange(Timeseries):
    ''' Class to implement domain range timeseries encoding '''
    def __init__(self, element):
        # Fixed: the previous call was ``super(Cls, self, element).__init__()``,
        # which passes three arguments to super() and raises TypeError, and
        # never forwarded ``element`` to the base constructor.
        super(MeasurementTimeseriesDomainRange, self).__init__(element)
class MonitoringPoint(object):
    ''' A WaterML2.0 Monitoring Point, which is a specialised O&M SamplingPoint
    '''
    def __init__(self, element):
        # Parsing not implemented yet; the element is accepted and ignored.
        pass
/Driver_zch-3.2.0.tar.gz/Driver_zch-3.2.0/zhouch23/notifier.py | from typing import Iterable, List, Optional, Union
from can.bus import BusABC
from can.listener import Listener
from can.message import Message
import threading
import logging
import time
import asyncio
logger = logging.getLogger("can.Notifier")
class Notifier:
def __init__(
self,
bus: BusABC,
listeners: Iterable[Listener],
timeout: float = 1.0,
loop: Optional[asyncio.AbstractEventLoop] = None,
):
"""Manages the distribution of :class:`can.Message` instances to listeners.
Supports multiple buses and listeners.
.. Note::
Remember to call `stop()` after all messages are received as
many listeners carry out flush operations to persist data.
:param bus: A :ref:`bus` or a list of buses to listen to.
:param listeners: An iterable of :class:`~can.Listener`
:param timeout: An optional maximum number of seconds to wait for any message.
:param loop: An :mod:`asyncio` event loop to schedule listeners in.
"""
self.listeners = list(listeners)
self.bus = bus
self.timeout = timeout
self._loop = loop
#: Exception raised in thread
self.exception: Optional[Exception] = None
self._running = True
self._lock = threading.Lock()
self._readers: List[Union[int, threading.Thread]] = []
buses = self.bus if isinstance(self.bus, list) else [self.bus]
for bus in buses:
self.add_bus(bus)
def add_bus(self, bus: BusABC):
"""Add a bus for notification.
:param bus:
CAN bus instance.
"""
if (
self._loop is not None
and hasattr(bus, "fileno")
and bus.fileno() >= 0 # type: ignore
):
# Use file descriptor to watch for messages
reader = bus.fileno() # type: ignore
self._loop.add_reader(reader, self._on_message_available, bus)
else:
reader = threading.Thread(
target=self._rx_thread,
args=(bus,),
name='can.notifier for bus "{}"'.format(bus.channel_info),
)
reader.daemon = True
reader.start()
self._readers.append(reader)
def stop(self, timeout: float = 5):
"""Stop notifying Listeners when new :class:`~can.Message` objects arrive
and call :meth:`~can.Listener.stop` on each Listener.
:param timeout:
Max time in seconds to wait for receive threads to finish.
Should be longer than timeout given at instantiation.
"""
self._running = False
end_time = time.time() + timeout
for reader in self._readers:
if isinstance(reader, threading.Thread):
now = time.time()
if now < end_time:
reader.join(end_time - now)
elif self._loop:
# reader is a file descriptor
self._loop.remove_reader(reader)
for listener in self.listeners:
if hasattr(listener, "stop"):
listener.stop()
def _rx_thread(self, bus: BusABC):
msg = None
try:
while self._running:
if msg is not None:
with self._lock:
if self._loop is not None:
self._loop.call_soon_threadsafe(
self._on_message_received, msg
)
else:
self._on_message_received(msg)
msg = bus.recv(self.timeout)
except Exception as exc:
self.exception = exc
if self._loop is not None:
self._loop.call_soon_threadsafe(self._on_error, exc)
raise
elif not self._on_error(exc):
raise
def _on_message_available(self, bus: BusABC):
msg = bus.recv(0)
if msg is not None:
self._on_message_received(msg)
def _on_message_received(self, msg: Message):
for callback in self.listeners:
res = callback(msg)
if self._loop is not None and asyncio.iscoroutine(res):
# Schedule coroutine
self._loop.create_task(res)
def _on_error(self, exc: Exception) -> bool:
listeners_with_on_error = [
listener for listener in self.listeners if hasattr(listener, "on_error")
]
for listener in listeners_with_on_error:
listener.on_error(exc)
return bool(listeners_with_on_error)
def add_listener(self, listener: Listener):
"""Add new Listener to the notification list.
If it is already present, it will be called two times
each time a message arrives.
:param listener: Listener to be added to the list to be notified
"""
self.listeners.append(listener)
def remove_listener(self, listener: Listener):
    """Unregister *listener* from the notification list.

    :param listener: Listener to be removed from the list to be notified.
    :raises ValueError: if ``listener`` was never added to this notifier.
    """
    self.listeners.remove(listener)
/KratosShallowWaterApplication-9.4-cp311-cp311-win_amd64.whl/KratosMultiphysics/ShallowWaterApplication/benchmarks/mac_donald_shock_benchmark.py | import KratosMultiphysics as KM
import KratosMultiphysics.ShallowWaterApplication as SW
from KratosMultiphysics.ShallowWaterApplication.benchmarks.base_benchmark_process import BaseBenchmarkProcess
from KratosMultiphysics.process_factory import Factory as ProcessFactory
# Other imports
import numpy as np
from scipy.integrate import odeint
def Factory(settings, model):
    """Entry point called by the Kratos process factory.

    :param settings: ``KM.Parameters`` object wrapping the process settings.
    :param model: The model containing the model parts.
    :return: A configured :class:`MacDonaldShockBenchmark` process.
    """
    if isinstance(settings, KM.Parameters):
        return MacDonaldShockBenchmark(model, settings["Parameters"])
    raise Exception("expected input shall be a Parameters object, encapsulating a json string")
class MacDonaldShockBenchmark(BaseBenchmarkProcess):
    """Mac Donald's shock benchmark.

    This is a Mac Donald's type solution with a smooth transition and a shock in a
    short domain, with Manning's friction coefficient.

    The length of the channel is 100m and the discharge at steady state is q=2m^2/s. The flow
    is fluvial both upstream and downstream, the boundary conditions are fixed as follows:
        - upstream: q=2m^2/s
        - downstream: h=h_ex(100)
    This process sets the upstream and downstream boundary conditions.

    Reference:
    O. Delestre, C. Lucas, P.-A. Ksinant, F. Darboux, C. Laguerre, T.N.T. Vo, F. James, S. Cordier
    SWASHES: a compilation of Shallow Water Analytic Solutions for Hydraulic and Environmental Studies
    International Journal for Numerical Methods in Fluids, Wiley, 2013, 72 (3), pp.269-300.
    """

    def __init__(self, model, settings):
        """Constructor of the benchmark.

        The base class validates the settings and sets the model_part, the
        variables and the benchmark_settings.
        """
        super().__init__(model, settings)
        # Physical data: Manning roughness, steady discharge, gravity.
        self.n = self.settings["benchmark_settings"]["manning"].GetDouble()
        self.q = self.settings["benchmark_settings"]["discharge"].GetDouble()
        self.g = self.model_part.ProcessInfo[KM.GRAVITY_Z]
        # Channel bounds and the analytic depths at both ends.
        self.x0 = 0
        self.x100 = 100
        self.h0 = self._H(self.x0)
        self.h100 = self._H(self.x100)
        self.__PreComputeTopography()

    @staticmethod
    def _GetBenchmarkDefaultSettings():
        # Default parameter values taken from the SWASHES benchmark setup.
        return KM.Parameters("""
            {
                "discharge" : 2,
                "manning" : 0.0328,
                "upstream_model_part" : "model_part.upstream",
                "downstream_model_part" : "model_part.downstream"
            }
            """
        )

    def _Topography(self, coordinates):
        """Bed elevation z at the given node coordinates."""
        x = coordinates.X
        return self._Z(x)

    def _Height(self, coordinates, time):
        """Water depth: analytic steady profile for t > 0, initial state at t = 0."""
        x = coordinates.X
        if time > 0:
            return self._H(x)
        else:
            return self._InitialH(x)

    def _Momentum(self, coordinates, time):
        """Momentum [qx, qy, qz]: constant discharge at steady state, rest initially."""
        if time > 0:
            return [self.q, 0.0, 0.0]
        else:
            return [0.0, 0.0, 0.0]

    def _Velocity(self, coordinates, time):
        """Velocity, i.e. momentum divided by depth, component-wise."""
        return [q / self._Height(coordinates, time) for q in self._Momentum(coordinates, time)]

    def Check(self):
        """This method checks if the input values have physical sense."""
        super().Check()
        label = self.__class__.__name__
        if self.g <= 0:
            msg = label + "Gravity must be a positive value. Please, check the definition of GRAVITY_Z component in the ProcessInfo."
            raise Exception(msg)
        elif self.n < 0:
            msg = label + "The manning coefficient must be a positive value. Please, check the Parameters."
            raise Exception(msg)
        elif self.q <= 0:
            msg = label + "The discharge must be a positive value. Please, check the Parameters."
            raise Exception(msg)
        self._CheckDomain()

    def ExecuteInitialize(self):
        """This method sets the topography, the initial conditions and the upstream/downstream boundary conditions"""
        super().ExecuteInitialize()
        for process in self._GetListOfBoundaryConditionsProcesses():
            process.ExecuteInitialize()
        # Constant Manning coefficient over the whole domain.
        KM.VariableUtils().SetVariable(SW.MANNING, self.n, self.model_part.Nodes)

    def _CheckDomain(self):
        # Warn if the mesh does not span the expected [0, 100] channel.
        # NOTE(review): x_min/x_max are seeded with 1.0/-1.0 rather than
        # +/-inf; sufficient for this warning heuristic, but it assumes the
        # mesh has nodes near both ends -- confirm against typical meshes.
        x_min = 1.0
        x_max = -1.0
        for node in self.model_part.Nodes:
            x_min = min(x_min, node.X)
            x_max = max(x_max, node.X)
        tolerance = 1e-6
        if abs(x_min - self.x0) > tolerance:
            KM.Logger.PrintWarning(self.__class__.__name__, "This benchmark expects an x-aligned model part starting at x=0")
        if abs(x_max - self.x100) > tolerance:
            KM.Logger.PrintWarning(self.__class__.__name__, "This benchmark expects an x-aligned model part ending at x=100")

    def __PreComputeTopography(self):
        # Integrate dz/dx backwards from the downstream end (where z = 0)
        # to x = 0, then store the samples in ascending-x order so _Z can
        # interpolate with np.interp.
        X = np.linspace(self.x100, 0)
        z100 = 0
        Z = odeint(self._dZ, z100, X)
        Z = np.ndarray.flatten(Z)
        self.__X = X[::-1]
        self.__Z = Z[::-1]

    def _Z(self, x):
        """Bed elevation interpolated from the pre-computed profile."""
        return np.interp(x, self.__X, self.__Z)

    def _H1(self, x):
        # Analytic depth on the upstream branch (used for x < 200/3).
        g = self.g
        return (4/g)**(1/3) * (4/3 - x/100) - 9*x/1000 * (x/100 - 2/3)

    def _H2(self, x):
        # Analytic depth on the downstream branch (x >= 200/3): a quartic in
        # (x/100 - 2/3) with fitted coefficients a1..a4 from the reference.
        g = self.g
        a1 = 0.674202
        a2 = 21.7112
        a3 = 14.492
        a4 = 1.4305
        return (4/g)**(1/3) * (a1*(x/100 - 2/3)**4 + a1*(x/100 - 2/3)**3 - a2*(x/100 - 2/3)**2 + a3*(x/100 - 2/3) + a4)

    def _dH1(self, x):
        # x-derivative of _H1.
        g = self.g
        return -9*x/50000 - (4/g)**(1/3)/100 + 0.006

    def _dH2(self, x):
        # x-derivative of _H2 (coefficients expanded numerically).
        g = self.g
        return (4/g)**(1/3)*(-0.00434224*x + 0.02696808*(x/100 - 0.666666666666667)**3 + 0.02022606*(x/100 - 0.666666666666667)**2 + 0.434402666666667)

    def _H(self, x):
        """Analytic water depth; the branch switches at the shock (x = 200/3)."""
        if x < 200/3:
            return self._H1(x)
        else:
            return self._H2(x)

    def _dH(self, x):
        """x-derivative of the analytic water depth, branch-matched with _H."""
        if x < 200/3:
            return self._dH1(x)
        else:
            return self._dH2(x)

    def _Sf(self, h):
        # Manning friction slope for depth h at the steady discharge q.
        return self.n**2 * self.q**2 / h**(10/3)

    def _dZ(self, z, x):
        # Right-hand side of the topography ODE (steady shallow water balance):
        # dz/dx = (q^2 / (g h^3) - 1) * dh/dx - Sf(h).
        q = self.q
        g = self.g
        return (q**2 / (g * self._H(x)**3) - 1) * self._dH(x) - self._Sf(self._H(x))

    def _InitialH(self, x):
        # Initial free surface: horizontal at the downstream level, but never
        # shallower than the upstream depth h0.
        return np.maximum(self.h100 - self._Z(x), self.h0)

    def _GetListOfBoundaryConditionsProcesses(self):
        # Lazily create and cache the boundary condition processes.
        if not hasattr(self, 'list_of_bc_processes'):
            self.list_of_bc_processes = self._CreateListOfBoundaryConditionsProcesses()
        return self.list_of_bc_processes

    def _CreateListOfBoundaryConditionsProcesses(self):
        """Build the two BC processes: fixed momentum upstream, fixed depth downstream."""
        benchmark_settings = self.settings["benchmark_settings"]
        # Upstream: impose the steady discharge q in the x direction.
        self.upstream_settings = KM.Parameters("""{
            "process_name" : "ApplyConstantVectorValueProcess",
            "Parameters" : {
                "variable_name" : "MOMENTUM",
                "is_fixed_x" : true,
                "is_fixed_y" : true,
                "direction" : [1.0, 0.0, 0.0]}
        }""")
        self.upstream_settings["Parameters"].AddValue("model_part_name", benchmark_settings["upstream_model_part"])
        self.upstream_settings["Parameters"].AddDouble("modulus", self.q)
        # Downstream: impose the analytic depth h_ex(100).
        self.downstream_settings = KM.Parameters("""{
            "process_name" : "ApplyConstantScalarValueProcess",
            "Parameters" : {
                "variable_name" : "HEIGHT",
                "is_fixed" : true
            }
        }""")
        self.downstream_settings["Parameters"].AddValue("model_part_name", benchmark_settings["downstream_model_part"])
        self.downstream_settings["Parameters"].AddDouble("value", self.h100)
        list_of_bc_processes = []
        list_of_bc_processes.append(ProcessFactory(self.upstream_settings, self.model))
        list_of_bc_processes.append(ProcessFactory(self.downstream_settings, self.model))
        return list_of_bc_processes
/Flask-Statics-Helper-1.0.0.tar.gz/Flask-Statics-Helper-1.0.0/flask_statics/static/angular/i18n/angular-locale_fi.js | 'use strict';
// Finnish ("fi") locale data for the AngularJS $locale service.
// Generated from CLDR data: the object literals below are data, not logic.
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};

// Number of digits after the decimal separator of n
// (n may be a number or a numeric string).
function getDecimals(n) {
  n = n + '';
  var i = n.indexOf('.');
  return (i == -1) ? 0 : n.length - i - 1;
}

// CLDR plural operands: v = count of visible fraction digits (capped at 3
// unless an explicit precision is given), f = those digits as an integer.
function getVF(n, opt_precision) {
  var v = opt_precision;

  if (undefined === v) {
    v = Math.min(getDecimals(n), 3);
  }

  var base = Math.pow(10, v);
  var f = ((n * base) | 0) % base;
  return {v: v, f: f};
}

$provide.value("$locale", {
  "DATETIME_FORMATS": {
    "AMPMS": [
      "ap.",
      "ip."
    ],
    "DAY": [
      "sunnuntaina",
      "maanantaina",
      "tiistaina",
      "keskiviikkona",
      "torstaina",
      "perjantaina",
      "lauantaina"
    ],
    "MONTH": [
      "tammikuuta",
      "helmikuuta",
      "maaliskuuta",
      "huhtikuuta",
      "toukokuuta",
      "kes\u00e4kuuta",
      "hein\u00e4kuuta",
      "elokuuta",
      "syyskuuta",
      "lokakuuta",
      "marraskuuta",
      "joulukuuta"
    ],
    "SHORTDAY": [
      "su",
      "ma",
      "ti",
      "ke",
      "to",
      "pe",
      "la"
    ],
    // NOTE(review): SHORTMONTH repeats the full month names in this CLDR
    // export; that is how the file was generated upstream.
    "SHORTMONTH": [
      "tammikuuta",
      "helmikuuta",
      "maaliskuuta",
      "huhtikuuta",
      "toukokuuta",
      "kes\u00e4kuuta",
      "hein\u00e4kuuta",
      "elokuuta",
      "syyskuuta",
      "lokakuuta",
      "marraskuuta",
      "joulukuuta"
    ],
    "fullDate": "cccc d. MMMM y",
    "longDate": "d. MMMM y",
    "medium": "d.M.y H.mm.ss",
    "mediumDate": "d.M.y",
    "mediumTime": "H.mm.ss",
    "short": "d.M.y H.mm",
    "shortDate": "d.M.y",
    "shortTime": "H.mm"
  },
  "NUMBER_FORMATS": {
    "CURRENCY_SYM": "\u20ac",
    "DECIMAL_SEP": ",",
    "GROUP_SEP": "\u00a0",
    // PATTERNS[0] is the plain decimal format, PATTERNS[1] the currency format.
    "PATTERNS": [
      {
        "gSize": 3,
        "lgSize": 3,
        "maxFrac": 3,
        "minFrac": 0,
        "minInt": 1,
        "negPre": "-",
        "negSuf": "",
        "posPre": "",
        "posSuf": ""
      },
      {
        "gSize": 3,
        "lgSize": 3,
        "maxFrac": 2,
        "minFrac": 2,
        "minInt": 1,
        "negPre": "-",
        "negSuf": "\u00a0\u00a4",
        "posPre": "",
        "posSuf": "\u00a0\u00a4"
      }
    ]
  },
  "id": "fi",
  // CLDR plural rule for Finnish: "one" for integer 1 with no visible
  // fraction digits, "other" for everything else.
  "pluralCat": function(n, opt_precision) { var i = n | 0; var vf = getVF(n, opt_precision); if (i == 1 && vf.v == 0) { return PLURAL_CATEGORY.ONE; } return PLURAL_CATEGORY.OTHER;}
});
}]);
/ClueDojo-1.4.3-1.tar.gz/ClueDojo-1.4.3-1/src/cluedojo/static/dojox/grid/enhanced/plugins/Menu.js | if(!dojo._hasResource["dojox.grid.enhanced.plugins.Menu"]){
dojo._hasResource["dojox.grid.enhanced.plugins.Menu"]=true;
dojo.provide("dojox.grid.enhanced.plugins.Menu");
dojo.declare("dojox.grid.enhanced.plugins.Menu",null,{constructor:function(_1){
_1.mixin(_1,this);
},_initMenus:function(){
var _2=this.menuContainer;
!this.headerMenu&&(this.headerMenu=this._getMenuWidget(this.menus["headerMenu"]));
!this.rowMenu&&(this.rowMenu=this._getMenuWidget(this.menus["rowMenu"]));
!this.cellMenu&&(this.cellMenu=this._getMenuWidget(this.menus["cellMenu"]));
!this.selectedRegionMenu&&(this.selectedRegionMenu=this._getMenuWidget(this.menus["selectedRegionMenu"]));
this.headerMenu&&this.attr("headerMenu",this.headerMenu)&&this.setupHeaderMenu();
this.rowMenu&&this.attr("rowMenu",this.rowMenu);
this.cellMenu&&this.attr("cellMenu",this.cellMenu);
this.isDndSelectEnable&&this.selectedRegionMenu&&dojo.connect(this.select,"setDrugCoverDivs",dojo.hitch(this,this._bindDnDSelectEvent));
},_getMenuWidget:function(_3){
if(!_3){
return;
}
var _4=dijit.byId(_3);
if(!_4){
throw new Error("Menu '"+_3+"' not existed");
}
return _4;
},_bindDnDSelectEvent:function(){
dojo.forEach(this.select.coverDIVs,dojo.hitch(this,function(_5){
this.selectedRegionMenu.bindDomNode(_5);
dojo.connect(_5,"contextmenu",dojo.hitch(this,function(e){
dojo.mixin(e,this.select.getSelectedRegionInfo());
this.onSelectedRegionContextMenu(e);
}));
}));
},_setRowMenuAttr:function(_6){
this._setRowCellMenuAttr(_6,"rowMenu");
},_setCellMenuAttr:function(_7){
this._setRowCellMenuAttr(_7,"cellMenu");
},_setRowCellMenuAttr:function(_8,_9){
if(!_8){
return;
}
if(this[_9]){
this[_9].unBindDomNode(this.domNode);
}
this[_9]=_8;
this[_9].bindDomNode(this.domNode);
},showRowCellMenu:function(e){
var _a=e.sourceView.declaredClass=="dojox.grid._RowSelector";
if(this.rowMenu&&(!e.cell||this.selection.isSelected(e.rowIndex))){
this.rowMenu._openMyself(e);
dojo.stopEvent(e);
return;
}
if(_a||e.cell&&e.cell.isRowSelector){
dojo.stopEvent(e);
return;
}
if(this.isDndSelectEnable){
this.select.cellClick(e.cellIndex,e.rowIndex);
this.focus.setFocusCell(e.cell,e.rowIndex);
}
this.cellMenu&&this.cellMenu._openMyself(e);
}});
} | PypiClean |
/Flask-CKEditor-0.4.6.tar.gz/Flask-CKEditor-0.4.6/flask_ckeditor/static/basic/plugins/filebrowser/plugin.js | * @fileOverview The "filebrowser" plugin that adds support for file uploads and
* browsing.
*
* When a file is uploaded or selected inside the file browser, its URL is
* inserted automatically into a field defined in the <code>filebrowser</code>
* attribute. In order to specify a field that should be updated, pass the tab ID and
* the element ID, separated with a colon.<br /><br />
*
* <strong>Example 1: (Browse)</strong>
*
* <pre>
* {
* type : 'button',
* id : 'browse',
* filebrowser : 'tabId:elementId',
* label : editor.lang.common.browseServer
* }
* </pre>
*
* If you set the <code>filebrowser</code> attribute for an element other than
* the <code>fileButton</code>, the <code>Browse</code> action will be triggered.<br /><br />
*
* <strong>Example 2: (Quick Upload)</strong>
*
* <pre>
* {
* type : 'fileButton',
* id : 'uploadButton',
* filebrowser : 'tabId:elementId',
* label : editor.lang.common.uploadSubmit,
* 'for' : [ 'upload', 'upload' ]
* }
* </pre>
*
* If you set the <code>filebrowser</code> attribute for a <code>fileButton</code>
* element, the <code>QuickUpload</code> action will be executed.<br /><br />
*
* The filebrowser plugin also supports more advanced configuration performed through
* a JavaScript object.
*
* The following settings are supported:
*
* <ul>
* <li><code>action</code> – <code>Browse</code> or <code>QuickUpload</code>.</li>
* <li><code>target</code> – the field to update in the <code><em>tabId:elementId</em></code> format.</li>
* <li><code>params</code> – additional arguments to be passed to the server connector (optional).</li>
* <li><code>onSelect</code> – a function to execute when the file is selected/uploaded (optional).</li>
* <li><code>url</code> – the URL to be called (optional).</li>
* </ul>
*
* <strong>Example 3: (Quick Upload)</strong>
*
* <pre>
* {
* type : 'fileButton',
* label : editor.lang.common.uploadSubmit,
* id : 'buttonId',
* filebrowser :
* {
* action : 'QuickUpload', // required
* target : 'tab1:elementId', // required
* params : // optional
* {
* type : 'Files',
* currentFolder : '/folder/'
* },
* onSelect : function( fileUrl, errorMessage ) // optional
* {
 * // Do not call the built-in selectFunction.
* // return false;
* }
* },
* 'for' : [ 'tab1', 'myFile' ]
* }
* </pre>
*
* Suppose you have a file element with an ID of <code>myFile</code>, a text
* field with an ID of <code>elementId</code> and a <code>fileButton</code>.
 * If the <code>filebrowser.url</code> attribute is not specified explicitly,
* the form action will be set to <code>filebrowser[<em>DialogWindowName</em>]UploadUrl</code>
* or, if not specified, to <code>filebrowserUploadUrl</code>. Additional parameters
* from the <code>params</code> object will be added to the query string. It is
* possible to create your own <code>uploadHandler</code> and cancel the built-in
* <code>updateTargetElement</code> command.<br /><br />
*
* <strong>Example 4: (Browse)</strong>
*
* <pre>
* {
* type : 'button',
* id : 'buttonId',
* label : editor.lang.common.browseServer,
* filebrowser :
* {
* action : 'Browse',
* url : '/ckfinder/ckfinder.html&type=Images',
* target : 'tab1:elementId'
* }
* }
* </pre>
*
* In this example, when the button is pressed, the file browser will be opened in a
* popup window. If you do not specify the <code>filebrowser.url</code> attribute,
* <code>filebrowser[<em>DialogName</em>]BrowseUrl</code> or
* <code>filebrowserBrowseUrl</code> will be used. After selecting a file in the file
* browser, an element with an ID of <code>elementId</code> will be updated. Just
* like in the third example, a custom <code>onSelect</code> function may be defined.
*/
( function() {
'use strict';
// Default input element name for CSRF protection token.
var TOKEN_INPUT_NAME = 'ckCsrfToken';
// Adds (additional) arguments to given url.
//
// @param {String}
// url The url.
// @param {Object}
// params Additional parameters.
// Appends the given parameters to the URL's query string.
//
// @param {String} url The base URL.
// @param {Object} params Map of parameter names to (unencoded) values.
// @returns {String} The URL with the encoded parameters appended.
function addQueryString( url, params ) {
	if ( !params )
		return url;

	var pairs = [];
	for ( var name in params )
		pairs.push( name + '=' + encodeURIComponent( params[ name ] ) );

	var separator = url.indexOf( '?' ) == -1 ? '?' : '&';
	return url + separator + pairs.join( '&' );
}
// Function sniffs for CKFinder URLs, and adds required parameters if needed (#1835).
//
// @since 4.9.1
// @param {String} url CKFinder's URL.
// @returns {String} Decorated URL.
// Sniffs for CKFinder QuickUpload URLs and appends the required
// `responseType=json` parameter when it is missing (#1835).
//
// @since 4.9.1
// @param {String} url CKFinder's URL.
// @returns {String} Decorated URL.
function addMissingParams( url ) {
	var isQuickUpload = /command=QuickUpload/.test( url ),
		hasResponseType = /(\?|&)responseType=json/.test( url );

	if ( isQuickUpload && !hasResponseType )
		return addQueryString( url, { responseType: 'json' } );

	return url;
}
// Make a string's first character uppercase.
//
// @param {String}
// str String.
// Returns *str* (coerced to string) with its first character upper-cased.
function ucFirst( str ) {
	str = str + '';
	return str.charAt( 0 ).toUpperCase() + str.slice( 1 );
}
// The onlick function assigned to the 'Browse Server' button. Opens the
// file browser and updates target field when file is selected.
//
// @param {CKEDITOR.event}
// evt The event object.
// The onclick function assigned to the 'Browse Server' button. Opens the
// file browser popup and remembers which dialog element triggered it so
// that setUrl() can later update the right target field.
function browseServer() {
	var dialog = this.getDialog();
	var editor = dialog.getParentEditor();

	// Remember the source element that requested browsing.
	editor._.filebrowserSe = this;

	// Per-dialog window size settings fall back to the global config,
	// then to hard-coded defaults.
	var width = editor.config[ 'filebrowser' + ucFirst( dialog.getName() ) + 'WindowWidth' ] || editor.config.filebrowserWindowWidth || '80%';
	var height = editor.config[ 'filebrowser' + ucFirst( dialog.getName() ) + 'WindowHeight' ] || editor.config.filebrowserWindowHeight || '70%';

	// Identify this editor instance and its callback to the file browser.
	var params = this.filebrowser.params || {};
	params.CKEditor = editor.name;
	params.CKEditorFuncNum = editor._.filebrowserFn;
	if ( !params.langCode )
		params.langCode = editor.langCode;

	var url = addQueryString( this.filebrowser.url, params );
	// TODO: V4: Remove backward compatibility (https://dev.ckeditor.com/ticket/8163).
	editor.popup( url, width, height, editor.config.filebrowserWindowFeatures || editor.config.fileBrowserWindowFeatures );
}
// Appends token preventing CSRF attacks to the form of provided file input.
//
// @since 4.5.6
// @param {CKEDITOR.dom.element} fileInput
// Appends a hidden input holding the CSRF protection token to the form of
// the provided file input (creating the input on first use, reusing and
// refreshing it afterwards).
//
// @since 4.5.6
// @param {CKEDITOR.dom.element} fileInput
function appendToken( fileInput ) {
	var tokenElement;
	var form = new CKEDITOR.dom.element( fileInput.$.form );

	if ( form ) {
		// Check if token input element already exists.
		tokenElement = form.$.elements[ TOKEN_INPUT_NAME ];

		// Create new if needed.
		if ( !tokenElement ) {
			tokenElement = new CKEDITOR.dom.element( 'input' );
			tokenElement.setAttributes( {
				name: TOKEN_INPUT_NAME,
				type: 'hidden'
			} );
			form.append( tokenElement );
		} else {
			// Wrap the raw DOM node back into a CKEDITOR element.
			tokenElement = new CKEDITOR.dom.element( tokenElement );
		}

		// Always refresh the token value right before submitting.
		tokenElement.setAttribute( 'value', CKEDITOR.tools.getCsrfToken() );
	}
}
// The onclick function assigned to the 'Upload' button. Makes the final
// decision whether form is really submitted and updates target field when
// file is uploaded.
//
// @param {CKEDITOR.event}
// evt The event object.
// The onclick function assigned to the 'Upload' button. Decides whether the
// upload form may really be submitted.
//
// @returns {Boolean} True when a file is selected and an upload action is set.
function uploadFile() {
	var dialog = this.getDialog(),
		editor = dialog.getParentEditor();

	// Remember which button triggered the upload for setUrl().
	editor._.filebrowserSe = this;

	var fileElement = dialog.getContentElement( this[ 'for' ][ 0 ], this[ 'for' ][ 1 ] );

	// Stop the submission when no file was selected, or when no upload
	// action is configured for the file element.
	if ( !fileElement.getInputElement().$.value || !fileElement.getAction() )
		return false;

	return true;
}
// Setups the file element.
//
// @param {CKEDITOR.ui.dialog.file}
// fileInput The file element used during file upload.
// @param {Object}
// filebrowser Object containing filebrowser settings assigned to
// the fileButton associated with this file element.
// Configures the upload form: points its action at the upload URL (with
// editor identification parameters) and links the filebrowser settings to
// the file input.
//
// @param editor The editor instance.
// @param {CKEDITOR.ui.dialog.file} fileInput The file element used during upload.
// @param {Object} filebrowser Filebrowser settings of the associated fileButton.
function setupFileElement( editor, fileInput, filebrowser ) {
	var params = filebrowser.params || {};

	params.CKEditor = editor.name;
	params.CKEditorFuncNum = editor._.filebrowserFn;
	if ( !params.langCode )
		params.langCode = editor.langCode;

	fileInput.filebrowser = filebrowser;
	fileInput.action = addQueryString( filebrowser.url, params );
}
// Traverse through the content definition and attach filebrowser to
// elements with 'filebrowser' attribute.
//
// @param String
// dialogName Dialog name.
// @param {CKEDITOR.dialog.definitionObject}
// definition Dialog definition.
// @param {Array}
// elements Array of {@link CKEDITOR.dialog.definition.content}
// objects.
// Traverses the dialog definition and attaches filebrowser behavior to
// elements carrying a 'filebrowser' attribute.
//
// @param editor The editor instance.
// @param {String} dialogName Dialog name (used to look up per-dialog config).
// @param {CKEDITOR.dialog.definitionObject} definition Dialog definition.
// @param {Array} elements Array of CKEDITOR.dialog.definition.content objects.
function attachFileBrowser( editor, dialogName, definition, elements ) {
	if ( !elements || !elements.length )
		return;

	var element;

	for ( var i = elements.length; i--; ) {
		element = elements[ i ];

		// Recurse into container elements.
		if ( element.type == 'hbox' || element.type == 'vbox' || element.type == 'fieldset' )
			attachFileBrowser( editor, dialogName, definition, element.children );

		if ( !element.filebrowser )
			continue;

		// The shorthand string form "tabId:elementId" expands to an object:
		// fileButtons upload, anything else browses.
		if ( typeof element.filebrowser == 'string' ) {
			var fb = {
				action: ( element.type == 'fileButton' ) ? 'QuickUpload' : 'Browse',
				target: element.filebrowser
			};
			element.filebrowser = fb;
		}

		if ( element.filebrowser.action == 'Browse' ) {
			var url = element.filebrowser.url;
			if ( url === undefined ) {
				// Fall back from per-dialog config to the global browse URL.
				url = editor.config[ 'filebrowser' + ucFirst( dialogName ) + 'BrowseUrl' ];
				if ( url === undefined )
					url = editor.config.filebrowserBrowseUrl;
			}

			if ( url ) {
				element.onClick = browseServer;
				element.filebrowser.url = url;
				element.hidden = false;
			}
		} else if ( element.filebrowser.action == 'QuickUpload' && element[ 'for' ] ) {
			url = element.filebrowser.url;
			if ( url === undefined ) {
				// Fall back from per-dialog config to the global upload URL.
				url = editor.config[ 'filebrowser' + ucFirst( dialogName ) + 'UploadUrl' ];
				if ( url === undefined )
					url = editor.config.filebrowserUploadUrl;
			}

			if ( url ) {
				var onClick = element.onClick;

				// "element" here means the definition object, so we need to find the correct
				// button to scope the event call
				element.onClick = function( evt ) {
					var sender = evt.sender,
						fileInput = sender.getDialog().getContentElement( this[ 'for' ][ 0 ], this[ 'for' ][ 1 ] ).getInputElement(),
						isFileUploadApiSupported = CKEDITOR.fileTools && CKEDITOR.fileTools.isFileUploadSupported;

					// A pre-existing onClick handler may veto the upload.
					if ( onClick && onClick.call( sender, evt ) === false ) {
						return false;
					}

					if ( uploadFile.call( sender, evt ) ) {
						// Use one of two upload strategies, either form or XHR based (#643).
						if ( editor.config.filebrowserUploadMethod === 'form' || !isFileUploadApiSupported ) {
							// Append token preventing CSRF attacks.
							appendToken( fileInput );
							return true;
						} else {
							var loader = editor.uploadRepository.create( fileInput.$.files[ 0 ] );

							loader.on( 'uploaded', function( evt ) {
								var response = evt.sender.responseData;
								setUrl.call( evt.sender.editor, response.url, response.message );
							} );

							// Return non-false value will disable fileButton in dialogui,
							// below listeners takes care of such situation and re-enable "send" button.
							loader.on( 'error', xhrUploadErrorHandler.bind( this ) );
							loader.on( 'abort', xhrUploadErrorHandler.bind( this ) );

							loader.loadAndUpload( addMissingParams( url ) );

							return 'xhr';
						}
					}

					return false;
				};
				element.filebrowser.url = url;
				element.hidden = false;
				setupFileElement( editor, definition.getContents( element[ 'for' ][ 0 ] ).get( element[ 'for' ][ 1 ] ), element.filebrowser );
			}
		}
	}
}
// Re-enables the upload button and alerts the user when an XHR-based upload
// fails or is aborted. Bound so that `this` is the ui.dialog.fileButton.
function xhrUploadErrorHandler( evt ) {
	var response = {};

	// The server reply may not be valid JSON; ignore parse errors.
	try {
		response = JSON.parse( evt.sender.xhr.response ) || {};
	} catch ( e ) {
	}

	this.enable();

	var message = response.error ? response.error.message : evt.sender.message;
	alert( message ); // jshint ignore:line
}
// Updates the target element with the url of uploaded/selected file.
//
// @param {String}
// url The url of a file.
// Writes the uploaded/selected file URL into the dialog field named by the
// "tabId:elementId" target of the source element's filebrowser settings.
//
// @param {String} url The url of a file.
// @param sourceElement The dialog element that triggered the action.
function updateTargetElement( url, sourceElement ) {
	var dialog = sourceElement.getDialog();
	var target = sourceElement.filebrowser.target || null;

	// Nothing to do when no target field was configured.
	if ( !target )
		return;

	var parts = target.split( ':' ),
		element = dialog.getContentElement( parts[ 0 ], parts[ 1 ] );

	if ( element ) {
		element.setValue( url );
		// Bring the updated tab to the front.
		dialog.selectPage( parts[ 0 ] );
	}
}
// Returns true if filebrowser is configured in one of the elements.
//
// @param {CKEDITOR.dialog.definitionObject}
// definition Dialog definition.
// @param String
// tabId The tab id where element(s) can be found.
// @param String
// elementId The element id (or ids, separated with a semicolon) to check.
// Tells whether a filebrowser URL is configured for the given element, or
// for any of several elements whose ids are separated with semicolons.
//
// @param {CKEDITOR.dialog.definitionObject} definition Dialog definition.
// @param {String} tabId The tab id where the element(s) can be found.
// @param {String} elementId Element id (or ids separated with ';') to check.
function isConfigured( definition, tabId, elementId ) {
	// Recurse over "id1;id2;..." lists: any configured id is enough.
	if ( elementId.indexOf( ';' ) !== -1 ) {
		var ids = elementId.split( ';' ),
			count = ids.length,
			idx;

		for ( idx = 0; idx < count; idx++ ) {
			if ( isConfigured( definition, tabId, ids[ idx ] ) )
				return true;
		}
		return false;
	}

	var fb = definition.getContents( tabId ).get( elementId ).filebrowser;
	return ( fb && fb.url );
}
// Called (via the function registered with CKEDITOR.tools.addFunction) when
// the external file browser or the upload handler reports a result. Updates
// the element that triggered the action and shows any server message.
//
// @param {String} fileUrl The URL of the selected/uploaded file.
// @param {String/Function} data Server message, or a custom select callback.
function setUrl( fileUrl, data ) {
	var dialog = this._.filebrowserSe.getDialog(),
		targetInput = this._.filebrowserSe[ 'for' ],
		onSelect = this._.filebrowserSe.filebrowser.onSelect;

	// Reset the file input associated with an upload button.
	if ( targetInput )
		dialog.getContentElement( targetInput[ 0 ], targetInput[ 1 ] ).reset();

	// Either callback may cancel the default handling by returning false.
	if ( typeof data == 'function' && data.call( this._.filebrowserSe ) === false )
		return;
	if ( onSelect && onSelect.call( this._.filebrowserSe, fileUrl, data ) === false )
		return;

	// The "data" argument may be used to pass the error message to the editor.
	if ( typeof data == 'string' && data )
		alert( data ); // jshint ignore:line

	if ( fileUrl )
		updateTargetElement( fileUrl, this._.filebrowserSe );
}
// Registers the plugin: creates the per-editor global callback that the
// external file browser invokes to return the selected URL, and removes it
// again when the editor is destroyed.
CKEDITOR.plugins.add( 'filebrowser', {
	requires: 'popup,filetools',
	init: function( editor ) {
		editor._.filebrowserFn = CKEDITOR.tools.addFunction( setUrl, editor );
		editor.on( 'destroy', function() {
			CKEDITOR.tools.removeFunction( this._.filebrowserFn );
		} );
	}
} );
// On every dialog definition, walk its contents and wire filebrowser
// behavior into elements carrying a 'filebrowser' attribute.
CKEDITOR.on( 'dialogDefinition', function( evt ) {
	// We require filebrowser plugin to be loaded.
	if ( !evt.editor.plugins.filebrowser )
		return;

	var definition = evt.data.definition,
		element;

	// Associate filebrowser to elements with 'filebrowser' attribute.
	for ( var i = 0; i < definition.contents.length; ++i ) {
		if ( ( element = definition.contents[ i ] ) ) {
			attachFileBrowser( evt.editor, evt.data.name, definition, element.elements );
			// Tabs hidden by default stay hidden unless a browse/upload URL
			// is actually configured for them.
			if ( element.hidden && element.filebrowser )
				element.hidden = !isConfigured( definition, element.id, element.filebrowser );
		}
	}
} );
} )();
/**
* The location of an external file manager that should be launched when the **Browse Server**
* button is pressed. If configured, the **Browse Server** button will appear in the
* **Link**, **Image**, and **Flash** dialog windows.
*
* Read more in the {@glink guide/dev_file_browse_upload documentation}
* and see the {@glink examples/fileupload example}.
*
* config.filebrowserBrowseUrl = '/browser/browse.php';
*
* @since 3.0.0
* @cfg {String} [filebrowserBrowseUrl='' (empty string = disabled)]
* @member CKEDITOR.config
*/
/**
* The location of the script that handles file uploads.
* If set, the **Upload** tab will appear in the **Link**, **Image**,
* and **Flash** dialog windows.
*
* Read more in the {@glink guide/dev_file_browse_upload documentation}
* and see the {@glink examples/fileupload example}.
*
* config.filebrowserUploadUrl = '/uploader/upload.php';
*
* **Note:** This is a configuration setting for a {@glink guide/dev_file_browse_upload file browser/uploader}.
* To configure {@glink guide/dev_file_upload uploading dropped or pasted files} use the {@link CKEDITOR.config#uploadUrl}
* configuration option.
*
* @since 3.0.0
* @cfg {String} [filebrowserUploadUrl='' (empty string = disabled)]
* @member CKEDITOR.config
*/
/**
* The location of an external file manager that should be launched when the **Browse Server**
* button is pressed in the **Image** dialog window.
*
* If not set, CKEditor will use {@link CKEDITOR.config#filebrowserBrowseUrl}.
*
* Read more in the [documentation](#!/guide/dev_file_manager_configuration-section-adding-file-manager-scripts-for-selected-dialog-windows)
* and see the {@glink examples/fileupload example}.
*
* config.filebrowserImageBrowseUrl = '/browser/browse.php?type=Images';
*
* @since 3.0.0
* @cfg {String} [filebrowserImageBrowseUrl='' (empty string = disabled)]
* @member CKEDITOR.config
*/
/**
* The location of an external file browser that should be launched when the **Browse Server**
* button is pressed in the **Flash** dialog window.
*
* If not set, CKEditor will use {@link CKEDITOR.config#filebrowserBrowseUrl}.
*
* Read more in the [documentation](#!/guide/dev_file_manager_configuration-section-adding-file-manager-scripts-for-selected-dialog-windows)
* and see the {@glink examples/fileupload example}.
*
* config.filebrowserFlashBrowseUrl = '/browser/browse.php?type=Flash';
*
* @since 3.0.0
* @cfg {String} [filebrowserFlashBrowseUrl='' (empty string = disabled)]
* @member CKEDITOR.config
*/
/**
* The location of the script that handles file uploads in the **Image** dialog window.
*
* If not set, CKEditor will use {@link CKEDITOR.config#filebrowserUploadUrl}.
*
* Read more in the [documentation](#!/guide/dev_file_manager_configuration-section-adding-file-manager-scripts-for-selected-dialog-windows)
* and see the {@glink examples/fileupload example}.
*
* config.filebrowserImageUploadUrl = '/uploader/upload.php?type=Images';
*
* **Note:** This is a configuration setting for a {@glink guide/dev_file_browse_upload file browser/uploader}.
* To configure {@glink guide/dev_file_upload uploading dropped or pasted files} use the {@link CKEDITOR.config#uploadUrl}
* or {@link CKEDITOR.config#imageUploadUrl} configuration option.
*
* @since 3.0.0
* @cfg {String} [filebrowserImageUploadUrl='' (empty string = disabled)]
* @member CKEDITOR.config
*/
/**
* The location of the script that handles file uploads in the **Flash** dialog window.
*
* If not set, CKEditor will use {@link CKEDITOR.config#filebrowserUploadUrl}.
*
* Read more in the [documentation](#!/guide/dev_file_manager_configuration-section-adding-file-manager-scripts-for-selected-dialog-windows)
* and see the {@glink examples/fileupload example}.
*
* config.filebrowserFlashUploadUrl = '/uploader/upload.php?type=Flash';
*
* @since 3.0.0
 * @cfg {String} [filebrowserFlashUploadUrl='' (empty string = disabled)]
* @member CKEDITOR.config
*/
/**
* The location of an external file manager that should be launched when the **Browse Server**
* button is pressed in the **Link** tab of the **Image** dialog window.
*
* If not set, CKEditor will use {@link CKEDITOR.config#filebrowserBrowseUrl}.
*
* Read more in the [documentation](#!/guide/dev_file_manager_configuration-section-adding-file-manager-scripts-for-selected-dialog-windows)
* and see the {@glink examples/fileupload example}.
*
* config.filebrowserImageBrowseLinkUrl = '/browser/browse.php';
*
* @since 3.2.0
* @cfg {String} [filebrowserImageBrowseLinkUrl='' (empty string = disabled)]
* @member CKEDITOR.config
*/
/**
* The features to use in the file manager popup window.
*
* config.filebrowserWindowFeatures = 'resizable=yes,scrollbars=no';
*
* @since 3.4.1
* @cfg {String} [filebrowserWindowFeatures='location=no,menubar=no,toolbar=no,dependent=yes,minimizable=no,modal=yes,alwaysRaised=yes,resizable=yes,scrollbars=yes']
* @member CKEDITOR.config
*/
/**
* The width of the file manager popup window. It can be a number denoting a value in
* pixels or a percent string.
*
* Read more in the [documentation](#!/guide/dev_file_manager_configuration-section-file-manager-window-size)
* and see the {@glink examples/fileupload example}.
*
* config.filebrowserWindowWidth = 750;
*
* config.filebrowserWindowWidth = '50%';
*
* @cfg {Number/String} [filebrowserWindowWidth='80%']
* @member CKEDITOR.config
*/
/**
* The height of the file manager popup window. It can be a number denoting a value in
* pixels or a percent string.
*
* Read more in the [documentation](#!/guide/dev_file_manager_configuration-section-file-manager-window-size)
* and see the {@glink examples/fileupload example}.
*
* config.filebrowserWindowHeight = 580;
*
* config.filebrowserWindowHeight = '50%';
*
* @cfg {Number/String} [filebrowserWindowHeight='70%']
* @member CKEDITOR.config
*/
/**
* Defines a preferred option for file uploading in the [File Browser](https://ckeditor.com/cke4/addon/filebrowser) plugin.
*
* Available values:
*
* * `'xhr'` – XMLHttpRequest is used to upload the file. Using this option allows to set additional XHR headers with
* the {@link CKEDITOR.config#fileTools_requestHeaders} option.
* * `'form'` – The file is uploaded by submitting a traditional `<form>` element. **Note: That was the only option available until CKEditor 4.9.0.**
*
* Example:
*
* // All browsers will use a plain form element to upload the file.
* config.filebrowserUploadMethod = 'form';
*
* @since 4.9.0
* @cfg {String} [filebrowserUploadMethod='xhr']
* @member CKEDITOR.config
*/ | PypiClean |
/Balert-1.1.8.tar.gz/Balert-1.1.8/balert/BatteryStatus.py | from Bsettings import Bpath
from Bsettings import SetLevel
from Config import Config
import logging
import os
class Battery(Bpath, Config):
    """Fetch battery status from the Linux sysfs power-supply interface.

    Reads the entries under ``Bpath.POWER_SUPPLY_PATH`` (the sysfs
    power-supply class directory) to determine whether the battery is
    discharging and whether its charge is at or below the configured
    alert level.
    """

    def __init__(self):
        # Fail early when the sysfs tree cannot be read: every other
        # method of this class depends on it.
        if not os.access(Bpath.POWER_SUPPLY_PATH, os.R_OK):
            raise RuntimeError(
                "Unable to read {path}.".format(path=Bpath.POWER_SUPPLY_PATH))
        logging.debug("All ok!")
        # Set to True during a scan when a present battery is not discharging.
        self.charging = False

    def ac(self, supply_path):
        """Return True if the supply at *supply_path* reports being online."""
        with open(os.path.join(supply_path, 'online'), 'r') as online_file:
            return online_file.readline().strip() == '1'

    def power_source_type(self, supply_path):
        """Return the supply type: ``"Mains"``, ``"UPS"`` or ``"Battery"``.

        :raises RuntimeError: for any other (unsupported) supply type.
        """
        with open(os.path.join(supply_path, 'type'), 'r') as type_file:
            _type = type_file.readline().strip()
        if _type in ('Mains', 'UPS', 'Battery'):
            return _type
        # BUG FIX: the message previously interpolated the builtin ``type``
        # instead of the value read from the file.
        raise RuntimeError("Type of {path} ({type}) is not supported".format(
            path=supply_path, type=_type))

    def battery_present(self, supply_path):
        """Return True if a battery is physically present at *supply_path*."""
        with open(os.path.join(supply_path, 'present'), 'r') as present_file:
            return present_file.readline().strip() == '1'

    def discharging(self, supply_path):
        """Return True if the battery at *supply_path* is discharging."""
        with open(os.path.join(supply_path, 'status'), 'r') as status_file:
            return status_file.readline().strip() == 'Discharging'

    def state(self, supply_path):
        """Return the battery capacity (percent) as a string."""
        with open(os.path.join(supply_path, 'capacity'), 'r') as capacity_file:
            return capacity_file.readline().split()[0]

    def get_low_battery_warning_level(self):
        """Scan all power supplies and decide whether a warning is due.

        :return: ``(1, capacity)`` when the battery is discharging at or
            below the configured level, ``(0, capacity)`` when discharging
            above it, and ``(0, 0)`` when charging or when no battery
            capacity could be read.
        """
        capacity = None
        for supply in os.listdir(Bpath.POWER_SUPPLY_PATH):
            supply_path = os.path.join(Bpath.POWER_SUPPLY_PATH, supply)
            try:
                _type = self.power_source_type(supply_path)
                if _type == "Mains":
                    # Result unused; reading 'online' also validates the entry.
                    if self.ac(supply_path):
                        pass
                elif _type == "Battery":
                    if self.battery_present(supply_path) and \
                            self.discharging(supply_path):
                        capacity = int(self.state(supply_path))
                    else:
                        self.charging = True
            except (OSError, RuntimeError, ValueError) as e:
                # A single unreadable supply must not abort the whole scan
                # (previously errors were only print()ed and lost).
                logging.warning("Skipping %s: %s", supply_path, e)
        logging.getLogger().setLevel(logging.DEBUG)
        if self.charging or capacity is None:
            # BUG FIX: previously, a machine with no discharging battery left
            # ``capacity`` as None and crashed on the comparison below.
            return (0, 0)
        conf_charge = self.load_pickle()["CHARGE"]
        logging.info("Current charge is %d, Level set is %d",
                     capacity, conf_charge)
        if capacity <= conf_charge:
            return (1, capacity)
        return (0, capacity)
/FoLiA-tools-2.5.4.tar.gz/FoLiA-tools-2.5.4/foliatools/foliaquery.py | from __future__ import print_function, unicode_literals, division, absolute_import
import getopt
import sys
import os
import glob
from folia import fql
import folia.main as folia
def usage():
    """Print the program banner, module docstring and CLI usage to stderr."""
    print("foliaquery", file=sys.stderr)
    print(" by Maarten van Gompel (proycon)", file=sys.stderr)
    print(" Radboud University Nijmegen", file=sys.stderr)
    print(" 2015-2019 - Licensed under GPLv3", file=sys.stderr)
    print("", file=sys.stderr)
    print(__doc__, file=sys.stderr)
    print("", file=sys.stderr)
    print("Usage: foliaquery [options] -q <FQL query> file-or-dir1 file-or-dir2 ..etc..", file=sys.stderr)
    print("", file=sys.stderr)
    print("Parameters for output:", file=sys.stderr)
    print(" -q 'fql query' Query (May be specified multiple times)", file=sys.stderr)
    print(" -e [encoding] Output encoding (default: utf-8)", file=sys.stderr)
    print("Parameters for processing directories:", file=sys.stderr)
    print(" -r Process recursively", file=sys.stderr)
    print(" -E [extension] Set extension (default: xml)", file=sys.stderr)
    # BUG FIX: the ignore-errors flag is parsed as -n in main(), not -i.
    print(" -n Ignore errors", file=sys.stderr)
    print("", file=sys.stderr)
def process(filename, queries):
    """Run every FQL query against *filename*, saving when a query mutates.

    Errors are reported and skipped when ``settings.ignoreerrors`` is set,
    otherwise they propagate.
    """
    try:
        print("Processing " + filename, file=sys.stderr)
        doc = folia.Document(file=filename)
        modified = False
        for query in queries:
            # The python output format cannot be printed; fall back to XML.
            if query.format == "python":
                query.format = "xml"
            print(query(doc))
            mutating = query.action and query.action.action in (
                'EDIT', 'DELETE', 'SUBSTITUTE', 'PREPEND', 'APPEND')
            if mutating:
                modified = True
        # Persist the document only when at least one query changed it.
        if modified:
            print("Saving " + filename, file=sys.stderr)
            doc.save()
    except Exception as e:
        if settings.ignoreerrors:
            print("ERROR: An exception was raised whilst processing " + filename + ":", e, file=sys.stderr)
        else:
            raise
def processdir(d, queries):
    """Process every matching FoLiA file under *d*, recursing if enabled."""
    print("Searching in " + d, file=sys.stderr)
    suffix = '.' + settings.extension
    for entry in glob.glob(os.path.join(d, '*')):
        if entry.endswith(suffix):
            process(entry, queries)
        elif settings.recurse and os.path.isdir(entry):
            processdir(entry, queries)
class settings:
    """Global runtime options for this tool, mutated by main() from CLI flags."""
    # NOTE(review): leftcontext/rightcontext are not referenced anywhere in
    # this module -- possibly consumed elsewhere or legacy; confirm upstream.
    leftcontext = 0
    rightcontext = 0
    extension = 'xml'      # file extension matched when scanning directories (-E)
    recurse = False        # descend into subdirectories (-r)
    encoding = 'utf-8'     # output encoding (-e)
    ignoreerrors = False   # keep going after per-file errors (-n)
    casesensitive = True   # NOTE(review): not referenced in this module
def main():
    """Command-line entry point.

    With ``-q`` queries: run them over the given files/directories.
    Without queries: load the given files and enter interactive FQL mode.
    """
    try:
        # BUG FIX: 'e:' was missing from the option string, so the
        # documented -e (encoding) option was always rejected by getopt.
        opts, args = getopt.getopt(sys.argv[1:], "o:OE:hq:nre:", ["help", "text=", "pos=", "lemma=", "sense=", "phon="])
    except getopt.GetoptError as err:
        print(str(err), file=sys.stderr)
        usage()
        sys.exit(2)

    queries = []
    for o, a in opts:
        if o == '-h' or o == '--help':
            usage()
            sys.exit(0)
        elif o == '-e':
            settings.encoding = a
        elif o == '-E':
            settings.extension = a
        elif o == '-r':
            settings.recurse = True
        elif o == '-n':
            settings.ignoreerrors = True
        elif o == '-q':
            try:
                queries.append(fql.Query(a))
            except Exception as e:
                # Report the bad query but keep processing the remaining ones.
                print("FQL SYNTAX ERROR: " + str(e), file=sys.stderr)
        else:
            raise Exception("No such option: " + o)

    if queries and args:
        # Batch mode: apply all queries to each file/directory argument.
        for x in args:
            if os.path.isdir(x):
                processdir(x, queries)
            elif os.path.isfile(x):
                process(x, queries)
            elif x[0:2] != '--':
                print("ERROR: File or directory not found: " + x, file=sys.stderr)
                sys.exit(3)
    elif not queries:
        # Interactive mode: load documents, then prompt for queries.
        docs = []
        if len(args) > 50:
            print("ERROR: Too many files specified for interactive mode, specify a query on the command line instead", file=sys.stderr)
            # BUG FIX: previously fell through and loaded them all anyway.
            sys.exit(3)
        for x in args:
            if os.path.isdir(x):
                print("ERROR: Directories are not allowed in interactive mode, specify a query on the command line", file=sys.stderr)
                # BUG FIX: previously continued and crashed on the directory.
                sys.exit(3)
        for x in args:
            print("Loading " + x + "...", file=sys.stderr)
            docs.append(folia.Document(file=x))
        import readline  # noqa: F401 -- enables line editing/history for input()
        print("Starting interactive mode, enter your FQL queries, QUIT to save changes and exit.", file=sys.stderr)
        savedocs = []
        while True:
            query = input("FQL> ")
            if query == "QUIT" or query == "EXIT":
                break
            if query.startswith("LOAD "):
                # BUG FIX: the original compared the bound method
                # ``query.startswith`` to a string (always False), and its
                # print used a stale loop variable instead of the filename.
                filename = query[5:]
                print("Loading " + filename + "...", file=sys.stderr)
                docs.append(folia.Document(file=filename))
                continue
            try:
                query = fql.Query(query)
            except fql.SyntaxError as e:
                print("FQL SYNTAX ERROR: " + str(e), file=sys.stderr)
                continue
            if query.format == "python":
                query.format = "xml"
            for doc in docs:
                output = query(doc)
                print(output)
                if query.action and query.action.action in ('EDIT', 'DELETE', 'SUBSTITUTE', 'PREPEND', 'APPEND'):
                    if doc not in savedocs:
                        savedocs.append(doc)
        print("Saving changes to documents, please wait...", file=sys.stderr)
        # Save only the documents actually modified during the session.
        for doc in savedocs:
            print("Saving " + doc.filename)
            doc.save()
        print("done.", file=sys.stderr)
    else:
        print("ERROR: Nothing to do, specify one or more files or directories", file=sys.stderr)
if __name__ == "__main__":
main() | PypiClean |
/AwesomeTkinter-2021.11.8-py3-none-any.whl/awesometkinter/tooltip.py | import tkinter as tk
from tkinter import font as tkfont
from tkinter import ttk
__package__ = 'awesometkinter'
from .utils import configure_widget
class ToolTip:
    """Attach a hover tooltip to any tkinter widget.

    The tooltip appears near the mouse pointer after ``waittime``
    milliseconds and is hidden on leave or button press. The managed
    widget gains two attributes: ``widget.tooltip`` (this object) and
    ``widget.update_tooltip(text)`` for dynamic text updates.
    """

    def __init__(self, widget, text, waittime=500, xoffset=10, yoffset=10, **kwargs):
        """
        tooltip class

        Args:
            widget: any tkinter widget
            text: tooltip text
            waittime: time in milliseconds to wait before showing tooltip
            xoffset(int): x - offset (pixels) of tooltip box from mouse pointer
            yoffset(int): y - offset (pixels) of tooltip box from mouse pointer
            kwargs: parameters to be passed to tooltip label, e.g: , background='red', foreground='blue', etc
        """
        self.widget = widget
        self._text = text
        self.waittime = waittime  # milliseconds
        self.xoffset = xoffset
        self.yoffset = yoffset
        self.kwargs = kwargs
        self.tipwindow = None  # Toplevel holding the tip, None while hidden
        self.label = None      # ttk.Label inside the tip window
        self.id = None         # pending "after" timer id, None when unscheduled
        # add='+' preserves any handlers already bound to the widget.
        self._id1 = self.widget.bind("<Enter>", self.enter, add='+')
        self._id2 = self.widget.bind("<Leave>", self.leave, add='+')
        self._id3 = self.widget.bind("<ButtonPress>", self.leave, add='+')
        # for dynamic tooltip, use widget.update_tooltip('new text')
        widget.update_tooltip = self.update_tooltip
        widget.tooltip = self

    def __del__(self):
        # Best-effort cleanup; the widget may already have been destroyed.
        try:
            self.widget.unbind("<Enter>", self._id1)
            self.widget.unbind("<Leave>", self._id2)
            self.widget.unbind("<ButtonPress>", self._id3)
            self.unschedule()
            self.hidetip()
        except tk.TclError:
            pass

    @property
    def text(self):
        """Tooltip text; assigning to it also refreshes a visible tooltip."""
        return self._text

    @text.setter
    def text(self, txt):
        self._text = txt
        self.update_tooltip(txt)

    def update_tooltip(self, text):
        """Change the tooltip text, updating the label if currently shown."""
        self._text = text
        try:
            self.label.config(text=text)
        except (AttributeError, tk.TclError):
            # BUG FIX: narrowed from a bare ``except`` (which also swallowed
            # KeyboardInterrupt/SystemExit). The label is None while the tip
            # is hidden (AttributeError) or may already be destroyed
            # (TclError); anything else should propagate.
            pass

    def enter(self, event=None):
        """<Enter> handler: start the show countdown."""
        self.schedule()

    def leave(self, event=None):
        """<Leave>/<ButtonPress> handler: cancel the countdown and hide."""
        self.unschedule()
        self.hidetip()

    def schedule(self):
        """Arm a fresh timer to show the tip after ``waittime`` ms."""
        self.unschedule()
        self.id = self.widget.after(self.waittime, self.showtip)

    def unschedule(self):
        """Cancel any pending show timer."""
        if self.id:
            self.widget.after_cancel(self.id)
            self.id = None

    def showtip(self):
        """Create the borderless tooltip window near the mouse pointer."""
        if self.tipwindow:
            return
        # tip text should be displayed away from the mouse pointer to prevent triggering leave event
        x = self.widget.winfo_pointerx() + self.xoffset
        y = self.widget.winfo_pointery() + self.yoffset
        self.tipwindow = tw = tk.Toplevel(self.widget)
        # show no border on the top level window
        tw.wm_overrideredirect(1)
        self.label = ttk.Label(tw, text=self.text, justify=tk.LEFT, padding=(5, 2),
                               background="#ffffe0", relief=tk.SOLID, borderwidth=1)
        lbl = self.label
        # Accept both long and short color option names from kwargs.
        self.kwargs['background'] = self.kwargs.get('background') or self.kwargs.get('bg') or "#ffffe0"
        self.kwargs['foreground'] = self.kwargs.get('foreground') or self.kwargs.get('fg') or "black"
        configure_widget(lbl, **self.kwargs)
        # get text width using font, because .winfo_width() needs to call "update_idletasks()" to get correct width
        font = tkfont.Font(font=lbl['font'])
        txt_width = font.measure(self.text)
        # correct position to stay inside screen
        x = min(x, lbl.winfo_screenwidth() - txt_width)
        tw.wm_geometry("+%d+%d" % (x, y))
        lbl.pack()

    def hidetip(self):
        """Destroy the tooltip window, if any."""
        if self.tipwindow:
            self.tipwindow.destroy()
            self.tipwindow = None
tooltip = ToolTip
def main():
    # NOTE(review): dead code -- a second ``main`` defined later in this
    # module rebinds the name before the ``__name__ == '__main__'`` guard
    # runs, so this demo is never executed.
    # Test code
    root = tk.Tk()
    b = tk.Button(root, text="Hello", command=root.destroy)
    b.pack()
    l = tk.Label(root, text='my label')
    l.pack()
    b.tp = tooltip(b, "Hello world")
    l.tp = tooltip(l, "Hello world", bg='cyan', fg='blue')
    # we can modify any property thru the widget.tooltip reference
    b.tp.waittime = 100
    b.tp.text = 'new text'
    # use dynamic tooltip
    x = list(range(20))
    def foo():
        # assumes the reschedule runs on every tick -- indentation was lost
        # in this copy of the file; TODO confirm against upstream
        if x:
            l.tp.update_tooltip(f'counter: {x.pop()}') # or can use l.tp.text='some text'
        root.after(1000, foo)
    foo()
    root.mainloop()
x = 0
def main():
    """Demo: two widgets with tooltips, one updated every second."""
    root = tk.Tk()

    button = tk.Button(root, text="Hello", command=root.destroy)
    button.pack()
    label = tk.Label(root, text='my label')
    label.pack()

    button.tp = tooltip(button, "Hello world")
    label.tp = tooltip(label, "Hello world")

    # any property can be modified through the widget.tooltip reference
    button.tp.waittime = 100
    button.tp.text = 'new text'

    # dynamically change the tooltip text to mimic progress reporting
    label.counter = 0

    def tick():
        if label.counter < 100:
            label.counter += 1
        else:
            label.counter = 0
        label.tp.update_tooltip('Progress: ' + str(label.counter) + '%')
        root.after(1000, tick)

    tick()
    root.mainloop()
if __name__ == '__main__':
main() | PypiClean |
/AFQ-Browser-0.3.tar.gz/AFQ-Browser-0.3/afqbrowser/site/client/js/third-party/dat.gui.min.js | !function(e,t){"object"==typeof exports&&"object"==typeof module?module.exports=t():"function"==typeof define&&define.amd?define([],t):"object"==typeof exports?exports.dat=t():e.dat=t()}(this,function(){return function(e){function t(o){if(n[o])return n[o].exports;var i=n[o]={exports:{},id:o,loaded:!1};return e[o].call(i.exports,i,i.exports,t),i.loaded=!0,i.exports}var n={};return t.m=e,t.c=n,t.p="",t(0)}([function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{default:e}}t.__esModule=!0;var i=n(1),r=o(i);t.default=r.default,e.exports=t.default},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{default:e}}t.__esModule=!0;var i=n(2),r=o(i),a=n(6),l=o(a),s=n(3),u=o(s),d=n(7),c=o(d),f=n(8),_=o(f),p=n(10),h=o(p),m=n(11),b=o(m),g=n(12),v=o(g),y=n(13),w=o(y),x=n(14),E=o(x),C=n(15),A=o(C),S=n(16),k=o(S),O=n(9),T=o(O),R=n(17),L=o(R);t.default={color:{Color:r.default,math:l.default,interpret:u.default},controllers:{Controller:c.default,BooleanController:_.default,OptionController:h.default,StringController:b.default,NumberController:v.default,NumberControllerBox:w.default,NumberControllerSlider:E.default,FunctionController:A.default,ColorController:k.default},dom:{dom:T.default},gui:{GUI:L.default},GUI:L.default},e.exports=t.default},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{default:e}}function i(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function r(e,t,n){Object.defineProperty(e,t,{get:function(){return"RGB"===this.__state.space?this.__state[t]:(h.recalculateRGB(this,t,n),this.__state[t])},set:function(e){"RGB"!==this.__state.space&&(h.recalculateRGB(this,t,n),this.__state.space="RGB"),this.__state[t]=e}})}function 
a(e,t){Object.defineProperty(e,t,{get:function(){return"HSV"===this.__state.space?this.__state[t]:(h.recalculateHSV(this),this.__state[t])},set:function(e){"HSV"!==this.__state.space&&(h.recalculateHSV(this),this.__state.space="HSV"),this.__state[t]=e}})}t.__esModule=!0;var l=n(3),s=o(l),u=n(6),d=o(u),c=n(4),f=o(c),_=n(5),p=o(_),h=function(){function e(){if(i(this,e),this.__state=s.default.apply(this,arguments),this.__state===!1)throw new Error("Failed to interpret color arguments");this.__state.a=this.__state.a||1}return e.prototype.toString=function(){return(0,f.default)(this)},e.prototype.toHexString=function(){return(0,f.default)(this,!0)},e.prototype.toOriginal=function(){return this.__state.conversion.write(this)},e}();h.recalculateRGB=function(e,t,n){if("HEX"===e.__state.space)e.__state[t]=d.default.component_from_hex(e.__state.hex,n);else{if("HSV"!==e.__state.space)throw new Error("Corrupted color state");p.default.extend(e.__state,d.default.hsv_to_rgb(e.__state.h,e.__state.s,e.__state.v))}},h.recalculateHSV=function(e){var t=d.default.rgb_to_hsv(e.r,e.g,e.b);p.default.extend(e.__state,{s:t.s,v:t.v}),p.default.isNaN(t.h)?p.default.isUndefined(e.__state.h)&&(e.__state.h=0):e.__state.h=t.h},h.COMPONENTS=["r","g","b","h","s","v","hex","a"],r(h.prototype,"r",2),r(h.prototype,"g",1),r(h.prototype,"b",0),a(h.prototype,"h"),a(h.prototype,"s"),a(h.prototype,"v"),Object.defineProperty(h.prototype,"a",{get:function(){return this.__state.a},set:function(e){this.__state.a=e}}),Object.defineProperty(h.prototype,"hex",{get:function(){return"HEX"!==!this.__state.space&&(this.__state.hex=d.default.rgb_to_hex(this.r,this.g,this.b)),this.__state.hex},set:function(e){this.__state.space="HEX",this.__state.hex=e}}),t.default=h,e.exports=t.default},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{default:e}}t.__esModule=!0;var i=n(4),r=o(i),a=n(5),l=o(a),s=[{litmus:l.default.isString,conversions:{THREE_CHAR_HEX:{read:function(e){var 
t=e.match(/^#([A-F0-9])([A-F0-9])([A-F0-9])$/i);return null!==t&&{space:"HEX",hex:parseInt("0x"+t[1].toString()+t[1].toString()+t[2].toString()+t[2].toString()+t[3].toString()+t[3].toString(),0)}},write:r.default},SIX_CHAR_HEX:{read:function(e){var t=e.match(/^#([A-F0-9]{6})$/i);return null!==t&&{space:"HEX",hex:parseInt("0x"+t[1].toString(),0)}},write:r.default},CSS_RGB:{read:function(e){var t=e.match(/^rgb\(\s*(.+)\s*,\s*(.+)\s*,\s*(.+)\s*\)/);return null!==t&&{space:"RGB",r:parseFloat(t[1]),g:parseFloat(t[2]),b:parseFloat(t[3])}},write:r.default},CSS_RGBA:{read:function(e){var t=e.match(/^rgba\(\s*(.+)\s*,\s*(.+)\s*,\s*(.+)\s*,\s*(.+)\s*\)/);return null!==t&&{space:"RGB",r:parseFloat(t[1]),g:parseFloat(t[2]),b:parseFloat(t[3]),a:parseFloat(t[4])}},write:r.default}}},{litmus:l.default.isNumber,conversions:{HEX:{read:function(e){return{space:"HEX",hex:e,conversionName:"HEX"}},write:function(e){return e.hex}}}},{litmus:l.default.isArray,conversions:{RGB_ARRAY:{read:function(e){return 3===e.length&&{space:"RGB",r:e[0],g:e[1],b:e[2]}},write:function(e){return[e.r,e.g,e.b]}},RGBA_ARRAY:{read:function(e){return 
4===e.length&&{space:"RGB",r:e[0],g:e[1],b:e[2],a:e[3]}},write:function(e){return[e.r,e.g,e.b,e.a]}}}},{litmus:l.default.isObject,conversions:{RGBA_OBJ:{read:function(e){return!!(l.default.isNumber(e.r)&&l.default.isNumber(e.g)&&l.default.isNumber(e.b)&&l.default.isNumber(e.a))&&{space:"RGB",r:e.r,g:e.g,b:e.b,a:e.a}},write:function(e){return{r:e.r,g:e.g,b:e.b,a:e.a}}},RGB_OBJ:{read:function(e){return!!(l.default.isNumber(e.r)&&l.default.isNumber(e.g)&&l.default.isNumber(e.b))&&{space:"RGB",r:e.r,g:e.g,b:e.b}},write:function(e){return{r:e.r,g:e.g,b:e.b}}},HSVA_OBJ:{read:function(e){return!!(l.default.isNumber(e.h)&&l.default.isNumber(e.s)&&l.default.isNumber(e.v)&&l.default.isNumber(e.a))&&{space:"HSV",h:e.h,s:e.s,v:e.v,a:e.a}},write:function(e){return{h:e.h,s:e.s,v:e.v,a:e.a}}},HSV_OBJ:{read:function(e){return!!(l.default.isNumber(e.h)&&l.default.isNumber(e.s)&&l.default.isNumber(e.v))&&{space:"HSV",h:e.h,s:e.s,v:e.v}},write:function(e){return{h:e.h,s:e.s,v:e.v}}}}}],u=void 0,d=void 0,c=function(){d=!1;var e=arguments.length>1?l.default.toArray(arguments):arguments[0];return l.default.each(s,function(t){if(t.litmus(e))return l.default.each(t.conversions,function(t,n){if(u=t.read(e),d===!1&&u!==!1)return d=u,u.conversionName=n,u.conversion=t,l.default.BREAK}),l.default.BREAK}),d};t.default=c,e.exports=t.default},function(e,t){"use strict";t.__esModule=!0,t.default=function(e,t){var n=e.__state.conversionName.toString(),o=Math.round(e.r),i=Math.round(e.g),r=Math.round(e.b),a=e.a,l=Math.round(e.h),s=e.s.toFixed(1),u=e.v.toFixed(1);if(t||"THREE_CHAR_HEX"===n||"SIX_CHAR_HEX"===n){for(var 
d=e.hex.toString(16);d.length<6;)d="0"+d;return"#"+d}return"CSS_RGB"===n?"rgb("+o+","+i+","+r+")":"CSS_RGBA"===n?"rgba("+o+","+i+","+r+","+a+")":"HEX"===n?"0x"+e.hex.toString(16):"RGB_ARRAY"===n?"["+o+","+i+","+r+"]":"RGBA_ARRAY"===n?"["+o+","+i+","+r+","+a+"]":"RGB_OBJ"===n?"{r:"+o+",g:"+i+",b:"+r+"}":"RGBA_OBJ"===n?"{r:"+o+",g:"+i+",b:"+r+",a:"+a+"}":"HSV_OBJ"===n?"{h:"+l+",s:"+s+",v:"+u+"}":"HSVA_OBJ"===n?"{h:"+l+",s:"+s+",v:"+u+",a:"+a+"}":"unknown format"},e.exports=t.default},function(e,t){"use strict";t.__esModule=!0;var n=Array.prototype.forEach,o=Array.prototype.slice,i={BREAK:{},extend:function(e){return this.each(o.call(arguments,1),function(t){var n=this.isObject(t)?Object.keys(t):[];n.forEach(function(n){this.isUndefined(t[n])||(e[n]=t[n])}.bind(this))},this),e},defaults:function(e){return this.each(o.call(arguments,1),function(t){var n=this.isObject(t)?Object.keys(t):[];n.forEach(function(n){this.isUndefined(e[n])&&(e[n]=t[n])}.bind(this))},this),e},compose:function(){var e=o.call(arguments);return function(){for(var t=o.call(arguments),n=e.length-1;n>=0;n--)t=[e[n].apply(this,t)];return t[0]}},each:function(e,t,o){if(e)if(n&&e.forEach&&e.forEach===n)e.forEach(t,o);else if(e.length===e.length+0){var i=void 0,r=void 0;for(i=0,r=e.length;i<r;i++)if(i in e&&t.call(o,e[i],i)===this.BREAK)return}else for(var a in e)if(t.call(o,e[a],a)===this.BREAK)return},defer:function(e){setTimeout(e,0)},debounce:function(e,t,n){var o=void 0;return function(){function i(){o=null,n||e.apply(r,a)}var r=this,a=arguments,l=n||!o;clearTimeout(o),o=setTimeout(i,t),l&&e.apply(r,a)}},toArray:function(e){return e.toArray?e.toArray():o.call(e)},isUndefined:function(e){return void 0===e},isNull:function(e){return null===e},isNaN:function(e){function t(t){return e.apply(this,arguments)}return t.toString=function(){return e.toString()},t}(function(e){return isNaN(e)}),isArray:Array.isArray||function(e){return e.constructor===Array},isObject:function(e){return 
e===Object(e)},isNumber:function(e){return e===e+0},isString:function(e){return e===e+""},isBoolean:function(e){return e===!1||e===!0},isFunction:function(e){return"[object Function]"===Object.prototype.toString.call(e)}};t.default=i,e.exports=t.default},function(e,t){"use strict";t.__esModule=!0;var n=void 0,o={hsv_to_rgb:function(e,t,n){var o=Math.floor(e/60)%6,i=e/60-Math.floor(e/60),r=n*(1-t),a=n*(1-i*t),l=n*(1-(1-i)*t),s=[[n,l,r],[a,n,r],[r,n,l],[r,a,n],[l,r,n],[n,r,a]][o];return{r:255*s[0],g:255*s[1],b:255*s[2]}},rgb_to_hsv:function(e,t,n){var o=Math.min(e,t,n),i=Math.max(e,t,n),r=i-o,a=void 0,l=void 0;return 0===i?{h:NaN,s:0,v:0}:(l=r/i,a=e===i?(t-n)/r:t===i?2+(n-e)/r:4+(e-t)/r,a/=6,a<0&&(a+=1),{h:360*a,s:l,v:i/255})},rgb_to_hex:function(e,t,n){var o=this.hex_with_component(0,2,e);return o=this.hex_with_component(o,1,t),o=this.hex_with_component(o,0,n)},component_from_hex:function(e,t){return e>>8*t&255},hex_with_component:function(e,t,o){return o<<(n=8*t)|e&~(255<<n)}};t.default=o,e.exports=t.default},function(e,t){"use strict";function n(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}t.__esModule=!0;var o=function(){function e(t,o){n(this,e),this.initialValue=t[o],this.domElement=document.createElement("div"),this.object=t,this.property=o,this.__onChange=void 0,this.__onFinishChange=void 0}return e.prototype.onChange=function(e){return this.__onChange=e,this},e.prototype.onFinishChange=function(e){return this.__onFinishChange=e,this},e.prototype.setValue=function(e){return this.object[this.property]=e,this.__onChange&&this.__onChange.call(this,e),this.updateDisplay(),this},e.prototype.getValue=function(){return this.object[this.property]},e.prototype.updateDisplay=function(){return this},e.prototype.isModified=function(){return this.initialValue!==this.getValue()},e}();t.default=o,e.exports=t.default},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{default:e}}function i(e,t){if(!(e instanceof 
t))throw new TypeError("Cannot call a class as a function")}function r(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}function a(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}t.__esModule=!0;var l=n(7),s=o(l),u=n(9),d=o(u),c=function(e){function t(n,o){function a(){s.setValue(!s.__prev)}i(this,t);var l=r(this,e.call(this,n,o)),s=l;return l.__prev=l.getValue(),l.__checkbox=document.createElement("input"),l.__checkbox.setAttribute("type","checkbox"),d.default.bind(l.__checkbox,"change",a,!1),l.domElement.appendChild(l.__checkbox),l.updateDisplay(),l}return a(t,e),t.prototype.setValue=function(t){var n=e.prototype.setValue.call(this,t);return this.__onFinishChange&&this.__onFinishChange.call(this,this.getValue()),this.__prev=this.getValue(),n},t.prototype.updateDisplay=function(){return this.getValue()===!0?(this.__checkbox.setAttribute("checked","checked"),this.__checkbox.checked=!0,this.__prev=!0):(this.__checkbox.checked=!1,this.__prev=!1),e.prototype.updateDisplay.call(this)},t}(s.default);t.default=c,e.exports=t.default},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{default:e}}function i(e){if("0"===e||a.default.isUndefined(e))return 0;var t=e.match(u);return a.default.isNull(t)?0:parseFloat(t[1])}t.__esModule=!0;var r=n(5),a=o(r),l={HTMLEvents:["change"],MouseEvents:["click","mousemove","mousedown","mouseup","mouseover"],KeyboardEvents:["keydown"]},s={};a.default.each(l,function(e,t){a.default.each(e,function(e){s[e]=t})});var u=/(\d+(\.\d+)?)px/,d={makeSelectable:function(e,t){void 0!==e&&void 
0!==e.style&&(e.onselectstart=t?function(){return!1}:function(){},e.style.MozUserSelect=t?"auto":"none",e.style.KhtmlUserSelect=t?"auto":"none",e.unselectable=t?"on":"off")},makeFullscreen:function(e,t,n){var o=n,i=t;a.default.isUndefined(i)&&(i=!0),a.default.isUndefined(o)&&(o=!0),e.style.position="absolute",i&&(e.style.left=0,e.style.right=0),o&&(e.style.top=0,e.style.bottom=0)},fakeEvent:function(e,t,n,o){var i=n||{},r=s[t];if(!r)throw new Error("Event type "+t+" not supported.");var l=document.createEvent(r);switch(r){case"MouseEvents":var u=i.x||i.clientX||0,d=i.y||i.clientY||0;l.initMouseEvent(t,i.bubbles||!1,i.cancelable||!0,window,i.clickCount||1,0,0,u,d,!1,!1,!1,!1,0,null);break;case"KeyboardEvents":var c=l.initKeyboardEvent||l.initKeyEvent;a.default.defaults(i,{cancelable:!0,ctrlKey:!1,altKey:!1,shiftKey:!1,metaKey:!1,keyCode:void 0,charCode:void 0}),c(t,i.bubbles||!1,i.cancelable,window,i.ctrlKey,i.altKey,i.shiftKey,i.metaKey,i.keyCode,i.charCode);break;default:l.initEvent(t,i.bubbles||!1,i.cancelable||!0)}a.default.defaults(l,o),e.dispatchEvent(l)},bind:function(e,t,n,o){var i=o||!1;return e.addEventListener?e.addEventListener(t,n,i):e.attachEvent&&e.attachEvent("on"+t,n),d},unbind:function(e,t,n,o){var i=o||!1;return e.removeEventListener?e.removeEventListener(t,n,i):e.detachEvent&&e.detachEvent("on"+t,n),d},addClass:function(e,t){if(void 0===e.className)e.className=t;else if(e.className!==t){var n=e.className.split(/ +/);n.indexOf(t)===-1&&(n.push(t),e.className=n.join(" ").replace(/^\s+/,"").replace(/\s+$/,""))}return d},removeClass:function(e,t){if(t)if(e.className===t)e.removeAttribute("class");else{var n=e.className.split(/ +/),o=n.indexOf(t);o!==-1&&(n.splice(o,1),e.className=n.join(" "))}else e.className=void 0;return d},hasClass:function(e,t){return new RegExp("(?:^|\\s+)"+t+"(?:\\s+|$)").test(e.className)||!1},getWidth:function(e){var t=getComputedStyle(e);return 
i(t["border-left-width"])+i(t["border-right-width"])+i(t["padding-left"])+i(t["padding-right"])+i(t.width)},getHeight:function(e){var t=getComputedStyle(e);return i(t["border-top-width"])+i(t["border-bottom-width"])+i(t["padding-top"])+i(t["padding-bottom"])+i(t.height)},getOffset:function(e){var t=e,n={left:0,top:0};if(t.offsetParent)do n.left+=t.offsetLeft,n.top+=t.offsetTop,t=t.offsetParent;while(t);return n},isActive:function(e){return e===document.activeElement&&(e.type||e.href)}};t.default=d,e.exports=t.default},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{default:e}}function i(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function r(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}function a(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}t.__esModule=!0;var l=n(7),s=o(l),u=n(9),d=o(u),c=n(5),f=o(c),_=function(e){function t(n,o,a){i(this,t);var l=r(this,e.call(this,n,o)),s=a,u=l;if(l.__select=document.createElement("select"),f.default.isArray(s)){var c={};f.default.each(s,function(e){c[e]=e}),s=c}return f.default.each(s,function(e,t){var n=document.createElement("option");n.innerHTML=t,n.setAttribute("value",e),u.__select.appendChild(n)}),l.updateDisplay(),d.default.bind(l.__select,"change",function(){var e=this.options[this.selectedIndex].value;u.setValue(e)}),l.domElement.appendChild(l.__select),l}return a(t,e),t.prototype.setValue=function(t){var n=e.prototype.setValue.call(this,t);return this.__onFinishChange&&this.__onFinishChange.call(this,this.getValue()),n},t.prototype.updateDisplay=function(){return 
d.default.isActive(this.__select)?this:(this.__select.value=this.getValue(),e.prototype.updateDisplay.call(this))},t}(s.default);t.default=_,e.exports=t.default},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{default:e}}function i(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function r(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}function a(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}t.__esModule=!0;var l=n(7),s=o(l),u=n(9),d=o(u),c=function(e){function t(n,o){function a(){u.setValue(u.__input.value)}function l(){u.__onFinishChange&&u.__onFinishChange.call(u,u.getValue())}i(this,t);var s=r(this,e.call(this,n,o)),u=s;return s.__input=document.createElement("input"),s.__input.setAttribute("type","text"),d.default.bind(s.__input,"keyup",a),d.default.bind(s.__input,"change",a),d.default.bind(s.__input,"blur",l),d.default.bind(s.__input,"keydown",function(e){13===e.keyCode&&this.blur()}),s.updateDisplay(),s.domElement.appendChild(s.__input),s}return a(t,e),t.prototype.updateDisplay=function(){return d.default.isActive(this.__input)||(this.__input.value=this.getValue()),e.prototype.updateDisplay.call(this)},t}(s.default);t.default=c,e.exports=t.default},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{default:e}}function i(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function r(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}function a(e,t){if("function"!=typeof t&&null!==t)throw new 
TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}function l(e){var t=e.toString();return t.indexOf(".")>-1?t.length-t.indexOf(".")-1:0}t.__esModule=!0;var s=n(7),u=o(s),d=n(5),c=o(d),f=function(e){function t(n,o,a){i(this,t);var s=r(this,e.call(this,n,o)),u=a||{};return s.__min=u.min,s.__max=u.max,s.__step=u.step,c.default.isUndefined(s.__step)?0===s.initialValue?s.__impliedStep=1:s.__impliedStep=Math.pow(10,Math.floor(Math.log(Math.abs(s.initialValue))/Math.LN10))/10:s.__impliedStep=s.__step,s.__precision=l(s.__impliedStep),s}return a(t,e),t.prototype.setValue=function(t){var n=t;return void 0!==this.__min&&n<this.__min?n=this.__min:void 0!==this.__max&&n>this.__max&&(n=this.__max),void 0!==this.__step&&n%this.__step!==0&&(n=Math.round(n/this.__step)*this.__step),e.prototype.setValue.call(this,n)},t.prototype.min=function(e){return this.__min=e,this},t.prototype.max=function(e){return this.__max=e,this},t.prototype.step=function(e){return this.__step=e,this.__impliedStep=e,this.__precision=l(e),this},t}(u.default);t.default=f,e.exports=t.default},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{default:e}}function i(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function r(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}function a(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}function l(e,t){var n=Math.pow(10,t);return 
Math.round(e*n)/n}t.__esModule=!0;var s=n(12),u=o(s),d=n(9),c=o(d),f=n(5),_=o(f),p=function(e){function t(n,o,a){function l(){var e=parseFloat(m.__input.value);_.default.isNaN(e)||m.setValue(e)}function s(){m.__onFinishChange&&m.__onFinishChange.call(m,m.getValue())}function u(){s()}function d(e){var t=b-e.clientY;m.setValue(m.getValue()+t*m.__impliedStep),b=e.clientY}function f(){c.default.unbind(window,"mousemove",d),c.default.unbind(window,"mouseup",f),s()}function p(e){c.default.bind(window,"mousemove",d),c.default.bind(window,"mouseup",f),b=e.clientY}i(this,t);var h=r(this,e.call(this,n,o,a));h.__truncationSuspended=!1;var m=h,b=void 0;return h.__input=document.createElement("input"),h.__input.setAttribute("type","text"),c.default.bind(h.__input,"change",l),c.default.bind(h.__input,"blur",u),c.default.bind(h.__input,"mousedown",p),c.default.bind(h.__input,"keydown",function(e){13===e.keyCode&&(m.__truncationSuspended=!0,this.blur(),m.__truncationSuspended=!1,s())}),h.updateDisplay(),h.domElement.appendChild(h.__input),h}return a(t,e),t.prototype.updateDisplay=function(){return this.__input.value=this.__truncationSuspended?this.getValue():l(this.getValue(),this.__precision),e.prototype.updateDisplay.call(this)},t}(u.default);t.default=p,e.exports=t.default},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{default:e}}function i(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function r(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}function a(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}function l(e,t,n,o,i){return 
o+(i-o)*((e-t)/(n-t))}t.__esModule=!0;var s=n(12),u=o(s),d=n(9),c=o(d),f=function(e){function t(n,o,a,s,u){function d(e){document.activeElement.blur(),c.default.bind(window,"mousemove",f),c.default.bind(window,"mouseup",_),f(e)}function f(e){e.preventDefault();var t=h.__background.getBoundingClientRect();return h.setValue(l(e.clientX,t.left,t.right,h.__min,h.__max)),!1}function _(){c.default.unbind(window,"mousemove",f),c.default.unbind(window,"mouseup",_),h.__onFinishChange&&h.__onFinishChange.call(h,h.getValue())}i(this,t);var p=r(this,e.call(this,n,o,{min:a,max:s,step:u})),h=p;return p.__background=document.createElement("div"),p.__foreground=document.createElement("div"),c.default.bind(p.__background,"mousedown",d),c.default.addClass(p.__background,"slider"),c.default.addClass(p.__foreground,"slider-fg"),p.updateDisplay(),p.__background.appendChild(p.__foreground),p.domElement.appendChild(p.__background),p}return a(t,e),t.prototype.updateDisplay=function(){var t=(this.getValue()-this.__min)/(this.__max-this.__min);return this.__foreground.style.width=100*t+"%",e.prototype.updateDisplay.call(this)},t}(u.default);t.default=f,e.exports=t.default},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{default:e}}function i(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function r(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}function a(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}t.__esModule=!0;var l=n(7),s=o(l),u=n(9),d=o(u),c=function(e){function t(n,o,a){i(this,t);var l=r(this,e.call(this,n,o)),s=l;return 
l.__button=document.createElement("div"),l.__button.innerHTML=void 0===a?"Fire":a,d.default.bind(l.__button,"click",function(e){return e.preventDefault(),s.fire(),!1}),d.default.addClass(l.__button,"button"),l.domElement.appendChild(l.__button),l}return a(t,e),t.prototype.fire=function(){this.__onChange&&this.__onChange.call(this),this.getValue().call(this.object),this.__onFinishChange&&this.__onFinishChange.call(this,this.getValue())},t}(s.default);t.default=c,e.exports=t.default},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{default:e}}function i(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function r(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}function a(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}function l(e,t,n,o){e.style.background="",g.default.each(y,function(i){e.style.cssText+="background: "+i+"linear-gradient("+t+", "+n+" 0%, "+o+" 100%); "})}function s(e){e.style.background="",e.style.cssText+="background: -moz-linear-gradient(top, #ff0000 0%, #ff00ff 17%, #0000ff 34%, #00ffff 50%, #00ff00 67%, #ffff00 84%, #ff0000 100%);",e.style.cssText+="background: -webkit-linear-gradient(top, #ff0000 0%,#ff00ff 17%,#0000ff 34%,#00ffff 50%,#00ff00 67%,#ffff00 84%,#ff0000 100%);",e.style.cssText+="background: -o-linear-gradient(top, #ff0000 0%,#ff00ff 17%,#0000ff 34%,#00ffff 50%,#00ff00 67%,#ffff00 84%,#ff0000 100%);",e.style.cssText+="background: -ms-linear-gradient(top, #ff0000 0%,#ff00ff 17%,#0000ff 34%,#00ffff 50%,#00ff00 67%,#ffff00 84%,#ff0000 100%);",e.style.cssText+="background: linear-gradient(top, #ff0000 0%,#ff00ff 17%,#0000ff 
34%,#00ffff 50%,#00ff00 67%,#ffff00 84%,#ff0000 100%);"}t.__esModule=!0;var u=n(7),d=o(u),c=n(9),f=o(c),_=n(2),p=o(_),h=n(3),m=o(h),b=n(5),g=o(b),v=function(e){function t(n,o){function a(e){h(e),f.default.bind(window,"mousemove",h),f.default.bind(window,"mouseup",u)}function u(){f.default.unbind(window,"mousemove",h),f.default.unbind(window,"mouseup",u),_()}function d(){var e=(0,m.default)(this.value);e!==!1?(y.__color.__state=e,y.setValue(y.__color.toOriginal())):this.value=y.__color.toString()}function c(){f.default.unbind(window,"mousemove",b),f.default.unbind(window,"mouseup",c),_()}function _(){y.__onFinishChange&&y.__onFinishChange.call(y,y.__color.toOriginal())}function h(e){e.preventDefault();var t=y.__saturation_field.getBoundingClientRect(),n=(e.clientX-t.left)/(t.right-t.left),o=1-(e.clientY-t.top)/(t.bottom-t.top);return o>1?o=1:o<0&&(o=0),n>1?n=1:n<0&&(n=0),y.__color.v=o,y.__color.s=n,y.setValue(y.__color.toOriginal()),!1}function b(e){e.preventDefault();var t=y.__hue_field.getBoundingClientRect(),n=1-(e.clientY-t.top)/(t.bottom-t.top);return n>1?n=1:n<0&&(n=0),y.__color.h=360*n,y.setValue(y.__color.toOriginal()),!1}i(this,t);var v=r(this,e.call(this,n,o));v.__color=new p.default(v.getValue()),v.__temp=new p.default(0);var y=v;v.domElement=document.createElement("div"),f.default.makeSelectable(v.domElement,!1),v.__selector=document.createElement("div"),v.__selector.className="selector",v.__saturation_field=document.createElement("div"),v.__saturation_field.className="saturation-field",v.__field_knob=document.createElement("div"),v.__field_knob.className="field-knob",v.__field_knob_border="2px solid ",v.__hue_knob=document.createElement("div"),v.__hue_knob.className="hue-knob",v.__hue_field=document.createElement("div"),v.__hue_field.className="hue-field",v.__input=document.createElement("input"),v.__input.type="text",v.__input_textShadow="0 1px 1px 
",f.default.bind(v.__input,"keydown",function(e){13===e.keyCode&&d.call(this)}),f.default.bind(v.__input,"blur",d),f.default.bind(v.__selector,"mousedown",function(){f.default.addClass(this,"drag").bind(window,"mouseup",function(){f.default.removeClass(y.__selector,"drag")})});var w=document.createElement("div");return g.default.extend(v.__selector.style,{width:"122px",height:"102px",padding:"3px",backgroundColor:"#222",boxShadow:"0px 1px 3px rgba(0,0,0,0.3)"}),g.default.extend(v.__field_knob.style,{position:"absolute",width:"12px",height:"12px",border:v.__field_knob_border+(v.__color.v<.5?"#fff":"#000"),boxShadow:"0px 1px 3px rgba(0,0,0,0.5)",borderRadius:"12px",zIndex:1}),g.default.extend(v.__hue_knob.style,{position:"absolute",width:"15px",height:"2px",borderRight:"4px solid #fff",zIndex:1}),g.default.extend(v.__saturation_field.style,{width:"100px",height:"100px",border:"1px solid #555",marginRight:"3px",display:"inline-block",cursor:"pointer"}),g.default.extend(w.style,{width:"100%",height:"100%",background:"none"}),l(w,"top","rgba(0,0,0,0)","#000"),g.default.extend(v.__hue_field.style,{width:"15px",height:"100px",border:"1px solid #555",cursor:"ns-resize",position:"absolute",top:"3px",right:"3px"}),s(v.__hue_field),g.default.extend(v.__input.style,{outline:"none",textAlign:"center",color:"#fff",border:0,fontWeight:"bold",textShadow:v.__input_textShadow+"rgba(0,0,0,0.7)"}),f.default.bind(v.__saturation_field,"mousedown",a),f.default.bind(v.__field_knob,"mousedown",a),f.default.bind(v.__hue_field,"mousedown",function(e){b(e),f.default.bind(window,"mousemove",b),f.default.bind(window,"mouseup",c)}),v.__saturation_field.appendChild(w),v.__selector.appendChild(v.__field_knob),v.__selector.appendChild(v.__saturation_field),v.__selector.appendChild(v.__hue_field),v.__hue_field.appendChild(v.__hue_knob),v.domElement.appendChild(v.__input),v.domElement.appendChild(v.__selector),v.updateDisplay(),v}return a(t,e),t.prototype.updateDisplay=function(){var 
e=(0,m.default)(this.getValue());if(e!==!1){var t=!1;g.default.each(p.default.COMPONENTS,function(n){if(!g.default.isUndefined(e[n])&&!g.default.isUndefined(this.__color.__state[n])&&e[n]!==this.__color.__state[n])return t=!0,{}},this),t&&g.default.extend(this.__color.__state,e)}g.default.extend(this.__temp.__state,this.__color.__state),this.__temp.a=1;var n=this.__color.v<.5||this.__color.s>.5?255:0,o=255-n;g.default.extend(this.__field_knob.style,{marginLeft:100*this.__color.s-7+"px",marginTop:100*(1-this.__color.v)-7+"px",backgroundColor:this.__temp.toHexString(),border:this.__field_knob_border+"rgb("+n+","+n+","+n+")"}),this.__hue_knob.style.marginTop=100*(1-this.__color.h/360)+"px",this.__temp.s=1,this.__temp.v=1,l(this.__saturation_field,"left","#fff",this.__temp.toHexString()),this.__input.value=this.__color.toString(),g.default.extend(this.__input.style,{backgroundColor:this.__color.toHexString(),color:"rgb("+n+","+n+","+n+")",textShadow:this.__input_textShadow+"rgba("+o+","+o+","+o+",.7)"})},t}(d.default),y=["-moz-","-o-","-webkit-","-ms-",""];t.default=v,e.exports=t.default},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{default:e}}function i(e,t,n){var o=document.createElement("li");return t&&o.appendChild(t),n?e.__ul.insertBefore(o,n):e.__ul.appendChild(o),e.onResize(),o}function r(e,t){var n=e.__preset_select[e.__preset_select.selectedIndex];t?n.innerHTML=n.value+"*":n.innerHTML=n.value}function a(e,t,n){if(n.__li=t,n.__gui=e,U.default.extend(n,{options:function(t){if(arguments.length>1){var o=n.__li.nextElementSibling;return n.remove(),s(e,n.object,n.property,{before:o,factoryArgs:[U.default.toArray(arguments)]})}if(U.default.isArray(t)||U.default.isObject(t)){var i=n.__li.nextElementSibling;return n.remove(),s(e,n.object,n.property,{before:i,factoryArgs:[t]})}},name:function(e){return n.__li.firstElementChild.firstElementChild.innerHTML=e,n},listen:function(){return n.__gui.listen(n),n},remove:function(){return 
n.__gui.remove(n),n}}),n instanceof N.default){var o=new B.default(n.object,n.property,{
min:n.__min,max:n.__max,step:n.__step});U.default.each(["updateDisplay","onChange","onFinishChange","step"],function(e){var t=n[e],i=o[e];n[e]=o[e]=function(){var e=Array.prototype.slice.call(arguments);return i.apply(o,e),t.apply(n,e)}}),z.default.addClass(t,"has-slider"),n.domElement.insertBefore(o.domElement,n.domElement.firstElementChild)}else if(n instanceof B.default){var i=function(t){if(U.default.isNumber(n.__min)&&U.default.isNumber(n.__max)){var o=n.__li.firstElementChild.firstElementChild.innerHTML,i=n.__gui.__listening.indexOf(n)>-1;n.remove();var r=s(e,n.object,n.property,{before:n.__li.nextElementSibling,factoryArgs:[n.__min,n.__max,n.__step]});return r.name(o),i&&r.listen(),r}return t};n.min=U.default.compose(i,n.min),n.max=U.default.compose(i,n.max)}else n instanceof O.default?(z.default.bind(t,"click",function(){z.default.fakeEvent(n.__checkbox,"click")}),z.default.bind(n.__checkbox,"click",function(e){e.stopPropagation()})):n instanceof R.default?(z.default.bind(t,"click",function(){z.default.fakeEvent(n.__button,"click")}),z.default.bind(t,"mouseover",function(){z.default.addClass(n.__button,"hover")}),z.default.bind(t,"mouseout",function(){z.default.removeClass(n.__button,"hover")})):n instanceof j.default&&(z.default.addClass(t,"color"),n.updateDisplay=U.default.compose(function(e){return t.style.borderLeftColor=n.__color.toString(),e},n.updateDisplay),n.updateDisplay());n.setValue=U.default.compose(function(t){return e.getRoot().__preset_select&&n.isModified()&&r(e.getRoot(),!0),t},n.setValue)}function l(e,t){var n=e.getRoot(),o=n.__rememberedObjects.indexOf(t.object);if(o!==-1){var i=n.__rememberedObjectIndecesToControllers[o];if(void 0===i&&(i={},n.__rememberedObjectIndecesToControllers[o]=i),i[t.property]=t,n.load&&n.load.remembered){var r=n.load.remembered,a=void 0;if(r[e.preset])a=r[e.preset];else{if(!r[Q])return;a=r[Q]}if(a[o]&&void 0!==a[o][t.property]){var l=a[o][t.property];t.initialValue=l,t.setValue(l)}}}}function s(e,t,n,o){if(void 
0===t[n])throw new Error('Object "'+t+'" has no property "'+n+'"');var r=void 0;if(o.color)r=new j.default(t,n);else{var s=[t,n].concat(o.factoryArgs);r=C.default.apply(e,s)}o.before instanceof S.default&&(o.before=o.before.__li),l(e,r),z.default.addClass(r.domElement,"c");var u=document.createElement("span");z.default.addClass(u,"property-name"),u.innerHTML=r.property;var d=document.createElement("div");d.appendChild(u),d.appendChild(r.domElement);var c=i(e,d,o.before);return z.default.addClass(c,oe.CLASS_CONTROLLER_ROW),r instanceof j.default?z.default.addClass(c,"color"):z.default.addClass(c,g(r.getValue())),a(e,c,r),e.__controllers.push(r),r}function u(e,t){return document.location.href+"."+t}function d(e,t,n){var o=document.createElement("option");o.innerHTML=t,o.value=t,e.__preset_select.appendChild(o),n&&(e.__preset_select.selectedIndex=e.__preset_select.length-1)}function c(e,t){t.style.display=e.useLocalStorage?"block":"none"}function f(e){var t=e.__save_row=document.createElement("li");z.default.addClass(e.domElement,"has-save"),e.__ul.insertBefore(t,e.__ul.firstChild),z.default.addClass(t,"save-row");var n=document.createElement("span");n.innerHTML=" ",z.default.addClass(n,"button gears");var o=document.createElement("span");o.innerHTML="Save",z.default.addClass(o,"button"),z.default.addClass(o,"save");var i=document.createElement("span");i.innerHTML="New",z.default.addClass(i,"button"),z.default.addClass(i,"save-as");var r=document.createElement("span");r.innerHTML="Revert",z.default.addClass(r,"button"),z.default.addClass(r,"revert");var a=e.__preset_select=document.createElement("select");if(e.load&&e.load.remembered?U.default.each(e.load.remembered,function(t,n){d(e,n,n===e.preset)}):d(e,Q,!1),z.default.bind(a,"change",function(){for(var t=0;t<e.__preset_select.length;t++)e.__preset_select[t].innerHTML=e.__preset_select[t].value;e.preset=this.value}),t.appendChild(a),t.appendChild(n),t.appendChild(o),t.appendChild(i),t.appendChild(r),q){var 
l=document.getElementById("dg-local-explain"),s=document.getElementById("dg-local-storage"),f=document.getElementById("dg-save-locally");f.style.display="block","true"===localStorage.getItem(u(e,"isLocal"))&&s.setAttribute("checked","checked"),c(e,l),z.default.bind(s,"change",function(){e.useLocalStorage=!e.useLocalStorage,c(e,l)})}var _=document.getElementById("dg-new-constructor");z.default.bind(_,"keydown",function(e){!e.metaKey||67!==e.which&&67!==e.keyCode||Z.hide()}),z.default.bind(n,"click",function(){_.innerHTML=JSON.stringify(e.getSaveObject(),void 0,2),Z.show(),_.focus(),_.select()}),z.default.bind(o,"click",function(){e.save()}),z.default.bind(i,"click",function(){var t=prompt("Enter a new preset name.");t&&e.saveAs(t)}),z.default.bind(r,"click",function(){e.revert()})}function _(e){function t(t){return t.preventDefault(),e.width+=i-t.clientX,e.onResize(),i=t.clientX,!1}function n(){z.default.removeClass(e.__closeButton,oe.CLASS_DRAG),z.default.unbind(window,"mousemove",t),z.default.unbind(window,"mouseup",n)}function o(o){return o.preventDefault(),i=o.clientX,z.default.addClass(e.__closeButton,oe.CLASS_DRAG),z.default.bind(window,"mousemove",t),z.default.bind(window,"mouseup",n),!1}var i=void 0;e.__resize_handle=document.createElement("div"),U.default.extend(e.__resize_handle.style,{width:"6px",marginLeft:"-3px",height:"200px",cursor:"ew-resize",position:"absolute"}),z.default.bind(e.__resize_handle,"mousedown",o),z.default.bind(e.__closeButton,"mousedown",o),e.domElement.insertBefore(e.__resize_handle,e.domElement.firstElementChild)}function p(e,t){e.domElement.style.width=t+"px",e.__save_row&&e.autoPlace&&(e.__save_row.style.width=t+"px"),e.__closeButton&&(e.__closeButton.style.width=t+"px")}function h(e,t){var n={};return U.default.each(e.__rememberedObjects,function(o,i){var r={},a=e.__rememberedObjectIndecesToControllers[i];U.default.each(a,function(e,n){r[n]=t?e.initialValue:e.getValue()}),n[i]=r}),n}function m(e){for(var 
t=0;t<e.__preset_select.length;t++)e.__preset_select[t].value===e.preset&&(e.__preset_select.selectedIndex=t)}function b(e){0!==e.length&&D.default.call(window,function(){b(e)}),U.default.each(e,function(e){e.updateDisplay()})}t.__esModule=!0;var g="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},v=n(18),y=o(v),w=n(19),x=o(w),E=n(20),C=o(E),A=n(7),S=o(A),k=n(8),O=o(k),T=n(15),R=o(T),L=n(13),B=o(L),M=n(14),N=o(M),H=n(16),j=o(H),P=n(21),D=o(P),V=n(22),F=o(V),I=n(9),z=o(I),G=n(5),U=o(G),X=n(23),K=o(X);y.default.inject(K.default);var Y="dg",J=72,W=20,Q="Default",q=function(){try{return"localStorage"in window&&null!==window.localStorage}catch(e){return!1}}(),Z=void 0,$=!0,ee=void 0,te=!1,ne=[],oe=function e(t){function n(){var e=o.getRoot();e.width+=1,U.default.defer(function(){e.width-=1})}var o=this,r=t||{};this.domElement=document.createElement("div"),this.__ul=document.createElement("ul"),this.domElement.appendChild(this.__ul),z.default.addClass(this.domElement,Y),this.__folders={},this.__controllers=[],this.__rememberedObjects=[],this.__rememberedObjectIndecesToControllers=[],this.__listening=[],r=U.default.defaults(r,{closeOnTop:!1,autoPlace:!0,width:e.DEFAULT_WIDTH}),r=U.default.defaults(r,{resizable:r.autoPlace,hideable:r.autoPlace}),U.default.isUndefined(r.load)?r.load={preset:Q}:r.preset&&(r.load.preset=r.preset),U.default.isUndefined(r.parent)&&r.hideable&&ne.push(this),r.resizable=U.default.isUndefined(r.parent)&&r.resizable,r.autoPlace&&U.default.isUndefined(r.scrollable)&&(r.scrollable=!0);var a=q&&"true"===localStorage.getItem(u(this,"isLocal")),l=void 0;if(Object.defineProperties(this,{parent:{get:function(){return r.parent}},scrollable:{get:function(){return r.scrollable}},autoPlace:{get:function(){return r.autoPlace}},closeOnTop:{get:function(){return r.closeOnTop}},preset:{get:function(){return 
o.parent?o.getRoot().preset:r.load.preset},set:function(e){o.parent?o.getRoot().preset=e:r.load.preset=e,m(this),o.revert()}},width:{get:function(){return r.width},set:function(e){r.width=e,p(o,e)}},name:{get:function(){return r.name},set:function(e){r.name=e,titleRowName&&(titleRowName.innerHTML=r.name)}},closed:{get:function(){return r.closed},set:function(t){r.closed=t,r.closed?z.default.addClass(o.__ul,e.CLASS_CLOSED):z.default.removeClass(o.__ul,e.CLASS_CLOSED),this.onResize(),o.__closeButton&&(o.__closeButton.innerHTML=t?e.TEXT_OPEN:e.TEXT_CLOSED)}},load:{get:function(){return r.load}},useLocalStorage:{get:function(){return a},set:function(e){q&&(a=e,e?z.default.bind(window,"unload",l):z.default.unbind(window,"unload",l),localStorage.setItem(u(o,"isLocal"),e))}}}),U.default.isUndefined(r.parent)){if(r.closed=!1,z.default.addClass(this.domElement,e.CLASS_MAIN),z.default.makeSelectable(this.domElement,!1),q&&a){o.useLocalStorage=!0;var s=localStorage.getItem(u(this,"gui"));s&&(r.load=JSON.parse(s))}this.__closeButton=document.createElement("div"),this.__closeButton.innerHTML=e.TEXT_CLOSED,z.default.addClass(this.__closeButton,e.CLASS_CLOSE_BUTTON),r.closeOnTop?(z.default.addClass(this.__closeButton,e.CLASS_CLOSE_TOP),this.domElement.insertBefore(this.__closeButton,this.domElement.childNodes[0])):(z.default.addClass(this.__closeButton,e.CLASS_CLOSE_BOTTOM),this.domElement.appendChild(this.__closeButton)),z.default.bind(this.__closeButton,"click",function(){o.closed=!o.closed})}else{void 0===r.closed&&(r.closed=!0);var d=document.createTextNode(r.name);z.default.addClass(d,"controller-name");var c=i(o,d),f=function(e){return 
e.preventDefault(),o.closed=!o.closed,!1};z.default.addClass(this.__ul,e.CLASS_CLOSED),z.default.addClass(c,"title"),z.default.bind(c,"click",f),r.closed||(this.closed=!1)}r.autoPlace&&(U.default.isUndefined(r.parent)&&($&&(ee=document.createElement("div"),z.default.addClass(ee,Y),z.default.addClass(ee,e.CLASS_AUTO_PLACE_CONTAINER),document.body.appendChild(ee),$=!1),ee.appendChild(this.domElement),z.default.addClass(this.domElement,e.CLASS_AUTO_PLACE)),this.parent||p(o,r.width)),this.__resizeHandler=function(){o.onResizeDebounced()},z.default.bind(window,"resize",this.__resizeHandler),z.default.bind(this.__ul,"webkitTransitionEnd",this.__resizeHandler),z.default.bind(this.__ul,"transitionend",this.__resizeHandler),z.default.bind(this.__ul,"oTransitionEnd",this.__resizeHandler),this.onResize(),r.resizable&&_(this),l=function(){q&&"true"===localStorage.getItem(u(o,"isLocal"))&&localStorage.setItem(u(o,"gui"),JSON.stringify(o.getSaveObject()))},this.saveToLocalStorageIfPossible=l,r.parent||n()};oe.toggleHide=function(){te=!te,U.default.each(ne,function(e){e.domElement.style.display=te?"none":""})},oe.CLASS_AUTO_PLACE="a",oe.CLASS_AUTO_PLACE_CONTAINER="ac",oe.CLASS_MAIN="main",oe.CLASS_CONTROLLER_ROW="cr",oe.CLASS_TOO_TALL="taller-than-window",oe.CLASS_CLOSED="closed",oe.CLASS_CLOSE_BUTTON="close-button",oe.CLASS_CLOSE_TOP="close-top",oe.CLASS_CLOSE_BOTTOM="close-bottom",oe.CLASS_DRAG="drag",oe.DEFAULT_WIDTH=245,oe.TEXT_CLOSED="Close Controls",oe.TEXT_OPEN="Open Controls",oe._keydownHandler=function(e){"text"===document.activeElement.type||e.which!==J&&e.keyCode!==J||oe.toggleHide()},z.default.bind(window,"keydown",oe._keydownHandler,!1),U.default.extend(oe.prototype,{add:function(e,t){return s(this,e,t,{factoryArgs:Array.prototype.slice.call(arguments,2)})},addColor:function(e,t){return s(this,e,t,{color:!0})},remove:function(e){this.__ul.removeChild(e.__li),this.__controllers.splice(this.__controllers.indexOf(e),1);var 
t=this;U.default.defer(function(){t.onResize()})},destroy:function(){this.autoPlace&&ee.removeChild(this.domElement),z.default.unbind(window,"keydown",oe._keydownHandler,!1),z.default.unbind(window,"resize",this.__resizeHandler),this.saveToLocalStorageIfPossible&&z.default.unbind(window,"unload",this.saveToLocalStorageIfPossible)},addFolder:function(e){if(void 0!==this.__folders[e])throw new Error('You already have a folder in this GUI by the name "'+e+'"');var t={name:e,parent:this};t.autoPlace=this.autoPlace,this.load&&this.load.folders&&this.load.folders[e]&&(t.closed=this.load.folders[e].closed,t.load=this.load.folders[e]);var n=new oe(t);this.__folders[e]=n;var o=i(this,n.domElement);return z.default.addClass(o,"folder"),n},open:function(){this.closed=!1},close:function(){this.closed=!0},onResize:function(){var e=this.getRoot();if(e.scrollable){var t=z.default.getOffset(e.__ul).top,n=0;U.default.each(e.__ul.childNodes,function(t){e.autoPlace&&t===e.__save_row||(n+=z.default.getHeight(t))}),window.innerHeight-t-W<n?(z.default.addClass(e.domElement,oe.CLASS_TOO_TALL),e.__ul.style.height=window.innerHeight-t-W+"px"):(z.default.removeClass(e.domElement,oe.CLASS_TOO_TALL),e.__ul.style.height="auto")}e.__resize_handle&&U.default.defer(function(){e.__resize_handle.style.height=e.__ul.offsetHeight+"px"}),e.__closeButton&&(e.__closeButton.style.width=e.width+"px")},onResizeDebounced:U.default.debounce(function(){this.onResize()},50),remember:function(){if(U.default.isUndefined(Z)&&(Z=new F.default,Z.domElement.innerHTML=x.default),this.parent)throw new Error("You can only call remember on a top level GUI.");var e=this;U.default.each(Array.prototype.slice.call(arguments),function(t){0===e.__rememberedObjects.length&&f(e),e.__rememberedObjects.indexOf(t)===-1&&e.__rememberedObjects.push(t)}),this.autoPlace&&p(this,this.width)},getRoot:function(){for(var e=this;e.parent;)e=e.parent;return e},getSaveObject:function(){var e=this.load;return 
e.closed=this.closed,this.__rememberedObjects.length>0&&(e.preset=this.preset,e.remembered||(e.remembered={}),e.remembered[this.preset]=h(this)),e.folders={},U.default.each(this.__folders,function(t,n){e.folders[n]=t.getSaveObject()}),e},save:function(){this.load.remembered||(this.load.remembered={}),this.load.remembered[this.preset]=h(this),r(this,!1),this.saveToLocalStorageIfPossible()},saveAs:function(e){this.load.remembered||(this.load.remembered={},this.load.remembered[Q]=h(this,!0)),this.load.remembered[e]=h(this),this.preset=e,d(this,e,!0),this.saveToLocalStorageIfPossible()},revert:function(e){U.default.each(this.__controllers,function(t){this.getRoot().load.remembered?l(e||this.getRoot(),t):t.setValue(t.initialValue),t.__onFinishChange&&t.__onFinishChange.call(t,t.getValue())},this),U.default.each(this.__folders,function(e){e.revert(e)}),e||r(this.getRoot(),!1)},listen:function(e){var t=0===this.__listening.length;this.__listening.push(e),t&&b(this.__listening)},updateDisplay:function(){U.default.each(this.__controllers,function(e){e.updateDisplay()}),U.default.each(this.__folders,function(e){e.updateDisplay()})}}),t.default=oe,e.exports=t.default},function(e,t){"use strict";e.exports={load:function(e,t){var n=t||document,o=n.createElement("link");o.type="text/css",o.rel="stylesheet",o.href=e,n.getElementsByTagName("head")[0].appendChild(o)},inject:function(e,t){var n=t||document,o=document.createElement("style");o.type="text/css",o.innerHTML=e;var i=n.getElementsByTagName("head")[0];try{i.appendChild(o)}catch(e){}}}},function(e,t){e.exports="<div id=dg-save class=\"dg dialogue\"> Here's the new load parameter for your <code>GUI</code>'s constructor: <textarea id=dg-new-constructor></textarea> <div id=dg-save-locally> <input id=dg-local-storage type=checkbox /> Automatically save values to <code>localStorage</code> on exit. 
<div id=dg-local-explain>The values saved to <code>localStorage</code> will override those passed to <code>dat.GUI</code>'s constructor. This makes it easier to work incrementally, but <code>localStorage</code> is fragile, and your friends may not see the same values you do. </div> </div> </div>"},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{default:e}}t.__esModule=!0;var i=n(10),r=o(i),a=n(13),l=o(a),s=n(14),u=o(s),d=n(11),c=o(d),f=n(15),_=o(f),p=n(8),h=o(p),m=n(5),b=o(m),g=function(e,t){var n=e[t];return b.default.isArray(arguments[2])||b.default.isObject(arguments[2])?new r.default(e,t,arguments[2]):b.default.isNumber(n)?b.default.isNumber(arguments[2])&&b.default.isNumber(arguments[3])?b.default.isNumber(arguments[4])?new u.default(e,t,arguments[2],arguments[3],arguments[4]):new u.default(e,t,arguments[2],arguments[3]):b.default.isNumber(arguments[4])?new l.default(e,t,{min:arguments[2],max:arguments[3],step:arguments[4]}):new l.default(e,t,{min:arguments[2],max:arguments[3]}):b.default.isString(n)?new c.default(e,t):b.default.isFunction(n)?new _.default(e,t,""):b.default.isBoolean(n)?new h.default(e,t):null};t.default=g,e.exports=t.default},function(e,t){"use strict";function n(e){setTimeout(e,1e3/60)}t.__esModule=!0,t.default=window.requestAnimationFrame||window.webkitRequestAnimationFrame||window.mozRequestAnimationFrame||window.oRequestAnimationFrame||window.msRequestAnimationFrame||n,e.exports=t.default},function(e,t,n){"use strict";function o(e){return e&&e.__esModule?e:{default:e}}function i(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}t.__esModule=!0;var r=n(9),a=o(r),l=n(5),s=o(l),u=function(){function e(){i(this,e),this.backgroundElement=document.createElement("div"),s.default.extend(this.backgroundElement.style,{backgroundColor:"rgba(0,0,0,0.8)",top:0,left:0,display:"none",zIndex:"1000",opacity:0,WebkitTransition:"opacity 0.2s linear",transition:"opacity 0.2s 
linear"}),a.default.makeFullscreen(this.backgroundElement),this.backgroundElement.style.position="fixed",this.domElement=document.createElement("div"),s.default.extend(this.domElement.style,{position:"fixed",display:"none",zIndex:"1001",opacity:0,WebkitTransition:"-webkit-transform 0.2s ease-out, opacity 0.2s linear",transition:"transform 0.2s ease-out, opacity 0.2s linear"}),document.body.appendChild(this.backgroundElement),document.body.appendChild(this.domElement);var t=this;a.default.bind(this.backgroundElement,"click",function(){t.hide()})}return e.prototype.show=function(){var e=this;this.backgroundElement.style.display="block",this.domElement.style.display="block",this.domElement.style.opacity=0,this.domElement.style.webkitTransform="scale(1.1)",this.layout(),s.default.defer(function(){e.backgroundElement.style.opacity=1,e.domElement.style.opacity=1,e.domElement.style.webkitTransform="scale(1)"})},e.prototype.hide=function e(){var t=this,e=function e(){t.domElement.style.display="none",t.backgroundElement.style.display="none",a.default.unbind(t.domElement,"webkitTransitionEnd",e),a.default.unbind(t.domElement,"transitionend",e),a.default.unbind(t.domElement,"oTransitionEnd",e)};a.default.bind(this.domElement,"webkitTransitionEnd",e),a.default.bind(this.domElement,"transitionend",e),a.default.bind(this.domElement,"oTransitionEnd",e),this.backgroundElement.style.opacity=0,this.domElement.style.opacity=0,this.domElement.style.webkitTransform="scale(1.1)"},e.prototype.layout=function(){this.domElement.style.left=window.innerWidth/2-a.default.getWidth(this.domElement)/2+"px",this.domElement.style.top=window.innerHeight/2-a.default.getHeight(this.domElement)/2+"px"},e}();t.default=u,e.exports=t.default},function(e,t,n){t=e.exports=n(24)(),t.push([e.id,".dg ul{list-style:none;margin:0;padding:0;width:100%;clear:both}.dg.ac{position:fixed;top:0;left:0;right:0;height:0;z-index:0}.dg:not(.ac) .main{overflow:hidden}.dg.main{transition:opacity .1s 
linear}.dg.main.taller-than-window{overflow-y:auto}.dg.main.taller-than-window .close-button{opacity:1;margin-top:-1px;border-top:1px solid #2c2c2c}.dg.main ul.closed .close-button{opacity:1!important}.dg.main .close-button.drag,.dg.main:hover .close-button{opacity:1}.dg.main .close-button{transition:opacity .1s linear;border:0;line-height:19px;height:20px;cursor:pointer;text-align:center;background-color:#000}.dg.main .close-button.close-top{position:relative}.dg.main .close-button.close-bottom{position:absolute}.dg.main .close-button:hover{background-color:#111}.dg.a{float:right;margin-right:15px;overflow-y:visible}.dg.a.has-save>ul.close-top{margin-top:0}.dg.a.has-save>ul.close-bottom{margin-top:27px}.dg.a.has-save>ul.closed{margin-top:0}.dg.a .save-row{top:0;z-index:1002}.dg.a .save-row.close-top{position:relative}.dg.a .save-row.close-bottom{position:fixed}.dg li{transition:height .1s ease-out;transition:overflow .1s linear}.dg li:not(.folder){cursor:auto;height:27px;line-height:27px;padding:0 4px 0 5px}.dg li.folder{padding:0;border-left:4px solid transparent}.dg li.title{margin-left:-4px}.dg .closed li:not(.title),.dg .closed ul li,.dg .closed ul li>*{height:0;overflow:hidden;border:0}.dg .cr{clear:both;padding-left:3px;height:27px;overflow:hidden}.dg .property-name{cursor:default;float:left;clear:left;width:40%;overflow:hidden;text-overflow:ellipsis}.dg .c{float:left;width:60%;position:relative}.dg .c input[type=text]{border:0;margin-top:4px;padding:3px;width:100%;float:right}.dg .has-slider input[type=text]{width:30%;margin-left:0}.dg .slider{float:left;width:66%;margin-left:-5px;margin-right:0;height:19px;margin-top:4px}.dg .slider-fg{height:100%}.dg .c input[type=checkbox]{margin-top:7px}.dg .c select{margin-top:5px}.dg .cr.boolean,.dg .cr.boolean *,.dg .cr.function,.dg .cr.function *,.dg .cr.function .property-name{cursor:pointer}.dg .cr.color{overflow:visible}.dg .selector{display:none;position:absolute;margin-left:-9px;margin-top:23px;z-index:10}.dg 
.c:hover .selector,.dg .selector.drag{display:block}.dg li.save-row{padding:0}.dg li.save-row .button{display:inline-block;padding:0 6px}.dg.dialogue{background-color:#222;width:460px;padding:15px;font-size:13px;line-height:15px}#dg-new-constructor{padding:10px;color:#222;font-family:Monaco,monospace;font-size:10px;border:0;resize:none;box-shadow:inset 1px 1px 1px #888;word-wrap:break-word;margin:12px 0;display:block;width:440px;overflow-y:scroll;height:100px;position:relative}#dg-local-explain{display:none;font-size:11px;line-height:17px;border-radius:3px;background-color:#333;padding:8px;margin-top:10px}#dg-local-explain code{font-size:10px}#dat-gui-save-locally{display:none}.dg{color:#eee;font:11px Lucida Grande,sans-serif;text-shadow:0 -1px 0 #111}.dg.main::-webkit-scrollbar{width:5px;background:#1a1a1a}.dg.main::-webkit-scrollbar-corner{height:0;display:none}.dg.main::-webkit-scrollbar-thumb{border-radius:5px;background:#676767}.dg li:not(.folder){background:#1a1a1a;border-bottom:1px solid #2c2c2c}.dg li.save-row{line-height:25px;background:#dad5cb;border:0}.dg li.save-row select{margin-left:5px;width:108px}.dg li.save-row .button{margin-left:5px;margin-top:1px;border-radius:2px;font-size:9px;line-height:7px;padding:4px 4px 5px;background:#c5bdad;color:#fff;text-shadow:0 1px 0 #b0a58f;box-shadow:0 -1px 0 #b0a58f;cursor:pointer}.dg li.save-row .button.gears{background:#c5bdad url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAsAAAANCAYAAAB/9ZQ7AAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAQJJREFUeNpiYKAU/P//PwGIC/ApCABiBSAW+I8AClAcgKxQ4T9hoMAEUrxx2QSGN6+egDX+/vWT4e7N82AMYoPAx/evwWoYoSYbACX2s7KxCxzcsezDh3evFoDEBYTEEqycggWAzA9AuUSQQgeYPa9fPv6/YWm/Acx5IPb7ty/fw+QZblw67vDs8R0YHyQhgObx+yAJkBqmG5dPPDh1aPOGR/eugW0G4vlIoTIfyFcA+QekhhHJhPdQxbiAIguMBTQZrPD7108M6roWYDFQiIAAv6Aow/1bFwXgis+f2LUAynwoIaNcz8XNx3Dl7MEJUDGQpx9gtQ8YCueB+D26OECAAQDadt7e46D42QAAAABJRU5ErkJggg==) 2px 1px no-repeat;height:7px;width:8px}.dg li.save-row 
.button:hover{background-color:#bab19e;box-shadow:0 -1px 0 #b0a58f}.dg li.folder{border-bottom:0}.dg li.title{padding-left:16px;background:#000 url(data:image/gif;base64,R0lGODlhBQAFAJEAAP////Pz8////////yH5BAEAAAIALAAAAAAFAAUAAAIIlI+hKgFxoCgAOw==) 6px 10px no-repeat;cursor:pointer;border-bottom:1px solid hsla(0,0%,100%,.2)}.dg .closed li.title{background-image:url(data:image/gif;base64,R0lGODlhBQAFAJEAAP////Pz8////////yH5BAEAAAIALAAAAAAFAAUAAAIIlGIWqMCbWAEAOw==)}.dg .cr.boolean{border-left:3px solid #806787}.dg .cr.color{border-left:3px solid}.dg .cr.function{border-left:3px solid #e61d5f}.dg .cr.number{border-left:3px solid #2fa1d6}.dg .cr.number input[type=text]{color:#2fa1d6}.dg .cr.string{border-left:3px solid #1ed36f}.dg .cr.string input[type=text]{color:#1ed36f}.dg .cr.boolean:hover,.dg .cr.function:hover{background:#111}.dg .c input[type=text]{background:#303030;outline:none}.dg .c input[type=text]:hover{background:#3c3c3c}.dg .c input[type=text]:focus{background:#494949;color:#fff}.dg .c .slider{background:#303030;cursor:ew-resize}.dg .c .slider-fg{background:#2fa1d6;max-width:100%}.dg .c .slider:hover{background:#3c3c3c}.dg .c .slider:hover .slider-fg{background:#44abda}",""])},function(e,t){e.exports=function(){var e=[];return e.toString=function(){for(var e=[],t=0;t<this.length;t++){var n=this[t];n[2]?e.push("@media "+n[2]+"{"+n[1]+"}"):e.push(n[1])}return e.join("")},e.i=function(t,n){"string"==typeof t&&(t=[[null,t,""]]);for(var o={},i=0;i<this.length;i++){var r=this[i][0];"number"==typeof r&&(o[r]=!0)}for(i=0;i<t.length;i++){var a=t[i];"number"==typeof a[0]&&o[a[0]]||(n&&!a[2]?a[2]=n:n&&(a[2]="("+a[2]+") and ("+n+")"),e.push(a))}},e}}])}); | PypiClean |
/DTMC/spatialModel/PeriodicMovement/periodicSIR.py | import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from math import pi, cos, sin
from ..randomMovement.randMoveSIR import RandMoveSIR
from Eir.DTMC.spatialModel.simul_details import Simul_Details
from Eir.utility import Person2 as Person
from Eir.utility import randEvent, dist
class PeriodicSIR(RandMoveSIR):
    """Spatial SIR compartment model with periodic (angular) movement.

    Extends RandMoveSIR: instead of a pure random walk, every person carries
    an angle ``theta`` that is advanced each timestep by a draw from
    Normal(2*pi/k, std), so on average a person completes one sweep around
    its center in about ``k`` timesteps.

    Parameters mirror RandMoveSIR with two additions:
        k   : average number of timesteps per full revolution (theta step
              mean is 2*pi/k).
        std : standard deviation of the per-step theta increment.
    """

    def __init__(self, S0:int, I0:int, R0:int, gamma:float, planeSize:float, move_r:float, sigma_R:float, spread_r:float, sigma_r:float, days:int, w0=1.0, alpha=2.0, k=5, std=pi/2):
        """Validate inputs, initialize the base model, and create the three
        parallel person collections (S/I/R) sharing identical positions.

        Raises whatever ``floatCheck``/``negValCheck`` raise for invalid
        ``k``/``std`` values.
        """
        # Validate the two parameters this subclass adds before delegating.
        self.floatCheck(k, std)
        self.negValCheck(k, std)
        super().__init__(S0, I0, R0, gamma, planeSize, move_r, sigma_R, spread_r, sigma_r, days, w0=w0, alpha=alpha)
        self.k, self.std = k, std
        # Fresh Simul_Details object replaces the one built by the parent so
        # the state changes/locations recorded below start from a clean slate.
        self.details = Simul_Details(days, self.popsize)
        self.Scollect, self.Icollect, self.Rcollect = [], [], []
        # Per-person spreading radius and movement radius, both drawn from
        # normal distributions around the supplied means.
        spreading_r = np.random.normal(spread_r, sigma_r, S0+I0+R0)
        mvnt = np.random.normal(move_r, sigma_r, self.popsize)
        # generate the random x, y locations with every position within the plane being equally likely
        loc_x = np.random.random(S0+I0+R0) * planeSize
        loc_y = np.random.random(S0+I0+R0) * planeSize
        # create the special objects:
        for i in range(self.popsize):
            # Initial heading: one draw per person from the same distribution
            # used for the per-step increments in _move().
            theta = np.random.normal(2*pi/k, std)
            # create the person object
            # for this model, the people will move with random radius R each timestep
            # therefore, the R component can be made 0, as that is only relevant for the
            # periodic mobility model
            # Three identical copies: one for each of the S/I/R collections,
            # with membership toggled via isIncluded below.
            p1 = Person(loc_x[i], loc_y[i], mvnt[i], spreading_r[i], theta=theta)
            p2 = Person(loc_x[i], loc_y[i], mvnt[i], spreading_r[i], theta=theta)
            p3 = Person(loc_x[i], loc_y[i], mvnt[i], spreading_r[i], theta=theta)
            self.details.addLocation(0, (loc_x[i], loc_y[i]))
            # if the person is in the susceptible objects created
            if i < S0:
                p1.isIncluded = True
                self.details.addStateChange(i, "S", 0)
            elif S0 <= i < S0+I0:
                p2.isIncluded = True
                self.details.addStateChange(i, "I", 0)
            else:
                p3.isIncluded=True
                self.details.addStateChange(i, "R", 0)
            # append them to the data structure
            self.Scollect.append(p1)
            self.Icollect.append(p2)
            self.Rcollect.append(p3)
            # NOTE(review): this records the same day-0 position a second time
            # (already added above with loc_x[i]/loc_y[i]) -- confirm that
            # Simul_Details tolerates duplicate day-0 locations.
            self.details.addLocation(0, (p1.x, p1.y))

    def _move(self, day: int, collects: list):
        """
        Responsible for moving the locations of each Person in the simulation. Does it in place.

        Parameters
        ----------
        day: int
            The current day that the move is taking place on. Is important for the Simul_Details() object in order to keep track of the movement patterns each day.

        collects: list
            Contains all of the collection data structures that will be cycled through for the moves. This allows for easy object-oriented design.
        """
        # generate the random thetas from a normal distribution
        thetas = np.random.normal(2*pi/self.k, self.std, self.popsize)
        for index, person in enumerate(collects[0]):
            # adjust the theta current theta values in the object
            collects[0][index].theta += thetas[index]
            # adjust the x,y coordinate using polar coordinates
            # conduct the boundary check at the same time
            # person.h / person.k are presumably the orbit center's x/y
            # coordinates on the Person2 object -- TODO confirm.
            x = self._boundaryCheck(person.h + person.R * cos(collects[0][index].theta))
            y = self._boundaryCheck(person.k + person.R * sin(collects[0][index].theta))
            # add the new location to the Simul_Details object
            self.details.addLocation(day, (x,y))
            # change the x, y coordinates of every copy of person index in the other collections
            # NOTE(review): this loop includes j == 0, so collects[0]'s theta
            # is incremented twice per timestep while the other collections
            # get one increment -- the copies drift out of sync. Verify
            # whether the pre-increment above should be removed or j == 0
            # skipped here.
            for j, collect in enumerate(collects):
                collects[j][index].x = x
                collects[j][index].y = y
                collects[j][index].theta += thetas[index]

    # maybe add picking what to plot later
    def plot(self):
        "Plots the number of susceptible, infected, and recovered individuals on the y-axis and the number of days on the x-axis."
        t = np.linspace(0, self.days, self.days + 1)
        # Three stacked panels sharing the x-axis: S on top, I, then R.
        fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, sharex='all')
        ax1.plot(t, self.S, label="Susceptible", color='r')
        ax1.set_ylabel("# Susceptibles")
        ax1.set_title("Periodic Movement SIR Simulation")
        ax2.plot(t, self.I, label="Active Cases", color='b')
        ax2.set_ylabel("# Active Infections")
        ax3.set_xlabel("Days")
        ax3.set_ylabel("# Recovered")
        ax3.plot(t, self.R, label="Removed")
        ax1.legend()
        ax2.legend()
        ax3.legend()
        plt.show()
/CNVkit-0.9.10-py3-none-any.whl/cnvlib/scatter.py | import collections
import logging
import numpy as np
from matplotlib import pyplot
from skgenome.rangelabel import unpack_range
from . import core, params, plots
from .plots import MB
from .cnary import CopyNumArray as CNA
# Color palette shared by every scatter plot in this module.
HIGHLIGHT_COLOR = "gold"  # background shading for highlighted gene regions
POINT_COLOR = "#606060"  # bin / SNV scatter points (dark gray)
SEG_COLOR = "darkorange"  # default color for segment mean lines
TREND_COLOR = "#A0A0A0"  # trend lines and copy-number-neutral segments
def do_scatter(
    cnarr,
    segments=None,
    variants=None,
    show_range=None,
    show_gene=None,
    do_trend=False,
    by_bin=False,
    window_width=1e6,
    y_min=None,
    y_max=None,
    fig_size=None,
    antitarget_marker=None,
    segment_color=SEG_COLOR,
    title=None,
):
    """Plot probe log2 coverages and segmentation calls together.

    Dispatches to a whole-genome plot when neither `show_gene` nor
    `show_range` is given, otherwise to a single-chromosome/region plot.
    With `by_bin`, genomic coordinates are first translated to bin indices
    so sparsely and densely covered regions occupy equal horizontal space.

    Parameters
    ----------
    cnarr : CopyNumArray or None
        Bin-level log2 ratios plotted as points.
    segments : CopyNumArray or None
        Segmentation calls drawn as horizontal lines.
    variants : VariantArray or None
        SNVs for the allele-frequency panel.
    show_range : str or None
        Region selector (e.g. "chr1" or "chr1:1000-2000"); presumably parsed
        by `unpack_range` downstream.
    show_gene : str or None
        Comma-separated gene name(s) to highlight.
    fig_size : (float, float) or None
        Figure width/height in inches, applied after plotting.

    Returns
    -------
    matplotlib.figure.Figure
    """
    if by_bin:
        # Convert the window width from basepairs to (approximate) bins using
        # the genome-wide average bin span.
        bp_per_bin = sum(c.end.iat[-1] for _, c in cnarr.by_chromosome()) / len(cnarr)
        window_width /= bp_per_bin
        show_range_bins = plots.translate_region_to_bins(show_range, cnarr)
        cnarr, segments, variants = plots.update_binwise_positions(
            cnarr, segments, variants
        )
        # Coordinates are now bin indices, so disable the Mb scaling factor.
        # NOTE: mutating the module-level MB is not re-entrant; concurrent
        # calls with by_bin=True could interfere with each other.
        global MB
        orig_mb = MB
        MB = 1
    if not show_gene and not show_range:
        fig = genome_scatter(
            cnarr, segments, variants, do_trend, y_min, y_max, title, segment_color
        )
    else:
        if by_bin:
            show_range = show_range_bins
        fig = chromosome_scatter(
            cnarr,
            segments,
            variants,
            show_range,
            show_gene,
            antitarget_marker,
            do_trend,
            by_bin,
            window_width,
            y_min,
            y_max,
            title,
            segment_color,
        )
    if by_bin:
        # Reset to avoid permanently altering the value of cnvlib.scatter.MB
        MB = orig_mb
    if fig_size:
        width, height = fig_size
        fig.set_size_inches(w=width, h=height)
    return fig
# === Genome-level scatter plots ===
def genome_scatter(
    cnarr,
    segments=None,
    variants=None,
    do_trend=False,
    y_min=None,
    y_max=None,
    title=None,
    segment_color=SEG_COLOR,
):
    """Plot all chromosomes, concatenated on one plot.

    Layout depends on the available inputs: copy ratios plus variants give a
    two-panel figure (CN scatter on top, VAF below); copy ratios alone or
    variants alone give a single panel.

    Returns the matplotlib Figure containing the plot(s).
    """
    if (cnarr or segments) and variants:
        # Lay out top 3/5 for the CN scatter, bottom 2/5 for SNP plot
        axgrid = pyplot.GridSpec(5, 1, hspace=0.85)
        axis = pyplot.subplot(axgrid[:3])
        axis2 = pyplot.subplot(axgrid[3:], sharex=axis)
        # Place chromosome labels between the CNR and SNP plots
        axis2.tick_params(labelbottom=False)
        chrom_sizes = plots.chromosome_sizes(cnarr or segments)
        axis2 = snv_on_genome(
            axis2, variants, chrom_sizes, segments, do_trend, segment_color
        )
    else:
        _fig, axis = pyplot.subplots()
    if title is None:
        # Default to the sample ID of whichever input is available.
        title = (cnarr or segments or variants).sample_id
    if cnarr or segments:
        axis.set_title(title)
        axis = cnv_on_genome(
            axis, cnarr, segments, do_trend, y_min, y_max, segment_color
        )
    else:
        # Variants only: single VAF panel; chromosome extents come from the
        # variants themselves.
        axis.set_title(f"Variant allele frequencies: {title}")
        chrom_sizes = collections.OrderedDict(
            (chrom, subarr["end"].max()) for chrom, subarr in variants.by_chromosome()
        )
        axis = snv_on_genome(
            axis, variants, chrom_sizes, segments, do_trend, segment_color
        )
    return axis.get_figure()
def cnv_on_genome(
    axis,
    probes,
    segments,
    do_trend=False,
    y_min=None,
    y_max=None,
    segment_color=SEG_COLOR,
):
    """Plot bin ratios and/or segments for all chromosomes on one plot.

    Bin log2 ratios are drawn as gray points, segment means as horizontal
    lines colored by `choose_segment_color`. When `y_min`/`y_max` are not
    given (falsy), limits are derived from the segment values, avoiding
    chromosomes prone to spurious lows.

    Returns the same `axis`, for chaining.
    """
    # Configure axes etc.
    axis.axhline(color="k")
    axis.set_ylabel("Copy ratio (log2)")
    # NOTE(review): truthiness checks below treat an explicit 0 the same as
    # "not given"; this convention is used consistently in this module.
    if not (y_min and y_max):
        if segments:
            # Auto-scale y-axis according to segment mean-coverage values
            # (Avoid spuriously low log2 values in HLA and chrY)
            low_chroms = segments.chromosome.isin(("6", "chr6", "Y", "chrY"))
            seg_auto_vals = segments[~low_chroms]["log2"].dropna()
            if not y_min:
                y_min = (
                    np.nanmin([seg_auto_vals.min() - 0.2, -1.5])
                    if len(seg_auto_vals)
                    else -2.5
                )
            if not y_max:
                y_max = (
                    np.nanmax([seg_auto_vals.max() + 0.2, 1.5])
                    if len(seg_auto_vals)
                    else 2.5
                )
        else:
            if not y_min:
                y_min = -2.5
            if not y_max:
                y_max = 2.5
    axis.set_ylim(y_min, y_max)
    # Group probes by chromosome (to calculate plotting coordinates)
    if probes:
        chrom_sizes = plots.chromosome_sizes(probes)
        chrom_probes = dict(probes.by_chromosome())
        # Precalculate smoothing window size so all chromosomes have similar
        # degree of smoothness
        # NB: Target panel has ~1k bins/chrom. -> 250-bin window
        # Exome: ~10k bins/chrom. -> 2500-bin window
        # NOTE(review): window_size is computed but never used below --
        # smooth_log2() is called with its defaults. Confirm whether it was
        # meant to be passed to smooth_log2().
        window_size = int(round(0.15 * len(probes) / probes.chromosome.nunique()))
    else:
        chrom_sizes = plots.chromosome_sizes(segments)
    # Same for segment calls
    chrom_segs = dict(segments.by_chromosome()) if segments else {}
    # Plot points & segments
    x_starts = plots.plot_chromosome_dividers(axis, chrom_sizes)
    for chrom, x_offset in x_starts.items():
        if probes and chrom in chrom_probes:
            subprobes = chrom_probes[chrom]
            # x positions are bin midpoints shifted by the chromosome offset.
            x = 0.5 * (subprobes["start"] + subprobes["end"]) + x_offset
            axis.scatter(
                x,
                subprobes["log2"],
                marker=".",
                color=POINT_COLOR,
                edgecolor="none",
                alpha=0.2,
            )
            if do_trend:
                # ENH break trendline by chromosome arm boundaries?
                # Here and in subsequent occurrences, it's important to use snap=False
                # to avoid short lines/segment disappearing when saving as PNG.
                # See also: https://github.com/etal/cnvkit/issues/604
                axis.plot(
                    x,
                    subprobes.smooth_log2(),
                    color=POINT_COLOR,
                    linewidth=2,
                    zorder=-1,
                    snap=False,
                )
        if chrom in chrom_segs:
            for seg in chrom_segs[chrom]:
                color = choose_segment_color(seg, segment_color)
                axis.plot(
                    (seg.start + x_offset, seg.end + x_offset),
                    (seg.log2, seg.log2),
                    color=color,
                    linewidth=3,
                    solid_capstyle="round",
                    snap=False,
                )
    return axis
def snv_on_genome(axis, variants, chrom_sizes, segments, do_trend, segment_color):
    """Plot a scatter-plot of SNP chromosomal positions and shifts.

    Points are SNV allele frequencies (0..1) at chromosome-offset x
    positions; per-segment median VAF bands are overlaid when segments are
    given (or, with `do_trend`, one pseudo-segment per chromosome).

    Returns the same `axis`, for chaining.
    """
    axis.set_ylim(0.0, 1.0)
    axis.set_ylabel("VAF")
    x_starts = plots.plot_chromosome_dividers(axis, chrom_sizes)
    # Calculate the coordinates of plot components
    chrom_snvs = dict(variants.by_chromosome())
    if segments:
        chrom_segs = dict(segments.by_chromosome())
    elif do_trend:
        # Pretend a single segment covers each chromosome
        chrom_segs = {chrom: None for chrom in chrom_snvs}
    else:
        chrom_segs = {}
    for chrom, x_offset in x_starts.items():
        if chrom not in chrom_snvs:
            continue
        snvs = chrom_snvs[chrom]
        # Plot the points
        axis.scatter(
            snvs["start"].values + x_offset,
            snvs["alt_freq"].values,
            color=POINT_COLOR,
            edgecolor="none",
            alpha=0.2,
            marker=".",
        )
        # Trend bars: always calculated, only shown on request
        if chrom in chrom_segs:
            # Draw average VAF within each segment
            segs = chrom_segs[chrom]
            for seg, v_freq in get_segment_vafs(snvs, segs):
                if seg:
                    posn = [seg.start + x_offset, seg.end + x_offset]
                    color = choose_segment_color(
                        seg, segment_color, default_bright=False
                    )
                else:
                    # Pseudo-segment (do_trend without real segments): span
                    # the chromosome's first to last SNV, neutral color.
                    posn = [snvs.start.iat[0] + x_offset, snvs.start.iat[-1] + x_offset]
                    color = TREND_COLOR
                axis.plot(
                    posn,
                    [v_freq, v_freq],
                    color=color,
                    linewidth=2,
                    zorder=-1,
                    solid_capstyle="round",
                    snap=False,
                )
    return axis
# === Chromosome-level scatter plots ===
def chromosome_scatter(
    cnarr,
    segments,
    variants,
    show_range,
    show_gene,
    antitarget_marker,
    do_trend,
    by_bin,
    window_width,
    y_min,
    y_max,
    title,
    segment_color,
):
    """Plot a specified region on one chromosome.

    Possibilities::

             Options | Shown
        ------------ | --------
        -c      | -g | Genes | Region
        ------- | -- | ----- | ------
        -       | +  | given | auto: gene(s) + margin
        chr     | -  | none  | whole chrom
        chr     | +  | given | whole chrom
        chr:s-e | -  | all   | given
        chr:s-e | +  | given | given

    Returns the matplotlib Figure containing the plot(s).
    """
    # Resolve the selection to concrete data subsets and gene highlights.
    sel_probes, sel_segs, sel_snvs, window_coords, genes, chrom = select_range_genes(
        cnarr, segments, variants, show_range, show_gene, window_width
    )
    # Create plots
    # NOTE(review): if cnarr, segments and variants are all empty/None,
    # `axis` is never bound and set_title() below raises NameError -- callers
    # appear to guarantee at least one input.
    if cnarr or segments:
        # Plot CNVs at chromosome level
        if variants:
            # Lay out top 3/5 for the CN scatter, bottom 2/5 for SNP plot
            axgrid = pyplot.GridSpec(5, 1, hspace=0.5)
            axis = pyplot.subplot(axgrid[:3])
            axis2 = pyplot.subplot(axgrid[3:], sharex=axis)
            # Plot allele freqs for only the selected region
            snv_on_chromosome(
                axis2, sel_snvs, sel_segs, genes, do_trend, by_bin, segment_color
            )
        else:
            _fig, axis = pyplot.subplots()
        if by_bin:
            axis.set_xlabel("Position (bin)")
        else:
            axis.set_xlabel("Position (Mb)")
        axis = cnv_on_chromosome(
            axis,
            sel_probes,
            sel_segs,
            genes,
            antitarget_marker=antitarget_marker,
            do_trend=do_trend,
            x_limits=window_coords,
            y_min=y_min,
            y_max=y_max,
            segment_color=segment_color,
        )
    elif variants:
        # Only plot SNVs in a single-panel layout
        _fig, axis = pyplot.subplots()
        axis = snv_on_chromosome(
            axis, sel_snvs, sel_segs, genes, do_trend, by_bin, segment_color
        )
    if title is None:
        title = "%s %s" % ((cnarr or segments or variants).sample_id, chrom)
    axis.set_title(title)
    return axis.get_figure()
def select_range_genes(cnarr, segments, variants, show_range, show_gene, window_width):
    """Determine which datapoints to show based on the given options.

    Behaviors::

        start/end   show_gene
            +           +     given region + genes; err if any gene outside it
            -           +     window +/- around genes
            +           -     given region, highlighting any genes within it
            -           -     whole chromosome, no genes

    If `show_range` is a chromosome name only, no start/end positions, then the
    whole chromosome will be shown.

    If region start/end coordinates are given and `show_gene` is '' or ',' (or
    all commas, etc.), then instead of highlighting all genes in the selection,
    no genes will be highlighted.

    Parameters
    ----------
    cnarr : CopyNumArray or None
    segments : CopyNumArray or None
    variants : VariantArray or None
    show_range : str or None
        Region selector, e.g. "chr1" or "chr1:100-200".
    show_gene : str or None
        Comma-separated gene names; '' (or only commas) means "no genes".
    window_width : float
        Margin (in the current coordinate unit) added around selected genes.

    Returns
    -------
    tuple
        (sel_probes, sel_segs, sel_snvs, window_coords, gene_ranges, chrom)
    """
    chrom, start, end = unpack_range(show_range)
    if start is None and end is None:
        # Either the specified range is only chrom, no start-end, or gene names
        # were given
        window_coords = ()
    else:
        # Viewing region coordinates were specified -- take them as given
        # Fill in open-ended ranges' endpoints
        if start is None:
            start = 0
        elif start < 0:
            start = 0
        if not end:
            # Default selection endpoint to the maximum chromosome position
            end = (cnarr or segments or variants).filter(chromosome=chrom).end.iat[-1]
        if end <= start:
            raise ValueError(
                f"Coordinate range {chrom}:{start}-{end} (from {show_range}) "
                + "has size <= 0"
            )
        window_coords = (start, end)
    gene_ranges = []
    if show_gene is None:
        if window_coords:
            if cnarr:
                # Highlight all genes within the given range
                gene_ranges = plots.gene_coords_by_range(cnarr, chrom, start, end)[
                    chrom
                ]
            if not gene_ranges and (end - start) < 10 * window_width:
                # No genes in the selected region, so if the selection is small
                # (i.e. <80% of the displayed window, <10x window padding),
                # highlight the selected region itself.
                # (To prevent this, use show_gene='' or window_width=0)
                logging.info(
                    "No genes found in selection; will highlight the "
                    "selected region itself instead"
                )
                gene_ranges = [(start, end, "Selection")]
                window_coords = (max(0, start - window_width), end + window_width)
    else:
        # BUG FIX: previously `gene_names = filter(None, show_gene.split(","))`
        # followed by `if gene_names:` -- in Python 3 a filter object is
        # always truthy, so the branch below ran even for '' or ',' and
        # `gene_coords.popitem()` could fail on an empty selection.
        # Materialize the names so the emptiness check works as documented.
        gene_names = [name for name in show_gene.split(",") if name]
        if gene_names:
            # Scan for probes matching the specified gene(s)
            gene_coords = plots.gene_coords_by_name(cnarr or segments, gene_names)
            if len(gene_coords) > 1:
                raise ValueError(
                    f"Genes {show_gene} are split across chromosomes "
                    f"{list(gene_coords.keys())}"
                )
            g_chrom, gene_ranges = gene_coords.popitem()
            if chrom:
                # Confirm that the selected chromosomes match
                core.assert_equal(
                    "Chromosome also selected by region (-c) does not match",
                    **{"chromosome": chrom, "gene(s)": g_chrom},
                )
            else:
                chrom = g_chrom
            gene_ranges.sort()
            if window_coords:
                # Verify all genes fit in the given window
                for gene_start, gene_end, gene_name in gene_ranges:
                    if not (start <= gene_start and gene_end <= end):
                        raise ValueError(
                            f"Selected gene {gene_name} "
                            + f"({chrom}:{gene_start}-{gene_end}) "
                            + f"is outside specified region {show_range}"
                        )
            elif not show_range:
                # Set the display window to the selected genes +/- a margin
                window_coords = (
                    max(0, gene_ranges[0][0] - window_width),
                    gene_ranges[-1][1] + window_width,
                )
    # Prune plotted elements to the selected region
    sel_probes = cnarr.in_range(chrom, *window_coords) if cnarr else CNA([])
    sel_segs = (
        segments.in_range(chrom, *window_coords, mode="trim") if segments else CNA([])
    )
    sel_snvs = variants.in_range(chrom, *window_coords) if variants else None
    logging.info(
        "Showing %d probes and %d selected genes in region %s",
        len(sel_probes),
        len(gene_ranges),
        (chrom + ":{}-{}".format(*window_coords) if window_coords else chrom),
    )
    return sel_probes, sel_segs, sel_snvs, window_coords, gene_ranges, chrom
def cnv_on_chromosome(
    axis,
    probes,
    segments,
    genes,
    antitarget_marker=None,
    do_trend=False,
    x_limits=None,
    y_min=None,
    y_max=None,
    segment_color=SEG_COLOR,
):
    """Draw a scatter plot of probe values with optional segments overlaid.

    Parameters
    ----------
    axis : matplotlib.axes.Axes
    probes : CopyNumArray
        Bin-level log2 ratios for the plotted region.
    segments : CopyNumArray
        Segment calls for the plotted region.
    genes : list
        Of tuples: (start, end, gene name)
    antitarget_marker : str or None
        If given (and not "o"), antitarget bins are drawn separately with
        this marker; otherwise all bins share the "o" marker.
    x_limits : tuple or ()
        (start, end) in genomic coordinates; falsy means derive limits from
        the data.

    Returns the same `axis`, for chaining.
    """
    # TODO - allow plotting just segments without probes
    # Get scatter plot coordinates
    x = 0.5 * (probes["start"] + probes["end"]) * MB  # bin midpoints
    y = probes["log2"]
    if "weight" in probes:
        # Marker area scales with the square of the bin weight.
        w = 46 * probes["weight"] ** 2 + 2
    else:
        w = np.repeat(30, len(x))
    # Configure axes
    if not y_min:
        y_min = max(-5.0, min(y.min() - 0.1, -0.3)) if len(y) else -1.1
    if not y_max:
        # Leave extra headroom for gene labels when genes are highlighted.
        y_max = max(0.3, y.max() + (0.25 if genes else 0.1)) if len(y) else 1.1
    if x_limits:
        x_min, x_max = x_limits
        axis.set_xlim(x_min * MB, x_max * MB)
    else:
        set_xlim_from(axis, probes, segments)
    setup_chromosome(axis, y_min, y_max, "Copy ratio (log2)")
    if genes:
        highlight_genes(axis, genes, min(2.4, y.max() + 0.1) if len(y) else 0.1)
    if antitarget_marker in (None, "o"):
        # Plot targets and antitargets with the same marker
        axis.scatter(x, y, w, color=POINT_COLOR, alpha=0.4, marker="o")
    else:
        # Use the given marker to plot antitargets separately
        x_fg = []
        y_fg = []
        w_fg = []
        x_bg = []
        y_bg = []
        # w_bg = []
        is_bg = probes["gene"].isin(params.ANTITARGET_ALIASES)
        for x_pt, y_pt, w_pt, is_bg_pt in zip(x, y, w, is_bg):
            if is_bg_pt:
                x_bg.append(x_pt)
                y_bg.append(y_pt)
                # w_bg.append(w_pt)
            else:
                x_fg.append(x_pt)
                y_fg.append(y_pt)
                w_fg.append(w_pt)
        axis.scatter(x_fg, y_fg, w_fg, color=POINT_COLOR, alpha=0.4, marker="o")
        axis.scatter(x_bg, y_bg, color=POINT_COLOR, alpha=0.5, marker=antitarget_marker)
    # Add a local trend line
    if do_trend:
        axis.plot(
            x,
            probes.smooth_log2(),  # .1),
            color=POINT_COLOR,
            linewidth=2,
            zorder=-1,
            snap=False,
        )
    # Draw segments as horizontal lines
    if segments:
        for row in segments:
            color = choose_segment_color(row, segment_color)
            axis.plot(
                (row.start * MB, row.end * MB),
                (row.log2, row.log2),
                color=color,
                linewidth=4,
                solid_capstyle="round",
                snap=False,
            )
        # Warn about segments masked by default pruning at 'y_min=-5':
        hidden_seg = segments.log2 < y_min
        if hidden_seg.sum():
            logging.warning(
                "WARNING: With 'y_min=%s' %s segments are hidden"
                " --> Add parameter '--y-min %s' to see them",
                y_min,
                hidden_seg.sum(),
                int(np.floor(segments.log2.min())),
            )
            # Signal them as triangles crossing y-axis:
            x_hidden = segments.start[hidden_seg] * MB
            y_hidden = np.array([y_min] * len(x_hidden))
            axis.scatter(
                x_hidden,
                y_hidden,
                marker="^",
                linewidth=3,
                snap=False,
                color=segment_color,
                edgecolor="none",
                clip_on=False,
                zorder=10,
            )
    return axis
def snv_on_chromosome(axis, variants, segments, genes, do_trend, by_bin, segment_color):
    """Plot SNV allele frequencies for one chromosome/region.

    Points are VAFs at (Mb- or bin-scaled) positions; per-segment median VAF
    bands are overlaid when `segments` is given or `do_trend` is set, and
    selected genes are highlighted.

    Returns the same `axis`, for chaining.
    """
    # TODO set x-limits if not already done for probes/segments
    # set_xlim_from(axis, None, segments, variants)
    # setup_chromosome(axis, 0.0, 1.0, "VAF")
    axis.set_ylim(0.0, 1.0)
    axis.set_ylabel("VAF")
    if by_bin:
        axis.set_xlabel("Position (bin)")
    else:
        axis.set_xlabel("Position (Mb)")
    axis.get_yaxis().tick_left()
    axis.get_xaxis().tick_top()
    axis.tick_params(which="both", direction="out", labelbottom=False, labeltop=False)
    x_mb = variants["start"].values * MB
    y = variants["alt_freq"].values
    axis.scatter(x_mb, y, color=POINT_COLOR, alpha=0.3)
    if segments or do_trend:
        # Draw average VAF within each segment
        for seg, v_freq in get_segment_vafs(variants, segments):
            if seg:
                posn = [seg.start * MB, seg.end * MB]
                color = choose_segment_color(seg, segment_color, default_bright=False)
            else:
                # Pseudo-segment spanning first to last SNV, neutral color.
                posn = [variants.start.iat[0] * MB, variants.start.iat[-1] * MB]
                color = TREND_COLOR
            axis.plot(
                posn,
                [v_freq, v_freq],
                color=color,
                linewidth=2,
                zorder=1,
                solid_capstyle="round",
                snap=False,
            )
    if genes:
        highlight_genes(axis, genes, 0.9)
    return axis
def set_xlim_from(axis, probes=None, segments=None, variants=None):
    """Widen the x-axis to span all supplied data on one chromosome.

    Each of *probes*, *segments* and *variants* (when given and non-empty)
    should already be restricted to the plotted region; the limits become
    the union of their genomic extents, scaled by MB.

    Raises ValueError when no usable data points remain.
    """
    lo, hi = np.inf, 0
    for table in (probes, segments, variants):
        if table and len(table):
            lo = min(lo, table.start.iat[0])
            hi = max(hi, table.end.iat[-1])
    if hi <= lo:
        # An inverted extent from real data suggests unsorted input;
        # lo == inf simply means nothing was plottable.
        if lo != np.inf:
            logging.warning(
                "WARNING: selection start %s > end %s; did you "
                "correctly sort the input file by genomic "
                "location?",
                lo,
                hi,
            )
        raise ValueError(
            "No usable data points to plot out of "
            f"{len(probes) if probes else 0} probes, "
            f"{len(segments) if segments else 0} segments, "
            f"{len(variants) if variants else 0} variants"
        )
    axis.set_xlim(lo * MB, hi * MB)
def setup_chromosome(axis, y_min=None, y_max=None, y_label=None):
    """Configure axes for plotting a single chromosome's data.

    Parameters
    ----------
    axis : matplotlib.axes.Axes
    y_min, y_max : float or None
        Y-axis limits; both must be given for limits to be applied.
    y_label : str or None
        Y-axis label, applied when given.
    """
    # BUG FIX: previously `if y_min and y_max:` -- an explicit limit of 0
    # (e.g. a 0.0..1.0 VAF axis) was silently ignored because 0 is falsy.
    # Test against None so 0 is honored.
    if y_min is not None and y_max is not None:
        axis.set_ylim(y_min, y_max)
        if y_min < 0 < y_max:
            # Mark the neutral baseline only when it lies inside the view.
            axis.axhline(color="k")
    if y_label:
        axis.set_ylabel(y_label)
    axis.tick_params(which="both", direction="out")
    axis.get_xaxis().tick_bottom()
    axis.get_yaxis().tick_left()
# === Shared ===
def choose_segment_color(segment, highlight_color, default_bright=True):
    """Pick a display color reflecting the segment's CNA status.

    Uses the fields added by the 'call' command. Without that info, every
    segment gets `highlight_color` (or the neutral trend color when
    `default_bright` is false).

    For sex chromosomes, some single-copy deletions or gains might not be
    highlighted, since sample sex isn't used to infer the neutral ploidies.
    """
    if "cn" not in segment._fields:
        # No 'call' annotations on this segment -- fall back to the default.
        return highlight_color if default_bright else TREND_COLOR
    # Copy numbers considered neutral per chromosome (autosomes: 2).
    neutral_ploidies = {"chrY": (0, 1), "Y": (0, 1), "chrX": (1, 2), "X": (1, 2)}
    is_cna = segment.cn not in neutral_ploidies.get(segment.chromosome, [2])
    # Allelic imbalance / LOH: unequal allele copy numbers on an autosome.
    has_allele_info = "cn1" in segment._fields and "cn2" in segment._fields
    is_imbalanced = (
        segment.chromosome not in neutral_ploidies
        and has_allele_info
        and segment.cn1 != segment.cn2
    )
    if is_cna or is_imbalanced:
        return highlight_color
    return TREND_COLOR
def get_segment_vafs(variants, segments):
    """Group SNP allele frequencies by segment.

    Assume variants and segments were already subset to one chromosome.
    Without segments, the whole region is treated as a single pseudo-segment
    (paired with None).

    Yields
    ------
    tuple
        (segment, median VAF) -- once for the VAFs above 0.5 and once for
        those below, so each haplotype band gets its own trend value.
    """
    if segments:
        pairs = variants.by_ranges(segments)
    else:
        # Fake segments cover the whole region
        pairs = [(None, variants)]
    for segment, snvs in pairs:
        # ENH: snvs.tumor_boost()
        freqs = snvs["alt_freq"].values
        upper_band = freqs > 0.5
        for band in (upper_band, ~upper_band):
            # Require more than one point for a meaningful median.
            if band.sum() > 1:
                yield (segment, np.median(freqs[band]))
def highlight_genes(axis, genes, y_posn):
    """Shade each gene's region and write its name at height *y_posn*.

    Label size and rotation scale with the number of genes so adjacent
    labels stay legible as density increases.
    """
    ngenes = len(genes)
    text_size = "small" if ngenes <= 6 else "x-small"
    # Rotate labels progressively as gene density increases.
    for limit, rotation in ((3, "horizontal"), (6, 30), (10, 45), (20, 60)):
        if ngenes <= limit:
            text_rot = rotation
            break
    else:
        text_rot = "vertical"
    for gene_start, gene_end, gene_name in genes:
        # Highlight and label gene region
        # (rescale positions from bases to megabases)
        axis.axvspan(
            gene_start * MB, gene_end * MB, alpha=0.5, color=HIGHLIGHT_COLOR, zorder=-1
        )
        axis.text(
            0.5 * (gene_start + gene_end) * MB,
            y_posn,
            gene_name,
            horizontalalignment="center",
            rotation=text_rot,
            size=text_size,
        )
/Kotti-2.0.9.tar.gz/Kotti-2.0.9/docs/developing/advanced/close-to-anonymous.rst | .. _close-to-anonymous:
Close your site to anonymous users
==================================
This recipe describes how to configure Kotti to require users to log
in before they can view any of your site's pages.
To achieve this, we'll have to set our site's ACL. A custom populator
will help us do that (see :ref:`kotti.populators`).
Remember that the default site ACL gives ``view`` privileges to every
user, including anonymous (see :ref:`develop-security`). We'll thus
have to restrict the ``view`` permission to the ``viewer`` role:
.. code-block:: python
from kotti.resources import get_root
SITE_ACL = [
(u'Allow', u'role:viewer', [u'view']),
(u'Allow', u'role:editor', [u'view', u'add', u'edit']),
]
def populate():
site = get_root()
site.__acl__ = SITE_ACL
| PypiClean |
/HippodamiaAgent-0.1.12.tar.gz/HippodamiaAgent-0.1.12/hippodamia_agent/states/active.py | from hippodamia_agent.states.aagentstate import AAgentState
from hippodamia_agent.states.event_ids import event_ids
from asyncscheduler import AsyncScheduler
class Active(AAgentState):
    """Agent state representing normal operation.

    On entry, all request handlers are activated and periodic jobs for
    config/ping/runtime messages are scheduled; on exit, the scheduler is
    stopped and all handlers are deactivated again.

    All of the callback attributes below default to None (or 0 for the
    intervals) -- presumably the surrounding state machine injects the real
    callables before this state is entered. TODO confirm against the
    state-machine wiring.
    """

    # Outgoing-message callbacks and their repeat intervals (an interval of
    # 0 disables the corresponding periodic job in _on_entry).
    send_config = None
    send_config_interval = 0
    send_ping = None
    send_ping_interval = 0
    send_runtime = None
    send_runtime_interval = 0
    # Paired activate/deactivate hooks for each on-request handler;
    # activated on entry, deactivated on exit.
    activate_config_on_request = None
    deactivate_config_on_request = None
    activate_end_on_request = None
    deactivate_end_on_request = None
    activate_ping_on_request = None
    deactivate_ping_on_request = None
    activate_runtime_on_request = None
    deactivate_runtime_on_request = None
    activate_reonboarding_request = None
    deactivate_reonboarding_request = None
    activate_forward_logger = None
    deactivate_forward_logger = None
    activate_receive_heartbeat = None
    deactivate_receive_heartbeat = None
    # AsyncScheduler instance driving the periodic send jobs.
    _scheduler = None

    def __init__(self, id, logger, history, sigint):
        """Initialize the base state and create the job scheduler."""
        AAgentState.__init__(self, id, logger, history, sigint)
        self._scheduler = AsyncScheduler()

    def _on_entry(self):
        """Activate all handlers and start the periodic send jobs.

        Returns event_ids.SIGINT immediately when shutdown was requested,
        otherwise None (stay in this state).
        """
        if self.sigint.is_set():
            return event_ids.SIGINT
        # Logger forwarding first, so handler activity is already captured.
        self.activate_forward_logger()
        self.activate_runtime_on_request()
        self.activate_ping_on_request()
        self.activate_config_on_request()
        self.activate_end_on_request()
        self.activate_reonboarding_request()
        self.activate_receive_heartbeat()
        # Schedule each periodic message only when its interval is positive.
        # (Exact semantics of AsyncScheduler.repeat's second argument are
        # defined by the asyncscheduler package -- presumably a priority or
        # repeat parameter; confirm there.)
        self._scheduler.start()
        if self.send_config_interval > 0:
            self._scheduler.repeat(self.send_config_interval, 1, self.send_config)
        if self.send_runtime_interval > 0:
            self._scheduler.repeat(self.send_runtime_interval, 1, self.send_runtime)
        if self.send_ping_interval > 0:
            self._scheduler.repeat(self.send_ping_interval, 1, self.send_ping)
        return None

    def _on_exit(self):
        """Stop the scheduler and deactivate all handlers.

        Deactivation happens roughly in reverse activation order, with the
        logger forwarding torn down last.
        """
        self._scheduler.stop(wait=False)
        self.deactivate_runtime_on_request()
        self.deactivate_ping_on_request()
        self.deactivate_config_on_request()
        self.deactivate_end_on_request()
        self.deactivate_reonboarding_request()
        self.deactivate_receive_heartbeat()
        self.deactivate_forward_logger()
/DNBC4tools-2.1.0.tar.gz/DNBC4tools-2.1.0/dnbc4tools/rna/count.py | import os
from dnbc4tools.tools.utils import str_mkdir,logging_call,judgeFilexits,change_path,bin_path
from dnbc4tools.__init__ import __root_dir__
def get_barcode(raw, hex_barcode):
    """Load the raw barcode-count table and select barcodes by hex code.

    Reads the tab-separated counts file *raw* (columns: barcode, count),
    derives each barcode's hex representation via ``seq_comp``, and keeps
    the rows whose hex code appears in the *hex_barcode* list file.

    Returns (all_barcodes_df, selected_df).
    """
    from dnbc4tools.tools.utils import seq_comp
    import pandas as pd
    all_barcodes = pd.read_table(raw, sep='\t', header=None)
    all_barcodes.columns = ['barcode', 'count']
    # Hex form is what the selection file uses, so map it on for matching.
    all_barcodes["hex"] = all_barcodes["barcode"].map(seq_comp)
    with open(hex_barcode, 'r') as handle:
        wanted = [line.strip() for line in handle]
    selected = all_barcodes.loc[all_barcodes['hex'].isin(wanted)]
    return all_barcodes, selected
def matrix_summary(matrixpath,outdir,cellreport):
    """Summarize the filtered expression matrix and append cell metrics.

    Loads the matrix at *matrixpath*, writes it as
    ``<outdir>/filter_feature.h5ad``, computes per-cell QC metrics with
    scanpy, and appends UMI/gene summary lines to the CSV report at
    *cellreport* (opened in append mode -- lines are added after whatever
    the earlier pipeline steps wrote).
    """
    from dnbc4tools.tools.utils import read_anndata
    import scanpy as sc
    adata = read_anndata(matrixpath)
    adata.write("%s/filter_feature.h5ad"%outdir)
    # Populates adata.obs with n_genes_by_counts / total_counts in place.
    sc.pp.calculate_qc_metrics(
        adata,
        percent_top=None,
        log1p=False,
        inplace=True
    )
    total_gene = str(adata.var.shape[0])
    mean_gene = str(round(adata.obs['n_genes_by_counts'].mean()))
    median_gene = str(round(adata.obs['n_genes_by_counts'].median()))
    mean_umi = str(round(adata.obs['total_counts'].mean()))
    median_umi = str(round(adata.obs['total_counts'].median()))
    # Append the metrics in a fixed order; downstream report parsing
    # presumably relies on these exact labels.
    with open(cellreport,'a') as reportfile:
        reportfile.write('Mean UMI counts per cell,%s'%mean_umi+'\n')
        reportfile.write('Median UMI Counts per Cell,%s'%median_umi+'\n')
        reportfile.write('Total Genes Detected,%s'%total_gene+'\n')
        reportfile.write('Mean Genes per Cell,%s'%mean_gene+'\n')
        reportfile.write('Median Genes per Cell,%s'%median_gene+'\n')
class Count:
    """Pipeline step: cell calling, bead merging, and filtered-matrix
    generation for one sample (the ``02.count`` stage).
    """

    def __init__(self,args):
        """Capture the parsed command-line options for this sample.

        `args` is presumably an argparse.Namespace from the CLI -- only the
        attributes read below are required.
        """
        self.name = args.name
        self.threads = args.threads
        self.calling_method = args.calling_method
        self.expectcells = args.expectcells
        self.forcecells = args.forcecells
        self.minumi = args.minumi
        # Per-sample output directory: <outdir>/<name>, absolutized once here.
        self.outdir = os.path.abspath(os.path.join(args.outdir,args.name))
def run(self):
judgeFilexits(
'%s/01.data/final_sorted.bam'%self.outdir,
'%s/01.data/cDNA_barcode_counts_raw.txt'%self.outdir,
'%s/01.data/Index_reads.fq.gz'%self.outdir,
'%s/01.data/beads_stat.txt'%self.outdir,
'%s/01.data/raw_matrix'%self.outdir
)
str_mkdir('%s/02.count'%self.outdir)
str_mkdir('%s/log'%self.outdir)
str_mkdir('%s/log/.temp'%self.outdir)
os.environ[ 'MPLCONFIGDIR' ] = '%s/log/.temp'%self.outdir
os.environ[ 'NUMBA_CACHE_DIR' ] = '%s/log/.temp'%self.outdir
change_path()
bin_command = bin_path()
## cell calling using DropletUtils
print('\nCalling cell barcodes.')
# cellCalling_cmd = '%s/Rscript %s/rna/src/cell_calling.R --matrix %s/01.data/raw_matrix --outdir %s/02.count/ --method %s --expectcells %s --forcecells %s --minumi %s'\
# %(bin_command,__root_dir__,self.outdir,self.outdir,self.calling_method,self.expectcells,self.forcecells,self.minumi)
cellCalling_cmd = [
f"{bin_command}/Rscript",
f"{__root_dir__}/rna/src/cell_calling.R",
f"--matrix {self.outdir}/01.data/raw_matrix",
f"--outdir {self.outdir}/02.count/",
f"--method {self.calling_method}",
f"--expectcells {self.expectcells}",
f"--forcecells {self.forcecells}",
f"--minumi {self.minumi}"
]
cellCalling_cmd_str = " ".join(cellCalling_cmd)
logging_call(
cellCalling_cmd_str,'count',self.outdir
)
### get all barcode and select barcode
barcode_all,select_df = get_barcode(
'%s/01.data/cDNA_barcode_counts_raw.txt'%self.outdir,
'%s/02.count/beads_barcodes_hex.txt'%self.outdir
)
barcode_all['barcode'].to_csv(
os.path.join(self.outdir,'02.count/beads_barcode_all.txt'),
index=False,
header=False
)
select_df['barcode'].to_csv(
os.path.join(self.outdir,'02.count/beads_barcodes.txt'),
index=False,
header=False
)
print('\nCalculating bead similarity and merging beads..')
### using index reads to merge beads
# mergeBarcodes_cmd = '%s/software/mergeBarcodes -b %s/02.count/beads_barcode_all.txt -f %s/01.data/Index_reads.fq.gz -n %s -o %s/02.count/'\
# %(__root_dir__,self.outdir,self.outdir,self.name,self.outdir)
# similiarBeads_cmd = '%s/software/similarityOfBeads -n %s %s %s/02.count/%s_CB_UB_count.txt %s/02.count/beads_barcodes.txt %s/config/oligo_type8.txt %s/02.count/Similarity.all.csv %s/02.count/Similarity.droplet.csv %s/02.count/Similarity.droplet.filtered.csv'\
# %(__root_dir__,self.threads,self.name,self.outdir,self.name,self.outdir,__root_dir__,self.outdir,self.outdir,self.outdir)
mergeBarcodes_cmd = [
f"{__root_dir__}/software/mergeBarcodes",
f"-b {self.outdir}/02.count/beads_barcode_all.txt",
f"-f {self.outdir}/01.data/Index_reads.fq.gz",
f"-n {self.name}",
f"-o {self.outdir}/02.count/"
]
mergeBarcodes_cmd_str = " ".join(mergeBarcodes_cmd)
similiarBeads_cmd = [
f"{__root_dir__}/software/similarityOfBeads",
f"-n {self.threads}",
f"{self.name}",
f"{self.outdir}/02.count/{self.name}_CB_UB_count.txt",
f"{self.outdir}/02.count/beads_barcodes.txt",
f"{__root_dir__}/config/oligo_type8.txt",
f"{self.outdir}/02.count/Similarity.all.csv",
f"{self.outdir}/02.count/Similarity.droplet.csv",
f"{self.outdir}/02.count/Similarity.droplet.filtered.csv"
]
similiarBeads_cmd_str = " ".join(similiarBeads_cmd)
logging_call(mergeBarcodes_cmd_str,'count',self.outdir)
logging_call(similiarBeads_cmd_str,'count',self.outdir)
### merge beads list
from dnbc4tools.rna.src.combinedListOfBeads import similarity_droplet_file
similarity_droplet_file('%s/02.count/Similarity.droplet.csv'%self.outdir,
'%s/02.count/beads_barcodes.txt'%self.outdir,
'%s/02.count/combined_list.txt'%self.outdir)
print('\nCount the results of beads merging and generate statistics...')
### summary beads merge
from dnbc4tools.rna.src.cellMerge import summary_count
summary_count('%s/02.count/combined_list.txt'%self.outdir,
'%s/02.count/beads_barcodes.txt'%self.outdir,
'%s/01.data/beads_stat.txt'%self.outdir,
'%s/02.count/barcodeTranslate.txt'%self.outdir,
'%s/02.count/barcodeTranslate_hex.txt'%self.outdir,
'%s/02.count/cell.id'%self.outdir,
'%s/02.count/cellCount_report.csv'%self.outdir,
'%s/02.count'%self.outdir)
### add DB tag for bam
# tagAdd_cmd = '%s/software/tagAdd -n %s -bam %s/01.data/final_sorted.bam -file %s/02.count/barcodeTranslate_hex.txt -out %s/02.count/anno_decon_sorted.bam -tag_check CB:Z: -tag_add DB:Z: '\
# %(__root_dir__,self.threads,self.outdir,self.outdir,self.outdir)
tagAdd_cmd = [
f"{__root_dir__}/software/tagAdd",
f"-n {self.threads}",
f"-bam {self.outdir}/01.data/final_sorted.bam",
f"-file {self.outdir}/02.count/barcodeTranslate_hex.txt",
f"-out {self.outdir}/02.count/anno_decon_sorted.bam",
"-tag_check CB:Z:",
"-tag_add DB:Z:"
]
tagAdd_cmd_str = " ".join(tagAdd_cmd)
logging_call(tagAdd_cmd_str,'count',self.outdir)
### PISA count matrix
print('\nGenerating filter expression matrix.')
str_mkdir('%s/02.count/filter_matrix'%self.outdir)
# PISA_count_cmd = '%s/software/PISA count -one-hit -@ %s -cb DB -anno-tag GN -umi UB -list %s/02.count/cell.id -outdir %s/02.count/filter_matrix %s/02.count/anno_decon_sorted.bam'\
# %(__root_dir__,self.threads,self.outdir,self.outdir,self.outdir)
PISA_count_cmd = [
f"{__root_dir__}/software/PISA",
"count",
"-one-hit",
f"-@ {self.threads}",
"-cb DB",
"-anno-tag GN",
"-umi UB",
f"-list {self.outdir}/02.count/cell.id",
f"-outdir {self.outdir}/02.count/filter_matrix",
f"{self.outdir}/02.count/anno_decon_sorted.bam"
]
PISA_count_cmd_str = " ".join(PISA_count_cmd)
logging_call(PISA_count_cmd_str,'count',self.outdir)
### get cell report
matrix_summary('%s/02.count/filter_matrix'%self.outdir,
'%s/02.count'%self.outdir,
'%s/02.count/cellCount_report.csv'%self.outdir)
### get bam index
def create_index(threads,bam):
try:
bam_index_cmd = '%s/samtools index -@ %s %s'%(bin_command,threads,bam)
logging_call(bam_index_cmd,'count',self.outdir)
except Exception as e:
print('build csi index for bam')
bam_index_cmd = '%s/samtools index -c -@ %s %s'%(bin_command,threads,bam)
logging_call(bam_index_cmd,'count',self.outdir)
create_index(self.threads,'%s/02.count/anno_decon_sorted.bam'%self.outdir)
from dnbc4tools.rna.src.saturation import count_saturation
print('\ncalculate saturation.')
count_saturation('%s/02.count/anno_decon_sorted.bam'%self.outdir,
'%s/02.count/cellCount_report.csv'%self.outdir,
'%s/02.count'%self.outdir,
threads = self.threads,
quality=20
)
def count(args):
    """Command-line entry point: build the ``Count`` pipeline from the
    parsed arguments and execute it."""
    pipeline = Count(args)
    pipeline.run()
def helpInfo_count(parser):
    """Attach the ``count`` sub-command options to *parser* and return it.

    Fix: every option advertised as INT now carries ``type=int`` (previously
    ``--expectcells``, ``--forcecells`` and ``--minumi`` were parsed as plain
    strings when passed on the command line, while their defaults were ints).

    Parameters
    ----------
    parser : argparse.ArgumentParser
        The parser (or sub-parser) to extend.

    Returns
    -------
    argparse.ArgumentParser
        The same parser, to allow chaining.
    """
    parser.add_argument(
        '--name',
        metavar='NAME',
        help='sample name.'
    )
    parser.add_argument(
        '--threads',
        metavar='INT',
        help='Analysis threads. [default: 4].',
        type=int, default=4
    )
    parser.add_argument(
        '--outdir',
        metavar='DIR',
        help='output dir, [default: current directory].',
        default=os.getcwd()
    )
    parser.add_argument(
        '--calling_method',
        metavar='STR',
        help='Cell calling method, Choose from barcoderanks and emptydrops, [default: emptydrops].',
        default='emptydrops'
    )
    parser.add_argument(
        '--expectcells',
        metavar='INT',
        help='Expected number of recovered beads, used as input to cell calling algorithm, [default: 3000].',
        type=int, default=3000
    )
    parser.add_argument(
        '--forcecells',
        metavar='INT',
        help='Force pipeline to use this number of beads, bypassing cell calling algorithm.',
        type=int, default=0
    )
    parser.add_argument(
        '--minumi',
        metavar='INT',
        help='The min umi for use emptydrops, [default: 1000].',
        type=int, default=1000
    )
    return parser
/Django-ConfPages-0.1.1.tar.gz/Django-ConfPages-0.1.1/confpages/views.py | from __future__ import absolute_import
import requests
from django.views.generic import View
from django.shortcuts import render
from django.http import (
HttpResponse,
HttpResponseForbidden,
HttpResponseNotAllowed,
HttpResponseServerError
)
from django.template import Template, Context
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
try:
from django.utils.module_loading import import_string
except ImportError:
# `import_string` is introduced in Django 1.7,
# use the backup utility for older versions.
from .utils import import_string
from .conf import settings
from .token import check_token
class ConfPages(View):
    """The core view class for the configurable pages.

    GET renders the stored page content (optionally through a base template
    and a context fetched from a backend API); POST validates a one-time
    token and delegates the submitted form data to the backend API as JSON.
    """

    # The loader of pages, instantiated once at class-definition time from
    # the dotted path configured in settings.PAGE_LOADER
    page_loader = import_string(settings.PAGE_LOADER)()

    # The client of the backend API (the functional `requests` API)
    client = requests.api

    def render_content(self, content, is_static, api_url):
        """Render the content if it's non-static or if it contains
        any one-time token tag.
        """
        token_tag_string = '{% one_time_token %}'
        # A static page without a token tag needs no template processing:
        # return it verbatim.
        if is_static and token_tag_string not in content:
            return content

        # Get the context from the backend API if the page is non-static
        context = {}
        if not is_static and api_url:
            response = self.client.get(api_url)
            context = response.json()

        # Add the `load` tag if the page contains any one-time token tag
        if token_tag_string in content:
            content = '{% load confpages_tags %}\n' + content
        template = Template(content)
        return template.render(Context(context))

    def get(self, request, name):
        """Show the page whose name is `name`."""
        page = self.page_loader.get_page(name)
        rendered_content = self.render_content(page.content, page.is_static,
                                               page.api_url)
        # Without a base template the rendered content is the whole response;
        # otherwise it is embedded into the template as the `content` variable.
        if not page.base_template:
            return HttpResponse(rendered_content)
        else:
            context = {
                'name': page.name,
                'title': page.title,
                'content': rendered_content
            }
            return render(request, page.base_template, context)

    @method_decorator(csrf_exempt)
    def dispatch(self, request, *args, **kwargs):
        """Override to make dispatch() CSRF exempt."""
        return super(ConfPages, self).dispatch(request, *args, **kwargs)

    def post(self, request, name):
        """Handle the form submission by delegating the request to
        the backend API.

        Note:
            The data the API consumes or produces is encoded in JSON.
        """
        data = request.POST.dict().copy()
        # `_onetimetoken` and `_method` are control fields, not form data;
        # strip them before forwarding. `_method` allows HTML forms to fake
        # verbs other than POST.
        token = data.pop('_onetimetoken', None)
        method = data.pop('_method', None) or request.method

        # If the one-time token is not valid, reject the request
        is_valid, reason = check_token(token)
        if not is_valid:
            return HttpResponseForbidden(reason, content_type='text/html')

        # Only the API URL is needed here, so skip loading the full page body.
        page = self.page_loader.get_page(name, only_api_url=True)
        if not page.api_url:
            # No backend API configured: the page cannot accept submissions.
            return HttpResponseNotAllowed(['GET'])

        # Delegate the request to the API
        response = self.client.request(method, page.api_url, json=data)
        if response.status_code == 404:
            return HttpResponseServerError('The backend API can not be found')
        else:
            # Relay the API response (body, content type, status) verbatim.
            return HttpResponse(
                response.content,
                response.headers['Content-Type'],
                response.status_code
            )
/EOxServer-1.2.12-py3-none-any.whl/eoxserver/resources/coverages/management/commands/collection.py |
from django.core.management.base import CommandError, BaseCommand
from django.db import transaction
from eoxserver.resources.coverages import models
from eoxserver.resources.coverages.management.commands import (
CommandOutputMixIn, SubParserMixIn
)
class Command(CommandOutputMixIn, SubParserMixIn, BaseCommand):
    """ Command to manage collections. This command uses sub-commands for the
        specific tasks: create, delete, insert, exclude, purge, summary.
    """
    def add_arguments(self, parser):
        """ Register one sub-parser per sub-command plus their arguments. """
        create_parser = self.add_subparser(parser, 'create')
        delete_parser = self.add_subparser(parser, 'delete')
        insert_parser = self.add_subparser(parser, 'insert')
        exclude_parser = self.add_subparser(parser, 'exclude')
        purge_parser = self.add_subparser(parser, 'purge')
        summary_parser = self.add_subparser(parser, 'summary')

        parsers = [
            create_parser, insert_parser, exclude_parser,
            purge_parser, summary_parser
        ]

        # 'identifier' is a common argument for all sub-commands except
        # 'delete', where it is optional (see the separate definition below,
        # allowing it to be omitted when --all is given).
        for sub_parser in parsers:
            sub_parser.add_argument(
                'identifier', nargs=1, help='The collection identifier'
            )

        create_parser.add_argument(
            '--type', '-t', dest='type_name',
            help='The collection type name. Optional.'
        )
        create_parser.add_argument(
            '--grid', '-g', dest='grid_name', default=None,
            help='The optional grid name.'
        )
        create_parser.add_argument(
            '--set', '-s', dest='set_overrides',
            nargs=2, default=[], action='append',
            help=(
                'Set (or override) additional metadata tags like '
                '"platform".'
            )
        )

        delete_parser.add_argument(
            '--all', '-a', action="store_true",
            default=False, dest='all_collections',
            help=(
                'When this flag is set, all the collections are '
                'selected to be deregistered'
            )
        )
        delete_parser.add_argument(
            'identifier', default=None, nargs='?',
            help='The identifier of the collection to delete.'
        )

        # common arguments for insertion/exclusion
        insert_parser.add_argument(
            'object_identifiers', nargs='+',
            help=(
                'The identifiers of the objects (Product or Coverage) '
                'to insert'
            )
        )
        insert_parser.add_argument(
            '--use-extent', action='store_true', default=False,
            help=(
                "Whether to simply use the bounding box of the "
                "footprint as the collection's footprint"
            )
        )
        exclude_parser.add_argument(
            'object_identifiers', nargs='+',
            help=(
                'The identifiers of the objects (Product or Coverage) '
                'to exclude'
            )
        )
        exclude_parser.add_argument(
            '--use-extent', action='store_true', default=False,
            help=(
                "Whether to simply use the bounding box of the "
                "footprint as the collection's footprint"
            )
        )

        # Fix: '--no-products' previously wrote to 'coverage_summary' and
        # '--coverages' wrote to 'product_summary' (destinations swapped),
        # so the flags toggled the wrong summary kind.
        summary_parser.add_argument(
            '--products', action='store_true', default=True,
            dest='product_summary',
            help=('Collect summary product metadata. Default.')
        )
        summary_parser.add_argument(
            '--no-products', action='store_false', default=True,
            dest='product_summary',
            help=("Don't collect summary product metadata.")
        )
        summary_parser.add_argument(
            '--coverages', action='store_true', default=True,
            dest='coverage_summary',
            help=('Collect summary coverage metadata. Default.')
        )
        summary_parser.add_argument(
            '--no-coverages', action='store_false', default=True,
            dest='coverage_summary',
            help=("Don't collect summary coverage metadata.")
        )

    @transaction.atomic
    def handle(self, subcommand, identifier, *args, **kwargs):
        """ Dispatch sub-commands: create, delete, insert, exclude, purge,
            summary. All database work happens in a single transaction.
        """
        if subcommand == "create":
            self.handle_create(identifier[0], *args, **kwargs)
        elif subcommand == "delete":
            self.handle_delete(identifier, *args, **kwargs)
        elif subcommand == "insert":
            self.handle_insert(identifier[0], *args, **kwargs)
        elif subcommand == "exclude":
            self.handle_exclude(identifier[0], *args, **kwargs)
        elif subcommand == "purge":
            self.handle_purge(identifier[0], *args, **kwargs)
        elif subcommand == "summary":
            self.handle_summary(identifier[0], *args, **kwargs)

    def handle_create(self, identifier, type_name, grid_name, **kwargs):
        """ Handle the creation of a new collection.

            Raises CommandError when the referenced grid or collection type
            does not exist.
        """
        if grid_name:
            try:
                grid = models.Grid.objects.get(name=grid_name)
            except models.Grid.DoesNotExist:
                raise CommandError("Grid %r does not exist." % grid_name)
        else:
            grid = None

        collection_type = None
        if type_name:
            try:
                collection_type = models.CollectionType.objects.get(
                    name=type_name
                )
            except models.CollectionType.DoesNotExist:
                raise CommandError(
                    "Collection type %r does not exist." % type_name
                )

        models.Collection.objects.create(
            identifier=identifier,
            collection_type=collection_type, grid=grid
        )
        print('Successfully created collection %r' % identifier)

    def handle_delete(self, identifier, all_collections, *args, **kwargs):
        """ Handle the deletion of one collection (by identifier) or of all
            collections (--all).
        """
        if not all_collections and not identifier:
            raise CommandError('please specify a collection/s to remove')

        if all_collections:
            collections = models.Collection.objects.all()
        else:
            collections = [self.get_collection(identifier)]

        for collection in collections:
            try:
                # Keep the identifier around: it is unavailable after delete().
                collection_id = collection.identifier
                collection.delete()
                self.print_msg(
                    'Successfully deregistered collection %r'
                    % collection_id
                )
            except models.Collection.DoesNotExist:
                raise CommandError('No such Collection %r' % identifier)

    def _get_eo_objects(self, object_identifiers):
        """ Fetch the EOObjects for the given identifiers.

            Raises CommandError listing every identifier that could not be
            resolved.
        """
        objects = list(
            models.EOObject.objects.filter(
                identifier__in=object_identifiers
            ).select_subclasses()
        )

        if len(objects) != len(set(object_identifiers)):
            actual = set(obj.identifier for obj in objects)
            missing = set(object_identifiers) - actual
            # Fix: the plural suffix was '%s' % (len(missing) > 1), which
            # rendered as 'IDTrue'/'IDFalse' instead of 'IDs'/'ID'.
            raise CommandError(
                "No such object with ID%s: %s"
                % ("s" if len(missing) > 1 else "", ", ".join(missing))
            )
        return objects

    def handle_insert(self, identifier, object_identifiers, **kwargs):
        """ Handle the insertion of arbitrary objects into a collection.
        """
        collection = self.get_collection(identifier)
        for eo_object in self._get_eo_objects(object_identifiers):
            try:
                models.collection_insert_eo_object(
                    collection, eo_object, kwargs.get('use_extent', False)
                )
            except Exception as e:
                raise CommandError(
                    "Could not insert object %r into collection %r. "
                    "Error was: %s"
                    % (eo_object.identifier, collection.identifier, e)
                )
            print(
                'Successfully inserted object %r into collection %r'
                % (eo_object.identifier, collection.identifier)
            )

    def handle_exclude(self, identifier, object_identifiers, **kwargs):
        """ Handle the exclusion of arbitrary objects from a collection.
        """
        collection = self.get_collection(identifier)
        for eo_object in self._get_eo_objects(object_identifiers):
            try:
                models.collection_exclude_eo_object(
                    collection, eo_object, kwargs.get('use_extent', False)
                )
            except Exception as e:
                raise CommandError(
                    "Could not exclude object %r from collection %r. "
                    "Error was: %s"
                    % (eo_object.identifier, collection.identifier, e)
                )
            print(
                'Successfully excluded object %r from collection %r'
                % (eo_object.identifier, collection.identifier)
            )

    def handle_purge(self, identifier, **kwargs):
        """ Purging is not implemented yet; always raises CommandError. """
        # TODO: implement
        raise CommandError(
            "Could not purge collection %r: not implemented"
            % identifier
        )

    def handle_summary(self, identifier, product_summary, coverage_summary,
                       **kwargs):
        """ Collect product and/or coverage summary metadata for the
            collection.
        """
        models.collection_collect_metadata(
            self.get_collection(identifier),
            False, False, False, product_summary, coverage_summary
        )
        print('Successfully collected metadata for collection %r' % identifier)

    def get_collection(self, identifier):
        """ Helper method to get a collection by identifier or raise a
            CommandError.
        """
        try:
            return models.Collection.objects.get(identifier=identifier)
        except models.Collection.DoesNotExist:
            raise CommandError("Collection %r does not exist." % identifier)
/MetPyQC-0.1.1-py3-none-any.whl/metpyqc/spatial.py | import numpy as np
import pandas as pd
from tqdm import tqdm
from . import calculate as calc
from sklearn import linear_model
def hubbard_consistency(lat, lon, x, start_test, end_test, n_max, t_max, search_radius,
                        min_neigh, missing_perc, f, flag_val):
    r"""
    Hubbard spatial weighted regression analysis.

    Parameters
    ------------
    lat : array_like, shape(n,)
        Array of latitudes in decimal degrees
    lon : array_like, shape(n,)
        Array of longitudes in decimal degrees
    x : pd.DataFrame, shape(t,n)
        Pandas dataframe of observations, where t is time and n is the number of stations
    start_test: string
        Datetime string indicating when to start testing
    end_test: string
        Datetime string indicating when to end testing
    n_max : int
        Maximum number of best fit stations to use for the estimate
    t_max : int
        Number of time steps to be considered in the regression analysis (even number)
    search_radius: float
        Radius for Hubbard analysis in decimal degrees
    min_neigh: int
        Minimum number of neighbors to find the estimate
    missing_perc: int
        Maximum percentage of missing data to perform regression
    f : int
        Factor multiplying standard deviation for calculating the acceptable range for
        valid observations.
    flag_val : int
        Integer representing flag values to be associated to
        erroneous values.

    Returns
    -----------
    df_x_est: pd.DataFrame, shape(t,n)
        Estimated observations,
        filled with np.nan values where estimate is not possible
    df_std_est: pd.DataFrame, shape(t,n)
        Standard deviation from the estimated observations,
        filled with np.nan values where estimate is not possible
    flag : pd.DataFrame, shape(t,n)
        Dataframe with flags identifying values which fail the test.
    res : pd.DataFrame, shape(t,n)
        Dataframe with quantitative residuals from prescribed limits:
        positive values indicates wrong values.

    Notes
    -----
    The spatial weighted regression test is based on the algorithm proposed by [Hubbard2005]_.
    Firstly for each reference station :math:`(0)` the neighbour stations :math:`(n)` inside a certain
    `search_radius` are founded. This search radius should be set close to the average spacing
    of the observations and large enough to have at least one neighbour for each station.
    Once the neighbours have been established, if the number of missing values in each series
    is lower than missing_perc and n :math:`\ge` `n_max` , a linear regression is computed
    between their values :math:`x(t,n)` over all the selected time steps `t_max` and
    the reference station values :math:`x(t,0)`, in order to find a first estimate
    :math:`x^*_n(0)` of :math:`x(0)` that should be consistent with :math:`x(n)` at each time step.
    Then the root mean square error between the reference values :math:`x(0)` and
    the estimated values :math:`x^*_n(0)` from the regression line with the neighbor station :math:`n`
    (correspondent to the sample standard deviation of the residuals :math:`\sigma^*_n` )
    is evaluated in order to find a measure of the stations correlation:

    .. math::
        \sigma^*_n(0)=\sqrt{\frac{1}{t_{max}}\sum_{t=1}^{t_{max}}
        \big[\underbrace{x(t,0)-x^*_n(t,0)}_{\text{Residuals}}\big]^2}

    This error characterizing each neighbour station is used as weight in the final estimate
    of the reference value :math:`x^*(t,0)` and reference standard deviation :math:`\sigma^*(0)`
    from the surrounding stations at each instant t:

    .. math::
        x^*(t,0)=\frac{\sum_{n=1}^{n_{max}} (x^*_n)^2/(\sigma^*_n)^2}{\sum_{n=1}^{n_{max}} 1/\sigma_n^2} \\
        \sigma^{*2}(0)= \frac{n_{max}}{\sum_{n=1}^{n_{max}} 1/(\sigma^*_n)^2}

    Finally at each time step a tolerance interval is established by considering
    a constant factor `f` and the spatial consistency is verified by ensuring that:

    .. math::
        x^*(t,0)-f\sigma^*(0) < x(t,0) < x^*(t,0)+f\sigma^*(0)

    This procedure is repeated for each station and each time step. The final estimates
    :math:`x^*(t,n)` and reference standard deviation :math:`\sigma^*(n)` are given as results on the
    output dataframes `df_x_est` and `df_std_est`, respectively.

    References
    ----------
    .. [Hubbard2005] Hubbard, K. G., et al. "Performance of quality assurance procedures for an applied climate information system. "Journal of Atmospheric and Oceanic Technology 22.1 (2005): 105-112.
    """
    print('Minimum number of Neighbors: {}'.format(min_neigh))
    print('Search radius in decimal degrees: {}'.format(search_radius))
    t, n = x.shape
    # Map each timestamp of the index to its positional (integer) index,
    # so `start_test`/`end_test` labels can be converted to row numbers.
    time_ind = pd.Series(np.arange(0, t), x.index)
    # Pre-allocate the estimate/uncertainty arrays for the test window only.
    x_est = np.full(((time_ind[end_test] - time_ind[start_test]) + 1, n), np.nan)
    std_est = np.full(((time_ind[end_test] - time_ind[start_test]) + 1, n), np.nan)
    if t < t_max:
        print('Warning: Specified interval larger than time series length')
        print('Statistics could be not accurate')
    # -------------------------------------------------------------------#
    # Start cycle for each station
    # -------------------------------------------------------------------#
    for stat in tqdm(range(0, n)):
        # ----- Find neighbors inside a search radius -------------------#
        # The reference station itself is removed first, so `ind_neigh`
        # indexes into arrays with column `stat` deleted.
        ind_neigh = calc.find_neighbors(np.vstack((np.delete(lat, stat),
                                                   np.delete(lon, stat))).T,
                                        np.vstack((lat[stat], lon[stat])).T, search_radius)
        # ----- Start estimate only if there is a minimum number of neighbors -----#
        if len(ind_neigh) < min_neigh:
            print('\n Number of neighbors less than {} for station {}'.format(min_neigh, x.columns[stat]))
        else:
            x_neigh = np.delete(x.values, stat, axis=1)[:, ind_neigh]
            # ---------------------------------------------------------------#
            # Start cycle for selected time steps---------------------------#
            # ---------------------------------------------------------------#
            t1 = 0
            for time in range(time_ind[start_test], time_ind[end_test] + 1):
                # --------------------------------------------------------
                # Extracting time interval to consider for regression:
                # a window of (up to) t_max steps centred on `time`,
                # truncated at the series boundaries.
                # --------------------------------------------------------
                if time < t_max:
                    x_sel = x_neigh[:time + t_max, :]
                    x_stat = x.iloc[:time + t_max, stat].values
                elif time > (t - t_max):
                    x_sel = x_neigh[time - t_max:, :]
                    x_stat = x.iloc[time - t_max:, stat].values
                else:
                    x_sel = x_neigh[time - int(t_max / 2):time + int(t_max / 2), :]
                    x_stat = x.iloc[time - int(t_max / 2):time + int(t_max / 2), stat].values
                # --------------------------------------------------------
                # Ensuring a certain percentage of valid data and
                # a minimum number of valid neighbors
                # --------------------------------------------------------
                miss = (np.isnan(x_stat).sum() / (len(x_stat))) * 100
                miss_neigh = (np.isnan(x_sel).sum(axis=0) / (len(x_sel))) * 100
                ind_valid_neigh = np.where(miss_neigh < missing_perc)[0]
                if (miss <= missing_perc) & (len(ind_valid_neigh) >= min_neigh):
                    # ----------------------------------------------------
                    # Starting estimate ---------------------------------
                    # ----------------------------------------------------
                    y_est_t = np.full(ind_valid_neigh.shape, np.nan)  # Instantaneous estimate at the chosen time step
                    rmse = np.full(ind_valid_neigh.shape, np.nan)  # rmse evaluated considering the chosen interval
                    j = 0
                    # ---------------------------------------------------
                    # Iterate over each neighbor: regress the reference
                    # station on the neighbour over the window, then use
                    # the fit to predict the reference value at `time`.
                    # ---------------------------------------------------
                    for i in ind_valid_neigh:
                        mask_nan = ((np.isnan(x_sel[:, i])) | (np.isnan(x_stat)))
                        regr = linear_model.LinearRegression()
                        regr.fit(x_sel[:, i][~mask_nan].reshape(-1, 1), x_stat[~mask_nan])
                        # Local estimate of the selected station
                        # from the selected neighbors
                        if ~np.isnan(x_neigh[time, i]):
                            y_est_t[j] = regr.predict(x_neigh[time, i].reshape(-1, 1))
                        y_est_tot = regr.predict(x_sel[:, i][~mask_nan].reshape(-1, 1))
                        # Floor the RMSE at 1e-4 to avoid division by zero in
                        # the inverse-variance weights below.
                        rmse[j] = np.maximum(0.0001, np.sqrt(((y_est_tot - x_stat[~mask_nan]) ** 2).mean()))
                        j += 1
                    # ----------------------------------------------------
                    # Select only N best fit neighbors (lowest RMSE)
                    # ----------------------------------------------------
                    ind_sel_neigh = np.argsort(rmse)[:n_max]
                    # ----------------------------------------------------
                    # Remove nan estimates
                    # ----------------------------------------------------
                    mask_nan = np.isnan(y_est_t[ind_sel_neigh])
                    ind_sel_neigh = ind_sel_neigh[~mask_nan]
                    # ----------------------------------------------------
                    # Calculate final estimate only if enough
                    # neighbors estimate are present
                    # ----------------------------------------------------
                    if len(ind_sel_neigh) >= min_neigh:
                        # Divide into positive and negative contributions:
                        # the weighted mean is taken on squared estimates, so
                        # the sign must be restored (or the two signed parts
                        # combined) explicitly.
                        if np.all(np.sign(y_est_t[ind_sel_neigh]) == -1):
                            # Estimate all neg
                            x_est[t1, stat] = -1 * np.sqrt(
                                np.nansum(y_est_t[ind_sel_neigh] ** 2 / rmse[ind_sel_neigh] ** 2) / np.nansum(
                                    1 / rmse[ind_sel_neigh] ** 2))
                        elif np.all(np.sign(y_est_t[ind_sel_neigh]) >= 0):
                            # Estimate all pos
                            x_est[t1, stat] = np.sqrt(
                                np.nansum(y_est_t[ind_sel_neigh] ** 2 / rmse[ind_sel_neigh] ** 2) / np.nansum(
                                    1 / rmse[ind_sel_neigh] ** 2))
                        else:
                            # Estimate mixed type: weight positive and negative
                            # subsets separately (both normalized by the full
                            # weight sum, per the commented-out alternative
                            # above) and add the signed parts.
                            mask_pos = y_est_t[ind_sel_neigh] >= 0
                            # X_est_pos=np.sqrt(np.nansum(y_est_t[ind_sel_neigh][mask_pos]**2/rmse[ind_sel_neigh][mask_pos]**2)/np.nansum(1/rmse[ind_sel_neigh][mask_pos]**2))
                            # X_est_neg=-1*np.sqrt(np.nansum(y_est_t[ind_sel_neigh][~mask_pos]**2/rmse[ind_sel_neigh][~mask_pos]**2)/np.nansum(1/rmse[ind_sel_neigh][~mask_pos]**2))
                            x_est_pos = np.sqrt(np.nansum(
                                y_est_t[ind_sel_neigh][mask_pos] ** 2 / rmse[ind_sel_neigh][mask_pos] ** 2) / np.nansum(
                                1 / rmse[ind_sel_neigh] ** 2))
                            x_est_neg = -1 * np.sqrt(np.nansum(
                                y_est_t[ind_sel_neigh][~mask_pos] ** 2 / rmse[ind_sel_neigh][
                                    ~mask_pos] ** 2) / np.nansum(1 / rmse[ind_sel_neigh] ** 2))
                            x_est[t1, stat] = x_est_pos + x_est_neg
                        # Standard error
                        # NOTE(review): this evaluates sqrt(1 / mean(rmse^2))
                        # over the selected neighbours, while the docstring
                        # formula reads sigma*^2 = n_max / sum(1/sigma_n^2)
                        # (harmonic vs arithmetic mean of rmse^2) -- confirm
                        # which form is intended.
                        std_est[t1, stat] = np.sqrt(1 / np.nanmean(rmse[ind_sel_neigh] ** 2))
                t1 += 1
    # Saving pandas dataframe
    df_x_est = pd.DataFrame(x_est, columns=x.columns,
                            index=time_ind[start_test:end_test].index)
    df_std_est = pd.DataFrame(std_est, columns=x.columns,
                              index=time_ind[start_test:end_test].index)
    flag = pd.DataFrame(0, index=time_ind[start_test:end_test].index, columns=x.columns, )
    res = pd.DataFrame(0, index=time_ind[start_test:end_test].index, columns=x.columns, )
    # Tolerance interval: estimate +/- f standard deviations; observations
    # outside it are flagged.
    min_val = df_x_est - f * df_std_est
    max_val = df_x_est + f * df_std_est
    mask_spatial = ((x[start_test:end_test] > max_val) | (x[start_test:end_test] < min_val))
    flag[mask_spatial] = flag_val
    # Residual: signed distance from the nearer interval bound, so positive
    # values indicate observations outside the acceptable range.
    res_min = min_val - x[start_test:end_test]
    res_max = x[start_test:end_test] - max_val
    mask_res = (res_min >= res_max)
    res[mask_res] = res_min[mask_res]
    res[~mask_res] = res_max[~mask_res]
    return df_x_est, df_std_est, flag, res
/Discord%20Anti-Spam-1.8.1.tar.gz/Discord Anti-Spam-1.8.1/antispam/caches/redis/redis.py | from __future__ import annotations
import asyncio
import datetime
import logging
from copy import deepcopy
from typing import TYPE_CHECKING, List, AsyncIterable, Dict, cast
from attr import asdict
import orjson as json
from antispam.abc import Cache
from antispam.enums import ResetType
from antispam.exceptions import GuildNotFound, MemberNotFound
from antispam.dataclasses import Message, Member, Guild, Options
if TYPE_CHECKING:
from redis import asyncio as aioredis
from antispam import AntiSpamHandler
log = logging.getLogger(__name__)
class RedisCache(Cache):
    """
    A cache backend built to use Redis.

    Keys follow the scheme ``GUILD:<guild_id>`` and
    ``MEMBER:<guild_id>:<member_id>``; values are JSON blobs built from
    the attrs dataclasses via :func:`attr.asdict` and serialized with
    ``orjson``. Members are stored under their own keys, not inside the
    guild blob.

    Parameters
    ----------
    handler: AntiSpamHandler
        The AntiSpamHandler instance
    redis: redis.asyncio.Redis
        Your redis connection instance.
    """

    def __init__(self, handler: AntiSpamHandler, redis: aioredis.Redis):
        self.redis: aioredis.Redis = redis
        self.handler: AntiSpamHandler = handler

    async def get_guild(self, guild_id: int) -> Guild:
        """Return the cached guild, rebuilding nested dataclasses.

        Raises GuildNotFound when no ``GUILD:<id>`` key exists.
        """
        log.debug("Attempting to return cached Guild(id=%s)", guild_id)
        resp = await self.redis.get(f"GUILD:{guild_id}")
        if not resp:
            raise GuildNotFound

        as_json = json.loads(resp.decode("utf-8"))
        guild: Guild = Guild(**as_json)
        # This is actually a dict here (deserialization does not recurse),
        # so rebuild the Options dataclass explicitly.
        guild.options = cast(dict, guild.options)
        guild.options = Options(**guild.options)

        # Members live under their own MEMBER:* keys; re-attach them.
        guild_members: Dict[int, Member] = {}
        async for member in self.get_all_members(guild_id):
            guild_members[member.id] = member
        guild.members = guild_members
        return guild

    async def set_guild(self, guild: Guild) -> None:
        """Persist a guild and all of its members, replacing any previously
        cached member set for that guild.
        """
        log.debug("Attempting to set Guild(id=%s)", guild.id)
        guild = deepcopy(guild)  # Ensure idempotent
        # We do this to clear the 'old' guilds members
        await self._delete_members_for_guild(guild.id)
        members: List[Member] = list(guild.members.values())
        # Members are stored separately, so the guild blob itself carries
        # an empty member mapping.
        guild.members = {}
        iters = [self.set_member(m) for m in members]
        await asyncio.gather(*iters)

        as_json = json.dumps(asdict(guild, recurse=True))
        await self.redis.set(f"GUILD:{guild.id}", as_json)

    async def delete_guild(self, guild_id: int) -> None:
        """Remove a guild and all of its members from the cache."""
        log.debug("Attempting to delete Guild(id=%s)", guild_id)
        await self._delete_members_for_guild(guild_id)
        await self.redis.delete(f"GUILD:{guild_id}")

    async def get_member(self, member_id: int, guild_id: int) -> Member:
        """Return a cached member, rebuilding its Message dataclasses.

        Raises MemberNotFound when no matching key exists.
        """
        log.debug(
            "Attempting to return a cached Member(id=%s) for Guild(id=%s)",
            member_id,
            guild_id,
        )
        resp = await self.redis.get(f"MEMBER:{guild_id}:{member_id}")
        if not resp:
            raise MemberNotFound

        as_json = json.loads(resp.decode("utf-8"))
        member: Member = Member(**as_json)

        # Messages deserialize as plain dicts with ISO-format timestamps;
        # rebuild the Message dataclass and parse the datetime back.
        messages: List[Message] = []
        member.messages = cast(list, member.messages)
        for message in member.messages:
            message = Message(**message)
            message.creation_time = datetime.datetime.fromisoformat(
                message.creation_time  # type: ignore
            )
            messages.append(message)

        member.messages = messages
        return member

    async def set_member(self, member: Member) -> None:
        """Persist a member, creating a bare guild entry first when the
        guild is not cached yet.
        """
        log.debug(
            "Attempting to cache Member(id=%s) for Guild(id=%s)",
            member.id,
            member.guild_id,
        )
        if not await self._does_guild_exist(member.guild_id):
            guild = Guild(id=member.guild_id, options=self.handler.options)
            guild.members = {}
            guild_as_json = json.dumps(asdict(guild, recurse=True))
            await self.redis.set(f"GUILD:{guild.id}", guild_as_json)

        as_json = json.dumps(asdict(member, recurse=True))
        await self.redis.set(f"MEMBER:{member.guild_id}:{member.id}", as_json)

    async def delete_member(self, member_id: int, guild_id: int) -> None:
        """Remove a single member entry from the cache."""
        log.debug(
            "Attempting to delete Member(id=%s) in Guild(id=%s)", member_id, guild_id
        )
        await self.redis.delete(f"MEMBER:{guild_id}:{member_id}")

    async def add_message(self, message: Message) -> None:
        """Append a message to its author's cached record, creating the
        member (and implicitly the guild) when missing.
        """
        log.debug(
            "Attempting to add a Message(id=%s) to Member(id=%s) in Guild(id=%s)",
            message.id,
            message.author_id,
            message.guild_id,
        )
        try:
            member: Member = await self.get_member(message.author_id, message.guild_id)
        except (MemberNotFound, GuildNotFound):
            member: Member = Member(message.author_id, guild_id=message.guild_id)

        member.messages.append(message)
        await self.set_member(member)

    async def reset_member_count(
        self, member_id: int, guild_id: int, reset_type: ResetType
    ) -> None:
        """Zero a member's kick or warn counter; a missing member is a no-op."""
        log.debug(
            "Attempting to reset counts on Member(id=%s) in Guild(id=%s) with type %s",
            member_id,
            guild_id,
            reset_type.name,
        )
        try:
            member: Member = await self.get_member(member_id, guild_id)
        except (MemberNotFound, GuildNotFound):
            return

        if reset_type == ResetType.KICK_COUNTER:
            member.kick_count = 0
        else:
            member.warn_count = 0

        await self.set_member(member)

    async def drop(self) -> None:
        """Delete every cached guild (and, via delete_guild, every member)."""
        log.warning("Cache was just dropped")
        async for guild in self.get_all_guilds():
            await self.delete_guild(guild.id)

    async def get_all_guilds(self) -> AsyncIterable[Guild]:
        """Yield every cached guild.

        NOTE(review): ``redis.keys`` scans the whole keyspace and blocks the
        server on large datasets; ``scan_iter`` may be preferable — confirm.
        """
        log.debug("Yielding all cached guilds")
        keys: List[bytes] = await self.redis.keys("GUILD:*")
        for key in keys:
            # Key format is b"GUILD:<id>"; extract the numeric id.
            key = key.decode("utf-8").split(":")[1]
            yield await self.get_guild(int(key))

    async def get_all_members(self, guild_id: int) -> AsyncIterable[Member]:
        """Yield every member of a guild; raises GuildNotFound when the
        guild is not cached at all.
        """
        log.debug("Yielding all cached members for Guild(id=%s)", guild_id)
        if not await self._does_guild_exist(guild_id):
            raise GuildNotFound

        async for member in self._get_all_members(guild_id):
            yield member

    async def _get_all_members(self, guild_id: int) -> AsyncIterable[Member]:
        """This exists so we don't need to raise GuildNotFound when used internally."""
        keys: List[bytes] = await self.redis.keys(f"MEMBER:{guild_id}:*")
        for key in keys:
            # Key format is b"MEMBER:<guild_id>:<member_id>".
            key = key.decode("utf-8").split(":")[2]
            yield await self.get_member(int(key), guild_id)

    async def _does_guild_exist(self, guild_id: int) -> bool:
        # Existence check via GET; truthy response means the key is present.
        resp = await self.redis.get(f"GUILD:{guild_id}")
        return bool(resp)

    async def _delete_members_for_guild(self, guild_id: int):
        # Internal helper: silently does nothing when the guild is unknown.
        async for member in self._get_all_members(guild_id):
            await self.delete_member(member.id, member.guild_id)
/Flask-Script-2.0.6.tar.gz/Flask-Script-2.0.6/docs/index.rst | Flask-Script
======================================
.. module:: Flask-Script
The **Flask-Script** extension provides support for writing external scripts in Flask. This includes running a development server, a customised Python shell, scripts to set up your database, cronjobs, and other command-line tasks that belong outside the web application itself.
**Flask-Script** works in a similar way to Flask itself. You define and add commands that can be called from the command line to a ``Manager`` instance::
# manage.py
from flask_script import Manager
from myapp import app
manager = Manager(app)
@manager.command
def hello():
print "hello"
if __name__ == "__main__":
manager.run()
Once you define your script commands, you can then run them on the command line::
python manage.py hello
> hello
Source code and issue tracking at `GitHub`_.
Installing Flask-Script
------------------------
Install with **pip** and **easy_install**::
pip install Flask-Script
or download the latest version from version control::
git clone https://github.com/smurfix/flask-script.git
cd flask-script
python setup.py develop
If you are using **virtualenv**, it is assumed that you are installing **Flask-Script**
in the same virtualenv as your Flask application(s).
Creating and running commands
-----------------------------
The first step is to create a Python module to run your script commands in. You can call it
anything you like, for our examples we'll call it ``manage.py``.
You don't have to place all your commands in the same file; for example, in a larger project
with lots of commands you might want to split them into a number of files with related commands.
In your ``manage.py`` file you have to create a ``Manager`` instance. The ``Manager`` class
keeps track of all the commands and handles how they are called from the command line::
from flask_script import Manager
app = Flask(__name__)
# configure your app
manager = Manager(app)
if __name__ == "__main__":
manager.run()
Calling ``manager.run()`` prepares your ``Manager`` instance to receive input from the command line.
The ``Manager`` requires a single argument, a **Flask** instance. This may also be a function or other callable
that returns a **Flask** instance instead, if you want to use a factory pattern.
The next step is to create and add your commands. There are three methods for creating commands:
* subclassing the ``Command`` class
* using the ``@command`` decorator
* using the ``@option`` decorator
To take a very simple example, we want to create a ``hello`` command that just prints out "hello world". It
doesn't take any arguments so is very straightforward::
from flask_script import Command
class Hello(Command):
"prints hello world"
def run(self):
print "hello world"
Now the command needs to be added to our ``Manager`` instance, like the one created above::
manager.add_command('hello', Hello())
This of course needs to be called before ``manager.run``. Now in our command line::
python manage.py hello
> hello world
You can also pass the ``Command`` instance in a dict to ``manager.run()``::
manager.run({'hello' : Hello()})
The ``Command`` class must define a ``run`` method. The positional and optional arguments
depend on the command-line arguments you pass to the ``Command`` (see below).
To get a list of available commands and their descriptions, just run with no command::
python manage.py
To get help text for a particular command::
python manage.py runserver -?
This will print usage plus the docstring of the ``Command``.
This first method is probably the most flexible, but it's also the most verbose. For simpler commands you can use
the ``@command`` decorator, which belongs to the ``Manager`` instance::
@manager.command
def hello():
"Just say hello"
print "hello"
Commands created this way are run in exactly the same way as those created with the ``Command`` class::
python manage.py hello
> hello
As with the ``Command`` class, the docstring you use for the function will appear when you run with the ``-?`` or ``--help`` option::
python manage.py -?
> Just say hello
Finally, the ``@option`` decorator, again belonging to ``Manager`` can be used when you want more sophisticated
control over your commands::
@manager.option('-n', '--name', help='Your name')
def hello(name):
print "hello", name
The ``@option`` decorator is explained in more detail below.
*New in version 2.0*
Help was previously available with ``--help`` and ``-h``. This had a couple
of less-than-ideal consequences, among them the inability to use ``-h`` as
a shortcut for ``--host`` or similar options.
*New in version 2.0.2*
If you want to restore the original meaning of ``-h``, set your manager's
``help_args`` attribute to a list of argument strings you want to be
considered helpful::
manager = Manager()
manager.help_args = ('-h', '-?', '--help')
You can override this list in sub-commands and -managers::
def talker(host='localhost'):
pass
ccmd = ConnectCmd(talker)
ccmd.help_args = ('-?', '--help')
manager.add_command("connect", ccmd)
manager.run()
so that ``manager -h`` prints help, while ``manager connect -h fubar.example.com``
connects to a remote host.
Adding arguments to commands
----------------------------
Most commands take a number of named or positional arguments that you pass in the command line.
Taking the above examples, rather than just print "hello world" we would like to be able to print some
arbitrary name, like this::
python manage.py hello --name=Joe
hello Joe
or alternatively::
python manage.py hello -n Joe
To facilitate this you use the ``option_list`` attribute of the ``Command`` class::
from flask_script import Command, Manager, Option
class Hello(Command):
option_list = (
Option('--name', '-n', dest='name'),
)
def run(self, name):
print "hello %s" % name
Positional and optional arguments are stored as ``Option`` instances - see the :ref:`api` below for details.
Alternatively, you can define a ``get_options`` method for your ``Command`` class. This is useful if you want to be able
to return options at runtime based on for example per-instance attributes::
class Hello(Command):
def __init__(self, default_name='Joe'):
self.default_name=default_name
def get_options(self):
return [
Option('-n', '--name', dest='name', default=self.default_name),
]
def run(self, name):
print "hello", name
If you are using the ``@command`` decorator, it's much easier - the options are extracted automatically from your function arguments. This is an example of a positional argument::
@manager.command
def hello(name):
print "hello", name
You then invoke this on the command line like so::
> python manage.py hello Joe
hello Joe
Or you can do optional arguments::
@manager.command
def hello(name="Fred"):
print "hello", name
These can be called like so::
> python manage.py hello --name=Joe
hello Joe
alternatively::
> python manage.py hello -n Joe
hello Joe
The short form ``-n`` is formed from the first letter of the argument, so "name" > "-n". Therefore it's a good idea for your
optional argument variable names to begin with different letters.
*New in version 2.0*
Note also that if your optional argument is a boolean, for example::
@manager.command
def verify(verified=False):
"""
Checks if verified
"""
print "VERIFIED?", "YES" if verified else "NO"
You can just call it like this::
> python manage.py verify
VERIFIED? NO
> python manage.py verify -v
VERIFIED? YES
> python manage.py verify --verified
VERIFIED? YES
The ``@command`` decorator is fine for simple operations, but often you need the flexibility. For more sophisticated options it's better to use the ``@option`` decorator::
@manager.option('-n', '--name', dest='name', default='joe')
def hello(name):
print "hello", name
You can add as many options as you want::
@manager.option('-n', '--name', dest='name', default='joe')
@manager.option('-u', '--url', dest='url', default=None)
def hello(name, url):
if url is None:
print "hello", name
else:
print "hello", name, "from", url
This can be called like so::
> python manage.py hello -n Joe -u reddit.com
hello Joe from reddit.com
or alternatively::
> python manage.py hello --name=Joe --url=reddit.com
hello Joe from reddit.com
Adding options to the manager
-----------------------------
Options can also be passed to the ``Manager`` instance. This allows you to set up options that are passed to the application rather
than a single command. For example, you might want to have a flag to set the configuration file for your application. Suppose you create
your application with a factory function::
def create_app(config=None):
app = Flask(__name__)
if config is not None:
app.config.from_pyfile(config)
# configure your app...
return app
You want to be able to define the ``config`` argument on the command line - for example, if you have a command to set up your database, you
most certainly want to use different configuration files for production and development.
In order to pass that ``config`` argument, use the ``add_option()`` method of your ``Manager`` instance. It takes the same arguments
as ``Option``::
manager.add_option('-c', '--config', dest='config', required=False)
As with any other **Flask-Script** configuration you can call this anywhere in your script module, but it must be called before your ``manager.run()`` call.
Suppose you have this command::
@manager.command
def hello(name):
uppercase = app.config.get('USE_UPPERCASE', False)
if uppercase:
name = name.upper()
print "hello", name
You can now run the following::
> python manage.py -c dev.cfg hello joe
hello JOE
Assuming the ``USE_UPPERCASE`` setting is **True** in your dev.cfg file.
Notice also that the "config" option is **not** passed to the command. In
fact, this usage::
> python manage.py hello joe -c dev.cfg
will show an error message because the ``-c`` option does not belong to the
``hello`` command.
You can attach same-named options to different levels; this allows you to
add an option to your app setup code without checking whether it conflicts with
a command::
@manager.option('-n', '--name', dest='name', default='joe')
@manager.option('-c', '--clue', dest='clue', default='clue')
def hello(name, clue):
uppercase = app.config.get('USE_UPPERCASE', False)
if uppercase:
name = name.upper()
clue = clue.upper()
print "hello {0}, get a {1}!".format(name, clue)
> python manage.py -c dev.cfg hello -c cookie -n frank
hello FRANK, get a COOKIE!
Note that the destination variables (command arguments, corresponding to
``dest`` values) must still be different; this is a limitation of Python's
argument parser.
In order for manager options to work you must pass a factory function, rather than a Flask instance, to your
``Manager`` constructor. A simple but complete example is available in `this gist <https://gist.github.com/smurfix/9307618>`_.
*New in version 2.0*
Before version 2, options and command names could be interspersed freely.
The author decided to discontinue this practice for a number of reasons;
the problem with the most impact was that it was not possible to do::
> python manage.py connect -d DEST
> python manage.py import -d DIR
as these options collided.
Getting user input
------------------
**Flask-Script** comes with a set of helper functions for grabbing user input from the command line. For example::
from flask_script import Manager, prompt_bool
from myapp import app
from myapp.models import db
manager = Manager(app)
@manager.command
def dropdb():
if prompt_bool(
"Are you sure you want to lose all your data"):
db.drop_all()
It then runs like this::
> python manage.py dropdb
Are you sure you want to lose all your data ? [N]
See the :ref:`api` below for details on the various prompt functions.
Default commands
----------------
runserver
+++++++++
**Flask-Script** has a couple of ready commands you can add and customise: ``Server`` and ``Shell``.
The ``Server`` command runs the **Flask** development server.::
from flask_script import Server, Manager
from myapp import create_app
manager = Manager(create_app)
manager.add_command("runserver", Server())
if __name__ == "__main__":
manager.run()
and then run the command::
python manage.py runserver
The ``Server`` command has a number of command-line arguments - run ``python manage.py runserver -?`` for details on these. You can redefine the defaults in the constructor::
server = Server(host="0.0.0.0", port=9000)
Needless to say the development server is not intended for production use.
*New in version 2.0.5*
The most common use-case for ``runserver`` is to run a debug server for
investigating problems. Therefore the default, if it is *not* set in the
configuration file, is to enable debugging and auto-reloading.
Unfortunately, Flask currently (as of May 2014) defaults to set the DEBUG
configuration parameter to ``False``. Until this is changed, you can
safely add ``DEBUG=None`` to your Flask configuration. Flask-Script's
``runserver`` will then turn on debugging, but everything else will treat
it as being turned off.
To prevent misunderstandings -- after all, debug mode is a serious security
hole --, a warning is printed when Flask-Script treats a ``None`` default
value as if it were set to ``True``. You can turn on debugging explicitly
to get rid of this warning.
shell
+++++
The ``Shell`` command starts a Python shell. You can pass in a ``make_context`` argument, which must be a ``callable`` returning a ``dict``. By default, this is just a dict containing your Flask application instance::
from flask_script import Shell, Manager
from myapp import app
from myapp import models
from myapp.models import db
def _make_context():
return dict(app=app, db=db, models=models)
manager = Manager(app)
manager.add_command("shell", Shell(make_context=_make_context))
This is handy if you want to include a bunch of defaults in your shell to save typing lots of ``import`` statements.
The ``Shell`` command will use `IPython <http://ipython.scipy.org/moin/>`_ if it is installed, otherwise it defaults to the standard Python shell. You can disable this behaviour in two ways: by passing the ``use_ipython`` argument to the ``Shell`` constructor, or passing the flag ``--no-ipython`` in the command line::
shell = Shell(use_ipython=False)
There is also a ``shell`` decorator which you can use with a context function::
@manager.shell
def make_shell_context():
return dict(app=app, db=db, models=models)
This enables a ``shell`` command with the defaults enabled::
> python manage.py shell
The default commands ``shell`` and ``runserver`` are included by default, with the default options for these commands. If you wish to
replace them with different commands simply override with ``add_command()`` or the decorators. If you pass ``with_default_commands=False``
to the ``Manager`` constructor these commands will not be loaded::
manager = Manager(app, with_default_commands=False)
Sub-Managers
------------
A Sub-Manager is an instance of ``Manager`` added as a command to another Manager.
To create a submanager::
def sub_opts(app, **kwargs):
pass
sub_manager = Manager(sub_opts)
manager = Manager(self.app)
manager.add_command("sub_manager", sub_manager)
If you attach options to the sub_manager, the ``sub_opts`` procedure will
receive their values. Your application is passed in ``app`` for
convenience.
If ``sub_opts`` returns a value other than ``None``, this value will replace
the ``app`` value that's passed on. This way, you can implement a
sub-manager which replaces the whole app. One use case is to create a
separate administrative application for improved security::
def gen_admin(app, **kwargs):
from myweb.admin import MyAdminApp
## easiest but possibly incomplete way to copy your settings
return MyAdminApp(config=app.config, **kwargs)
sub_manager = Manager(gen_admin)
manager = Manager(MyApp)
manager.add_command("admin", sub_manager)
> python manage.py runserver
[ starts your normal server ]
> python manage.py admin runserver
[ starts an administrative server ]
You can cascade sub-managers, i.e. add one sub-manager to another.
A sub-manager does not get default commands added to itself (by default)
*New in version 0.5.0.*
Note to extension developers
----------------------------
Extension developers can easily create convenient sub-manager instance within their extensions to make it easy for a user to consume all the available commands of an extension.
Here is an example how a database extension could provide (ex. database.py)::
manager = Manager(usage="Perform database operations")
@manager.command
def drop():
"Drops database tables"
if prompt_bool("Are you sure you want to lose all your data"):
db.drop_all()
@manager.command
def create(default_data=True, sample_data=False):
"Creates database tables from sqlalchemy models"
db.create_all()
populate(default_data, sample_data)
@manager.command
def recreate(default_data=True, sample_data=False):
"Recreates database tables (same as issuing 'drop' and then 'create')"
drop()
create(default_data, sample_data)
@manager.command
def populate(default_data=False, sample_data=False):
"Populate database with default data"
from fixtures import dbfixture
if default_data:
from fixtures.default_data import all
default_data = dbfixture.data(*all)
default_data.setup()
if sample_data:
from fixtures.sample_data import all
sample_data = dbfixture.data(*all)
sample_data.setup()
Then the user can register the sub-manager to their primary Manager (within manage.py)::
manager = Manager(app)
from flask.ext.database import manager as database_manager
manager.add_command("database", database_manager)
The commands will then be available::
> python manage.py database
Please provide a command:
Perform database operations
create Creates database tables from sqlalchemy models
drop Drops database tables
populate Populate database with default data
recreate Recreates database tables (same as issuing 'drop' and then 'create')
Error handling
--------------
Users do not like to see stack traces, but developers want them for bug reports.
Therefore, ``flask_script.commands`` provides an `InvalidCommand` error
class which is not supposed to print a stack trace when reported.
In your command handler::
from flask_script.commands import InvalidCommand
[… if some command verification fails …]
class MyCommand(Command):
def run(self, foo=None, bar=None):
if foo and bar:
raise InvalidCommand("Options foo and bar are incompatible")
In your main loop::
try:
MyManager().run()
except InvalidCommand as err:
print(err, file=sys.stderr)
sys.exit(1)
This way, you maintain interoperability if some plug-in code supplies
Flask-Script hooks you'd like to use, or vice versa.
Accessing local proxies
-----------------------
The ``Manager`` runs the command inside a `Flask test context <http://flask.pocoo.org/docs/testing/#other-testing-tricks>`_. This means that you can access request-local proxies where appropriate, such as ``current_app``, which may be used by extensions.
.. _api:
API
---
.. module:: flask_script
.. autoclass:: Manager
:members: run, add_option, add_command, command, option, shell
.. autoclass:: Command
:members: run, get_options
.. autoclass:: Shell
.. autoclass:: Server
.. autoclass:: Option
.. autoclass:: Group
.. autofunction:: prompt
.. autofunction:: prompt_bool
.. autofunction:: prompt_pass
.. autofunction:: prompt_choices
.. _Flask: http://flask.pocoo.org
.. _GitHub: http://github.com/smurfix/flask-script
| PypiClean |
/IPlantUML-0.1.1.tar.gz/IPlantUML-0.1.1/README.rst | .. image:: https://travis-ci.org/jbn/IPlantUML.svg?branch=master
:target: https://travis-ci.org/jbn/IPlantUML
What is it?
===========
This Python package defines a `PlantUML <http://plantuml.com/>`__ cell
magic for IPython. It lets you generate UML diagrams as inline SVG in
your notebook. I'll add embellishments as needed. But, for now, I just
needed something that worked and existed as a package (in pypi).
I based my code on `Steven Burke <https://github.com/sberke>`__'s
`plantuml
gist <http://chickenbit.com/blog/2014/10/inline-plantuml-diagrams-in-ipython-notebook/>`__.
Installation
------------
First, install IPlantuml with pip.
.. code:: sh
pip install iplantuml
Then, install plantuml. On Debian-based systems you can install the plantuml
package. Otherwise you can download ``plantuml.jar`` and copy it to
``/usr/local/bin/plantuml.jar``.
.. code:: sh
sudo apt install plantuml
Alternatively you can set a custom path for plantuml.jar during
installation
.. code:: sh
git clone https://github.com/jbn/IPlantUML.git
cd IPlantUML
python setup.py install iplantuml --jarpath /my/custom/path/plantuml.jar
Usage
-----
In Ipython, first,
.. code:: python
import iplantuml
then, create a cell like,
::
%%plantuml
@startuml
Alice -> Bob: Authentication Request
Bob --> Alice: Authentication Response
@enduml
The output will be the generated SVG UML diagram.
By default, the magic removes the intermediate (``tmp.uml``) and target
(``tmp.svg``) files. However, if you enter a name in the ``%%plantuml``
line, it retains both files of ``$name.uml`` and ``$name.svg``. For
example,
::
%%plantuml auth
@startuml
Alice -> Bob: Authentication Request
Bob --> Alice: Authentication Response
@enduml
generates and retains ``auth.uml`` and ``auth.svg``.
| PypiClean |
/CheckMates-0.2.0-py3-none-any.whl/checkmates/objectives/utils.py | from typing import Optional
import pandas as pd
from checkmates import objectives
from checkmates.exceptions import ObjectiveCreationError, ObjectiveNotFoundError
from checkmates.objectives.objective_base import ObjectiveBase
from checkmates.problem_types import ProblemTypes, handle_problem_types
from checkmates.utils.gen_utils import _get_subclasses
from checkmates.utils.logger import get_logger
logger = get_logger(__file__)
def get_non_core_objectives():
    """Get non-core objective classes.

    Non-core objectives are domain-specific: users typically have to
    configure them before they can be used in AutoMLSearch.

    Returns:
        List of ObjectiveBase classes
    """
    non_core = [
        objectives.MeanSquaredLogError,
        objectives.RootMeanSquaredLogError,
    ]
    return non_core
def get_all_objective_names():
    """Get a list of the names of all objectives.

    Returns:
        list (str): Objective names
    """
    # _all_objectives_dict is keyed by lowercased objective name, so the
    # key view is exactly the set of valid names.
    return [name for name in _all_objectives_dict()]
def _all_objectives_dict():
    """Map lowercased objective names to their classes.

    Only subclasses defined inside the ``checkmates.objectives`` package are
    included; third-party subclasses of ObjectiveBase are filtered out.
    """
    return {
        cls.name.lower(): cls
        for cls in _get_subclasses(ObjectiveBase)
        if "checkmates.objectives" in cls.__module__
    }
def get_objective(objective, return_instance=False, **kwargs):
    """Returns the Objective class corresponding to a given objective name.

    Args:
        objective (str or ObjectiveBase): Name or instance of the objective class.
        return_instance (bool): Whether to return an instance of the objective. This only applies if objective
            is of type str. Note that the instance will be initialized with default arguments.
        kwargs (Any): Any keyword arguments to pass into the objective. Only used when return_instance=True.

    Returns:
        ObjectiveBase if the parameter objective is of type ObjectiveBase. If objective is instead a valid
        objective name, function will return the class corresponding to that name. If return_instance is True,
        an instance of that objective will be returned.

    Raises:
        TypeError: If objective is None.
        TypeError: If objective is not a string and not an instance of ObjectiveBase.
        ObjectiveNotFoundError: If input objective is not a valid objective.
        ObjectiveCreationError: If objective cannot be created properly.
    """
    if objective is None:
        raise TypeError("Objective parameter cannot be NoneType")
    # Instances pass straight through untouched.
    if isinstance(objective, ObjectiveBase):
        return objective
    if not isinstance(objective, str):
        raise TypeError(
            "If parameter objective is not a string, it must be an instance of ObjectiveBase!",
        )
    registry = _all_objectives_dict()
    key = objective.lower()
    if key not in registry:
        raise ObjectiveNotFoundError(
            f"{objective} is not a valid Objective! "
            "Use checkmates.objectives.get_all_objective_names() "
            "to get a list of all valid objective names. ",
        )
    objective_class = registry[key]
    if not return_instance:
        return objective_class
    try:
        # Instantiation can fail when required constructor arguments are
        # missing from kwargs; surface that as an ObjectiveCreationError.
        return objective_class(**kwargs)
    except TypeError as e:
        raise ObjectiveCreationError(
            f"In get_objective, cannot pass in return_instance=True for {objective} because {str(e)}",
        )
def get_problem_type(
    input_problem_type: Optional[str],
    target_data: pd.Series,
) -> ProblemTypes:
    """Helper function to determine if classification problem is binary or multiclass dependent on target variable values."""
    if not input_problem_type:
        raise ValueError("problem type is required")
    kind = input_problem_type.lower()
    if kind == "classification":
        # Binary vs. multiclass is decided by the number of distinct target values.
        n_unique = target_data.value_counts().size
        if n_unique == 2:
            return ProblemTypes.BINARY
        if n_unique > 2:
            return ProblemTypes.MULTICLASS
        message = "The target field contains less than two unique values. It cannot be used for modeling."
        logger.error(message, exc_info=True)
        raise ValueError(message)
    if kind == "regression":
        return ProblemTypes.REGRESSION
    if kind == "time series regression":
        return ProblemTypes.TIME_SERIES_REGRESSION
    message = f"Unexpected problem type provided in configuration: {input_problem_type}"
    logger.error(message, exc_info=True)
    raise ValueError(message)
def get_default_primary_search_objective(problem_type):
    """Get the default primary search objective for a problem type.

    Args:
        problem_type (str or ProblemType): Problem type of interest.

    Returns:
        ObjectiveBase: primary objective instance for the problem type.
    """
    defaults = {
        "binary": "Log Loss Binary",
        "multiclass": "Log Loss Multiclass",
        "regression": "R2",
        "time series regression": "MedianAE",
        "time series binary": "Log Loss Binary",
        "time series multiclass": "Log Loss Multiclass",
    }
    # handle_problem_types normalizes the input to a ProblemTypes enum member.
    objective_name = defaults[handle_problem_types(problem_type).value]
    return get_objective(objective_name, return_instance=True)
def get_core_objectives(problem_type):
    """Returns all core objective instances associated with the given problem type.

    Core objectives are designed to work out-of-the-box for any dataset.

    Args:
        problem_type (str/ProblemTypes): Type of problem

    Returns:
        List of ObjectiveBase instances

    Examples:
        >>> for objective in get_core_objectives("regression"):
        ...     print(objective.name)
        ExpVariance
        MaxError
        MedianAE
        MSE
        MAE
        R2
        Root Mean Squared Error
        >>> for objective in get_core_objectives("binary"):
        ...     print(objective.name)
        MCC Binary
        Log Loss Binary
        Gini
        AUC
        Precision
        F1
        Balanced Accuracy Binary
        Accuracy Binary
    """
    problem_type = handle_problem_types(problem_type)
    non_core = get_non_core_objectives()
    core = []
    for objective_class in _all_objectives_dict().values():
        if not objective_class.is_defined_for_problem_type(problem_type):
            continue
        if objective_class in non_core:
            continue
        core.append(objective_class())
    return core
/Copreus-0.4.0.tar.gz/Copreus-0.4.0/copreus/drivers/epaperdirect.py | from copreus.baseclasses.aepaper import AEPaper
from copreus.baseclasses.aepaper import EPaperMQTTMessageConverter
from time import sleep
import queue
from pelops.mythreading import LoggerThread
import threading
from enum import Enum
from copreus.schema.epaperdirect import get_schema
class TaskType(Enum):
    """Task type for entries in the message queue."""
    DISPLAY = 0  # render the queued image payload (full or partial) into the current frame buffer
    SWITCH = 1  # flip the epaper's two internal frame buffers
class EPaperDirect(AEPaper):
"""
This driver basically updates either the whole display (full_image) or selected parts (partial_image). The latter
one results in faster update times. Internally, the epaper has two buffers. Via a command message the buffers are
flipped. After flipping, one buffer is used to update the display while the other buffer is ready to receive new
data via spi.
When using partial image updates please take the two buffers under consideration. If two different areas are
updated alternatively, it will result in a "blinking" behavior. The most common case - one static background
image - and constantly update of the same area can be realised by first sending the full_image_twice and then
the partial_image messages.
Partial images must have a width and an x-position value that are multiples of eight. Any other value will result
in a ValueError. Some displays have a width that is not compliant to this rule. In this case the display will have
a logic width (e.g. 2.13 inch display has a width of 122 and a logic width of 128).
The driver entry in the yaml file consists of:
* ADriver entries
* topics_sub:
* full_image - a single image covering the whole display to be placed in the current buffer.
* partial_image - list of image covering only parts of the display plus their position to be placed into
the current buffer.
* switch_frame - switch between the two frame buffers
* mqtt-translations:
* switch_frame - the command expected for switch_frame action
* topics_pub:
* message_queue_size - publishes the number of messages that wait to be processes.
Example:
driver:
type: epaperdirect
model: 2.9
spi:
pin_cs: -1 # use spi cs mechanism. GPIO08/SPI_CE0_N
bus: 0
device: 0
maxspeed: 2000000
transpose: 270
pin_rst: 13
pin_dc: 19
pin_busy: 6
VCOM: -3.2
autodeepsleep: True
topics-sub:
full_image: /test/display/full_image
partial_image: /test/display/partial_image
switch_frame: /test/display/switch_frame
mqtt-translations:
switch_frame: SWITCH
topics-pub:
message_queue_size: /test/display/message_queue_size
"""
_msg_queue_size = 0 # number of mqtt messages that wait to be processed
_msg_queue = None # queue with tasks to be executed that are received via mqtt
_msg_queue_worker_thread = None # thread that processes all entries that are put to _msg_queue
_topic_pub_msg_queue_size = None # topic the current message queue size will be published to
_switch_command = None # expected command that initiates a frame buffer switch
_display_frame_lock = None # locked during image update (processing time + update_time)
    def __init__(self, config, mqtt_client=None, logger=None, spi_lock=None, stdout_log_level=None, no_gui=None,
                 manage_monitoring_agent=True):
        """
        Constructor

        :param config: yaml config structure
        :param mqtt_client: mymqttclient instance (optional)
        :param logger: logger instance (optional)
        :param spi_lock: spi lock instance (optional)
        :param stdout_log_level: log level for stdout logging (optional) - forwarded to AEPaper
        :param no_gui: gui-less mode flag (optional) - forwarded to AEPaper
        :param manage_monitoring_agent: whether the monitoring agent lifecycle is handled here - forwarded to AEPaper
        """
        # Base class sets up mqtt, spi, logging and the config-derived attributes
        # (_topics_sub/_topics_pub/_mqtt_translations) used below - must run first.
        AEPaper.__init__(self, config, mqtt_client, logger, spi_lock=spi_lock, logger_name=self.__class__.__name__,
                         stdout_log_level=stdout_log_level, no_gui=no_gui,
                         manage_monitoring_agent=manage_monitoring_agent)
        self._display_frame_lock = threading.Lock()  # held for processing time + update_time of each display action
        self._msg_queue = queue.Queue()  # tasks (TaskType, payload) received via mqtt, consumed by the worker thread
        self._msg_queue_size = 0  # published counter of tasks still waiting to be processed
        self._msg_queue_worker_thread = LoggerThread(target=self._msg_queue_worker, logger=self._logger,
                                                     name="epaperdirect.{}".format(self.get_name()))
        self._switch_command = self._mqtt_translations["switch_frame"]  # expected payload that triggers a buffer switch
        self._topic_pub_msg_queue_size = self._topics_pub["message_queue_size"]  # topic for the queue-size counter
def _handler_switch_frame(self, msg):
"""on_message handler for topic sub 'switch_frame'"""
if msg == self._switch_command:
self._logger.info("EPaperDirect._handler_switch_frame - received switch frame command via topic {}.".format(self._topics_sub["switch_frame"]))
self._put_to_msg_queue(TaskType.SWITCH)
else:
self._logger.info("EPaperDirect._handler_switch_frame - received unknown command '{}' via topic {}. expected command {}."
.format(msg, self._topics_sub["switch_frame"], self._switch_command))
def _handler_display_full_image(self, msg):
"""on_message handler for topic sub 'full_image'"""
self._logger.info("EPaperDirect._handler_display_full_image - received full_image in topic {}.".format(self._topics_sub["full_image"]))
image_entry = EPaperMQTTMessageConverter.from_full_image(msg, self._transpose,
self._width, self._height)
self._put_to_msg_queue(TaskType.DISPLAY, image_entry)
def _handler_display_partial_image(self, msg):
"""on_message handler for topic sub 'partial_image'"""
self._logger.info("EPaperDirect._handler_display_partial_image - received partial_image in topic {}.".format(self._topics_sub["partial_image"]))
image_entries = EPaperMQTTMessageConverter.from_partial_images(msg, self._transpose,
self._width, self._height)
self._put_to_msg_queue(TaskType.DISPLAY, image_entries)
def _msg_queue_worker(self):
"""
process each item in queue and decrease _queue_size (new value is published). two different types of tasks
can be processed:
* DISPLAY - take the provided value and call _display_image
* SWITCH - call _switch_frame
this approach ensures that an incoming switch statement is processed if and only if all previously received
images (full or partial) have been processed.
"""
while True:
task = self._msg_queue.get()
if task is None:
break
tasktype, value = task
if tasktype == TaskType.DISPLAY:
self._display_image(value)
elif tasktype == TaskType.SWITCH:
self._switch_frame()
else:
self._logger.error("EPaperDirect._msg_queue_worker - unknown task type '{}'".format(tasktype))
raise ValueError("EPaperDirect._msg_queue_worker - unknown task type '{}'".format(tasktype))
self._msg_queue.task_done()
self._msg_queue_size = self._msg_queue_size - 1
self._logger.info("EPaperDirect._msg_queue_worker - mqtt message queue size decreased to: {}.".format(self._msg_queue_size))
self._publish_value(self._topic_pub_msg_queue_size, self._msg_queue_size)
def _put_to_msg_queue(self, tasktype, value=None):
"""
increase queue size, publish new value and put task togehter with task type to _msg_queue
:param tasktype: TaskType enum value
:param value: payload for task (optional)
:return:
"""
self._msg_queue.put([tasktype, value])
self._msg_queue_size = self._msg_queue_size + 1
self._logger.info("EPaperDirect._put_to_msg_queue - mqtt message queue size increased to: {}.".format(self._msg_queue_size))
self._publish_value(self._topic_pub_msg_queue_size, self._msg_queue_size)
def _empty_msg_queue(self):
"""remove all tasks from _msg_queue"""
while not self._msg_queue.empty():
try:
temp = self._msg_queue.get_nowait()
self._msg_queue.task_done()
except queue.Empty:
pass
self._msg_queue_size = 0
self._logger.info("EPaperDirect._empty_msg_queue - mqtt message queue cleared")
self._publish_value(self._topic_pub_msg_queue_size, self._msg_queue_size)
    def _switch_frame(self):
        """
        switch between the two frames. the active frame will be displayed.
        """
        # serialize display access with the same lock used by _display_image
        self._logger.info("EPaperDirect._switch_frame - switch to other frame (_display_frame_lock)")
        with self._display_frame_lock:
            self._logger.info("EPaperDirect._switch_frame - _display_frame_lock acquired")
            # _display_frame presumably flips/pushes the frame buffer to the panel
            # -- definition not in view, confirm.
            self._display_frame()
            self._logger.info("EPaperDirect._switch_frame - _display_frame_lock released.")
def _display_image(self, images):
"""send received image(s) to the epaper.
acquire the lock (_display_frame_lock), wake up from deep sleep (optional), put all (partial) images into the current frame buffer,
display frame, send back to deep sleep (optional), wait the _update_time and then release the lock."""
self._logger.info("EPaperDirect._display_image - display image")
with self._display_frame_lock:
self._logger.info("EPaperDirect._display_image - lock _display_frame_lock acquired")
if self._auto_deep_sleep:
self._reset()
for map in images:
x = map["x"]
y = map["y"]
image = map["image"]
self._logger.info("EPaperDirect._display_image - ... added image at {}/{}.".format(x, y))
self._set_frame_memory(image, x, y)
self._display_frame()
if self._auto_deep_sleep:
self._deep_sleep()
sleep(self._update_time)
self._logger.info("EPaperDirect._display_image - ... image displayed. lock _display_frame_lock released.")
    def _epaper_start(self):
        """start the message queue workers and subscribe to the incoming topics."""
        self._msg_queue_worker_thread.start()
        self._mqtt_client.subscribe(self._topics_sub["full_image"], self._handler_display_full_image)
        self._mqtt_client.subscribe(self._topics_sub["partial_image"], self._handler_display_partial_image)
        self._mqtt_client.subscribe(self._topics_sub["switch_frame"], self._handler_switch_frame)
        # announce the initial queue size to subscribers
        self._publish_value(self._topic_pub_msg_queue_size, self._msg_queue_size)
    def _epaper_stop(self):
        """unsubscribe from all topics and empty message queue"""
        # hold the display lock so no new frame operation starts during teardown
        with self._display_frame_lock:
            self._mqtt_client.unsubscribe(self._topics_sub["full_image"], self._handler_display_full_image)
            self._mqtt_client.unsubscribe(self._topics_sub["partial_image"], self._handler_display_partial_image)
            self._mqtt_client.unsubscribe(self._topics_sub["switch_frame"], self._handler_switch_frame)
            self._empty_msg_queue()
            # NOTE(review): join() blocks until every get() has a matching task_done().
            # If the worker is currently processing a task that waits for
            # _display_frame_lock (held here), this could deadlock - confirm.
            self._msg_queue.join()
    @classmethod
    def _get_schema(cls):
        """Return the service config schema (delegates to the module-level get_schema)."""
        return get_schema()
    def _runtime_information(self):
        """Return runtime information for monitoring; this service exposes none."""
        return {}
    def _config_information(self):
        """Return config information for monitoring; this service exposes none."""
        return {}
def standalone():
    """Module-level entry point: calls the static method EPaperDirect.standalone()."""
    EPaperDirect.standalone()
if __name__ == "__main__":
    # removed stray trailing token ("| PypiClean") that would raise NameError at runtime
    EPaperDirect.standalone()
/Hydro_Quebec_API_Wrapper-3.0.8-py3-none-any.whl/hydroqc/contract/common.py | import functools
import logging
from abc import ABC
from collections.abc import Callable, Iterator
from datetime import date, datetime, timedelta
from io import StringIO
from typing import ParamSpec, TypeVar, cast
from hydroqc.contract.outage import Outage
from hydroqc.hydro_api.client import HydroClient
from hydroqc.logger import get_logger
from hydroqc.types import (
ConsumpAnnualTyping,
ConsumpDailyTyping,
ConsumpHourlyTyping,
ConsumpMonthlyTyping,
ContractTyping,
OutageListTyping,
PeriodDataTyping,
)
from hydroqc.utils import EST_TIMEZONE
# Generic type variables used by the decorator helpers below.
T = TypeVar("T")
P = ParamSpec("P")
def check_period_data_present(
    method: Callable[..., None | str | bool | float | PeriodDataTyping]
) -> Callable[..., None | str | bool | float | PeriodDataTyping]:
    """Check if the contract's billing-period data are present.

    Decorator for ``Contract`` accessors that read ``_all_period_data``:
    returns ``None`` (after logging) when the data was never fetched or is
    empty, instead of letting the accessor fail with an IndexError.
    """
    @functools.wraps(method)
    def wrapper(contract: "Contract") -> None | str | bool | float | PeriodDataTyping:
        if not hasattr(contract, "_all_period_data"):
            # fixed message: the fetch method is get_periods_info, not get_period_info
            contract._logger.warning("You need to call get_periods_info method first")
            return None
        if not contract._all_period_data:
            contract._logger.info(
                "It seems Hydro-Québec didn't provided some data. "
                "Maybe you did a rate change recently. "
                "This message should disappear at the beginning of the next bill period."
            )
            # bug fix: previously fell through and called the method on an empty
            # list, which raised IndexError in accessors indexing [0]
            return None
        return method(contract)
    return wrapper
def check_info_data_present(
    method: Callable[..., None | str | bool | float | date]
) -> Callable[..., None | str | bool | float | date]:
    """Check if the contract's info data are present.

    Decorator for ``Contract`` accessors that read ``_raw_info_data``:
    returns ``None`` (after logging) when the data was never fetched or is
    empty, instead of letting the accessor fail with a KeyError.
    """
    @functools.wraps(method)
    def wrapper(contract: "Contract") -> None | str | bool | float | date:
        if not hasattr(contract, "_raw_info_data"):
            contract._logger.warning("You need to call get_info method first")
            return None
        if not contract._raw_info_data:
            contract._logger.info(
                "It seems Hydro-Québec didn't provided some data. "
                "Maybe you did a rate change recently. "
                "This message should disappear at the beginning of the next bill period."
            )
            # bug fix: previously fell through and called the method on an empty
            # mapping, which raised KeyError in the decorated accessors
            return None
        return method(contract)
    return wrapper
def check_annual_data_present(
    method: Callable[..., None | str | bool | float | date]
) -> Callable[..., None | str | bool | float | date]:
    """Check if the contract's annual consumption data are present.

    Decorator for ``Contract`` accessors that read ``_annual_info_data``:
    returns ``None`` (after logging) when the data was never fetched or is
    empty.
    """
    # functools.wraps added for consistency with check_outages_data_present,
    # so the wrapped accessor keeps its name/docstring.
    @functools.wraps(method)
    def wrapper(contract: "Contract") -> None | str | bool | float | date:
        if not hasattr(contract, "_annual_info_data"):
            contract._logger.warning(
                "You need to call get_annual_consumption method first"
            )
            return None
        if not contract._annual_info_data:
            contract._logger.info(
                "It seems Hydro-Québec didn't provided some data. "
                "Maybe you did a rate change recently. "
                "This message should disappear at the beginning of the next bill period."
            )
            return None
        return method(contract)
    return wrapper
def check_outages_data_present(method: Callable[P, T]) -> Callable[P, T]:
    """Check if the contract's outage data are present before running *method*."""
    @functools.wraps(method)
    def wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
        # first positional argument is always the Contract instance
        contract = cast("Contract", args[0])
        if not hasattr(contract, "_outages_data"):
            contract._logger.warning("You need to call refresh_outages method first")
            return cast(T, None)
        return method(*args, **kwargs)
    return wrapper
class Contract(ABC):
    """Hydroquebec contract.
    Represents a contract (contrat)
    """
    # Typed attribute declarations; most are only populated after the matching
    # fetch coroutine was awaited (get_info, get_periods_info,
    # get_annual_consumption, refresh_outages).
    _mve_activated: bool
    _rate_code: str
    _rate_option_code: str
    _meter_id: str
    _address: str
    _raw_info_data: ContractTyping
    _annual_info_data: ConsumpAnnualTyping
    _outages_data: OutageListTyping
    _all_period_data: list[PeriodDataTyping]
    def __init__(
        self,
        applicant_id: str,
        customer_id: str,
        account_id: str,
        contract_id: str,
        hydro_client: HydroClient,
        log_level: str | None = None,
    ):
        """Create a new Hydroquebec contract.

        :param applicant_id: applicant number ("no partenaire demandeur")
        :param customer_id: customer number ("no partenaire titulaire")
        :param account_id: account number ("no compte contrat")
        :param contract_id: contract number ("no contrat")
        :param hydro_client: HTTP client used for all Hydro-Québec API calls
        :param log_level: optional log level for this contract's logger
        """
        self._logger: logging.Logger = get_logger(
            f"c-{contract_id}",
            log_level=log_level,
            parent=f"w-{applicant_id}.c-{customer_id}.a-{account_id}",
        )
        # internal names follow Hydro-Québec's French API vocabulary
        self._no_partenaire_demandeur: str = applicant_id
        self._no_partenaire_titulaire: str = customer_id
        self._no_compte_contrat: str = account_id
        self._no_contrat: str = contract_id
        self._hydro_client: HydroClient = hydro_client
        self._address: str = ""
        self._outages: list[Outage] = []
    # Main attributes
    @property
    def applicant_id(self) -> str:
        """Get applicant id."""
        return self._no_partenaire_demandeur
    @property
    def customer_id(self) -> str:
        """Get customer id."""
        return self._no_partenaire_titulaire
    @property
    def account_id(self) -> str:
        """Get account id."""
        return self._no_compte_contrat
    @property
    def contract_id(self) -> str:
        """Get contract id."""
        return self._no_contrat
    @property
    def rate(self) -> str:
        """Get current period rate name."""
        if hasattr(self, "_rate_code"):
            return self._rate_code
        return "Unknown rate"
    @property
    def rate_option(self) -> str:
        """Get current period rate option name."""
        # NOTE(review): unlike `rate`, no hasattr guard here - raises
        # AttributeError if _rate_option_code was never set; subclasses like
        # ContractFallBack define it as a class attribute - confirm all do.
        return self._rate_option_code
    @property
    @check_info_data_present
    def address(self) -> str:
        """Get contract address."""
        return self._raw_info_data["adresseConsommation"].strip()
    @property
    @check_info_data_present
    def meter_id(self) -> str:
        """Get meter id."""
        return self._raw_info_data["noCompteur"]
    @property
    @check_info_data_present
    def start_date(self) -> date:
        """Get contract start date."""
        # "dateDebutContrat" is an ISO datetime string; keep only the date part
        start_date = date.fromisoformat(
            self._raw_info_data["dateDebutContrat"].split("T")[0]
        )
        return start_date
    @property
    @check_info_data_present
    def consumption_location_id(self) -> str:
        """Get consumption location id."""
        return self._raw_info_data["idLieuConsommation"]
    # Main methods
    async def get_info(self) -> ContractTyping:
        """Fetch info about this contract (populates ``_raw_info_data``)."""
        self._logger.debug("Getting contract info")
        self._raw_info_data = await self._hydro_client.get_contract_info(
            self.applicant_id, self.customer_id, self.account_id, self.contract_id
        )
        self._logger.debug("Got contract info")
        return self._raw_info_data
    async def get_periods_info(self) -> list[PeriodDataTyping]:
        """Fetch periods info (populates ``_all_period_data``)."""
        self._logger.debug("Getting contract periods info")
        self._all_period_data = await self._hydro_client.get_periods_info(
            self.applicant_id, self.customer_id, self.contract_id
        )
        self._logger.debug("Got contract periods info")
        return self._all_period_data
    @property
    @check_period_data_present
    def latest_period_info(self) -> PeriodDataTyping:
        """Fetch latest period info."""
        # index 0 is assumed to be the most recent period - TODO confirm API ordering
        return self._all_period_data[0]
    async def refresh_outages(self) -> None:
        """Fetch contract outages (populates ``_outages_data`` and ``_outages``)."""
        # consumption_location_id is None until get_info has been awaited
        if self.consumption_location_id is not None:
            res = await self._hydro_client.get_outages(
                str(self.consumption_location_id)
            )
            if res is not None:
                self._outages_data = res
                self._outages = []
                for raw_outage in self._outages_data["interruptions"]:
                    self._outages.append(Outage(raw_outage, self._logger))
                # keep outages ordered so index 0 is the soonest
                self._outages.sort(key=lambda x: x.start_date)
    @property
    @check_outages_data_present
    def outages(self) -> list[Outage]:
        """Return the list of the contract outages."""
        return self._outages
    @property
    @check_outages_data_present
    def next_outage(self) -> Outage | None:
        """Get next or first contract outage."""
        if self._outages:
            return self.outages[0]
        return None
    # Consumption methods
    async def get_today_hourly_consumption(self) -> ConsumpHourlyTyping:
        """Fetch hourly consumption for today."""
        return await self._hydro_client.get_today_hourly_consumption(
            self.applicant_id, self.customer_id, self.contract_id
        )
    async def get_hourly_consumption(self, date_wanted: date) -> ConsumpHourlyTyping:
        """Fetch hourly consumption for a date."""
        return await self._hydro_client.get_hourly_consumption(
            self.applicant_id, self.customer_id, self.contract_id, date_wanted
        )
    async def get_daily_consumption(
        self, start_date: date, end_date: date
    ) -> ConsumpDailyTyping:
        """Fetch daily consumption for the given date range."""
        return await self._hydro_client.get_daily_consumption(
            self.applicant_id, self.customer_id, self.contract_id, start_date, end_date
        )
    async def get_today_daily_consumption(self) -> ConsumpDailyTyping:
        """Fetch daily consumption for yesterday and today (EST timezone)."""
        today = datetime.today().astimezone(EST_TIMEZONE)
        yesterday = today - timedelta(days=1)
        return await self.get_daily_consumption(yesterday, today)
    async def get_monthly_consumption(self) -> ConsumpMonthlyTyping:
        """Fetch monthly consumption."""
        return await self._hydro_client.get_monthly_consumption(
            self.applicant_id, self.customer_id, self.contract_id
        )
    async def get_annual_consumption(self) -> ConsumpAnnualTyping:
        """Fetch annual consumption (populates ``_annual_info_data``)."""
        self._annual_info_data = await self._hydro_client.get_annual_consumption(
            self.applicant_id, self.customer_id, self.contract_id
        )
        return self._annual_info_data
    # CSV methods
    async def get_daily_energy(
        self,
        start_date: date,
        end_date: date,
        raw_output: bool = False,
    ) -> Iterator[list[str | int | float]] | StringIO:
        """Get daily energy and power data on a specific date range.
        date format: 2022-11-23

        :param raw_output: when True, return the raw CSV stream instead of parsed rows
        """
        data_csv = await self._hydro_client.get_consumption_csv(
            self.applicant_id,
            self.customer_id,
            self.contract_id,
            start_date,
            end_date,
            "energie-jour",
            raw_output,
        )
        return data_csv
    async def get_hourly_energy(
        self,
        start_date: date,
        end_date: date,
        raw_output: bool = False,
    ) -> Iterator[list[str | int | float]] | StringIO:
        """Get hourly energy on a specific date range.
        date format: 2022-11-23

        :param raw_output: when True, return the raw CSV stream instead of parsed rows
        """
        data_csv = await self._hydro_client.get_consumption_csv(
            self.applicant_id,
            self.customer_id,
            self.contract_id,
            start_date,
            end_date,
            "energie-heure",
            raw_output,
        )
        return data_csv
    async def get_consumption_overview_csv(
        self,
        raw_output: bool = False,
    ) -> Iterator[list[str | int | float]] | StringIO:
        """Get the consumption overview over the last 2 years."""
        data_csv = await self._hydro_client.get_consumption_overview_csv(
            self.applicant_id,
            self.customer_id,
            self.contract_id,
            raw_output,
        )
        return data_csv
    # Current period attributes
    # CP == Current period
    @property
    @check_period_data_present
    def cp_current_day(self) -> int:
        """Get number of days since the current period started."""
        return self._all_period_data[0]["nbJourLecturePeriode"]
    @property
    @check_period_data_present
    def cp_duration(self) -> int:
        """Get current period duration in days."""
        return self._all_period_data[0]["nbJourPrevuPeriode"]
    @property
    @check_period_data_present
    def cp_current_bill(self) -> float:
        """Get current bill since the current period started."""
        return self._all_period_data[0]["montantFacturePeriode"]
    @property
    @check_period_data_present
    def cp_projected_bill(self) -> float:
        """Projected bill of the current period."""
        return self._all_period_data[0]["montantProjetePeriode"]
    @property
    @check_period_data_present
    def cp_daily_bill_mean(self) -> float:
        """Daily bill mean since the current period started."""
        return self._all_period_data[0]["moyenneDollarsJourPeriode"]
    @property
    @check_period_data_present
    def cp_daily_consumption_mean(self) -> float:
        """Daily consumption mean since the current period started."""
        return self._all_period_data[0]["moyenneKwhJourPeriode"]
    @property
    @check_period_data_present
    def cp_total_consumption(self) -> float:
        """Total consumption since the current period started."""
        return self._all_period_data[0]["consoTotalPeriode"]
    @property
    @check_period_data_present
    def cp_projected_total_consumption(self) -> float:
        """Projected consumption of the current period started."""
        return self._all_period_data[0]["consoTotalProjetePeriode"]
    @property
    @check_period_data_present
    def cp_average_temperature(self) -> float:
        """Average temperature since the current period started."""
        return self._all_period_data[0]["tempMoyennePeriode"]
    @property
    @check_period_data_present
    def cp_kwh_cost_mean(self) -> float | None:
        """Mean cost of a kWh since the current period started."""
        # "coutCentkWh" appears to be expressed in cents; converted to $/kWh - confirm
        if self._all_period_data[0]["coutCentkWh"] is not None:
            return self._all_period_data[0]["coutCentkWh"] / 100
        return None
    @property
    @check_period_data_present
    def cp_epp_enabled(self) -> bool:
        """Is EPP enabled for the current period.
        See: https://www.hydroquebec.com/residential/customer-space/
        account-and-billing/equalized-payments-plan.html
        """
        return self._all_period_data[0]["indMVEPeriode"]
    # Repr
    def __repr__(self) -> str:
        """Represent object."""
        return (
            f"""<Contract - {self.rate}|{self.rate_option} - """
            f"""{self.applicant_id} - {self.customer_id} - """
            f"""{self.account_id} - {self.contract_id}>"""
        )
class ContractFallBack(Contract):
    """Hydroquebec fallback contract.

    Represents a contract (contrat) that is not supported by the library.
    Only the basic contract features are available, and calls may fail at
    any time.
    """
    _rate_code = "FALLBACK"
    _rate_option_code = ""
    def __init__(
        self,
        applicant_id: str,
        customer_id: str,
        account_id: str,
        contract_id: str,
        hydro_client: HydroClient,
        log_level: str | None = None,
    ):
        """Create a new Hydroquebec fallback contract."""
        super().__init__(
            applicant_id,
            customer_id,
            account_id,
            contract_id,
            hydro_client,
            log_level,
        )
/MindsDB-23.8.3.0.tar.gz/MindsDB-23.8.3.0/mindsdb/interfaces/chatbot/chatbot_executor.py | from .model_executor import ModelExecutor
from .types import Function, BotException
class BotExecutor:
    """Runs a single chat turn: collects the available tools and calls the model."""
    def __init__(self, chat_task, chat_memory):
        self.chat_task = chat_task
        self.chat_memory = chat_memory
    def _get_model(self, model_name):
        """Build a ModelExecutor for *model_name* bound to this chat task."""
        return ModelExecutor(self.chat_task, model_name)
    def _prepare_available_functions(self):
        """Collect the tools exposed by the configured back-office database.

        Returns an empty list when no back-office database is configured or the
        handler does not expose a ``back_office_config``.
        """
        tools = []
        db_name = self.chat_task.bot_params.get('backoffice_db')
        if db_name is None:
            return tools
        handler = self.chat_task.session.integration_controller.get_handler(db_name)
        if not hasattr(handler, 'back_office_config'):
            return tools
        config = handler.back_office_config()
        for tool_name, tool_description in config.get('tools', {}).items():
            tools.append(
                Function(
                    name=tool_name,
                    description=tool_description,
                    callback=getattr(handler, tool_name)
                ))
        return tools
    def process(self):
        """Run one model call over the chat history with the available tools."""
        tools = self._prepare_available_functions()
        executor = self._get_model(self.chat_task.base_model_name)
        return executor.call(self.chat_memory.get_history(), tools)
class MultiModeBotExecutor(BotExecutor):
    """Bot executor that supports multiple conversation "modes".

    Modes come from ``bot_params['modes']``: a mapping of mode code to a dict
    with an ``info`` description and either a ``model`` name or a ``prompt``
    (optionally restricted by ``allowed_tools``). The currently selected mode
    is stored in the chat memory, and a generated ``select_task`` function
    lets the model switch modes mid-conversation.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._modes = self.chat_task.bot_params['modes']
    def _get_avail_modes_items(self):
        # one "- code: X, description: Y" line per configured mode
        return [
            f'- code: {key}, description: {value["info"]}'
            for key, value in self._modes.items()
        ]
    def _make_select_mode_prompt(self):
        # prompt shown while no mode is selected: list the modes and instruct
        # the model to call select_task
        task_items = self._get_avail_modes_items()
        tasks = '\n'.join(task_items)
        prompt = f'You are a helpful assistant and you can help with various types of tasks.' \
                 f'\nAvailable types of tasks:' \
                 f'\n{tasks}' \
                 f'\nUser have to choose task and assistant MUST call select_task function after it'
        return prompt
    def enter_bot_mode(self, functions):
        """Pick the model executor (and tool subset) for the current mode.

        :param functions: full list of available Function tools
        :return: ``(model_executor, functions)`` tuple
        """
        # choose prompt or model depending on the selected mode
        mode_name = self.chat_memory.get_mode()
        allowed_tools = None
        if mode_name is None:
            # no mode selected yet: use the base model with the selection prompt
            model_executor = self._get_model(self.chat_task.base_model_name)
            prompt = self._make_select_mode_prompt()
            model_executor.prompt = prompt
        else:
            # a mode is selected
            mode = self._modes.get(mode_name)
            if mode is None:
                # unknown mode stored in memory: reset it and fail loudly
                self.chat_memory.set_mode(None)
                raise BotException(f'Error to use mode: {mode_name}')
            if 'model' in mode:
                # the mode is backed by a dedicated model
                model_executor = self._get_model(mode['model'])
            elif 'prompt' in mode:
                # just a prompt: use the base bot model with the custom prompt
                model_executor = self._get_model(self.chat_task.base_model_name)
                model_executor.prompt = mode['prompt']
            else:
                raise BotException(f'Mode is not supported: {mode}')
            allowed_tools = mode.get('allowed_tools')
        if allowed_tools is not None:
            # restrict the tool list to what the mode allows
            functions = [
                fnc
                for fnc in functions
                if fnc.name in allowed_tools
            ]
        return model_executor, functions
    def _mode_switching_function(self, switched_to_mode):
        """Build the ``select_task`` Function the model uses to switch modes.

        :param switched_to_mode: mutable list; the chosen mode (or None for
            unselect) is appended so the caller can detect that a switch happened.
        """
        def _select_task(mode_name):
            if mode_name == '':
                # empty input unselects the current mode
                self.chat_memory.set_mode(None)
                switched_to_mode.append(None)
                return 'success'
            avail_modes = list(self._modes.keys())
            if mode_name not in avail_modes:
                return f'Error: task is not found. Available tasks: {", ".join(avail_modes)}'
            self.chat_memory.set_mode(mode_name)
            switched_to_mode.append(mode_name)
            return 'success'
        return Function(
            name='select_task',
            callback=_select_task,
            description='Have to be used by assistant to select task. Input is task type.'
                        ' If user want to unselect task input should be empty string.'
                        ' Available tasks: ' + '; '.join(self._get_avail_modes_items())
        )
    def process(self):
        """Run one chat turn, re-running once if the model switched modes."""
        # this list is appended to by _select_task when a mode switch happens
        switched_to_mode = []
        functions_all = self._prepare_available_functions()
        # add the mode-switching tool to the available functions
        functions_all.append(self._mode_switching_function(switched_to_mode))
        model_executor, functions = self.enter_bot_mode(functions_all)
        # workaround: don't show history if mode is not selected, otherwise bot doesn't decide to change mode
        if self.chat_memory.get_mode() is None:
            self.chat_memory.hide_history(left_count=1)
        model_output = model_executor.call(self.chat_memory.get_history(), functions)
        if len(switched_to_mode) > 0:
            # mode changed:
            # - clear previous history (keep only the last message)
            # - re-enter the newly selected mode and run the model once again
            self.chat_memory.hide_history(left_count=1)
            model_executor, functions = self.enter_bot_mode(functions_all)
            model_output = model_executor.call(self.chat_memory.get_history(), functions)
        return model_output
/Electrum-Zcash-Random-Fork-3.1.3b5.tar.gz/Electrum-Zcash-Random-Fork-3.1.3b5/lib/exchange_rate.py | from datetime import datetime
import inspect
import requests
import sys
import os
import json
from threading import Thread
import time
import csv
import decimal
from decimal import Decimal
from .bitcoin import COIN
from .i18n import _
from .util import PrintError, ThreadJob
# See https://en.wikipedia.org/wiki/ISO_4217
# Decimal places used when formatting each fiat currency; currencies not
# listed default to 2 (see FxThread.ccy_amount_str).
CCY_PRECISIONS = {'BHD': 3, 'BIF': 0, 'BYR': 0, 'CLF': 4, 'CLP': 0,
                  'CVE': 0, 'DJF': 0, 'GNF': 0, 'IQD': 3, 'ISK': 0,
                  'JOD': 3, 'JPY': 0, 'KMF': 0, 'KRW': 0, 'KWD': 3,
                  'LYD': 3, 'MGA': 1, 'MRO': 1, 'OMR': 3, 'PYG': 0,
                  'RWF': 0, 'TND': 3, 'UGX': 0, 'UYI': 0, 'VND': 0,
                  'VUV': 0, 'XAF': 0, 'XAU': 4, 'XOF': 0, 'XPF': 0,
                  # Not ISO 4217.
                  'BTC': 8}
# Defaults used when no exchange/currency is configured.
DEFAULT_EXCHANGE = 'BitcoinAverage'
DEFAULT_CCY = 'USD'
class ExchangeBase(PrintError):
    """Base class for fiat exchange-rate providers.

    Subclasses implement get_rates() (spot quotes) and optionally
    history_ccys()/request_history() (historical rates).
    """
    def __init__(self, on_quotes, on_history):
        # callbacks invoked after quotes / history are (re)loaded
        self.history = {}
        self.quotes = {}
        self.on_quotes = on_quotes
        self.on_history = on_history
    def get_json(self, site, get_string):
        """Fetch ``https://<site><get_string>`` and return the parsed JSON."""
        # APIs must have https
        url = ''.join(['https://', site, get_string])
        response = requests.request('GET', url, headers={'User-Agent' : 'Electrum-Zcash'}, timeout=10)
        return response.json()
    def get_csv(self, site, get_string):
        """Fetch ``https://<site><get_string>`` and return the parsed CSV rows."""
        url = ''.join(['https://', site, get_string])
        # NOTE(review): unlike get_json, no timeout is set here - confirm intended
        response = requests.request('GET', url, headers={'User-Agent' : 'Electrum-Zcash'})
        reader = csv.DictReader(response.content.decode().split('\n'))
        return list(reader)
    def name(self):
        """Exchange name (the subclass name)."""
        return self.__class__.__name__
    def update_safe(self, ccy):
        # fetch spot quotes, swallowing any error, then notify via on_quotes
        try:
            self.print_error("getting fx quotes for", ccy)
            self.quotes = self.get_rates(ccy)
            self.print_error("received fx quotes")
        except BaseException as e:
            self.print_error("failed fx quotes:", e)
        self.on_quotes()
    def update(self, ccy):
        """Refresh spot quotes for *ccy* in a background daemon thread."""
        t = Thread(target=self.update_safe, args=(ccy,))
        t.setDaemon(True)
        t.start()
    def read_historical_rates(self, ccy, cache_dir):
        """Load cached historical rates for *ccy* from disk, if present."""
        filename = os.path.join(cache_dir, self.name() + '_'+ ccy)
        if os.path.exists(filename):
            # file mtime doubles as the cache timestamp
            timestamp = os.stat(filename).st_mtime
            try:
                with open(filename, 'r', encoding='utf-8') as f:
                    h = json.loads(f.read())
                h['timestamp'] = timestamp
            except:
                h = None
        else:
            h = None
        if h:
            self.history[ccy] = h
            self.on_history()
        return h
    def get_historical_rates_safe(self, ccy, cache_dir):
        # fetch full history, write it to the cache file, then notify
        try:
            self.print_error("requesting fx history for", ccy)
            h = self.request_history(ccy)
            self.print_error("received fx history for", ccy)
        except BaseException as e:
            self.print_error("failed fx history:", e)
            return
        filename = os.path.join(cache_dir, self.name() + '_' + ccy)
        with open(filename, 'w', encoding='utf-8') as f:
            f.write(json.dumps(h))
        h['timestamp'] = time.time()
        self.history[ccy] = h
        self.on_history()
    def get_historical_rates(self, ccy, cache_dir):
        """Ensure historical rates for *ccy* are loaded; refresh if older than 24h."""
        if ccy not in self.history_ccys():
            return
        h = self.history.get(ccy)
        if h is None:
            h = self.read_historical_rates(ccy, cache_dir)
        if h is None or h['timestamp'] < time.time() - 24*3600:
            # stale or missing: fetch in a background daemon thread
            t = Thread(target=self.get_historical_rates_safe, args=(ccy, cache_dir))
            t.setDaemon(True)
            t.start()
    def history_ccys(self):
        """Currencies with historical data; overridden by subclasses."""
        return []
    def historical_rate(self, ccy, d_t):
        """Return the rate for *ccy* on date *d_t*, or the string 'NaN'."""
        return self.history.get(ccy, {}).get(d_t.strftime('%Y-%m-%d'), 'NaN')
    def get_currencies(self):
        """Return the sorted 3-letter currency codes supported by this exchange."""
        rates = self.get_rates('')
        return sorted([str(a) for (a, b) in rates.items() if b is not None and len(a)==3])
class BitcoinAverage(ExchangeBase):
    """ZEC spot and historical rates from the BitcoinAverage API."""
    def get_rates(self, ccy):
        # local renamed from `json`, which shadowed the stdlib json module
        data = self.get_json('apiv2.bitcoinaverage.com',
                             '/indices/local/ticker/ZEC%s' % ccy)
        return {ccy: Decimal(data['last'])}
    def history_ccys(self):
        """Currencies for which historical rates are available."""
        return ['USD', 'EUR', 'PLN']
    def request_history(self, ccy):
        """Return {'YYYY-MM-DD': average_rate} for the full history of *ccy*."""
        history = self.get_json('apiv2.bitcoinaverage.com',
                                "/indices/local/history/ZEC%s"
                                "?period=alltime&format=json" % ccy)
        # 'time' is an ISO timestamp; keep only the date part as the key
        return dict([(h['time'][:10], h['average']) for h in history])
class Bittrex(ExchangeBase):
    """ZEC/BTC spot rate from the Bittrex public API."""
    def get_rates(self, ccy):
        # local renamed from `json`, which shadowed the stdlib json module
        data = self.get_json('bittrex.com',
                             '/api/v1.1/public/getticker?market=BTC-ZEC')
        quote_currencies = {}
        if not data.get('success', False):
            # API reported failure: return no quotes
            return quote_currencies
        quote_currencies['BTC'] = Decimal(data['result']['Last'])
        return quote_currencies
class Poloniex(ExchangeBase):
    """ZEC/BTC spot rate from the Poloniex public ticker."""
    def get_rates(self, ccy):
        # local renamed from `json`, which shadowed the stdlib json module
        data = self.get_json('poloniex.com', '/public?command=returnTicker')
        quote_currencies = {}
        zcash_ticker = data.get('BTC_ZEC')
        # robustness: previously a missing BTC_ZEC pair raised TypeError;
        # now an empty quote dict is returned (consistent with Bittrex)
        if zcash_ticker is not None:
            quote_currencies['BTC'] = Decimal(zcash_ticker['last'])
        return quote_currencies
class CoinMarketCap(ExchangeBase):
    """ZEC/USD spot rate from the CoinMarketCap v1 ticker."""
    def get_rates(self, ccy):
        # local renamed from `json`, which shadowed the stdlib json module
        data = self.get_json('api.coinmarketcap.com', '/v1/ticker/1437/')
        quote_currencies = {}
        # robustness: also guard against an empty list (previously IndexError)
        if not isinstance(data, list) or not data:
            return quote_currencies
        ticker = data[0]
        # loop variable renamed so the `ccy` parameter is not clobbered
        for currency, key in [
                ('USD', 'price_usd'),
        ]:
            quote_currencies[currency] = Decimal(ticker[key])
        return quote_currencies
def dictinvert(d):
    """Invert a mapping of key -> list of values into value -> list of keys."""
    inverted = {}
    for key, values in d.items():
        for value in values:
            inverted.setdefault(value, []).append(key)
    return inverted
def get_exchanges_and_currencies():
    """Return {exchange_name: [currency codes]}, cached in currencies.json.

    The cache file next to this module is used when readable; otherwise each
    exchange class is queried for its currency list (network access) and the
    result is written back to the cache.
    """
    import os, json
    path = os.path.join(os.path.dirname(__file__), 'currencies.json')
    try:
        # bare `except:` narrowed to Exception so KeyboardInterrupt/SystemExit
        # are no longer swallowed
        with open(path, 'r', encoding='utf-8') as f:
            return json.loads(f.read())
    except Exception:
        # missing or invalid cache file: rebuild it below
        pass
    d = {}
    is_exchange = lambda obj: (inspect.isclass(obj)
                               and issubclass(obj, ExchangeBase)
                               and obj != ExchangeBase)
    exchanges = dict(inspect.getmembers(sys.modules[__name__], is_exchange))
    for name, klass in exchanges.items():
        exchange = klass(None, None)
        try:
            d[name] = exchange.get_currencies()
            print(name, "ok")
        except Exception:
            print(name, "error")
            continue
    with open(path, 'w', encoding='utf-8') as f:
        f.write(json.dumps(d, indent=4, sort_keys=True))
    return d
# Built once at import time; may hit the network when the cache file is missing.
CURRENCIES = get_exchanges_and_currencies()
def get_exchanges_by_ccy(history=True):
    """Map each currency code to the list of exchanges supporting it.

    When *history* is True, only currencies with historical data are considered.
    """
    if not history:
        return dictinvert(CURRENCIES)
    per_exchange = {}
    for name in CURRENCIES.keys():
        exchange_class = globals()[name]
        per_exchange[name] = exchange_class(None, None).history_ccys()
    return dictinvert(per_exchange)
class FxThread(ThreadJob):
    """Periodic job that keeps fiat exchange rates (spot and historical) fresh."""
    def __init__(self, config, network):
        self.config = config
        self.network = network
        self.ccy = self.get_currency()
        # True when today's historical rate had to fall back to the spot quote
        self.history_used_spot = False
        # GUI widget slots, filled in elsewhere
        self.ccy_combo = None
        self.hist_checkbox = None
        self.cache_dir = os.path.join(config.path, 'cache')
        # also resets self.timeout to 0 so quotes refresh immediately
        self.set_exchange(self.config_exchange())
        if not os.path.exists(self.cache_dir):
            os.mkdir(self.cache_dir)
    def get_currencies(self, h):
        """Sorted currency codes; only those with history when *h* is True."""
        d = get_exchanges_by_ccy(h)
        return sorted(d.keys())
    def get_exchanges_by_ccy(self, ccy, h):
        """Exchanges supporting *ccy* (with history when *h* is True)."""
        d = get_exchanges_by_ccy(h)
        return d.get(ccy, [])
    def ccy_amount_str(self, amount, commas):
        """Format a fiat amount using the currency's conventional precision."""
        prec = CCY_PRECISIONS.get(self.ccy, 2)
        fmt_str = "{:%s.%df}" % ("," if commas else "", max(0, prec))
        try:
            rounded_amount = round(amount, prec)
        except decimal.InvalidOperation:
            # e.g. NaN cannot be rounded; format the raw value instead
            rounded_amount = amount
        return fmt_str.format(rounded_amount)
    def run(self):
        # This runs from the plugins thread which catches exceptions
        if self.is_enabled():
            # timeout == 0 right after a currency/exchange change: refresh history
            if self.timeout ==0 and self.show_history():
                self.exchange.get_historical_rates(self.ccy, self.cache_dir)
            if self.timeout <= time.time():
                # refresh spot quotes at most every 150 seconds
                self.timeout = time.time() + 150
                self.exchange.update(self.ccy)
    def is_enabled(self):
        """True when fiat exchange rates are enabled in the config."""
        return bool(self.config.get('use_exchange_rate'))
    def set_enabled(self, b):
        return self.config.set_key('use_exchange_rate', bool(b))
    def get_history_config(self):
        return bool(self.config.get('history_rates'))
    def set_history_config(self, b):
        self.config.set_key('history_rates', bool(b))
    def get_history_capital_gains_config(self):
        return bool(self.config.get('history_rates_capital_gains', False))
    def set_history_capital_gains_config(self, b):
        self.config.set_key('history_rates_capital_gains', bool(b))
    def get_fiat_address_config(self):
        return bool(self.config.get('fiat_address'))
    def set_fiat_address_config(self, b):
        self.config.set_key('fiat_address', bool(b))
    def get_currency(self):
        '''Use when dynamic fetching is needed'''
        return self.config.get("currency", DEFAULT_CCY)
    def config_exchange(self):
        """Configured exchange name (defaults to DEFAULT_EXCHANGE)."""
        return self.config.get('use_exchange', DEFAULT_EXCHANGE)
    def show_history(self):
        """True when history rates are enabled and available for the current currency."""
        return self.is_enabled() and self.get_history_config() and self.ccy in self.exchange.history_ccys()
    def set_currency(self, ccy):
        self.ccy = ccy
        self.config.set_key('currency', ccy, True)
        self.timeout = 0 # Because self.ccy changes
        self.on_quotes()
    def set_exchange(self, name):
        # fall back to Bittrex when the configured class name is unknown
        class_ = globals().get(name, Bittrex)
        self.print_error("using exchange", name)
        if self.config_exchange() != name:
            self.config.set_key('use_exchange', name, True)
        self.exchange = class_(self.on_quotes, self.on_history)
        # A new exchange means new fx quotes, initially empty.  Force
        # a quote refresh
        self.timeout = 0
        self.exchange.read_historical_rates(self.ccy, self.cache_dir)
    def on_quotes(self):
        # relay exchange callbacks to the network event system, if connected
        if self.network:
            self.network.trigger_callback('on_quotes')
    def on_history(self):
        if self.network:
            self.network.trigger_callback('on_history')
    def exchange_rate(self):
        '''Returns None, or the exchange rate as a Decimal'''
        # NOTE(review): despite the docstring, this returns Decimal('NaN')
        # rather than None when no quote is available.
        rate = self.exchange.quotes.get(self.ccy)
        if rate is None:
            return Decimal('NaN')
        return Decimal(rate)
    def format_amount(self, btc_balance):
        """Fiat string for a satoshi balance, or '' when no rate is known."""
        rate = self.exchange_rate()
        return '' if rate.is_nan() else "%s" % self.value_str(btc_balance, rate)
    def format_amount_and_units(self, btc_balance):
        """Like format_amount, with the currency code appended."""
        rate = self.exchange_rate()
        return '' if rate.is_nan() else "%s %s" % (self.value_str(btc_balance, rate), self.ccy)
    def get_fiat_status_text(self, btc_balance, base_unit, decimal_point):
        """Status-bar text showing the fiat value of one base unit."""
        rate = self.exchange_rate()
        return _("  (No FX rate available)") if rate.is_nan() else " 1 %s~%s %s" % (base_unit,
            self.value_str(COIN / (10**(8 - decimal_point)), rate), self.ccy)
    def fiat_value(self, satoshis, rate):
        """Fiat Decimal for *satoshis* at *rate*; NaN when satoshis is None."""
        return Decimal('NaN') if satoshis is None else Decimal(satoshis) / COIN * Decimal(rate)
    def value_str(self, satoshis, rate):
        return self.format_fiat(self.fiat_value(satoshis, rate))
    def format_fiat(self, value):
        if value.is_nan():
            return _("No data")
        return "%s" % (self.ccy_amount_str(value, True))
    def history_rate(self, d_t):
        """Historical rate for datetime *d_t* (Decimal; NaN when unknown)."""
        if d_t is None:
            return Decimal('NaN')
        rate = self.exchange.historical_rate(self.ccy, d_t)
        # Frequently there is no rate for today, until tomorrow :)
        # Use spot quotes in that case
        if rate == 'NaN' and (datetime.today().date() - d_t.date()).days <= 2:
            rate = self.exchange.quotes.get(self.ccy, 'NaN')
            self.history_used_spot = True
        return Decimal(rate)
    def historical_value_str(self, satoshis, d_t):
        return self.format_fiat(self.historical_value(satoshis, d_t))
    def historical_value(self, satoshis, d_t):
        return self.fiat_value(satoshis, self.history_rate(d_t))
    def timestamp_rate(self, timestamp):
        """Historical rate for a unix *timestamp*."""
        from electrum_zcash.util import timestamp_to_datetime
        date = timestamp_to_datetime(timestamp)
        return self.history_rate(date)
/MindsDB-23.8.3.0.tar.gz/MindsDB-23.8.3.0/mindsdb/migrations/versions/2021-11-30_17c3d2384711_init.py | import datetime
from alembic.autogenerate import produce_migrations, render, api
from alembic import context
from sqlalchemy import UniqueConstraint
from sqlalchemy.orm import declarative_base
from sqlalchemy import Column, Integer, String, DateTime, Boolean, Index
# required for code execution
from alembic import op # noqa
import sqlalchemy as sa # noqa
import mindsdb.interfaces.storage.db # noqa
from mindsdb.interfaces.storage.db import Json, Array
# revision identifiers, used by Alembic.
revision = '17c3d2384711'  # this migration's id
down_revision = None       # first migration in the chain
branch_labels = None
depends_on = None
# ========================================== current database state ========================================
class Base:
    """Mixin for the declarative base used by the snapshot models below."""
    # allows legacy class attributes without Mapped[] annotations
    # -- NOTE(review): verify against the SQLAlchemy version pinned by the project
    __allow_unmapped__ = True
Base = declarative_base(cls=Base)
# Source: https://stackoverflow.com/questions/26646362/numpy-array-is-not-json-serializable
class Semaphor(Base):
    """Snapshot of the ``semaphor`` table (per-entity action lock records)."""
    __tablename__ = 'semaphor'
    id = Column(Integer, primary_key=True)
    updated_at = Column(DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now)
    created_at = Column(DateTime, default=datetime.datetime.now)
    entity_type = Column('entity_type', String)
    entity_id = Column('entity_id', Integer)
    action = Column(String)
    company_id = Column(Integer)
    # NOTE(review): assigned as a plain class attribute rather than via
    # __table_args__ - confirm the unique constraint is actually emitted.
    uniq_const = UniqueConstraint('entity_type', 'entity_id')
class Datasource(Base):
    """Snapshot of the ``datasource`` table."""
    __tablename__ = 'datasource'
    id = Column(Integer, primary_key=True)
    updated_at = Column(DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now)
    created_at = Column(DateTime, default=datetime.datetime.now)
    name = Column(String)
    data = Column(String)  # Including, e.g. the query used to create it and even the connection info when there's no integration associated with it -- A JSON
    creation_info = Column(String)
    analysis = Column(String)  # A JSON
    company_id = Column(Integer)
    mindsdb_version = Column(String)
    datasources_version = Column(String)
    integration_id = Column(Integer)
class Predictor(Base):
    """A trained (or training) ML model and its metadata."""
    __tablename__ = 'predictor'
    id = Column(Integer, primary_key=True)
    updated_at = Column(DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now)
    created_at = Column(DateTime, default=datetime.datetime.now)
    name = Column(String)
    data = Column(Json)  # A JSON -- should be everything returned by `get_model_data`, I think
    to_predict = Column(Array)  # target column name(s)
    company_id = Column(Integer)
    mindsdb_version = Column(String)
    native_version = Column(String)
    datasource_id = Column(Integer)
    is_custom = Column(Boolean)  # to del
    learn_args = Column(Json)
    update_status = Column(String, default='up_to_date')
    json_ai = Column(Json, nullable=True)
    code = Column(String, nullable=True)
    lightwood_version = Column(String, nullable=True)
    dtype_dict = Column(Json, nullable=True)
class AITable(Base):
    """A virtual table that joins an integration query with a predictor."""
    __tablename__ = 'ai_table'
    id = Column(Integer, primary_key=True)
    updated_at = Column(DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now)
    created_at = Column(DateTime, default=datetime.datetime.now)
    name = Column(String)
    integration_name = Column(String)
    integration_query = Column(String)
    query_fields = Column(Json)
    predictor_name = Column(String)
    predictor_columns = Column(Json)
    company_id = Column(Integer)
class Log(Base):
    """Application log entries persisted to the database."""
    __tablename__ = 'log'
    id = Column(Integer, primary_key=True)
    created_at = Column(DateTime, default=datetime.datetime.now)
    log_type = Column(String)  # log, info, warning, traceback etc
    source = Column(String)  # file + line
    company_id = Column(Integer)
    payload = Column(String)
    # NOTE(review): this Index is inert -- declared as a plain class attribute
    # it is not bound to the table, and it names a column 'created_at_index'
    # that does not exist (presumably 'created_at' was meant).  Left unchanged
    # so the migration output stays identical; confirm before fixing.
    created_at_index = Index("some_index", "created_at_index")
class Integration(Base):
    """A configured external database/service integration."""
    __tablename__ = 'integration'
    id = Column(Integer, primary_key=True)
    updated_at = Column(DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now)
    created_at = Column(DateTime, default=datetime.datetime.now)
    name = Column(String, nullable=False)
    data = Column(Json)  # connection parameters
    company_id = Column(Integer)
class Stream(Base):
    """A streaming-prediction configuration (input/output streams + predictor)."""
    __tablename__ = 'stream'
    id = Column(Integer, primary_key=True)
    name = Column(String, nullable=False)
    stream_in = Column(String, nullable=False)
    stream_out = Column(String, nullable=False)
    anomaly_stream = Column(String)
    integration = Column(String)
    predictor = Column(String, nullable=False)
    company_id = Column(Integer)
    updated_at = Column(DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now)
    created_at = Column(DateTime, default=datetime.datetime.now)
    type = Column(String, default='unknown')
    # NOTE(review): default={} is a single shared dict object used as the
    # column default for every insert -- presumably harmless here since it is
    # serialised per row, but a factory would be safer; confirm.
    connection_info = Column(Json, default={})
    learning_params = Column(Json, default={})
    learning_threshold = Column(Integer, default=0)
# ====================================================================================================
def upgrade():
    '''
    First migration.

    Generates a migration script by difference between model and database and executes it
    '''
    target_metadata = Base.metadata
    mc = context.get_context()
    # Diff the declarative models above against the live database.
    migration_script = produce_migrations(mc, target_metadata)
    autogen_context = api.AutogenContext(
        mc, autogenerate=True
    )
    # Seems to be the only way to apply changes to the database
    template_args = {}
    render._render_python_into_templatevars(
        autogen_context, migration_script, template_args
    )
    code = template_args['upgrades']
    # Strip the indentation the renderer adds so the ops exec at top level.
    code = code.replace('\n ', '\n')
    print('\nPerforming database changes:')
    print(code)
    # NOTE(review): exec() of alembic-rendered code -- acceptable only because
    # the input is generated locally by alembic, never from user data.
    exec(code)
def downgrade():
    """Downgrading is unsupported: the pre-migration state is unknown."""
    raise NotImplementedError()
/NanoCap-1.0b12.tar.gz/NanoCap-1.0b12/nanocap/core/minimisation.py | from nanocap.core.globals import *
import os,sys,math,copy,random,time,threading,Queue,ctypes
import numpy
from scipy.optimize import fmin_l_bfgs_b,fmin
import nanocap.core.forcefield as forcefield
from nanocap.core.points import Points
from nanocap.core.util import *
from nanocap.clib import clib_interface
clib = clib_interface.clib
class Minimiser(object):
    '''
    Parent class for both dual lattice and carbon lattice
    minimisation.

    minimise - minimise internal coords
    minimise_scale - scale internal coords by factor gamma
    '''
    def __init__(self,FFID=None,structure=None,callback=None,
                 min_type="LBFGS",ftol=1e-10,min_steps=100,render_update_freq=1):
        self.FFID = FFID
        self.structure = structure
        # Fall back to a null force field when FFID is not registered.
        try:self.FF = forcefield.FFS[FFID]
        except:
            printl(FFID,"not yet implemented")
            self.FF = forcefield.ForceField("Temp")
            self.FF = forcefield.NullForceField()
        self.updateFlag = True
        self.callback=callback
        self.modifyPES = False
        self.render_update_freq= render_update_freq
        self.ftol=ftol
        self.min_steps=min_steps
        self.min_type=min_type
        self.runSimplexWhenNoAnalyticalForce = False
        self.currentPointSet = Points("Default Points")

    def __copy__(self):
        cls = self.__class__
        result = cls.__new__(cls)
        result.__dict__.update(self.__dict__)
        return result

    # NOTE(review): __deepcopy__ deliberately performs the same *shallow*
    # copy as __copy__ (no use of memo) -- presumably to avoid duplicating
    # large point sets; confirm this is intended.
    def __deepcopy__(self, memo):
        cls = self.__class__
        result = cls.__new__(cls)
        result.__dict__.update(self.__dict__)
        return result

    def __repr__(self):
        s = "Optimising: {} with: {} minimiser and forcefield: {}".format(self.currentPointSet.PointSetLabel,
                                                                          self.min_type,
                                                                          self.FF.ID)
        return s

    def minimise(self,pointSet,update=True,callback=None,min_type=None,
                 ftol=None,min_steps=None):
        '''
        callback routine called every step and at the end of minimisation
        '''
        self.currentPointSet = pointSet
        pointSet.FF = self.FF.ID
        # Keyword overrides replace the values set in __init__.
        if(callback!=None):self.callback=callback
        if(ftol!=None):self.ftol=ftol
        if(min_steps!=None):self.min_steps=min_steps
        if(min_type!=None):self.min_type=min_type
        # Null force field: nothing to optimise, record zeroed results.
        if self.FF.ID=="NULL":
            pointSet.final_energy = 0.0
            pointSet.unconstrained_pos = numpy.copy(pointSet.pos)
            pointSet.constrained_pos = numpy.copy(pointSet.pos)
            pointSet.final_scale = 0.
            pointSet.final_scaled_energy = 0.0
            return
        printl("min_type",min_type,"SIMPLEX")
        # Gradient-based minimisers need an analytical force from the field.
        if (self.min_type!="SIMPLEX"):
            if not self.FF.analytical_force:
                if(self.runSimplexWhenNoAnalyticalForce):
                    self.min_type = "SIMPLEX"
                else:
                    printl("will not minimise with ffield:",self.FF.ID,"as does not return analytical force, will return")
                    pointSet.final_energy = 0.0
                    pointSet.unconstrained_pos = numpy.copy(pointSet.pos)
                    self.final_energy = 0.0
                    self.unconstrained_pos = pointSet.unconstrained_pos
                    return
        stime = time.time()
        self.updateFlag=update
        self.stopmin = 0
        printh("Minimising:",pointSet,"with:",min_type,"minimiser and ffield:",self.FF.ID)
        self.setup_force_field(pointSet)
        self.minimise_pointSet(pointSet)
        self.final_operations(pointSet)
        #if not self.FF.analytical_force: self.config.opts["MinType"] = iniMinType
        printh("energy",numpy.sum(pointSet.energy),"magnitude force",magnitude(pointSet.force))
        printh("time for minimisation",time.time()-stime)

    def final_operations(self,pointSet):
        '''
        reimplement for any final ops post minimisation
        '''
        pass

    # def minima_search(self,pointSet):
    #     pass

    def minimise_pointSet(self,pointSet):
        '''Dispatch to the configured minimiser and record final results.'''
        if self.FF.ID=="NULL":return
        if(self.min_type=="LBFGS"):
            self.minimise_lbfgs_python(pointSet)
        if(self.min_type=="SD"):
            self.minimise_sd(pointSet)
        if(self.min_type=="SIMPLEX"):
            self.minimise_simplex_python(pointSet)
        pointSet.final_energy = numpy.sum(pointSet.energy)
        pointSet.unconstrained_pos = numpy.copy(pointSet.pos)
        self.final_energy = pointSet.final_energy
        self.unconstrained_pos = pointSet.unconstrained_pos
        printl("final_energy",self.final_energy)

    def minimise_scale(self,pointSet,ftol=1e-5):
        '''Optimise a single scale factor gamma via Nelder-Mead fmin.'''
        printh("Minimising Scale:",pointSet,"with fmin minimiser and force field:",self.FF.ID)
        pointSet.FF = self.FF.ID
        self.currentPointSet = pointSet
        if self.FF.ID=="NULL":return
        self.setup_force_field(pointSet)
        initial_gamma = self.get_initial_scale(pointSet)
        printl("initial_gamma",initial_gamma)
        gamma = fmin(self.minimise_scale_step_operations, initial_gamma,
                     args=(pointSet,),
                     xtol=0.0001, ftol=ftol,full_output=0)
        final_energy = self.minimise_scale_step_operations(gamma,pointSet)
        self.apply_scale(gamma,pointSet)
        pointSet.final_scale = gamma[0]
        pointSet.final_scaled_energy = numpy.sum(pointSet.energy)
        pointSet.constrained_pos = numpy.copy(pointSet.pos)
        self.final_scale = pointSet.final_scale
        self.final_scaled_energy = pointSet.final_scaled_energy
        self.constrained_pos = pointSet.constrained_pos
        printl("final_scale", self.final_scale, "final_scaleEnergy",self.final_scaled_energy)
        self.final_operations(pointSet)

    def get_initial_scale(self,pointSet):
        '''Initial guess for gamma; subclasses may override.'''
        return [1.0,]

    # NOTE(review): subclasses override this with a (self, pointSet)
    # signature; this base stub takes no pointSet argument.
    def setup_force_field(self):
        printl("Please reimplement setup_force_field")

    def minimise_step_operations(self,pos,pointSet):
        '''Single objective evaluation: returns (energy, gradient=-force).'''
        self.pre_force_call_operations(pos,pointSet)
        pointSet.pos = pos
        energy,force = self.FF.get_energy(pointSet)
        self.post_force_call_operations(pos,pointSet)
        pointSet.pos = pos
        if(self.modifyPES):
            energy,force = self.modify_PES(pos,pointSet)
        else:
            energy,force = numpy.sum(pointSet.energy),pointSet.force
        grad = -1.0*force
        #printl("energy",energy,magnitude(grad))
        return energy,grad

    def minimise_step_operations_no_force(self,pos,pointSet):
        '''Energy-only wrapper used by the simplex minimiser.'''
        energy,grad = self.minimise_step_operations(pos,pointSet)
        return energy

    def modify_PES(self,pos,pointSet):
        '''
        will be overriden by minimaSearch to add gaussians
        '''
        energy,force = 0,[0,0]
        return energy,force

    def apply_scale(self,scale,pointSet):
        '''
        reimplement to apply the scale transform
        '''
        pass

    def minimise_scale_step_operations(self,gamma,pointSet):
        '''Energy at scale gamma; positions are restored afterwards.'''
        pos0 = numpy.copy(pointSet.pos)
        self.pre_scale_force_call_operations(gamma,pointSet)
        self.apply_scale(gamma,pointSet)
        energy,force = self.FF.get_energy(pointSet)
        pointSet.force = force
        pointSet.pos = pos0
        self.previousGamma = gamma[0]
        self.post_scale_force_call_operations(gamma,pointSet)
        return energy

    def pre_scale_force_call_operations(self,gamma,pointSet):
        '''
        reimplement for any ops before the scale force call
        '''
        pass

    def post_scale_force_call_operations(self,gamma,pointSet):
        '''
        reimplement for any ops after the scale force call
        '''
        pass

    def pre_force_call_operations(self,pos,pointSet):
        '''
        reimplement for any ops before the force call
        '''
        pass

    def post_force_call_operations(self,pos,pointSet):
        '''
        reimplement for any ops after the force call
        '''
        pass

    # NOTE(review): this steepest-descent path still reads self.config.opts[...]
    # and calls self.update_output(), neither of which is set up by __init__ --
    # it looks stale relative to the LBFGS/SIMPLEX paths; confirm before use.
    def minimise_sd(self,pointSet):
        steps,echange = 0,10000000
        ienergy,iforce = self.FF.get_energy(pointSet)
        oldenergy = ienergy
        printl("initial energy, force",ienergy,magnitude(iforce))
        force = iforce
        while(steps < self.config.opts["MinSteps"] and self.config.opts["StopMin"]==0 and echange> self.config.opts["MinTol"]):
            energy, grad = self.minimise_step_operations(pointSet.pos,pointSet)
            # Step length chosen so the largest gradient component moves 0.01.
            step = 0.01/numpy.max(numpy.abs(grad))
            pointSet.pos = self.line_min(pointSet.pos,pointSet,grad,step,1e-5)
            fenergy,fforce = self.FF.get_energy(pointSet)
            self.post_force_call_operations(pointSet.pos, pointSet)
            echange = numpy.abs(oldenergy - fenergy)
            oldenergy = fenergy
            pointSet.final_energy = fenergy
            if(steps>0 and steps % self.config.opts["RenderUpdate"] ==0):
                if(self.updateFlag):self.update_output(pointSet)
                printl("step",steps,"energy",fenergy,"echange",echange,"difference from known minimum",fenergy-self.config.opts["KnownMinimum"])
            steps+=1
        if(self.updateFlag):self.update_output(pointSet)

    def line_min(self,pos,pointSet,direction, step, tol):
        '''Secant line minimisation along `direction` (bracket, then root-find
        the directional derivative).  Frozen coordinates are masked by
        pointSet.freeflagspos.'''
        a_pos,b_pos,c_pos = numpy.copy(pos),numpy.copy(pos),numpy.copy(pos)
        energy, grad = self.minimise_step_operations(pointSet.pos,pointSet)
        '''
        bracket minimum along direction
        '''
        a,b = 0,step
        dfa = numpy.dot(-grad, direction)
        b_pos += direction * b * pointSet.freeflagspos
        b_energy,b_grad = self.minimise_step_operations(b_pos,pointSet)
        dfb = numpy.dot(-b_grad, direction)
        # Expand the interval until the directional derivative changes sign.
        while(dfa*dfb >0):
            bold = b
            b = ((dfa*b - dfb*a) / (dfa - dfb))*1.5
            a = bold
            dfa=dfb
            b_pos = pos + (direction * b * pointSet.freeflagspos)
            b_energy,b_grad = self.minimise_step_operations(b_pos,pointSet)
            dfb = numpy.dot(-b_grad, direction)
        '''
        find root of force
        '''
        loopcount=0
        dfc = tol+1
        while(abs(dfc) > tol and loopcount < 50):
            c = b - ((b-a)/(dfb - dfa))*dfb
            c_pos = pos + (direction * c * pointSet.freeflagspos)
            c_energy,c_grad = self.minimise_step_operations(c_pos,pointSet)
            dfc = numpy.dot(-c_grad, direction)
            a = b
            dfa = dfb
            b = c
            dfb = dfc
            loopcount+=1
        c_pos = pos + (direction * c * pointSet.freeflagspos)
        printd("post force z",c_pos[numpy.arange(2,len(pos),3)])
        return c_pos

    def minimise_simplex_python(self,pointSet):
        '''Derivative-free minimisation: repeated Nelder-Mead restarts until
        the energy change between restarts falls below ftol.'''
        steps,echange = 0,10000000
        ienergy,iforce = self.FF.get_energy(pointSet)
        oldenergy = ienergy
        while(steps < self.min_steps and echange > self.ftol and self.stopmin==0):
            pointSet.pos, final_energy, iters, funceval, errorflags = fmin(self.minimise_step_operations_no_force,
                                                                           pointSet.pos,
                                                                           args = (pointSet,),
                                                                           xtol=self.ftol*1000,
                                                                           ftol=self.ftol*1000,
                                                                           full_output=True,
                                                                           maxiter=10000,
                                                                           maxfun=10000,
                                                                           #callback = self.simplex_callback
                                                                           )
            fenergy,fforce = self.FF.get_energy(pointSet)
            self.post_force_call_operations(pointSet.pos, pointSet)
            echange = numpy.abs(oldenergy - fenergy)
            oldenergy = final_energy
            pointSet.final_energy = fenergy
            if(steps>0 and steps % self.render_update_freq == 0):
                if(self.callback):self.callback()
                printl("step",steps,"energy",fenergy,"echange",echange)
            printl("step",steps,"energy",fenergy,"echange",echange)
            steps+=1
        # NOTE(review): update_output is not defined on this class -- this
        # final call looks stale; confirm.
        if(self.updateFlag):self.update_output(pointSet)

    def minimise_lbfgs_python(self,pointSet):
        '''Gradient-based minimisation: repeated L-BFGS-B restarts until the
        energy change between restarts falls below ftol.'''
        steps,echange = 0,10000000
        ienergy,iforce = self.FF.get_energy(pointSet)
        oldenergy = ienergy
        printl("initial energy, force",ienergy,magnitude(iforce))
        while(steps < self.min_steps and echange > self.ftol and self.stopmin==0):
            bounds = None
            startt = time.time()
            pointSet.pos, final_energy, d = fmin_l_bfgs_b(self.minimise_step_operations,
                                                          pointSet.pos,
                                                          args = (pointSet,),
                                                          #fprime = self.get_force,
                                                          approx_grad = 0,
                                                          bounds = bounds,
                                                          m = 100,
                                                          factr = 10.0,
                                                          pgtol = self.ftol,
                                                          iprint = -1, maxfun = 150000)
            printl("time for lbfgs",time.time()-startt,"steps",steps)
            printl("fmin_l_bfgs_b:" , d['task'],d['funcalls'],magnitude(d['grad']))
            fenergy,fforce = self.FF.get_energy(pointSet)
            self.post_force_call_operations(pointSet.pos, pointSet)
            echange = numpy.abs(oldenergy - fenergy)
            oldenergy = final_energy
            pointSet.final_energy = fenergy
            if(steps>0 and steps % self.render_update_freq == 0):
                if(self.callback):self.callback()
                printl("step",steps,"energy",fenergy,"echange",echange)
            steps+=1
        if(self.callback):self.callback()
class DualLatticeMinimiser(Minimiser):
    """Minimiser for dual-lattice (Thomson-like) point sets constrained to a
    sphere or capped-nanotube surface."""
    def __init__(self,*args,**kwargs):#,topology=None):
        Minimiser.__init__(self,*args,**kwargs)
        if(self.structure!=None):
            self.structure.set_dual_lattice_minimiser(self)
        # Points are rescaled to this radius before every force call.
        self.req_radius=1.0

    def setup_force_field(self,pointSet):
        self.FF.setup(pointSet)
        if(self.structure.type==CAPPEDNANOTUBE):
            '''set the z-cutoff for thomson potential the nanotube '''
            self.FF.set_cutoff(self.structure.cutoff)

    def pre_force_call_operations(self,pos,pointSet):
        # Project points back onto the constraint surface (sphere, or
        # cylinder+caps of the given length for capped nanotubes).
        if(self.structure.type==CAPPEDNANOTUBE):
            length=self.structure.nanotube.midpoint_z*2
        else:
            length=None
        clib_interface.scale_points_to_rad(pointSet.npoints,pos,self.req_radius,length=length)

    def post_force_call_operations(self,pos,pointSet):
        if(self.structure.type==CAPPEDNANOTUBE):
            #if cap atoms enter the tube add force...
            k=1e4
            forcefield.force_on_cap_atoms_in_tube(self.structure.cap.dual_lattice.npoints,
                                                  pos,
                                                  pointSet.force,
                                                  pointSet.energy,
                                                  k)
        # Keep motion tangential to the constraint surface and mask frozen points.
        forcefield.remove_radial_component_of_force(pointSet.npoints,
                                                    pos,
                                                    pointSet.force
                                                    )
        pointSet.force*=pointSet.freeflagspos
class CarbonLatticeMinimiser(Minimiser):
    """Minimiser for the carbon-atom lattice; adds onion (multi-shell)
    optimisation with per-shell rigid rotations."""
    def __init__(self,*args,**kwargs):
        Minimiser.__init__(self,*args,**kwargs)
        if(self.structure!=None):
            self.structure.set_carbon_lattice_minimiser(self)

    def setup_force_field(self,pointSet):
        self.FF.setup(pointSet)

    def final_operations(self,pointSet):
        super(CarbonLatticeMinimiser, self).final_operations(pointSet)
        self.structure.update_child_structures()

    def minimise_onion_step_operations(self,args,onion):
        '''Objective for onion optimisation.

        `args` packs (NShells-1)*3 shell rotation angles followed by the
        flattened atom positions.  Returns (energy, gradient) where the
        angle gradient is estimated by forward finite differences (step h)
        and the position gradient is -force from the force field.
        '''
        angles = args[0:(onion.NShells-1)*3]
        onion.carbonAtoms.pos = args[(onion.NShells-1)*3:]
        #printl("angles",angles)
        # Apply the trial rotations to each shell (x, y, z in turn).
        c=0
        for i in range(1,onion.NShells):
            for j in range(0,3):
                angle = angles[c]
                if(j==0):
                    onion.rotate_x(i,angle)
                if(j==1):
                    onion.rotate_y(i,angle)
                if(j==2):
                    onion.rotate_z(i,angle)
                c+=1
        penergy,pforce = self.FF.get_energy(onion.carbonAtoms)
        # Finite-difference the energy w.r.t. each shell rotation angle.
        c = 0
        f = []
        h = 1e-4
        for i in range(1,onion.NShells):
            for j in range(0,3):
                angle = angles[c]
                if(j==0):
                    stime = time.time()
                    #onion.rotate_x(i,angle)
                    #print "r",time.time()-stime
                    stime = time.time()
                    #energy,force = self.FF.get_energy(onion.carbonAtoms)
                    #print "e",time.time()-stime
                    stime = time.time()
                    #p1 = numpy.copy(onion.carbonAtoms.pos)
                    onion.rotate_x(i,h)
                    #print "r",time.time()-stime
                    stime = time.time()
                    fenergy,fforce = self.FF.get_energy(onion.carbonAtoms)
                    #print "e",time.time()-stime
                    stime = time.time()
                    onion.rotate_x(i,-h)
                    #onion.rotate_x(i,-angle)
                    #print "r",time.time()-stime
                    stime = time.time()
                    f.append((fenergy-penergy)/h)
                if(j==1):
                    # onion.rotate_y(i,angle)
                    energy,force = self.FF.get_energy(onion.carbonAtoms)
                    onion.rotate_y(i,h)
                    fenergy,fforce = self.FF.get_energy(onion.carbonAtoms)
                    onion.rotate_y(i,-h)
                    #onion.rotate_y(i,-angle)
                    f.append((fenergy-penergy)/h)
                if(j==2):
                    #onion.rotate_z(i,angle)
                    energy,force = self.FF.get_energy(onion.carbonAtoms)
                    onion.rotate_z(i,h)
                    fenergy,fforce = self.FF.get_energy(onion.carbonAtoms)
                    onion.rotate_z(i,-h)
                    #onion.rotate_z(i,-angle)
                    f.append((fenergy-penergy)/h)
                #print "fenergy",fenergy,"energy",energy
                c+=1
        # NOTE(review): `force` here is whichever value the j==1/j==2 branches
        # assigned last -- fragile if the loop structure changes; confirm.
        gout = numpy.append(numpy.array(f),force*-1)
        #printl("energy",penergy,magnitude(gout),angles)
        #print energy,f
        #return magnitude(gout),gout
        return penergy,gout

    def minimise_onion(self,onion,rot=True,min_type="LBFGS",ftol=1e-10,min_steps=100,update=True,callback=None):
        '''Optimise an onion structure; with rot=True the per-shell rotation
        angles are optimised jointly with the atomic positions.'''
        self.updateFlag=update
        self.callback=callback
        self.ftol=ftol
        self.min_steps=min_steps
        self.stopmin = 0
        self.min_type=min_type
        self.currentPointSet = onion.carbonAtoms
        self.setup_force_field(onion.carbonAtoms)
        angles = numpy.zeros((onion.NShells-1)*3)
        if rot:
            printl("minimising onion with rotation")
            steps,echange = 0,10000000
            ienergy,iforce = self.FF.get_energy(onion.carbonAtoms)
            oldenergy = ienergy
            printl("initial energy, force",ienergy,magnitude(iforce))
            startt = time.time()
            while(steps < self.min_steps and echange > self.ftol and self.stopmin==0):
                bounds = None
                # Restart each outer step from zero trial angles.
                angles = numpy.zeros((onion.NShells-1)*3)
                args = numpy.append(angles,onion.carbonAtoms.pos)
                f_args, final_energy, d = fmin_l_bfgs_b(self.minimise_onion_step_operations,
                                                        args,
                                                        args = (onion,),
                                                        #fprime = self.get_force,
                                                        approx_grad = False,
                                                        bounds = bounds,
                                                        m = 100,
                                                        factr = 10.0,
                                                        pgtol = self.ftol,
                                                        iprint = -1, maxfun = 150000)
                angles = f_args[0:(onion.NShells-1)*3]
                #print "outdiff",magnitude(onion.carbonAtoms.pos-f_args[(onion.NShells-1)*3:])
                onion.carbonAtoms.pos = f_args[(onion.NShells-1)*3:]
                # Bake the optimised rotations into the shell coordinates.
                c=0
                for i in range(1,onion.NShells):
                    for j in range(0,3):
                        angle = angles[c]
                        if(j==0):
                            onion.rotate_x(i,angle)
                        if(j==1):
                            onion.rotate_y(i,angle)
                        if(j==2):
                            onion.rotate_z(i,angle)
                        c+=1
                #onion.write("optimise_onion_step_%04d.xyz"%(steps))
                printl("time for lbfgs",time.time()-startt,"steps",steps)
                printl("fmin_l_bfgs_b:" , d['task'],d['funcalls'],magnitude(d['grad']))
                fenergy,fforce = self.FF.get_energy(onion.carbonAtoms)
                printl("final angles",angles,"energy",final_energy,fenergy)
                self.post_force_call_operations(onion.carbonAtoms.pos, onion.carbonAtoms)
                echange = numpy.abs(oldenergy - final_energy)
                oldenergy = final_energy
                onion.carbonAtoms.final_energy = final_energy
                if(steps>0 and steps % self.render_update_freq == 0):
                    if(self.callback):self.callback()
                    printl("step",steps,"energy",final_energy,fenergy,"echange",echange)
                steps+=1
            if(self.callback):self.callback()
        else:
            self.minimise(onion.carbonAtoms,min_type="LBFGS")
        self.final_operations(onion.carbonAtoms)

    def apply_scale(self,gamma,pointSet):
        # Uniformly rescale the structure to radius gamma (cylinder length is
        # fixed for capped nanotubes).
        if(self.structure.type==CAPPEDNANOTUBE):
            length=self.structure.nanotube.midpoint_z*2.0
        else:
            length=None
        clib_interface.scale_points_to_rad(pointSet.npoints,pointSet.pos,float(gamma[0]),length=length)

    def get_initial_scale(self,pointSet):
        # Empirical initial radius ~ sqrt(N * 0.22) for N carbon atoms.
        gamma = numpy.zeros(1,NPF)
        gamma[0] = math.sqrt( float(pointSet.npoints) * 0.22)
        printl("initial scale",gamma[0])
        # Shrink until the scaled energy magnitude is appreciable (>50).
        while(math.fabs(self.minimise_scale_step_operations(gamma,pointSet))<50):
            gamma[0]*=0.95
        return gamma
/M2CryptoWin64-0.21.1-3.tar.gz/M2CryptoWin64-0.21.1-3/M2Crypto/BIO.py | import m2
# Deprecated
from m2 import bio_do_handshake as bio_do_ssl_handshake
from cStringIO import StringIO
# Exception type raised by the underlying m2 BIO calls.
class BIOError(Exception): pass
m2.bio_init(BIOError)
class BIO:
    """Abstract object interface to the BIO API.

    Wraps an OpenSSL BIO pointer; subclasses bind it to a concrete BIO type
    (memory, file, buffer, cipher, SSL).  Note: Python 2 codebase.
    """

    # Bound at class level so __del__ can still free during interpreter teardown.
    m2_bio_free = m2.bio_free

    def __init__(self, bio=None, _pyfree=0, _close_cb=None):
        self.bio = bio
        self._pyfree = _pyfree          # free the C BIO when this object dies
        self._close_cb = _close_cb      # optional callback invoked by close()
        self.closed = 0
        self.write_closed = 0

    def __del__(self):
        if self._pyfree:
            self.m2_bio_free(self.bio)

    def _ptr(self):
        return self.bio

    # Deprecated.
    bio_ptr = _ptr

    def fileno(self):
        return m2.bio_get_fd(self.bio)

    def readable(self):
        return not self.closed

    def read(self, size=None):
        """Read up to *size* bytes; with size=None, read until EOF."""
        if not self.readable():
            raise IOError, 'cannot read'
        if size is None:
            buf = StringIO()
            while 1:
                data = m2.bio_read(self.bio, 4096)
                if not data: break
                buf.write(data)
            return buf.getvalue()
        elif size == 0:
            return ''
        elif size < 0:
            raise ValueError, 'read count is negative'
        else:
            return m2.bio_read(self.bio, size)

    def readline(self, size=4096):
        """Read one line (at most *size* bytes)."""
        if not self.readable():
            raise IOError, 'cannot read'
        buf = m2.bio_gets(self.bio, size)
        return buf

    def readlines(self, sizehint='ignored'):
        if not self.readable():
            raise IOError, 'cannot read'
        lines=[]
        while 1:
            buf=m2.bio_gets(self.bio, 4096)
            if buf is None:
                break
            lines.append(buf)
        return lines

    def writeable(self):
        return (not self.closed) and (not self.write_closed)

    def write(self, data):
        """Write *data*; returns the number of bytes written."""
        if not self.writeable():
            raise IOError, 'cannot write'
        return m2.bio_write(self.bio, data)

    def write_close(self):
        # Half-close: no further writes, reads remain possible.
        self.write_closed = 1

    def flush(self):
        m2.bio_flush(self.bio)

    def reset(self):
        """
        Sets the bio to its initial state
        """
        return m2.bio_reset(self.bio)

    def close(self):
        self.closed = 1
        if self._close_cb:
            self._close_cb()

    def should_retry(self):
        """
        Can the call be attempted again, or was there an error
        ie do_handshake
        """
        return m2.bio_should_retry(self.bio)

    def should_read(self):
        """
        Returns whether the cause of the condition is the bio
        should read more data
        """
        return m2.bio_should_read(self.bio)

    def should_write(self):
        """
        Returns whether the cause of the condition is the bio
        should write more data
        """
        return m2.bio_should_write(self.bio)
class MemoryBuffer(BIO):
    """
    Object interface to BIO_s_mem.

    Empirical testing suggests that this class performs less well than cStringIO,
    because cStringIO is implemented in C, whereas this class is implemented in
    Python. Thus, the recommended practice is to use cStringIO for regular work and
    convert said cStringIO object to a MemoryBuffer object only when necessary.
    """
    def __init__(self, data=None):
        BIO.__init__(self)
        self.bio = m2.bio_new(m2.bio_s_mem())
        self._pyfree = 1
        if data is not None:
            m2.bio_write(self.bio, data)

    def __len__(self):
        # Number of bytes currently buffered and readable.
        return m2.bio_ctrl_pending(self.bio)

    def read(self, size=0):
        """Read *size* bytes, or everything pending when size is 0/falsy."""
        if not self.readable():
            raise IOError, 'cannot read'
        if size:
            return m2.bio_read(self.bio, size)
        else:
            return m2.bio_read(self.bio, m2.bio_ctrl_pending(self.bio))

    # Backwards-compatibility.
    getvalue = read_all = read

    def write_close(self):
        self.write_closed = 1
        # Make further reads report EOF instead of "should retry".
        m2.bio_set_mem_eof_return(self.bio, 0)

    close = write_close
class File(BIO):
    """
    Object interface to BIO_s_fp.

    This class interfaces Python to OpenSSL functions that expect BIO *. For
    general file manipulation in Python, use Python's builtin file object.
    """
    def __init__(self, pyfile, close_pyfile=1):
        BIO.__init__(self, _pyfree=1)
        self.pyfile = pyfile
        self.close_pyfile = close_pyfile   # close the Python file in close()
        # Wrap the FILE* of the Python file; flag 0 = BIO does not own it.
        self.bio = m2.bio_new_fp(pyfile, 0)

    def close(self):
        self.closed = 1
        if self.close_pyfile:
            self.pyfile.close()
def openfile(filename, mode='rb'):
    """Open *filename* and wrap the resulting file object in a File BIO."""
    fileobj = open(filename, mode)
    return File(fileobj)
class IOBuffer(BIO):
    """
    Object interface to BIO_f_buffer.

    Its principal function is to be BIO_push()'ed on top of a BIO_f_ssl, so
    that makefile() of said underlying SSL socket works.
    """

    m2_bio_pop = m2.bio_pop
    m2_bio_free = m2.bio_free

    def __init__(self, under_bio, mode='rwb', _pyfree=1):
        BIO.__init__(self, _pyfree=_pyfree)
        # Buffering filter pushed on top of the underlying BIO chain.
        self.io = m2.bio_new(m2.bio_f_buffer())
        self.bio = m2.bio_push(self.io, under_bio._ptr())
        # This reference keeps the underlying BIO alive while we're not closed.
        self._under_bio = under_bio
        if 'w' in mode:
            self.write_closed = 0
        else:
            self.write_closed = 1

    def __del__(self):
        # getattr guard: __init__ may not have completed.
        if getattr(self, '_pyfree', 0):
            self.m2_bio_pop(self.bio)
            self.m2_bio_free(self.io)

    def close(self):
        BIO.close(self)
class CipherStream(BIO):
    """
    Object interface to BIO_f_cipher.
    """

    SALT_LEN = m2.PKCS5_SALT_LEN

    m2_bio_pop = m2.bio_pop
    m2_bio_free = m2.bio_free

    def __init__(self, obio):
        BIO.__init__(self, _pyfree=1)
        self.obio = obio                 # underlying BIO the filter wraps
        self.bio = m2.bio_new(m2.bio_f_cipher())
        self.closed = 0

    def __del__(self):
        if not getattr(self, 'closed', 1):
            self.close()

    def close(self):
        # Detach the filter from the chain and free it.
        self.m2_bio_pop(self.bio)
        self.m2_bio_free(self.bio)
        self.closed = 1

    def write_close(self):
        self.obio.write_close()

    def set_cipher(self, algo, key, iv, op):
        """Configure the cipher (OpenSSL name *algo*; op 1=encrypt, 0=decrypt)
        and push the filter onto the underlying BIO."""
        cipher = getattr(m2, algo, None)
        if cipher is None:
            raise ValueError, ('unknown cipher', algo)
        m2.bio_set_cipher(self.bio, cipher(), key, iv, op)
        m2.bio_push(self.bio, self.obio._ptr())
class SSLBio(BIO):
    """
    Object interface to BIO_f_ssl
    """
    def __init__(self, _pyfree=1):
        BIO.__init__(self, _pyfree)
        self.bio = m2.bio_new(m2.bio_f_ssl())
        self.closed = 0

    def set_ssl(self, conn, close_flag=m2.bio_noclose):
        """
        Sets the bio to the SSL pointer which is
        contained in the connection object.
        """
        # Ownership moves to the connection; stop freeing from Python.
        self._pyfree = 0
        m2.bio_set_ssl(self.bio, conn.ssl, close_flag)
        if close_flag == m2.bio_noclose:
            conn.set_ssl_close_flag(m2.bio_close)

    def do_handshake(self):
        """
        Do the handshake.

        Return 1 if the handshake completes
        Return 0 or a negative number if there is a problem
        """
        return m2.bio_do_handshake(self.bio)
/Instrumental-lib-0.7.zip/Instrumental-lib-0.7/instrumental/drivers/scopes/rigol.py | import visa
from pyvisa.constants import InterfaceType
import numpy as np
from pint import UndefinedUnitError
from . import Scope
from .. import VisaMixin, SCPI_Facet, Facet
from ..util import visa_context
from ... import u, Q_
from enum import Enum
from .. import ParamSet
from visa import ResourceManager
from enum import Enum
import time
import numpy as np
from struct import unpack
# Instrumental driver metadata: required instrument parameters and the
# *IDN? (manufacturer, models) pairs used to match VISA resources.
_INST_PARAMS_ = ['visa_address']
_INST_VISA_INFO_ = {
    'DS1000Z': ('RIGOL TECHNOLOGIES', ['DS1054Z']),
}
# Rigol's USB vendor ID (VI_ATTR_MANF_ID).
MANUFACTURER_ID = 0x1AB1
# USB product IDs (VI_ATTR_MODEL_CODE) for each supported scope model,
# built with the functional Enum API.
SpecTypes = Enum('SpecTypes', {'DS1054Z': 0x04CE})
def list_instruments():
    """Get a list of all supported Rigol scopes currently attached.

    Returns a list of ``ParamSet`` objects (possibly empty), one per matching
    USB VISA resource.
    """
    paramsets = []
    # Build the model-code alternation, e.g.
    # "(VI_ATTR_MODEL_CODE==0x04CE) || (VI_ATTR_MODEL_CODE==0x....)"
    # (join is clearer and safer than appending " || " and rstrip'ing it).
    model_string = ' || '.join(
        '(VI_ATTR_MODEL_CODE==0x{:04X})'.format(spec.value) for spec in SpecTypes
    )
    search_string = "USB?*?{{VI_ATTR_MANF_ID==0x{:04X} && ({})}}".format(MANUFACTURER_ID,
                                                                         model_string)
    rm = ResourceManager()
    try:
        raw_spec_list = rm.list_resources(search_string)
    except Exception:
        # pyvisa raises when no resources match; report "none attached".
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
        return paramsets
    for spec in raw_spec_list:
        # USB resource string: USB<bus>::<vendor>::<model>::<serial>::<iface>
        _, _, model, serial, _ = spec.split('::', 4)
        model = SpecTypes(int(model, 0))
        paramsets.append(ParamSet(DS1000Z, usb=spec, serial=serial, model=model))
    return paramsets
# Boolean-valued on/off state, built with the functional Enum API.
OnOffState = Enum('OnOffState', {'ON': True, 'OFF': False})
class RigolScope(Scope, VisaMixin):
    """
    A base class for Rigol Technologies Scopes
    """
    # Waveform scaling parameters queried from the instrument.
    yinc = SCPI_Facet(':WAVeform:YINCrement', convert=float)
    yref = SCPI_Facet(':WAVeform:YREFerence', convert=float)
    yorig = SCPI_Facet(':WAVeform:YORigin', convert=float)
    xincr = SCPI_Facet(':WAVeform:XINCrement', convert=float)
    # NOTE(review): this facet is shadowed by the `beeper` property defined
    # further down, so one of the two definitions is dead -- confirm which
    # was intended and remove the other.
    beeper = SCPI_Facet('SYSTem:BEEPer', convert=OnOffState)

    def _initialize(self):
        # Rigol scopes terminate messages with a bare newline.
        self._rsrc.write_termination = '\n'
        self._rsrc.read_termination = '\n'

    @property
    def manufacturer(self):
        # *IDN? -> "manufacturer,model,serial,version"
        manufacturer, _, _, _ = self.query('*IDN?').rstrip().split(',', 4)
        return manufacturer

    @property
    def model(self):
        _, model, _, _ = self.query('*IDN?').split(',', 4)
        return model

    @property
    def serial(self):
        _, _, serial, _ = self.query('*IDN?').split(',', 4)
        return serial

    @property
    def version(self):
        _, _, _, version = self.query('*IDN?').rstrip().split(',', 4)
        return version

    @property
    def beeper(self):
        # Returns True/False for the system beeper state.
        val = self.query('SYSTem:BEEPer?')
        return OnOffState[val].value

    @beeper.setter
    def beeper(self, val):
        val = int(bool(val))
        self.write('SYSTem:BEEPer %s' % OnOffState(val).name)

    @property
    def vmax_averages(self):
        # Running average of the VMAX measurement statistic.
        return self.query(':MEASure:STATistic:ITEM? AVERages,VMAX')

    @property
    def vmax(self):
        return self.query(':MEASure:ITEM? VMAX')

    @property
    def vmin_averages(self):
        # Running average of the VMIN measurement statistic.
        return self.query(':MEASure:STATistic:ITEM? AVERages,VMIN')

    @property
    def vmin(self):
        return self.query(':MEASure:ITEM? VMIN')

    def get_data(self):
        """Read the channel-1 waveform; returns (Time, Volts)."""
        self.write(':WAV:SOUR CHAN1')
        time.sleep(1)
        data = self._rsrc.query_binary_values(':WAVeform:DATA?', datatype='B')
        yinc = self.yinc  # Don't query multiple times
        yref = self.yref
        yorig = self.yorig
        xincr = self.xincr
        # Convert raw 8-bit levels to volts using the scope's scaling.
        Volts = [(val - yorig - yref) * yinc for val in data]
        Time = np.arange(0, xincr * len(Volts), xincr)
        return Time, Volts

    def single_acq(self):
        """Record a single acquisition via the waveform-record function,
        polling until the recording stops."""
        self.write("STOP")
        self.write(":FUNCtion:WRECord:ENABle 0")
        self.write(":FUNCtion:WRECord:ENABle 1")
        self.write("RUN")
        while True:  # 1 means that the acquisition is still running
            time.sleep(0.5)
            if self.query(":FUNCtion:WRECord:OPERate?") == "STOP":
                self.write(":FUNCtion:WRECord:ENABle 0")
                break

    def local(self):
        # Return front-panel control to the user.
        self.write('SYSTem:LOCal')

    def remote(self):
        # Lock out the front panel for remote control.
        self.write('SYSTem:REMote')
class DS1000Z(RigolScope, VisaMixin):
    """Rigol DS1000Z-series oscilloscope (e.g. DS1054Z); all behaviour is
    inherited from RigolScope."""
    pass
/Joson_yaml_pytest-1.2.7-py3-none-any.whl/Joson_yaml_pytest/plugin.py | import types
import yaml
from pathlib import Path
from _pytest.python import Module
import pytest
from requests.adapters import HTTPAdapter
from . import http_session
from . import runner
from .log import set_log_format, log
from .report_notify import ding_ding_notify, fei_shu_notify
from .create_funtion import import_from_file
import os
import platform
import time
g = {} # 全局 g 对象,获取项目配置
@pytest.fixture(scope="session")
def requests_session(request):
"""全局session 全部用例仅执行一次"""
s = http_session.HttpSession()
# max_retries=2 重试2次
s.mount('http://', HTTPAdapter(max_retries=2))
s.mount('https://', HTTPAdapter(max_retries=2))
proxies_ip = request.config.getoption("--proxies-ip") or request.config.getini("proxies_ip")
if proxies_ip:
# 添加全局代理
s.proxies = {
"http": f"http://{proxies_ip}",
"https": f"https://{proxies_ip}"
}
# 添加全局base_url
s.base_url = request.config.option.base_url
yield s
s.close()
@pytest.fixture()
def requests_function(request):
    """Function-scoped HTTP session: a fresh session for every test case.

    NOTE(review): this body is identical to requests_session/requests_module
    apart from scope -- a shared helper would remove the triplication.
    """
    s = http_session.HttpSession()
    # max_retries=2: retry failed connections twice at the transport level.
    s.mount('http://', HTTPAdapter(max_retries=2))
    s.mount('https://', HTTPAdapter(max_retries=2))
    proxies_ip = request.config.getoption("--proxies-ip") or request.config.getini("proxies_ip")
    if proxies_ip:
        # Install a global proxy for this session.
        s.proxies = {
            "http": f"http://{proxies_ip}",
            "https": f"https://{proxies_ip}"
        }
    # Global base_url (from pytest-base-url / our addoption).
    s.base_url = request.config.option.base_url
    yield s
    s.close()
@pytest.fixture(scope="module")
def requests_module(request):
"""模块级别 session, 每个模块仅执行一次"""
s = http_session.HttpSession()
# max_retries=2 重试2次
s.mount('http://', HTTPAdapter(max_retries=2))
s.mount('https://', HTTPAdapter(max_retries=2))
proxies_ip = request.config.getoption("--proxies-ip") or request.config.getini("proxies_ip")
if proxies_ip:
# 添加全局代理
s.proxies = {
"http": f"http://{proxies_ip}",
"https": f"https://{proxies_ip}"
}
# 添加全局base_url
s.base_url = request.config.option.base_url
yield s
s.close()
def pytest_collect_file(file_path: Path, parent):  # noqa
    """
    Collect test cases from:
    1. files with a .yml or .yaml suffix
    2. whose name starts or ends with "test"
    """
    if file_path.suffix in [".yml", ".yaml"] and (file_path.name.startswith("test") or file_path.name.endswith("test")):
        py_module = Module.from_parent(parent, path=file_path)
        # Dynamically create a module to host the generated test functions.
        module = types.ModuleType(file_path.stem)
        # Parse the yaml content.
        # NOTE(review): the file handle from .open() is never closed
        # explicitly; relies on GC -- consider a `with` block.
        raw_dict = yaml.safe_load(file_path.open(encoding='utf-8'))
        if not raw_dict:
            return
        # Generated test function names start with test_.
        run = runner.RunYaml(raw_dict, module, g)
        run.run()  # execute/generate the test cases
        # Override the module loader so pytest collects from our synthetic module.
        py_module._getobj = lambda: module  # noqa
        return py_module
def pytest_generate_tests(metafunc):  # noqa
    """Implement test-case parametrization from YAML-provided data.

    Looks for a module attribute named ``<test_qualname>_params_data``; when
    present, the trailing fixture names of the test are parametrized with it.

    :param metafunc: pytest metafunc object; relevant attributes:
        metafunc.fixturenames: parameter names collected for parametrization
        metafunc.module: module object holding the test being parametrized
        metafunc.config: the test session config
        metafunc.function: the test function/method object
        metafunc.cls: class the test belongs to (if any)
    :return: None
    """
    if hasattr(metafunc.module, f'{metafunc.function.__qualname__}_params_data'):
        params_data = getattr(metafunc.module, f'{metafunc.function.__qualname__}_params_data')
        params_len = 0  # number of parametrized arguments per case
        if isinstance(params_data, list):
            # Infer the per-case arity from the first element: a list row
            # supplies one value per column, a dict row one value per key,
            # anything else is a single bare value.
            if isinstance(params_data[0], list):
                params_len = len(params_data[0])
            elif isinstance(params_data[0], dict):
                params_len = len(params_data[0].keys())
            else:
                params_len = 1
        # Assumes the parametrized arguments are the LAST params_len fixture
        # names of the test -- TODO confirm this ordering holds for all
        # generated tests.
        params_args = metafunc.fixturenames[-params_len:]
        if len(params_args) == 1:
            # Wrap bare values so each case is a one-element argument list.
            if not isinstance(params_data[0], list):
                params_data = [[p] for p in params_data]
        metafunc.parametrize(
            params_args,
            params_data,
            scope="function"
        )
def pytest_addoption(parser):  # noqa
    """Register the plugin's command line options and ini settings."""
    # Runtime environment selector (e.g. "test", "uat").
    parser.addini('env', default=None, help='run environment by test or uat ...')
    parser.addoption(
        "--env",
        action="store",
        default=None,
        help="run environment by test or uat ...",
    )
    # Base URL for API requests. Another plugin (pytest-base-url) may have
    # registered the ini value already, so only add it when missing.
    if 'base_url' not in parser._ininames:
        parser.addini("base_url", help="base url for the api test.")
    parser.addoption(
        "--base-url",
        metavar="url",
        default=os.getenv("PYTEST_BASE_URL", None),
        help="base url for the api test.",
    )
    # Proxy address applied to every outgoing request.
    parser.addini("proxies_ip", default=None, help="proxies_ip for the test.")
    parser.addoption(
        "--proxies-ip",
        action="store",
        default=None,
        help="proxies_ip for the test.",
    )
def pytest_configure(config):  # noqa
    """Apply plugin configuration at session start.

    - configure the log file and log format
    - silence noisy warnings
    - load the project-level config.py for the selected environment
    - resolve the effective base_url (CLI > ini > config.py)
    - prepare the allure report directory and its environment.properties
    """
    # Configure log file and log format.
    set_log_format(config)
    config.addinivalue_line(
        "filterwarnings", "ignore::DeprecationWarning"
    )
    config.addinivalue_line(
        "filterwarnings", "ignore::urllib3.exceptions.InsecureRequestWarning"
    )
    # Load the project's config.py, if present.
    config_path = Path(config.rootdir).joinpath('config.py')
    if config_path.exists():
        # With a config file present, load the selected environment's settings.
        run_env_name = config.getoption('--env') or config.getini('env')
        if run_env_name:
            config_module = import_from_file(config_path)
            if hasattr(config_module, 'env'):
                g["env"] = config_module.env.get(run_env_name)  # noqa
                g["env_name"] = run_env_name
    if g.get('env'):
        # BASE_URL declared by the configured environment, when available.
        _base_url = g["env"].BASE_URL if hasattr(g.get('env'), 'BASE_URL') else None
    else:
        _base_url = None
    # CLI option wins over ini value, which wins over config.py.
    base_url = config.getoption("--base-url") or config.getini("base_url") or _base_url
    g["base_url"] = base_url
    if base_url is not None:
        config.option.base_url = base_url
        if hasattr(config, "_metadata"):
            config._metadata["base_url"] = base_url  # noqa
    # Prepare the allure report output directory, if reporting is enabled.
    allure_dir = config.getoption('--alluredir')  # noqa
    if allure_dir:
        allure_report_path = Path(os.getcwd()).joinpath(allure_dir)
        # Bug fix: plain mkdir() raised FileNotFoundError for nested paths
        # (e.g. "reports/allure") and FileExistsError on races; create parent
        # directories and tolerate a pre-existing directory.
        allure_report_path.mkdir(parents=True, exist_ok=True)
        allure_report_env = allure_report_path.joinpath('environment.properties')
        if not allure_report_env.exists():
            allure_report_env.touch()  # create the properties file
        # Record environment info displayed by the allure report.
        root_dir = str(config.rootdir).replace("\\", "\\\\")
        allure_report_env.write_text(f'system={platform.system()}\n'
                                     f'systemVersion={platform.version()}\n'
                                     f'pythonVersion={platform.python_version()}\n'
                                     f'pytestVersion={pytest.__version__}\n'
                                     f'rootDir={root_dir}\n')
def pytest_terminal_summary(terminalreporter, exitstatus, config):  # noqa
    """Collect the run result and push a summary notification.

    Builds a markdown report (environment, base_url, duration, counters,
    pass rate) and, when the environment config declares DING_TALK or
    FEI_SHU webhook settings, sends the report to them.
    """
    total = terminalreporter._numcollected  # noqa
    status = {
        0: "pass",
        1: "failed",
        2: "pytest 执行过程被中断",
        3: "pytest 内部错误",
        4: "pytest 用法错误",
        5: "没有收集到测试用例"
    }
    if exitstatus in [0, 1]:
        # Teardown-phase reports are excluded so each test counts once.
        passed = len([i for i in terminalreporter.stats.get('passed', []) if i.when != 'teardown'])
        failed = len([i for i in terminalreporter.stats.get('failed', []) if i.when != 'teardown'])
        error = len([i for i in terminalreporter.stats.get('error', []) if i.when != 'teardown'])
        skipped = len([i for i in terminalreporter.stats.get('skipped', []) if i.when != 'teardown'])
        # Bug fix: the original divided by (collected - skipped) with no zero
        # guard (ZeroDivisionError when everything is skipped) and used the
        # unfiltered passed list, inconsistent with the `passed` count above.
        effective = total - skipped
        successful = (passed / effective * 100) if effective > 0 else 0.0
        duration = time.time() - terminalreporter._sessionstarttime  # noqa
        markdown_text = f"""## 执行报告:
        - 运行环境: {g.get('env_name')}
        - 运行base_url: {g.get('base_url')}
        - 持续时间: {duration: .2f} 秒
        ## 本次运行结果:
        - 总用例数: {total}
        - 通过用例:{passed}
        - 跳过用例:{skipped}
        - 失败用例: {failed}
        - 异常用例: {error}
        - 通过率: {successful: .2f} %
        """
        if g.get('env'):
            if hasattr(g["env"], 'DING_TALK'):
                # Prepend the run verdict to the DingTalk message body.
                ding_text = markdown_text.replace('## 本次运行结果:', f'## 本次运行结果: {status.get(exitstatus)} ')
                ding_talk = g["env"].DING_TALK
                if ding_talk.get('text'):
                    ding_talk['text'] = ding_text + ding_talk['text']
                else:
                    ding_talk['text'] = ding_text
                ding_ding_notify(**ding_talk)
            if hasattr(g["env"], 'FEI_SHU'):
                color = 'green' if exitstatus == 0 else 'red'
                fei_shu_text = markdown_text.replace('## 执行报告:', '** 执行报告: ** ')
                fei_shu_text = fei_shu_text.replace(
                    '## 本次运行结果:',
                    f'** 本次运行结果: <font color="{color}">{status.get(exitstatus)}</font> **')
                fei_shu = g["env"].FEI_SHU
                if fei_shu.get('text'):
                    fei_shu['text'] = fei_shu_text + fei_shu['text']
                else:
                    fei_shu['text'] = fei_shu_text
                # Title background colour follows the run result.
                fei_shu['color'] = color
                fei_shu_res = fei_shu_notify(**fei_shu)
                log.info(f"飞书通知结果: {fei_shu_res}")
    else:
        log.error(f"用例执行异常,失败原因: {status.get(exitstatus)}")
/Kotti-2.0.9.tar.gz/Kotti-2.0.9/docs/developing/advanced/blobs.rst | .. _blobs:
Working with Blob Data in Kotti
===============================
Kotti provides flexible mechanisms for storing and serving blob data with the help of `Depot`_.
.. contents::
How File-like Content is stored
-------------------------------
Both ``File`` and ``Image`` store their data in :class:`depot.fields.sqlalchemy.UploadedFileField` and they will offload their blob data to the configured depot storage.
Working together with `Depot`_ configured storages means it is possible to store blob data in a variety of ways: filesystem, GridFS, Amazon storage, etc.
- :class:`depot.io.local.LocalFileStorage`
- :class:`depot.io.awss3.S3Storage`
- :class:`depot.io.gridfs.GridFSStorage`
- etc.
By default Kotti will store its blob data in the configured SQL database, using :class:`kotti.filedepot.DBFileStorage` storage, but you can configure your own preferred way of storing your blob data.
The benefit of storing files in :class:`kotti.filedepot.DBFileStorage` is having *all* content in a single place (the DB) which makes backups, exporting and importing of your site's data easy, as long as you don't have too many or too large files.
The downsides of this approach appear when your database server resides on a different host (network performance becomes a greater issue) or your DB dumps become too large to be handled efficiently.
Configuration
-------------
Mountpoint
~~~~~~~~~~
Kotti provides a Pyramid tween (:ref:`pyramid.registering_tweens`) that is responsible for the actual serving of blob data.
It does pretty much the same as :class:`depot.middleware.DepotMiddleware`, but is better integrated into Pyramid and therefore Kotti.
This tween "intercepts" all requests before they reach the main application (Kotti).
If it's a request for blob data (identified by the configured ``kotti.depot_mountpoint``), it will be served by the tween itself (or redirected to an external storage like S3), otherwise it will be "forwarded" to the main application.
This mountpoint is also used to generate URLs to blobs.
The default value for ``kotti.depot_mountpoint`` is ``/depot``::
kotti.depot_mountpoint = /depot
WSGI File Wrapper
-----------------
In case you have issues serving files with your WSGI server, your can try to set ``kotti.depot_replace_wsgi_file_wrapper = true``.
This forces Kotti to use :class:`pyramid.response.FileIter` instead of the one provided by your WSGI server.
Storages
~~~~~~~~
While `Depot`_ allows storing data in any of the configured filestorages, at this time there's no mechanism in Kotti to select, at runtime, the depot where new data will be saved.
Instead, Kotti will store new files only in the configured ``default`` store.
If, for example, you add a new depot and make that the default, you should leave the old depot configured so that Kotti will continue serving files uploaded there.
By default, Kotti comes configured with a db-based filestorage::
kotti.depot.0.name = dbfiles
kotti.depot.0.backend = kotti.filedepot.DBFileStorage
To configure a depot, several ``kotti.depot.*.*`` lines need to be added.
The number in the first position is used to group backend configuration and to order the file storages in the configuration of `Depot`_.
The depot configured with number 0 will be the default depot, where all new blob data will be saved.
There are 2 options that are required for every storage configuration: ``name`` and ``backend``.
The ``name`` is a unique string that will be used to identify the path of saved files (it is recorded with each blob info), so once configured for a particular storage, it should never change.
The ``backend`` should point to a dotted path for the storage class.
Any further parameters for a particular backend will be passed as keyword arguments to the backend class.
See this example, in which we store, by default, files in ``/var/local/files/`` using the :class:`depot.io.local.LocalFileStorage`::
kotti.depot.0.name = localfs
kotti.depot.0.backend = depot.io.local.LocalFileStorage
kotti.depot.0.storage_path = /var/local/files
kotti.depot.1.name = dbfiles
kotti.depot.1.backend = kotti.filedepot.DBFileStorage
Notice that we kept the ``dbfiles`` storage, but we moved it to position 1.
No blob data will be saved there anymore, but existing files in that storage will continue to be available from there.
How File-like Content is served
-------------------------------
Starting with Kotti 1.3.0, file-like content can be served in two different ways.
Let's look at an example to compare them.
Say we have a :class:`kotti.resources.File` object in our resource tree, located at ``/foo/bar/file``.
Method 1
~~~~~~~~
In the default views this file is served under the URL ``http://localhost/foo/bar/file/attachment-view``.
This URL can be created like this::
>>> from kotti.resources import File
>>> file = File.query.filter(File.name == 'file').one()
>>> request.resource_url(file, 'attachment-view')
'http://localhost/foo/bar/file/attachment-view'
When this URL is requested, a :class:`kotti.filedepot.StoredFileResponse` is returned::
>>> request.uploaded_file_response(file.data)
<StoredFileResponse at 0x10c8d22d0 200 OK>
The request is processed in the same way as for every other type of content in Kotti.
It goes through the full traversal and view lookup machinery *with full permission checks*.
Method 2
~~~~~~~~
Often these permission checks do not need to be enforced strictly.
For such cases Kotti provides a "shortcut" in the form of a Pyramid tween that directly processes all requests under a certain path before they even reach Kotti.
This means: no traversal, no view lookup, no permission checks.
The URL for this method can be created very similarly::
>>> request.uploaded_file_url(file.data, 'attachment')
'http://localhost//depot/dbfiles/68f31e97-a7f9-11e5-be07-c82a1403e6a7/download'
Comparison
~~~~~~~~~~
Obviously method 2 is *a lot* faster than method 1 - typically at least by the factor of 3.
If you take a look at the callgraphs, you'll understand where this difference comes from:
========== ==========
|m1kotti|_ |m2kotti|_
========== ==========
Method 1 Method 2
========== ==========
.. |m1kotti| image:: /_static/callgraph-served-by-kotti.svg
:width: 100%
.. _m1kotti: ../../_static/callgraph-served-by-kotti.svg
.. |m2kotti| image:: /_static/callgraph-served-by-tween.svg
:width: 100%
.. _m2kotti: ../../_static/callgraph-served-by-tween.svg
The difference will be even more drastic, when you set up proper HTTP caching.
All responses for method 2 can be cached *forever*, because the URL will change when the file's content changes.
Developing (with) File-like Content
-----------------------------------
Add a Blob Field to your Model
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Adding a blob data attribute to your models can be as simple as::
from depot.fields.sqlalchemy import UploadedFileField
from kotti.resources import Content
class Person(Content):
avatar = UploadedFileField()
While you can directly assign a ``bytes`` value to the ``avatar`` column, the ``UploadedFileField`` column type works best when you assign a :class:`cgi.FieldStorage` instance as value::
from StringIO import StringIO
from kotti.util import _to_fieldstorage
content = '...'
data = {
'fp': StringIO(content),
'filename': 'avatar.png',
'mimetype': 'image/png',
'size': len(content),
}
person = Person()
person.avatar = _to_fieldstorage(**data)
Note that the ``data`` dictionary described here has the same format as the deserialized value of a ``deform.widget.FileUploadWidget``.
See :class:`kotti.views.edit.content.FileAddForm` and :class:`kotti.views.edit.content.FileEditForm` for a full example of how to add or edit a model with a blob field.
Reading Blob Data
~~~~~~~~~~~~~~~~~
If you try directly to read data from an ``UploadedFileField`` you'll get a :class:`depot.fields.upload.UploadedFile` instance, which offers a dictionary-like interface to the stored file metadata and direct access to a stream with the stored file through the ``file`` attribute::
person = DBSession.query(Person).get(1)
blob = person.avatar.file.read()
You should never write to the file stream directly.
Instead, you should assign a new value to the ``UploadedFileField`` column, as described in the previous section.
Testing UploadedFileField Columns
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Because :class:`depot.manager.DepotManager` acts as a singleton, special care needs to be taken when testing features that involve saving data into ``UploadedFileField`` columns.
``UploadedFileField`` columns require having at least one depot file storage configured.
You can use a fixture called ``filedepot`` to have a mock file storage available for your tests.
If you're developing new depot file storages you should use the ``no_filedepots`` fixture, which resets the configured depots for the test run and restores the default depots back, as a teardown.
Inheritance Issues with UploadedFileField Columns
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
You should be aware that, presently, subclassing a model with an ``UploadedFileField`` column doesn't work properly.
As a workaround, add a ``__declare_last__`` classmethod in your superclass model, similar to the one below, where we're fixing the ``data`` column of the ``File`` class. ::
from depot.fields.sqlalchemy import _SQLAMutationTracker
class File(Content):
data = UploadedFileField()
@classmethod
def __declare_last__(cls):
event.listen(cls.data, 'set', _SQLAMutationTracker._field_set, retval=True)
Migrating data between two different storages
---------------------------------------------
Kotti provides a script that can migrate blob data from one configured stored to another and update the saved fields with the new locations.
You do not need to do this if you just want to add a new storage or replace the default one, but you can use it if you would like to consolidate all blob data in one place.
You can invoke the script with::
kotti-migrate-storage <config_uri> --from-storage <name> --to-storage <name>
The storage names are those assigned in the configuration file designated in ``<config_uri>``.
For example, let's assume you've started a website that has the default blob storage, the ``DBFileStorage`` named *dbfiles*.
You'd like to move all the existing blob data to a :class:`depot.io.local.LocalFileStorage` storage and make that the default.
First, add the ``LocalFileStorage`` depot, make it the default, and place the old ``DBFileStorage`` in position *1*::
kotti.depot.0.backend = depot.io.local.LocalFileStorage
kotti.depot.0.name = localfs
kotti.depot.0.storage_path = /var/local/files
kotti.depot.1.backend = kotti.filedepot.DBFileStorage
kotti.depot.1.name = dbfiles
Now you can invoke the migration with::
kotti-migrate-storage <config_uri> --from-storage dbfiles --to-storage localfs
As always when dealing with migrations, make sure you backup your data first!
.. _Depot: https://depot.readthedocs.io/en/latest/
| PypiClean |
/CCC-2.0.1.tar.gz/CCC-2.0.1/ccc/contacts/api/views.py | import xlwt
from accounts.models import Account
from django.db import transaction
from django.http import HttpResponse, JsonResponse
from django.shortcuts import get_object_or_404
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.filters import SearchFilter
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.viewsets import ModelViewSet
from ccc.contacts.api.serializers import (CompanySocialProfileFilter,
CompanySocialProfileSerializer,
ContactFilter, ContactGroupFilter,
ContactGroupSerializer,
ContactNoteSerializer,
ContactSerializer,
ContactSocialProfileFilter,
ContactSocialProfileSerializer,
ImportContactSerializer,
UploadContactSerializer,
ValidateContactUploadSerializer)
from ccc.contacts.models import (CompanySocialProfile, Contact, ContactGroup,
ContactNote, ContactSocialProfile)
from ccc.contacts.serializers import ContactListSerializer
from ccc.mixin import AuthParsersMixin
from ccc.users.models import UserProfile
from ccc.utils.utils import XLSJobImporter
class CreateBusinessCard(AuthParsersMixin, APIView):
    """Create or refresh the requesting user's digital business card (DBC) contact."""

    def get(self, request, *args, **kwargs):
        """Sync the user's account fields into a 'DBC' contact and return its id."""
        primary_data_point = ['first_name', 'last_name', 'email', 'phone', 'profile_image', 'contact_type']
        contact = Contact.objects.filter(email=request.user.email).first()
        user_data = Account.objects.get(id=request.user.id).__dict__
        data = {'user_id': request.user.id, 'contact_type': 'DBC'}
        for field in primary_data_point:
            try:
                data[field] = user_data[field]
            except KeyError:
                # Field not present on the account (e.g. profile_image); skip it.
                pass
        if contact:
            # Bug fix: the original read data[field] for every field in
            # primary_data_point, raising KeyError for any field that was
            # absent from the account dict. Copy only the collected fields.
            for field, value in data.items():
                if field != 'user_id':  # keep the existing contact owner
                    setattr(contact, field, value)
            contact.save()
        else:
            contact = Contact.objects.create(**data)
        return JsonResponse({'status': 'success', 'contact_id': contact.id})
class ContactsViewSet(AuthParsersMixin, ModelViewSet):
    """Contact view set.

    LEAD_TYPE = (
        ('1', "SMS"),
        ('2', "MMS"),
        ('3', "VOICE"),
        ('4', "CSV UPLOAD"),
        ('5', "Card Scan"),
        ('6', "Manual"),
        ('7', "Survey"),
        ('8', "System user import"))

    To filter by group, add URL param '?group=<group_id>'.
    You can filter by fields phone, first_name, last_name, email,
    company_name, start_date, end_date, campaign.
    NOTE:
        start_date: start_date < created_date
        end_date: end_date > created_date
    To filter by a range, add both start_date and end_date.
    """
    serializer_class = ContactSerializer
    social_serializer_class = ContactSocialProfileSerializer
    serializer_excel_class = ContactListSerializer
    filter_class = ContactFilter
    filter_backends = (SearchFilter, DjangoFilterBackend,)
    search_fields = ('first_name', 'last_name', 'phone', 'note', 'company_name', 'designation', 'email',
                     'campaigns__name')

    def get_queryset(self):
        """Contacts owned by the requesting user, optionally restricted to ?group=<id>."""
        if self.request.GET.get('group', None):
            return get_object_or_404(ContactGroup, pk=self.request.GET.get('group')) \
                .contacts.filter(user=self.request.user).order_by('first_name')
        return Contact.objects.filter(user=self.request.user).order_by('first_name')

    def perform_create(self, serializer):
        """Stamp the requesting user as the contact owner."""
        serializer.save(user=self.request.user)

    @transaction.non_atomic_requests
    def update(self, request, *args, **kwargs):
        """Update a contact and, when social-profile fields are posted, its first social profile."""
        partial = kwargs.pop('partial', False)
        instance = self.get_object()
        # Split social-profile fields out of the incoming payload.
        social = {}
        social_fields = [f.name for f in ContactSocialProfile._meta.get_fields()]
        for field in social_fields:
            if request.data.get(field):
                social[field] = request.data[field]
        serializer = self.get_serializer(instance, data=request.data, partial=partial)
        serializer.is_valid(raise_exception=True)
        self.perform_update(serializer)
        if social:
            # Re-run the update flow with the social-profile serializer.
            self.serializer_class = self.social_serializer_class
            serializer = self.get_serializer(instance.social_profiles.first(), data=social, partial=partial)
            serializer.is_valid(raise_exception=True)
            self.perform_update(serializer)
        if getattr(instance, '_prefetched_objects_cache', None):
            # If 'prefetch_related' has been applied to a queryset, we need to
            # forcibly invalidate the prefetch cache on the instance.
            instance._prefetched_objects_cache = {}
        return JsonResponse({'Success': 'Save contact details'})

    @action(methods=['get'], detail=False)
    def export_contact(self, request, *args, **kwargs):
        """Export the requesting user's contacts as an .xls download."""
        # Bug fix: this ViewSet defines no class-level `queryset`, so
        # `self.queryset` is None and the original
        # `self.queryset.filter(user=request.user)` raised AttributeError.
        # get_queryset() already restricts to the requesting user.
        object_list = self.get_queryset()
        return self.generate_contact_excel(object_list)

    def generate_contact_excel(self, object_list):
        """Render the given contacts into an .xls attachment response."""
        self.serializer_class = self.serializer_excel_class
        object_list = self.get_serializer(object_list, many=True)
        s1 = xlwt.Workbook(encoding="utf-8")
        sheets = s1.add_sheet("Contact list", cell_overwrite_ok=True)
        first = ['Date created', 'Name', 'Phone', 'Email', 'campaigns', 'Groups', 'Survey']
        # Column widths (xlwt units: 1/256 of a character width).
        sheets.col(first.index('Date created')).width = 256 * 20
        sheets.col(first.index('Name')).width = 256 * 30
        sheets.col(first.index('Phone')).width = 256 * 15
        sheets.col(first.index('Email')).width = 256 * 40
        sheets.col(first.index('campaigns')).width = 256 * 30
        sheets.col(first.index('Groups')).width = 256 * 20
        sheets.col(first.index('Survey')).width = 256 * 30
        # Title style: bold white text over a grey fill.
        self.grey_style = xlwt.XFStyle()
        self.grey_style.font.bold = True
        self.grey_style.font.colour_index = xlwt.Style.colour_map['white']
        pattern = xlwt.Pattern()
        pattern.pattern = xlwt.Pattern.SOLID_PATTERN
        # NOTE(review): xlwt's Pattern exposes pattern_fore_colour /
        # pattern_back_colour; 'pattern_background_colour' may be a silent
        # no-op. Kept as-is to preserve current output -- verify against xlwt.
        pattern.pattern_background_colour = xlwt.Style.colour_map['gray50']
        self.grey_style.pattern = pattern
        self.grey_style.alignment.horz = xlwt.Alignment.HORZ_CENTER_ACROSS_SEL
        # Header style: bold text over a grey fill.
        style = xlwt.XFStyle()
        style.font.bold = True
        pattern = xlwt.Pattern()
        pattern.pattern = xlwt.Pattern.SOLID_PATTERN
        pattern.pattern_background_colour = xlwt.Style.colour_map['gray50']
        style.pattern = pattern
        style.alignment.horz = xlwt.Alignment.HORZ_CENTER_ACROSS_SEL
        sheets.write_merge(0, 0, 0, len(first), "Contact List", style=self.grey_style)
        for i, x in enumerate(first):
            sheets.write(2, i, x, style)
        row_idx = 3  # data rows start below the title (row 0) and header (row 2)
        for obj in object_list.data:
            # Grow the row height with the longest multi-line cell (cap at 5 lines).
            custom_height = min(max(len(obj["campaigns_names"]), len(obj["groups_names"]), 1), 5)
            sheets.row(row_idx).height_mismatch = True
            sheets.row(row_idx).height = 256 * custom_height
            sheets.write(row_idx, first.index('Date created'), obj["created_at"] or '')
            sheets.write(row_idx, first.index('Name'), u"{} {}".format(
                obj["first_name"] or '', obj["last_name"] or ''))
            sheets.write(row_idx, first.index('Phone'), obj["phone"] or '')
            sheets.write(row_idx, first.index('Email'), obj["email"] or '')
            sheets.write(row_idx, first.index('campaigns'), "\n".join(obj["campaigns_names"]) or '')
            sheets.write(row_idx, first.index('Groups'), "\n".join(obj["groups_names"]) or '')
            sheets.write(row_idx, first.index('Survey'), "\n".join(obj["surveys_names"]) or '')
            row_idx += 1
        response = HttpResponse(content_type="application/ms-excel")
        response['Content-Disposition'] = 'attachment; filename="{}.xls"'.format("contact_list")
        s1.save(response)
        return response

    @action(methods=['post'], detail=False)
    def import_system_user(self, request, *args, **kwargs):
        """Import every other platform user as a contact (lead_type=8)."""
        self.serializer_class = ImportContactSerializer
        serializer = self.serializer_class(data=request.data)
        serializer.is_valid(raise_exception=True)
        data = serializer.validated_data
        campaigns = data.get('campaigns')
        groups = data.get('groups')
        contacts = []
        for user in UserProfile.objects.exclude(pk=request.user.pk).exclude(email='', phone=None):
            # Match existing contacts by phone first, then by email.
            if user.phone:
                contact = Contact.objects.filter(user=self.request.user,
                                                 phone=user.phone).first()
            elif user.email:
                contact = Contact.objects.filter(user=self.request.user,
                                                 email=user.email).first()
            else:
                continue
            if contact:
                # Fill any blank contact fields from the platform profile.
                updated = False
                for attr in ('email', 'phone', 'first_name', 'last_name'):
                    if not getattr(contact, attr, False):
                        setattr(contact, attr, getattr(user, attr, None))
                        updated = True
                if updated:
                    contact.save()
            else:
                contact = Contact.objects.create(
                    first_name=user.first_name,
                    last_name=user.last_name,
                    email=user.email,
                    phone=user.phone,
                    user=self.request.user,
                    lead_type=8)
            contacts.append(contact)
        if contacts:
            # Attach the imported contacts to the requested campaigns/groups.
            for contact in contacts:
                if campaigns:
                    for campaign in campaigns:
                        contact.campaigns.add(campaign)
                if groups:
                    for group in groups:
                        group.contacts.add(contact)
            return JsonResponse({'message': 'System user imported'}, status=201)
        return JsonResponse({'message': 'Not found contact'}, status=404)

    @action(methods=['post'], detail=False)
    def excel_preview(self, request, *args, **kwargs):
        """Return the first rows of an uploaded spreadsheet for column mapping."""
        self.serializer_class = UploadContactSerializer
        serializer = self.serializer_class(data=request.data)
        serializer.is_valid(raise_exception=True)
        data = serializer.validated_data
        excel = data.get('excel')
        importer = XLSJobImporter(excel.read())
        rows = importer.preview()
        # Choices offered to the client for mapping each spreadsheet column.
        col_header = ((None, '---'),
                      ('first_name', 'First name', True),
                      ('last_name', 'Last name', True),
                      ('email', 'Email', True),
                      ('phone', 'Phone', True),
                      ('company_name', 'Company name', True),
                      ('custom', 'Custom'))
        return JsonResponse({
            'rows': rows,
            'col_heads': col_header,
            'num_cols': len(rows) and list(range(len(rows[0]))) or [],
            'campaigns': data.get('campaigns'),
            'groups': data.get('groups')
        })

    @action(methods=['post'], detail=False)
    def import_from_file(self, request, *args, **kwargs):
        """Validate an uploaded spreadsheet and import its rows as contacts."""
        # Fixed mapping applied to the first five spreadsheet columns.
        default_selection = {"col_type_0": "first_name",
                             "col_type_1": "last_name",
                             "col_type_2": "email",
                             "col_type_3": "phone",
                             "col_type_4": "company_name"}
        self.serializer_class = ValidateContactUploadSerializer
        serializer = self.serializer_class(data=request.data)
        serializer.is_valid(raise_exception=True)
        data = serializer.validated_data
        excel = data.get('excel')
        drop_first_row = data.get('drop_first_row')
        campaigns = data.get('campaigns', [])
        groups = data.get('groups', [])
        excel_content = excel.read()
        importer = XLSJobImporter(excel_content)
        num_of_cols = len([x for x in default_selection.keys() if x.startswith('col_type_')])
        # Cleanup: the original if/else appended the identical value on both
        # branches, so the condition was dead code.
        col_types = [default_selection.get('col_type_%d' % n) for n in range(num_of_cols)]
        col_custom_names = [data.get('col_custom_name_%d' % n) for n in range(num_of_cols)]
        invalid_rows = importer.validate_all(
            col_types, col_custom_names, drop_first_row)
        if invalid_rows:
            # Echo the failing rows back together with their mapped headers.
            col_headers = ['']
            for i in range(len(col_types)):
                if col_types[i] == 'custom':
                    col_headers.append(col_custom_names[i])
                else:
                    col_headers.append(col_types[i])
            return JsonResponse({
                'invalid_rows': invalid_rows,
                'num_cols': len(invalid_rows) and list(range(len(invalid_rows[0]))) or [],
                'col_headers': col_headers
            }, status=status.HTTP_400_BAD_REQUEST)
        else:
            # The import itself runs asynchronously.
            importer.import_all(request.user, col_types, col_custom_names, drop_first_row, campaigns=campaigns,
                                groups=groups)
            return JsonResponse({'num_of_records': "Importing contacts in asynchronous way"},
                                status=status.HTTP_201_CREATED)

    @action(['GET'], url_path='ids', detail=False)
    def contact_ids(self, request):
        """Return only the ids of the (filtered) contacts."""
        queryset = self.get_queryset()
        queryset = self.filter_queryset(queryset)
        queryset = queryset.values_list('id', flat=True)
        return Response(list(queryset), status=status.HTTP_200_OK)
class ContactGroupViewSet(AuthParsersMixin, ModelViewSet):
    """CRUD endpoints for the requesting user's contact groups."""
    serializer_class = ContactGroupSerializer
    # NOTE(review): get_queryset() below overrides this queryset (and its
    # ordering); the class attribute mainly serves DRF's basename detection.
    queryset = ContactGroup.objects.all().order_by('name')
    filter_class = ContactGroupFilter
    def get_queryset(self):
        # Only the requesting user's groups, newest first.
        return ContactGroup.objects.filter(user=self.request.user).order_by('-created_at')
    def perform_create(self, serializer):
        # Stamp the creating user as the group owner.
        serializer.save(user=self.request.user)
class CompanySocialProfileViewSet(AuthParsersMixin, ModelViewSet):
    """CRUD endpoints for company social profiles (queryset is not scoped per user)."""
    serializer_class = CompanySocialProfileSerializer
    queryset = CompanySocialProfile.objects.all()
    filter_class = CompanySocialProfileFilter
class ContactSocialProfileViewSet(AuthParsersMixin, ModelViewSet):
    """CRUD endpoints for contact social profiles (queryset is not scoped per user)."""
    serializer_class = ContactSocialProfileSerializer
    queryset = ContactSocialProfile.objects.all()
    filter_class = ContactSocialProfileFilter
class ContactNotesViewSet(AuthParsersMixin, ModelViewSet):
    """CRUD for contact notes, scoped to the authenticated user's contacts."""
    serializer_class = ContactNoteSerializer
    pagination_class = None

    def get_queryset(self):
        """User's notes, newest first; optionally filtered by ?contact_id / ?contact_phone."""
        contact_id = self.request.query_params.get('contact_id')
        contact_phone = self.request.query_params.get('contact_phone')
        queryset = ContactNote.objects.filter(contact__user=self.request.user).order_by('-created_at')
        if contact_id:
            queryset = queryset.filter(contact_id=contact_id)
        if contact_phone:
            # Clients send the number without the leading '+'.
            phone = '+' + contact_phone.lstrip()
            queryset = queryset.filter(contact__phone=phone)
        return queryset

    def perform_create(self, serializer):
        """Attach the note to the contact matching ?contact_phone, creating one if needed."""
        contact_phone = self.request.query_params.get('contact_phone')
        if contact_phone:
            phone = '+' + contact_phone.lstrip()
            # Bug fix: scope the lookup to the requesting user's contacts,
            # consistent with get_queryset(); the original could attach the
            # note to another user's contact that shares the phone number.
            contact = Contact.objects.filter(phone=phone, user=self.request.user)
            if contact.exists():
                contact = contact.first()
            else:
                # lead_type '3' == VOICE (see ContactsViewSet docstring).
                contact = Contact.objects.create(lead_type='3', phone=phone, user=self.request.user)
            serializer.save(contact=contact)
        else:
            serializer.save()
/My-Serializer-For-Json-And-XML-For_Lab3-9-1.4.tar.gz/My-Serializer-For-Json-And-XML-For_Lab3-9-1.4/Serializators/XML.py | from Core.functions_for_deserialize import Deserialize
from Core.functions_for_serializer import Serialize
class XMLSerializer:
data_serializer = Serialize()
data_deserializer = Deserialize()
def dumps(self, obj):
packed = self.data_serializer.serialize(obj)
if isinstance(packed, (list, tuple)):
return self.__list_n_tuple_to_string_util(packed)
if isinstance(packed, dict):
return self.__dict_to_string_util(packed)
if isinstance(packed, str):
packed = f'"{packed}"'
return self.__ser_primitive(packed)
def dump(self, obj, file):
file.write(self.dumps(obj))
def loads(self, string):
result, ind = self.__loads_with_index(string, 0)
return self.data_deserializer.deserialize(result)
def load(self, file):
return self.loads(file.read())
def __loads_with_index(self, string, index):
index += 1
end_index = index
while string[end_index] != '>':
end_index += 1
tag = string[index:end_index]
match tag:
case 'int' | 'float':
return self.__deserialize_digit(string, end_index + 1)
case 'bool':
return self.__deserialize_bool(string, end_index + 1)
case 'NoneType':
return None, index + 24
case 'str':
return self.__deserialize_str(string, end_index + 1)
case 'list':
return self.__deserialize_list(string, end_index + 1)
case 'dict':
return self.__deserialize_dict(string, end_index + 1)
def __deserialize_digit(self, string, index):
end_index = index
while string[end_index] != '<':
end_index += 1
data_slice = string[index:end_index]
if '.' in data_slice:
return float(data_slice), end_index + 8
return int(data_slice), end_index + 6
def __deserialize_bool(self, string, index):
if string[index] == 'T':
return True, index+11
else:
return False, index+12
def __deserialize_str(self, string, index):
end_index = index
while string[end_index:end_index + 6] != '</str>':
end_index += 1
data_slice = string[index + 1:end_index - 1]
return f'{data_slice}', end_index + 6
def __deserialize_list(self, string, index):
end_index = index
result = []
bracket_count = 1
while bracket_count > 0:
if string[end_index:end_index + 6] == '<list>':
bracket_count += 1
elif string[end_index:end_index + 7] == '</list>':
bracket_count -= 1
end_index += 1
end_index -= 1
while index < end_index:
item, index = self.__loads_with_index(string, index)
result.append(item)
return result, end_index + 7
def __deserialize_dict(self, string, index):
end_index = index
result = {}
# data fragment
bracket_count = 1
while bracket_count > 0:
if string[end_index:end_index + 6] == '<dict>':
bracket_count += 1
elif string[end_index:end_index + 7] == '</dict>':
bracket_count -= 1
end_index += 1
end_index -= 1
# extract
while index < end_index:
item, index = self.__deserialize_dict_item(string, index)
result[item[0]] = item[1]
return result, end_index + 7
def __deserialize_dict_item(self, string, index):
    """Parse one '<item><key>K</key><value>V</value></item>' fragment.

    Returns ``((key, value), next_index)``. The positional skips are the
    literal tag lengths, spelled out with ``len`` instead of the previous
    bare magic numbers (11, 13, 15) so they stay self-explanatory.
    """
    cursor = index + len('<item><key>')
    key, cursor = self.__loads_with_index(string, cursor)
    cursor += len('</key><value>')
    value, cursor = self.__loads_with_index(string, cursor)
    return (key, value), cursor + len('</value></item>')
def __list_n_tuple_to_string_util(self, packed):
    """Serialize a list/tuple as '<list>...</list>' or '<tuple>...</tuple>'."""
    tag = packed.__class__.__name__
    body = ''.join(self.dumps(element) for element in packed)
    return f'<{tag}>{body}</{tag}>'
def __dict_to_string_util(self, packed):
    """Serialize a dict as '<dict>...</dict>' with one <item> per pair."""
    tag = packed.__class__.__name__
    body = ''.join(self.__ser_dict_element(k, v) for k, v in packed.items())
    return f'<{tag}>{body}</{tag}>'
def __ser_dict_element(self, key, value):
    """Serialize one key/value pair as an '<item>' element."""
    serialized_key = self.dumps(key)
    serialized_value = self.dumps(value)
    return ('<item>'
            f'<key>{serialized_key}</key>'
            f'<value>{serialized_value}</value>'
            '</item>')
def __ser_primitive(self, packed):
    """Serialize a primitive (int/float/bool/str/None) as '<type>value</type>'."""
    tag = type(packed).__name__
    return f'<{tag}>{packed}</{tag}>'
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojo/dnd/Manager.js.uncompressed.js | define("dojo/dnd/Manager", ["../main", "../Evented", "./common", "./autoscroll", "./Avatar"], function(dojo, Evented) {
// module:
// dojo/dnd/Manager
// summary:
// TODOC
var Manager = dojo.declare("dojo.dnd.Manager", [Evented], {
// summary:
// the manager of DnD operations (usually a singleton)
constructor: function(){
this.avatar = null;
this.source = null;
this.nodes = [];
this.copy = true;
this.target = null;
this.canDropFlag = false;
this.events = [];
},
// avatar's offset from the mouse
OFFSET_X: 16,
OFFSET_Y: 16,
// methods
overSource: function(source){
// summary:
// called when a source detected a mouse-over condition
// source: Object
// the reporter
if(this.avatar){
this.target = (source && source.targetState != "Disabled") ? source : null;
this.canDropFlag = Boolean(this.target);
this.avatar.update();
}
dojo.publish("/dnd/source/over", [source]);
},
outSource: function(source){
// summary:
// called when a source detected a mouse-out condition
// source: Object
// the reporter
if(this.avatar){
if(this.target == source){
this.target = null;
this.canDropFlag = false;
this.avatar.update();
dojo.publish("/dnd/source/over", [null]);
}
}else{
dojo.publish("/dnd/source/over", [null]);
}
},
startDrag: function(source, nodes, copy){
// summary:
// called to initiate the DnD operation
// source: Object
// the source which provides items
// nodes: Array
// the list of transferred items
// copy: Boolean
// copy items, if true, move items otherwise
this.source = source;
this.nodes = nodes;
this.copy = Boolean(copy); // normalizing to true boolean
this.avatar = this.makeAvatar();
dojo.body().appendChild(this.avatar.node);
dojo.publish("/dnd/start", [source, nodes, this.copy]);
this.events = [
dojo.connect(dojo.doc, "onmousemove", this, "onMouseMove"),
dojo.connect(dojo.doc, "onmouseup", this, "onMouseUp"),
dojo.connect(dojo.doc, "onkeydown", this, "onKeyDown"),
dojo.connect(dojo.doc, "onkeyup", this, "onKeyUp"),
// cancel text selection and text dragging
dojo.connect(dojo.doc, "ondragstart", dojo.stopEvent),
dojo.connect(dojo.body(), "onselectstart", dojo.stopEvent)
];
var c = "dojoDnd" + (copy ? "Copy" : "Move");
dojo.addClass(dojo.body(), c);
},
canDrop: function(flag){
// summary:
// called to notify if the current target can accept items
var canDropFlag = Boolean(this.target && flag);
if(this.canDropFlag != canDropFlag){
this.canDropFlag = canDropFlag;
this.avatar.update();
}
},
stopDrag: function(){
// summary:
// stop the DnD in progress
dojo.removeClass(dojo.body(), ["dojoDndCopy", "dojoDndMove"]);
dojo.forEach(this.events, dojo.disconnect);
this.events = [];
this.avatar.destroy();
this.avatar = null;
this.source = this.target = null;
this.nodes = [];
},
makeAvatar: function(){
// summary:
// makes the avatar; it is separate to be overwritten dynamically, if needed
return new dojo.dnd.Avatar(this);
},
updateAvatar: function(){
// summary:
// updates the avatar; it is separate to be overwritten dynamically, if needed
this.avatar.update();
},
// mouse event processors
onMouseMove: function(e){
// summary:
// event processor for onmousemove
// e: Event
// mouse event
var a = this.avatar;
if(a){
dojo.dnd.autoScrollNodes(e);
//dojo.dnd.autoScroll(e);
var s = a.node.style;
s.left = (e.pageX + this.OFFSET_X) + "px";
s.top = (e.pageY + this.OFFSET_Y) + "px";
var copy = Boolean(this.source.copyState(dojo.isCopyKey(e)));
if(this.copy != copy){
this._setCopyStatus(copy);
}
}
},
onMouseUp: function(e){
// summary:
// event processor for onmouseup
// e: Event
// mouse event
if(this.avatar){
if(this.target && this.canDropFlag){
var copy = Boolean(this.source.copyState(dojo.isCopyKey(e))),
params = [this.source, this.nodes, copy, this.target, e];
dojo.publish("/dnd/drop/before", params);
dojo.publish("/dnd/drop", params);
}else{
dojo.publish("/dnd/cancel");
}
this.stopDrag();
}
},
// keyboard event processors
onKeyDown: function(e){
// summary:
// event processor for onkeydown:
// watching for CTRL for copy/move status, watching for ESCAPE to cancel the drag
// e: Event
// keyboard event
if(this.avatar){
switch(e.keyCode){
case dojo.keys.CTRL:
var copy = Boolean(this.source.copyState(true));
if(this.copy != copy){
this._setCopyStatus(copy);
}
break;
case dojo.keys.ESCAPE:
dojo.publish("/dnd/cancel");
this.stopDrag();
break;
}
}
},
onKeyUp: function(e){
// summary:
// event processor for onkeyup, watching for CTRL for copy/move status
// e: Event
// keyboard event
if(this.avatar && e.keyCode == dojo.keys.CTRL){
var copy = Boolean(this.source.copyState(false));
if(this.copy != copy){
this._setCopyStatus(copy);
}
}
},
// utilities
_setCopyStatus: function(copy){
// summary:
// changes the copy status
// copy: Boolean
// the copy status
this.copy = copy;
this.source._markDndStatus(this.copy);
this.updateAvatar();
dojo.replaceClass(dojo.body(),
"dojoDnd" + (this.copy ? "Copy" : "Move"),
"dojoDnd" + (this.copy ? "Move" : "Copy"));
}
});
// dojo.dnd._manager:
// The manager singleton variable. Can be overwritten if needed.
dojo.dnd._manager = null;
Manager.manager = dojo.dnd.manager = function(){
// summary:
// Returns the current DnD manager. Creates one if it is not created yet.
if(!dojo.dnd._manager){
dojo.dnd._manager = new dojo.dnd.Manager();
}
return dojo.dnd._manager; // Object
};
return Manager;
}); | PypiClean |
/DEME-0.2.8.tar.gz/DEME-0.2.8/thirdparty/pybind11/docs/advanced/pycpp/numpy.rst | .. _numpy:
NumPy
#####
Buffer protocol
===============
Python supports an extremely general and convenient approach for exchanging
data between plugin libraries. Types can expose a buffer view [#f2]_, which
provides fast direct access to the raw internal data representation. Suppose we
want to bind the following simplistic Matrix class:
.. code-block:: cpp
class Matrix {
public:
Matrix(size_t rows, size_t cols) : m_rows(rows), m_cols(cols) {
m_data = new float[rows*cols];
}
float *data() { return m_data; }
size_t rows() const { return m_rows; }
size_t cols() const { return m_cols; }
private:
size_t m_rows, m_cols;
float *m_data;
};
The following binding code exposes the ``Matrix`` contents as a buffer object,
making it possible to cast Matrices into NumPy arrays. It is even possible to
completely avoid copy operations with Python expressions like
``np.array(matrix_instance, copy = False)``.
.. code-block:: cpp
py::class_<Matrix>(m, "Matrix", py::buffer_protocol())
.def_buffer([](Matrix &m) -> py::buffer_info {
return py::buffer_info(
m.data(), /* Pointer to buffer */
sizeof(float), /* Size of one scalar */
py::format_descriptor<float>::format(), /* Python struct-style format descriptor */
2, /* Number of dimensions */
{ m.rows(), m.cols() }, /* Buffer dimensions */
{ sizeof(float) * m.cols(), /* Strides (in bytes) for each index */
sizeof(float) }
);
});
Supporting the buffer protocol in a new type involves specifying the special
``py::buffer_protocol()`` tag in the ``py::class_`` constructor and calling the
``def_buffer()`` method with a lambda function that creates a
``py::buffer_info`` description record on demand describing a given matrix
instance. The contents of ``py::buffer_info`` mirror the Python buffer protocol
specification.
.. code-block:: cpp
struct buffer_info {
void *ptr;
py::ssize_t itemsize;
std::string format;
py::ssize_t ndim;
std::vector<py::ssize_t> shape;
std::vector<py::ssize_t> strides;
};
To create a C++ function that can take a Python buffer object as an argument,
simply use the type ``py::buffer`` as one of its arguments. Buffers can exist
in a great variety of configurations, hence some safety checks are usually
necessary in the function body. Below, you can see a basic example on how to
define a custom constructor for the Eigen double precision matrix
(``Eigen::MatrixXd``) type, which supports initialization from compatible
buffer objects (e.g. a NumPy matrix).
.. code-block:: cpp
/* Bind MatrixXd (or some other Eigen type) to Python */
typedef Eigen::MatrixXd Matrix;
typedef Matrix::Scalar Scalar;
constexpr bool rowMajor = Matrix::Flags & Eigen::RowMajorBit;
py::class_<Matrix>(m, "Matrix", py::buffer_protocol())
.def(py::init([](py::buffer b) {
typedef Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic> Strides;
/* Request a buffer descriptor from Python */
py::buffer_info info = b.request();
/* Some basic validation checks ... */
if (info.format != py::format_descriptor<Scalar>::format())
throw std::runtime_error("Incompatible format: expected a double array!");
if (info.ndim != 2)
throw std::runtime_error("Incompatible buffer dimension!");
auto strides = Strides(
info.strides[rowMajor ? 0 : 1] / (py::ssize_t)sizeof(Scalar),
info.strides[rowMajor ? 1 : 0] / (py::ssize_t)sizeof(Scalar));
auto map = Eigen::Map<Matrix, 0, Strides>(
static_cast<Scalar *>(info.ptr), info.shape[0], info.shape[1], strides);
return Matrix(map);
}));
For reference, the ``def_buffer()`` call for this Eigen data type should look
as follows:
.. code-block:: cpp
.def_buffer([](Matrix &m) -> py::buffer_info {
return py::buffer_info(
m.data(), /* Pointer to buffer */
sizeof(Scalar), /* Size of one scalar */
py::format_descriptor<Scalar>::format(), /* Python struct-style format descriptor */
2, /* Number of dimensions */
{ m.rows(), m.cols() }, /* Buffer dimensions */
{ sizeof(Scalar) * (rowMajor ? m.cols() : 1),
sizeof(Scalar) * (rowMajor ? 1 : m.rows()) }
/* Strides (in bytes) for each index */
);
})
For a much easier approach of binding Eigen types (although with some
limitations), refer to the section on :doc:`/advanced/cast/eigen`.
.. seealso::
The file :file:`tests/test_buffers.cpp` contains a complete example
that demonstrates using the buffer protocol with pybind11 in more detail.
.. [#f2] http://docs.python.org/3/c-api/buffer.html
Arrays
======
By exchanging ``py::buffer`` with ``py::array`` in the above snippet, we can
restrict the function so that it only accepts NumPy arrays (rather than any
type of Python object satisfying the buffer protocol).
In many situations, we want to define a function which only accepts a NumPy
array of a certain data type. This is possible via the ``py::array_t<T>``
template. For instance, the following function requires the argument to be a
NumPy array containing double precision values.
.. code-block:: cpp
void f(py::array_t<double> array);
When it is invoked with a different type (e.g. an integer or a list of
integers), the binding code will attempt to cast the input into a NumPy array
of the requested type. This feature requires the :file:`pybind11/numpy.h`
header to be included. Note that :file:`pybind11/numpy.h` does not depend on
the NumPy headers, and thus can be used without declaring a build-time
dependency on NumPy; NumPy>=1.7.0 is a runtime dependency.
Data in NumPy arrays is not guaranteed to be packed in a dense manner;
furthermore, entries can be separated by arbitrary column and row strides.
Sometimes, it can be useful to require a function to only accept dense arrays
using either the C (row-major) or Fortran (column-major) ordering. This can be
accomplished via a second template argument with values ``py::array::c_style``
or ``py::array::f_style``.
.. code-block:: cpp
void f(py::array_t<double, py::array::c_style | py::array::forcecast> array);
The ``py::array::forcecast`` argument is the default value of the second
template parameter, and it ensures that non-conforming arguments are converted
into an array satisfying the specified requirements instead of trying the next
function overload.
There are several methods on arrays; the methods listed below under references
work, as well as the following functions based on the NumPy API:
- ``.dtype()`` returns the type of the contained values.
- ``.strides()`` returns a pointer to the strides of the array (optionally pass
an integer axis to get a number).
- ``.flags()`` returns the flag settings. ``.writable()`` and ``.owndata()``
are directly available.
- ``.offset_at()`` returns the offset (optionally pass indices).
- ``.squeeze()`` returns a view with length-1 axes removed.
- ``.view(dtype)`` returns a view of the array with a different dtype.
- ``.reshape({i, j, ...})`` returns a view of the array with a different shape.
``.resize({...})`` is also available.
- ``.index_at(i, j, ...)`` gets the count from the beginning to a given index.
There are also several methods for getting references (described below).
Structured types
================
In order for ``py::array_t`` to work with structured (record) types, we first
need to register the memory layout of the type. This can be done via
``PYBIND11_NUMPY_DTYPE`` macro, called in the plugin definition code, which
expects the type followed by field names:
.. code-block:: cpp
struct A {
int x;
double y;
};
struct B {
int z;
A a;
};
// ...
PYBIND11_MODULE(test, m) {
// ...
PYBIND11_NUMPY_DTYPE(A, x, y);
PYBIND11_NUMPY_DTYPE(B, z, a);
/* now both A and B can be used as template arguments to py::array_t */
}
The structure should consist of fundamental arithmetic types, ``std::complex``,
previously registered substructures, and arrays of any of the above. Both C++
arrays and ``std::array`` are supported. While there is a static assertion to
prevent many types of unsupported structures, it is still the user's
responsibility to use only "plain" structures that can be safely manipulated as
raw memory without violating invariants.
Vectorizing functions
=====================
Suppose we want to bind a function with the following signature to Python so
that it can process arbitrary NumPy array arguments (vectors, matrices, general
N-D arrays) in addition to its normal arguments:
.. code-block:: cpp
double my_func(int x, float y, double z);
After including the ``pybind11/numpy.h`` header, this is extremely simple:
.. code-block:: cpp
m.def("vectorized_func", py::vectorize(my_func));
Invoking the function like below causes 4 calls to be made to ``my_func`` with
each of the array elements. The significant advantage of this compared to
solutions like ``numpy.vectorize()`` is that the loop over the elements runs
entirely on the C++ side and can be crunched down into a tight, optimized loop
by the compiler. The result is returned as a NumPy array of type
``numpy.dtype.float64``.
.. code-block:: pycon
>>> x = np.array([[1, 3], [5, 7]])
>>> y = np.array([[2, 4], [6, 8]])
>>> z = 3
>>> result = vectorized_func(x, y, z)
The scalar argument ``z`` is transparently replicated 4 times. The input
arrays ``x`` and ``y`` are automatically converted into the right types (they
are of type ``numpy.dtype.int64`` but need to be ``numpy.dtype.int32`` and
``numpy.dtype.float32``, respectively).
.. note::
Only arithmetic, complex, and POD types passed by value or by ``const &``
reference are vectorized; all other arguments are passed through as-is.
Functions taking rvalue reference arguments cannot be vectorized.
In cases where the computation is too complicated to be reduced to
``vectorize``, it will be necessary to create and access the buffer contents
manually. The following snippet contains a complete example that shows how this
works (the code is somewhat contrived, since it could have been done more
simply using ``vectorize``).
.. code-block:: cpp
#include <pybind11/pybind11.h>
#include <pybind11/numpy.h>
namespace py = pybind11;
py::array_t<double> add_arrays(py::array_t<double> input1, py::array_t<double> input2) {
py::buffer_info buf1 = input1.request(), buf2 = input2.request();
if (buf1.ndim != 1 || buf2.ndim != 1)
throw std::runtime_error("Number of dimensions must be one");
if (buf1.size != buf2.size)
throw std::runtime_error("Input shapes must match");
/* No pointer is passed, so NumPy will allocate the buffer */
auto result = py::array_t<double>(buf1.size);
py::buffer_info buf3 = result.request();
double *ptr1 = static_cast<double *>(buf1.ptr);
double *ptr2 = static_cast<double *>(buf2.ptr);
double *ptr3 = static_cast<double *>(buf3.ptr);
for (size_t idx = 0; idx < buf1.shape[0]; idx++)
ptr3[idx] = ptr1[idx] + ptr2[idx];
return result;
}
PYBIND11_MODULE(test, m) {
m.def("add_arrays", &add_arrays, "Add two NumPy arrays");
}
.. seealso::
The file :file:`tests/test_numpy_vectorize.cpp` contains a complete
example that demonstrates using :func:`vectorize` in more detail.
Direct access
=============
For performance reasons, particularly when dealing with very large arrays, it
is often desirable to directly access array elements without internal checking
of dimensions and bounds on every access when indices are known to be already
valid. To avoid such checks, the ``array`` class and ``array_t<T>`` template
class offer an unchecked proxy object that can be used for this unchecked
access through the ``unchecked<N>`` and ``mutable_unchecked<N>`` methods,
where ``N`` gives the required dimensionality of the array:
.. code-block:: cpp
m.def("sum_3d", [](py::array_t<double> x) {
auto r = x.unchecked<3>(); // x must have ndim = 3; can be non-writeable
double sum = 0;
for (py::ssize_t i = 0; i < r.shape(0); i++)
for (py::ssize_t j = 0; j < r.shape(1); j++)
for (py::ssize_t k = 0; k < r.shape(2); k++)
sum += r(i, j, k);
return sum;
});
m.def("increment_3d", [](py::array_t<double> x) {
auto r = x.mutable_unchecked<3>(); // Will throw if ndim != 3 or flags.writeable is false
for (py::ssize_t i = 0; i < r.shape(0); i++)
for (py::ssize_t j = 0; j < r.shape(1); j++)
for (py::ssize_t k = 0; k < r.shape(2); k++)
r(i, j, k) += 1.0;
}, py::arg().noconvert());
To obtain the proxy from an ``array`` object, you must specify both the data
type and number of dimensions as template arguments, such as ``auto r =
myarray.mutable_unchecked<float, 2>()``.
If the number of dimensions is not known at compile time, you can omit the
dimensions template parameter (i.e. calling ``arr_t.unchecked()`` or
``arr.unchecked<T>()``). This will give you a proxy object that works in the
same way, but results in less optimizable code and thus a small efficiency
loss in tight loops.
Note that the returned proxy object directly references the array's data, and
only reads its shape, strides, and writeable flag when constructed. You must
take care to ensure that the referenced array is not destroyed or reshaped for
the duration of the returned object, typically by limiting the scope of the
returned instance.
The returned proxy object supports some of the same methods as ``py::array`` so
that it can be used as a drop-in replacement for some existing, index-checked
uses of ``py::array``:
- ``.ndim()`` returns the number of dimensions
- ``.data(1, 2, ...)`` and ``.mutable_data(1, 2, ...)`` return a pointer to
the ``const T`` or ``T`` data, respectively, at the given indices. The
latter is only available to proxies obtained via ``a.mutable_unchecked()``.
- ``.itemsize()`` returns the size of an item in bytes, i.e. ``sizeof(T)``.
- ``.ndim()`` returns the number of dimensions.
- ``.shape(n)`` returns the size of dimension ``n``
- ``.size()`` returns the total number of elements (i.e. the product of the shapes).
- ``.nbytes()`` returns the number of bytes used by the referenced elements
(i.e. ``itemsize()`` times ``size()``).
.. seealso::
The file :file:`tests/test_numpy_array.cpp` contains additional examples
demonstrating the use of this feature.
Ellipsis
========
Python provides a convenient ``...`` ellipsis notation that is often used to
slice multidimensional arrays. For instance, the following snippet extracts the
middle dimensions of a tensor with the first and last index set to zero.
.. code-block:: python
a = ... # a NumPy array
b = a[0, ..., 0]
The ``py::ellipsis()`` function can be used to perform the same
operation on the C++ side:
.. code-block:: cpp
py::array a = /* A NumPy array */;
py::array b = a[py::make_tuple(0, py::ellipsis(), 0)];
Memory view
===========
For a case when we simply want to provide a direct accessor to C/C++ buffer
without a concrete class object, we can return a ``memoryview`` object. Suppose
we wish to expose a ``memoryview`` for 2x4 uint8_t array, we can do the
following:
.. code-block:: cpp
const uint8_t buffer[] = {
0, 1, 2, 3,
4, 5, 6, 7
};
m.def("get_memoryview2d", []() {
return py::memoryview::from_buffer(
buffer, // buffer pointer
{ 2, 4 }, // shape (rows, cols)
{ sizeof(uint8_t) * 4, sizeof(uint8_t) } // strides in bytes
);
});
This approach is meant for providing a ``memoryview`` for a C/C++ buffer not
managed by Python. The user is responsible for managing the lifetime of the
buffer. Using a ``memoryview`` created in this way after deleting the buffer in
C++ side results in undefined behavior.
We can also use ``memoryview::from_memory`` for a simple 1D contiguous buffer:
.. code-block:: cpp
m.def("get_memoryview1d", []() {
return py::memoryview::from_memory(
buffer, // buffer pointer
sizeof(uint8_t) * 8 // buffer size
);
});
.. versionchanged:: 2.6
``memoryview::from_memory`` added.
| PypiClean |
/Diofant-0.14.0a2.tar.gz/Diofant-0.14.0a2/diofant/vector/coordsysrect.py | from ..core import Basic, Symbol, cacheit
from ..matrices import ImmutableMatrix, eye
from ..simplify import trigsimp
from .orienters import (AxisOrienter, BodyOrienter, Orienter,
QuaternionOrienter, SpaceOrienter)
from .scalar import BaseScalar
class CoordSysCartesian(Basic):
"""Represents a coordinate system in 3-D space."""
def __new__(cls, name, location=None, rotation_matrix=None,
            parent=None, vector_names=None, variable_names=None):
    """
    Construct a new CoordSysCartesian.

    The orientation/location parameters are necessary if this system
    is being defined at a certain orientation or location wrt another.

    Parameters
    ==========

    name : str
        The name of the new CoordSysCartesian instance.

    location : Vector
        The position vector of the new system's origin wrt the parent
        instance.

    rotation_matrix : Diofant ImmutableMatrix
        The rotation matrix of the new coordinate system with respect
        to the parent. In other words, the output of
        new_system.rotation_matrix(parent).

    parent : CoordSysCartesian
        The coordinate system wrt which the orientation/location
        (or both) is being defined.

    vector_names, variable_names : iterable(optional)
        Iterables of 3 strings each, with custom names for base
        vectors and base scalars of the new system respectively.
        Used for simple str printing.

    """
    # Imported locally to avoid circular imports with the vector modules.
    from .deloperator import Del
    from .point import Point
    from .vector import BaseVector, Vector

    name = str(name)

    # If orientation information has been provided, store
    # the rotation matrix accordingly
    if rotation_matrix is None:
        parent_orient = ImmutableMatrix(eye(3))
    else:
        if not isinstance(rotation_matrix, ImmutableMatrix):
            raise TypeError('rotation_matrix should be an Immutable' +
                            'Matrix instance')
        parent_orient = rotation_matrix

    # If location information is not given, adjust the default
    # location as Vector.zero
    if parent is not None:
        if not isinstance(parent, CoordSysCartesian):
            raise TypeError('parent should be a ' +
                            'CoordSysCartesian/None')
        if location is None:
            location = Vector.zero
        else:
            if not isinstance(location, Vector):
                raise TypeError('location should be a Vector')
        origin = parent.origin.locate_new(name + '.origin',
                                          location)
    else:
        location = Vector.zero
        origin = Point(name + '.origin')

    # All systems that are defined as 'roots' are unequal, unless
    # they have the same name.
    # Systems defined at same orientation/position wrt the same
    # 'parent' are equal, irrespective of the name.
    # This is true even if the same orientation is provided via
    # different methods like Axis/Body/Space/Quaternion.
    # However, coincident systems may be seen as unequal if
    # positioned/oriented wrt different parents, even though
    # they may actually be 'coincident' wrt the root system.
    if parent is not None:
        obj = super().__new__(cls, Symbol(name), location, parent_orient, parent)
    else:
        obj = super().__new__(cls, Symbol(name), location, parent_orient)
    obj._name = name

    # Initialize the base vectors
    if vector_names is None:
        vector_names = (name + '.i', name + '.j', name + '.k')
        latex_vects = [(r'\mathbf{\hat{i}_{%s}}' % name),
                       (r'\mathbf{\hat{j}_{%s}}' % name),
                       (r'\mathbf{\hat{k}_{%s}}' % name)]
        pretty_vects = (name + '_i', name + '_j', name + '_k')
    else:
        _check_strings('vector_names', vector_names)
        vector_names = list(vector_names)
        latex_vects = [(r'\mathbf{\hat{%s}_{%s}}' % (x, name)) for
                       x in vector_names]
        pretty_vects = [(name + '_' + x) for x in vector_names]
    obj._i = BaseVector(vector_names[0], 0, obj,
                        pretty_vects[0], latex_vects[0])
    obj._j = BaseVector(vector_names[1], 1, obj,
                        pretty_vects[1], latex_vects[1])
    obj._k = BaseVector(vector_names[2], 2, obj,
                        pretty_vects[2], latex_vects[2])

    # Initialize the base scalars
    if variable_names is None:
        variable_names = (name + '.x', name + '.y', name + '.z')
        latex_scalars = [(r'\mathbf{{x}_{%s}}' % name),
                         (r'\mathbf{{y}_{%s}}' % name),
                         (r'\mathbf{{z}_{%s}}' % name)]
        pretty_scalars = (name + '_x', name + '_y', name + '_z')
    else:
        # BUG FIX: this previously validated ``vector_names`` (already
        # normalized above) instead of the user-supplied
        # ``variable_names``, so invalid variable_names were never caught.
        _check_strings('variable_names', variable_names)
        variable_names = list(variable_names)
        latex_scalars = [(r'\mathbf{{%s}_{%s}}' % (x, name)) for
                         x in variable_names]
        pretty_scalars = [(name + '_' + x) for x in variable_names]
    obj._x = BaseScalar(variable_names[0], 0, obj,
                        pretty_scalars[0], latex_scalars[0])
    obj._y = BaseScalar(variable_names[1], 1, obj,
                        pretty_scalars[1], latex_scalars[1])
    obj._z = BaseScalar(variable_names[2], 2, obj,
                        pretty_scalars[2], latex_scalars[2])

    # Assign a Del operator instance
    obj._delop = Del(obj)

    # Assign params
    obj._parent = parent
    if obj._parent is not None:
        obj._root = obj._parent._root
    else:
        obj._root = obj
    obj._parent_rotation_matrix = parent_orient
    obj._origin = origin

    # Return the instance
    return obj
def __str__(self, printer=None):
    """Return the system's name (``printer`` is accepted for the printing API)."""
    return self._name

__repr__ = __str__
_diofantstr = __str__
def __iter__(self):
    """Iterate over the three base vectors, in (i, j, k) order."""
    base = [self.i, self.j, self.k]
    return iter(base)
@property
def origin(self):
    """The Point representing this system's origin."""
    return self._origin
@property
def delop(self):
    """The Del (nabla) operator bound to this system."""
    return self._delop
@property
def i(self):
    """First base vector of this system."""
    return self._i
@property
def j(self):
    """Second base vector of this system."""
    return self._j
@property
def k(self):
    """Third base vector of this system."""
    return self._k
@property
def x(self):
    """First base scalar (coordinate variable) of this system."""
    return self._x
@property
def y(self):
    """Second base scalar (coordinate variable) of this system."""
    return self._y
@property
def z(self):
    """Third base scalar (coordinate variable) of this system."""
    return self._z
def base_vectors(self):
    """Return the three base vectors as an (i, j, k) tuple."""
    return (self._i, self._j, self._k)
def base_scalars(self):
    """Return the three base scalars as an (x, y, z) tuple."""
    return (self._x, self._y, self._z)
@cacheit
def rotation_matrix(self, other):
    """
    Returns the direction cosine matrix(DCM), also known as the
    'rotation matrix' of this coordinate system with respect to
    another system.

    If v_a is a vector defined in system 'A' (in matrix format)
    and v_b is the same vector defined in system 'B', then
    v_a = A.rotation_matrix(B) * v_b.

    A Diofant Matrix is returned.

    Parameters
    ==========

    other : CoordSysCartesian
        The system which the DCM is generated to.

    Examples
    ========

    >>> q1 = symbols('q1')
    >>> N = CoordSysCartesian('N')
    >>> A = N.orient_new_axis('A', q1, N.i)
    >>> N.rotation_matrix(A)
    Matrix([
    [1, 0, 0],
    [0, cos(q1), -sin(q1)],
    [0, sin(q1), cos(q1)]])

    """
    # Imported here to avoid a circular import with the functions module.
    from .functions import _path
    if not isinstance(other, CoordSysCartesian):
        raise TypeError(str(other) +
                        ' is not a CoordSysCartesian')
    # Handle special cases
    if other == self:
        return eye(3)
    elif other == self._parent:
        return self._parent_rotation_matrix
    elif other._parent == self:
        return other._parent_rotation_matrix.T
    # Else, use tree to calculate position
    # NOTE(review): _path presumably returns the chain of systems linking
    # self and other, with path[rootindex] their common ancestor — confirm
    # against .functions._path.
    rootindex, path = _path(self, other)
    result = eye(3)
    i = -1  # keeps i defined when rootindex == 0 (for loop body never runs)
    for i in range(rootindex):
        # Accumulate rotations walking up from self towards the ancestor.
        result *= path[i]._parent_rotation_matrix
    i += 2  # jump from rootindex-1 past the ancestor entry at path[rootindex]
    while i < len(path):
        # Walk down the other branch; .T applies each rotation in reverse.
        result *= path[i]._parent_rotation_matrix.T
        i += 1
    return result
@cacheit
def position_wrt(self, other):
    """
    Return the position vector of this coordinate system's origin
    with respect to another Point/CoordSysCartesian.

    Parameters
    ==========

    other : Point/CoordSysCartesian
        If other is a Point, the position of this system's origin
        wrt it is returned. If it is an instance of
        CoordSysCartesian, the position wrt its origin is returned.

    Examples
    ========

    >>> N = CoordSysCartesian('N')
    >>> N1 = N.locate_new('N1', 10 * N.i)
    >>> N.position_wrt(N1)
    (-10)*N.i

    """
    # Delegate to the origin Point, which knows how to walk the tree.
    return self.origin.position_wrt(other)
def scalar_map(self, other):
    """
    Return a dictionary expressing the coordinate variables
    (base scalars) of this frame in terms of the variables of
    ``other``.

    Parameters
    ==========

    other : CoordSysCartesian
        The other system to map the variables to.

    Examples
    ========

    >>> A = CoordSysCartesian('A')
    >>> q = Symbol('q')
    >>> B = A.orient_new_axis('B', q, A.k)
    >>> A.scalar_map(B)
    {A.x: -sin(q)*B.y + cos(q)*B.x, A.y: sin(q)*B.x + cos(q)*B.y, A.z: B.z}

    """
    # Shift other's scalars by this origin's coordinates in other ...
    origin_coords = tuple(self.position_wrt(other).to_matrix(other))
    relocated = [scalar - origin_coords[idx]
                 for idx, scalar in enumerate(other.base_scalars())]
    # ... then rotate them into this system's axes.
    vars_matrix = (self.rotation_matrix(other) *
                   ImmutableMatrix(relocated))
    # trigsimp keeps the resulting expressions in canonical form.
    return {scalar: trigsimp(vars_matrix[idx]).doit()
            for idx, scalar in enumerate(self.base_scalars())}
def locate_new(self, name, position, vector_names=None,
               variable_names=None):
    """
    Return a CoordSysCartesian whose origin is located at ``position``
    wrt this coordinate system's origin.

    Parameters
    ==========

    name : str
        The name of the new CoordSysCartesian instance.

    position : Vector
        The position vector of the new system's origin wrt this
        one.

    vector_names, variable_names : iterable(optional)
        Iterables of 3 strings each, with custom names for base
        vectors and base scalars of the new system respectively.
        Used for simple str printing.

    Examples
    ========

    >>> A = CoordSysCartesian('A')
    >>> B = A.locate_new('B', 10 * A.i)
    >>> B.origin.position_wrt(A.origin)
    10*A.i

    """
    # Same orientation as self, only the origin moves.
    return CoordSysCartesian(name,
                             location=position,
                             parent=self,
                             vector_names=vector_names,
                             variable_names=variable_names)
def orient_new(self, name, orienters, location=None,
               vector_names=None, variable_names=None):
    """
    Create a new CoordSysCartesian oriented in the user-specified way
    with respect to this system.

    Please refer to the documentation of the orienter classes
    for more information about the orientation procedure.

    Parameters
    ==========

    name : str
        The name of the new CoordSysCartesian instance.

    orienters : iterable/Orienter
        An Orienter or an iterable of Orienters for orienting the
        new coordinate system.
        If an Orienter is provided, it is applied to get the new
        system.
        If an iterable is provided, the orienters will be applied
        in the order in which they appear in the iterable.

    location : Vector(optional)
        The location of the new coordinate system's origin wrt this
        system's origin. If not specified, the origins are taken to
        be coincident.

    vector_names, variable_names : iterable(optional)
        Iterables of 3 strings each, with custom names for base
        vectors and base scalars of the new system respectively.
        Used for simple str printing.

    Examples
    ========

    >>> q0, q1, q2, q3 = symbols('q0 q1 q2 q3')
    >>> N = CoordSysCartesian('N')

    Using an AxisOrienter

    >>> axis_orienter = AxisOrienter(q1, N.i + 2 * N.j)
    >>> A = N.orient_new('A', [axis_orienter])

    Using a BodyOrienter

    >>> body_orienter = BodyOrienter(q1, q2, q3, '123')
    >>> B = N.orient_new('B', [body_orienter])

    Using a SpaceOrienter

    >>> space_orienter = SpaceOrienter(q1, q2, q3, '312')
    >>> C = N.orient_new('C', [space_orienter])

    Using a QuaternionOrienter

    >>> q_orienter = QuaternionOrienter(q0, q1, q2, q3)
    >>> D = N.orient_new('D', [q_orienter])

    """
    def _matrix_of(orienter):
        # An AxisOrienter needs the parent system to resolve its axis.
        if isinstance(orienter, AxisOrienter):
            return orienter.rotation_matrix(self)
        return orienter.rotation_matrix()

    if isinstance(orienters, Orienter):
        # TODO: trigsimp is needed here so that the matrix becomes
        # canonical (scalar_map also calls trigsimp; without this, you can
        # end up with the same CoordinateSystem that compares differently
        # due to a differently formatted matrix). However, this is
        # probably not so good for performance.
        final_matrix = trigsimp(_matrix_of(orienters))
    else:
        # Compose the orienters in iteration order.
        final_matrix = ImmutableMatrix(eye(3))
        for orienter in orienters:
            final_matrix *= _matrix_of(orienter)

    return CoordSysCartesian(name,
                             rotation_matrix=final_matrix.doit(),
                             location=location,
                             parent=self,
                             vector_names=vector_names,
                             variable_names=variable_names)
def orient_new_axis(self, name, angle, axis, location=None,
                    vector_names=None, variable_names=None):
    """
    Create a new coordinate system by rotation about an arbitrary axis.

    Axis rotation is a rotation about an arbitrary axis by some angle.
    The angle is supplied as a Diofant expr scalar, and the axis is
    supplied as a Vector.

    Parameters
    ==========

    name : string
        The name of the new coordinate system
    angle : Expr
        The angle by which the new system is to be rotated
    axis : Vector
        The axis around which the rotation has to be performed
    location : Vector(optional)
        The location of the new coordinate system's origin wrt this
        system's origin. If not specified, the origins are taken to
        be coincident.
    vector_names, variable_names : iterable(optional)
        Iterables of 3 strings each, with custom names for base
        vectors and base scalars of the new system respectively.
        Used for simple str printing.

    Examples
    ========

    >>> q1 = symbols('q1')
    >>> N = CoordSysCartesian('N')
    >>> B = N.orient_new_axis('B', q1, N.i + 2 * N.j)
    """
    # Delegate to the generic orienter-based machinery.
    return self.orient_new(name, AxisOrienter(angle, axis),
                           location=location,
                           vector_names=vector_names,
                           variable_names=variable_names)
def orient_new_body(self, name, angle1, angle2, angle3,
                    rotation_order, location=None,
                    vector_names=None, variable_names=None):
    """
    Create a new coordinate system via three successive body-fixed rotations.

    Body orientation takes this coordinate system through three
    successive simple rotations. Body fixed rotations include both
    Euler Angles and Tait-Bryan Angles, see
    https://en.wikipedia.org/wiki/Euler_angles.

    Parameters
    ==========

    name : string
        The name of the new coordinate system
    angle1, angle2, angle3 : Expr
        Three successive angles to rotate the coordinate system by
    rotation_order : string
        String defining the order of axes for rotation
    location : Vector(optional)
        The location of the new coordinate system's origin wrt this
        system's origin. If not specified, the origins are taken to
        be coincident.
    vector_names, variable_names : iterable(optional)
        Iterables of 3 strings each, with custom names for base
        vectors and base scalars of the new system respectively.
        Used for simple str printing.

    Examples
    ========

    >>> q1, q2, q3 = symbols('q1 q2 q3')
    >>> N = CoordSysCartesian('N')

    A 'Body' fixed rotation is described by three angles and
    three body-fixed rotation axes. To orient a coordinate system D
    with respect to N, each sequential rotation is always about
    the orthogonal unit vectors fixed to D. For example, a '123'
    rotation will specify rotations about N.i, then D.j, then
    D.k. (Initially, D.i is same as N.i)

    Therefore,

    >>> D = N.orient_new_body('D', q1, q2, q3, '123')

    is same as

    >>> D = N.orient_new_axis('D', q1, N.i)
    >>> D = D.orient_new_axis('D', q2, D.j)
    >>> D = D.orient_new_axis('D', q3, D.k)

    Acceptable rotation orders are of length 3, expressed in XYZ or
    123, and cannot have a rotation about an axis twice in a row.

    >>> B = N.orient_new_body('B', q1, q2, q3, '123')
    >>> B = N.orient_new_body('B', q1, q2, 0, 'ZXZ')
    >>> B = N.orient_new_body('B', 0, 0, 0, 'XYX')
    """
    # Delegate to the generic orienter-based machinery.
    return self.orient_new(name,
                           BodyOrienter(angle1, angle2, angle3, rotation_order),
                           location=location,
                           vector_names=vector_names,
                           variable_names=variable_names)
def orient_new_space(self, name, angle1, angle2, angle3,
                     rotation_order, location=None,
                     vector_names=None, variable_names=None):
    """
    Create a new coordinate system via three successive space-fixed rotations.

    Space rotation is similar to Body rotation, but the rotations
    are applied in the opposite order.

    Parameters
    ==========

    name : string
        The name of the new coordinate system
    angle1, angle2, angle3 : Expr
        Three successive angles to rotate the coordinate system by
    rotation_order : string
        String defining the order of axes for rotation
    location : Vector(optional)
        The location of the new coordinate system's origin wrt this
        system's origin. If not specified, the origins are taken to
        be coincident.
    vector_names, variable_names : iterable(optional)
        Iterables of 3 strings each, with custom names for base
        vectors and base scalars of the new system respectively.
        Used for simple str printing.

    See Also
    ========

    diofant.vector.coordsysrect.CoordSysCartesian.orient_new_body :
        method to orient via Euler angles

    Examples
    ========

    >>> q1, q2, q3 = symbols('q1 q2 q3')
    >>> N = CoordSysCartesian('N')

    To orient a coordinate system D with respect to N, each
    sequential rotation is always about N's orthogonal unit vectors.
    For example, a '123' rotation will specify rotations about
    N.i, then N.j, then N.k.

    Therefore,

    >>> D = N.orient_new_space('D', q1, q2, q3, '312')

    is same as

    >>> B = N.orient_new_axis('B', q1, N.i)
    >>> C = B.orient_new_axis('C', q2, N.j)
    >>> D = C.orient_new_axis('D', q3, N.k)
    """
    # Delegate to the generic orienter-based machinery.
    return self.orient_new(name,
                           SpaceOrienter(angle1, angle2, angle3, rotation_order),
                           location=location,
                           vector_names=vector_names,
                           variable_names=variable_names)
def orient_new_quaternion(self, name, q0, q1, q2, q3, location=None,
                          vector_names=None, variable_names=None):
    """
    Create a new coordinate system oriented by a quaternion.

    Quaternion orientation orients the new CoordSysCartesian with
    Quaternions, defined as a finite rotation about lambda, a unit
    vector, by some amount theta. This orientation is described by
    four parameters:

    * q0 = cos(theta/2)
    * q1 = lambda_x sin(theta/2)
    * q2 = lambda_y sin(theta/2)
    * q3 = lambda_z sin(theta/2)

    Quaternion does not take in a rotation order.

    Parameters
    ==========

    name : string
        The name of the new coordinate system
    q0, q1, q2, q3 : Expr
        The quaternions to rotate the coordinate system by
    location : Vector(optional)
        The location of the new coordinate system's origin wrt this
        system's origin. If not specified, the origins are taken to
        be coincident.
    vector_names, variable_names : iterable(optional)
        Iterables of 3 strings each, with custom names for base
        vectors and base scalars of the new system respectively.
        Used for simple str printing.

    Examples
    ========

    >>> q0, q1, q2, q3 = symbols('q0 q1 q2 q3')
    >>> N = CoordSysCartesian('N')
    >>> B = N.orient_new_quaternion('B', q0, q1, q2, q3)
    """
    # Delegate to the generic orienter-based machinery.
    return self.orient_new(name,
                           QuaternionOrienter(q0, q1, q2, q3),
                           location=location,
                           vector_names=vector_names,
                           variable_names=variable_names)
def __init__(self, name, location=None, rotation_matrix=None,
             parent=None, vector_names=None, variable_names=None,
             latex_vects=None, pretty_vects=None, latex_scalars=None,
             pretty_scalars=None):
    # Instances are actually constructed in __new__ (defined earlier,
    # outside this excerpt); this no-op __init__ exists only so the
    # constructor signature and docstring show up on the class.
    # Dummy initializer for setting docstring
    pass

# Mirror __new__'s documentation onto __init__ so help() and doc tools
# display the real constructor contract.
__init__.__doc__ = __new__.__doc__
def _check_strings(arg_name, arg):
errorstr = arg_name + ' must be an iterable of 3 string-types'
if len(arg) != 3:
raise ValueError(errorstr)
for s in arg:
if not isinstance(s, str):
raise TypeError(errorstr) | PypiClean |
/MindYourNeighbors-1.0.0.tar.gz/MindYourNeighbors-1.0.0/src/mind_your_neighbors/cache.py | import json
import logging
from os import path
from functools import wraps
logger = logging.getLogger('MindYourNeighbors')
def _read_cache(file_path):
if path.exists(file_path):
with open(file_path, 'r') as fp:
return json.load(fp)
return {}
def _write_cache(file_path, cache):
with open(file_path, 'w') as fp:
json.dump(cache, fp)
class Cache:
    """Context manager persisting per-section results and commands to JSON.

    Usage: set ``section_name`` after entering the context, then use
    :meth:`cache_result` / :meth:`cache_command`. The cache file is read
    on ``__enter__`` and written back on a *clean* ``__exit__``.
    """

    def __init__(self, cache_file):
        self.section_name = None  # must be set by the caller before use
        self.cache_file = cache_file
        self._cache_dict = {}

    def __enter__(self):
        self._cache_dict = _read_cache(self.cache_file)
        return self

    def __exit__(self, rtype, rvalue, traceback):
        # Persist only when the block exited without an exception.
        # Checking the exception *type* against None also covers
        # BaseException subclasses such as KeyboardInterrupt/SystemExit,
        # which the previous `isinstance(rvalue, Exception)` test missed
        # (and so wrote a possibly half-updated cache on Ctrl-C).
        if rtype is None:
            _write_cache(self.cache_file, self._cache_dict)

    @property
    def section(self):
        """Return the stored dictionary for the instance's section."""
        assert self.section_name is not None, "you must set section_name"
        if self.section_name not in self._cache_dict:
            self._cache_dict[self.section_name] = {
                'results': [], 'last_command': None}
        return self._cache_dict[self.section_name]

    def cache_result(self, result, threshold):
        """Store a result into cache and maintain the cache coherent.

        Only the ``threshold`` most recent results are kept.
        """
        count = self.get_result_count(result)
        self.section['results'].append(result)
        self.section['results'] = self.section['results'][-threshold:]
        if count != threshold:
            logger.debug('cache/%s/%s %d => %d', self.section_name, result,
                         count, self.get_result_count(result))

    def get_result_count(self, result):
        """Return how many of the cached results equal *result*."""
        return self.section['results'].count(result)

    def cache_command(self, command):
        """Store *command* as the last command launched."""
        self.section['last_command'] = command

    @property
    def last_command(self):
        """The last command cached for this section (or None)."""
        return self.section['last_command']
def wrap(func):
    """Decorate *func* so it runs inside a fresh :class:`Cache` context.

    The cache file location is read from the configuration's default
    section, and the opened cache is injected as the second positional
    argument of the wrapped function.
    """
    @wraps(func)
    def wrapped(config, *args, **kwargs):
        location = config.get(config.default_section, 'cache_file')
        with Cache(location) as cache:
            return func(config, cache, *args, **kwargs)
    return wrapped
/IdracRedfishSupport-0.0.8.tar.gz/IdracRedfishSupport-0.0.8/TestNetworkShareREDFISH.py |
import argparse
import getpass
import json
import logging
import re
import requests
import sys
import time
import warnings
from datetime import datetime
from pprint import pprint
warnings.filterwarnings("ignore")
# Command-line interface. NOTE: the original description string was garbled
# (an example invocation was spliced into it mid-word); restored to coherent
# help text describing the script's purpose.
parser = argparse.ArgumentParser(description="Python script using Redfish API with OEM extension to test iDRAC access to a network share. This is helpful in validating that the iDRAC can access a network share before attempting to use a feature such as Server Configuration Profile (SCP) export/import.")
parser.add_argument('-ip',help='iDRAC IP address', required=False)
parser.add_argument('-u', help='iDRAC username', required=False)
parser.add_argument('-p', help='iDRAC password. If you do not pass in argument -p, script will prompt to enter user password which will not be echoed to the screen.', required=False)
parser.add_argument('-x', help='Pass in X-Auth session token for executing Redfish calls. All Redfish calls will use X-Auth token instead of username/password', required=False)
parser.add_argument('--ssl', help='SSL cert verification for all Redfish calls, pass in value \"true\" or \"false\". By default, this argument is not required and script ignores validating SSL cert for all Redfish calls.', required=False)
parser.add_argument('--script-examples', help='Get executing script examples', action="store_true", dest="script_examples", required=False)
# Network-share parameters forwarded to the TestNetworkShare OEM action.
parser.add_argument('--shareip', help='Pass in the IP address of the network share', required=False)
parser.add_argument('--sharetype', help='Pass in the share type of the network share. Supported values are NFS, CIFS, HTTP, HTTPS.', required=False)
parser.add_argument('--sharename', help='Pass in the network share share name', required=False)
parser.add_argument('--username', help='Pass in the CIFS username. This is only required if using CIFS share', required=False)
parser.add_argument('--password', help='Pass in the CIFS username pasword. This is only required if using CIFS share', required=False)
parser.add_argument('--ignorecertwarning', help='Supported values are Off and On. This argument is only required if using HTTPS for share type', required=False)
args = vars(parser.parse_args())

# Log plain messages to stdout.
logging.basicConfig(format='%(message)s', stream=sys.stdout, level=logging.INFO)
def script_examples():
    """Print an example invocation of this script, then exit successfully."""
    print("""\n- TestNetworkShareREDFISH.py -ip 192.168.0.120 -u root -p calvin --ipaddress 192.168.0.130 --sharename cifs_share_vm --username administrator --password P@ss --sharetype CIFS, this example shows testing network share connection to CIFS share.""")
    raise SystemExit(0)
def check_supported_idrac_version():
    """Verify the iDRAC is reachable and exposes the DellLCService OEM API.

    Exits the script on authentication failure (401) or on any other
    non-200 response. Relies on module globals ``args``, ``idrac_ip``,
    ``idrac_username``, ``idrac_password`` and ``verify_cert``.
    """
    if args["x"]:
        response = requests.get('https://%s/redfish/v1/Dell/Managers/iDRAC.Embedded.1/DellLCService' % idrac_ip, verify=verify_cert, headers={'X-Auth-Token': args["x"]})
    else:
        response = requests.get('https://%s/redfish/v1/Dell/Managers/iDRAC.Embedded.1/DellLCService' % idrac_ip, verify=verify_cert, auth=(idrac_username, idrac_password))
    # NOTE: the previous version called response.json() here and discarded the
    # result; that could raise ValueError on an empty/non-JSON body before the
    # status-code checks ran, so the call has been removed.
    if response.status_code == 401:
        logging.warning("\n- WARNING, status code %s returned, check your iDRAC username/password is correct or iDRAC user has correct privileges to execute Redfish commands" % response.status_code)
        sys.exit(0)
    if response.status_code != 200:
        logging.warning("\n- WARNING, GET command failed to check supported iDRAC version, status code %s returned" % response.status_code)
        sys.exit(0)
def test_network_share():
    """POST the TestNetworkShare OEM action using share details from the CLI.

    Builds the action payload only from the arguments the user actually
    supplied, reports pass/fail, and exits on failure. Relies on module
    globals ``args``, ``idrac_ip``, ``idrac_username``, ``idrac_password``
    and ``verify_cert``.
    """
    # NOTE: removed an unused `global job_id` declaration (job_id was never
    # assigned) and an unused response.json() call on the success path that
    # could raise ValueError on an empty body.
    url = 'https://%s/redfish/v1/Dell/Managers/iDRAC.Embedded.1/DellLCService/Actions/DellLCService.TestNetworkShare' % (idrac_ip)
    method = "TestNetworkShare"
    # Map CLI argument name -> Redfish payload property name.
    arg_to_property = {"shareip": "IPAddress",
                       "sharetype": "ShareType",
                       "sharename": "ShareName",
                       "username": "UserName",
                       "password": "Password",
                       "ignorecertwarning": "IgnoreCertWarning"}
    payload = {prop: args[arg] for arg, prop in arg_to_property.items()
               if args[arg]}
    if args["x"]:
        headers = {'content-type': 'application/json', 'X-Auth-Token': args["x"]}
        response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert)
    else:
        headers = {'content-type': 'application/json'}
        response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert,auth=(idrac_username,idrac_password))
    if response.status_code == 202 or response.status_code == 200:
        logging.info("\n- PASS: POST command passed for %s method, status code %s returned" % (method, response.status_code))
    else:
        logging.error("\n- FAIL, POST command failed for %s method, status code is %s" % (method, response.status_code))
        data = response.json()
        logging.error("\n- POST command failure results:\n %s" % data)
        sys.exit(0)
if __name__ == "__main__":
    # Print usage examples and exit if requested.
    if args["script_examples"]:
        script_examples()
    # Require at least one connection-related argument before doing anything.
    if args["ip"] or args["ssl"] or args["u"] or args["p"] or args["x"]:
        idrac_ip = args["ip"]
        idrac_username = args["u"]
        if args["p"]:
            idrac_password = args["p"]
        # No password and no session token: prompt interactively (not echoed).
        if not args["p"] and not args["x"] and args["u"]:
            idrac_password = getpass.getpass("\n- Argument -p not detected, pass in iDRAC user %s password: " % args["u"])
        # SSL certificate verification defaults to off unless --ssl true.
        if args["ssl"]:
            if args["ssl"].lower() == "true":
                verify_cert = True
            elif args["ssl"].lower() == "false":
                verify_cert = False
            else:
                verify_cert = False
        else:
            verify_cert = False
        # Confirm the iDRAC supports the OEM API before running the test.
        check_supported_idrac_version()
    else:
        logging.error("\n- FAIL, invalid argument values or not all required parameters passed in. See help text or argument --script-examples for more details.")
        sys.exit(0)
    test_network_share()
/BraTS_Toolkit-1.0.5-py3-none-any.whl/brats_toolkit/preprocessor.py | import socketio
from brats_toolkit.util.docker_functions import start_docker, stop_docker, update_docker
import os
import tempfile
from pathlib import Path
from brats_toolkit.util.prep_utils import tempFiler
import sys
class Preprocessor(object):
def __init__(self, noDocker=False):
    """Set up client state and register socket.io event handlers.

    noDocker: when True, callers are expected to skip docker management
    (see batch_preprocess). The socket.io handlers below drive the
    interactive preprocessing protocol with the server.
    """
    # settings
    self.clientVersion = "0.0.1"
    self.confirmationRequired = True
    self.mode = "cpu"
    self.gpuid = "0"
    # init sio client
    self.sio = socketio.Client()
    # set docker usage
    self.noDocker = noDocker

    @self.sio.event
    def connect():
        # Identify this client (version + processing mode) to the server.
        print("connection established! sid:", self.sio.sid)
        # client identification
        self.sio.emit(
            "clientidentification",
            {"brats_cli": self.clientVersion, "proc_mode": self.mode},
        )

    @self.sio.event
    def connect_error():
        print("The connection failed!")

    @self.sio.event
    def disconnect():
        print("disconnected from server")

    @self.sio.on("message")
    def message(data):
        print("message", data)

    @self.sio.on("status")
    def on_status(data):
        # Server-driven state machine: each status message triggers the
        # next protocol step (inspect input -> confirm -> process -> done).
        print("status reveived: ", data)
        if data["message"] == "client ID json generation finished!":
            self._inspect_input()
        elif data["message"] == "input inspection finished!":
            if "data" in data:
                print("input inspection found the following exams: ", data["data"])
            if self.confirmationRequired:
                # Interactive confirmation before processing starts.
                confirmation = input(
                    'press "y" to continue or "n" to scan the input folder again.'
                ).lower()
            else:
                confirmation = "y"
            if confirmation == "n":
                self._inspect_input()
            if confirmation == "y":
                self._process_start()
        elif data["message"] == "image processing successfully completed.":
            # Clean shutdown once the server reports completion.
            self.sio.disconnect()
            stop_docker()
            sys.exit(0)

    @self.sio.on("client_outdated")
    def outdated(data):
        # Server rejected our client version: tell the user and shut down.
        print(
            "Your client version",
            self.clientVersion,
            "is outdated. Please download version",
            data,
            "from:",
        )
        print("https://neuronflow.github.io/brats-preprocessor/")
        self.sio.disconnect()
        stop_docker()
        sys.exit(0)

    @self.sio.on("ipstatus")
    def on_ipstatus(data):
        # Per-exam image-processing progress updates.
        print("image processing status reveived:")
        print(data["examid"], ": ", data["ipstatus"])
def single_preprocess(
    self,
    t1File,
    t1cFile,
    t2File,
    flaFile,
    outputFolder,
    mode,
    confirm=False,
    skipUpdate=False,
    gpuid="0",
):
    """Preprocess a single exam given its four modality files.

    The four input files (T1, contrast-enhanced T1, T2, FLAIR) are
    staged with canonical names into a temporary exam folder, which is
    then handed to batch_preprocess as a one-exam batch. The output is
    written next to *outputFolder* (its parent is mounted into docker).
    """
    # assign name to file
    print("basename:", os.path.basename(outputFolder))
    outputPath = Path(outputFolder)
    # Mount the parent of the requested output folder into docker.
    dockerOutputFolder = os.path.abspath(outputPath.parent)

    # create temp dir
    storage = tempfile.TemporaryDirectory()
    # TODO this is a potential security hazzard as all users can access the files now, but currently it seems the only way to deal with bad configured docker installations
    os.chmod(storage.name, 0o777)
    dockerFolder = os.path.abspath(storage.name)
    # One sub-folder per exam, named after the requested output folder.
    tempFolder = os.path.join(dockerFolder, os.path.basename(outputFolder))
    os.makedirs(tempFolder, exist_ok=True)
    print("tempFold:", tempFolder)

    # create temp Files (copies each modality under its canonical name)
    tempFiler(t1File, "t1", tempFolder)
    tempFiler(t1cFile, "t1c", tempFolder)
    tempFiler(t2File, "t2", tempFolder)
    tempFiler(flaFile, "fla", tempFolder)

    self.batch_preprocess(
        exam_import_folder=dockerFolder,
        exam_export_folder=dockerOutputFolder,
        mode=mode,
        confirm=confirm,
        skipUpdate=skipUpdate,
        gpuid=gpuid,
    )
def batch_preprocess(
    self,
    exam_import_folder=None,
    exam_export_folder=None,
    dicom_import_folder=None,
    nifti_export_folder=None,
    mode="cpu",
    confirm=True,
    skipUpdate=False,
    gpuid="0",
):
    """Preprocess a batch of exams via the dockerised server.

    Starts (and optionally updates) the processing docker unless the
    instance was created with noDocker=True, then connects the socket.io
    client and blocks until the server-driven protocol finishes.
    """
    # NOTE: only the literal True keeps confirmation enabled; any other
    # value (including truthy ones other than 1/True) disables it.
    if confirm != True:
        self.confirmationRequired = False
    self.mode = mode
    self.gpuid = gpuid

    if self.noDocker != True:
        # Restart docker from a clean state, updating the image unless told
        # to skip.
        stop_docker()
        if skipUpdate != True:
            update_docker()
        start_docker(
            exam_import_folder=exam_import_folder,
            exam_export_folder=exam_export_folder,
            dicom_import_folder=dicom_import_folder,
            nifti_export_folder=nifti_export_folder,
            mode=self.mode,
            gpuid=self.gpuid,
        )

    # setup connection
    # TODO do this in a more elegant way and somehow check whether docker is up and running before connect
    self.sio.sleep(5)  # wait 5 secs for docker to start
    self._connect_client()
    # Block until an event handler calls sys.exit / disconnect.
    self.sio.wait()
def _connect_client(self):
    """Connect the socket.io client to the local dockerised server."""
    self.sio.connect("http://localhost:5000")
    print("sid:", self.sio.sid)
def _inspect_input(self):
    """Ask the server to scan the input folder for exams."""
    print("sending input inspection request!")
    self.sio.emit("input_inspection", {"hurray": "yes"})
def _process_start(self):
    """Ask the server to start processing the inspected exams."""
    print("sending processing request!")
    self.sio.emit("brats_processing", {"hurray": "yes"})
/Brain-Brew-0.3.10.tar.gz/Brain-Brew-0.3.10/brain_brew/representation/generic/csv_file.py | import csv
import re
import logging
from enum import Enum
from typing import List
from brain_brew.representation.generic.source_file import SourceFile
from brain_brew.utils import list_of_str_to_lowercase, sort_dict
_encoding = "utf-8"
class CsvKeys(Enum):
    # Reserved column names with special meaning in note CSV files.
    GUID = "guid"  # unique note identifier column
    TAGS = "tags"  # note tags column
class CsvFile(SourceFile):
    """A delimited (CSV/TSV) source file holding note data as row dicts.

    Column headers are normalised to lowercase on read. The delimiter is
    taken from the constructor, or inferred from the file extension
    (``.tsv`` implies tab), defaulting to a comma.
    """

    # Class-level annotations/defaults (overridden per instance in __init__).
    file_location: str = ""
    _data: List[dict] = []
    column_headers: list = []
    delimiter: str = ','

    def __init__(self, file, delimiter=None):
        self.file_location = file
        # Initialise per-instance state so instances never mutate the shared
        # class-level mutable defaults declared above.
        self._data = []
        self.column_headers = []
        self.set_delimiter(delimiter)

    def set_delimiter(self, delimiter: str):
        """Set the delimiter explicitly, or infer a tab for .tsv files."""
        if delimiter:
            self.delimiter = delimiter
        elif re.match(r'.*\.tsv', self.file_location, re.RegexFlag.IGNORECASE):
            self.delimiter = '\t'

    @classmethod
    def from_file_loc(cls, file_loc) -> 'CsvFile':
        """Alternate constructor from a file location."""
        return cls(file_loc)

    def read_file(self):
        """Read the file into memory, lowercasing all column headers."""
        self._data = []
        with open(self.file_location, mode='r', newline='', encoding=_encoding) as csv_file:
            csv_reader = csv.DictReader(csv_file, delimiter=self.delimiter)
            self.column_headers = list_of_str_to_lowercase(csv_reader.fieldnames)
            for row in csv_reader:
                self._data.append({key.lower(): row[key] for key in row})

    def write_file(self):
        """Write the in-memory rows back to disk with the current headers."""
        logging.info(f"Writing to Csv '{self.file_location}'")
        with open(self.file_location, mode='w+', newline='', encoding=_encoding) as csv_file:
            csv_writer = csv.DictWriter(csv_file, fieldnames=self.column_headers,
                                        lineterminator='\n', delimiter=self.delimiter)
            csv_writer.writeheader()
            for row in self._data:
                csv_writer.writerow(row)

    def set_data(self, data_override):
        """Replace all rows; headers are taken from the first row's keys."""
        self._data = data_override
        self.column_headers = list(data_override[0].keys()) if data_override else []

    def set_data_from_superset(self, superset: List[dict], column_header_override=None):
        """Fill rows from *superset*, keeping only the current columns.

        Rows missing any required column are skipped entirely.
        """
        if column_header_override:
            self.column_headers = column_header_override
        data_to_set: List[dict] = []
        for row in superset:
            if not all(column in row for column in self.column_headers):
                continue
            data_to_set.append({column: row[column] for column in self.column_headers})
        self.set_data(data_to_set)

    def get_data(self, deep_copy=False) -> List[dict]:
        """Return the row dicts, optionally as a deep copy."""
        return self.get_deep_copy(self._data) if deep_copy else self._data

    @staticmethod
    def to_filename_csv(filename: str, delimiter: str = None) -> str:
        """Append a .csv/.tsv extension (matching *delimiter*) when missing."""
        if not re.match(r'.*\.(csv|tsv)', filename, re.RegexFlag.IGNORECASE):
            return filename + ('.tsv' if delimiter == '\t' else '.csv')
        return filename

    @classmethod
    def formatted_file_location(cls, location):
        """Normalise *location* to carry a csv/tsv extension."""
        return cls.to_filename_csv(location)

    def sort_data(self, sort_by_keys, reverse_sort, case_insensitive_sort):
        """Sort the in-memory rows by the given keys."""
        self._data = sort_dict(self._data, sort_by_keys, reverse_sort, case_insensitive_sort)

    @classmethod
    def create_file_with_headers(cls, filepath: str, headers: List[str], delimiter: str = None):
        """Create a new file containing only the header row."""
        with open(filepath, mode='w+', newline='', encoding=_encoding) as csv_file:
            csv_writer = csv.DictWriter(csv_file, fieldnames=headers,
                                        lineterminator='\n', delimiter=delimiter or ",")
            csv_writer.writeheader()

    @staticmethod
    def delimiter_matches_file_type(delimiter: str, filename: str) -> bool:
        """Return True when the delimiter agrees with the file extension."""
        if delimiter == '\t' and re.match(r'.*\.tsv', filename, re.RegexFlag.IGNORECASE):
            return True
        if delimiter == ',' and re.match(r'.*\.csv', filename, re.RegexFlag.IGNORECASE):
            return True
        return False
/BMI500caonia-2.0.0.tar.gz/BMI500caonia-2.0.0/README.md | # The Kmeans unsupervised clustering package
# Contents
This package is an example K-means package for the BMI500 course project at Emory University. The package automatically downloads the iris dataset collected from UCI. The dataset contains three classes of flowers, and the clustering algorithm separates the flowers into groups. It does not necessarily tell you what kind of flower each one is, but it will tell you which flowers belong to the same group.
# FAQ
## How to install?
In your command line, type "pip install BMI500caonia"
## How to use
python
from BMI500caonia import BMI500clustering
BMI500clustering.Kmeans_run(n, iteration, random_state)
(n is the number of clusters, iteration is the number of iteration, random_state is the number of random initializations)
## Running time and hardware requirement
The running time is 14 seconds on a Titan X 12 GB GPU and an Intel Iris 16 GB CPU.
## Future work
The function should be made more flexible in the future, so that users can customize additional parameters.
/HR_Neural_Networks-1.0.6.tar.gz/HR_Neural_Networks-1.0.6/HR_Neural_Networks/HR.py | import cvxpy as cp
import torch
import numpy as np
from torch.autograd import Variable
import torchattacks
import warnings
import mosek
import os
import torch.nn as nn
import time
import warnings
warnings.filterwarnings("ignore")
os.environ['MOSEKLM_LICENSE_FILE'] = "mosek.lic"
class HR_Neural_Networks:
    """Holistic-robust (HR) loss wrapper for neural-network training.

    Combines three robustness controls:
    * ``alpha_choice`` — robustness to misspecification/outliers,
    * ``r_choice`` — robustness to statistical (sampling) error,
    * ``epsilon_choice`` — adversarial (input-perturbation) robustness.

    The per-batch reweighting is computed by solving a small convex
    program (built once with CVXPY Disciplined Parametrized Programming,
    then re-solved each step with updated loss parameters).
    """

    def __init__(self, NN_model,
                 train_batch_size,
                 loss_fn,
                 normalisation_used,
                 α_choice,
                 r_choice,
                 ϵ_choice,
                 learning_approach = "HD",
                 adversarial_steps=10,
                 adversarial_step_size=0.2,
                 noise_set = "l-2",
                 defense_method = "PGD",
                 output_return = "pytorch_loss_function"
                 ):
        """Store configuration, sanitise the robustness radii and build the
        convex subproblem plus the adversarial attack object once up front.

        normalisation_used: None, or a (mean, std) pair matching the data
        normalisation so attacks operate in the original input space.
        """
        # End model
        self.NN_model = NN_model
        self.train_batch_size = train_batch_size
        self.adversarial_steps = adversarial_steps
        self.adversarial_step_size = adversarial_step_size
        # Small constant used to avoid degenerate (exactly zero) radii.
        self.numerical_eps = 0.000001
        self.noise_set = noise_set
        self.learning_approach = learning_approach
        self.output_return = output_return
        self.defense_method = defense_method

        if loss_fn == None:
            print("Loss is defaulted to cross entropy loss. Consider changing if not doing classification.")
            # reduction="none" is required: per-sample losses feed the
            # convex reweighting problem.
            self.loss_fn = nn.CrossEntropyLoss(reduction="none")
        else:
            self.loss_fn = loss_fn

        # Handling choice of α
        if α_choice == 0:
            self.α_choice = self.numerical_eps
        else:
            self.α_choice = α_choice

        # Handling choice of r
        if r_choice == 0 and α_choice != 0:
            self.r_choice = 0.001  # For numerical stability. 0 or very small values of r cause algorithm to be slow.
        else:
            self.r_choice = r_choice

        # Handling choice of epsilon. We wont set equal to numerical eps, since running PGD is very slow
        self.ϵ_choice = ϵ_choice

        # Initialising either HR or HD to be used in DPP. DPP is an approach where the decision variables,
        # constraints and problem are set up just once. Only parameters (here loss and worst-case)
        # are reinitialised at each step, which is much faster than reinstating the entire problem.
        if self.learning_approach == "HR":
            self._initialise_HR_problem()
        elif self.learning_approach == "HD":
            self._initialise_HD_problem()

        self._initialise_adversarial_setup()
        if normalisation_used == None:
            pass
        else:
            self.adversarial_attack_train.set_normalization_used(
                mean=normalisation_used[0], std=normalisation_used[1])

    def _initialise_HR_problem(self):
        """Build the HR inner-maximisation LP/conic problem (DPP form).

        Decision variable p (length N+1) reweights the N sample losses plus
        one extra mass on the worst-case loss; nn_loss and worst are CVXPY
        Parameters updated at every training step.
        """
        # The primal - inner maximisation problem.
        N = self.train_batch_size
        Pemp = 1/N * np.ones(N)  # Change for a different Pemp

        # Parameter controlling robustness to misspecification
        α = cp.Constant(self.α_choice)

        # Parameter controlling robustness to statistical error
        r = cp.Constant(self.r_choice)

        # Primal variables and constraints, indep of problem
        self.p = cp.Variable(shape=N+1, nonneg=True)
        q = cp.Variable(shape=N+1, nonneg=True)
        s = cp.Variable(shape=N, nonneg=True)

        self.nn_loss = cp.Parameter(shape=N)
        self.nn_loss.value = [1/N]*N  # Initialising

        self.worst = cp.Parameter()
        self.worst.value = 0.01  # Initialising

        # Objective function
        objective = cp.Maximize(
            cp.sum(cp.multiply(self.p[0:N], self.nn_loss)) + self.p[N] * self.worst)

        # Simplex constraints
        simplex_constraints = [cp.sum(self.p) == 1, cp.sum(q) == 1]

        # KL constr -----
        t = cp.Variable(name="t", shape=N)

        # Exponential cone constraints (model the KL divergence term)
        exc_constraints = []
        exc_constraints.append(
            cp.constraints.exponential.ExpCone(-1*t, Pemp, q[:-1]))
        # ------------------------
        extra_constraints = [cp.sum(t) <= r,
                             cp.sum(s) <= α,
                             cp.sum(s) + q[N] == self.p[N],
                             self.p[0:N] + s == q[0:N]]
        # ------------------------
        # Combining constraints to a single list
        complete_constraints = simplex_constraints + exc_constraints + extra_constraints

        # Problem definition
        self.model = cp.Problem(
            objective=objective,
            constraints=complete_constraints)

    def _initialise_HD_problem(self):
        """Build the HD variant of the inner problem (KL taken between q
        and p rather than between Pemp and q; see HR for the structure)."""
        # The primal - inner maximisation problem.
        N = self.train_batch_size
        Pemp = 1/N * np.ones(N)  # Change for a different Pemp

        # Parameter controlling robustness to misspecification
        α = cp.Constant(self.α_choice)

        # Parameter controlling robustness to statistical error
        r = cp.Constant(self.r_choice)

        # Primal variables and constraints, indep of problem
        self.p = cp.Variable(shape=N+1, nonneg=True)
        q = cp.Variable(shape=N+1, nonneg=True)
        s = cp.Variable(shape=N, nonneg=True)

        self.nn_loss = cp.Parameter(shape=N)
        self.nn_loss.value = [1/N]*N  # Initialising

        self.worst = cp.Parameter()
        self.worst.value = 0.01  # Initialising

        # Objective function
        objective = cp.Maximize(
            cp.sum(cp.multiply(self.p[0:N], self.nn_loss)) + self.p[N] * self.worst)

        # Simplex constraints
        simplex_constraints = [cp.sum(self.p) == 1, cp.sum(q) == 1]

        # KL constr -----
        t = cp.Variable(name="t", shape=N+1)

        # Exponential cone constraints (model the KL divergence term)
        exc_constraints = []
        exc_constraints.append(
            cp.constraints.exponential.ExpCone(-1*t, q, self.p))
        # ------------------------
        extra_constraints = [cp.sum(t) <= r,
                             cp.sum(s) <= α,
                             q[0:N] + s == Pemp]
        # ------------------------
        # Combining constraints to a single list
        complete_constraints = simplex_constraints + exc_constraints + extra_constraints

        # Problem definition
        self.model = cp.Problem(
            objective=objective,
            constraints=complete_constraints)

    def _initialise_adversarial_setup(self):
        """Instantiate the torchattacks attack matching noise_set and
        defense_method; stored as self.adversarial_attack_train."""
        if self.noise_set == "l-2":
            if self.defense_method == "PGD":
                self.adversarial_attack_train = torchattacks.PGDL2(self.NN_model,
                                                                   eps=self.ϵ_choice,
                                                                   alpha=self.adversarial_step_size,
                                                                   steps=self.adversarial_steps,
                                                                   random_start=True,
                                                                   eps_for_division=1e-10)
            elif self.defense_method == "FFGSM":
                raise Exception("FGSM for l-2 defense not currently supported")
        elif self.noise_set == "l-inf":
            if self.defense_method == "PGD":
                self.adversarial_attack_train = torchattacks.attacks.pgd.PGD(self.NN_model,
                                                                             eps=self.ϵ_choice,
                                                                             alpha=self.adversarial_step_size,
                                                                             steps=self.adversarial_steps,
                                                                             random_start=True)
            elif self.defense_method == "FFGSM":
                self.adversarial_attack_train = torchattacks.FFGSM(self.NN_model,
                                                                   eps=self.ϵ_choice,
                                                                   alpha=self.adversarial_step_size)

    def HR_criterion(self, inputs = None,
                     targets = None,
                     inf_loss = None,
                     device='cuda'):
        '''Solving the primal problem.
        Returning the weighted loss as a tensor Pytorch can autodiff.

        When epsilon > 0 the batch is first attacked; per-sample losses are
        then fed to the convex subproblem whose solution reweights them.
        Falls back to plain ERM when both alpha and r are negligible.
        '''
        if self.ϵ_choice > 0:
            # Generate adversarial examples and evaluate losses on them.
            adv = self.adversarial_attack_train(inputs, targets)
            outputs = self.NN_model(adv)
            inf_loss = self.loss_fn(outputs, targets)
        else:
            outputs = self.NN_model(inputs)
            inf_loss = self.loss_fn(outputs, targets)

        # May in the end be different from the training batch size,
        batch_size = len(inf_loss)
        # For instance for the last batch
        if batch_size != self.train_batch_size:  # If the batches passed are not the same length as the pre-specified
            # train_batch_size, then we need to renitialise the DPP.
            # DPP assumes certain parameters of the problem (here, N) remain fixed.
            warnings.warn(
                "Warning - changing the batch_size from the pre-specified train_batch_size can cause the algorithm to be slower.")
            self.train_batch_size = batch_size
            if self.learning_approach == "HR":
                self._initialise_HR_problem()
            elif self.learning_approach == "HD":
                self._initialise_HD_problem()

        if self.r_choice > self.numerical_eps or self.α_choice > self.numerical_eps:
            if self.output_return == 'pytorch_loss_function':
                self.nn_loss.value = np.array(inf_loss.cpu().detach().numpy())  # DPP step
            elif self.output_return == 'weights':
                self.nn_loss.value = inf_loss
            self.worst.value = np.max(self.nn_loss.value)  # DPP step
            try:
                self.model.solve(solver=cp.ECOS)
                # ECOS is normally faster than MOSEK for conic problems (it is built for this purpose),
                # but generally also more unstable.
                # We will revert to MOSEK incase of solving issues.
                # This should happen very infrequently (<1/1000 calls or so, depending on α, r)
            except:
                try:
                    self.nn_loss.value += self.numerical_eps  # Small amt of noise incase its a numerical issue
                    self.worst.value = np.max(self.nn_loss.value)  # Must also re-instate worst-case for DPP
                    self.model.solve(solver=cp.MOSEK)
                    # MOSEK is the second fastest,
                    # But also occasionally fails when α and r are too large.
                except:
                    self.model.solve(solver=cp.SCS)
                    # Last resort. Rarely needed.

            weights = Variable(torch.from_numpy(self.p.value),
                               requires_grad=True).to(torch.float32).to(device)  # Converting primal weights to tensors

            if self.output_return == "pytorch_loss_function":
                # Reweighted sample losses plus the worst-case mass p[N].
                return torch.dot(weights[0:batch_size], inf_loss) + torch.max(inf_loss)*weights[batch_size]
            elif self.output_return == 'weights':
                return weights
            else:
                raise Exception("Not a valid choice of output, please pass pytorch_loss_function if using Pytorch or weights if using another framework")
        else:  # If we use only epsilon (could be zero or not)
            if self.output_return == "pytorch_loss_function":
                return (1/self.train_batch_size)*torch.sum(inf_loss)  # ERM for Pytorch
            elif self.output_return == 'weights':
                return [1/self.train_batch_size for i in range(self.train_batch_size)]  # ERM (equal weights)
/DAQC2apps-1.000.tar.gz/DAQC2apps-1.000/piplates/Applications/QTmotor.py | import sys
import os
import subprocess
from functools import partial
try:
from PyQt4 import QtGui, QtCore, uic
except ImportError:
os.system("lxterminal -e 'python QTinstall.py'")
sys.exit()
try:
import piplates.DAQC2plate as DAQC2
except ImportError:
os.system("lxterminal -e 'python py23install.py'")
sys.exit()
class MyWindow(QtGui.QMainWindow):
    # Main window for the Pi-Plates DAQC2 dual stepper-motor controller.
    #
    # The layout is loaded from 'DAQC2Motor.ui'; the widget names used below
    # (SR1, SC1, MOVE1, ...) are defined in that .ui file.  The numeric
    # suffix 1/2 selects motor channel 1 or 2 on the DAQC2 board.
    def __init__(self):
        """Load the UI, centre the window, and wire widgets to handlers."""
        super(MyWindow, self).__init__()
        uic.loadUi('DAQC2Motor.ui', self)
        self.show()
        # Centre the window on the available desktop area.
        qr = self.frameGeometry()
        cp = QtGui.QDesktopWidget().availableGeometry().center()
        qr.moveCenter(cp)
        self.move(qr.topLeft())
        # Board address; initADDR2() scans the bus and may overwrite this.
        self.addr=0
        self.initADDR2()
        # Symbolic motor-state constants (plain ints).
        self.mOFF=0
        self.mON=1
        self.mSTOP=2
        self.mMOVE=3
        self.mJOG=4
        # Per-channel motor state, starting off.
        self.mState1=self.mOFF
        self.mState2=self.mOFF
        #init step rate (steps per second, set by the SR sliders)
        self.SRval1=1
        self.SRval2=1
        #init step counts (steps per MOVE, set by the SC sliders)
        self.SCval1=1
        self.SCval2=1
        #init directions ('cw' clockwise / 'ccw' counter-clockwise)
        self.SDval1='cw'
        self.SDval2='cw'
        #init step size ('w' = whole step, 'h' = half step)
        self.SSval1='w'
        self.SSval2='w'
        # Old-style PyQt4 signal connections for the sliders.
        self.connect(self.SR1, QtCore.SIGNAL('valueChanged(int)'), self.upDateSR1)
        self.connect(self.SR2, QtCore.SIGNAL('valueChanged(int)'), self.upDateSR2)
        self.connect(self.SC1, QtCore.SIGNAL('valueChanged(int)'), self.upDateSC1)
        self.connect(self.SC2, QtCore.SIGNAL('valueChanged(int)'), self.upDateSC2)
        # Radio buttons: partial() bakes the chosen value into each handler.
        self.WHOLE1.clicked.connect(partial(self.setSize1,value='w'))
        self.WHOLE2.clicked.connect(partial(self.setSize2,value='w'))
        self.HALF1.clicked.connect(partial(self.setSize1,value='h'))
        self.HALF2.clicked.connect(partial(self.setSize2,value='h'))
        self.CW1.clicked.connect(partial(self.setDir1,value='cw'))
        self.CW2.clicked.connect(partial(self.setDir2,value='cw'))
        self.CCW1.clicked.connect(partial(self.setDir1,value='ccw'))
        self.CCW2.clicked.connect(partial(self.setDir2,value='ccw'))
        # Action push buttons.
        self.MOVE1.clicked.connect(self.handleMove1)
        self.MOVE2.clicked.connect(self.handleMove2)
        self.JOG1.clicked.connect(self.handleJog1)
        self.JOG2.clicked.connect(self.handleJog2)
        self.STOP1.clicked.connect(self.handleStop1)
        self.STOP2.clicked.connect(self.handleStop2)
        self.OFF1.clicked.connect(self.handleOff1)
        self.OFF2.clicked.connect(self.handleOff2)
        self.HelpButton.clicked.connect(self.getHelp)
    def setDir1(self,value):
        """Set motor 1 direction ('cw'/'ccw') and push it to the board."""
        #self.chkMotor1()
        self.SDval1=value
        DAQC2.motorDIR(self.addr,1,self.SDval1)
    def setDir2(self,value):
        """Set motor 2 direction ('cw'/'ccw') and push it to the board."""
        #self.chkMotor2()
        self.SDval2=value
        DAQC2.motorDIR(self.addr,2,self.SDval2)
    def setSize1(self,value):
        """Set motor 1 step size ('w'/'h'); rate and size share one call."""
        #self.chkMotor1()
        self.SSval1=value
        DAQC2.motorRATE(self.addr,1,self.SRval1,self.SSval1)
    def setSize2(self,value):
        """Set motor 2 step size ('w'/'h'); rate and size share one call."""
        #self.chkMotor2()
        self.SSval2=value
        DAQC2.motorRATE(self.addr,2,self.SRval2,self.SSval2)
    def handleMove1(self):
        """Issue a MOVE of SCval1 steps on motor 1, once, from the OFF state."""
        # NOTE(review): mState1 is set to mON here and never reset to mOFF
        # anywhere (handleOff1 does not touch it), so MOVE appears to work
        # only once per session -- confirm whether this is intended.
        if (self.mState1==self.mOFF):
            self.mState1=self.mON
        DAQC2.motorMOVE(self.addr,1,self.SCval1)
    def handleMove2(self):
        """Issue a MOVE of SCval2 steps on motor 2 (see handleMove1 note)."""
        if (self.mState2==self.mOFF):
            self.mState2=self.mON
        DAQC2.motorMOVE(self.addr,2,self.SCval2)
    def handleJog1(self):
        """Run motor 1 continuously until STOP is pressed."""
        DAQC2.motorJOG(self.addr,1)
    def handleJog2(self):
        """Run motor 2 continuously until STOP is pressed."""
        DAQC2.motorJOG(self.addr,2)
    def handleStop1(self):
        """Halt motor 1 motion (keeps the driver energized)."""
        DAQC2.motorSTOP(self.addr,1)
    def handleStop2(self):
        """Halt motor 2 motion (keeps the driver energized)."""
        DAQC2.motorSTOP(self.addr,2)
    def handleOff1(self):
        """De-energize motor 1."""
        DAQC2.motorOFF(self.addr,1)
    def handleOff2(self):
        """De-energize motor 2."""
        DAQC2.motorOFF(self.addr,2)
    def upDateSR1(self):
        """Slider callback: update motor 1 step rate display and hardware."""
        #self.chkMotor1()
        self.SRval1=self.SR1.sliderPosition()
        self.SRD1.display(self.SRval1)
        DAQC2.motorRATE(self.addr,1,self.SRval1,self.SSval1)
    def upDateSR2(self):
        """Slider callback: update motor 2 step rate display and hardware."""
        #self.chkMotor2()
        self.SRval2=self.SR2.sliderPosition()
        self.SRD2.display(self.SRval2)
        DAQC2.motorRATE(self.addr,2,self.SRval2,self.SSval2)
    def upDateSC1(self):
        """Slider callback: update motor 1 step count (display only)."""
        #self.chkMotor1()
        self.SCval1=self.SC1.sliderPosition()
        self.SCD1.display(self.SCval1)
    def upDateSC2(self):
        """Slider callback: update motor 2 step count (display only)."""
        #self.chkMotor2()
        self.SCval2=self.SC2.sliderPosition()
        self.SCD2.display(self.SCval2)
    def chkMotor1(self):
        """Energize motor 1 on first use (currently unused; calls above
        are commented out)."""
        if (self.mState1==self.mOFF):
            self.mState1=self.mON
            DAQC2.motorON(self.addr,1)
    def chkMotor2(self):
        """Energize motor 2 on first use (currently unused)."""
        if (self.mState2==self.mOFF):
            self.mState2=self.mON
            DAQC2.motorON(self.addr,2)
    def initADDR2(self):
        """Scan addresses 0-7 for DAQC2 boards and adopt the first found."""
        addrSet=False
        addresses = [False,False,False,False,False,False,False,False]
        for i in range(8):
            tempADDR=DAQC2.getADDR(i)
            if(tempADDR==i):
                addresses[i]=True
                if(addrSet==False):
                    self.setAddr(i)
                    addrSet=True
        self.ADDRlabel.setText('Running on Address '+str(self.addr))
    def setAddr(self, value):
        """Select board *value* and cycle its motor controller off/on."""
        self.addr=value
        DAQC2.motorDISABLE(self.addr)
        DAQC2.motorENABLE(self.addr)
    def getHelp(self):
        """Open the PDF manual in xpdf (output is discarded)."""
        cmd_line = "xpdf DAQC2plateMotorControllerManual.pdf"
        p = subprocess.Popen(cmd_line, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        out = p.communicate()[0]
    def closeEvent(self,event):
        """Qt close hook: disable the motor controller before exiting."""
        DAQC2.motorDISABLE(self.addr)
if __name__ == '__main__':
    # Start the Qt event loop with the motor-controller window on screen.
    application = QtGui.QApplication(sys.argv)
    main_window = MyWindow()
    sys.exit(application.exec_())
/MyNQL-0.2.1.tar.gz/MyNQL-0.2.1/README.rst |
Install
-------
MyNQL’s source code is hosted on `GitHub <https://github.com/livinter/MyNQL>`_.
.. code-block:: bash
git clone https://github.com/livinter/MyNQL.git
python setup.py install
or just
.. code-block:: bash
pip install MyNQL
Teach the Network
-----------------
For example, if a customer makes a purchase of a product, you assume a relation between ``customer.id`` and ``product.id``,
so you connect them. Optionally, you can specify a distance between nodes to represent how closely the nodes are related.
* ``connect`` - connect two nodes
* ``delete`` - delete a connection
Nodes are created automatically when you do the connection, and removed if they do not have any more connections. So do not worry about them.
Ask the Network
---------------
Now you can query all kinds of relations, not only the ones you taught. With select you specify a starting point, like
``customer.id``, and the category in which you would like to know its closest relations.
* ``select`` - gives you the best related nodes from a specified category
The searching query takes into account all the different routes up to a radius you specify.
Example
-------
Lets imagine we already have a table *customer*
.. list-table::
:header-rows: 1
* - Id
- Name
- ..
* - 101
- jose
- ...
* - 102
- maria
- ...
* - 103
- juan
- ...
and you want to teach the network about recent purchases.
.. code-block:: python
from MyNQL import MyNQL
mynql = MyNQL('store')
mynql.connect('customer.juan', 'product.jeans')
mynql.connect('customer.juan', 'product.socks')
mynql.connect('customer.maria', 'product.socks')
If the column ``Name`` is unique you can use it as a key, otherwise you would need column ``Id``\ , and your code would look like this:
.. code-block:: python
mynql.connect('customer.103', 'product.12')
Now you can ask questions from other points of view. You always specify a starting point, and the category where you want to know the best matches:
.. code-block:: python
>>> mynql.select('customer.maria', 'product')
['socks', 'jeans']
Maria is more connected to ``socks``, as she has a direct connection, but also a bit to ``jeans``, as there exists an indirect connection through Juan.
.. code-block:: python
>>> mynql.select('product.jeans', 'product')
['socks']
Any combination is valid. For example, you can ask how one product is related to another.
Backend
-------
Storage is done in memory, but if you want to use MySQL, SQLite or PostgreSQL as a backend take a look at ``test/pee_example.py``.
This will keep a copy of all updates in your database.
| PypiClean |
/Flask-Bootstrap4-4.0.2.tar.gz/Flask-Bootstrap4-4.0.2/flask_bootstrap/__init__.py |
import re
from flask import Blueprint, current_app, url_for
# WTForms is an optional dependency: templates use this predicate to skip
# rendering hidden form fields.  Without WTForms installed, the filter
# fails loudly instead of guessing.
try:
    from wtforms.fields import HiddenField
except ImportError:
    def is_hidden_field_filter(field):
        # Stub bound when WTForms is absent; calling it is always an error.
        raise RuntimeError('WTForms is not installed.')
else:
    def is_hidden_field_filter(field):
        # True when *field* is a WTForms HiddenField instance.
        return isinstance(field, HiddenField)
from .forms import render_form
__version__ = '4.0.0'
# Matches a leading semantic version with an optional pre-release suffix,
# e.g. '4.0.0' or '4.0.0-beta'.
BOOTSTRAP_VERSION_RE = re.compile(r'(\d+\.\d+\.\d+(\-[a-z]+)?)')
# Pinned versions of the JS companion libraries served from public CDNs.
POPPER_VERSION = '1.12.9'
JQUERY_VERSION = '3.2.1'
def get_bootstrap_version(version):
    """Extract the leading Bootstrap version from *version*.

    :param version: a string beginning with ``major.minor.patch`` and an
        optional pre-release tag, e.g. ``'4.0.0'`` or ``'4.0.0-beta'``.
    :return: the matched version substring.
    :raises ValueError: if *version* does not start with a recognizable
        version string.  (Previously this surfaced as an ``AttributeError``
        because ``re.match`` returns ``None`` on failure.)
    """
    match = BOOTSTRAP_VERSION_RE.match(version)
    if match is None:
        raise ValueError('invalid Bootstrap version string: %r' % (version,))
    return match.group(1)
# Derived once at import time from the package version above.
BOOTSTRAP_VERSION = get_bootstrap_version(__version__)
class CDN(object):
    """Abstract interface that every CDN backend implements."""

    def get_resource_url(self, filename):
        """Map *filename* to a URL; concrete subclasses must override."""
        raise NotImplementedError
class StaticCDN(object):
    """Serve files through the application's own static endpoint.

    :param static_endpoint: name of the endpoint to build URLs for.
    :param rev: if ``True``, append a cache-busting version query string
        when ``BOOTSTRAP_QUERYSTRING_REVVING`` is enabled.
    """

    def __init__(self, static_endpoint='bootstrap.static', rev=False):
        self.static_endpoint = static_endpoint
        self.rev = rev

    def get_resource_url(self, filename):
        query_args = {}
        if self.rev and current_app.config['BOOTSTRAP_QUERYSTRING_REVVING']:
            # Tag the URL with the extension version so browsers re-fetch
            # assets after an upgrade instead of serving stale caches.
            query_args['bootstrap'] = __version__
        return url_for(self.static_endpoint, filename=filename, **query_args)
class WebCDN(object):
    """Serve files from an external base URL.

    :param baseurl: URL prefix to which filenames are appended verbatim.
    """

    def __init__(self, baseurl):
        self.baseurl = baseurl

    def get_resource_url(self, filename):
        return '%s%s' % (self.baseurl, filename)
class ConditionalCDN(object):
    """Delegate to one of two CDNs based on a boolean app-config switch.

    :param confvar: name of the config key consulted on every lookup.
    :param primary: CDN used while the config value is truthy.
    :param fallback: CDN used otherwise.
    """

    def __init__(self, confvar, primary, fallback):
        self.confvar = confvar
        self.primary = primary
        self.fallback = fallback

    def get_resource_url(self, filename):
        chosen = self.primary if current_app.config[self.confvar] else self.fallback
        return chosen.get_resource_url(filename)
def bootstrap_find_resource(filename, cdn, use_minified=None, local=True):
    """Resource finding function, also available in templates.

    Tries to find a resource, will force SSL depending on
    ``BOOTSTRAP_CDN_FORCE_SSL`` settings.

    :param filename: File to find a URL for.
    :param cdn: Name of the CDN to use.
    :param use_minified: If set to ``True``/``False``, use/don't use
                         minified. If ``None``, honors
                         ``BOOTSTRAP_USE_MINIFIED``.
    :param local: If ``True``, uses the ``local``-CDN when
                  ``BOOTSTRAP_SERVE_LOCAL`` is enabled. If ``False``, uses
                  the ``static``-CDN instead.
                  NOTE(review): accepted but currently unused in the body --
                  confirm whether callers rely on it.
    :return: A URL.
    """
    config = current_app.config

    if use_minified is None:  # was `None == use_minified`; identity test is the idiom
        use_minified = config['BOOTSTRAP_USE_MINIFIED']

    if use_minified:
        # Insert ".min" before the last extension, e.g. "x.js" -> "x.min.js".
        # Assumes *filename* contains at least one dot -- TODO confirm callers.
        filename = '%s.min.%s' % tuple(filename.rsplit('.', 1))

    cdns = current_app.extensions['bootstrap']['cdns']
    resource_url = cdns[cdn].get_resource_url(filename)

    # Protocol-relative URLs can be pinned to HTTPS via configuration.
    if resource_url.startswith('//') and config['BOOTSTRAP_CDN_FORCE_SSL']:
        resource_url = 'https:%s' % resource_url

    return resource_url
class Bootstrap(object):
    # Flask extension object.  Either pass the app to the constructor or
    # call init_app(app) later (application-factory pattern).
    def __init__(self, app=None):
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        """Register the 'bootstrap' blueprint, template globals/filters and
        the CDN registry on *app*."""
        # Configuration defaults; only set when the host app did not.
        app.config.setdefault('BOOTSTRAP_USE_MINIFIED', True)
        app.config.setdefault('BOOTSTRAP_CDN_FORCE_SSL', False)
        app.config.setdefault('BOOTSTRAP_QUERYSTRING_REVVING', True)
        app.config.setdefault('BOOTSTRAP_SERVE_LOCAL', False)
        app.config.setdefault('BOOTSTRAP_LOCAL_SUBDOMAIN', None)

        blueprint = Blueprint(
            'bootstrap',
            __name__,
            template_folder='templates',
            static_folder='static',
            # static_url_path=app.static_url_path + '/bootstrap',
            subdomain=app.config['BOOTSTRAP_LOCAL_SUBDOMAIN'])

        # add the form rendering template filter
        blueprint.add_app_template_filter(render_form)

        app.register_blueprint(blueprint, url_prefix='/bootstrap')

        # Expose helpers to Jinja templates.
        app.jinja_env.globals['bootstrap_is_hidden_field'] =\
            is_hidden_field_filter
        app.jinja_env.globals['bootstrap_find_resource'] =\
            bootstrap_find_resource
        app.jinja_env.add_extension('jinja2.ext.do')

        if not hasattr(app, 'extensions'):
            app.extensions = {}

        # CDN registry: 'local' serves rev-tagged files from this blueprint,
        # 'static' from the app itself; each web CDN falls back to 'local'
        # whenever BOOTSTRAP_SERVE_LOCAL is enabled.
        local = StaticCDN('bootstrap.static', rev=True)
        static = StaticCDN()

        def lwrap(cdn, primary=static):
            # Wrap *cdn* so BOOTSTRAP_SERVE_LOCAL switches to *primary*.
            return ConditionalCDN('BOOTSTRAP_SERVE_LOCAL', primary, cdn)

        popper = lwrap(
            WebCDN('//cdnjs.cloudflare.com/ajax/libs/popper.js/%s/' %
                   POPPER_VERSION), local)

        bootstrap = lwrap(
            WebCDN('//maxcdn.bootstrapcdn.com/bootstrap/%s/' %
                   BOOTSTRAP_VERSION), local)

        jquery = lwrap(
            WebCDN('//cdnjs.cloudflare.com/ajax/libs/jquery/%s/' %
                   JQUERY_VERSION), local)

        app.extensions['bootstrap'] = {
            'cdns': {
                'local': local,
                'static': static,
                'popper': popper,
                'bootstrap': bootstrap,
                'jquery': jquery,
            },
        }

        # setup support for flask-nav
        renderers = app.extensions.setdefault('nav_renderers', {})
        renderer_name = (__name__ + '.nav', 'BootstrapRenderer')
        renderers['bootstrap'] = renderer_name

        # make bootstrap the default renderer
        renderers[None] = renderer_name
/LeoX-0.6.1-py3-none-any.whl/lx/batch_lx.py | import os
import time
import subprocess
import numpy as np
import sys
import shutil
import lx.tools
##CHECKS WHETHER THE JOB IS TWO STEP###########################
def set_factor(file):
    """Return 2 when the Gaussian input *file* contains a 'Link1' directive
    (i.e. a two-step job), otherwise 1."""
    with open(file, 'r') as handle:
        for line in handle:
            if 'Link1' in line:
                return 2
    return 1
###############################################################
#SETS NUMBER OF SIMULTANEOUS JOBS##############################
def limite():
    """Read the allowed number of concurrent batches from '../limit.lx'.

    The file lives in the parent directory because the script chdirs into
    'Geometries' before the main loop runs.
    """
    return np.loadtxt('../limit.lx')
###############################################################
##MAIN LOOP####################################################
try:
    batch_file = sys.argv[1]
    num = int(sys.argv[2])  # number of inputs bundled into one batch script
    command = sys.argv[3]
    # Copy every submission shell script into Geometries so the generated
    # cmd_*.sh files can reference them locally.
    scripts = [i for i in os.listdir('.') if '.sh' in i]
    for script in scripts:
        shutil.copy(script, 'Geometries')
    os.chdir('Geometries')
    # Gaussian inputs are named like "Geometry-<n>-....com"; run them in
    # numeric order.
    inputs = [i for i in os.listdir('.') if 'Geometr' in i and '.com' in i]
    inputs = sorted(inputs, key=lambda pair: float(pair.split('-')[1]))
    factor = set_factor(inputs[0])
    inputs = lx.tools.watcher(inputs, factor)
    if len(inputs) == 0:
        print('No jobs left to run! Goodbye!')
        sys.exit()
    rodando = []  # jobs currently believed to be running
    queue, batch_num = 0, 0
    newcommand = ''
    leftover = len(inputs) % num
    for input_file in inputs:
        rodando = lx.tools.watcher(rodando, factor)
        nlim = limite()
        newcommand += '{} {} \n'.format(command, input_file)
        queue += 1
        # Flush a full batch, or the final short batch of `leftover` jobs.
        if queue == num or (queue == leftover and batch_num >= len(inputs) - leftover):
            newcommand += 'wait'
            with open('cmd_{}_.sh'.format(batch_num), 'w') as q:
                q.write(newcommand)
            subprocess.call(['bash', batch_file, 'cmd_{}_.sh'.format(batch_num)])
            queue = 0
            newcommand = ''
        rodando.append(input_file)
        batch_num += 1
        # Throttle: wait while the number of running batches is at the limit.
        while len(rodando) / num >= nlim:
            time.sleep(20)
            rodando = lx.tools.watcher(rodando, factor)
            nlim = limite()
except Exception:
    # Previously a bare `except:` swallowed every failure -- including the
    # SystemExit raised by sys.exit() above -- with no diagnostics.  Narrow
    # to Exception so clean exits propagate, and show what actually failed.
    import traceback
    traceback.print_exc()
    print('Something went wrong! Abort.')
############################################################### | PypiClean |
/ConSSL-0.0.1-py3-none-any.whl/CSSL/models/vision/segmentation.py | from argparse import ArgumentParser
import pytorch_lightning as pl
import torch
from torch.nn import functional as F
from CSSL.models.vision.unet import UNet
class SemSegment(pl.LightningModule):
    # Semantic-segmentation LightningModule wrapping a UNet.
    # NOTE(review): the dict-based 'log'/'progress_bar' returns below use a
    # legacy PyTorch Lightning API -- confirm against the pinned PL version.
    def __init__(
        self,
        lr: float = 0.01,
        num_classes: int = 19,
        num_layers: int = 5,
        features_start: int = 64,
        bilinear: bool = False
    ):
        """
        Basic model for semantic segmentation. Uses UNet architecture by default.
        The default parameters in this model are for the KITTI dataset. Note, if you'd like to use this model as is,
        you will first need to download the KITTI dataset yourself. You can download the dataset `here.
        <http://www.cvlibs.net/datasets/kitti/eval_semseg.php?benchmark=semantics2015>`_
        Implemented by:
            - `Annika Brundyn <https://github.com/annikabrundyn>`_
        Args:
            num_classes: number of output classes (default 19, KITTI)
            num_layers: number of layers in each side of U-net (default 5)
            features_start: number of features in first layer (default 64)
            bilinear: whether to use bilinear interpolation (True) or transposed convolutions (default) for upsampling.
            lr: learning rate (default 0.01)
        """
        super().__init__()
        self.num_classes = num_classes
        self.num_layers = num_layers
        self.features_start = features_start
        self.bilinear = bilinear
        self.lr = lr
        self.net = UNet(
            num_classes=num_classes,
            num_layers=self.num_layers,
            features_start=self.features_start,
            bilinear=self.bilinear
        )
    def forward(self, x):
        """Run the underlying UNet on a batch of images."""
        return self.net(x)
    def training_step(self, batch, batch_nb):
        """One optimization step: cross-entropy over the predicted mask.

        Pixels labelled 250 are treated as "ignore" and excluded from the
        loss.
        """
        img, mask = batch
        img = img.float()
        mask = mask.long()
        out = self(img)
        loss_val = F.cross_entropy(out, mask, ignore_index=250)
        log_dict = {'train_loss': loss_val}
        return {'loss': loss_val, 'log': log_dict, 'progress_bar': log_dict}
    def validation_step(self, batch, batch_idx):
        """Compute the validation loss for one batch (same loss as training)."""
        img, mask = batch
        img = img.float()
        mask = mask.long()
        out = self(img)
        loss_val = F.cross_entropy(out, mask, ignore_index=250)
        return {'val_loss': loss_val}
    def validation_epoch_end(self, outputs):
        """Average the per-batch validation losses for the epoch."""
        loss_val = torch.stack([x['val_loss'] for x in outputs]).mean()
        log_dict = {'val_loss': loss_val}
        return {'log': log_dict, 'val_loss': log_dict['val_loss'], 'progress_bar': log_dict}
    def configure_optimizers(self):
        """Adam optimizer with a cosine-annealing LR schedule (T_max=10)."""
        opt = torch.optim.Adam(self.net.parameters(), lr=self.lr)
        sch = torch.optim.lr_scheduler.CosineAnnealingLR(opt, T_max=10)
        return [opt], [sch]
    @staticmethod
    def add_model_specific_args(parent_parser):
        """Attach the model's CLI options to *parent_parser*.

        NOTE(review): --features_start is parsed as float while __init__
        annotates it as int -- confirm intended.
        """
        parser = ArgumentParser(parents=[parent_parser], add_help=False)
        parser.add_argument("--lr", type=float, default=0.01, help="adam: learning rate")
        parser.add_argument("--num_layers", type=int, default=5, help="number of layers on u-net")
        parser.add_argument("--features_start", type=float, default=64, help="number of features in first layer")
        parser.add_argument(
            "--bilinear",
            action='store_true',
            default=False,
            help="whether to use bilinear interpolation or transposed"
        )
        return parser
def cli_main():
    """Command-line entry point: train SemSegment on the KITTI datamodule."""
    from CSSL.datamodules import KittiDataModule

    pl.seed_everything(1234)

    parser = ArgumentParser()

    # trainer args
    parser = pl.Trainer.add_argparse_args(parser)

    # model args
    parser = SemSegment.add_model_specific_args(parser)

    # datamodule args
    parser = KittiDataModule.add_argparse_args(parser)

    args = parser.parse_args()

    # data
    dm = KittiDataModule(args.data_dir).from_argparse_args(args)

    # model: forward only the model-specific options.  The parsed namespace
    # also carries Trainer/datamodule flags, and SemSegment.__init__ rejects
    # unexpected keyword arguments, so `SemSegment(**args.__dict__)` raised
    # TypeError.
    model = SemSegment(
        lr=args.lr,
        num_layers=args.num_layers,
        features_start=args.features_start,
        bilinear=args.bilinear,
    )

    # train
    trainer = pl.Trainer().from_argparse_args(args)
    trainer.fit(model, datamodule=dm)
if __name__ == '__main__':
    # Allow the module to be run directly as a training script.
    cli_main()
/MetaPathways-3.1.6.tar.gz/MetaPathways-3.1.6/metapathways/scripts/MetaPathways_func_search.py | __author__ = "Kishori M Konwar"
__copyright__ = "Copyright 2020, MetaPathways"
__version__ = "3.5.0"
__maintainer__ = "Kishori M Konwar"
__status__ = "Release"
# Guarded imports: the pipeline prints a short message and exits when its
# helper modules cannot be loaded.
# NOTE(review): the bare `except:` also masks SyntaxError and interrupts
# raised during import -- confirm this blanket behavior is intended.
try:
    import sys, re, csv, traceback
    from os import path, _exit, rename
    import logging.handlers
    from optparse import OptionParser, OptionGroup
    from metapathways.utils.sysutil import pathDelim
    from metapathways.utils.metapathways_utils import (
        fprintf,
        printf,
        eprintf,
        exit_process,
    )
    from metapathways.utils.sysutil import getstatusoutput
    from metapathways.utils.pathwaytoolsutils import *
    from metapathways.utils.errorcodes import (
        error_message,
        get_error_list,
        insert_error,
    )
except:
    print(""" Could not load some user defined module functions""")
    print(""" """)
    sys.exit(3)
# Platform-specific path separator, resolved once at import time.
PATHDELIM = pathDelim()
def fprintf(file, fmt, *args):
    """printf-style helper: write ``fmt % args`` to the stream *file*."""
    text = fmt % args
    file.write(text)
def printf(fmt, *args):
    """printf-style helper: write ``fmt % args`` to standard output."""
    text = fmt % args
    sys.stdout.write(text)
def files_exist(files, errorlogger=None):
    """Check that every path in *files* exists, logging each missing one.

    NOTE: despite the name, this returns ``True`` when at least one file is
    MISSING and ``False`` when all exist (callers use it as an error flag).
    """
    missing = False
    for fname in files:
        if not path.exists(fname):
            if errorlogger:
                errorlogger.write("ERROR\tCould not find ptools input file : " + fname)
            missing = True
    return missing
# optparse usage banner; shows the invoking script name from argv[0].
usage = sys.argv[0] + """ -i input -o output [algorithm dependent options]"""
# Global OptionParser instance, populated lazily by createParser().
parser = None
def createParser():
    """Build the module-global OptionParser with BLAST and LAST option
    groups.  Must be called before main()."""
    global parser
    epilog = """This script is used for running a homology search algorithm such as BLAST or LAST
    on a set of query amino acid sequences against a target of reference protein sequences.
    Currently it supports the BLASTP and LAST algorithm. Any other homology search algorithm
    can be added by first adding the new algorithm name in upper caseusing in to the
    choices parameter in the algorithm option of this script.
    The results are put in a tabular form in the folder blast_results, with individual files
    for each of the databases. The files are named as "<samplename>.<dbname>.<algorithm>out"
    In the case of large number of amino acid sequences, this step of the computation can be
    also done using multiple grids (to use batch processing system) """
    # Collapse the indented triple-quoted block into a single line for optparse.
    epilog = re.sub(r"\s+", " ", epilog)
    parser = OptionParser(usage=usage, epilog=epilog)
    # Input options
    parser.add_option(
        "--algorithm",
        dest="algorithm",
        default="BLAST",
        choices=["BLAST", "LAST"],
        help="the homology search algorithm",
    )
    # Options consumed by _execute_BLAST().
    blast_group = OptionGroup(parser, "BLAST parameters")
    blast_group.add_option(
        "--blast_query",
        dest="blast_query",
        default=None,
        help="Query amino acid sequences for BLASTP",
    )
    blast_group.add_option(
        "--blast_db",
        dest="blast_db",
        default=None,
        help="Target reference database sequenes for BLASTP",
    )
    blast_group.add_option(
        "--blast_out", dest="blast_out", default=None, help="BLAST output file"
    )
    blast_group.add_option(
        "--blast_outfmt",
        dest="blast_outfmt",
        default="6",
        help="BLASTP output format [default 6, tabular]",
    )
    blast_group.add_option(
        "--blast_evalue",
        dest="blast_evalue",
        default=None,
        help="The e-value cutoff for the BLASTP",
    )
    blast_group.add_option(
        "--num_threads",
        dest="num_threads",
        default="1",
        type="str",
        help="Number of BLAST threads",
    )
    blast_group.add_option(
        "--blast_max_target_seqs",
        dest="blast_max_target_seqs",
        default=None,
        help="Maximum number of target hits per query",
    )
    blast_group.add_option(
        "--blast_executable",
        dest="blast_executable",
        default=None,
        help="The BLASTP executable",
    )
    blast_group.add_option(
        "--num_hits",
        dest="num_hits",
        default="10",
        type="str",
        help="The BLASTP executable",
    )
    parser.add_option_group(blast_group)
    # Options consumed by _execute_LAST().
    last_group = OptionGroup(parser, "LAST parameters")
    last_group.add_option(
        "--last_query",
        dest="last_query",
        default=None,
        help="Query amino acid sequences for LAST",
    )
    last_group.add_option(
        "--last_db",
        dest="last_db",
        default=None,
        help="Target reference database sequenes for LAST",
    )
    last_group.add_option(
        "--last_f",
        dest="last_f",
        default="0",
        help="LAST output format [default 0, tabular]",
    )
    last_group.add_option(
        "--last_o", dest="last_o", default=None, help="LAST output file"
    )
    last_group.add_option(
        "--last_executable",
        dest="last_executable",
        default=None,
        help="The LAST executable",
    )
    parser.add_option_group(last_group)
def main(argv, errorlogger=None, runcommand=None, runstatslogger=None):
    """Dispatch *argv* to the selected search backend and report failures.

    Returns 0 on success, -1 for an unknown algorithm, or the backend's
    non-zero status on failure.
    """
    global parser
    options, args = parser.parse_args(argv)

    if options.algorithm == "BLAST":
        (code, message) = _execute_BLAST(options, logger=errorlogger)
    elif options.algorithm == "LAST":
        (code, message) = _execute_LAST(options, logger=errorlogger)
    else:
        eprintf("ERROR\tUnrecognized algorithm name for FUNC_SEARCH\n")
        if errorlogger:
            errorlogger.printf("ERROR\tUnrecognized algorithm name for FUNC_SEARCH\n")
        return -1

    if code != 0:
        # Compose a multi-line advisory for the user/log.  (A stray call
        # here used to re-run _execute_LAST while building this message.)
        a = "\nERROR\tCannot successfully execute the %s for FUNC_SEARCH\n" % (
            options.algorithm
        )
        b = "ERROR\t%s\n" % (message)
        c = "INFO\tDatabase you are searching against may not be formatted correctly (if it was formatted for an earlier version) \n"
        d = "INFO\tTry removing the files for that database in 'formatted' subfolder for MetaPathways to trigger reformatting \n"
        if options.algorithm == "BLAST":
            e = "INFO\tYou can remove as 'rm %s.*','\n" % (options.blast_db)
        if options.algorithm == "LAST":
            e = "INFO\tYou can remove as 'rm %s.*','\n" % (options.last_db)
        f = "INFO\tIf removing the files did not work then format it manually (see manual)"
        outputStr = a + b + c + d + e + f
        eprintf(outputStr + "\n")
        if errorlogger:
            errorlogger.printf(outputStr + "\n")
        return code
    return 0
def _execute_LAST(options, logger=None):
    """Assemble and run a lastal command line from *options*.

    Output is written to '<last_o>.tmp' and renamed on success so a failed
    run never leaves a truncated result file behind.

    :return: ``(status, output)`` from the run, or ``(1, message)`` on error.
    """
    args = []
    if options.last_executable:
        args.append(options.last_executable)
    if options.last_f:
        args += ["-f", options.last_f]
    if options.last_o:
        args += ["-o", options.last_o + ".tmp"]
    if options.num_threads:
        args += ["-P", options.num_threads]
    args += ["-K", options.num_hits]  # was " -K" with a stray leading space
    if options.last_db:
        args += [options.last_db]
    if options.last_query:
        args += [options.last_query]

    result = None
    print(" ".join(args))
    try:
        result = getstatusoutput(" ".join(args))
        rename(options.last_o + ".tmp", options.last_o)
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # propagate; report the tool's output when we captured any.
        message = "Could not run LASTAL correctly"
        if result and len(result) > 1:
            message = result[1]
        if logger:
            logger.printf("ERROR\t%s\n", message)
        return (1, message)
    return (result[0], result[1])
def _execute_BLAST(options, logger=None):
    """Assemble and run a blastp command line from *options*.

    Output goes to '<blast_out>.tmp' and is renamed on success so a failed
    run never leaves a truncated result file behind.

    :return: ``(status, output)`` from the run, or
        ``(1, "Cannot execute BLAST successfully")`` on error.
    """
    args = []
    if options.blast_executable:
        args.append(options.blast_executable)
    if options.blast_max_target_seqs:
        args += ["-max_target_seqs", options.blast_max_target_seqs]
    if options.num_threads:
        args += ["-num_threads", options.num_threads]
    if options.blast_outfmt:
        args += ["-outfmt", options.blast_outfmt]
    if options.blast_db:
        args += ["-db", options.blast_db]
    if options.blast_query:
        args += ["-query", options.blast_query]
    if options.blast_evalue:
        args += ["-evalue", options.blast_evalue]
    if options.blast_out:
        args += ["-out", options.blast_out + ".tmp"]

    try:
        result = getstatusoutput(" ".join(args))
        rename(options.blast_out + ".tmp", options.blast_out)
    except Exception:
        # Narrowed from a bare except so interrupts propagate.
        return (1, "Cannot execute BLAST successfully")
    return (result[0], result[1])
def MetaPathways_func_search(
    argv, extra_command=None, errorlogger=None, runstatslogger=None
):
    """Pipeline entry point for the FUNC_SEARCH step.

    Logs the step marker, builds the parser, and runs main().  Always
    returns ``(0, "")`` to the pipeline; failures are recorded through
    insert_error(4) instead of being raised.
    """
    if errorlogger is not None:  # was `!= None`; identity test is the idiom
        errorlogger.write("#STEP\tFUNC_SEARCH\n")
    createParser()
    try:
        main(
            argv,
            errorlogger=errorlogger,
            runcommand=extra_command,
            runstatslogger=runstatslogger,
        )
    except Exception:
        # Narrowed from a bare except; record the step failure code.
        insert_error(4)
        return (0, "")
    return (0, "")
if __name__ == "__main__":
createParser()
main(sys.argv[1:]) | PypiClean |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.